Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / huron / src / mmu.s
/*
* ========== Copyright Header Begin ==========================================
*
* Hypervisor Software File: mmu.s
*
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
*
* - Do not alter or remove copyright notices
*
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of Sun Microsystems, Inc. nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
*
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* any nuclear facility.
*
* ========== Copyright Header End ============================================
*/
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

	.ident	"@(#)mmu.s	1.6	07/07/11 SMI"

/*
 * Niagara2 MMU code
 */
#include <sys/asm_linkage.h>
#include <hprivregs.h>
#include <asi.h>
#include <traps.h>
#include <mmu.h>
#include <sun4v/traps.h>
#include <sun4v/mmu.h>

#include <guest.h>
#include <offsets.h>
#include <debug.h>
#include <util.h>
#include <error_regs.h>
#include <error_asm.h>

	! %g1 cpup
	! %g2 8k-aligned real addr from tag access
	ENTRY_NP(rdmmu_miss)
	! offset handling
	! XXX if hypervisor access then panic instead of watchdog_guest
	VCPU2GUEST_STRUCT(%g1, %g7)
	set	8 KB, %g5
	RA2PA_RANGE_CONV(%g7, %g2, %g5, 1f, %g4, %g3)
	! %g3 PA
2:
	! tte valid, cp, writable, priv
	mov	1, %g2
	sllx	%g2, 63, %g2
	or	%g2, TTE_CP | TTE_P | TTE_W, %g2
	or	%g3, %g2, %g3
	mov	TLB_IN_REAL, %g2		! Real bit
	stxa	%g3, [%g2]ASI_DTLB_DATA_IN
	retry

1:
	RANGE_CHECK_IO(%g7, %g2, %g5, .rdmmu_miss_found, .rdmmu_miss_not_found, \
	    %g3, %g4)
.rdmmu_miss_found:
	mov	%g2, %g3

	! tte valid, e, writable, priv
	mov	1, %g2
	sllx	%g2, 63, %g2
	or	%g2, TTE_E | TTE_P | TTE_W, %g2
	or	%g3, %g2, %g3
	mov	TLB_IN_REAL, %g2		! Real bit
	stxa	%g3, [%g2]ASI_DTLB_DATA_IN
	retry

	ALTENTRY(rdmmu_miss_not_found2)
.rdmmu_miss_not_found:
1:
	! %g2 real address
	! LEGION_GOT_HERE
	wrpr	%g0, TT_DAX, %tt
	mov	MMU_FT_INVALIDRA, %g1
	ba,pt	%xcc, dmmu_err_common	! (%g1=ft, %g2=addr, %g3=ctx)
	mov	0, %g3
	SET_SIZE(rdmmu_miss)
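/*
 * In outline, rdmmu_miss does the following (a pseudocode sketch in the
 * style of the comments used elsewhere in this file; the helper names
 * are illustrative, not a real C API):
 *
 *	if (ra_in_memory_range(guest, ra, &pa))		// RA2PA_RANGE_CONV
 *		tte = TTE_V | pa | TTE_CP | TTE_P | TTE_W;
 *	else if (ra_in_io_range(guest, ra, &pa))	// RANGE_CHECK_IO
 *		tte = TTE_V | pa | TTE_E | TTE_P | TTE_W;
 *	else
 *		revector(TT_DAX, MMU_FT_INVALIDRA);	// dmmu_err_common
 *	dtlb_load_real(tte);	// stxa ASI_DTLB_DATA_IN, real translation
 *	retry();
 *
 * rimmu_miss below is the instruction-side analogue (I-TLB, TT_IAX,
 * immu_err_common).
 */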

	! %g1 cpup
	! %g2 8k-aligned real addr from tag access

	ENTRY_NP(rimmu_miss)
	mov	MMU_TAG_ACCESS, %g2
	ldxa	[%g2]ASI_IMMU, %g2		/* tag access */
	set	((1 << 13) - 1), %g3
	andn	%g2, %g3, %g2

	VCPU2GUEST_STRUCT(%g1, %g3)
	RA2PA_RANGE_CONV(%g3, %g2, %g0, 1f, %g4, %g1)
	! %g1 PA


	! tte valid, cp, writable, priv
	mov	1, %g2
	sllx	%g2, 63, %g2
	or	%g2, TTE_CP | TTE_P | TTE_W, %g2
	or	%g1, %g2, %g1
	mov	TLB_IN_REAL, %g2		! Real bit
	stxa	%g1, [%g2]ASI_ITLB_DATA_IN
	retry

1:
	! %g2 real address
	! LEGION_GOT_HERE
	wrpr	%g0, TT_IAX, %tt
	mov	MMU_FT_INVALIDRA, %g1
	ba,pt	%xcc, immu_err_common	! (%g1=ft, %g2=addr, %g3=ctx)
	mov	0, %g3
	SET_SIZE(rimmu_miss)

	/*
	 * Normal tlb miss handlers
	 *
	 * Guest miss area:
	 *
	 * NB: If it's possible to context switch a guest then
	 * the tag access register (tag target too?) needs to
	 * be saved/restored.
	 */

	/* %g1 contains per CPU area */
	/* %g3 contains immu tag target */
	ENTRY_NP(immu_miss_ctx0)

	VCPU2GUEST_STRUCT(%g1, %g6)
	add	%g6, GUEST_PERM_MAPPINGS_LOCK, %g2
	SPINLOCK_ENTER(%g2, %g3, %g4)

	/*
	 * Look for a possible miss on a permanent entry.
	 * Note that the permanent mapping can have one of
	 * three states :-
	 *
	 * valid - TTE.V != 0. This is a valid mapping, check for
	 *	a match. If not a match, continue the search
	 *	with the next permanent mapping from the array.
	 *	If it is a match, we have a hit, update the TLB
	 *	and retry.
	 *
	 * invalid - TTE != 0 && TTE.V == 0. This is a TTE which has
	 *	been used for a permanent mapping but has been
	 *	subsequently unmapped, setting the TTE.V bit to 0.
	 *	This is not a match, continue the search
	 *	with the next permanent mapping from the array.
	 *
	 * invalid - TTE == 0 && TTE.V == 0. This is a TTE which is
	 *	still uninitialised and has never been used for a
	 *	permanent mapping. This means that the other
	 *	entries in the permanent mapping array are also
	 *	unused (as we always use the first available
	 *	permanent mapping array element for a mapping) so
	 *	we can stop searching for a permanent mapping now,
	 *	break out of the loop.
	 */
	mov	MMU_TAG_ACCESS, %g1
	ldxa	[%g1]ASI_IMMU, %g1
	add	%g6, GUEST_PERM_MAPPINGS, %g2
	mov	((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3

	/*
	 * for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
	 *	if (!table[i]->tte.v) {
	 *		continue;
	 *	}
	 *	shift = TTE_PAGE_SHIFT(table[i]->tte);
	 *	if ((table[i]->va >> shift) == (va >> shift)) {
	 *		break;
	 *	}
	 * }
	 */
.ipmap_loop:
	!! %g1 = tag access
	!! %g2 = permanent mapping table base address
	!! %g3 = current offset into table
	!! %g5 = matching entry
	add	%g2, %g3, %g5

	/*
	 * if (!tte) {
	 *	uninitialised, no more mappings, miss;
	 * }
	 * if (!tte.v) {
	 *	initialised but invalid, get next, continue;
	 * }
	 */
	ldx	[%g5 + MAPPING_TTE], %g6
	brlz,pt	%g6, 1f				! TTE.V == 1
	nop
	brz,pt	%g6, .ipmap_miss		! TTE == 0
	nop
	ba,pt	%xcc, .ipmap_continue		! TTE != 0 && TTE.V == 0
	deccc	GUEST_PERM_MAPPINGS_INCR, %g3
1:
	/*
	 * (valid TTE, check for hit)
	 * shift = TTE_PAGE_SHIFT(m->tte);
	 * if ((m->va >> shift) == (va >> shift)) {
	 *	break;
	 * }
	 */
	TTE_SHIFT_NOCHECK(%g6, %g7, %g4)
	ldx	[%g5 + MAPPING_VA], %g6
	srlx	%g6, %g7, %g6
	srlx	%g1, %g7, %g7
	cmp	%g6, %g7
	be,a,pt	%xcc, .ipmap_hit
	ldx	[%g5 + MAPPING_TTE], %g5
	! not a match
	deccc	GUEST_PERM_MAPPINGS_INCR, %g3
	/* FALLTHRU */
.ipmap_continue:
	bgeu,pt	%xcc, .ipmap_loop
	nop

	ba,a,pt	%xcc, .ipmap_miss
	nop

.ipmap_hit:
	!! %g5 = tte (with pa) of matching entry

	GUEST_STRUCT(%g6)

	stxa	%g5, [%g0]ASI_ITLB_DATA_IN
	inc	GUEST_PERM_MAPPINGS_LOCK, %g6
	SPINLOCK_EXIT(%g6)
	retry

.ipmap_miss:
	VCPU_GUEST_STRUCT(%g1, %g6)
	inc	GUEST_PERM_MAPPINGS_LOCK, %g6
	SPINLOCK_EXIT(%g6)

	rdpr	%gl, %g2
	ba,pt	%xcc, immu_miss_common
	ldxa	[%g0]ASI_IMMU, %g3		/* tag target */
	SET_SIZE(immu_miss_ctx0)
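/*
 * The ctx0 handler above, in outline (helper names are illustrative):
 *
 *	spinlock_enter(&guest->perm_mappings_lock);
 *	m = perm_mapping_lookup(tag_access);		// loop above
 *	if (m != NULL) {
 *		itlb_load(m->tte);
 *		spinlock_exit(&guest->perm_mappings_lock);
 *		retry();
 *	}
 *	spinlock_exit(&guest->perm_mappings_lock);
 *	immu_miss_common();	// no permanent mapping; normal miss path
 *
 * dmmu_miss_ctx0 below is identical in structure for the D-side.
 */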

	/* %g1 contains per CPU area */
	/* %g2 contains %gl */
	/* %g3 contains immu tag target */
	ENTRY_NP(immu_miss_common)
	ALTENTRY(immu_miss_ctxnon0)
	cmp	%g2, MAXPGL
	bgu,pn	%xcc, watchdog_guest		/* enforce %gl <= MAXPGL */
	ldx	[%g1 + CPU_MMU_AREA], %g2
	brz,pn	%g2, watchdog_guest		/* enforce CPU_MMU_AREA != 0 */
	nop

	srlx	%g3, TAGTRG_CTX_RSHIFT, %g4	/* ctx from tag target */

	/* if ctx == 0 and the ctx0 set of TSBs is in use, take the slow trap */
	/* if ctx != 0 and the ctxnon0 set of TSBs is in use, take the slow trap */
	mov	CPU_NTSBS_CTXN, %g7
	movrz	%g4, CPU_NTSBS_CTX0, %g7
	ldx	[%g1 + %g7], %g7
	brnz,pn	%g7, .islowmiss
	nop
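	/*
	 * i.e., a sketch of the check above:
	 *
	 *	ntsbs = (ctx == 0) ? cpu->ntsbs_ctx0 : cpu->ntsbs_ctxn;
	 *	if (ntsbs != 0)
	 *		goto slowmiss;	// guest configured TSBs; slow trap
	 *
	 * (dmmu_miss_common below performs the same check.)
	 */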

.ifastmiss:
	/* update MMU_FAULT_AREA_INSTR */
#ifdef TSBMISS_ALIGN_ADDR
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_IMMU, %g3		/* tag access */
	set	(NCTXS - 1), %g5
	andn	%g3, %g5, %g4
	and	%g3, %g5, %g3
	stx	%g4, [%g2 + MMU_FAULT_AREA_IADDR]
	stx	%g3, [%g2 + MMU_FAULT_AREA_ICTX]
#else /* !TSBMISS_ALIGN_ADDR */
	mov	MMU_TAG_TARGET, %g3
	ldxa	[%g3]ASI_IMMU, %g3		/* tag target */
	srlx	%g3, 48, %g3
	stx	%g3, [%g2 + MMU_FAULT_AREA_ICTX]
	rdpr	%tpc, %g4
	stx	%g4, [%g2 + MMU_FAULT_AREA_IADDR]
#endif /* !TSBMISS_ALIGN_ADDR */
	/* fast misses do not update MMU_FAULT_AREA_IFT with MMU_FT_FASTMISS */
	wrpr	%g0, TT_FAST_IMMU_MISS, %tt
	rdpr	%tba, %g3
	add	%g3, (TT_FAST_IMMU_MISS << TT_OFFSET_SHIFT), %g3
7:
	rdpr	%tl, %g2
	cmp	%g2, 1			/* trap happened at TL=0 */
	be,pt	%xcc, 1f
	.empty
	set	TRAPTABLE_SIZE, %g5

	cmp	%g2, MAXPTL
	bgu	watchdog_guest
	add	%g5, %g3, %g3

1:
	TRAP_GUEST(%g3, %g1, %g2)
	/*NOTREACHED*/

.islowmiss:
	/* update MMU_FAULT_AREA_INSTR */
#ifdef TSBMISS_ALIGN_ADDR
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_IMMU, %g3		/* tag access */
	set	(NCTXS - 1), %g5
	andn	%g3, %g5, %g4
	and	%g3, %g5, %g3
	stx	%g4, [%g2 + MMU_FAULT_AREA_IADDR]
	stx	%g3, [%g2 + MMU_FAULT_AREA_ICTX]
#else /* !TSBMISS_ALIGN_ADDR */
	mov	MMU_TAG_TARGET, %g3
	ldxa	[%g3]ASI_IMMU, %g3		/* tag target */
	srlx	%g3, 48, %g3
	stx	%g3, [%g2 + MMU_FAULT_AREA_ICTX]
	rdpr	%tpc, %g4
	stx	%g4, [%g2 + MMU_FAULT_AREA_IADDR]
#endif /* !TSBMISS_ALIGN_ADDR */
	mov	MMU_FT_MISS, %g4
	stx	%g4, [%g2 + MMU_FAULT_AREA_IFT]
	wrpr	%g0, TT_IMMU_MISS, %tt
	rdpr	%pstate, %g3
	or	%g3, PSTATE_PRIV, %g3
	wrpr	%g3, %pstate
	rdpr	%tba, %g3
	add	%g3, (TT_IMMU_MISS << TT_OFFSET_SHIFT), %g3
	ba,a	7b
	nop
	SET_SIZE(immu_miss_common)
	SET_SIZE(immu_miss_ctxnon0)
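/*
 * Both the fast and slow paths above funnel through the vectoring code
 * at local label 7, which in effect computes (a sketch; trap_guest()
 * stands in for the TRAP_GUEST macro):
 *
 *	if (tl > 1 && tl > MAXPTL)
 *		watchdog_guest();
 *	dest = tba + (tt << TT_OFFSET_SHIFT)
 *	    + (tl > 1 ? TRAPTABLE_SIZE : 0);	// TL>0 half of trap table
 *	trap_guest(dest);
 *
 * with tt = TT_FAST_IMMU_MISS (fast) or TT_IMMU_MISS (slow); the slow
 * path also records MMU_FT_MISS and raises PSTATE.PRIV first.
 * dmmu_miss_common below follows the same structure for data misses.
 */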

	/* %g1 contains per CPU area */
	/* %g3 contains dmmu tag target */
	ENTRY_NP(dmmu_miss_ctx0)

	VCPU2GUEST_STRUCT(%g1, %g6)
	add	%g6, GUEST_PERM_MAPPINGS_LOCK, %g2
	SPINLOCK_ENTER(%g2, %g3, %g4)

	/*
	 * Look for a possible miss on a permanent entry.
	 * Note that the permanent mapping can have one of
	 * three states :-
	 *
	 * valid - TTE.V != 0. This is a valid mapping, check for
	 *	a match. If not a match, continue the search
	 *	with the next permanent mapping from the array.
	 *	If it is a match, we have a hit, update the TLB
	 *	and retry.
	 *
	 * invalid - TTE != 0 && TTE.V == 0. This is a TTE which has
	 *	been used for a permanent mapping but has been
	 *	subsequently unmapped, setting the TTE.V bit to 0.
	 *	This is not a match, continue the search
	 *	with the next permanent mapping from the array.
	 *
	 * invalid - TTE == 0 && TTE.V == 0. This is a TTE which is
	 *	still uninitialised and has never been used for a
	 *	permanent mapping. This means that the other
	 *	entries in the permanent mapping array are also
	 *	unused (as we always use the first available
	 *	permanent mapping array element for a mapping) so
	 *	we can stop searching for a permanent mapping now,
	 *	break out of the loop.
	 */
	mov	MMU_TAG_ACCESS, %g1
	ldxa	[%g1]ASI_DMMU, %g1
	add	%g6, GUEST_PERM_MAPPINGS, %g2
	mov	((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3

	/*
	 * for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
	 *	if (!table[i]->tte) {
	 *		uninitialised, no more mappings, miss;
	 *	}
	 *	if (!table[i]->tte.v) {
	 *		initialised but invalid, get next, continue;
	 *	}
	 *	(valid TTE, check for hit)
	 *	shift = TTE_PAGE_SHIFT(table[i]->tte);
	 *	if ((table[i]->va >> shift) == (va >> shift)) {
	 *		break;
	 *	}
	 * }
	 */
.dpmap_loop:
	!! %g1 = tag access
	!! %g2 = permanent mapping table base address
	!! %g3 = current offset into table
	!! %g5 = matching entry
	add	%g2, %g3, %g5

	/*
	 * if (!tte) {
	 *	uninitialised, no more mappings, miss;
	 * }
	 * if (!tte.v) {
	 *	initialised but invalid, get next, continue;
	 * }
	 */
	ldx	[%g5 + MAPPING_TTE], %g6
	brlz,pt	%g6, 1f				! TTE.V == 1
	nop
	brz,pt	%g6, .dpmap_miss		! TTE == 0
	nop
	ba,pt	%xcc, .dpmap_continue		! TTE != 0 && TTE.V == 0
	deccc	GUEST_PERM_MAPPINGS_INCR, %g3
1:
	/*
	 * shift = TTE_PAGE_SHIFT(m->tte);
	 * if ((m->va >> shift) == (va >> shift)) {
	 *	break;
	 * }
	 */
	TTE_SHIFT_NOCHECK(%g6, %g7, %g4)
	ldx	[%g5 + MAPPING_VA], %g6
	srlx	%g6, %g7, %g6
	srlx	%g1, %g7, %g7
	cmp	%g6, %g7
	be,a,pt	%xcc, .dpmap_hit
	ldx	[%g5 + MAPPING_TTE], %g5
	! not a match
	deccc	GUEST_PERM_MAPPINGS_INCR, %g3
	/* FALLTHRU */

.dpmap_continue:
	bgeu,pt	%xcc, .dpmap_loop
	nop

	ba,a,pt	%xcc, .dpmap_miss
	nop

.dpmap_hit:
	!! %g5 = tte (with pa) of matching entry

	GUEST_STRUCT(%g6)

	stxa	%g5, [%g0]ASI_DTLB_DATA_IN
	inc	GUEST_PERM_MAPPINGS_LOCK, %g6
	SPINLOCK_EXIT(%g6)
	retry

.dpmap_miss:
	VCPU_GUEST_STRUCT(%g1, %g6)
	inc	GUEST_PERM_MAPPINGS_LOCK, %g6
	SPINLOCK_EXIT(%g6)

	rdpr	%gl, %g2
	ba,pt	%xcc, dmmu_miss_common
	ldxa	[%g0]ASI_DMMU, %g3		/* tag target */
	SET_SIZE(dmmu_miss_ctx0)

	/* %g1 contains per CPU area */
	/* %g2 contains %gl */
	/* %g3 contains dmmu tag target */
	ENTRY_NP(dmmu_miss_common)
	ALTENTRY(dmmu_miss_ctxnon0)
	cmp	%g2, MAXPGL
	bgu,pn	%xcc, watchdog_guest		/* enforce %gl <= MAXPGL */
	ldx	[%g1 + CPU_MMU_AREA], %g2
	brz,pn	%g2, watchdog_guest		/* enforce CPU_MMU_AREA != 0 */
	nop

	srlx	%g3, TAGTRG_CTX_RSHIFT, %g4	/* ctx from tag target */

	/* if ctx == 0 and the ctx0 set of TSBs is in use, take the slow trap */
	/* if ctx != 0 and the ctxnon0 set of TSBs is in use, take the slow trap */
	mov	CPU_NTSBS_CTXN, %g7
	movrz	%g4, CPU_NTSBS_CTX0, %g7
	ldx	[%g1 + %g7], %g7
	brnz,pn	%g7, .dslowmiss
	nop

.dfastmiss:
	/* update MMU_FAULT_AREA_DATA */
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_DMMU, %g3		/* tag access */
	set	(NCTXS - 1), %g5
	andn	%g3, %g5, %g4
	and	%g3, %g5, %g5
	stx	%g4, [%g2 + MMU_FAULT_AREA_DADDR]
	stx	%g5, [%g2 + MMU_FAULT_AREA_DCTX]
	/* fast misses do not update MMU_FAULT_AREA_DFT with MMU_FT_FASTMISS */
	wrpr	%g0, TT_FAST_DMMU_MISS, %tt
	rdpr	%tba, %g3
	add	%g3, (TT_FAST_DMMU_MISS << TT_OFFSET_SHIFT), %g3
7:
	rdpr	%tl, %g2
	cmp	%g2, 1			/* trap happened at TL=0 */
	be,pt	%xcc, 1f
	.empty
	set	TRAPTABLE_SIZE, %g5

	cmp	%g2, MAXPTL
	bgu	watchdog_guest
	add	%g5, %g3, %g3

1:
	TRAP_GUEST(%g3, %g1, %g2)
	/*NOTREACHED*/

.dslowmiss:
	/* update MMU_FAULT_AREA_DATA */
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_DMMU, %g3		/* tag access */
	set	(NCTXS - 1), %g5
	andn	%g3, %g5, %g4
	and	%g3, %g5, %g5
	stx	%g4, [%g2 + MMU_FAULT_AREA_DADDR]
	stx	%g5, [%g2 + MMU_FAULT_AREA_DCTX]
	mov	MMU_FT_MISS, %g4
	stx	%g4, [%g2 + MMU_FAULT_AREA_DFT]
	wrpr	%g0, TT_DMMU_MISS, %tt
	rdpr	%pstate, %g3
	or	%g3, PSTATE_PRIV, %g3
	wrpr	%g3, %pstate
	rdpr	%tba, %g3
	add	%g3, (TT_DMMU_MISS << TT_OFFSET_SHIFT), %g3
	ba,a	7b
	nop
	SET_SIZE(dmmu_miss_common)
	SET_SIZE(dmmu_miss_ctxnon0)

	/* %g2 contains guest's miss info pointer (hv phys addr) */
	ENTRY_NP(dmmu_prot)
	/*
	 * Update MMU_FAULT_AREA_DATA
	 */
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_DMMU, %g3		/* tag access */
	set	(NCTXS - 1), %g5
	andn	%g3, %g5, %g4
	and	%g3, %g5, %g5
	stx	%g4, [%g2 + MMU_FAULT_AREA_DADDR]
	stx	%g5, [%g2 + MMU_FAULT_AREA_DCTX]
	/* fast protection faults do not update MMU_FAULT_AREA_DFT with MMU_FT_FASTPROT */
	wrpr	%g0, TT_FAST_DMMU_PROT, %tt	/* already set? XXXQ */
	rdpr	%tba, %g3
	add	%g3, (TT_FAST_DMMU_PROT << TT_OFFSET_SHIFT), %g3

	rdpr	%tl, %g2
	cmp	%g2, 1			/* trap happened at TL=0 */
	be,pt	%xcc, 1f
	.empty
	set	TRAPTABLE_SIZE, %g5

	cmp	%g2, MAXPTL
	bgu,pn	%xcc, watchdog_guest
	add	%g5, %g3, %g3

1:
	TRAP_GUEST(%g3, %g1, %g2)
	/*NOTREACHED*/
	SET_SIZE(dmmu_prot)
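/*
 * dmmu_prot above is, in effect (a sketch):
 *
 *	fault_area->daddr = tag_access & ~(NCTXS - 1);
 *	fault_area->dctx = tag_access & (NCTXS - 1);
 *	dest = tba + (TT_FAST_DMMU_PROT << TT_OFFSET_SHIFT)
 *	    + (tl > 1 ? TRAPTABLE_SIZE : 0);
 *	trap_guest(dest);
 */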


/*
 * Set all TSB base registers to the dummy TSB and store a copy of
 * each value in strand.mra[0..7] (ctx0 registers in mra[0..3],
 * ctxN registers in mra[4..7]).
 *
 * in:
 *	%g7	return address
 *
 * volatile:
 *	%g1 - %g3
 */
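/*
 * Equivalent pseudocode for the two routines below (names are
 * illustrative, not a real C API):
 *
 *	dummy = config->dummytsb;		// CONFIG_DUMMYTSB
 *	for (i = 0; i < 4; i++) {
 *		tsb_cfg_write(cfg0 + i, dummy);	// stxa ASI_MMU_TSB
 *		strand->mra[first + i] = dummy;
 *	}
 *
 * with cfg0/first = TSB_CFG_CTX0_0/0 for set_dummytsb_ctx0 and
 * TSB_CFG_CTXN_0/4 for set_dummytsb_ctxN.
 */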
	ENTRY_NP(set_dummytsb_ctx0)
	ROOT_STRUCT(%g1)
	ldx	[%g1 + CONFIG_DUMMYTSB], %g1
	STRAND_STRUCT(%g2)
	add	%g2, STRAND_MRA, %g3

	mov	TSB_CFG_CTX0_0, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3]
	mov	TSB_CFG_CTX0_1, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3 + (STRAND_MRA_INCR * 1)]
	mov	TSB_CFG_CTX0_2, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3 + (STRAND_MRA_INCR * 2)]
	mov	TSB_CFG_CTX0_3, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3 + (STRAND_MRA_INCR * 3)]

	HVRET
	SET_SIZE(set_dummytsb_ctx0)

	ENTRY_NP(set_dummytsb_ctxN)
	ROOT_STRUCT(%g1)
	ldx	[%g1 + CONFIG_DUMMYTSB], %g1
	STRAND_STRUCT(%g2)
	add	%g2, STRAND_MRA, %g3

	mov	TSB_CFG_CTXN_0, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3 + (STRAND_MRA_INCR * 4)]
	mov	TSB_CFG_CTXN_1, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3 + (STRAND_MRA_INCR * 5)]
	mov	TSB_CFG_CTXN_2, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3 + (STRAND_MRA_INCR * 6)]
	mov	TSB_CFG_CTXN_3, %g2
	stxa	%g1, [%g2]ASI_MMU_TSB
	stx	%g1, [%g3 + (STRAND_MRA_INCR * 7)]
	HVRET
	SET_SIZE(set_dummytsb_ctxN)


/*
 * Initialize hardware tablewalk configuration registers.
 *
 * in:
 *	%g7	return address
 *
 * volatile:
 *	%g1-%g2
 */
	ENTRY_NP(mmu_hwtw_init)

	/*
	 * If no value has been set in the MD, the default is to set
	 * HWTW Predict mode.
	 */
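	/*
	 * i.e., a sketch of the clamping below:
	 *
	 *	mode = config->sys_hwtw_mode;	// negative if absent from MD
	 *	if (mode < 0 || mode > HWTW_PREDICT_MODE)
	 *		mode = HWTW_PREDICT_MODE;
	 */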
	ROOT_STRUCT(%g1)
	ldx	[%g1 + CONFIG_SYS_HWTW_MODE], %g1
	movrlz	%g1, HWTW_PREDICT_MODE, %g1
	cmp	%g1, HWTW_PREDICT_MODE
	movg	%xcc, HWTW_PREDICT_MODE, %g1
	mov	HWTW_CFG, %g2
	stxa	%g1, [%g2]ASI_MMU_CFG

	mov	MMU_REAL_RANGE_0, %g1
	stxa	%g0, [%g1]ASI_MMU_HWTW
	mov	MMU_REAL_RANGE_1, %g1
	stxa	%g0, [%g1]ASI_MMU_HWTW
	mov	MMU_REAL_RANGE_2, %g1
	stxa	%g0, [%g1]ASI_MMU_HWTW
	mov	MMU_REAL_RANGE_3, %g1
	stxa	%g0, [%g1]ASI_MMU_HWTW

	HVRET
	SET_SIZE(mmu_hwtw_init)

	/*
	 * %g1 - contains the Data Fault Type
	 */
	ENTRY_NP(dmmu_err)
	mov	MMU_SFAR, %g2
	ldxa	[%g2]ASI_DMMU, %g2
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_DMMU, %g3
	set	(NCTXS - 1), %g4
	and	%g3, %g4, %g3

	ALTENTRY(dmmu_err_common)
	/*
	 * %g1 - fault type
	 * %g2 - fault addr
	 * %g3 - fault ctx
	 */
	VCPU_STRUCT(%g4)
	ldx	[%g4 + CPU_MMU_AREA], %g4
	brz,pn	%g4, watchdog_guest
	nop
	stx	%g1, [%g4 + MMU_FAULT_AREA_DFT]
	stx	%g2, [%g4 + MMU_FAULT_AREA_DADDR]
	stx	%g3, [%g4 + MMU_FAULT_AREA_DCTX]

	rdhpr	%htstate, %g1
	btst	HTSTATE_HPRIV, %g1
	bnz,pn	%xcc, badtrap
	rdpr	%tba, %g1
	rdpr	%tt, %g2
	sllx	%g2, TT_OFFSET_SHIFT, %g2
	add	%g1, %g2, %g1
	rdpr	%tl, %g3
	cmp	%g3, MAXPTL
	bgu,pn	%xcc, watchdog_guest
	clr	%g2
	cmp	%g3, 1
	movne	%xcc, 1, %g2
	sllx	%g2, 14, %g2
	add	%g1, %g2, %g1
	TRAP_GUEST(%g1, %g2, %g3)
	/*NOTREACHED*/
	SET_SIZE(dmmu_err_common)
	SET_SIZE(dmmu_err)
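/*
 * The vectoring above amounts to (a sketch; trap_guest() stands in for
 * the TRAP_GUEST macro):
 *
 *	if (hpriv_trap)			// HTSTATE.HPRIV set
 *		badtrap();
 *	if (tl > MAXPTL)
 *		watchdog_guest();
 *	dest = tba + (tt << TT_OFFSET_SHIFT)
 *	    + (tl == 1 ? 0 : (1 << 14));	// (1 << 14): TL>0 half of table
 *	trap_guest(dest);
 *
 * immu_err below follows the same pattern for instruction faults.
 */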

	/*
	 * %g1 - contains the Instruction Fault Type
	 */
	ENTRY_NP(immu_err)
	rdpr	%tpc, %g2
#if 1	/* XXXQ */
	/* %tl>1: nucleus */
	/* %tl==1: primary */
	mov	%g0, %g3
#endif

	ALTENTRY(immu_err_common)
	/*
	 * %g1 - fault type
	 * %g2 - fault addr
	 * %g3 - fault ctx
	 */
	VCPU_STRUCT(%g4)
	ldx	[%g4 + CPU_MMU_AREA], %g4
	brz,pn	%g4, watchdog_guest
	nop
	stx	%g1, [%g4 + MMU_FAULT_AREA_IFT]
	stx	%g2, [%g4 + MMU_FAULT_AREA_IADDR]
	stx	%g3, [%g4 + MMU_FAULT_AREA_ICTX]

	rdhpr	%htstate, %g1
	btst	HTSTATE_HPRIV, %g1
	bnz,pn	%xcc, badtrap
	rdpr	%tba, %g1
	rdpr	%tt, %g2
	sllx	%g2, TT_OFFSET_SHIFT, %g2
	add	%g1, %g2, %g1
	rdpr	%tl, %g3
	cmp	%g3, MAXPTL
	bgu,pn	%xcc, watchdog_guest
	clr	%g2
	cmp	%g3, 1
	movne	%xcc, 1, %g2
	sllx	%g2, 14, %g2
	add	%g1, %g2, %g1
	TRAP_GUEST(%g1, %g2, %g3)
	/*NOTREACHED*/
	SET_SIZE(immu_err_common)
	SET_SIZE(immu_err)

	/*
	 * instruction_invalid_TSB_entry trap
	 */
	ENTRY_NP(itsb_err)

	/*
	 * Find the RA for the VA from the Tag Access register.
	 * Get the PA of the TTE from each I-TSB pointer register.
	 * Read the TTE tag/data from that PA and check whether the
	 * tag matches. If we have a match, get the RA from the TTE data.
	 *
	 * N2 HWTW puts the PA of the four TSB entries it checked into
	 * the MMU I/D-TSB Pointer registers.
	 */
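	/*
	 * The two probe loops below (ctx0 and ctxN) both follow this
	 * sketch, where the helper names are illustrative:
	 *
	 *	for (i = 0; i < 4; i++) {
	 *		cfg = tsb_cfg[i];		// ldxa ASI_MMU_TSB
	 *		if (!(cfg & (1ULL << 63)))	// TSB not enabled
	 *			continue;
	 *		pa = tsb_ptr[i];		// PA of candidate TTE
	 *		if (pa == 0)
	 *			continue;
	 *		tag = *(pa); data = *(pa + 8);	// ldda ASI_QUAD_LDD
	 *		if ((data & (1ULL << 63)) && tag == tag_target)
	 *			goto RA_found;		// valid TTE, tag hit
	 *	}
	 *	retry();	// entry was evicted from the TSB; re-execute
	 *
	 * The ctxN loop additionally masks the context out of the tag
	 * target when "use-context-0"/"use-context-1" is in effect.
	 * dtsb_err below uses the same loops on the D-side registers.
	 */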
	mov	MMU_TAG_TARGET, %g2
	ldxa	[%g2]ASI_IMMU, %g2
	srlx	%g2, TAGTRG_CTX_RSHIFT, %g4
	brnz	%g4, .itsb_err_ctxn
	mov	MMU_ITSB_PTR_0, %g3

.itsb_err_ctx0:
	mov	TSB_CFG_CTX0_0, %g1
0:
	!! %g1 TSB Config Register
	!! %g2 Tag Target
	!! %g3 TSB Pointer Register

	ldxa	[%g1]ASI_MMU_TSB, %g5		! %g5 TSB Config
	brgez,pn %g5, 1f
	nop

	ldxa	[%g3]ASI_MMU_TSB, %g6		! %g6 PA of I-TSB entry
	brz,pn	%g6, 1f
	nop

	! load the TTE tag/data from the TSB
	ldda	[%g6]ASI_QUAD_LDD, %g4		! %g4 tag, %g5 data

	brgez,pn %g5, 1f			! check TTE.data.v bit 63,
						! if not set, TTE invalid

	cmp	%g4, %g2			! TTE.Tag == Tag Target ?
	be,pn	%xcc, .itsb_err_RA_found
	nop

1:
	! get next TSB pointer and configuration register
	inc	8, %g1				! TSB Config + 8
	cmp	%g3, MMU_ITSB_PTR_3
	bl,pt	%xcc, 0b
	inc	8, %g3				! ITSB_PTR VA + 8

	! no TTE found for this VA. That must mean it got evicted from
	! the TSB
	retry

.itsb_err_ctxn:
	mov	TSB_CFG_CTXN_0, %g1
0:
	!! %g1 TSB Config Register
	!! %g2 Tag Target
	!! %g3 TSB Pointer Register

	ldxa	[%g1]ASI_MMU_TSB, %g7		! %g7 TSB Config
	brgez,pn %g7, 1f
	nop

	ldxa	[%g3]ASI_MMU_TSB, %g6		! %g6 PA of I-TSB entry
	brz,pn	%g6, 1f
	nop

	! load the TTE tag/data from the TSB
	ldda	[%g6]ASI_QUAD_LDD, %g4		! %g4 tag, %g5 data

	brgez,pn %g5, 1f			! check TTE.data.v bit 63,
						! if not set, TTE invalid

	/*
	 * Check whether "use-context-0" or "use-context-1" is in effect;
	 * if so, ignore the context when checking for a tag match.
	 */
	srlx	%g7, TSB_CFG_USE_CTX1_SHIFT, %g7
	and	%g7, (USE_TSB_PRIMARY_CTX | USE_TSB_SECONDARY_CTX), %g7

	sllx	%g2, TAGTRG_VA_LSHIFT, %g6	! clear [63:42] of Tag Target
	srlx	%g6, TAGTRG_VA_LSHIFT, %g6	! (context)
	movrz	%g7, %g2, %g6			! go with masked Tag Target?

	cmp	%g4, %g6			! TTE.tag == Tag Target ?
	be,pn	%xcc, .itsb_err_RA_found
	nop
1:
	! get next TSB pointer and configuration register
	inc	8, %g1				! TSB Config + 8
	cmp	%g3, MMU_ITSB_PTR_3
	bl,pt	%xcc, 0b
	inc	8, %g3				! ITSB_PTR + 8

	! no TTE found for this VA. That must mean it got evicted from
	! the TSB
	retry

.itsb_err_RA_found:

	! found the TSB entry for the VA
	! %g5 TTE.data, RA is bits[55:13]
	srlx	%g5, 13, %g5
	sllx	%g5, 13 + 63 - 55, %g5
	srlx	%g5, 63 - 55, %g2		! RA -> %g2
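	/*
	 * The three shifts above isolate the RA field, i.e.:
	 *
	 *	ra = tte_data & (((1ULL << 56) - 1) & ~((1ULL << 13) - 1));
	 */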

	/*
	 * RA[55:40] must be zero
	 */
	srlx	%g2, RA_55_40_SHIFT, %g3
	set	RA_55_40_MASK, %g5
	and	%g3, %g5, %g3
	brnz,pn	%g3, .itsb_invalid_ra_err
	nop

	/*
	 * Find the guest memory segment that contains this RA.
	 * If this RA is not allocated to the guest, revector to the
	 * guest's trap handler. Note that this can be either a
	 * memory or I/O segment.
	 */
	GUEST_STRUCT(%g5)
	RA_GET_SEGMENT(%g5, %g2, %g3, %g4)
	! %g3 segment

	/*
	 * If we have a valid segment for this RA, set up the RA->PA
	 * translation in the MMU HWTW range/offset registers
	 */
	brnz,pn	%g3, .tsb_err_check_hwtw_regs
	nop

	/*
	 * No valid guest memory segment for this RA -or-
	 * RA[55:40] not zero
	 */
.itsb_invalid_ra_err:
	rdpr	%tba, %g1
	mov	TT_IAX, %g2
	wrpr	%g2, %tt
	sllx	%g2, TT_OFFSET_SHIFT, %g2
	add	%g1, %g2, %g1
	rdpr	%tl, %g3
	cmp	%g3, MAXPTL
	bgu,pn	%xcc, watchdog_guest
	clr	%g2
	cmp	%g3, 1
	movne	%xcc, 1, %g2
	sllx	%g2, 14, %g2
	VCPU_STRUCT(%g3)
	ldx	[%g3 + CPU_MMU_AREA], %g3
	brz,pn	%g3, watchdog_guest		! Nothing we can do about this
	nop
	!! %g3 - MMU_FAULT_AREA
	rdpr	%tpc, %g4
	stx	%g4, [%g3 + MMU_FAULT_AREA_IADDR]
	mov	MMU_TAG_ACCESS, %g5
	ldxa	[%g5]ASI_IMMU, %g5
	set	(NCTXS - 1), %g6
	and	%g5, %g6, %g5
	stx	%g5, [%g3 + MMU_FAULT_AREA_ICTX]
	mov	MMU_FT_INVALIDRA, %g6
	stx	%g6, [%g3 + MMU_FAULT_AREA_IFT]
	add	%g1, %g2, %g1
	TRAP_GUEST(%g1, %g2, %g3)
	/*NOTREACHED*/
	SET_SIZE(itsb_err)

	/*
	 * data_invalid_TSB_entry trap
	 */
	ENTRY_NP(dtsb_err)

	/*
	 * Find the RA for the VA from the Tag Access register.
	 * Get the PA of the TTE from each D-TSB pointer register.
	 * Read the TTE tag/data from that PA and check whether the
	 * tag matches. If we have a match, get the RA from the TTE data.
	 *
	 * N2 HWTW puts the PA of the four TSB entries it checked into
	 * the MMU I/D-TSB Pointer registers.
	 */
	mov	MMU_TAG_TARGET, %g2
	ldxa	[%g2]ASI_DMMU, %g2
	srlx	%g2, TAGTRG_CTX_RSHIFT, %g4
	brnz	%g4, .dtsb_err_ctxn
	mov	MMU_DTSB_PTR_0, %g3

.dtsb_err_ctx0:
	mov	TSB_CFG_CTX0_0, %g1
0:
	!! %g1 TSB Config Register
	!! %g2 Tag Target
	!! %g3 TSB Pointer Register

	ldxa	[%g1]ASI_MMU_TSB, %g5		! %g5 TSB Config
	brgez,pn %g5, 1f
	nop

	ldxa	[%g3]ASI_MMU_TSB, %g6		! %g6 PA of D-TSB entry
	brz,pn	%g6, 1f
	nop

	! load the TTE tag/data from the TSB
	ldda	[%g6]ASI_QUAD_LDD, %g4		! %g4 tag, %g5 data

	brgez,pn %g5, 1f			! check TTE.data.v bit 63,
						! if not set, TTE invalid

	cmp	%g4, %g2			! TTE.Tag == Tag Target ?
	be,pn	%xcc, .dtsb_err_RA_found
	nop

1:
	! get next TSB pointer and configuration register
	inc	8, %g1				! TSB Config + 8
	cmp	%g3, MMU_DTSB_PTR_3
	bl,pt	%xcc, 0b
	inc	8, %g3				! DTSB_PTR VA + 8

	! no TTE found for this VA. That must mean it got evicted from
	! the TSB
	retry

.dtsb_err_ctxn:
	mov	TSB_CFG_CTXN_0, %g1
0:
	!! %g1 TSB Config Register
	!! %g2 Tag Target
	!! %g3 TSB Pointer Register

	ldxa	[%g1]ASI_MMU_TSB, %g7		! %g7 TSB Config
	brgez,pn %g7, 1f
	nop

	ldxa	[%g3]ASI_MMU_TSB, %g6		! %g6 PA of D-TSB entry
	brz,pn	%g6, 1f
	nop

	! load the TTE tag/data from the TSB
	ldda	[%g6]ASI_QUAD_LDD, %g4		! %g4 tag, %g5 data

	brgez,pn %g5, 1f			! check TTE.data.v bit 63,
						! if not set, TTE invalid

	/*
	 * Check whether "use-context-0" or "use-context-1" is in effect;
	 * if so, ignore the context when checking for a tag match.
	 */
	srlx	%g7, TSB_CFG_USE_CTX1_SHIFT, %g7
	and	%g7, (USE_TSB_PRIMARY_CTX | USE_TSB_SECONDARY_CTX), %g7

	sllx	%g2, TAGTRG_VA_LSHIFT, %g6	! clear [63:42] of Tag Target
	srlx	%g6, TAGTRG_VA_LSHIFT, %g6	! (context)
	movrz	%g7, %g2, %g6			! go with masked Tag Target?

	cmp	%g4, %g6			! TTE.tag == Tag Target ?
	be,pn	%xcc, .dtsb_err_RA_found
	nop
1:
	! get next TSB pointer and configuration register
	inc	8, %g1				! TSB Config + 8
	cmp	%g3, MMU_DTSB_PTR_3
	bl,pt	%xcc, 0b
	inc	8, %g3				! DTSB_PTR VA + 8

	! no TTE found for this VA. That must mean it got evicted from
	! the TSB
	retry

.dtsb_err_RA_found:

	! found the TSB entry for the VA
	! %g5 TTE.data, RA is bits[55:13]
	srlx	%g5, 13, %g5
	sllx	%g5, 13 + 63 - 55, %g5
	srlx	%g5, 63 - 55, %g2		! RA -> %g2

	/*
	 * RA[55:40] must be zero
	 */
	srlx	%g2, RA_55_40_SHIFT, %g3
	set	RA_55_40_MASK, %g5
	and	%g3, %g5, %g3
	brnz,a,pn %g3, .dtsb_invalid_ra_err
	nop

	/*
	 * Find the guest memory segment that contains this RA.
	 * If this RA is not allocated to the guest, revector to the
	 * guest's trap handler. Note that this can be either a
	 * memory or I/O segment.
	 */
	GUEST_STRUCT(%g5)
	RA_GET_SEGMENT(%g5, %g2, %g3, %g4)
	! %g3 segment
	brz,pn	%g3, .dtsb_invalid_ra_err
	nop

	/*
	 * We have a valid guest memory segment for this RA. Use it to
	 * populate one of the MMU Real Range/Physical Offset register
	 * pairs. Find the first disabled Real Range/Offset pair; if all
	 * four are enabled, disable them all and start again.
	 */
.tsb_err_check_hwtw_regs:
	! %g2 RA
	! %g3 guest memory segment
	mov	MMU_REAL_RANGE_0, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g5
	brgez,pn %g5, .tsb_err_ra_hwtw_insert	! enable bit (63) not set
	mov	MMU_PHYS_OFF_0, %g5

	mov	MMU_REAL_RANGE_1, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g5
	brgez,pn %g5, .tsb_err_ra_hwtw_insert	! enable bit (63) not set
	mov	MMU_PHYS_OFF_1, %g5

	mov	MMU_REAL_RANGE_2, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g5
	brgez,pn %g5, .tsb_err_ra_hwtw_insert	! enable bit (63) not set
	mov	MMU_PHYS_OFF_2, %g5

	mov	MMU_REAL_RANGE_3, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g5
	brgez,pn %g5, .tsb_err_ra_hwtw_insert	! enable bit (63) not set
	mov	MMU_PHYS_OFF_3, %g5

	! all the HWTW range/offsets in use, disable them all
	mov	MMU_REAL_RANGE_0, %g4
	stxa	%g0, [%g4]ASI_MMU_HWTW
	mov	MMU_REAL_RANGE_1, %g4
	stxa	%g0, [%g4]ASI_MMU_HWTW
	mov	MMU_REAL_RANGE_2, %g4
	stxa	%g0, [%g4]ASI_MMU_HWTW
	mov	MMU_REAL_RANGE_3, %g4
	stxa	%g0, [%g4]ASI_MMU_HWTW
	mov	MMU_PHYS_OFF_3, %g5

	! fall through, leaving range/offset pairs 0/1/2 free for next
	! time to save a little search time

.tsb_err_ra_hwtw_insert:
	/*
	 * Insert the base/limit/offset from the guest memory segment into
	 * the MMU Real Range/Physical Offset registers.
	 *
	 * Note that the base/limit/offset are >> 13 for the MMU HWTW registers
	 *
	 * %g3 guest memory segment
	 * %g4 VA of ASI_MMU_HWTW of REAL_RANGE
	 * %g5 VA of ASI_MMU_HWTW of PHYS_OFFSET
	 */
	mov	1, %g2
	sllx	%g2, 63, %g2			! MMU Real Range enable bit[63]
	ldx	[%g3 + RA2PA_SEGMENT_LIMIT], %g6
	srlx	%g6, 13, %g6
	sllx	%g6, REALRANGE_BOUNDS_SHIFT, %g6 ! MMU Real Range limit bits[53:27]
	or	%g6, %g2, %g2
	ldx	[%g3 + RA2PA_SEGMENT_BASE], %g6
	srlx	%g6, 13, %g6
	sllx	%g6, REALRANGE_BASE_SHIFT, %g6	! MMU Real Range base bits[26:0]
	or	%g6, %g2, %g2
	stxa	%g2, [%g4]ASI_MMU_HWTW		! MMU Real Range

	ldx	[%g3 + RA2PA_SEGMENT_OFFSET], %g6
	srlx	%g6, 13, %g6
	sllx	%g6, PHYSOFF_SHIFT, %g6
	stxa	%g6, [%g5]ASI_MMU_HWTW		! MMU Physical Offset
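	/*
	 * The register packing above, as a sketch (hwtw_write() is
	 * illustrative shorthand for the stxa to ASI_MMU_HWTW):
	 *
	 *	range = (1ULL << 63)				// enable
	 *	    | ((seg->limit >> 13) << REALRANGE_BOUNDS_SHIFT)
	 *	    | ((seg->base >> 13) << REALRANGE_BASE_SHIFT);
	 *	hwtw_write(real_range_reg, range);
	 *	physoff = (seg->offset >> 13) << PHYSOFF_SHIFT;
	 *	hwtw_write(phys_off_reg, physoff);
	 */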

	/*
	 * Now that a valid RA->PA translation is set up for this VA, the
	 * HWTW's TSB TTE RA->PA translation will succeed, so just
	 * re-execute the instruction.
	 */

	retry

	/*
	 * No valid guest memory segment for this RA -or-
	 * RA[55:40] not zero
	 */
.dtsb_invalid_ra_err:
	rdpr	%tba, %g1
	mov	TT_DAX, %g2
	wrpr	%g2, %tt
	sllx	%g2, TT_OFFSET_SHIFT, %g2
	add	%g1, %g2, %g1
	rdpr	%tl, %g3
	cmp	%g3, MAXPTL
	bgu,pn	%xcc, watchdog_guest
	clr	%g2
	cmp	%g3, 1
	movne	%xcc, 1, %g2
	sllx	%g2, 14, %g2
	VCPU_STRUCT(%g3)
	ldx	[%g3 + CPU_MMU_AREA], %g3
	brz,pn	%g3, watchdog_guest		! Nothing we can do about this
	nop
	!! %g3 - MMU_FAULT_AREA
	stx	%g0, [%g3 + MMU_FAULT_AREA_DADDR]	/* XXX */
	mov	MMU_TAG_ACCESS, %g5
	ldxa	[%g5]ASI_DMMU, %g5
	set	(NCTXS - 1), %g6
	and	%g5, %g6, %g5
	stx	%g5, [%g3 + MMU_FAULT_AREA_DCTX]
	mov	MMU_FT_INVALIDRA, %g6
	stx	%g6, [%g3 + MMU_FAULT_AREA_DFT]
	add	%g1, %g2, %g1
	TRAP_GUEST(%g1, %g2, %g3)
	/*NOTREACHED*/
	SET_SIZE(dtsb_err)