Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / huron / src / errors_mmu.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: errors_mmu.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49#pragma ident "@(#)errors_mmu.s 1.3 07/07/19 SMI"
50
51#include <sys/asm_linkage.h>
52#include <hypervisor.h>
53#include <asi.h>
54#include <mmu.h>
55#include <hprivregs.h>
56
57#include <offsets.h>
58#include <util.h>
59#include <error_defs.h>
60#include <error_regs.h>
61#include <error_asm.h>
62#include <debug.h>
63
	/*
	 * dtlb_dump: save the full D-TLB contents for error diagnosis.
	 *
	 * Copies every D-TLB tag/data pair into the per-strand error
	 * diagnostic buffer.  The D-TLB error-report enables in CERER
	 * (DTDP/DTTM/DTTP/HWTWMU) are cleared around the diagnostic ASI
	 * reads so that reading a corrupt entry does not itself signal
	 * a new error, then set again before returning.
	 *
	 * %g7	return address
	 * Clobbers: %g1 - %g6
	 */
	ENTRY(dtlb_dump)

	GET_ERR_DIAG_DATA_BUF(%g1, %g2)		/* %g1 = diag buffer base */

	/*
	 * Avoid causing errors when reading the TLB registers
	 */
	mov	CORE_ERR_REPORT_EN, %g5
	ldxa	[%g5]ASI_ERR_EN, %g3		/* current CERER value */
	setx	(ERR_DTDP | ERR_DTTM | ERR_DTTP | ERR_HWTWMU), %g4, %g6
	andn	%g3, %g6, %g3			/* clear D-TLB error enables */
	stxa	%g3, [%g5]ASI_ERR_EN

	/* %g1 = &diag_buf->diag_data.dtlb[0] */
	add	%g1, ERR_DIAG_BUF_DIAG_DATA, %g1
	add	%g1, ERR_DIAG_DATA_DTLB, %g1
	/*
	 * now store the DTLB tag/data entries
	 */
	set	0, %g3				/* TLB entry = 0 */
1:	ldxa	[%g3] ASI_DTLB_TAG, %g6		/* Tag */
	stx	%g6, [%g1 + ERR_TLB_TAG]	/* save tag */
	ldxa	[%g3] ASI_DTLB_DATA_ACC, %g6	/* Data */
	stx	%g6, [%g1 + ERR_TLB_DATA]	/* save data */
	add	%g3, 0x8, %g3			/* entry++ (8-byte ASI VA stride) */
	cmp	%g3, 0x400			/* done? (0x400/8 = 128 entries) */
	bnz	1b				/* loop back */
	add	%g1, ERR_DIAG_DATA_DTLB_INCR, %g1 /* (delay slot) next buf slot */

	/*
	 * Re-enable TLB errors
	 */
	mov	CORE_ERR_REPORT_EN, %g5
	ldxa	[%g5]ASI_ERR_EN, %g3
	setx	(ERR_DTDP | ERR_DTTM | ERR_DTTP | ERR_HWTWMU), %g4, %g6
	or	%g3, %g6, %g3			/* set the bits cleared above */
	stxa	%g3, [%g5]ASI_ERR_EN

	HVRET
	SET_SIZE(dtlb_dump)
103
	/*
	 * dtlb_demap_all: invalidate the entire D-TLB.
	 *
	 * Issues a demap-all operation through the D-MMU demap ASI so no
	 * stale (possibly corrupt) data translations remain.
	 *
	 * %g7	return address
	 * Clobbers: %g3
	 */
	ENTRY(dtlb_demap_all)

	set	TLB_DEMAP_ALL_TYPE, %g3		/* demap type encoded in VA */
	stxa	%g0, [%g3]ASI_DMMU_DEMAP	/* the store triggers the demap */

	HVRET
	SET_SIZE(dtlb_demap_all)
111
	/*
	 * itlb_dump: save the full I-TLB contents for error diagnosis.
	 *
	 * Copies every I-TLB tag/data pair into the per-strand error
	 * diagnostic buffer.  The I-TLB error-report enables in CERER
	 * (ITDP/ITTM/ITTP/HWTWMU) are cleared around the diagnostic ASI
	 * reads so that reading a corrupt entry does not itself signal
	 * a new error, then set again before returning.
	 *
	 * %g7	return address
	 * Clobbers: %g1 - %g6
	 */
	ENTRY(itlb_dump)

	GET_ERR_DIAG_DATA_BUF(%g1, %g2)		/* %g1 = diag buffer base */

	/*
	 * Avoid causing errors when reading the TLB registers
	 */
	mov	CORE_ERR_REPORT_EN, %g5
	ldxa	[%g5]ASI_ERR_EN, %g3		/* current CERER value */
	setx	(ERR_ITDP | ERR_ITTM | ERR_ITTP | ERR_HWTWMU), %g4, %g6
	andn	%g3, %g6, %g3			/* clear I-TLB error enables */
	stxa	%g3, [%g5]ASI_ERR_EN

	/* %g1 = &diag_buf->diag_data.itlb[0] */
	add	%g1, ERR_DIAG_BUF_DIAG_DATA, %g1
	add	%g1, ERR_DIAG_DATA_ITLB, %g1

	set	0, %g3				/* TLB entry = 0 */
1:	ldxa	[%g3] ASI_ITLB_TAG, %g6		/* Tag */
	stx	%g6, [%g1 + ERR_TLB_TAG]	/* save tag */
	ldxa	[%g3] ASI_ITLB_DATA_ACC, %g6	/* Data */
	stx	%g6, [%g1 + ERR_TLB_DATA]	/* save data */
	add	%g3, 0x8, %g3			/* entry++ (8-byte ASI VA stride) */
	cmp	%g3, 0x200			/* done? (0x200/8 = 64 entries) */
	bnz	1b				/* loop back */
	add	%g1, ERR_DIAG_DATA_ITLB_INCR, %g1 /* (delay slot) next buf slot */

	/*
	 * Re-enable TLB errors
	 */
	mov	CORE_ERR_REPORT_EN, %g5
	ldxa	[%g5]ASI_ERR_EN, %g3
	setx	(ERR_ITDP | ERR_ITTM | ERR_ITTP | ERR_HWTWMU), %g4, %g6
	or	%g3, %g6, %g3			/* set the bits cleared above */
	stxa	%g3, [%g5]ASI_ERR_EN

	HVRET

	SET_SIZE(itlb_dump)
150
	/*
	 * itlb_demap_all: invalidate the entire I-TLB.
	 *
	 * Issues a demap-all operation through the I-MMU demap ASI so no
	 * stale (possibly corrupt) instruction translations remain.
	 *
	 * %g7	return address
	 * Clobbers: %g3
	 */
	ENTRY(itlb_demap_all)
	set	TLB_DEMAP_ALL_TYPE, %g3		/* demap type encoded in VA */
	stxa	%g0, [%g3]ASI_IMMU_DEMAP	/* the store triggers the demap */

	HVRET
	SET_SIZE(itlb_demap_all)
157
	/*
	 * Dump MRA diagnostic data
	 *
	 * Records diagnostic state for an MMU Register Array (MRA)
	 * parity error: the parity bits of the failing MRA entry (the
	 * entry index comes from D-SFAR) plus a copy of all MMU TSB
	 * config, real-range and physical-offset registers, all written
	 * into the per-strand error diagnostic buffer.  CERER.MRAU is
	 * cleared around the diagnostic reads so they cannot raise a
	 * fresh MRAU error, and set again on the way out.
	 *
	 * %g7 return address
	 * Clobbers: %g1 - %g6
	 */
	ENTRY(dump_mra)

	GET_ERR_DIAG_DATA_BUF(%g1, %g2)
	brz,pn	%g1, dump_mra_exit_nocerer	/* no diag buffer: bail out */
	.empty					/* (empty delay slot) */

	GET_ERR_DSFAR(%g4, %g5)			/* %g4 = D-SFAR value */

	/*
	 * get diag_buf->err_mmu_regs
	 */
	add	%g1, ERR_DIAG_BUF_DIAG_DATA, %g1
	add	%g1, ERR_DIAG_DATA_MMU_REGS, %g1

	/*
	 * get MRA index from D-SFAR[2:0]
	 * %g4: failing MRA entry index
	 */
	srlx	%g4, DSFAR_MRA_INDEX_SHIFT, %g4
	and	%g4, DSFAR_MRA_INDEX_MASK, %g4

	/*
	 * Avoid causing errors when reading the MMU registers
	 * by disabling CERER.MRAU
	 */
	mov	CORE_ERR_REPORT_EN, %g5
	ldxa	[%g5]ASI_ERR_EN, %g3
	setx	ERR_MRAU, %g2, %g6
	andn	%g3, %g6, %g3
	stxa	%g3, [%g5]ASI_ERR_EN

	/*
	 * get MRA Parity for the failing entry
	 * (entry index goes into the ASI VA, shifted into place)
	 */
	sllx	%g4, ASI_MRA_INDEX_SHIFT, %g3
	ldxa	[%g3]ASI_MRA_ACCESS, %g3
	and	%g3, MRA_PARITY_MASK, %g3

	/*
	 * store MRA parity byte at err_mmu_regs.parity[index]
	 */
	add	%g1, ERR_MMU_PARITY, %g2
	mulx	%g4, ERR_MMU_PARITY_INCR, %g4
	stub	%g3, [%g2 + %g4]

	/*
	 * store MMU registers:
	 * four ctx==0 TSB configs, four ctx!=0 TSB configs,
	 * four real-range and four physical-offset registers
	 */
	mov	TSB_CFG_CTX0_0, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 0)]
	mov	TSB_CFG_CTX0_1, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 1)]
	mov	TSB_CFG_CTX0_2, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 2)]
	mov	TSB_CFG_CTX0_3, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 3)]
	mov	TSB_CFG_CTXN_0, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 0)]
	mov	TSB_CFG_CTXN_1, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 1)]
	mov	TSB_CFG_CTXN_2, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 2)]
	mov	TSB_CFG_CTXN_3, %g4
	ldxa	[%g4]ASI_MMU_TSB, %g4
	stx	%g4, [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 3)]
	mov	MMU_REAL_RANGE_0, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_REAL_RANGE_INCR * 0)]
	mov	MMU_REAL_RANGE_1, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_REAL_RANGE_INCR * 1)]
	mov	MMU_REAL_RANGE_2, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_REAL_RANGE_INCR * 2)]
	mov	MMU_REAL_RANGE_3, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_REAL_RANGE_INCR * 3)]
	mov	MMU_PHYS_OFF_0, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 0)]
	mov	MMU_PHYS_OFF_1, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 1)]
	mov	MMU_PHYS_OFF_2, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 2)]
	mov	MMU_PHYS_OFF_3, %g4
	ldxa	[%g4]ASI_MMU_HWTW, %g4
	stx	%g4, [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 3)]

	/*
	 * reenable CERER.MRAU
	 */
	mov	CORE_ERR_REPORT_EN, %g5
	ldxa	[%g5]ASI_ERR_EN, %g3
	setx	ERR_MRAU, %g4, %g6
	or	%g3, %g6, %g3
	stxa	%g3, [%g5]ASI_ERR_EN

dump_mra_exit_nocerer:

	HVRET

	SET_SIZE(dump_mra)
272
	/*
	 * Fix MMU register array parity errors
	 *
	 * correct_imra/correct_dmra are the I-side and D-side entry
	 * points.  Both error types are repaired the same way, so this
	 * simply tail-branches to correct_mra_common (which HVRETs
	 * through %g7 on the caller's behalf).
	 *
	 * %g7 return address
	 */
	ENTRY(correct_imra)
	ba	correct_mra_common
	nop				/* (delay slot) */
	SET_SIZE(correct_imra)
281
	/*
	 * D-side entry point for MRA parity repair; identical handling
	 * to correct_imra, shared in correct_mra_common.
	 *
	 * %g7 return address
	 */
	ENTRY(correct_dmra)
	ba	correct_mra_common
	nop				/* (delay slot) */
	SET_SIZE(correct_dmra)
286
	/*
	 * correct_mra_common: repair an MRA parity error by rewriting
	 * the affected MMU registers.
	 *
	 * The failing MRA entry (0->7, from D-SFAR) covers a pair of
	 * registers whose clean copies live in strand.mra[0->15].
	 * TSB-config registers are reloaded from the clean copy;
	 * real-range/phys-offset registers are instead cleared, which
	 * later forces an invalid_TSB_entry trap that refills them.
	 * CERER.MRAU is disabled while the registers are rewritten.
	 *
	 * %g7 return address
	 * Clobbers: %g1 - %g6
	 */
	ENTRY(correct_mra_common)
	/*
	 * Disable MRA errors
	 */
	mov	CORE_ERR_REPORT_EN, %g3
	ldxa	[%g3]ASI_ERR_EN, %g4
	setx	ERR_MRAU, %g5, %g6
	andn	%g4, %g6, %g6
	stxa	%g6, [%g3]ASI_ERR_EN

	/*
	 * Get error MRA index from D-SFAR[2:0]
	 * %g2: MRA error index 0->7
	 */
	GET_ERR_DSFAR(%g2, %g3)
	srlx	%g2, DSFAR_MRA_INDEX_SHIFT, %g2
	and	%g2, DSFAR_MRA_INDEX_MASK, %g2

	/*
	 * Reload the error MRA register with the clean MRA data.
	 *
	 * Since there are 8 MRA entries with their clean data
	 * stored in 16 arrays in the strand struct (strand.mra[0->15]),
	 * to reload 2 registers for each MRA entry 0->7, we loop
	 * through index 0->15 twice, first looping on the even
	 * indices, then the odd ones for the second round.
	 *
	 * %g3: strand.mra index 0->15
	 * %g4: clean copy from strand.mra
	 *
	 */
	STRAND_STRUCT(%g1)
	mulx	%g2, 2, %g3		! start with an even index
1:
	/*
	 * strand.mra index 0->7  (MRA entries 0->3): z/nz MMU tsb_cfg
	 * strand.mra index 8->15 (MRA entries 4->7): real_range/phys_offset
	 */
	cmp	%g3, 7
	bg	2f
	nop				! (delay slot)

	! %g4 = strand.mra[%g3], the clean register value
	mulx	%g3, STRAND_MRA_INCR, %g5
	add	%g5, STRAND_MRA, %g5
	ldx	[%g1 + %g5], %g4

	! map strand.mra index -> TSB config register address in %g5
	cmp	%g3, 0
	move	%xcc, TSB_CFG_CTX0_0, %g5
	cmp	%g3, 1
	move	%xcc, TSB_CFG_CTX0_1, %g5
	cmp	%g3, 2
	move	%xcc, TSB_CFG_CTX0_2, %g5
	cmp	%g3, 3
	move	%xcc, TSB_CFG_CTX0_3, %g5
	cmp	%g3, 4
	move	%xcc, TSB_CFG_CTXN_0, %g5
	cmp	%g3, 5
	move	%xcc, TSB_CFG_CTXN_1, %g5
	cmp	%g3, 6
	move	%xcc, TSB_CFG_CTXN_2, %g5
	cmp	%g3, 7
	move	%xcc, TSB_CFG_CTXN_3, %g5
	stxa	%g4, [%g5]ASI_MMU_TSB
	btst	1, %g3			! index&1 ?
	bz,pt	%icc, 1b		! even: go round again for the odd half
	add	%g3, 1, %g3		! (delay slot) loop back on odd indices

	ba	correct_mra_exit
	nop				! (delay slot)
2:
	/*
	 * For errors in the MMU Real Range/Offset registers we just
	 * clear the Real Range register. Then we will take an
	 * invalid_TSB_entry trap and refill the registers
	 * (both halves of the pair map to the same REAL_RANGE reg).
	 */
	cmp	%g3, 8
	move	%xcc, MMU_REAL_RANGE_0, %g5
	cmp	%g3, 9			! PHYS_OFFSET_0
	move	%xcc, MMU_REAL_RANGE_0, %g5
	cmp	%g3, 10
	move	%xcc, MMU_REAL_RANGE_1, %g5
	cmp	%g3, 11			! PHYS_OFFSET_1
	move	%xcc, MMU_REAL_RANGE_1, %g5
	cmp	%g3, 12
	move	%xcc, MMU_REAL_RANGE_2, %g5
	cmp	%g3, 13			! PHYS_OFFSET_2
	move	%xcc, MMU_REAL_RANGE_2, %g5
	cmp	%g3, 14
	move	%xcc, MMU_REAL_RANGE_3, %g5
	cmp	%g3, 15			! PHYS_OFFSET_3
	move	%xcc, MMU_REAL_RANGE_3, %g5
	stxa	%g0, [%g5]ASI_MMU_HWTW
	btst	1, %g3			! index&1 ?
	bz,pt	%icc, 1b
	add	%g3, 1, %g3		! (delay slot) loop back on odd indices

correct_mra_exit:
	/*
	 * Set CORE_ERR_ENABLE back to original
	 * NOTE(review): this sets MRAU unconditionally rather than
	 * restoring the saved value — confirm MRAU is always expected
	 * to be enabled on entry.
	 */
	mov	CORE_ERR_REPORT_EN, %g3
	ldxa	[%g3]ASI_ERR_EN, %g4
	setx	ERR_MRAU, %g5, %g6
	or	%g4, %g6, %g4
	stxa	%g4, [%g3]ASI_ERR_EN

	HVRET
	SET_SIZE(correct_mra_common)
395
	/*
	 * print the contents of the diag-buf I-TLB
	 * (debug-only: body compiled in only under DEBUG_LEGION)
	 *
	 * Walks the 64 saved I-TLB entries captured by itlb_dump and
	 * prints entry number, tag and data for each.
	 *
	 * %g7 return address
	 * Clobbers: %g1 - %g4, %g6 (plus whatever PRINT/PRINTX use)
	 */
	ENTRY(itlb_print)
#ifdef DEBUG_LEGION
	mov	%g7, %g6		/* PRINT macros clobber %g7; save it */
	GET_ERR_DIAG_DATA_BUF(%g1, %g2)

	/* %g1 = &diag_buf->diag_data.itlb[0] */
	add	%g1, ERR_DIAG_BUF_DIAG_DATA, %g1
	add	%g1, ERR_DIAG_DATA_ITLB, %g1
	mov	0, %g3			/* entry offset, steps by 8 */
1:
	PRINT("I-TLB entry: 0x");
	srlx	%g3, 3, %g4		/* offset/8 = entry number */
	PRINTX(%g4)
	PRINT(" : TAG : 0x")
	ldx	[%g1 + ERR_TLB_TAG], %g4
	PRINTX(%g4)
	PRINT(" : DATA : 0x")
	ldx	[%g1 + ERR_TLB_DATA], %g4
	PRINTX(%g4)
	PRINT("\r\n")
	add	%g3, 0x8, %g3		/* entry++ */
	cmp	%g3, 0x200		/* done? (64 entries) */
	bnz	1b			/* loop back */
	add	%g1, ERR_DIAG_DATA_ITLB_INCR, %g1 /* (delay slot) increment */

	mov	%g6, %g7		/* restore return address */
#endif /* DEBUG_LEGION */

	HVRET
	SET_SIZE(itlb_print)
429
	/*
	 * print the contents of the diag-buf D-TLB
	 * (debug-only: body compiled in only under DEBUG_LEGION)
	 *
	 * Walks the 128 saved D-TLB entries captured by dtlb_dump and
	 * prints entry number, tag and data for each.
	 *
	 * %g7 return address
	 * Clobbers: %g1 - %g4, %g6 (plus whatever PRINT/PRINTX use)
	 */
	ENTRY(dtlb_print)
#ifdef DEBUG_LEGION
	mov	%g7, %g6		/* PRINT macros clobber %g7; save it */
	GET_ERR_DIAG_DATA_BUF(%g1, %g2)

	/* %g1 = &diag_buf->diag_data.dtlb[0] */
	add	%g1, ERR_DIAG_BUF_DIAG_DATA, %g1
	add	%g1, ERR_DIAG_DATA_DTLB, %g1
	mov	0, %g3			/* entry offset, steps by 8 */
1:
	PRINT("D-TLB entry: 0x");
	srlx	%g3, 3, %g4		/* offset/8 = entry number */
	PRINTX(%g4)
	PRINT(" : TAG : 0x")
	ldx	[%g1 + ERR_TLB_TAG], %g4
	PRINTX(%g4)
	PRINT(" : DATA : 0x")
	ldx	[%g1 + ERR_TLB_DATA], %g4
	PRINTX(%g4)
	PRINT("\r\n")
	add	%g3, 0x8, %g3		/* entry++ */
	cmp	%g3, 0x400		/* done? (128 entries) */
	bnz	1b			/* loop back */
	add	%g1, ERR_DIAG_DATA_DTLB_INCR, %g1 /* (delay slot) increment */

	mov	%g6, %g7		/* restore return address */
#endif /* DEBUG_LEGION */

	HVRET
	SET_SIZE(dtlb_print)
463
464 /*
465 * print the failing MRA data
466 * %g7 return address
467 */
468 ENTRY(mra_print)
469#ifdef DEBUG_LEGION
470 mov %g7, %g6
471 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
472
473 /*
474 * get diag_buf->err_mmu_regs
475 */
476 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
477 add %g1, ERR_DIAG_DATA_MMU_REGS, %g1
478
479 PRINT("PARITY 0: 0x")
480 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 0)], %g2
481 PRINTX(%g2)
482 PRINT("PARITY 1: 0x")
483 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 1)], %g2
484 PRINTX(%g2)
485 PRINT("PARITY 2: 0x")
486 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 2)], %g2
487 PRINTX(%g2)
488 PRINT("PARITY 3: 0x")
489 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 3)], %g2
490 PRINTX(%g2)
491 PRINT("PARITY 4: 0x")
492 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 4)], %g2
493 PRINTX(%g2)
494 PRINT("PARITY 5: 0x")
495 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 5)], %g2
496 PRINTX(%g2)
497 PRINT("PARITY 6: 0x")
498 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 6)], %g2
499 PRINTX(%g2)
500 PRINT("PARITY 7: 0x")
501 ldub [%g1 + ERR_MMU_PARITY + (ERR_MMU_PARITY_INCR * 7)], %g2
502 PRINTX(%g2)
503 PRINT("\r\nTSB_CFG_CTX0_0: 0x")
504 ldx [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 0)], %g2
505 PRINTX(%g2)
506 PRINT("\r\nTSB_CFG_CTX0_1: 0x")
507 ldx [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 1)], %g2
508 PRINTX(%g2)
509 PRINT("\r\nTSB_CFG_CTX0_2: 0x")
510 ldx [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 2)], %g2
511 PRINTX(%g2)
512 PRINT("\r\nTSB_CFG_CTX0_3: 0x")
513 ldx [%g1 + ERR_MMU_TSB_CFG_CTX0 + (ERR_MMU_TSB_CFG_CTX0_INCR * 3)], %g2
514 PRINTX(%g2)
515 PRINT("\r\nTSB_CFG_CTXNZ_0: 0x")
516 ldx [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 0)], %g2
517 PRINTX(%g2)
518 PRINT("\r\nTSB_CFG_CTXNZ_1: 0x")
519 ldx [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 1)], %g2
520 PRINTX(%g2)
521 PRINT("\r\nTSB_CFG_CTXNZ_2: 0x")
522 ldx [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 2)], %g2
523 PRINTX(%g2)
524 PRINT("\r\nTSB_CFG_CTXNZ_3: 0x")
525 ldx [%g1 + ERR_MMU_TSB_CFG_CTXNZ + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 3)], %g2
526 PRINTX(%g2)
527 PRINT("\r\nREAL_RANGE_0: 0x")
528 ldx [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_REAL_RANGE_INCR * 0)], %g2
529 PRINTX(%g2)
530 PRINT("\r\nREAL_RANGE_1: 0x")
531 ldx [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_REAL_RANGE_INCR * 1)], %g2
532 PRINTX(%g2)
533 PRINT("\r\nREAL_RANGE_2: 0x")
534 ldx [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_REAL_RANGE_INCR * 2)], %g2
535 PRINTX(%g2)
536 PRINT("\r\nREAL_RANGE_3: 0x")
537 ldx [%g1 + ERR_MMU_REAL_RANGE + (ERR_MMU_TSB_CFG_CTXNZ_INCR * 3)], %g2
538 PRINTX(%g2)
539 PRINT("\r\nPHYS_OFFSET_0: 0x")
540 ldx [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 0)], %g2
541 PRINTX(%g2)
542 PRINT("\r\nPHYS_OFFSET_1: 0x")
543 ldx [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 1)], %g2
544 PRINTX(%g2)
545 PRINT("\r\nPHYS_OFFSET_2: 0x")
546 ldx [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 2)], %g2
547 PRINTX(%g2)
548 PRINT("\r\nPHYS_OFFSET_3: 0x")
549 ldx [%g1 + ERR_MMU_PHYS_OFFSET + (ERR_MMU_PHYS_OFFSET_INCR * 3)], %g2
550 PRINTX(%g2)
551 PRINT("\r\n")
552
553 mov %g6, %g7
554#endif /* DEBUG */
555
556 HVRET
557 SET_SIZE(mra_print)
558