Initial commit of OpenSPARC T2 architecture model.
hypervisor/src/greatlakes/huron/src/errors_cmp.s
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: errors_cmp.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49#pragma ident "@(#)errors_cmp.s 1.4 07/09/11 SMI"
50
51#include <sys/asm_linkage.h>
52#include <sun4v/asi.h>
53#include <sun4v/queue.h>
54#include <sparcv9/misc.h>
55#include <sun4v/traps.h>
56#include <hypervisor.h>
57#include <asi.h>
58#include <mmu.h>
59#include <hprivregs.h>
60#include <config.h>
61
62#include <offsets.h>
63#include <util.h>
64#include <error_defs.h>
65#include <error_regs.h>
66#include <error_asm.h>
67
68 /*
69 * Dump STB diagnostic data
70 * %g7 return address
71 */
72 ENTRY(dump_store_buffer)
73
74 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
75
76 /*
77 * get diag_buf->err_stb
78 */
79 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
80 add %g1, ERR_DIAG_DATA_STB, %g1
81
82 /*
83 * STB index from DFESR[57:55]
84 */
85 GET_ERR_DFESR(%g4, %g3)
86 srlx %g4, DFESR_STB_INDEX_SHIFT, %g4
87 and %g4, DFESR_STB_INDEX_MASK, %g3
88 sllx %g3, ASI_STB_ENTRY_SHIFT, %g3
89
90 /*
91 * Store Buffer data
92 */
93 or %g3, ASI_STB_FIELD_DATA, %g5
94 ldxa [%g5]ASI_STB_ACCESS, %g2
95 stx %g2, [%g1 + ERR_STB_DATA]
96 /*
97 * Store Buffer data ECC
98 */
99 or %g3, ASI_STB_FIELD_DATA_ECC, %g5
100 ldxa [%g5]ASI_STB_ACCESS, %g2
101 stx %g2, [%g1 + ERR_STB_DATA_ECC]
102 /*
103 * Store Buffer control and address parity
104 */
105 or %g3, ASI_STB_FIELD_PARITY, %g5
106 ldxa [%g5]ASI_STB_ACCESS, %g2
107 stx %g2, [%g1 + ERR_STB_PARITY]
108 /*
109 * Store Buffer address and byte marks
110 */
111 or %g3, ASI_STB_FIELD_MARKS, %g5
112 ldxa [%g5]ASI_STB_ACCESS, %g2
113 stx %g2, [%g1 + ERR_STB_MARKS]
114 /*
115 * Store Buffer current STB pointer
116 */
117 or %g3, ASI_STB_FIELD_CURR_PTR, %g5
118 ldxa [%g5]ASI_STB_ACCESS, %g2
119 stx %g2, [%g1 + ERR_STB_CURR_PTR]
120
121 HVRET
122
123 SET_SIZE(dump_store_buffer)
124
125 /*
126 * Clear a StoreBuffer error
127 * %g7 return address
128 */
129 ENTRY(correct_stb)
130
131 membar #Sync
132
133 HVRET
134
135 SET_SIZE(correct_stb)
136
137 /*
138 * Dump scratchpad diagnostic data
139 * %g7 return address
140 */
141 ENTRY(dump_scratchpad)
142
143 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
144
145 /*
146 * get diag_buf->err_scratchpad
147 */
148 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
149 add %g1, ERR_DIAG_DATA_SCRATCHPAD, %g1
150
151 GET_ERR_DSFAR(%g4, %g3)
152
153 /*
154 * Scratchpad index from D-SFAR[2:0]
155 * %g4 D-SFAR
156 */
157 srlx %g4, DSFAR_SCRATCHPAD_INDEX_SHIFT, %g4
158 and %g4, DSFAR_SCRATCHPAD_INDEX_MASK, %g3
159 sllx %g3, ASI_SCRATCHPAD_INDEX_SHIFT, %g3
160
161 /*
162 * Scratchpad data
163 */
164 or %g3, ASI_SCRATCHPAD_DATA_NP_DATA, %g5
165 ldxa [%g5]ASI_SCRATCHPAD_ACCESS, %g2
166 stx %g2, [%g1 + ERR_SCRATCHPAD_DATA]
167 /*
168 * Scratchpad ECC
169 */
170 or %g3, ASI_SCRATCHPAD_DATA_NP_ECC, %g5
171 ldxa [%g5]ASI_SCRATCHPAD_ACCESS, %g2
172 stx %g2, [%g1 + ERR_SCRATCHPAD_ECC]
173
174 HVRET
175
176 SET_SIZE(dump_scratchpad)
177
178 /*
179 * Dump trap stack array diagnostic data
180 * %g7 return address
181 */
182 ENTRY(dump_trapstack)
183
184 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
185
186 /*
187 * get diag_buf->err_tsa
188 */
189 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
190 add %g1, ERR_DIAG_DATA_TSA, %g1
191
192 GET_ERR_DSFAR(%g4, %g3)
193
194 /*
195 * Trapstack index from D-SFAR[2:0]
196 * %g4 D-SFAR
197 */
198 srlx %g4, DSFAR_TSA_INDEX_SHIFT, %g4
199 and %g4, DSFAR_TSA_INDEX_MASK, %g3
200 sllx %g3, ASI_TSA_INDEX_SHIFT, %g3
201
202 /*
203 * Trapstack ECC
204 */
205 ldxa [%g3]ASI_TSA_ACCESS, %g2
206 stx %g2, [%g1 + ERR_TSA_ECC]
207
208 /*
209 * We could have got the precise internal_processor_error
210 * trap from %tl - 1. Read the trap registers from that
211 * TL and store them. To avoid recursive errors we need to
212 * disable CERER.TSAC/CERER.TSAU
213 */
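	/*
	 * Illustrative sketch (not part of the build): the CERER
	 * save/mask/restore pattern used below, in C-like form with
	 * hypothetical ldxa/stxa accessors:
	 *
	 *	uint64_t cerer = ldxa_err_en(CORE_ERR_REPORT_EN);
	 *	stxa_err_en(CORE_ERR_REPORT_EN,
	 *	    cerer & ~(ERR_TSAU | ERR_TSAC)); // mask TSA error reporting
	 *	... read %tt/%tstate/%htstate/%tpc/%tnpc at (tl - 1) ...
	 *	stxa_err_en(CORE_ERR_REPORT_EN, cerer); // restore original CERER
	 */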
214 mov CORE_ERR_REPORT_EN, %g3
215 ldxa [%g3]ASI_ERR_EN, %g4
216 setx (ERR_TSAU | ERR_TSAC), %g5, %g6
217 andn %g4, %g6, %g6
218 stxa %g6, [%g3]ASI_ERR_EN
219
220 rdpr %tl, %g5
221 dec %g5
222
223 stx %g5, [%g1 + ERR_TSA_TL]
224 /*
225 * Note that we could have got the error at TL = 0 (from Legion).
226 * In that case we don't want to decrement TL as
227 * reading the other trap registers with TL = 0 is not allowed.
228 */
229 brnz,a,pt %g5, 1f
230 wrpr %g5, %tl ! delay slot
2311:
232 rdpr %tt, %g2
233 stx %g2, [%g1 + ERR_TSA_TT]
234 rdpr %tstate, %g2
235 stx %g2, [%g1 + ERR_TSA_TSTATE]
236 rdhpr %htstate, %g2
237 stx %g2, [%g1 + ERR_TSA_HTSTATE]
238 rdpr %tpc, %g2
239 stx %g2, [%g1 + ERR_TSA_TPC]
240 rdpr %tnpc, %g2
241 stx %g2, [%g1 + ERR_TSA_TNPC]
242
243 /*
244 * Back to correct TL
245 */
246
247 inc %g5
248 wrpr %g5, %tl
249
250 /*
251 * TSA ECC covers mondo queues also
252 */
253 mov ERROR_RESUMABLE_QUEUE_HEAD, %g5
254 ldxa [%g5]ASI_QUEUE, %g5
255 stx %g5, [%g1 + ERR_TSA_ERR_RES_QHEAD]
256 mov ERROR_RESUMABLE_QUEUE_TAIL, %g5
257 ldxa [%g5]ASI_QUEUE, %g5
258 stx %g5, [%g1 + ERR_TSA_ERR_RES_QTAIL]
259 mov ERROR_NONRESUMABLE_QUEUE_HEAD, %g5
260 ldxa [%g5]ASI_QUEUE, %g5
261 stx %g5, [%g1 + ERR_TSA_ERR_NONRES_QHEAD]
262 mov ERROR_NONRESUMABLE_QUEUE_TAIL, %g5
263 ldxa [%g5]ASI_QUEUE, %g5
264 stx %g5, [%g1 + ERR_TSA_ERR_NONRES_QTAIL]
265 mov CPU_MONDO_QUEUE_HEAD, %g5
266 ldxa [%g5]ASI_QUEUE, %g5
267 stx %g5, [%g1 + ERR_TSA_CPU_MONDO_QHEAD]
268 mov CPU_MONDO_QUEUE_TAIL, %g5
269 ldxa [%g5]ASI_QUEUE, %g5
270 stx %g5, [%g1 + ERR_TSA_CPU_MONDO_QTAIL]
271 mov DEV_MONDO_QUEUE_HEAD, %g5
272 ldxa [%g5]ASI_QUEUE, %g5
273 stx %g5, [%g1 + ERR_TSA_DEV_MONDO_QHEAD]
274 mov DEV_MONDO_QUEUE_TAIL, %g5
275 ldxa [%g5]ASI_QUEUE, %g5
276 stx %g5, [%g1 + ERR_TSA_DEV_MONDO_QTAIL]
277
278 /*
279 * Set CORE_ERR_ENABLE back to original
280 */
281 stxa %g4, [%g3]ASI_ERR_EN
282
283 HVRET
284
285 SET_SIZE(dump_trapstack)
286
287 /*
288 * Fix Trap Stack array ECC errors
289 * args
290 * %g7 return address
291 */
292 ENTRY(correct_trapstack)
293
294 GET_ERR_DSFAR(%g4, %g5)
295 srlx %g4, DSFAR_TSA_INDEX_SHIFT, %g5
296 and %g5, DSFAR_TSA_INDEX_MASK, %g5
297 ! %g5 index
298 cmp %g5, 7 ! TSA entry not used
299 be correct_trapstack_exit
300 nop
301
302 setx core_array_ecc_syndrome_table, %g3, %g2
303 RELOC_OFFSET(%g6, %g3)
304 sub %g2, %g3, %g2
305 ! %g2 ecc syndrome table
306
307 srlx %g4, DSFAR_TSA_EVEN_SYNDROME_SHIFT, %g6
308 and %g6, DSFAR_TSA_SYNDROME_MASK, %g6
309 ! %g6 syndrome
310
311 mulx %g6, ECC_SYNDROME_TABLE_ENTRY_SIZE, %g6
312 add %g2, %g6, %g3
313 ldub [%g3], %g3
314 ! %g3 correction mask for lower 68 bits
315 cmp %g3, ECC_ne
316 bne 1f
317 nop
318
319 srlx %g4, DSFAR_TSA_ODD_SYNDROME_SHIFT, %g6
320 and %g6, DSFAR_TSA_SYNDROME_MASK, %g6
321 mulx %g6, ECC_SYNDROME_TABLE_ENTRY_SIZE, %g6
322 add %g2, %g6, %g3
323 ldub [%g3], %g3
324 ! %g3 correction mask for upper 68 bits
325 cmp %g3, ECC_LAST_BIT
326 ble,a %xcc, 1f ! if the syndrome is for a bit,
327 add %g3, ECC_LAST_BIT, %g3 ! move it up to the top 67 bits
3281:
329
330 ! %g3 syndrome
331 cmp %g3, ECC_ne ! No error
332 be correct_trapstack_exit
333 cmp %g3, ECC_U ! Uncorrectable double (or 2n) bit error
334 be convert_tsac_to_tsau
335 cmp %g3, ECC_M ! Triple or worse (2n + 1) bit error
336 be convert_tsac_to_tsau
337
338 /*
339 * Disable TSAC errors
340 */
341 mov CORE_ERR_REPORT_EN, %g6
342 ldxa [%g6]ASI_ERR_EN, %g4
343 setx ERR_TSAC, %g2, %g6
344 andn %g4, %g6, %g4
345 mov CORE_ERR_REPORT_EN, %g6
346 stxa %g4, [%g6]ASI_ERR_EN
347
348 ! %g5 index
349 cmp %g5, 6 ! mondo/dev/error queues
350 be correct_trapstack_queues
351 nop
352
353 /*
354 * error is in the trap registers
355 * %g5 index [0 -> 5]
356 * %g3 bit in error
357 *
358 * We use %g3 to determine which of the trap registers is in
359 * error, don't change the order of these checks.
360 */
361
362 cmp %g3, ECC_C0
363 bl 1f
364 nop
365
366 /*
367 * Checkbit or unused bit error
368 * read/write any trap register to correct - so we read write them all
369 */
370 rdpr %tl, %g5
371 dec %g5
372 CORRECT_TSA_ALL_REGS(%g5, %g4, %g2, correct_trapstack_exit)
373 /* NOTREACHED */
374
3751:
376 rdpr %tl, %g5
377 dec %g5
378
379 /*
380 * We have a single bit error in one of the trap registers
381 * %g3 bit in error
382 * %g5 trap level when error occurred
383 */
384 cmp %g3, TSA_TNPC_HI_BIT
385 bg,a %xcc, 1f
386 sub %g3, TSA_TNPC_LO_BIT, %g3
387 add %g3, 2, %g3 ! bits[45:0] -> TNPC[47:2]
388 CORRECT_TSA_PREG(%tnpc, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
3891:
390 cmp %g3, TSA_TPC_HI_BIT
391 bg,a %xcc, 1f
392 sub %g3, TSA_TPC_LO_BIT, %g3
393 add %g3, 2, %g3 ! bits[45:0] -> TPC[47:2]
394 CORRECT_TSA_PREG(%tpc, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
3951:
396 cmp %g3, TSA_TT_HI_BIT
397 bg,a %xcc, 1f
398 sub %g3, TSA_TT_LO_BIT, %g3
399 CORRECT_TSA_PREG(%tt, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4001:
401 cmp %g3, TSA_TSTATE_CWP_HI_BIT
402 bg,a %xcc, 1f
403 sub %g3, TSA_TSTATE_CWP_LO_BIT, %g3
404 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4051:
406 cmp %g3, TSA_HTSTATE_TLZ_HI_BIT
407 bg,a %xcc, 1f
408 sub %g3, TSA_HTSTATE_TLZ_LO_BIT, %g3
409 CORRECT_TSA_HREG(%htstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4101:
411 cmp %g3, TSA_TSTATE_PSTATE_IE_HI_BIT
412 bg,a %xcc, 1f
413 mov 9, %g3 !tstate.pstate.ie bit 9
414 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4151:
416 cmp %g3, TSA_TSTATE_PSTATE_PRIV_HI_BIT
417 bg,a %xcc, 1f
418 mov 10, %g3 !tstate.pstate.priv bit 10
419 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4201:
421 cmp %g3, TSA_TSTATE_PSTATE_AM_HI_BIT
422 bg,a %xcc, 1f
423 mov 11, %g3 !tstate.pstate.am bit 11
424 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4251:
426 cmp %g3, TSA_TSTATE_PSTATE_PEF_HI_BIT
427 bg,a %xcc, 1f
428 mov 12, %g3 !tstate.pstate.pef bit 12
429 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4301:
431 cmp %g3, TSA_HTSTATE_RED_HI_BIT
432 bg,a %xcc, 1f
433 mov 5, %g3 !htstate.red 5
434 CORRECT_TSA_HREG(%htstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4351:
436 cmp %g3, TSA_HTSTATE_PRIV_HI_BIT
437 bg,a %xcc, 1f
438 mov 2, %g3 !htstate.priv bit 2
439 CORRECT_TSA_HREG(%htstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4401:
441 cmp %g3, TSA_TSTATE_PSTATE_TCT_HI_BIT
442 bg,a %xcc, 1f
443 mov 20, %g3 !tstate.pstate.tct bit 20
444 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4451:
446 cmp %g3, TSA_TSTATE_PSTATE_TLE_HI_BIT
447 bg,a %xcc, 1f
448 mov 16, %g3 !tstate.pstate.tle bit 16
449 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4501:
451 cmp %g3, TSA_TSTATE_PSTATE_CLE_HI_BIT
452 bg,a %xcc, 1f
453 mov 17, %g3 !tstate.pstate.cle bit 17
454 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4551:
456 cmp %g3, TSA_HTSTATE_IBE_HI_BIT
457 bg,a %xcc, 1f
458 mov 10, %g3 !htstate.ibe bit 10
459 CORRECT_TSA_HREG(%htstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4601:
461 cmp %g3, TSA_TSTATE_ASI_HI_BIT
462 bg,a %xcc, 1f
463 sub %g3, TSA_TSTATE_ASI_LO_BIT, %g3
464 add %g3, 24, %g3 ! tstate.asi [31:24]
465 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4661:
467 cmp %g3, TSA_TSTATE_CCR_HI_BIT
468 bg,a %xcc, 1f
469 sub %g3, TSA_TSTATE_CCR_LO_BIT, %g3
470 add %g3, 32, %g3 ! tstate.ccr [39:32]
471 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
4721:
473
474 cmp %g3, TSA_TSTATE_GL_HI_BIT
475 bg,a %xcc, correct_trapstack_error_in_ecc
476 sub %g3, TSA_TSTATE_GL_LO_BIT, %g3
477 add %g3, 40, %g3 ! tstate.gl [41:40]
478 CORRECT_TSA_PREG(%tstate, %g5, %g3, %g4, %g2, %g6, correct_trapstack_exit)
479 /* NOTREACHED*/
480
481correct_trapstack_error_in_ecc:
482 ! should not get here ...
483 /*
484 * Set CORE_ERR_ENABLE back to original
485 */
486 mov CORE_ERR_REPORT_EN, %g3
487 ldxa [%g3]ASI_ERR_EN, %g4
488 setx ERR_TSAC, %g2, %g6
489 or %g4, %g6, %g4
490 stxa %g4, [%g3]ASI_ERR_EN
491 ba convert_tsac_to_tsau
492 nop
493
494correct_trapstack_queues:
495 /*
496 * error is in the queue ASI registers
497 * %g3 bit in error
498 *
499 * We use %g3 to determine which of the queue ASI registers is in
500 * error, don't change the order of these checks.
501 */
502 cmp %g3, TSA_NONRES_ERR_QUEUE_TAIL_HI_BIT
503 bg,a %xcc, 1f
504 sub %g3, TSA_NONRES_ERR_QUEUE_TAIL_LO_BIT, %g3 ! bits [21:14] -> [13:6]
505 add %g3, 6, %g3
506 CORRECT_TSA_QUEUE(ERROR_NONRESUMABLE_QUEUE_TAIL,
507 %g3, %g4, %g2, %g6, correct_trapstack_exit)
5081:
509 cmp %g3, TSA_NONRES_ERR_QUEUE_HEAD_HI_BIT
510 bg,a %xcc, 1f
511 sub %g3, TSA_NONRES_ERR_QUEUE_HEAD_LO_BIT, %g3 ! bits [29:22] -> [13:6]
512 add %g3, 6, %g3
513 CORRECT_TSA_QUEUE(ERROR_NONRESUMABLE_QUEUE_HEAD,
514 %g3, %g4, %g2, %g6, correct_trapstack_exit)
5151:
516 cmp %g3, TSA_RES_ERR_QUEUE_TAIL_HI_BIT
517 bg,a %xcc, 1f
518 sub %g3, TSA_RES_ERR_QUEUE_TAIL_LO_BIT, %g3 ! bits [37:30] -> [13:6]
519 add %g3, 6, %g3
520 CORRECT_TSA_QUEUE(ERROR_RESUMABLE_QUEUE_TAIL,
521 %g3, %g4, %g2, %g6, correct_trapstack_exit)
5221:
523 cmp %g3, TSA_RES_ERR_QUEUE_HEAD_HI_BIT
524 bg,a %xcc, 1f
525 sub %g3, TSA_RES_ERR_QUEUE_HEAD_LO_BIT, %g3 ! bits [45:38] -> [13:6]
526 add %g3, 6, %g3
527 CORRECT_TSA_QUEUE(ERROR_RESUMABLE_QUEUE_HEAD,
528 %g3, %g4, %g2, %g6, correct_trapstack_exit)
5291:
530 cmp %g3, TSA_DEV_QUEUE_TAIL_HI_BIT
531 bg,a %xcc, 1f
532 sub %g3, TSA_DEV_QUEUE_TAIL_LO_BIT, %g3 ! bits [67:60] -> [13:6]
533 add %g3, 6, %g3
534 CORRECT_TSA_QUEUE(DEV_MONDO_QUEUE_TAIL,
535 %g3, %g4, %g2, %g6, correct_trapstack_exit)
5361:
537 cmp %g3, TSA_DEV_QUEUE_HEAD_HI_BIT
538 bg,a %xcc, 1f
539 sub %g3, TSA_DEV_QUEUE_HEAD_LO_BIT, %g3 ! bits [75:68] -> [13:6]
540 add %g3, 6, %g3
541 CORRECT_TSA_QUEUE(DEV_MONDO_QUEUE_HEAD,
542 %g3, %g4, %g2, %g6, correct_trapstack_exit)
5431:
544 cmp %g3, TSA_MONDO_QUEUE_TAIL_HI_BIT
545 bg,a %xcc, 1f
546 sub %g3, TSA_MONDO_QUEUE_TAIL_LO_BIT, %g3 ! bits [83:76] -> [13:6]
547 add %g3, 6, %g3
548 CORRECT_TSA_QUEUE(CPU_MONDO_QUEUE_TAIL,
549 %g3, %g4, %g2, %g6, correct_trapstack_exit)
5501:
551 cmp %g3, TSA_MONDO_QUEUE_HEAD_HI_BIT
552 bg,a %xcc, correct_trapstack_error_in_ecc
553 sub %g3, TSA_MONDO_QUEUE_HEAD_LO_BIT, %g3 ! bits [91:84] -> [13:6]
554 add %g3, 6, %g3
555 CORRECT_TSA_QUEUE(CPU_MONDO_QUEUE_HEAD,
556 %g3, %g4, %g2, %g6, correct_trapstack_exit)
557 /* NOTREACHED*/
558
559correct_trapstack_exit:
560 /*
561 * Set CORE_ERR_ENABLE back to original
562 */
563 mov CORE_ERR_REPORT_EN, %g6
564 ldxa [%g6]ASI_ERR_EN, %g4
565 setx ERR_TSAC, %g2, %g3
566 or %g4, %g3, %g4
567 stxa %g4, [%g6]ASI_ERR_EN
568
569 HVRET
570
571convert_tsac_to_tsau:
572 /*
573 * We know that TSAU is (TSAC entry + 1) so
574 * get the error table entry and move it forward
575 * to the TSAU entry
576 */
577 CONVERT_CE_TO_UE(-1)
578 /* NOTREACHED */
579
580 SET_SIZE(correct_trapstack)
581
582 /*
583 * Dump Tick_compare diagnostic data
584 * %g7 return address
585 */
586 ENTRY(dump_tick_compare)
587
588 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
589
590 /*
591 * get diag_buf->err_tca
592 */
593 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
594 add %g1, ERR_DIAG_DATA_TCA, %g1
595
596 GET_ERR_DSFAR(%g4, %g3)
597
598 /*
599 * TCA index from D-SFAR[2:0]
600 * %g4 D-SFAR
601 */
602 srlx %g4, DSFAR_TCA_INDEX_SHIFT, %g4
603 and %g4, DSFAR_TCA_INDEX_MASK, %g3
604 sllx %g3, ASI_TICK_INDEX_SHIFT, %g3
605
606 /*
607 * TCA data
608 */
609 or %g3, ASI_TICK_DATA_NP_DATA, %g5
610 ldxa [%g5]ASI_TICK_ACCESS, %g2
611 stx %g2, [%g1 + ERR_TCA_DATA]
612 /*
613 * TCA ECC
614 */
615 or %g3, ASI_TICK_DATA_NP_ECC, %g5
616 ldxa [%g5]ASI_TICK_ACCESS, %g2
617 stx %g2, [%g1 + ERR_TCA_ECC]
618
619 HVRET
620
621 SET_SIZE(dump_tick_compare)
622
623 /*
624 * TCCP
625 * Index/syndrome for a precise TCCP error stored in D-SFAR
626 *
627 * %g7 return address
628 */
629 ENTRY(correct_tick_tccp)
630
631 GET_ERR_DSFAR(%g4, %g5)
632 srlx %g4, DSFAR_TCA_INDEX_SHIFT, %g5
633 and %g5, DSFAR_TCA_INDEX_MASK, %g5
634 ! %g5 index
635
636 srlx %g4, DSFAR_TCA_SYNDROME_SHIFT, %g4
637 and %g4, DSFAR_TCA_SYNDROME_MASK, %g4
638 ! %g4 syndrome
639
640 ba correct_tick_compare ! tail call
641 nop
642 SET_SIZE(correct_tick_tccp)
643
644 /*
645 * TCCD
646 * Index/syndrome for a disrupting TCCD error stored in DESR
647 *
648 * %g7 return address
649 */
650 ENTRY(correct_tick_tccd)
651
652 GET_ERR_DESR(%g4, %g5)
653 srlx %g4, DESR_TCA_INDEX_SHIFT, %g5
654 and %g5, DESR_TCA_INDEX_MASK, %g5
655 ! %g5 index
656
657 srlx %g4, DESR_TCA_SYNDROME_SHIFT, %g4
658 and %g4, DESR_TCA_SYNDROME_MASK, %g4
659 ! %g4 syndrome
660
661 ba correct_tick_compare ! tail call
662 nop
663 SET_SIZE(correct_tick_tccd)
664
665 /*
666 * Fix tick_compare error
667 *
668 * %g4 syndrome
669 * %g5 index
670 * %g2 - %g5 clobbered
671 * %g7 return address
672 *
673 * TCA_ECC_ERRATA: the correct value is not returned when we read
674 * from the TCA diagnostic registers
675 */
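	/*
	 * Illustrative sketch (not part of the build) of the syndrome
	 * decode performed below; the table accessor is hypothetical,
	 * the constants are the assembler ones:
	 *
	 *	uint8_t code = core_array_ecc_syndrome_table[syndrome];
	 *	if (code == ECC_ne)
	 *		return;				// no error
	 *	if (code == ECC_U || code == ECC_M)
	 *		convert_tccp_to_tcup();		// 2+ bits: treat as UE
	 *	uint64_t mask = (code < ECC_C0) ? (1ULL << code) : 0;
	 *	// mask == 0 for check-bit errors; a plain rewrite clears those
	 */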
676 ENTRY(correct_tick_compare)
677 setx core_array_ecc_syndrome_table, %g2, %g3
678 RELOC_OFFSET(%g6, %g2)
679 sub %g3, %g2, %g3
680 ! %g3 ecc syndrome table
681
682 mulx %g4, ECC_SYNDROME_TABLE_ENTRY_SIZE, %g4
683 add %g3, %g4, %g3
684 ldub [%g3], %g3
685 ! %g3 correction mask
686
687 cmp %g3, ECC_ne ! no error
688 be correct_tick_compare_exit
689 cmp %g3, ECC_U ! Uncorrectable double (or 2n) bit error
690 be convert_tccp_to_tcup
691 cmp %g3, ECC_M ! Triple or worse (2n + 1) bit error
692 be convert_tccp_to_tcup
693 .empty
694
695 mov 1, %g6
696 sllx %g6, %g3, %g6
697 ! no correction mask for checkbit errors
698 cmp %g3, ECC_C0
699 movge %xcc, %g0, %g6
700 ! %g6 correction mask
701
702#ifdef ERRATA_TICK_INDEX
703 /*
704 * On precise internal_processor_error traps, the
705 * tick register index from the D-SFAR may be incorrect.
706 * This does not apply to disrupting sw_recoverable_error
707 * traps.
708 */
709 rdpr %tt, %g1
710 cmp %g1, TT_PROCERR
711 bne,pt %xcc, 3f
712 nop
713
714 /*
715 * If it's a correction bit error, %g6 == 0, it is safe
716 * to read/write all HSTICK/TICK/STICK CMP registers and
717 * continue as we will get the correct value from the
718 * diagnostic register (without causing an error trap) and
719 * the write of this value will clear the ECC error.
720 */
721 brnz,pt %g6, 1f
722 nop
723
724#ifdef TCA_ECC_ERRATA
725 /*
726 * We don't have any information on whether interrupts
727 * are enabled for TICK_CMPR, and we can't get valid
728 * data from either the TICKCMPR register or the diagnostic
729 * register, so we clear the error by writing (TICK + delay)
730 * which will also trigger a TICKCMPR interrupt. The guest
731 * will have to figure out whether it's a spurious interrupt
732 * or not.
733 */
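	/*
	 * Illustrative sketch (not part of the build): under
	 * TCA_ECC_ERRATA the three compare registers are simply
	 * rewritten with a near-future value (helpers hypothetical):
	 *
	 *	wr_tick_cmpr(rd_tick() + ERR_TCA_INCREMENT);
	 *	wr_hstick_cmpr(rd_stick() + ERR_TCA_INCREMENT);
	 *	wr_stick_cmpr(rd_stick() + ERR_TCA_INCREMENT);
	 *
	 * The rewrite clears the bad ECC; the early interrupt that
	 * results is left for the guest/HV to dismiss as spurious.
	 */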
734 set ERR_TCA_INCREMENT, %g4
735 rd %tick, %g5
736 add %g5, %g4, %g5
737 wr %g5, TICKCMP
738
739 /*
740 * We don't have any information on whether interrupts
741 * are enabled for HSTICK_CMPR, and we can't get valid
742 * data from either the HSTICKCMPR register or the diagnostic
743 * register, so we clear the error by writing (STICK + delay)
744 * which will also trigger a HSTICKCMPR interrupt. The HV
745 * will have to figure out whether it's a spurious interrupt
746 * or not.
747 */
748 rd STICK, %g5
749 add %g5, %g4, %g5
750 wrhpr %g5, %hstick_cmpr
751
752 /*
753 * We don't have any information on whether interrupts
754 * are enabled for STICK_CMPR, and we can't get valid
755 * data from either the STICKCMPR register or the diagnostic
756 * register, so we clear the error by writing (STICK + delay)
757 * which will also trigger a STICKCMPR interrupt. The guest
758 * will have to figure out whether it's a spurious interrupt
759 * or not.
760 */
761 rd STICK, %g5
762 add %g5, %g4, %g5
763 wr %g5, STICKCMP
764
765#else /* !TCA_ECC_ERRATA */
766
767 mov TCA_TICK_CMPR, %g5
768 sllx %g5, ASI_TICK_INDEX_SHIFT, %g5
769 or %g5, ASI_TICK_DATA_NP_DATA, %g5
770 ldxa [%g5]ASI_TICK_ACCESS, %g5
771 ! %g5 tick_cmpr value
772 wr %g5, TICKCMP
773
774 mov TCA_STICK_CMPR, %g5
775 sllx %g5, ASI_TICK_INDEX_SHIFT, %g5
776 or %g5, ASI_TICK_DATA_NP_DATA, %g5
777 ldxa [%g5]ASI_TICK_ACCESS, %g5
778 ! %g5 stick_cmpr value
779 wr %g5, STICKCMP
780
781 mov TCA_HSTICK_COMPARE, %g5
782 sllx %g5, ASI_TICK_INDEX_SHIFT, %g5
783 or %g5, ASI_TICK_DATA_NP_DATA, %g5
784 ldxa [%g5]ASI_TICK_ACCESS, %g5
785 ! %g5 hstick_compare value
786 wrhpr %g5, %hstick_cmpr
787
788#endif /* TCA_ECC_ERRATA */
789
790 ba correct_tick_compare_exit
791 nop
792
7931:
794 /*
795 * If the error occurred in hyperprivileged mode, we
796 * know that this was an access to the hstick_cmpr
797 * register as the HV never accesses TICKCMP/STICKCMP registers
798 * (except when using the error injector).
799 */
800 rdhpr %htstate, %g1
801 btst HTSTATE_HPRIV, %g1
802 bnz correct_htick_compare
803 mov TCA_HSTICK_COMPARE, %g5
804
805 /*
806 * This precise trap was caused by an ECC error during a rdasr/rdhpr
807 * access to one of the tick compare registers. This ECC error will
808 * also cause the h/w comparisons to trigger a disrupting trap with
809 * the correct index set. Check if PSTATE.IE will be enabled after
810 * the RETRY, and if SETER.DE is set. If these conditions are met
811 * and we delay by at least 128 cycles, we will take the disrupting
812 * trap and be able to correct the ECC error. The instruction which
813 * caused this precise trap will then execute correctly. If either
814 * of these conditions is not satisfied we convert the error into a UE.
815 */
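	/*
	 * Illustrative sketch (not part of the build) of the decision
	 * made below, using the field names from the comment above:
	 *
	 *	if (((tstate >> TSTATE_PSTATE_SHIFT) & PSTATE_IE) &&
	 *	    (seter & ERR_DE)) {
	 *		return;		// disrupting trap will correct it on RETRY
	 *	} else {
	 *		tick_cmpr = 0;	// clear the error condition ...
	 *		stick_cmpr = 0;
	 *		convert_tccp_to_tcup();	// ... and report as uncorrectable
	 *	}
	 */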
816
817 ! PSTATE.IE enabled on RETRY ?
818 rdpr %tstate, %g1
819 srlx %g1, TSTATE_PSTATE_SHIFT, %g1
820 and %g1, PSTATE_IE, %g1
821 brz,pn %g1, 2f
822 nop
823
824 ! SETER.DE enabled after RETRY ?
825 mov CORE_ERR_TRAP_EN, %g1
826 ldxa [%g1]ASI_ERR_EN, %g1
827 setx ERR_DE, %g2, %g3
828 btst %g1, %g3
829 bz,pn %xcc, 2f
830 nop
831
832 /*
833 * We can just return without correcting the error, in which
834 * case we will handle the error as normal, sending a report
835 * to the SP. This will guarantee a sufficient delay.
836 *
837 * We will get a disrupting trap immediately on exiting the
838 * error trap handler via RETRY.
839 *
840 * The Diagnosis Engine will receive two error reports, one for
841 * this precise trap, one for the disrupting trap we expect
842 * on exiting this error handler.
843 */
844 ba correct_tick_compare_exit
845 nop
846
8472:
848 /*
849 * We don't know which it is - TICKCMP/STICKCMP, and we won't take
850 * a disrupting trap if we retry the instruction, so we clear
851 * TICKCMP/STICKCMP to get rid of the error condition and treat
852 * this as a UE.
853 */
854 wr %g0, TICKCMP
855 wr %g0, STICKCMP ! FIXME ?
856 ba convert_tccp_to_tcup
857 nop
8583:
859#endif /* ERRATA_TICK_INDEX */
860
861 cmp %g5, TCA_TICK_CMPR
862 be correct_tick_cmpr
863 cmp %g5, TCA_STICK_CMPR
864 be correct_stick_cmpr
865 cmp %g5, TCA_HSTICK_COMPARE
866 be correct_htick_compare
867 nop
868
869 ! should not get here ...
870 /* FALLTHRU */
871
872convert_tccp_to_tcup:
873 /*
874 * We know that TCUP/TCUD is (TCCP/TCCD entry + 1) so
875 * get the error table entry and move it forward
876 * to the TCUP entry
877 */
878 CONVERT_CE_TO_UE(-1)
879 /* NOTREACHED */
880
881correct_tick_cmpr:
882#ifdef TCA_ECC_ERRATA
883 /*
884 * We don't have any information on whether interrupts
885 * are enabled for TICK_CMPR, and we can't get valid
886 * data from either the TICKCMPR register or the diagnostic
887 * register, so we clear the error by writing (TICK + delay)
888 * which will also trigger a TICKCMPR interrupt. The guest
889 * will have to figure out whether it's a spurious interrupt
890 * or not.
891 */
892 set ERR_TCA_INCREMENT, %g4
893 rd %tick, %g5
894 add %g5, %g4, %g5
895 wr %g5, TICKCMP
896#else
897 /*
898 * read tick_cmpr from diagnostic ASI
899 * get syndrome from D-SFAR, correction code from
900 * core_array_ecc_syndrome_table
901 * xor correction mask with value and write back to ASR
902 */
903 sllx %g5, ASI_TICK_INDEX_SHIFT, %g5
904 or %g5, ASI_TICK_DATA_NP_DATA, %g5
905 ldxa [%g5]ASI_TICK_ACCESS, %g5
906 ! %g5 tick_cmpr value
907 xor %g5, %g6, %g5
908 wr %g5, TICKCMP
909#endif
910 ba correct_tick_compare_exit
911 nop
912
913correct_stick_cmpr:
914#ifdef TCA_ECC_ERRATA
915 /*
916 * We don't have any information on whether interrupts
917 * are enabled for STICK_CMPR, and we can't get valid
918 * data from either the STICKCMPR register or the diagnostic
919 * register, so we clear the error by writing (STICK + delay)
920 * which will also trigger a STICKCMPR interrupt. The guest
921 * will have to figure out whether it's a spurious interrupt
922 * or not.
923 */
924 set ERR_TCA_INCREMENT, %g4
925 rd STICK, %g5
926 add %g5, %g4, %g5
927 wr %g5, STICKCMP
928#else
929 /*
930 * read stick_cmpr from diagnostic ASI
931 * get syndrome from D-SFAR, correction code from
932 * core_array_ecc_syndrome_table
933 * xor correction mask with value and write back to ASR
934 */
935 sllx %g5, ASI_TICK_INDEX_SHIFT, %g5
936 or %g5, ASI_TICK_DATA_NP_DATA, %g5
937 ldxa [%g5]ASI_TICK_ACCESS, %g5
938 ! %g5 stick_cmpr value
939 xor %g5, %g6, %g5
940 wr %g5, STICKCMP
941#endif
942 ba correct_tick_compare_exit
943 nop
944
945correct_htick_compare:
946#ifdef TCA_ECC_ERRATA
947 /*
948 * We don't have any information on whether interrupts
949 * are enabled for HSTICK_CMPR, and we can't get valid
950 * data from either the HSTICKCMPR register or the diagnostic
951 * register, so we clear the error by writing (STICK + delay)
952 * which will also trigger a HSTICKCMPR interrupt. The HV
953 * will have to figure out whether it's a spurious interrupt
954 * or not.
955 */
956 set ERR_TCA_INCREMENT, %g4
957 rd STICK, %g5
958 add %g5, %g4, %g5
959 wrhpr %g5, %hstick_cmpr
960#else
961 /*
962 * read hstick_compare from diagnostic ASI
963 * get syndrome from D-SFAR, correction code from
964 * core_array_ecc_syndrome_table
965 * xor correction mask with value and write back to ASR
966 */
967 sllx %g5, ASI_TICK_INDEX_SHIFT, %g5
968 or %g5, ASI_TICK_DATA_NP_DATA, %g5
969 ldxa [%g5]ASI_TICK_ACCESS, %g5
970 ! %g5 hstick_compare value
971 xor %g5, %g6, %g5
972 wrhpr %g5, %hstick_cmpr
973#endif
974 /* FALLTHRU */
975
976correct_tick_compare_exit:
977 HVRET
978
979 SET_SIZE(correct_tick_compare)
980
981 /*
982 * Clear UEs from Tick register array
983 */
984 ENTRY(clear_tick_compare)
985
986 wr %g0, TICKCMP
987 wr %g0, STICKCMP
988 wrhpr %g0, %hstick_cmpr
989 HVRET
990
991 SET_SIZE(clear_tick_compare)
992
993 /*
994 * Fix scratchpad array error
995 * %g2 - %g5 clobbered
996 * %g7 return address
997 */
998 ENTRY(correct_scac)
999
1000 GET_ERR_DSFAR(%g4, %g5)
1001 srlx %g4, DSFAR_SCRATCHPAD_INDEX_SHIFT, %g5
1002 and %g5, DSFAR_SCRATCHPAD_INDEX_MASK, %g5
1003 sllx %g5, ASI_SCRATCHPAD_INDEX_SHIFT, %g5
1004 ! %g5 (scratchpad register * 8) => VA for
1005 ! diagnostic ASI_SCRATCHPAD_ACCESS register access
1006
1007 /*
1008 * If this is a hypervisor scratchpad register it was reloaded
1009 * with the correct data. As long as we didn't clobber the globals
1010 * we are good to go ...
1011 */
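	/*
	 * Illustrative sketch (not part of the build) of the checks
	 * below; rdpr_gl() is a hypothetical accessor:
	 *
	 *	if (va == HSCRATCH_VCPU_STRUCT || va == HSCRATCH_STRAND_STRUCT) {
	 *		int err_gl = (tstate >> TSTATE_GL_SHIFT) & TSTATE_GL_MASK;
	 *		if (err_gl == rdpr_gl())
	 *			convert_scac_to_scau();	// reload can't be trusted
	 *		else
	 *			return;		// already corrected on trap entry
	 *	}
	 *	// otherwise fall through to the syndrome-based correction
	 */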
1012 cmp %g5, HSCRATCH_VCPU_STRUCT
1013 be,pt %xcc, 1f
1014 nop
1015
1016 cmp %g5, HSCRATCH_STRAND_STRUCT
1017 be,pt %xcc, 1f
1018 nop
1019
1020 ba 2f
1021 nop
10221:
1023 rdpr %tstate, %g2
1024 srlx %g2, TSTATE_GL_SHIFT, %g2
1025 and %g2, TSTATE_GL_MASK, %g2
1026 rdpr %gl, %g3
1027 cmp %g2, %g3
1028 be,pt %xcc, convert_scac_to_scau
1029 nop
1030
1031 /*
1032 * It's a HV scratchpad register, we haven't clobbered the
1033 * globals, the register was corrected in the trap handler,
1034 * just return
1035 */
1036 ba,pt %xcc, correct_scac_exit
1037 nop
1038
10392:
1040 /*
1041 * read scratchpad from diagnostic ASI
1042 * get syndrome from D-SFAR, correction code from
1043 * core_array_ecc_syndrome_table
1044 * xor correction mask with value and write back to ASI
1045 */
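	/*
	 * Illustrative sketch (not part of the build) of the correction
	 * below; the diagnostic ASI accessors are hypothetical:
	 *
	 *	uint8_t code = core_array_ecc_syndrome_table[
	 *	    (dsfar >> DSFAR_SCRATCHPAD_SYNDROME_SHIFT) &
	 *	    DSFAR_SCRATCHPAD_SYNDROME_MASK];
	 *	if (code == ECC_ne) return;
	 *	if (code == ECC_U || code == ECC_M) convert_scac_to_scau();
	 *	uint64_t mask = (code < ECC_C0) ? (1ULL << code) : 0;
	 *	uint64_t val = ldxa_scratchpad_access(va); // data via diag ASI
	 *	stxa_scratchpad(va, val ^ mask);	   // corrected write-back
	 */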
1046 srlx %g4, DSFAR_SCRATCHPAD_SYNDROME_SHIFT, %g4
1047 and %g4, DSFAR_SCRATCHPAD_SYNDROME_MASK, %g4
1048 ! %g4 syndrome
1049
1050 setx core_array_ecc_syndrome_table, %g2, %g3
1051 RELOC_OFFSET(%g6, %g2)
1052 sub %g3, %g2, %g3
1053 ! %g3 ecc syndrome table
1054
1055 mulx %g4, ECC_SYNDROME_TABLE_ENTRY_SIZE, %g4
1056 add %g3, %g4, %g3
1057 ldub [%g3], %g3
1058 ! %g3 correction mask
1059
1060 cmp %g3, ECC_ne ! no error
1061 be correct_scac_exit
1062 cmp %g3, ECC_U ! Uncorrectable double (or 2n) bit error
1063 be convert_scac_to_scau
1064 cmp %g3, ECC_M ! Triple or worse (2n + 1) bit error
1065 be convert_scac_to_scau
1066 mov 1, %g4
1067 sllx %g4, %g3, %g4
1068 ! no correction mask for checkbit errors
1069 cmp %g3, ECC_C0
1070 movge %xcc, %g0, %g4
1071 ! %g4 correction mask
1072
1073 ! %g5 scratchpad register VA (index * 8)
1074 ldxa [%g5]ASI_SCRATCHPAD_ACCESS, %g6
1075 ! %g6 scratchpad register value
1076
1077 xor %g6, %g4, %g6
1078 stxa %g6, [%g5]ASI_SCRATCHPAD
1079
1080correct_scac_exit:
1081 HVRET
1082
1083convert_scac_to_scau:
1084 /*
1085 * We know that SCAU is (SCAC entry + 1) so
1086 * get the error table entry and move it forward
1087 * to the SCAU entry
1088 */
1089 CONVERT_CE_TO_UE(-1)
1090 /* NOTREACHED */
1091
1092 SET_SIZE(correct_scac)
1093
1094 /*
1095 * Fix scratchpad array UE if possible
1096 * %g2 - %g5 clobbered
1097 * %g7 return address
1098 */
1099 ENTRY(correct_scau)
1100
1101 GET_ERR_DSFAR(%g4, %g5)
1102 srlx %g4, DSFAR_SCRATCHPAD_INDEX_SHIFT, %g5
1103 and %g5, DSFAR_SCRATCHPAD_INDEX_MASK, %g5
1104 sllx %g5, 3, %g5
1105 ! %g5 (scratchpad register * 8) => VA for
1106 ! diagnostic ASI_SCRATCHPAD_ACCESS register access
1107
1108
1109 /*
1110 * If this is a hypervisor scratchpad register and we
1111 * haven't overwritten the trap globals, we can correct
1112 * this.
1113 */
1114 cmp %g5, HSCRATCH0
1115 blt %xcc, correct_scau_exit
1116 nop
1117 cmp %g5, HSCRATCH1
1118 bgt %xcc, correct_scau_exit
1119 nop
1120
1121 rdpr %tstate, %g2
1122 srlx %g2, TSTATE_GL_SHIFT, %g2
1123 and %g2, TSTATE_GL_MASK, %g2
1124 rdpr %gl, %g3
1125 cmp %g2, %g3
1126 bne,pt %xcc, convert_scau_to_scac
1127 nop
1128
1129 /*
1130 * Error was corrected on entry to trap handler.
1131 * See SCRATCHPAD_ERROR() macro.
1132 */
1133correct_scau_exit:
1134 HVRET
1135
1136convert_scau_to_scac:
1137 /*
1138 * We know that SCAC is (SCAU entry - 1) so
1139 * get the error table entry and move it back
1140 * to the SCAC entry
1141 */
1142 CONVERT_CE_TO_UE(+1)
1143 /* NOTREACHED */
1144
1145 SET_SIZE(correct_scau)
1146
1147 /*
1148 * Populate a sun4v ereport packet for STB errors
1149 * with invalid real address and size == 8
1150 * %g7 return address
1151 */
1152 ENTRY(stb_sun4v_report)
1153 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1154 brz,pn %g2, stb_sun4v_report_exit
1155 mov ERR_INVALID_RA, %g3
1156 stx %g3, [%g2 + ERR_SUN4V_RPRT_ADDR]
1157 mov 8, %g3
1158 st %g3, [%g2 + ERR_SUN4V_RPRT_SZ]
1159stb_sun4v_report_exit:
1160 HVRET
1161
1162 SET_SIZE(stb_sun4v_report)
1163
1164 /*
1165 * Populate a sun4v ereport packet for SCA errors
1166 * ASI == ASI_SCRATCHPAD
1167 * VA == SCA index
1168 *
1169 * %g7 return address
1170 */
1171 ENTRY(sca_sun4v_report)
1172 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1173 brz,pn %g2, sca_sun4v_report_exit
1174 mov ASI_SCRATCHPAD, %g3
1175 stub %g3, [%g2 + ERR_SUN4V_RPRT_ASI]
1176 /*
1177 * Scratchpad index from D-SFAR[2:0]
1178 */
1179 GET_ERR_DSFAR(%g4, %g3)
1180 srlx %g4, DSFAR_SCRATCHPAD_INDEX_SHIFT, %g4
1181 and %g4, DSFAR_SCRATCHPAD_INDEX_MASK, %g3
1182 sllx %g3, ASI_SCRATCHPAD_INDEX_SHIFT, %g3 ! index -> VA
1183 stx %g3, [%g2 + ERR_SUN4V_RPRT_ADDR]
1184 /*
1185 * SZ set to 8 bytes for a single ASI
1186 */
1187 mov 8, %g3
1188 st %g3, [%g2 + ERR_SUN4V_RPRT_SZ]
1189sca_sun4v_report_exit:
1190 HVRET
1191
1192 SET_SIZE(sca_sun4v_report)
1193
1194 /*
1195 * Populate a sun4v ereport packet for Tick_compare errors
1196 * %g7 return address
1197 */
1198 ENTRY(tick_sun4v_report)
1199
1200 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1201 brz,pn %g2, tick_sun4v_report_exit
1202 .empty
1203
1204 /*
1205 * TCA index from D-SFAR[2:0]
1206 * 00 TICK_CMPR
1207 * 01 STICK_CMPR
1208 * 10 HSTICK_COMPARE
1209 * 11 Reserved.
1210 *
1211 * We only send a guest report for tick/stick_cmpr
1212 */
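	/*
	 * Illustrative sketch (not part of the build) of the report
	 * selection below; rprt->reg and rprt->attr stand for the
	 * ERR_SUN4V_RPRT_REG/ATTR fields:
	 *
	 *	int idx = (dsfar >> DSFAR_TCA_INDEX_SHIFT) & DSFAR_TCA_INDEX_MASK;
	 *	if (idx >= 2)		// HSTICK_COMPARE or reserved
	 *		rprt->attr = 0;	// no guest report
	 *	else			// ASR 0x17 tick_cmpr, 0x19 stick_cmpr
	 *		rprt->reg = (idx ? 0x19 : 0x17) | SUN4V_VALID_REG;
	 */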
1213 GET_ERR_DSFAR(%g4, %g3)
1214 srlx %g4, DSFAR_TCA_INDEX_SHIFT, %g4
1215 and %g4, DSFAR_TCA_INDEX_MASK, %g3
1216 cmp %g3, 2 ! HSTICK_COMPARE
1217 blu,pn %xcc, 1f
1218 nop
1219
1220 stx %g0, [%g2 + ERR_SUN4V_RPRT_ATTR]
1221 HVRET
1222
12231:
1224 ! ASR 0x17 tick_cmpr, 0x19 STICK_CMPR
1225 mov 0x17, %g4
1226 brnz,a,pn %g3, 2f
1227 mov 0x19, %g4
12282:
1229 set SUN4V_VALID_REG, %g3
1230 or %g4, %g3, %g4
1231 stuh %g4, [%g2 + ERR_SUN4V_RPRT_REG]
1232
1233tick_sun4v_report_exit:
1234
1235 HVRET
1236
1237 SET_SIZE(tick_sun4v_report)
1238
1239 /*
1240 * Populate a sun4v ereport packet for TrapStack errors
1241 * ATTR == PREG
1242 * PREG = TPC[rs1]
1243 *
1244 * %g7 return address
1245 */
1246 ENTRY(tsa_sun4v_report)
1247 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1248 brz,pn %g2, tsa_sun4v_report_exit
1249 /*
1250 * Trapstack index from D-SFAR[2:0]
1251 */
1252 GET_ERR_DSFAR(%g4, %g3)
1253 srlx %g4, DSFAR_TSA_INDEX_SHIFT, %g5
1254 and %g5, DSFAR_TSA_INDEX_MASK, %g5
1255 ! %g5 index
1256 ! index == 6 -> mondo queues, 7 -> not used
1257 cmp %g5, 5
1258 bgeu,pn %xcc, 1f
1259 set SUN4V_VALID_REG, %g4
1260 or %g5, %g4, %g5
1261 stuh %g5, [%g2 + ERR_SUN4V_RPRT_REG]
1262 /*
1263 * SZ set to 8 bytes for a single ASI
1264 */
1265 mov 8, %g3
1266 st %g3, [%g2 + ERR_SUN4V_RPRT_SZ]
1267 HVRET
12681:
1269 /*
1270 * No guest report
1271 */
1272 stx %g0, [%g2 + ERR_SUN4V_RPRT_ATTR]
1273
1274tsa_sun4v_report_exit:
1275 HVRET
1276
1277 SET_SIZE(tsa_sun4v_report)
1278
1279 /*
1280 * Dump MAMEM data
1281 */
1282 ENTRY(dump_mamu)
1283
1284 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
1285
1286 /*
1287 * get diag_buf->err_mamu
1288 */
1289 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
1290 add %g1, ERR_DIAG_DATA_MAMU, %g1
1291
1292 mov ASI_MAU_CONTROL, %g2
1293 ldxa [%g2]ASI_STREAM, %g3
1294 stx %g3, [%g1 + ERR_MA_CTL]
1295 mov ASI_MAU_MPA, %g2
1296 ldxa [%g2]ASI_STREAM, %g3
1297 stx %g3, [%g1 + ERR_MA_PA]
1298 mov ASI_MAU_NP, %g2
1299 ldxa [%g2]ASI_STREAM, %g3
1300 stx %g3, [%g1 + ERR_MA_NP]
1301 mov ASI_MAU_SYNC, %g2
1302 ldxa [%g2]ASI_STREAM, %g3
1303 stx %g3, [%g1 + ERR_MA_SYNC]
1304 mov ASI_MAU_ADDR, %g2
1305 ldxa [%g2]ASI_STREAM, %g3
1306 stx %g3, [%g1 + ERR_MA_ADDR]
1307
1308 HVRET
1309
1310 SET_SIZE(dump_mamu)
1311
1312 ENTRY(dump_reg_ecc)
1313
1314 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
1315
1316 GET_ERR_DSFSR(%g3, %g2)
1317 GET_ERR_DSFAR(%g4, %g2)
1318
1319 /*
1320 * get diag_buf->err_reg
1321 */
1322 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
1323 add %g1, ERR_DIAG_DATA_REG, %g1
1324
1325 /*
1326 * %g3 D-SFSR
1327 */
1328 cmp %g3, DSFSR_FRFU
1329 bl 1f
1330 nop
1331
1332 /*
1333 * FRF index from D-SFAR[5:1]
1334 * %g4 D-SFAR
1335 */
1336 and %g4, DSFAR_FRF_DBL_REG_MASK, %g3
1337 stx %g3, [%g1 + ERR_DIAG_BUF_SPARC_DSFAR]
1338
1339 sllx %g3, ASI_FRF_ECC_INDEX_SHIFT, %g3
1340 ldxa [%g3]ASI_FRF_ECC_REG, %g3
1341 stx %g3, [%g1 + ERR_REG_ECC]
1342 HVRET
13431:
1344 /*
1345 * Convert the IRF index to the Sparc V9 equivalent
1346 */
1347 srlx %g4, DSFAR_IRF_INDEX_SHIFT, %g2
1348 and %g2, DSFAR_IRF_INDEX_MASK, %g2
1349 CONVERT_IRF_INDEX(%g2, %g5)
1350 andn %g4, DSFAR_IRF_INDEX_MASK, %g5
1351 or %g5, %g2, %g5
1352 stx %g5, [%g1 + ERR_DIAG_BUF_SPARC_DSFAR]
1353
1354 /*
1355 * IRF index from D-SFAR[4:0]
1356 * %g4 D-SFAR
1357 */
1358 mov DSFAR_IRF_INDEX_MASK, %g3
1359 srlx %g4, DSFAR_IRF_INDEX_SHIFT, %g4
1360 and %g4, DSFAR_IRF_INDEX_MASK, %g3
1361 sllx %g3, ASI_IRF_ECC_INDEX_SHIFT, %g3
1362
1363 ldxa [%g3]ASI_IRF_ECC_REG, %g3
1364 stx %g3, [%g1 + ERR_REG_ECC]
1365
1366 HVRET
1367 SET_SIZE(dump_reg_ecc)
1368
1369 /*
1370 * Add the integer reg number to the sun4v guest report
1371 */
1372 ENTRY(irf_sun4v_report)
1373 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1374 GET_ERR_DSFAR(%g3, %g5)
1375 srlx %g3, DSFAR_IRF_INDEX_SHIFT, %g3
1376 and %g3, DSFAR_IRF_INDEX_MASK, %g3
1377
1378 /*
1379 * Convert the IRF index to the Sparc V9 equivalent
1380 */
1381 CONVERT_IRF_INDEX(%g3, %g5)
1382 set SUN4V_VALID_REG, %g4
1383 or %g3, %g4, %g3
1384 stub %g3, [%g2 + ERR_SUN4V_RPRT_ASI]
1385 HVRET
1386 SET_SIZE(irf_sun4v_report)
1387
1388 /*
1389 * Add the FP reg number to the sun4v guest report
1390 */
1391 ENTRY(frf_sun4v_report)
1392 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1393 GET_ERR_DSFAR(%g3, %g5)
1394 srlx %g3, DSFAR_FRF_INDEX_SHIFT, %g3
1395 and %g3, DSFAR_FRF_INDEX_MASK, %g3
1396 set SUN4V_VALID_REG, %g4
1397 or %g3, %g4, %g3
1398 stub %g3, [%g2 + ERR_SUN4V_RPRT_ASI]
1399 HVRET
1400 SET_SIZE(frf_sun4v_report)
1401
1402 /*
1403 * Check whether the FRF error is transient or persistent
1404 * or if the floating point register file is failing.
1405 * If TL == 1
1406 * clear_frf_ue
1407 * TRANSIENT
1408 * if TL > 1 {
1409 * if (D-SFSR.[TL - 1].FRFU && D-SFAR.[TL - 1] == D-SFAR) {
1410 * previous trap was identical,
1411 * PERSISTENT
1412 * }
1413 * if (D-SFSR.[TL - 1].FRFU) {
1414 * FAILURE
1415 * }
1416 * clear_frf_ue
1417 * TRANSIENT
1418 * }
1419 *
1420 */
1421
1422 ENTRY(correct_frfu)
1423 STORE_ERR_RETURN_ADDR(%g7, %g1, %g2)
1424
1425 rdpr %tl, %g2
1426 cmp %g2, 1
1427 bg,pn %xcc, 1f
1428 nop
1429
1430 HVCALL(clear_frf_ue)
1431
1432 /*
1433 * If we get to here we have created a sun4v FRF
1434 * precise non-resumable error report and the
1435 * FP UE has been cleared.
1436 */
1437 ba correct_frfu_exit
1438 nop
1439
14401:
1441 /*
1442 * TL > 1
1443 * Either we have nested FRF traps, in which case we have a
1444 * failed RF and we mark the CPU as bad, or we have a different
1445 * trap type at (TL - 1).
1446 */
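	/*
	 * Illustrative sketch (not part of the build): the previous trap
	 * level's D-SFSR is pulled from the per-strand ESR save area,
	 * roughly (the struct view is approximate):
	 *
	 *	uint64_t prev = *(uint64_t *)((char *)strand +
	 *	    (tl - 2) * STRAND_ERR_ESR_INCR + STRAND_ERR_DSFSR);
	 *	if (prev == DSFSR_FRFU || prev == DSFSR_FRFC)
	 *		mark_cpu_in_error();	// nested FRF error: RF failing
	 *	else
	 *		clear_frf_ue();		// unrelated trap at (TL - 1)
	 */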
1447 STRAND_STRUCT(%g1)
1448 sub %g2, 2, %g2 ! (TL - 1) - 1 for diag_buf
1449 mulx %g2, STRAND_ERR_ESR_INCR, %g2
1450 add %g1, %g2, %g3
1451 add %g3, STRAND_ERR_DSFSR, %g3
1452 ldx [%g3], %g3 ! D-SFSR.[TL - 1]
1453 cmp %g3, DSFSR_FRFU
1454 be %xcc, 2f
1455 cmp %g3, DSFSR_FRFC
1456 be %xcc, 2f
1457
1458 /*
1459 * Not a nested FRF error, clear it and return
1460 */
1461 HVCALL(clear_frf_ue)
1462
1463 /*
1464 * If we get to here we have created a sun4v FRF
1465 * precise non-resumable error report and the
1466 * FP UE has been cleared. return
1467 */
1468 ba correct_frfu_exit
1469 nop
1470
14712:
1472 /*
1473 * we have nested FRF errors
1474 * mark the CPU as bad and send a CPU Sun4v report
1475 */
1476 SET_CPU_IN_ERROR(%g2, %g3)
1477 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1478 mov SUN4V_CPU_RPRT, %g3
1479 mov 1, %g4
1480 sllx %g4, %g3, %g3
1481 st %g3, [%g2 + ERR_SUN4V_RPRT_ATTR]
1482 mov EDESC_UE_RESUMABLE, %g3
1483 st %g3, [%g2 + ERR_SUN4V_RPRT_EDESC]
1484
1485correct_frfu_exit:
1486 GET_ERR_RETURN_ADDR(%g7, %g1)
1487 HVRET
1488 SET_SIZE(correct_frfu)
1489
1490 /*
1491 * clear_frf_ue() [LEAF function]
1492 *
1493 * Clear the UE in the floating-point register file
1494 * Arguments:
1495 * %g1 - %g4 - scratch
1496 * %g5, %g6 - preserved
1497 * %g7 - return address
1498 */
1499 ENTRY_NP(clear_frf_ue)
1500 GET_ERR_DSFAR(%g2, %g4)
1501 srlx %g2, DSFAR_FRF_INDEX_SHIFT, %g2
1502 and %g2, DSFAR_FRF_INDEX_MASK, %g2
1503 ! %g2 6-bit fpreg index
1504
1505 ! ensure FPRS.FEF is set. PSTATE.PEF is set by the Sparc h/w when
1506 ! a trap is taken
1507 rd %fprs, %g4
1508 btst FPRS_FEF, %g4
1509 bz,a,pn %xcc, 1f ! no: set it
1510 wr %g4, FPRS_FEF, %fprs ! yes: annulled
15111:
1512
1513 ! Now clear the register in error
1514 ba 1f
1515 rd %pc, %g3 ! %g3 = base address
1516
1517 ! an array of instruction blocks indexed by register number to
1518 ! clear the floating-point register reported in error
1519 ! The first 32 entries clear a single-precision register
1520 ! The next 32 entries clear the corresponding double-precision register
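	! Illustrative sketch (not part of the build): the computed jump
	! below behaves like
	!	target = base + SZ_INSTR + (fpreg_index * 8);
	!	jump(target);	/* 2 instructions (8 bytes) per entry */
	! i.e. a switch on the 6-bit index where each case is
	! "fzeros/fzero %fN; branch to fp_clear_done".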
1521 ba fp_clear_done
1522 fzeros %f0 ! clear %f0
1523 ba fp_clear_done
1524 fzeros %f1 ! clear %f1
1525 ba fp_clear_done
1526 fzeros %f2 ! clear %f2
1527 ba fp_clear_done
1528 fzeros %f3 ! clear %f3
1529 ba fp_clear_done
1530 fzeros %f4 ! clear %f4
1531 ba fp_clear_done
1532 fzeros %f5 ! clear %f5
1533 ba fp_clear_done
1534 fzeros %f6 ! clear %f6
1535 ba fp_clear_done
1536 fzeros %f7 ! clear %f7
1537 ba fp_clear_done
1538 fzeros %f8 ! clear %f8
1539 ba fp_clear_done
1540 fzeros %f9 ! clear %f9
1541 ba fp_clear_done
1542 fzeros %f10 ! clear %f10
1543 ba fp_clear_done
1544 fzeros %f11 ! clear %f11
1545 ba fp_clear_done
1546 fzeros %f12 ! clear %f12
1547 ba fp_clear_done
1548 fzeros %f13 ! clear %f13
1549 ba fp_clear_done
1550 fzeros %f14 ! clear %f14
1551 ba fp_clear_done
1552 fzeros %f15 ! clear %f15
1553 ba fp_clear_done
1554 fzeros %f16 ! clear %f16
1555 ba fp_clear_done
1556 fzeros %f17 ! clear %f17
1557 ba fp_clear_done
1558 fzeros %f18 ! clear %f18
1559 ba fp_clear_done
1560 fzeros %f19 ! clear %f19
1561 ba fp_clear_done
1562 fzeros %f20 ! clear %f20
1563 ba fp_clear_done
1564 fzeros %f21 ! clear %f21
1565 ba fp_clear_done
1566 fzeros %f22 ! clear %f22
1567 ba fp_clear_done
1568 fzeros %f23 ! clear %f23
1569 ba fp_clear_done
1570 fzeros %f24 ! clear %f24
1571 ba fp_clear_done
1572 fzeros %f25 ! clear %f25
1573 ba fp_clear_done
1574 fzeros %f26 ! clear %f26
1575 ba fp_clear_done
1576 fzeros %f27 ! clear %f27
1577 ba fp_clear_done
1578 fzeros %f28 ! clear %f28
1579 ba fp_clear_done
1580 fzeros %f29 ! clear %f29
1581 ba fp_clear_done
1582 fzeros %f30 ! clear %f30
1583 ba fp_clear_done
1584 fzeros %f31 ! clear %f31
1585 ! double-precision registers: both indices of a pair clear the same register
1586 ba fp_clear_done
1587 fzero %f32 ! clear %f32
1588 ba fp_clear_done
1589 fzero %f32 ! clear %f32
1590 ba fp_clear_done
1591 fzero %f34 ! clear %f34
1592 ba fp_clear_done
1593 fzero %f34 ! clear %f34
1594 ba fp_clear_done
1595 fzero %f36 ! clear %f36
1596 ba fp_clear_done
1597 fzero %f36 ! clear %f36
1598 ba fp_clear_done
1599 fzero %f38 ! clear %f38
1600 ba fp_clear_done
1601 fzero %f38 ! clear %f38
1602 ba fp_clear_done
1603 fzero %f40 ! clear %f40
1604 ba fp_clear_done
1605 fzero %f40 ! clear %f40
1606 ba fp_clear_done
1607 fzero %f42 ! clear %f42
1608 ba fp_clear_done
1609 fzero %f42 ! clear %f42
1610 ba fp_clear_done
1611 fzero %f44 ! clear %f44
1612 ba fp_clear_done
1613 fzero %f44 ! clear %f44
1614 ba fp_clear_done
1615 fzero %f46 ! clear %f46
1616 ba fp_clear_done
1617 fzero %f46 ! clear %f46
1618 ba fp_clear_done
1619 fzero %f48 ! clear %f48
1620 ba fp_clear_done
1621 fzero %f48 ! clear %f48
1622 ba fp_clear_done
1623 fzero %f50 ! clear %f50
1624 ba fp_clear_done
1625 fzero %f50 ! clear %f50
1626 ba fp_clear_done
1627 fzero %f52 ! clear %f52
1628 ba fp_clear_done
1629 fzero %f52 ! clear %f52
1630 ba fp_clear_done
1631 fzero %f54 ! clear %f54
1632 ba fp_clear_done
1633 fzero %f54 ! clear %f54
1634 ba fp_clear_done
1635 fzero %f56 ! clear %f56
1636 ba fp_clear_done
1637 fzero %f56 ! clear %f56
1638 ba fp_clear_done
1639 fzero %f58 ! clear %f58
1640 ba fp_clear_done
1641 fzero %f58 ! clear %f58
1642 ba fp_clear_done
1643 fzero %f60 ! clear %f60
1644 ba fp_clear_done
1645 fzero %f60 ! clear %f60
1646 ba fp_clear_done
1647 fzero %f62 ! clear %f62
1648 ba fp_clear_done
1649 fzero %f62 ! clear %f62
16501:
1651 ! %g2 has freg number, %g3 has base address-4
1652 sllx %g2, 3, %g2 ! offset = freg# * 8
1653 add %g3, %g2, %g3 ! %g3 = instruction block addr
1654 jmp %g3 + SZ_INSTR ! jmp to clear register
1655 nop
1656
1657fp_clear_done:
1658 ! reset FPRS.FEF
1659 wr %g4, %g0, %fprs
1660 HVRET ! return to caller
1661 SET_SIZE(clear_frf_ue)
1662
1663 /*
1664 * Disable Tick_compare correctable errors for a short period
1665 * to ensure that performance is not affected if the error
1666 * is persistent. Note that this is for h/w compare operations,
1667 * (TCCD errors), not ASR reads (TCCP errors).
1668 */
1669 ENTRY(tick_cmp_storm)
1670
1671 ! first verify that storm prevention is enabled
1672 CHECK_BLACKOUT_INTERVAL(%g4)
1673
1674 /*
1675 * save our return address
1676 */
1677 STORE_ERR_RETURN_ADDR(%g7, %g4, %g5)
1678
1679 mov CORE_ERR_REPORT_EN, %g3
1680 ldxa [%g3]ASI_ERR_EN, %g4
1681 setx (ERR_TCCD), %g5, %g6
1682 btst %g6, %g4
1683 bz,pn %xcc, 9f ! TCCD already off
1684 andn %g4, %g6, %g6
1685 stxa %g6, [%g3]ASI_ERR_EN
1686
1687 /*
1688 * Set up a cyclic on this strand to re-enable the TCCD bit
1689 * after an interval of 6 seconds. Set a flag in the
1690 * strand struct to indicate that the cyclic has been set
1691 * for this bank.
1692 */
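	/*
	 * Illustrative sketch (not part of the build) of the blackout
	 * logic below; cyclic_add_rel() and the flag macros exist in the
	 * hypervisor, the C view of their arguments is approximate:
	 *
	 *	cerer &= ~ERR_TCCD;		// stop TCCD reports for now
	 *	if (!(strand->err_flag & STRAND_ERR_FLAG_TICK_CMP) &&
	 *	    config->ce_blackout != 0) {
	 *		strand->err_flag |= STRAND_ERR_FLAG_TICK_CMP;
	 *		cyclic_add_rel(config->ce_blackout,	// delta ticks
	 *		    cerer_set_error_bits,		// handler
	 *		    ERR_TCCD,				// arg0: bit to set
	 *		    STRAND_ERR_FLAG_TICK_CMP);		// arg1: flag to clear
	 *	}
	 */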
1693 mov STRAND_ERR_FLAG_TICK_CMP, %g4
1694 STRAND_STRUCT(%g6)
1695 lduw [%g6 + STRAND_ERR_FLAG], %g2 ! installed flags
1696 btst %g4, %g2 ! handler installed?
1697 bnz,pn %xcc, 9f ! yes
1698 nop
1699
1700 STRAND2CONFIG_STRUCT(%g6, %g4)
1701 ldx [%g4 + CONFIG_CE_BLACKOUT], %g1
1702 brz,a,pn %g1, 9f ! zero: blackout disabled
1703 nop
1704 ! about to install the handler, set the flag
1705 STRAND_STRUCT(%g6)
1706 SET_STRAND_ERR_FLAG(%g6, STRAND_ERR_FLAG_TICK_CMP, %g5)
1707 setx cerer_set_error_bits, %g5, %g2
1708 RELOC_OFFSET(%g3, %g4)
1709 sub %g2, %g4, %g2 ! g2 = handler address
1710 setx ERR_TCCD, %g4, %g3 ! g3 = arg 0 : bit(s) to set
1711 mov STRAND_ERR_FLAG_TICK_CMP, %g4 ! g4 = arg 1 : cpu flags to clear
1712 ! g1 = delta tick
1713 VCPU_STRUCT(%g6)
1714 ! g6 - CPU struct
1715 HVCALL(cyclic_add_rel) /* ( del_tick, address, arg0, arg1 ) */
17169:
1717 GET_ERR_RETURN_ADDR(%g7, %g2)
1718 HVRET
1719 SET_SIZE(tick_cmp_storm)
1720
1721
1722 /*
1723 * cyclic function used to re-enable CERER bits
1724 *
1725 * %g1 CERER bits to set
1726 * %g2 strand->err_flags to clear
1727 * %g7 return address
1728 * %g5 - %g6 clobbered
1729 */
1730 ENTRY(cerer_set_error_bits)
1731 STRAND_STRUCT(%g6)
1732 CLEAR_STRAND_ERR_FLAG(%g6, %g2, %g5)
1733
1734 mov CORE_ERR_REPORT_EN, %g5
1735 ldxa [%g5]ASI_ERR_EN, %g4
1736 or %g4, %g1, %g4 ! enable arg0 flags
1737 stxa %g4, [%g5]ASI_ERR_EN
1738
1739 HVRET
1740
1741 SET_SIZE(cerer_set_error_bits)
1742
1743 /*
1744 * Check whether the IRFU error is transient or persistent
1745 * or if the integer register file is failing.
1746 * If TL == 1
1747 * clear_irf_ue
1748 * TRANSIENT
1749 * if TL > 1 {
1750 * if (D-SFSR.[TL - 1].IRFU && D-SFAR.[TL - 1] == D-SFAR) {
1751 * previous trap was identical,
1752 * PERSISTENT
1753 * }
1754 * if (D-SFSR.[TL - 1].IRFU) {
1755 * FAILURE
1756 * }
1757 * clear_irf_ue
1758 * TRANSIENT
1759 * }
1760 *
1761 */
1762 ENTRY(correct_irfu)
1763 STORE_ERR_RETURN_ADDR(%g7, %g1, %g2)
1764
1765 rdpr %tl, %g2
1766 cmp %g2, 1
1767 bg,pn %xcc, 1f
1768 nop
1769
1770 HVCALL(clear_irf_ue)
1771
1772 /*
1773 * If we get to here we have created a sun4v IRF
1774 * precise non-resumable error report and the
1775 * IRF UE has been cleared.
1776 */
1777 ba correct_irfu_exit
1778 nop
1779
17801:
1781 /*
1782 * TL > 1
1783 * Either we have nested IRF traps, in which case we have a
1784 * failed RF and we mark the CPU as bad, or we have a different
1785 * trap type at (TL - 1).
1786 */
1787 STRAND_STRUCT(%g1)
1788 sub %g2, 2, %g2 ! (TL - 1) - 1 for diag_buf
1789 mulx %g2, STRAND_ERR_ESR_INCR, %g2
1790 add %g1, %g2, %g3
1791 add %g3, STRAND_ERR_DSFSR, %g3
1792 ldx [%g3], %g3 ! D-SFSR.[TL - 1]
1793 cmp %g3, DSFSR_IRFU
1794 be %xcc, 2f
1795 cmp %g3, DSFSR_IRFC
1796 be %xcc, 2f
1797
1798 /*
1799 * Not a nested IRF error, clear it and return
1800 */
1801 HVCALL(clear_irf_ue)
1802
1803 /*
1804 * If we get to here we have created a sun4v IRF
1805 * precise non-resumable error report and the
1806 * IRF UE has been cleared. return
1807 */
1808 ba correct_irfu_exit
1809 nop
1810
18112:
1812 /*
1813 * we have nested IRF errors
1814 * mark the CPU as bad and send a CPU Sun4v report
1815 */
1816 SET_CPU_IN_ERROR(%g2, %g3)
1817 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1818 mov SUN4V_CPU_RPRT, %g3
1819 mov 1, %g4
1820 sllx %g4, %g3, %g3
1821 st %g3, [%g2 + ERR_SUN4V_RPRT_ATTR]
1822 mov EDESC_UE_RESUMABLE, %g3
1823 st %g3, [%g2 + ERR_SUN4V_RPRT_EDESC]
1824
1825correct_irfu_exit:
1826 GET_ERR_RETURN_ADDR(%g7, %g1)
1827 HVRET
1828 SET_SIZE(correct_irfu)
1829
1830
1831 /*
1832 * Clear the UE in the integer register file
1833 * Arguments:
1834 * %g1-%g4 -scratch
1835 * %g5, %g6 - preserved
1836 * %g7 - return address
1837 */
1838 ENTRY_NP(clear_irf_ue)
1839 ! get the register number within the set
1840 GET_ERR_DSFAR(%g3, %g2)
1841 srlx %g3, DSFAR_IRF_INDEX_SHIFT, %g2
1842 and %g2, DSFAR_IRF_INDEX_MASK, %g2
1843
1844 /*
1845 * The index from the D-SFAR does not match the standard Sparc V9 register
1846 * index.
1847 */
1848 CONVERT_IRF_INDEX(%g2, %g4)
1849 cmp %g2, 8 ! is reg# < 8?
1850 bl irf_glob_ue ! yes, then global reg
1851 mov %g3, %g1
1852
1853 ! Now clear the register in error
1854 ba 1f ! clear register
1855 rd %pc, %g3 ! get clear instr base addr
1856
1857 ! an array of instruction blocks indexed by register number to
1858 ! clear the non-global register reported in error.
1859 ba irf_clear_done
1860 mov %g0, %o0 ! clear %o0
1861 ba irf_clear_done
1862 mov %g0, %o1 ! clear %o1
1863 ba irf_clear_done
1864 mov %g0, %o2 ! clear %o2
1865 ba irf_clear_done
1866 mov %g0, %o3 ! clear %o3
1867 ba irf_clear_done
1868 mov %g0, %o4 ! clear %o4
1869 ba irf_clear_done
1870 mov %g0, %o5 ! clear %o5
1871 ba irf_clear_done
1872 mov %g0, %o6 ! clear %o6
1873 ba irf_clear_done
1874 mov %g0, %o7 ! clear %o7
1875 ba irf_clear_done
1876 mov %g0, %l0 ! clear %l0
1877 ba irf_clear_done
1878 mov %g0, %l1 ! clear %l1
1879 ba irf_clear_done
1880 mov %g0, %l2 ! clear %l2
1881 ba irf_clear_done
1882 mov %g0, %l3 ! clear %l3
1883 ba irf_clear_done
1884 mov %g0, %l4 ! clear %l4
1885 ba irf_clear_done
1886 mov %g0, %l5 ! clear %l5
1887 ba irf_clear_done
1888 mov %g0, %l6 ! clear %l6
1889 ba irf_clear_done
1890 mov %g0, %l7 ! clear %l7
1891 ba irf_clear_done
1892 mov %g0, %i0 ! clear %i0
1893 ba irf_clear_done
1894 mov %g0, %i1 ! clear %i1
1895 ba irf_clear_done
1896 mov %g0, %i2 ! clear %i2
1897 ba irf_clear_done
1898 mov %g0, %i3 ! clear %i3
1899 ba irf_clear_done
1900 mov %g0, %i4 ! clear %i4
1901 ba irf_clear_done
1902 mov %g0, %i5 ! clear %i5
1903 ba irf_clear_done
1904 mov %g0, %i6 ! clear %i6
1905 ba irf_clear_done
1906 mov %g0, %i7 ! clear %i7
19071:
1908 sub %g2, 8, %g2 ! skip globals
1909 sllx %g2, 3, %g2 ! offset = reg# * 8
1910 add %g3, %g2, %g3 ! %g3 = instruction block addr
1911 jmp %g3 + SZ_INSTR ! jmp to clear register
1912 nop
1913
1914 ! restore gl from value in %o0, and restore %o0
1915irf_gl_clear_done:
1916 wrpr %o0, %gl ! restore %gl
1917 mov %g4, %o0 ! restore %o0
1918
1919irf_clear_done:
1920 HVRET ! return to caller
1921
1922 ! %g1 has the gl + register number
1923irf_glob_ue:
1924 ! now clear the global register in error
1925 ba 1f
1926 rd %pc, %g3 ! get clear instr base addr
1927
1928 ! an array of instruction blocks indexed by global register number
1929 ! to clear the global register reported in error.
1930 ! %gl points to the error global set
1931
1932 ba irf_gl_clear_done
1933 mov %g0, %g0 ! clear %g0
1934 ba irf_gl_clear_done
1935 mov %g0, %g1 ! clear %g1
1936 ba irf_gl_clear_done
1937 mov %g0, %g2 ! clear %g2
1938 ba irf_gl_clear_done
1939 mov %g0, %g3 ! clear %g3
1940 ba irf_gl_clear_done
1941 mov %g0, %g4 ! clear %g4
1942 ba irf_gl_clear_done
1943 mov %g0, %g5 ! clear %g5
1944 ba irf_gl_clear_done
1945 mov %g0, %g6 ! clear %g6
1946 ba irf_gl_clear_done
1947 mov %g0, %g7 ! clear %g7
19481:
1949 sllx %g2, 3, %g2 ! offset (2 instrs)
1950 add %g3, %g2, %g3 ! %g3 = instruction entry
1951 mov %o0, %g4 ! save %o0 in %g4
1952 GET_ERR_GL(%o0) ! save %gl in %o0
1953
1954 ! set gl to error global
1955 srlx %g1, DSFAR_IRF_GL_SHIFT, %g2 ! get global set from SFAR
1956 and %g2, DSFAR_IRF_GL_MASK, %g2 ! %g2 has %gl value
1957
1958 jmp %g3 + SZ_INSTR ! jump to clear global
1959 wrpr %g2, %gl ! set gl to error gl
1960
1961 SET_SIZE(clear_irf_ue)
1962
1963 /*
1964 * Correct an IRF ECC error
1965 * %g7 return address
1966 * %g1 - %g6 clobbered
1967 *
1968 * Get register address from D-SFAR[4:0]
1969 * Get ECC syndrome from D-SFAR[14:7]
1970 * Disable SETER.PSCCE
1971 * (Note: could get an error while doing the correction ....
1972 * and then it all goes horribly horribly wrong !)
1973 * Decode ECC syndrome using ecc_table[]
1974 * - if error in data bits, ecc_table[syndrome] = [0 .. 63]
1975 * xor correction mask with data read from IRF
1976 * - write data back to IRF
1977 * Enable SETER.PSCCE
1978 */
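	/*
	 * Illustrative sketch (not part of the build) of the flow
	 * implemented below; the accessors and correction_mask() are
	 * hypothetical, convert_irf_index() mirrors CONVERT_IRF_INDEX:
	 *
	 *	uint8_t code = irf_ecc_syndrome_table[
	 *	    (dsfar >> DSFAR_IRF_SYNDROME_SHIFT) & DSFAR_IRF_SYNDROME_MASK];
	 *	if (code == ECC_ne) return;
	 *	if (code == ECC_U || code == ECC_M) convert_irfc_to_irfu();
	 *	seter &= ~ERR_PSCCE;		// no error traps while we fix it
	 *	int reg = convert_irf_index(
	 *	    (dsfar >> DSFAR_IRF_INDEX_SHIFT) & DSFAR_IRF_INDEX_MASK);
	 *	reg_write(reg, reg_read(reg) ^ correction_mask(code));
	 *	seter |= ERR_PSCCE;		// re-enable
	 */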
1979 ENTRY(correct_irfc)
1980
1981 STORE_ERR_RETURN_ADDR(%g7, %g4, %g5)
1982
1983 GET_ERR_DSFAR(%g4, %g5)
1984 srlx %g4, DSFAR_IRF_SYNDROME_SHIFT, %g5
1985 and %g5, DSFAR_IRF_SYNDROME_MASK, %g5
1986 ! ECC syndrome in %g5
1987
1988 setx irf_ecc_syndrome_table, %g2, %g3
1989 RELOC_OFFSET(%g6, %g2)
1990 sub %g3, %g2, %g3
1991 ! %g3 ecc syndrome table
1992
1993 mulx %g5, ECC_SYNDROME_TABLE_ENTRY_SIZE, %g5
1994 add %g3, %g5, %g3
1995 ldub [%g3], %g5
1996 ! decoded ECC syndrome in %g5.
1997
1998 /*
1999 * check for multiple bit errors, and no-error
2000 */
2001 cmp %g5, ECC_ne ! no error
2002 be,pn %xcc, correct_irfc_exit
2003 nop
2004 cmp %g5, ECC_U ! Uncorrectable double (or 2n) bit error */
2005 be,pn %xcc, convert_irfc_to_irfu
2006 nop
2007 cmp %g5, ECC_M ! Triple or worse (2n + 1) bit error */
2008 be,pn %xcc, convert_irfc_to_irfu
2009 nop
2010
2011 srlx %g4, DSFAR_IRF_INDEX_SHIFT, %g4
2012 and %g4, DSFAR_IRF_INDEX_MASK, %g4
2013 ! register number in %g4
2014 ! data bit in error is in %g5
2015
2016 ! disable SETER.PSCCE
2017 setx ERR_PSCCE, %g3, %g1
2018 mov CORE_ERR_TRAP_EN, %g2
2019 ldxa [%g2]ASI_ERR_EN, %g3
2020 andn %g3, %g1, %g3
2021 stxa %g3, [%g2]ASI_ERR_EN
2022
2023 /*
2024 * The index from the D-SFAR does not match the standard Sparc V9 register
2025 * index.
2026 */
2027 CONVERT_IRF_INDEX(%g4, %g1)
2028
2029 ! reg# < 8 => global register
2030 cmp %g4, 8
2031 bl,pn %xcc, correct_irfc_gl
2032 nop
2033
2034 ! %g4 reg#
2035 ! %g5 syndrome
2036
2037 ! Now read the register in error
2038 ba 1f ! read register
2039 rd %pc, %g3 ! get read instr base addr
2040
2041 ! an array of instruction blocks indexed by register number to
2042 ! clear the non-global register reported in error.
2043 CORRECT_IRFC(%o0, %g1, %g6, correct_irfc_done)
2044 CORRECT_IRFC(%o1, %g1, %g6, correct_irfc_done)
2045 CORRECT_IRFC(%o2, %g1, %g6, correct_irfc_done)
2046 CORRECT_IRFC(%o3, %g1, %g6, correct_irfc_done)
2047 CORRECT_IRFC(%o4, %g1, %g6, correct_irfc_done)
2048 CORRECT_IRFC(%o5, %g1, %g6, correct_irfc_done)
2049 CORRECT_IRFC(%o6, %g1, %g6, correct_irfc_done)
2050 CORRECT_IRFC(%o7, %g1, %g6, correct_irfc_done)
2051 CORRECT_IRFC(%l0, %g1, %g6, correct_irfc_done)
2052 CORRECT_IRFC(%l1, %g1, %g6, correct_irfc_done)
2053 CORRECT_IRFC(%l2, %g1, %g6, correct_irfc_done)
2054 CORRECT_IRFC(%l3, %g1, %g6, correct_irfc_done)
2055 CORRECT_IRFC(%l4, %g1, %g6, correct_irfc_done)
2056 CORRECT_IRFC(%l5, %g1, %g6, correct_irfc_done)
2057 CORRECT_IRFC(%l6, %g1, %g6, correct_irfc_done)
2058 CORRECT_IRFC(%l7, %g1, %g6, correct_irfc_done)
2059 CORRECT_IRFC(%i0, %g1, %g6, correct_irfc_done)
2060 CORRECT_IRFC(%i1, %g1, %g6, correct_irfc_done)
2061 CORRECT_IRFC(%i2, %g1, %g6, correct_irfc_done)
2062 CORRECT_IRFC(%i3, %g1, %g6, correct_irfc_done)
2063 CORRECT_IRFC(%i4, %g1, %g6, correct_irfc_done)
2064 CORRECT_IRFC(%i5, %g1, %g6, correct_irfc_done)
2065 CORRECT_IRFC(%i6, %g1, %g6, correct_irfc_done)
2066 CORRECT_IRFC(%i7, %g1, %g6, correct_irfc_done)
20671:
2068
2069
2070 /*
2071 * The correction mask is generated as a 64-bit vector of 0's with a single
2072 * 1 bit by decoding the syndrome (using irf_ecc_syndrome_table[]).
2073 * We XOR that vector with the register data.
2074 *
2075 * (Note if the error is in a check bit the vector is all 0's - no need to
2076 * do the XOR).
2077 *
2078 * Once we have the corrected data, just write it back to the register. If
2079 * the error was in the check bits, hardware will (should) generate the
2080 * correct check bits and write both the data and the check bits to the
2081 * register file contents.
2082 */
2083 sub %g4, 8, %g4 ! skip globals
2084 mov 1, %g1
2085 sllx %g1, %g5, %g1
2086	cmp	%g5, ECC_ne	! if syndrome >= ECC_ne
2087 movge %xcc, %g0, %g1 ! no/checkbit error, clear correction mask
2088 ! %g1 correction mask
2089 mulx %g4, CORRECT_IRFC_SIZE, %g4 ! offset = reg# * CORRECT_IRFC_SIZE
2090 add %g3, %g4, %g3 ! %g3 = instruction block addr
2091 jmp %g3 + SZ_INSTR ! jmp to correct register
2092 nop
2093
2094 /*
2095 * Error was in a global register
2096 */
2097correct_irfc_gl:
2098
2099 ! %g4 register#
2100 ! %g5 syndrome
2101
2102 /*
2103	 * We need a couple of non-global registers to play with
2104	 * when we change GL to the value at the time of the error
2105 */
2106 mov %o5, %g1
2107 mov %o4, %g2
2108
2109 ! get the base address of the instruction to read the register
2110 ba 1f
2111 rd %pc, %g3
2112
2113 ba irf_read_gl_done
2114 mov %g0, %o5
2115 ba irf_read_gl_done
2116 mov %g1, %o5
2117 ba irf_read_gl_done
2118 mov %g2, %o5
2119 ba irf_read_gl_done
2120 mov %g3, %o5
2121 ba irf_read_gl_done
2122 mov %g4, %o5
2123 ba irf_read_gl_done
2124 mov %g5, %o5
2125 ba irf_read_gl_done
2126 mov %g6, %o5
2127 ba irf_read_gl_done
2128 mov %g7, %o5
2129
21301:
2131 sllx %g4, 3, %g4 ! offset (2 instrs)
2132 add %g3, %g4, %g3 ! %g3 = instruction entry
2133 GET_ERR_GL(%o4) ! save %gl in %o4
2134
2135 ! set GL to error trap GL
2136 GET_ERR_DSFAR(%g6, %g7)
2137 srlx %g6, DSFAR_IRF_GL_SHIFT, %g6
2138 and %g6, DSFAR_IRF_GL_MASK, %g6
2139
2140	jmp	%g3 + SZ_INSTR	! jump to read the global in error
2141 wrpr %g6, %gl ! set gl to error gl
2142
2143irf_read_gl_done:
2144 ! %o4 GL
2145 wrpr %o4, %gl
2146 ! %g5 syndrome
2147 ! %o5 value
2148 mov 1, %g6
2149 sllx %g6, %g5, %g6
2150 cmp %g5, ECC_ne ! if syndrome >= ECC_ne
2151 movge %xcc, %g0, %g6 ! no/checkbit error, clear correction mask
2152 xor %o5, %g6, %o5
2153 ! %o5 corrected data
2154
2155irf_restore_gl_data:
2156 ! Now restore the register in error
2157 ba 1f ! restore register
2158 rd %pc, %g3 ! get restore instr base addr
2159
2160 ba irf_restore_gl_done
2161 mov %o5, %g0
2162 ba irf_restore_gl_done
2163 mov %o5, %g1
2164 ba irf_restore_gl_done
2165 mov %o5, %g2
2166 ba irf_restore_gl_done
2167 mov %o5, %g3
2168 ba irf_restore_gl_done
2169 mov %o5, %g4
2170 ba irf_restore_gl_done
2171 mov %o5, %g5
2172 ba irf_restore_gl_done
2173 mov %o5, %g6
2174 ba irf_restore_gl_done
2175 mov %o5, %g7
2176
21771:
2178 add %g3, %g4, %g3 ! %g3 = instruction entry
2179 GET_ERR_GL(%o4) ! save %gl in %o4
2180
2181 ! set GL to error trap GL
2182 GET_ERR_DSFAR(%g6, %g5)
2183 srlx %g6, DSFAR_IRF_GL_SHIFT, %g6
2184 and %g6, DSFAR_IRF_GL_MASK, %g6
2185
2186	jmp	%g3 + SZ_INSTR	! jump to write back the corrected global
2187 wrpr %g6, %gl ! set gl to error gl
2188
2189irf_restore_gl_done:
2190 wrpr %o4, %gl
2191 mov %g1, %o5
2192 mov %g2, %o4
2193
2194 ba correct_irfc_done
2195 nop
2196
2197convert_irfc_to_irfu:
2198 CONVERT_CE_TO_UE(1)
2199 .empty
2200 /* NOTREACHED */
2201
2202correct_irfc_done:
2203 ! enable SETER.PSCCE
2204 setx ERR_PSCCE, %g3, %g1
2205 mov CORE_ERR_TRAP_EN, %g2
2206 ldxa [%g2]ASI_ERR_EN, %g3
2207 or %g3, %g1, %g3
2208 stxa %g3, [%g2]ASI_ERR_EN
2209
2210correct_irfc_exit:
2211
2212 GET_ERR_RETURN_ADDR(%g7, %g4)
2213
2214 HVRET
2215 SET_SIZE(correct_irfc)
2216
2217#ifdef TEST_ERRORS
2218 ENTRY(inject_cmp_errors)
2219
2220 ba 4f
2221 nop
2222
2223 ! bit 25, IRF
2224 set ((1 << 31) | (1 << 25) | 1), %g5
2225 membar #Sync
2226 stxa %g5, [%g0]ASI_ERROR_INJECT_REG
2227
2228 ! should get an IRFC on this instruction
2229 mov 7, %o0
2230 membar #Sync
2231 nop
2232 stxa %g0, [%g0]ASI_ERROR_INJECT_REG
2233 cmp %o0, 7
2234 be %xcc, 1f
2235 nop
2236 mov %g7, %g6
2237 PRINT_NOTRAP("Failed to fix IRFC\r\n")
2238 PRINTX_NOTRAP(%o0)
2239 mov %g6, %g7
2240 ba 2f
2241 nop
22421:
2243 mov %g7, %g6
2244 PRINT_NOTRAP("Fixed IRFC\r\n")
2245 PRINTX_NOTRAP(%o0)
2246 mov %g6, %g7
22472:
2248 ! bit 24, FRF
2249 rd %fprs, %g4
2250 or %g4, FPRS_FEF, %g3
2251 wr %g3, %fprs
2252 set ((1 << 31) | (1 << 24) | 4), %g5
2253 membar #Sync
2254 stxa %g5, [%g0]ASI_ERROR_INJECT_REG
2255
2256 ! should get an FRFC on this instruction
2257 fmovs %f8, %f2
2258 fmovs %f6, %f4
2259 faddd %f2, %f4, %f6
2260 membar #Sync
2261 nop
2262 stxa %g0, [%g0]ASI_ERROR_INJECT_REG
2263 wr %g4, %fprs
22643:
2265 ! bit 23, SCA
2266 set ((1 << 31) | (1 << 23) | 7), %g5
2267 membar #Sync
2268 stxa %g5, [%g0]ASI_ERROR_INJECT_REG
2269
2270 ! should get an SCAC on this instruction
2271 mov 0, %g5
2272 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2273 add %g5, 8, %g5
2274 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2275 add %g5, 8, %g5
2276 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2277 add %g5, 8, %g5
2278 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2279 mov 0x30, %g5
2280 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2281 add %g5, 8, %g5
2282 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2283 add %g5, 8, %g5
2284 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2285 add %g5, 8, %g5
2286 ldxa [%g5]ASI_HSCRATCHPAD, %g5
2287 membar #Sync
2288 nop
2289 stxa %g0, [%g0]ASI_ERROR_INJECT_REG
22904:
2291 ! bit 21, TSA
2292 set ((1 << 31) | (1 << 21) | 8), %g5
2293 membar #Sync
2294 stxa %g5, [%g0]ASI_ERROR_INJECT_REG
2295
2296	! should get a TSAC on this instruction
2297 rdpr %tl, %g5
2298 wrpr %g5, %tl
2299 membar #Sync
2300 rdpr %tstate, %g5
2301 wrpr %g5, %tstate
2302 membar #Sync
2303 rdpr %tpc, %g5
2304 wrpr %g5, %tpc
2305 membar #Sync
2306 rdpr %tnpc, %g5
2307 wrpr %g5, %tnpc
2308 membar #Sync
2309 rdhpr %htstate, %g5
2310 wrhpr %g5, %htstate
2311 membar #Sync
2312 nop
2313 stxa %g0, [%g0]ASI_ERROR_INJECT_REG
2314 HVRET
2315 SET_SIZE(inject_cmp_errors)
2316#endif
2317
2318 /*
2319	 * Dump L2/DRAM error registers and the trap registers
2320 * %g7 return address
2321 */
2322 ENTRY(dump_dbu_data)
2323
2324 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
2325
2326 /*
2327 * Store L2 ESR/EAR for the banks into the DIAG_BUF
2328 */
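	/*
	 * A rough C sketch of the per-bank save-and-clear loop below
	 * (illustrative only, not built).  l2_esr_reg(), l2_ear_reg(),
	 * l2_bank_disabled() and the err_diag_buf layout are stand-ins for the
	 * bank-strided CSR addresses (L2_BANK_SHIFT) and the ERR_DIAG_BUF_*
	 * offsets used by the assembly.
	 *
	 *	#include <stdint.h>
	 *
	 *	extern volatile uint64_t *l2_esr_reg(int bank);	// hypothetical
	 *	extern volatile uint64_t *l2_ear_reg(int bank);	// hypothetical
	 *	extern int l2_bank_disabled(int bank);		// hypothetical
	 *
	 *	struct err_diag_buf {				// layout assumed
	 *		struct { uint64_t esr, ear; } l2_cache[NO_L2_BANKS];
	 *	};
	 *
	 *	void
	 *	dump_l2_error_regs(struct err_diag_buf *buf)
	 *	{
	 *		for (int bank = NO_L2_BANKS - 1; bank >= 0; bank--) {
	 *			if (l2_bank_disabled(bank))	// touching it would hang
	 *				continue;
	 *
	 *			uint64_t esr = *l2_esr_reg(bank);
	 *			*l2_esr_reg(bank) = esr;	// clear RW1C bits
	 *			*l2_esr_reg(bank) = 0;		// clear plain RW bits
	 *			buf->l2_cache[bank].esr = esr;
	 *
	 *			uint64_t ear = *l2_ear_reg(bank);
	 *			*l2_ear_reg(bank) = 0;		// clear EAR
	 *			buf->l2_cache[bank].ear = ear;
	 *		}
	 *	}
	 */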
2329 set (NO_L2_BANKS - 1), %g3
23301:
2331	! skip banks which are disabled; accessing them causes a hang
2332 SKIP_DISABLED_L2_BANK(%g3, %g4, %g5, 2f)
2333
2334 setx L2_ERROR_STATUS_REG, %g4, %g5
2335 sllx %g3, L2_BANK_SHIFT, %g2
2336 or %g5, %g2, %g2
2337 ldx [%g2], %g4
2338 stx %g4, [%g2] ! clear ESR RW1C
2339 stx %g0, [%g2] ! clear ESR RW
2340
2341 add %g1, ERR_DIAG_BUF_L2_CACHE_ESR, %g2
2342 mulx %g3, ERR_DIAG_BUF_L2_CACHE_ESR_INCR, %g5
2343 add %g2, %g5, %g2
2344 ! %g2 diag_buf->l2_cache.esr
2345 stx %g4, [%g2]
2346
2347 add %g1, ERR_DIAG_BUF_L2_CACHE_EAR, %g2
2348 mulx %g3, ERR_DIAG_BUF_L2_CACHE_EAR_INCR, %g5
2349 add %g2, %g5, %g2
2350 setx L2_ERROR_ADDRESS_REG, %g4, %g5
2351 sllx %g3, L2_BANK_SHIFT, %g4
2352 or %g5, %g4, %g4
2353 ldx [%g4], %g5
2354 stx %g0, [%g4] ! clear L2 EAR
2355 stx %g5, [%g2]
2356
23572:
2358 ! next bank
2359 brgz,pt %g3, 1b
2360 dec %g3
2361
2362 ! DIAG_BUF in %g1
2363
2364 /*
2365	 * Store DRAM ESR/EAR/LOC/CTR/FBD/RETRY for the bank in error into the DIAG_BUF
2366 */
2367 set (NO_DRAM_BANKS - 1), %g3
23683:
2369	! skip banks which are disabled; accessing them causes a hang
2370 SKIP_DISABLED_DRAM_BANK(%g3, %g4, %g5, 4f)
2371
2372 setx DRAM_ESR_BASE, %g4, %g5
2373 sllx %g3, DRAM_BANK_SHIFT, %g2
2374 or %g5, %g2, %g2
2375 ldx [%g2], %g4
2376 brz,pt %g4, 4f ! no error on this bank
2377 nop
2378
2379 stx %g4, [%g2] ! clear DRAM ESR RW1C
2380 stx %g0, [%g2] ! clear DRAM ESR RW
2381 add %g1, ERR_DIAG_BUF_DRAM_ESR, %g2
2382 mulx %g3, ERR_DIAG_BUF_DRAM_ESR_INCR, %g5
2383 add %g2, %g5, %g2
2384 stx %g4, [%g2]
2385
2386 add %g1, ERR_DIAG_BUF_DRAM_EAR, %g2
2387 mulx %g3, ERR_DIAG_BUF_DRAM_EAR_INCR, %g5
2388 add %g2, %g5, %g2
2389 setx DRAM_EAR_BASE, %g4, %g5
2390 sllx %g3, DRAM_BANK_SHIFT, %g4
2391 or %g5, %g4, %g4
2392 ldx [%g4], %g5
2393 stx %g0, [%g4] ! clear DRAM EAR register
2394 stx %g0, [%g4] ! and again for erratum 116
2395 stx %g5, [%g2]
2396
2397 add %g1, ERR_DIAG_BUF_DRAM_LOC, %g2
2398 mulx %g3, ERR_DIAG_BUF_DRAM_LOC_INCR, %g5
2399 add %g2, %g5, %g2
2400 setx DRAM_ELR_BASE, %g4, %g5
2401 sllx %g3, DRAM_BANK_SHIFT, %g4
2402 or %g5, %g4, %g4
2403 ldx [%g4], %g5
2404 stx %g0, [%g4] ! clear DRAM LOC register
2405 stx %g0, [%g4] ! and again for erratum 116
2406 stx %g5, [%g2]
2407
2408 add %g1, ERR_DIAG_BUF_DRAM_CTR, %g2
2409 mulx %g3, ERR_DIAG_BUF_DRAM_CTR_INCR, %g5
2410 add %g2, %g5, %g2
2411 setx DRAM_ECR_BASE, %g4, %g5
2412 sllx %g3, DRAM_BANK_SHIFT, %g4
2413 or %g5, %g4, %g4
2414 ldx [%g4], %g5
2415 stx %g0, [%g4] ! clear DRAM COUNTER register
2416 stx %g0, [%g4] ! and again for erratum 116
2417 stx %g5, [%g2]
2418
2419 add %g1, ERR_DIAG_BUF_DRAM_FBD, %g2
2420 mulx %g3, ERR_DIAG_BUF_DRAM_FBD_INCR, %g5
2421 add %g2, %g5, %g2
2422 setx DRAM_FBD_BASE, %g4, %g5
2423 sllx %g3, DRAM_BANK_SHIFT, %g4
2424 or %g5, %g4, %g4
2425 ldx [%g4], %g5
2426 stx %g0, [%g4] ! clear FBD syndrome register
2427 stx %g0, [%g4] ! and again for erratum 116
2428 stx %g5, [%g2]
2429
2430 add %g1, ERR_DIAG_BUF_DRAM_RETRY, %g2
2431 mulx %g3, ERR_DIAG_BUF_DRAM_RETRY_INCR, %g5
2432 add %g2, %g5, %g2
2433 setx DRAM_RETRY_BASE, %g4, %g5
2434 sllx %g3, DRAM_BANK_SHIFT, %g4
2435 or %g5, %g4, %g4
2436 ldx [%g4], %g5
2437 stx %g0, [%g4] ! clear DRAM error retry register
2438 stx %g0, [%g4] ! and again for erratum 116
2439 stx %g5, [%g2]
2440
24414:
2442 ! next bank
2443 brgz,pt %g3, 3b
2444 dec %g3
2445
2446 add %g1, ERR_DIAG_BUF_DIAG_DATA, %g1
2447 add %g1, ERR_DIAG_DATA_TRAP_REGS, %g1
2448	! %g1 diag_buf->diag_data.err_trap_regs
2449 rdpr %tl, %g2
2450 mov %g2, %g3
24515:
2452 wrpr %g3, %tl
2453 mulx %g3, ERR_TRAP_REGS_SIZE, %g4
2454 add %g4, %g1, %g4 ! %g4 diag_buf->diag_data.err_trap_regs[TL]
2455 rdpr %tt, %g5
2456 stx %g5, [%g4 + ERR_TT]
2457 rdpr %tpc, %g5
2458 stx %g5, [%g4 + ERR_TPC]
2459 rdpr %tnpc, %g5
2460 stx %g5, [%g4 + ERR_TNPC]
2461 rdpr %tstate, %g5
2462 stx %g5, [%g4 + ERR_TSTATE]
2463 rdhpr %htstate, %g5
2464 stx %g5, [%g4 + ERR_HTSTATE]
2465 dec %g3
2466 brnz,pt %g3, 5b
2467 nop
2468
2469 ! restore original trap level
2470 wrpr %g2, %tl
2471
2472 HVRET
2473 SET_SIZE(dump_dbu_data)
2474
2475
2476 /*
2477 * Algorithm to correct (if possible) FRFC errors:
2478 * - use DSFAR[5:1] to determine the suspect double
2479 * floating reg (%f0 - %f62), and read reg data
2480 * - use DSFAR[5:1] to get the ecc bits
2481 * - use data bits[63:32] and ECC bits [13:7] to
2482 * calculate syndrome for even single reg.
2483 * - if syndrome indicates data CE then
2484 * correct and store back.
2485 * - if syndrome is UE then unrecoverable.
2486 * - use data bits[31:0] and ECC bits [6:0] to calculate
2487 * syndrome for odd reg. Take same actions as for even reg.
2488	 * - if neither indicates a UE, write the corrected data back into
2489	 *   the double floating-point reg.
2490 *
2491 * %g1 - %g6 clobbered
2492 * %g7 return address
2493 *
2494 * STRAND_FP_TMP1 - data from fpreg
2495 * STRAND_FP_TMP2 - ecc
2496 */
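	/*
	 * A condensed C sketch of the per-half syndrome check described above
	 * (illustrative only, not built).  frf_check_bits() stands in for
	 * GEN_FRF_CHECK and parity() for GEN_PARITY; the 0x3f mask, the bit-6
	 * position and the 128-entry table size are assumptions mirroring
	 * FRF_SYND5_MASK/FRF_SYND6_SHIFT.  The ECC_* classes and
	 * l2_ecc_syndrome_table are the ones the assembly already uses.
	 *
	 *	#include <stdint.h>
	 *
	 *	extern const uint8_t l2_ecc_syndrome_table[128];
	 *	extern unsigned frf_check_bits(uint32_t data);	// hypothetical
	 *	extern unsigned parity(uint32_t x);		// hypothetical
	 *
	 *	// Returns 0 if the 32-bit half is clean or corrected in place,
	 *	// -1 if the error is uncorrectable (caller converts FRFC to FRFU).
	 *	int
	 *	correct_frf_half(uint32_t *data, uint32_t stored_ecc)
	 *	{
	 *		unsigned synd = (frf_check_bits(*data) ^ stored_ecc) & 0x3f;
	 *		synd |= parity(*data ^ stored_ecc) << 6;	// synd{6}
	 *		uint8_t decoded = l2_ecc_syndrome_table[synd];
	 *
	 *		if (decoded == ECC_U || decoded == ECC_M || decoded == ECC_N_M)
	 *			return (-1);
	 *		if (decoded != ECC_ne && decoded < ECC_C0)
	 *			*data ^= 1u << decoded;	// flip the bad data bit
	 *		return (0);			// check-bit errors are fixed
	 *	}					// by the writeback itself
	 */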
2497 ENTRY(correct_frfc)
2498
2499 ! make sure FPRS.FEF is set
2500 rd %fprs, %g1
2501 STRAND_PUSH(%g1, %g2, %g3)
2502 or %g1, FPRS_FEF, %g3
2503 wr %g3, %fprs
2504
2505 /*
2506 * Get the FRF Index and use it to calculate which
2507 * freg to read by working out offset into table
2508 * below.
2509 */
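	/*
	 * The faulting %f register is read (and later written back) by jumping
	 * into a table of fixed-size two-instruction entries.  A small C sketch
	 * of the offset computation (illustrative only; read_fr_entry_offset()
	 * is a hypothetical name):
	 *
	 *	#include <stdint.h>
	 *
	 *	#define FR_ENTRY_SIZE	8	// two 4-byte instructions per entry
	 *
	 *	// dsfar_frf_field is D-SFAR with only bits [5:1] kept, i.e. already
	 *	// (double_reg_number * 2), so shifting left by 2 yields reg * 8.
	 *	static inline uint64_t
	 *	read_fr_entry_offset(uint64_t dsfar_frf_field)
	 *	{
	 *		return (dsfar_frf_field << 2);	// == double_reg * FR_ENTRY_SIZE
	 *	}
	 *
	 * The assembly adds this offset to the PC captured by the "rd %pc" just
	 * before the table and jumps SZ_INSTR past it to land on the right entry.
	 */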
2510 GET_ERR_DSFAR(%g5, %g4)
2511 ! clear DSFAR[0], use only even numbered double FP registers
2512 and %g5, DSFAR_FRF_DBL_REG_MASK, %g5
2513 sllx %g5, 2, %g5
2514
2515 ! get start address of table below
2516 ba read_fr_start
2517 rd %pc, %g4
2518
2519 ba read_fr_done
2520 std %f0, [%g2 + STRAND_FP_TMP1]
2521 ba read_fr_done
2522 std %f2, [%g2 + STRAND_FP_TMP1]
2523 ba read_fr_done
2524 std %f4, [%g2 + STRAND_FP_TMP1]
2525 ba read_fr_done
2526 std %f6, [%g2 + STRAND_FP_TMP1]
2527 ba read_fr_done
2528 std %f8, [%g2 + STRAND_FP_TMP1]
2529 ba read_fr_done
2530 std %f10, [%g2 + STRAND_FP_TMP1]
2531 ba read_fr_done
2532 std %f12, [%g2 + STRAND_FP_TMP1]
2533 ba read_fr_done
2534 std %f14, [%g2 + STRAND_FP_TMP1]
2535 ba read_fr_done
2536 std %f16, [%g2 + STRAND_FP_TMP1]
2537 ba read_fr_done
2538 std %f18, [%g2 + STRAND_FP_TMP1]
2539 ba read_fr_done
2540 std %f20, [%g2 + STRAND_FP_TMP1]
2541 ba read_fr_done
2542 std %f22, [%g2 + STRAND_FP_TMP1]
2543 ba read_fr_done
2544 std %f24, [%g2 + STRAND_FP_TMP1]
2545 ba read_fr_done
2546 std %f26, [%g2 + STRAND_FP_TMP1]
2547 ba read_fr_done
2548 std %f28, [%g2 + STRAND_FP_TMP1]
2549 ba read_fr_done
2550 std %f30, [%g2 + STRAND_FP_TMP1]
2551 ba read_fr_done
2552 std %f32, [%g2 + STRAND_FP_TMP1]
2553 ba read_fr_done
2554 std %f34, [%g2 + STRAND_FP_TMP1]
2555 ba read_fr_done
2556 std %f36, [%g2 + STRAND_FP_TMP1]
2557 ba read_fr_done
2558 std %f38, [%g2 + STRAND_FP_TMP1]
2559 ba read_fr_done
2560 std %f40, [%g2 + STRAND_FP_TMP1]
2561 ba read_fr_done
2562 std %f42, [%g2 + STRAND_FP_TMP1]
2563 ba read_fr_done
2564 std %f44, [%g2 + STRAND_FP_TMP1]
2565 ba read_fr_done
2566 std %f46, [%g2 + STRAND_FP_TMP1]
2567 ba read_fr_done
2568 std %f48, [%g2 + STRAND_FP_TMP1]
2569 ba read_fr_done
2570 std %f50, [%g2 + STRAND_FP_TMP1]
2571 ba read_fr_done
2572 std %f52, [%g2 + STRAND_FP_TMP1]
2573 ba read_fr_done
2574 std %f54, [%g2 + STRAND_FP_TMP1]
2575 ba read_fr_done
2576 std %f56, [%g2 + STRAND_FP_TMP1]
2577 ba read_fr_done
2578 std %f58, [%g2 + STRAND_FP_TMP1]
2579 ba read_fr_done
2580 std %f60, [%g2 + STRAND_FP_TMP1]
2581 ba read_fr_done
2582 std %f62, [%g2 + STRAND_FP_TMP1]
2583
2584read_fr_start:
2585 DISABLE_PSCCE(%g1, %g2, %g3)
2586
2587 STRAND_STRUCT(%g2)
2588 add %g4, %g5, %g4
2589 jmp %g4 + SZ_INSTR
2590 nop
2591
2592read_fr_done:
2593 ENABLE_PSCCE(%g1, %g3, %g4)
2594
2595 ! %g2 strandp
2596 ! %g5 DSFAR
2597 ! FP register in error in STRAND_FP_TMP1
2598
2599 /*
2600 * Get the ECC data for the freg.
2601 *
2602 * %g5 - D-SFAR[5:1] already shifted to VA[7:3] above for
2603 * table access
2604 */
2605 ldxa [%g5]ASI_FRF_ECC_REG, %g4
2606 stx %g4, [%g2 + STRAND_FP_TMP2]
2607
2608 ! FP register in error in STRAND_FP_TMP1
2609 ! FP register ECC in error in STRAND_FP_TMP2
2610
2611 /*
2612 * Calculate syndrome for 'even' single reg first.
2613 */
2614 lduw [%g2 + STRAND_FP_TMP1], %g1
2615 GEN_FRF_CHECK(%g1, %g2, %g3, %g4, %g5, %g6)
2616 ! check bits in %g2
2617
2618 STRAND_STRUCT(%g3)
2619
2620 ldx [%g3 + STRAND_FP_TMP2], %g1 ! ecc
2621 srlx %g1, ASI_FRF_ECC_EVEN_SHIFT, %g1 ! even ecc
2622 xor %g1, %g2, %g5 ! calculate syndrome
2623 and %g5, FRF_SYND5_MASK, %g5 ! %g5 - synd{5:0}
2624
2625 /*
2626 * synd{6} is parity over data and ecc and
2627 * is calculated separately from synd{5:0}
2628 */
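	/*
	 * A small C sketch of the parity step (illustrative only; parity32() is
	 * a stand-in for what GEN_PARITY computes over the 32-bit data half
	 * XORed with its stored ECC bits):
	 *
	 *	#include <stdint.h>
	 *
	 *	static inline unsigned
	 *	parity32(uint32_t x)
	 *	{
	 *		x ^= x >> 16;	// fold the word onto itself so that bit 0
	 *		x ^= x >> 8;	// ends up holding the XOR of all 32 bits
	 *		x ^= x >> 4;
	 *		x ^= x >> 2;
	 *		x ^= x >> 1;
	 *		return (x & 1);
	 *	}
	 *
	 *	// synd{6} = parity32(data ^ ecc), merged in at FRF_SYND6_SHIFT below
	 */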
2629 lduw [%g3 + STRAND_FP_TMP1], %g1 ! even data
2630 ldx [%g3 + STRAND_FP_TMP2], %g2 ! ecc
2631 srlx %g2, ASI_FRF_ECC_EVEN_SHIFT, %g2 ! even ecc
2632 xor %g1, %g2, %g1
2633
2634 GEN_PARITY(%g1, %g4)
2635 ! synd{6} in %g4
2636
2637 /*
2638 * Merge the separate syndrome bits together to get
2639 * full synd{6:0}.
2640 */
2641 sllx %g4, FRF_SYND6_SHIFT, %g4
2642 or %g5, %g4, %g5 ! g5 - synd{6:0}
2643
2644 /*
2645 * FRF errors use the same syndrome table as L2 cache data
2646 */
2647 setx l2_ecc_syndrome_table, %g2, %g3
2648 RELOC_OFFSET(%g6, %g2)
2649 sub %g3, %g2, %g3 ! %g3 - ecc syndrome table
2650
2651 mulx %g5, ECC_SYNDROME_TABLE_ENTRY_SIZE, %g5
2652 add %g3, %g5, %g6
2653 ldub [%g6], %g5 ! %g5 - decoded ECC syndrome
2654
2655 /*
2656 * Now check error type and correct if possible.
2657 */
2658
2659 ! Not-an-error
2660 cmp %g5, ECC_ne
2661 be,pn %xcc, even_fr_done
2662
2663 ! Uncorrectable error
2664 cmp %g5, ECC_U
2665 be,pn %xcc, convert_frfc_to_frfu
2666
2667 ! Multiple error
2668 cmp %g5, ECC_M
2669 be,pn %xcc, convert_frfc_to_frfu
2670
2671 ! NotData/Triple or worse
2672 cmp %g5, ECC_N_M
2673 be,pn %xcc, convert_frfc_to_frfu
2674
2675 ! Check bit error (will be corrected by HW)
2676 cmp %g5, ECC_C0
2677 bge %xcc, even_fr_done
2678 nop
2679
2680 /*
2681 * Only reach this point if we are dealing with a
2682 * data bit error, so now correct it.
2683 * %g5 data bit in error
2684 */
2685 mov 1, %g1
2686 sllx %g1, %g5, %g5
2687 STRAND_STRUCT(%g2)
2688 lduw [%g2 + STRAND_FP_TMP1], %g1 ! even data
2689 xor %g1, %g5, %g1 ! correct bit
2690 stw %g1, [%g2 + STRAND_FP_TMP1]
2691
2692even_fr_done:
2693
2694 /*
2695 * Now calculate syndrome for 'odd' single reg
2696 */
2697 STRAND_STRUCT(%g2)
2698 lduw [%g2 + STRAND_FP_TMP1 + 4], %g1
2699 GEN_FRF_CHECK(%g1, %g2, %g3, %g4, %g5, %g6)
2700 ! check bits in %g2
2701
2702 STRAND_STRUCT(%g3)
2703 ldx [%g3 + STRAND_FP_TMP2], %g1 ! ecc
2704 and %g1, ASI_FRF_ECC_ODD_MASK, %g1 ! odd ecc
2705 xor %g1, %g2, %g5 ! calculate syndrome
2706 and %g5, FRF_SYND5_MASK, %g5 ! %g5 - synd{5:0}
2707
2708 /*
2709 * synd{6} is parity over data and ecc
2710 * and is calculated separately from synd{5:0}
2711 */
2712 lduw [%g3 + STRAND_FP_TMP1 + 4], %g1 ! odd data
2713 ldx [%g3 + STRAND_FP_TMP2], %g2 ! ecc
2714 and %g2, ASI_FRF_ECC_ODD_MASK, %g2 ! odd ecc
2715 xor %g1, %g2, %g1
2716
2717 GEN_PARITY(%g1, %g6)
2718 ! %g6 - synd{6}
2719
2720 /*
2721 * Merge the separate syndrome bits together to get
2722 * full synd{6:0}.
2723 */
2724 sllx %g6, FRF_SYND6_SHIFT, %g6
2725 or %g5, %g6, %g5 ! %g5 - synd{6:0}
2726
2727 /*
2728 * FRF errors use the same syndrome table as L2 cache data
2729 */
2730 setx l2_ecc_syndrome_table, %g2, %g3
2731 RELOC_OFFSET(%g6, %g2)
2732 sub %g3, %g2, %g3 ! %g3 - ecc syndrome table
2733
2734 mulx %g5, ECC_SYNDROME_TABLE_ENTRY_SIZE, %g5
2735 add %g3, %g5, %g6
2736 ldub [%g6], %g5 ! %g5 - decoded ECC syndrome
2737
2738 /*
2739 * Now check error type and correct if possible.
2740 */
2741 ! Not-an-error
2742 cmp %g5, ECC_ne
2743 be,pn %xcc, frf_exit
2744
2745 ! Uncorrectable error
2746 cmp %g5, ECC_U
2747 be,pn %xcc, convert_frfc_to_frfu
2748
2749 ! Multiple error
2750 cmp %g5, ECC_M
2751 be,pn %xcc, convert_frfc_to_frfu
2752
2753 ! NotData/Triple or worse
2754 cmp %g5, ECC_N_M
2755 be,pn %xcc, convert_frfc_to_frfu
2756
2757 ! Check bit error (will be corrected by HW)
2758 cmp %g5, ECC_C0
2759 bge %xcc, frf_exit
2760 nop
2761
2762 /*
2763 * Only reach this point if we are dealing with a
2764 * data bit error, so now correct it.
2765 * %g5 data bit in error
2766 */
2767 mov 1, %g1
2768 sllx %g1, %g5, %g5
2769 STRAND_STRUCT(%g2)
2770 lduw [%g2 + STRAND_FP_TMP1 + 4], %g1 ! odd data
2771 xor %g1, %g5, %g1
2772 ba frf_exit
2773 stw %g1, [%g2 + STRAND_FP_TMP1 + 4] ! store corrected data
2774
2775convert_frfc_to_frfu:
2776
2777 ! restore FPRS.FEF
2778 STRAND_POP(%g1, %g2)
2779 wr %g1, %fprs
2780
2781 /*
2782 * We know that FRFU is (FRFC entry - 1) so get the
2783 * error table entry and move it back to the FRFU entry
2784 */
2785 CONVERT_CE_TO_UE(+1)
2786 /* NOTREACHED */
2787
2788frf_exit:
2789 /*
2790 * Write back the corrected data. Note that if it was a check
2791 * bit which was in error this will be automatically fixed
2792 * by HW during the writeback.
2793 */
2794
2795 /*
2796 * Get the FRF Index and use it to calculate which
2797 * freg to write by working out offset into table
2798 * below.
2799 */
2800 GET_ERR_DSFAR(%g5, %g4)
2801 and %g5, DSFAR_FRF_DBL_REG_MASK, %g5
2802 sllx %g5, 2, %g5 ! each table entry 2 instr in size
2803
2804 ba write_fr_start
2805 rd %pc, %g4
2806
2807 ba write_fr_done
2808 ldd [%g2 + STRAND_FP_TMP1], %f0
2809 ba write_fr_done
2810 ldd [%g2 + STRAND_FP_TMP1], %f2
2811 ba write_fr_done
2812 ldd [%g2 + STRAND_FP_TMP1], %f4
2813 ba write_fr_done
2814 ldd [%g2 + STRAND_FP_TMP1], %f6
2815 ba write_fr_done
2816 ldd [%g2 + STRAND_FP_TMP1], %f8
2817 ba write_fr_done
2818 ldd [%g2 + STRAND_FP_TMP1], %f10
2819 ba write_fr_done
2820 ldd [%g2 + STRAND_FP_TMP1], %f12
2821 ba write_fr_done
2822 ldd [%g2 + STRAND_FP_TMP1], %f14
2823 ba write_fr_done
2824 ldd [%g2 + STRAND_FP_TMP1], %f16
2825 ba write_fr_done
2826 ldd [%g2 + STRAND_FP_TMP1], %f18
2827 ba write_fr_done
2828 ldd [%g2 + STRAND_FP_TMP1], %f20
2829 ba write_fr_done
2830 ldd [%g2 + STRAND_FP_TMP1], %f22
2831 ba write_fr_done
2832 ldd [%g2 + STRAND_FP_TMP1], %f24
2833 ba write_fr_done
2834 ldd [%g2 + STRAND_FP_TMP1], %f26
2835 ba write_fr_done
2836 ldd [%g2 + STRAND_FP_TMP1], %f28
2837 ba write_fr_done
2838 ldd [%g2 + STRAND_FP_TMP1], %f30
2839 ba write_fr_done
2840 ldd [%g2 + STRAND_FP_TMP1], %f32
2841 ba write_fr_done
2842 ldd [%g2 + STRAND_FP_TMP1], %f34
2843 ba write_fr_done
2844 ldd [%g2 + STRAND_FP_TMP1], %f36
2845 ba write_fr_done
2846 ldd [%g2 + STRAND_FP_TMP1], %f38
2847 ba write_fr_done
2848 ldd [%g2 + STRAND_FP_TMP1], %f40
2849 ba write_fr_done
2850 ldd [%g2 + STRAND_FP_TMP1], %f42
2851 ba write_fr_done
2852 ldd [%g2 + STRAND_FP_TMP1], %f44
2853 ba write_fr_done
2854 ldd [%g2 + STRAND_FP_TMP1], %f46
2855 ba write_fr_done
2856 ldd [%g2 + STRAND_FP_TMP1], %f48
2857 ba write_fr_done
2858 ldd [%g2 + STRAND_FP_TMP1], %f50
2859 ba write_fr_done
2860 ldd [%g2 + STRAND_FP_TMP1], %f52
2861 ba write_fr_done
2862 ldd [%g2 + STRAND_FP_TMP1], %f54
2863 ba write_fr_done
2864 ldd [%g2 + STRAND_FP_TMP1], %f56
2865 ba write_fr_done
2866 ldd [%g2 + STRAND_FP_TMP1], %f58
2867 ba write_fr_done
2868 ldd [%g2 + STRAND_FP_TMP1], %f60
2869 ba write_fr_done
2870 ldd [%g2 + STRAND_FP_TMP1], %f62
2871
2872write_fr_start:
2873 DISABLE_PSCCE(%g1, %g2, %g3)
2874
2875 STRAND_STRUCT(%g2)
2876 add %g4, %g5, %g4
2877 jmp %g4 + SZ_INSTR
2878 nop
2879
2880write_fr_done:
2881 ENABLE_PSCCE(%g1, %g2, %g3)
2882
2883 ! restore FPRS.FEF
2884 STRAND_POP(%g1, %g2)
2885 wr %g1, %fprs
2886 HVRET
2887 SET_SIZE(correct_frfc)