Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / huron / src / errors_subr.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: errors_subr.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49#pragma ident "@(#)errors_subr.s 1.6 07/08/03 SMI"
50
51#include <sys/asm_linkage.h>
52#include <hypervisor.h>
53#include <sun4v/asi.h>
54#include <asi.h>
55#include <mmu.h>
56#include <hprivregs.h>
57#include <cache.h>
58
59#include <offsets.h>
60#include <util.h>
61#include <abort.h>
62#include <debug.h>
63#include <dram.h>
64#include <cmp.h>
65#include <intr.h>
66#include <error_defs.h>
67#include <error_regs.h>
68#include <error_soc.h>
69#include <error_asm.h>
70
71 /*
72 * Enable errors
73 */
 /*
  * enable_errors_strand()
  *
  * Enable per-strand (CMP core) error reporting and error traps by
  * writing the core error-report-enable and error-trap-enable
  * registers through ASI_ERR_EN.
  *
  * In:		nothing
  * Out:	nothing
  * Clobbers:	%g4, %g5, %g6
  * Return:	%g7 (HVRET)
  */
74 ENTRY(enable_errors_strand)
75
76 ! enable CMP reporting
	! %g4 = register offset, %g6 = enable mask (%g5 is setx scratch)
77 mov CORE_ERR_REPORT_EN, %g4
78 setx CORE_ERRORS_ENABLE, %g5, %g6
79 stxa %g6, [%g4]ASI_ERR_EN
80
81 ! enable CMP traps
82 mov CORE_ERR_TRAP_EN, %g4
83 setx CORE_ERROR_TRAP_ENABLE, %g5, %g6
84 stxa %g6, [%g4]ASI_ERR_EN
85
86 HVRET
87
88 SET_SIZE(enable_errors_strand)
89
 /*
  * enable_errors_chip()
  *
  * Chip-wide error initialization, run once per chip:
  *   1. steer SOC and L2 error reports to the current physical strand
  *   2. clear DRAM ESR/FBD syndrome and set the FBR error-count ratio
  *	 for every enabled DRAM bank
  *   3. clear L2 ESR/EAR/NotData and enable L2 CE/UE traps for every
  *	 enabled L2 bank
  *   4. clear SOC status, then enable SOC error logging, traps and
  *	 fatal-error reporting
  *
  * Disabled banks are skipped (accessing them hangs the chip).
  *
  * Clobbers:	%g1 - %g6
  * Return:	%g7 (HVRET)
  */
90 ENTRY(enable_errors_chip)
91
92 /*
93 * Target SOC/L2 errors at current strand
94 */
95 PHYS_STRAND_ID(%g2)
96 setx SOC_ERRORSTEER_REG, %g3, %g4
97 stx %g2, [%g4]
98
	! Build the L2 errorsteer field for this strand:
	! %g1 = field mask, %g2 = strand id shifted into the field
99 set L2_ERRORSTEER_MASK, %g1
100 sllx %g2, L2_ERRORSTEER_SHIFT, %g2
101 and %g2, %g1, %g2
102 setx L2_CONTROL_REG, %g4, %g5
103 set (NO_L2_BANKS - 1), %g3
1041:
105 SKIP_DISABLED_L2_BANK(%g3, %g4, %g6, 2f)
106
	! read-modify-write this bank's control reg: replace steer field
107 sllx %g3, L2_BANK_SHIFT, %g4
108 ldx [%g5 + %g4], %g6
109 andn %g6, %g1, %g6
110 or %g6, %g2, %g6
111 stx %g6, [%g5 + %g4]
1122:
113 ! next L2 bank
114 brgz,pt %g3, 1b
115 dec %g3
116
117 /*
118 * Clear DRAM ESR/FBD/ND for all banks
119 * Set the DRAM ECC/FBR Error Count registers
120 */
121 set (NO_DRAM_BANKS - 1), %g3
1221:
123 ! skip banks which are disabled. causes hang.
124 SKIP_DISABLED_DRAM_BANK(%g3, %g4, %g5, 2f)
125
126 setx DRAM_ESR_BASE, %g4, %g5
127 sllx %g3, DRAM_BANK_SHIFT, %g2
128 or %g5, %g2, %g2
129 ldx [%g2], %g4
130 stx %g4, [%g2] ! clear DRAM ESR RW1C
131 stx %g0, [%g2] ! clear DRAM ESR RW
132
	! Legion (the simulator) does not model the FBD/FBR registers
133#ifndef DEBUG_LEGION
134
135 setx DRAM_FBD_BASE, %g4, %g5
136 sllx %g3, DRAM_BANK_SHIFT, %g4
137 or %g5, %g4, %g4
138 stx %g0, [%g4] ! clear DRAM FBD SYND RW
139
140 setx DRAM_FBR_COUNT_BASE, %g4, %g5
141 sllx %g3, DRAM_BANK_SHIFT, %g4
142 add %g5, %g4, %g5
143 mov DRAM_ERROR_COUNTER_FBR_RATIO, %g4
144 stx %g4, [%g5]
145
146#endif
1472:
148 ! next bank
149 brgz,pt %g3, 1b
150 dec %g3
151
152 ! enable L2$ and DRAM error traps
153 set (NO_L2_BANKS - 1), %g3
1541:
155 ! skip banks which are disabled. causes hang.
156 SKIP_DISABLED_L2_BANK(%g3, %g4, %g5, 2f)
157
	! clear any residual error state before enabling traps
158 setx L2_ERROR_STATUS_REG, %g4, %g5
159 sllx %g3, L2_BANK_SHIFT, %g4
160 or %g4, %g5, %g4
161 ldx [%g4], %g5
162 stx %g5, [%g4] ! clear ESR RW1C
163 stx %g0, [%g4] ! clear ESR RW
164
165 setx L2_ERROR_ADDRESS_REG, %g4, %g5
166 sllx %g3, L2_BANK_SHIFT, %g4
167 or %g4, %g5, %g4
168 stx %g0, [%g4] ! clear EAR RW
169
170 setx L2_ERROR_NOTDATA_REG, %g4, %g5
171 sllx %g3, L2_BANK_SHIFT, %g4
172 or %g4, %g5, %g4
173 ldx [%g4], %g5
174 stx %g5, [%g4] ! clear NDESR RW1C
175 stx %g0, [%g4] ! clear NDESR RW
176
	! enable correctable + non-correctable error traps for this bank
177 setx L2_ERROR_ENABLE_REG, %g4, %g5
178 sllx %g3, L2_BANK_SHIFT, %g4
179 add %g4, %g5, %g4
180 ldx [%g4], %g2
181 or %g2, (L2_NCEEN | L2_CEEN), %g2
182 stx %g2, [%g4]
1832:
184 brgz,pt %g3, 1b
185 dec %g3
186
187 ! clear the SOC STATUS register before enabling logs/traps
188 setx SOC_ERROR_STATUS_REG, %g5, %g6
189 stx %g0, [%g6]
190
191 ! enable all SOC error recording -- reset/config?
192 setx SOC_ERROR_LOG_ENABLE, %g5, %g6
193 setx SOC_ALL_ERRORS, %g3, %g1
194 stx %g1, [%g6]
195
196 ! enable all SOC error traps -- reset/config?
197 setx SOC_ERROR_TRAP_ENABLE, %g5, %g6
198 setx SOC_ALL_ERRORS, %g3, %g1
199 stx %g1, [%g6]
200
201 ! enable all SOC fatal errors -- reset/config?
202 setx SOC_FATAL_ERROR_ENABLE, %g5, %g6
203 setx SOC_FATAL_ERRORS, %g3, %g1
204 stx %g1, [%g6]
205
206 HVRET
207
208 SET_SIZE(enable_errors_chip)
209
210#ifdef DEBUG_LEGION
211 /*
212 * Print Service Error Report (SER) to console
213 * %g7 return address
214 */
 /*
  * print_diag_ser()
  *
  * Debug-only (DEBUG_LEGION): dump the common service-error-report
  * header fields of the current diag buffer to the console, one
  * "NAME : 0xVALUE" line per field.
  *
  * Returns silently (label 1f) when no diag buffer is in use.
  * %g7 is the return address; it is parked in %g6 across the
  * PRINT/PRINTX macros, which clobber %g7 internally.
  *
  * Clobbers:	%g1, %g2, %g6 (and PRINT macro scratch)
  */
215 ENTRY(print_diag_ser)
216 GET_ERR_DIAG_BUF(%g1, %g2)
217 brz,pn %g1, 1f
218 nop
219
	! save return address: PRINT/PRINTX use %g7 as link register
220 mov %g7, %g6
221
222 PRINT("Error type : 0x");
223 ldx [%g1 + ERR_DIAG_RPRT_ERROR_TYPE], %g2
224 PRINTX(%g2)
225 PRINT("\r\n");
226
227 PRINT("Report type : 0x");
228 ldx [%g1 + ERR_DIAG_RPRT_REPORT_TYPE], %g2
229 PRINTX(%g2)
230 PRINT("\r\n");
231
232 PRINT("TOD: 0x");
233 ldx [%g1 + ERR_DIAG_RPRT_TOD], %g2
234 PRINTX(%g2)
235 PRINT("\r\n");
236
237 PRINT("EHDL : 0x");
238 ldx [%g1 + ERR_DIAG_RPRT_EHDL], %g2
239 PRINTX(%g2)
240 PRINT("\r\n");
241
242 PRINT("ERR_STICK: 0x");
243 ldx [%g1 + ERR_DIAG_RPRT_ERR_STICK], %g2
244 PRINTX(%g2)
245 PRINT("\r\n");
246
247 PRINT("CPUVER: 0x");
248 ldx [%g1 + ERR_DIAG_RPRT_CPUVER], %g2
249 PRINTX(%g2)
250 PRINT("\r\n");
251
252 PRINT("CPUSERIAL: 0x");
253 ldx [%g1 + ERR_DIAG_RPRT_SERIAL], %g2
254 PRINTX(%g2)
255 PRINT("\r\n");
256
257 PRINT("TSTATE: 0x");
258 ldx [%g1 + ERR_DIAG_RPRT_TSTATE], %g2
259 PRINTX(%g2)
260 PRINT("\r\n");
261
262 PRINT("HTSTATE: 0x");
263 ldx [%g1 + ERR_DIAG_RPRT_HTSTATE], %g2
264 PRINTX(%g2)
265 PRINT("\r\n");
266
267 PRINT("TPC: 0x");
268 ldx [%g1 + ERR_DIAG_RPRT_TPC], %g2
269 PRINTX(%g2)
270 PRINT("\r\n");
271
	! CPUID/TT are 16-bit fields, TL is 8-bit; hence lduh/ldub
272 PRINT("CPUID : 0x");
273 lduh [%g1 + ERR_DIAG_RPRT_CPUID], %g2
274 PRINTX(%g2)
275 PRINT("\r\n");
276
277 PRINT("TT: 0x");
278 lduh [%g1 + ERR_DIAG_RPRT_TT], %g2
279 PRINTX(%g2)
280 PRINT("\r\n");
281
282 PRINT("TL : 0x");
283 ldub [%g1 + ERR_DIAG_RPRT_TL], %g2
284 PRINTX(%g2)
285 PRINT("\r\n");
286
	! restore caller's return address
287 mov %g6, %g7
2881:
289 HVRET
290 SET_SIZE(print_diag_ser)
291
292
293 /*
294 * print diag buf data to console
295 * %g7 return address
296 */
 /*
  * print_diag_buf()
  *
  * Debug-only (DEBUG_LEGION): dump the raw diag-buffer data to the
  * console: SPARC SFSR/SFAR/ESR registers, then per-bank L2 cache
  * ESR/EAR/NotData (banks 0-7), then per-bank DRAM
  * ESR/EAR/LOC/CTR/FBD (banks 0-3).  The bank sequences are fully
  * unrolled; each uses a compile-time (INCR * bank) offset into the
  * diag data buffer.
  *
  * Returns silently (label 1f) when no diag buffer is in use.
  * %g7 (return address) is parked in %g6 across the PRINT/PRINTX
  * macros, which clobber %g7 internally.
  *
  * Clobbers:	%g1, %g2, %g6 (and PRINT macro scratch)
  */
297 ENTRY(print_diag_buf)
298 GET_ERR_DIAG_BUF(%g1, %g2)
299 brz,pn %g1, 1f
300 nop
301
	! save return address: PRINT/PRINTX use %g7 as link register
302 mov %g7, %g6
303
	! switch %g1 from the report header to the raw data area
304 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
305
306 PRINT("ISFSR: 0x");
307 ldx [%g1 + ERR_DIAG_BUF_SPARC_ISFSR], %g2
308 PRINTX(%g2)
309 PRINT("\r\n");
310
311 PRINT("DSFSR: 0x");
312 ldx [%g1 + ERR_DIAG_BUF_SPARC_DSFSR], %g2
313 PRINTX(%g2)
314 PRINT("\r\n");
315
316 PRINT("DSFAR: 0x");
317 ldx [%g1 + ERR_DIAG_BUF_SPARC_DSFAR], %g2
318 PRINTX(%g2)
319 PRINT("\r\n");
320
321 PRINT("DESR: 0x");
322 ldx [%g1 + ERR_DIAG_BUF_SPARC_DESR], %g2
323 PRINTX(%g2)
324 PRINT("\r\n");
325
326 PRINT("DFESR: 0x");
327 ldx [%g1 + ERR_DIAG_BUF_SPARC_DFESR], %g2
328 PRINTX(%g2)
329 PRINT("\r\n");
330
	! L2 cache banks 0-7: ESR / EAR / NotData per bank
331 PRINT("BANK 0: L2_ESR :0x");
332 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 0)], %g2
333 PRINTX(%g2)
334 PRINT(" : L2_EAR :0x");
335 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 0)], %g2
336 PRINTX(%g2)
337 PRINT(" : L2_ND: 0x");
338 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 0)], %g2
339 PRINTX(%g2)
340 PRINT("\r\n");
341
342 PRINT("BANK 1: L2_ESR :0x");
343 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 1)], %g2
344 PRINTX(%g2)
345 PRINT(" : L2_EAR :0x");
346 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 1)], %g2
347 PRINTX(%g2)
348 PRINT(" : L2_ND: 0x");
349 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 1)], %g2
350 PRINTX(%g2)
351 PRINT("\r\n");
352
353 PRINT("BANK 2: L2_ESR :0x");
354 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 2)], %g2
355 PRINTX(%g2)
356 PRINT(" : L2_EAR :0x");
357 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 2)], %g2
358 PRINTX(%g2)
359 PRINT(" : L2_ND: 0x");
360 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 2)], %g2
361 PRINTX(%g2)
362 PRINT("\r\n");
363
364 PRINT("BANK 3: L2_ESR :0x");
365 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 3)], %g2
366 PRINTX(%g2)
367 PRINT(" : L2_EAR :0x");
368 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 3)], %g2
369 PRINTX(%g2)
370 PRINT(" : L2_ND: 0x");
371 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 3)], %g2
372 PRINTX(%g2)
373 PRINT("\r\n");
374
375 PRINT("BANK 4: L2_ESR :0x");
376 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 4)], %g2
377 PRINTX(%g2)
378 PRINT(" : L2_EAR :0x");
379 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 4)], %g2
380 PRINTX(%g2)
381 PRINT(" : L2_ND: 0x");
382 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 4)], %g2
383 PRINTX(%g2)
384 PRINT("\r\n");
385
386 PRINT("BANK 5: L2_ESR :0x");
387 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 5)], %g2
388 PRINTX(%g2)
389 PRINT(" : L2_EAR :0x");
390 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 5)], %g2
391 PRINTX(%g2)
392 PRINT(" : L2_ND: 0x");
393 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 5)], %g2
394 PRINTX(%g2)
395 PRINT("\r\n");
396
397 PRINT("BANK 6: L2_ESR :0x");
398 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 6)], %g2
399 PRINTX(%g2)
400 PRINT(" : L2_EAR :0x");
401 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 6)], %g2
402 PRINTX(%g2)
403 PRINT(" : L2_ND: 0x");
404 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 6)], %g2
405 PRINTX(%g2)
406 PRINT("\r\n");
407
408 PRINT("BANK 7: L2_ESR :0x");
409 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ESR + (ERR_DIAG_BUF_L2_CACHE_ESR_INCR * 7)], %g2
410 PRINTX(%g2)
411 PRINT(" : L2_EAR :0x");
412 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_EAR + (ERR_DIAG_BUF_L2_CACHE_EAR_INCR * 7)], %g2
413 PRINTX(%g2)
414 PRINT(" : L2_ND: 0x");
415 ldx [%g1 + ERR_DIAG_BUF_L2_CACHE_ND + (ERR_DIAG_BUF_L2_CACHE_ND_INCR * 7)], %g2
416 PRINTX(%g2)
417 PRINT("\r\n");
418
	! DRAM banks 0-3: ESR / EAR / LOC / CTR / FBD per bank
419 PRINT("Bank 0: DRAM_ESR :0x");
420 ldx [%g1 + ERR_DIAG_BUF_DRAM_ESR + (ERR_DIAG_BUF_DRAM_ESR_INCR * 0)], %g2
421 PRINTX(%g2)
422 PRINT(" : DRAM_EAR :0x");
423 ldx [%g1 + ERR_DIAG_BUF_DRAM_EAR + (ERR_DIAG_BUF_DRAM_EAR_INCR * 0)], %g2
424 PRINTX(%g2)
425 PRINT(" : DRAM_LOC: 0x");
426 ldx [%g1 + ERR_DIAG_BUF_DRAM_LOC + (ERR_DIAG_BUF_DRAM_LOC_INCR * 0)], %g2
427 PRINTX(%g2)
428 PRINT("\r\n");
429 PRINT(" DRAM_CTR :0x");
430 ldx [%g1 + ERR_DIAG_BUF_DRAM_CTR + (ERR_DIAG_BUF_DRAM_CTR_INCR * 0)], %g2
431 PRINTX(%g2)
432 PRINT(" : DRAM_FBD :0x");
433 ldx [%g1 + ERR_DIAG_BUF_DRAM_FBD + (ERR_DIAG_BUF_DRAM_FBD_INCR * 0)], %g2
434 PRINTX(%g2)
435 PRINT("\r\n");
436
437 PRINT("Bank 1: DRAM_ESR :0x");
438 ldx [%g1 + ERR_DIAG_BUF_DRAM_ESR + (ERR_DIAG_BUF_DRAM_ESR_INCR * 1)], %g2
439 PRINTX(%g2)
440 PRINT(" : DRAM_EAR :0x");
441 ldx [%g1 + ERR_DIAG_BUF_DRAM_EAR + (ERR_DIAG_BUF_DRAM_EAR_INCR * 1)], %g2
442 PRINTX(%g2)
443 PRINT(" : DRAM_LOC: 0x");
444 ldx [%g1 + ERR_DIAG_BUF_DRAM_LOC + (ERR_DIAG_BUF_DRAM_LOC_INCR * 1)], %g2
445 PRINTX(%g2)
446 PRINT("\r\n");
447 PRINT(" DRAM_CTR :0x");
448 ldx [%g1 + ERR_DIAG_BUF_DRAM_CTR + (ERR_DIAG_BUF_DRAM_CTR_INCR * 1)], %g2
449 PRINTX(%g2)
450 PRINT(" : DRAM_FBD :0x");
451 ldx [%g1 + ERR_DIAG_BUF_DRAM_FBD + (ERR_DIAG_BUF_DRAM_FBD_INCR * 1)], %g2
452 PRINTX(%g2)
453 PRINT("\r\n");
454
455 PRINT("Bank 2: DRAM_ESR :0x");
456 ldx [%g1 + ERR_DIAG_BUF_DRAM_ESR + (ERR_DIAG_BUF_DRAM_ESR_INCR * 2)], %g2
457 PRINTX(%g2)
458 PRINT(" : DRAM_EAR :0x");
459 ldx [%g1 + ERR_DIAG_BUF_DRAM_EAR + (ERR_DIAG_BUF_DRAM_EAR_INCR * 2)], %g2
460 PRINTX(%g2)
461 PRINT(" : DRAM_LOC: 0x");
462 ldx [%g1 + ERR_DIAG_BUF_DRAM_LOC + (ERR_DIAG_BUF_DRAM_LOC_INCR * 2)], %g2
463 PRINTX(%g2)
464 PRINT("\r\n");
465 PRINT(" DRAM_CTR :0x");
466 ldx [%g1 + ERR_DIAG_BUF_DRAM_CTR + (ERR_DIAG_BUF_DRAM_CTR_INCR * 2)], %g2
467 PRINTX(%g2)
468 PRINT(" : DRAM_FBD :0x");
469 ldx [%g1 + ERR_DIAG_BUF_DRAM_FBD + (ERR_DIAG_BUF_DRAM_FBD_INCR * 2)], %g2
470 PRINTX(%g2)
471 PRINT("\r\n");
472
473 PRINT("Bank 3: DRAM_ESR :0x");
474 ldx [%g1 + ERR_DIAG_BUF_DRAM_ESR + (ERR_DIAG_BUF_DRAM_ESR_INCR * 3)], %g2
475 PRINTX(%g2)
476 PRINT(" : DRAM_EAR :0x");
477 ldx [%g1 + ERR_DIAG_BUF_DRAM_EAR + (ERR_DIAG_BUF_DRAM_EAR_INCR * 3)], %g2
478 PRINTX(%g2)
479 PRINT(" : DRAM_LOC: 0x");
480 ldx [%g1 + ERR_DIAG_BUF_DRAM_LOC + (ERR_DIAG_BUF_DRAM_LOC_INCR * 3)], %g2
481 PRINTX(%g2)
482 PRINT("\r\n");
483 PRINT(" DRAM_CTR :0x");
484 ldx [%g1 + ERR_DIAG_BUF_DRAM_CTR + (ERR_DIAG_BUF_DRAM_CTR_INCR * 3)], %g2
485 PRINTX(%g2)
486 PRINT(" : DRAM_FBD :0x");
487 ldx [%g1 + ERR_DIAG_BUF_DRAM_FBD + (ERR_DIAG_BUF_DRAM_FBD_INCR * 3)], %g2
488 PRINTX(%g2)
489 PRINT("\r\n");
490
	! restore caller's return address
491 mov %g6, %g7
4921:
493 HVRET
494 SET_SIZE(print_diag_buf)
495
496 /*
497 * print sun4v erpt data to console
498 * %g7 return address
499 */
 /*
  * print_sun4v_erpt()
  *
  * Debug-only (DEBUG_LEGION): dump the fields of the current sun4v
  * guest error report buffer to the console.
  *
  * Returns silently (label 1f) when no sun4v report buffer is in use.
  * %g7 (return address) is parked in %g6 across the PRINT/PRINTX
  * macros, which clobber %g7 internally.
  *
  * Clobbers:	%g1, %g2, %g6 (and PRINT macro scratch)
  */
500 ENTRY(print_sun4v_erpt)
501 GET_ERR_SUN4V_RPRT_BUF(%g1, %g2)
502 brz,pn %g1, 1f
503 nop
504
	! save return address: PRINT/PRINTX use %g7 as link register
505 mov %g7, %g6
506
507 PRINT("EHDL : 0x");
508 ldx [%g1 + ERR_SUN4V_RPRT_G_EHDL], %g2 ! ehdl
509 PRINTX(%g2)
510 PRINT("\r\n");
511
512 PRINT("STICK : 0x");
513 ldx [%g1 + ERR_SUN4V_RPRT_G_STICK], %g2 ! stick
514 PRINTX(%g2)
515 PRINT("\r\n");
516
	! edesc/attr/sz are 32-bit fields, hence ld
517 PRINT("EDESC : 0x");
518 ld [%g1 + ERR_SUN4V_RPRT_EDESC], %g2 ! edesc
519 PRINTX(%g2)
520 PRINT("\r\n");
521
522 PRINT("ATTR : 0x");
523 ld [%g1 + ERR_SUN4V_RPRT_ATTR], %g2 ! attr
524 PRINTX(%g2)
525 PRINT("\r\n");
526
527 PRINT("ADDR : 0x");
528 ldx [%g1 + ERR_SUN4V_RPRT_ADDR], %g2 ! addr
529 PRINTX(%g2)
530 PRINT("\r\n");
531
532 PRINT("SZ : 0x");
533 ld [%g1 + ERR_SUN4V_RPRT_SZ], %g2 ! sz
534 PRINTX(%g2)
535 PRINT("\r\n");
536
	! cpuid/secs/asi/reg are 16-bit fields, hence lduh
537 PRINT("CPUID : 0x");
538 lduh [%g1 + ERR_SUN4V_RPRT_G_CPUID], %g2 ! cpuid
539 PRINTX(%g2)
540 PRINT("\r\n");
541
542 PRINT("SECS : 0x");
543 lduh [%g1 + ERR_SUN4V_RPRT_G_SECS], %g2 ! secs
544 PRINTX(%g2)
545 PRINT("\r\n");
546
547 PRINT("ASI : 0x");
548 lduh [%g1 + ERR_SUN4V_RPRT_ASI], %g2 ! asi/pad
549 PRINTX(%g2)
550 PRINT("\r\n");
551
552 PRINT("REG : 0x");
553 lduh [%g1 + ERR_SUN4V_RPRT_REG], %g2 ! reg
554 PRINTX(%g2)
555 PRINT("\r\n");
556
	! restore caller's return address
557 mov %g6, %g7
5581:
559 HVRET
560 SET_SIZE(print_sun4v_erpt)
561
562#endif /* DEBUG_LEGION */
563
 /*
  * relocate_error_tables()
  *
  * Walk every statically-initialized error table and rebase the
  * function pointers in each entry by the relocation offset, via
  * relocate_error_table_entries().
  *
  * In:		%g5 relocation offset (consumed by the helper)
  * Return:	%g7 (parked in %g6 across the HVCALLs, which
  *		clobber %g7)
  * Clobbers:	%g1, %g2, %g3, %g6
  */
564 ENTRY(relocate_error_tables)
	! save return address: HVCALL uses %g7 as link register
565 mov %g7, %g6
566
	! %g3 = table base for each call (setx scratch in %g2)
567 setx instruction_access_MMU_errors, %g2, %g3
568 HVCALL(relocate_error_table_entries)
569 setx data_access_MMU_errors, %g2, %g3
570 HVCALL(relocate_error_table_entries)
571 setx internal_processor_errors, %g2, %g3
572 HVCALL(relocate_error_table_entries)
573 setx hw_corrected_errors, %g2, %g3
574 HVCALL(relocate_error_table_entries)
575 setx store_errors, %g2, %g3
576 HVCALL(relocate_error_table_entries)
577 setx data_access_errors, %g2, %g3
578 HVCALL(relocate_error_table_entries)
579 setx sw_recoverable_errors, %g2, %g3
580 HVCALL(relocate_error_table_entries)
581 setx instruction_access_errors, %g2, %g3
582 HVCALL(relocate_error_table_entries)
583 setx l2c_errors, %g2, %g3
584 HVCALL(relocate_error_table_entries)
585 setx soc_errors, %g2, %g3
586 HVCALL(relocate_error_table_entries)
587 setx dram_errors, %g2, %g3
588 HVCALL(relocate_error_table_entries)
589 setx precise_dau_errors, %g2, %g3
590 HVCALL(relocate_error_table_entries)
591 setx disrupting_dau_errors, %g2, %g3
592 HVCALL(relocate_error_table_entries)
593 setx precise_ldau_errors, %g2, %g3
594 HVCALL(relocate_error_table_entries)
595 setx disrupting_ldau_errors, %g2, %g3
596 HVCALL(relocate_error_table_entries)
597 setx dbu_errors, %g2, %g3
598 HVCALL(relocate_error_table_entries)
599 setx sw_abort_errors, %g2, %g3
600 HVCALL(relocate_error_table_entries)
601
	! restore caller's return address
602 mov %g6, %g7
603 HVRET
604 SET_SIZE(relocate_error_tables)
605
606 /*
607 * Relocate the function pointers in an error table
608 * %g3 error_table
609 * %g5 relocation offset
610 * %g7 return address
611 * %g1 clobbered
612 * %g2, %g4, %g6 preserved
613 */
 /*
  * relocate_error_table_entries()
  *
  * Rebase (subtract the relocation offset from) the table base and
  * every non-NULL function pointer (guest-report, report, correct,
  * storm, print) in each entry of one error table, stopping after
  * the entry whose ERR_FLAGS has ERR_LAST_IN_TABLE set.
  *
  * In:		%g3 error_table
  *		%g5 relocation offset
  * Return:	%g7
  * Clobbers:	%g1 (and %g3 advances through the table)
  * Preserved:	%g2, %g4, %g6
  */
614 ENTRY(relocate_error_table_entries)
	! rebase the table pointer itself first
615 sub %g3, %g5, %g3
6161:
	! For each pointer field: skip if NULL, otherwise store the
	! rebased value.  Note the delay-slot sub always executes, but
	! the result is only stored on the non-NULL fall-through path.
617 ldx [%g3 + ERR_GUEST_REPORT_FCN], %g1
618 brz %g1, 2f
619 sub %g1, %g5, %g1
620 stx %g1, [%g3 + ERR_GUEST_REPORT_FCN]
6212:
622 ldx [%g3 + ERR_REPORT_FCN], %g1
623 brz %g1, 3f
624 sub %g1, %g5, %g1
625 stx %g1, [%g3 + ERR_REPORT_FCN]
6263:
627 ldx [%g3 + ERR_CORRECT_FCN], %g1
628 brz %g1, 4f
629 sub %g1, %g5, %g1
630 stx %g1, [%g3 + ERR_CORRECT_FCN]
6314:
632 ldx [%g3 + ERR_STORM_FCN], %g1
633 brz %g1, 5f
634 sub %g1, %g5, %g1
635 stx %g1, [%g3 + ERR_STORM_FCN]
6365:
637 ldx [%g3 + ERR_PRINT_FCN], %g1
638 brz %g1, 6f
639 sub %g1, %g5, %g1
640 stx %g1, [%g3 + ERR_PRINT_FCN]
641
6426:
	! loop until the last-in-table flag; delay slot advances %g3
643 ld [%g3 + ERR_FLAGS], %g1
644 btst ERR_LAST_IN_TABLE, %g1
645 bz,pn %xcc, 1b
646 add %g3, ERROR_TABLE_ENTRY_SIZE, %g3
647
648 HVRET
649
650 SET_SIZE(relocate_error_table_entries)
651
652 /*
653 * If we get an error trap which we cannot identify we want
654 * a basic service report (TT, TPC etc) sent to the FERG.
655 * To make this happen the error_table_entry for that trap
656 * must have an error report function.
657 *
658 * XXXX
659 * Is there any useful information we could gather here ?
660 * XXXX
661 */
	/*
	 * dump_no_error: placeholder report function for unidentified
	 * error traps.  Its mere presence in an error_table_entry makes
	 * the framework emit a basic service report (TT, TPC etc.).
	 * It currently gathers no extra data (see XXXX note above);
	 * the GET_ERR_DIAG_DATA_BUF result is unused.
	 *
	 * Clobbers:	%g1, %g2
	 * Return:	%g7 (HVRET)
	 */
662 ENTRY(dump_no_error)
663
664 GET_ERR_DIAG_DATA_BUF(%g1, %g2)
665
666 HVRET
667
668 SET_SIZE(dump_no_error)
669
670 /*
671 * Clear AMB FBDIMM memory errors
672 *
673 * These regs are RWCST, which is write 1 to clear,
674 * and sticky through a link reset.
675 */
 /*
  * clear_amb_errors()
  *
  * Clear the FERR and NERR registers of every AMB (advanced memory
  * buffer) on every FBDIMM channel of every enabled DRAM bank.
  * These registers are RWCST (write-1-to-clear, sticky across link
  * reset), so -1 is written through the DRAM config-register access
  * window to clear all bits.
  *
  * Loop nesting: bank (%g3) -> channel (%g1) -> AMB/DIMM (%g4).
  * Channel count comes from the single-channel-mode register; the
  * max AMB ID comes from the DIMM-present register.
  *
  * Return:	%g7 (saved via STORE_ERR_RETURN_ADDR because %g7 is
  *		reused as a pointer inside the loop)
  * Clobbers:	%g1 - %g6 (and %g7 until restored)
  */
676 ENTRY(clear_amb_errors)
677
678 STORE_ERR_RETURN_ADDR(%g7, %g1, %g2)
679
680 set (NO_DRAM_BANKS - 1), %g3
6810:
682 ! skip banks which are disabled. causes hang.
683 SKIP_DISABLED_DRAM_BANK(%g3, %g4, %g5, 4f)
684
685 /*
686 * How many channels to clear ?
687 */
688 setx DRAM_SNGL_CHNL_MODE_BASE, %g2, %g1
689 sllx %g3, DRAM_BANK_SHIFT, %g2
690 or %g1, %g2, %g1
691 ldx [%g1], %g2
692 and %g2, 1, %g2 ! %g2 == 1 single channel mode
693 movrz %g2, 1, %g1 ! loop counter 1 for 2 channels
694 movrnz %g2, 0, %g1 ! loop counter 0 for 1 channel
6951:
696 /*
697 * How many DIMMs per channel ?
698 */
699 setx DRAM_DIMM_PRESENT_BASE, %g2, %g4
700 sllx %g3, DRAM_BANK_SHIFT, %g2
701 or %g4, %g2, %g4
702 ldx [%g4], %g4
703 and %g4, 0xf, %g4 ! max AMB ID
7042:
705 ! %g3 bank
706 ! %g1 channel
707 ! %g4 DIMM
708
709 sllx %g4, CONFIG_ADDR_AMB_POS, %g6
710 ! %g6 AMB ID of Configuration register access
711
712 /* clear FERR */
	! compose config-space address: function FBD | FERR | AMB | channel
713 set ((CONFIG_FUNCTION_FBD << CONFIG_FUNCTION_SHIFT) | DRAM_FBDIMM_FERR), %g5
714 or %g5, %g6, %g5 ! AMB ID
715 sllx %g1, CONFIG_ADDR_CH_POS, %g2 ! Channel of Configuration register access
716 or %g5, %g2, %g5
717 ! %g5 channel/AMB ID/FERR
718
	! write the address, then write all-ones data to clear (RWCST)
719 setx DRAM_CONFIG_REG_ACC_ADDR_BASE, %g2, %g7
720 sllx %g3, DRAM_BANK_SHIFT, %g2
721 or %g7, %g2, %g7
722 stx %g5, [%g7]
723
724 setx DRAM_CONFIG_REG_ACC_DATA_BASE, %g2, %g7
725 sllx %g3, DRAM_BANK_SHIFT, %g2
726 or %g7, %g2, %g7
727 ! config registers are RWCST
728 mov -1, %g5
729 stx %g5, [%g7]
730
731 /* clear NERR */
732 set ((CONFIG_FUNCTION_FBD << CONFIG_FUNCTION_SHIFT) | DRAM_FBDIMM_NERR), %g5
733 or %g5, %g6, %g5 ! AMB ID
734 sllx %g1, CONFIG_ADDR_CH_POS, %g2 ! Channel of Configuration register access
735 or %g5, %g2, %g5
736 ! %g5 channel/AMB ID/FERR
737
738 setx DRAM_CONFIG_REG_ACC_ADDR_BASE, %g2, %g7
739 sllx %g3, DRAM_BANK_SHIFT, %g2
740 or %g7, %g2, %g7
741 stx %g5, [%g7]
742
743 setx DRAM_CONFIG_REG_ACC_DATA_BASE, %g2, %g7
744 sllx %g3, DRAM_BANK_SHIFT, %g2
745 or %g7, %g2, %g7
746 ! config registers are RWCST
747 mov -1, %g5
748 stx %g5, [%g7]
749
750 brgz,pt %g4, 2b
751 dec %g4 ! next AMB ID
752
753 brgz,pt %g1, 1b ! next channel
754 dec %g1
755
7564:
757 brgz,pt %g3, 0b
758 dec %g3 ! next DRAM bank
759
	! recover the return address stashed at entry
760 GET_ERR_RETURN_ADDR(%g7, %g2)
761 HVRET
762
763 SET_SIZE(clear_amb_errors)
764
765 /*
766 * Determine whether a particular error has been steered to this
767 * CPU rather than actually occurring on a resource owned by this
768 * guest. If it has, send the error details to a CPU owned by the
769 * guest which owns the resource which took the error and then just
770 * allow this CPU/guest to continue.
771 *
772 * %g1 - %g6 clobbered
773 * %g7 return address
774 */
 /*
  * errors_check_steering()
  *
  * Decide whether the current error report was merely steered to
  * this strand but actually belongs to another guest's memory.
  * Filter chain (each stage falls through to the exit when the
  * report should stay local):
  *   - only TT_ASYNCERR (sw_recoverable_error) traps qualify
  *   - only reports with the SUN4V_MEM_RPRT attribute qualify
  *   - the RA must be valid (not CPU_ERR_INVALID_RA)
  *   - the RA must NOT fall inside the current guest's RA range
  * If another guest owns the RA, tail-branch to cpu_reroute_error
  * to hand the report to one of that guest's CPUs.
  *
  * Clobbers:	%g1 - %g6
  * Return:	%g7
  */
775 ENTRY(errors_check_steering)
776
777 /*
778 * Errors causing precise/deferred traps will never require rerouting.
779 * Also, errors causing hw_corrected_error traps are always corrected
780 * by the hardware so no guest intervention is required. Only
781 * sw_recoverable_error traps might require a sun4v guest error
782 * report to be rerouted to a different guests CPU.
783 */
784 rdpr %tt, %g2
785 cmp %g2, TT_ASYNCERR
786 bne,pt %xcc, errors_check_steering_exit
787 nop
788
789 /*
790 * Only MEM reports might need rerouting
791 */
792 GET_ERR_SUN4V_RPRT_BUF(%g2, %g4)
793 ld [%g2 + ERR_SUN4V_RPRT_ATTR], %g4 ! attr
	! test the SUN4V_MEM_RPRT bit of attr
794 mov 1, %g3
795 sllx %g3, SUN4V_MEM_RPRT, %g3
796 and %g4, %g3, %g4
797 brz,pt %g4, errors_check_steering_exit
798 nop
799
800 /*
801 * Only MEM reports with a valid RA can be rerouted
802 */
803 ldx [%g2 + ERR_SUN4V_RPRT_ADDR], %g4
804 setx CPU_ERR_INVALID_RA, %g3, %g5
805 cmp %g4, %g5
806 be,pn %xcc, errors_check_steering_exit
807 nop
808
809 /*
810 * Does this RA belong to this guest ?
811 */
812 RA2PA_RANGE_CHECK(%g2, %g4, ERPT_MEM_SIZE, 1f, %g5)
813 ! yes, it does.
814 ba,pt %xcc, errors_check_steering_exit
815 nop
8161:
817 ! nope, some other guest
818
819 /*
820 * Find the guest which owns this RA.
821 * For each guest loop through the ra2pa_segment array and check the
822 * RA against the base/limit
823 * %g4 RA
824 */
825 ROOT_STRUCT(%g2)
826 ldx [%g2 + CONFIG_GUESTS], %g2 ! &guests[0]
827 set NGUESTS - 1, %g3 ! %g3 guest loop counter
8281:
	! RA2PA_RANGE_CHECK branches to 2f when the RA is NOT in
	! this guest's range; fall-through means we found the owner
829 RA2PA_RANGE_CHECK(%g2, %g4, ERPT_MEM_SIZE, 2f, %g5)
830 ! we have a valid RA so this is the guest for this error
831 ba,pt %xcc, 3f
832 nop
8332:
834 set GUEST_SIZE, %g5
835 add %g2, %g5, %g2 ! guest++
836 brnz,pt %g3, 1b
837 dec %g3 ! nguests--
838
839 ! no guest found for this RA
840 ba,pt %xcc, errors_check_steering_exit
841 nop
842
8433:
844 ! %g2 &guest
845 ! %g4 RA
846
847 ! is it for the guest we are running on ? (redundant check ...)
848 GUEST_STRUCT(%g1)
849 cmp %g1, %g2
850 be,pt %xcc, errors_check_steering_exit
851 nop
852
853 ! go and finish re-routing this error
	! tail branch: cpu_reroute_error inherits %g2 (guest), %g4 (RA),
	! %g7 (return address)
854 ba cpu_reroute_error
855 nop
856
857 /*
858 * If cpu_reroute_error() returns it has failed to reroute the
859 * error so just return and take the sun4v report on this guest
860 */
861
862errors_check_steering_exit:
863
864 HVRET
865
866 SET_SIZE(errors_check_steering)
867
868 /*
869 * re-route an error report (cont'd)
870 * 1. select one of the active CPUs for that guest
871 * 2. Copy the data from the error erport into that
872 * CPUs cpu struct
873 * 3. Send a VECINTR_ERROR_XCALL to that CPU
874 * 4. Clear the diag_buf/sun4v erpt in_use bits
875 * 5: RETRY
876 *
877 * %g2 target guest
878 * %g4 RA
879 * %g7 return address
880 */
881
 /*
  * cpu_reroute_error()
  *
  * Deliver a steered error report to the guest that owns the RA:
  * pick the first RUNNING vcpu of the target guest, spin-claim the
  * target strand's rerouted_addr slot, copy CPUID/EHDL/ATTR/STICK
  * (and implicitly ADDR via the claimed slot) into the target strand
  * struct, then x-call the target with VECINTR_ERROR_XCALL.  On
  * success the local report buffers are released, parked strands are
  * resumed if this trap parked them, saved globals are restored if
  * the handler stored them, and the trap exits with retry.
  * Falls through to HVRET only when no usable target vcpu exists.
  *
  * In:		%g2 target guest struct
  *		%g4 RA of the error
  * Return:	%g7 (only on failure path)
  * Clobbers:	%g1 - %g6
  */
882 ENTRY_NP(cpu_reroute_error)
883
884 /*
885 * find first live cpu in guest->vcpus
886 * Then deliver the error to that vcpu, and interrupt
887 * the strand it is running on to make that happen.
888 */
889 add %g2, GUEST_VCPUS, %g2
890 mov 0, %g3
8911:
892 cmp %g3, NVCPUS
893 be,pn %xcc, cpu_reroute_error_exit
894 nop
895
896 mulx %g3, GUEST_VCPUS_INCR, %g5
897 ldx [%g2 + %g5], %g1
	! NULL vcpu slot: annulled branch retries with next index
898 brz,a,pn %g1, 1b
899 inc %g3
900 ! check whether this CPU is running guest code ?
901 ldx [%g1 + CPU_STATUS], %g5
902 cmp %g5, CPU_STATE_RUNNING
903 bne,pt %xcc, 1b
904 inc %g3
905
906 ! %g3 target vcpu id
907 ! %g1 &vcpus[target]
908
909 ldx [%g1 + CPU_STRAND], %g1
910
911 /*
912 * It is possible that the CPUs rerouted data is already in use.
913 * We use the rerouted_addr field as a spinlock. The target CPU
914 * will set this to 0 after reading the error data allowing us
915 * to re-use the rerouting fields.
916 * See cpu_err_rerouted() below.
917 *
918 * %g1 &strands[target]
919 * %g3 target cpuid
920 * %g4 RA
921 */
922 set STRAND_REROUTED_ADDR, %g2
923 add %g1, %g2, %g6
	! casx succeeds (stores RA) only when the slot is 0; on failure
	! %g4 returns the old non-zero value and we spin
9241: casx [%g6], %g0, %g4
925 brnz,pn %g4, 1b
926 nop
927
928
929 ! get the data out of the current STRAND's sun4v erpt and store
930 ! in the target STRAND struct
931 GET_ERR_SUN4V_RPRT_BUF(%g5, %g6)
932 set STRAND_REROUTED_CPU, %g4
933 stx %g3, [%g1 + %g4]
934 ldx [%g5 + ERR_SUN4V_RPRT_G_EHDL], %g6 ! ehdl
935 set STRAND_REROUTED_EHDL, %g4
936 stx %g6, [%g1 + %g4]
937 ld [%g5 + ERR_SUN4V_RPRT_ATTR], %g6 ! attr
938 set STRAND_REROUTED_ATTR, %g4
939 stx %g6, [%g1 + %g4]
940 ldx [%g5 + ERR_SUN4V_RPRT_G_STICK], %g6 ! stick
941 ! STICK is probably not necessary. I doubt if FMA checks
942 ! both EHDL/STICK when looking for duplicate reports,
943 ! but it doesn't kill us to do it.
944 set STRAND_REROUTED_STICK, %g4
945 stx %g6, [%g1 + %g4]
946
947 ! send an x-call to the target CPU
948 ldub [%g1 + STRAND_ID], %g3
949 sllx %g3, INT_VEC_DIS_VCID_SHIFT, %g3
950 mov VECINTR_ERROR_XCALL, %g5
951 or %g3, %g5, %g3
952 stxa %g3, [%g0]ASI_INTR_UDB_W
953
954 /*
955 * Clear the in_use bit on the sun4v report buffer
956 */
	! annulled store executes only if the buffer pointer is non-NULL
957 GET_ERR_SUN4V_RPRT_BUF(%g2, %g4)
958 brnz,a,pt %g1, 1f
959 stub %g0, [%g2 + ERR_SUN4V_RPRT_IN_USE]
9601:
961
962 /*
963 * Clear the error report in_use field
964 */
965 GET_ERR_DIAG_BUF(%g1, %g2)
966 brnz,a,pt %g1, 1f
967 stub %g0, [%g1 + ERR_DIAG_RPRT_IN_USE]
9681:
969 /*
970 * error is rerouted, get out of here
971 */
972 GET_ERR_TABLE_ENTRY(%g1, %g2)
973
974 /*
975 * Does the trap handler for this error park the strands ?
976 * If yes, resume them here.
977 */
978 ld [%g1 + ERR_FLAGS], %g2
979 btst ERR_STRANDS_PARKED, %g2
980 bz,pn %xcc, 1f
981 nop
982
983 RESUME_ALL_STRANDS(%g3, %g4, %g5, %g6)
984
9851:
986 /*
987 * check whether we stored the globals and re-used
988 * at MAXPTL
989 */
990 btst ERR_GL_STORED, %g2
991 bz,pt %xcc, 1f
992 nop
993
994 RESTORE_GLOBALS(retry)
9951:
996 retry
997
998cpu_reroute_error_exit:
999
1000 /*
1001 * failed to find a guest to send this error to ...
1002 */
1003 HVRET
1004
1005 SET_SIZE(cpu_reroute_error)
1006
1007 /*
1008 * An error has been re-routed to this STRAND.
1009 * The EHDL/ADDR/STICK/ATTR have been stored in the STRAND struct
1010 * by the STRAND that originally detected the error.
1011 *
1012 * Note: STICK may not be strictly necessary
1013 */
 /*
  * cpu_err_rerouted()
  *
  * x-call handler on the strand that an error was rerouted TO.
  * Reads the CPUID/EHDL/STICK/ATTR/ADDR stashed in this strand's
  * struct by the detecting strand, builds a resumable sun4v error
  * report and queues it to the guest.  If the report is a CPU
  * report for a different vcpu (strand in error), that vcpu is
  * descheduled/stopped and the heartbeat is re-enabled here when it
  * was running on the failed cpu.  Exits the x-call with retry.
  *
  * Clobbers:	%g1 - %g6 (x-call context; no HVRET)
  */
1014 ENTRY_NP(cpu_err_rerouted)
1015
10161:
1017 STRAND_STRUCT(%g6)
1018
1019 set STRAND_REROUTED_ATTR, %g4
1020 ldx [%g6 + %g4], %g3
1021
1022 HVCALL(error_handler_sun4v_report)
1023
1024 /*
1025 * Must ensure that we get a sun4v report buffer, spin if necessary
1026 */
1027 GET_ERR_SUN4V_RPRT_BUF(%g2, %g3)
1028 brz,pn %g2, 1b
1029 nop
1030
1031 STRAND_STRUCT(%g6)
1032
	! copy the rerouted fields from the strand struct into the report;
	! CPUID is also pushed on the strand stack for the cleanup below
	! NOTE(review): this stx targets G_CPUID, which print_sun4v_erpt
	! reads with lduh (16-bit) -- assumed the field is 64-bit aligned
	! with padding; confirm against offsets.h
1033 set STRAND_REROUTED_CPU, %g4
1034 ldx [%g6 + %g4], %g4
1035 stx %g4, [%g2 + ERR_SUN4V_RPRT_G_CPUID]
1036 STRAND_PUSH(%g4, %g3, %g5)
1037
1038 set STRAND_REROUTED_EHDL, %g4
1039 ldx [%g6 + %g4], %g4
1040 stx %g4, [%g2 + ERR_SUN4V_RPRT_G_EHDL]
1041
1042 set STRAND_REROUTED_STICK, %g4
1043 ldx [%g6 + %g4], %g4
1044 stx %g4, [%g2 + ERR_SUN4V_RPRT_G_STICK]
1045
1046 set STRAND_REROUTED_ATTR, %g4
1047 ldx [%g6 + %g4], %g4
1048 stw %g4, [%g2 + ERR_SUN4V_RPRT_ATTR]
1049 STRAND_PUSH(%g4, %g3, %g5)
1050
1051 ! keep ADDR after EHDL/STICK/ATTR to avoid race
1052 set STRAND_REROUTED_ADDR, %g4
1053 ldx [%g6 + %g4], %g1
1054
1055 ! Clear the strand->rerouted-addr field now to let other
1056 ! errors in.
	! (rerouted_addr is the spinlock taken by cpu_reroute_error)
1057 stx %g0, [%g6 + %g4]
1058 stx %g1, [%g2 + ERR_SUN4V_RPRT_ADDR]
1059
1060 set EDESC_UE_RESUMABLE, %g4
1061 stw %g4, [%g2 + ERR_SUN4V_RPRT_EDESC]
1062
1063 mov ERPT_MEM_SIZE, %g4
1064 st %g4, [%g2 + ERR_SUN4V_RPRT_SZ]
1065
1066 /*
1067 * queue a resumable error report and exit
1068 */
1069 add %g2, ERR_SUN4V_CPU_ERPT, %g2
1070 HVCALL(queue_resumable_erpt)
1071
1072 /*
1073 * Clear the in_use bit on the sun4v report buffer
1074 */
1075 GET_ERR_SUN4V_RPRT_BUF(%g2, %g4)
1076 stub %g0, [%g2 + ERR_SUN4V_RPRT_IN_USE]
1077
1078 ! get the error CPUID to do the necessary cleanup
	! pops are in reverse push order: ATTR first, then CPUID
1079 STRAND_POP(%g2, %g3) ! ATTR
1080 STRAND_POP(%g1, %g3)
1081
1082 /*
1083 * This should be a CPU error report for a strand in error
1084 */
1085 cmp %g2, SUN4V_CPU_RPRT
1086 bne,pt %xcc, 1f
1087 nop
1088
1089 /*
1090 * Must be a different CPU ID for a strand in error
1091 */
1092 VCPU_STRUCT(%g3)
1093 ldub [%g3 + CPU_VID], %g3
1094 cmp %g1, %g4
1095 be,pt %xcc, 1f
1096 nop
1097
1098 /*
1099 * get the vcpu and strand for the vcpu that took the error
1100 * %g1 error vcpu
1101 */
1102 GUEST_STRUCT(%g3)
1103 sllx %g1, GUEST_VCPUS_SHIFT, %g1
1104 add %g1, %g3, %g1
1105 add %g1, GUEST_VCPUS, %g1
1106 ldx [%g1], %g1 ! err vcpu struct
1107 ldx [%g1 + CPU_STRAND], %g2 ! err strand struct
1108
1109 ! deschedule and stop the vcpu
1110 ! %g1 - vcpu struct
1111 ! %g2 - strand struct
1112 HVCALL(desched_n_stop_vcpu)
1113
1114 /*
1115 * If the heartbeat is disabled then it was running on the failed
1116 * cpu and needs to be restarted on this cpu.
1117 */
1118 ROOT_STRUCT(%g2)
1119 ldx [%g2 + CONFIG_HEARTBEAT_CPU], %g2
1120 cmp %g2, -1
1121 bne,pt %xcc, 1f
1122 nop
1123 HVCALL(heartbeat_enable)
11241:
1125
1126 /*
1127 * and exit the x-call handler
1128 */
1129 retry
1130
1131 SET_SIZE(cpu_err_rerouted)
1132
1133 ENTRY_NP(strand_in_error)
1134
1135 STRAND_STRUCT(%g5)
1136 ldub [%g5 + STRAND_ID], %g5
1137 mov 1, %g4
1138 sllx %g4, %g5, %g4
1139
1140 ROOT_STRUCT(%g2) ! config ptr
1141
1142 ! clear this strand from the active list
1143 ldx [%g2 + CONFIG_STACTIVE], %g3
1144 bclr %g4, %g3
1145 stx %g3, [%g2 + CONFIG_STACTIVE]
1146
1147 ! set this strand in the halted list
1148 ldx [%g2 + CONFIG_STHALT], %g3
1149 bset %g4, %g3
1150 stx %g3, [%g2 + CONFIG_STHALT]
1151
1152 ! find another idle strand for re-targetting
1153 ldx [%g2 + CONFIG_STIDLE], %g3
1154 mov 0, %g6
1155.find_strand:
1156 cmp %g5, %g6
1157 be,pn %xcc, .next_strand
1158 mov 1, %g4
1159 sllx %g4, %g6, %g4
1160 andcc %g3, %g4, %g0
1161 bnz,a %xcc, .found_a_strand
1162 nop
1163
1164.next_strand:
1165 inc %g6
1166 cmp %g6, NSTRANDS
1167 bne,pn %xcc, .find_strand
1168 nop
1169
1170 /*
1171 * No usable active strands are left in the
1172 * system, force host exit
1173 */
1174#ifdef CONFIG_VBSC_SVC
1175 ba,a vbsc_guest_exit
1176#else
1177 LEGION_EXIT(%o0)
1178#endif
1179
1180.found_a_strand:
1181 ! %g5 this strand ID
1182 ! %g6 target strand ID
1183
1184 /*
1185 * handoff L2 Steering CPU
1186 * If we are the steering cpu, migrate it to our chosen one
1187 */
1188 setx L2_CONTROL_REG, %g3, %g4
1189 ldx [%g4], %g2 ! current setting
1190 srlx %g2, L2_ERRORSTEER_SHIFT, %g3
1191 and %g3, (NSTRANDS - 1), %g3
1192 cmp %g3, %g5 ! is this steering strand ?
1193 bnz,pt %xcc, 1f
1194 nop
1195
1196 ! It is the L2 Steering strand. Migrate responsibility to tgt strand
1197 sllx %g3, L2_ERRORSTEER_SHIFT, %g3
1198 andn %g3, %g2, %g2 ! remove this strand
1199 sllx %g6, L2_ERRORSTEER_SHIFT, %g3
1200 or %g2, %g3, %g2
1201 stx %g2, [%g4]
1202
12031:
1204 mov %g5, %g1 ! this strand
1205 mov %g6, %g2 ! target strand
1206
1207#ifdef CONFIG_FPGA
1208 /*
1209 * Migrate SSI interrupts
1210 */
1211 STRAND_PUSH(%g1, %g3, %g4)
1212 STRAND_PUSH(%g2, %g3, %g4)
1213 HVCALL(ssi_redistribute_interrupts)
1214 STRAND_POP(%g2, %g3)
1215 STRAND_POP(%g1, %g3)
1216#endif
1217
1218 /*
1219 * Disable heartbeat interrupts if they're on this cpu.
1220 * cpu_in_error_finish will invoke heartbeat_enable on the
1221 * remote cpu if the heartbeat was disabled.
1222 */
1223 STRAND_PUSH(%g1, %g3, %g4)
1224 STRAND_PUSH(%g2, %g3, %g4)
1225 HVCALL(heartbeat_disable)
1226 STRAND_POP(%g2, %g3)
1227 STRAND_POP(%g1, %g3)
1228
1229#ifdef CONFIG_PIU
1230 /*
1231 * if this guest owns a PCIE bus, redirect
1232 * PIU interrupts
1233 */
1234 GUEST_STRUCT(%g3)
1235 ROOT_STRUCT(%g4)
1236 ldx [%g4 + CONFIG_PCIE_BUSSES], %g4
1237 ! check leaf A
1238 ldx [%g4 + PCIE_DEVICE_GUESTP], %g5
1239 cmp %g3, %g5
1240 bne %xcc, 1f
1241 nop
1242
1243 /*
1244 * Migrate PIU intrs
1245 */
1246 STRAND_PUSH(%g1, %g3, %g4)
1247 STRAND_PUSH(%g2, %g3, %g4)
1248 HVCALL(piu_intr_redistribution)
1249 STRAND_POP(%g2, %g3)
1250 STRAND_POP(%g1, %g3)
12511:
1252
1253#if defined(CONFIG_FPGA) && defined(CONFIG_FPGA_UART)
1254 /*
1255 * redirect serial uart interrupts
1256 */
1257 STRAND_PUSH(%g1, %g3, %g4)
1258 STRAND_PUSH(%g2, %g3, %g4)
1259 HVCALL(fpga_uart_intr_redistribute)
1260 STRAND_POP(%g2, %g3)
1261 STRAND_POP(%g1, %g3)
1262#endif /* CONFIG_FPGA`&& CONFIG_FPGA_UART */
1263
1264#endif /* CONFIG_PIU */
1265
1266 /*
1267 * Migrate vdev intrs
1268 */
1269 STRAND_PUSH(%g1, %g3, %g4)
1270 STRAND_PUSH(%g2, %g3, %g4)
1271 HVCALL(vdev_intr_redistribution)
1272 STRAND_POP(%g2, %g3)
1273 STRAND_POP(%g1, %g3)
1274
1275 ! %g1 this strand id
1276 ! %g2 tgt strand id
1277
1278 /*
1279 * Now pick another VCPU in this guest to target the erpt
1280 * Ensure that the VCPU is not bound to the strand in error
1281 */
1282 VCPU_STRUCT(%g1)
1283 GUEST_STRUCT(%g2)
1284 add %g2, GUEST_VCPUS, %g2
1285 mov 0, %g3
1286
1287 ! %g1 - this vcpu struct
1288 ! %g2 - array of vcpus in guest
1289 ! %g3 - vcpu array idx
1290.find_cpu_loop:
1291 ldx [%g2], %g4 ! vcpu struct
1292 brz,pn %g4, .find_cpu_continue
1293 nop
1294
1295 ! ignore this vcpu
1296 cmp %g4, %g1
1297 be,pn %xcc, .find_cpu_continue
1298 nop
1299
1300 ! check whether this CPU is running guest code ?
1301 ldx [%g4 + CPU_STATUS], %g6
1302 cmp %g6, CPU_STATE_RUNNING
1303 bne,pt %xcc, .find_cpu_continue
1304 nop
1305
1306 ! check the error queues.. if not set, not a good candidate
1307 ldx [%g4 + CPU_ERRQR_BASE], %g6
1308 brz,pt %g6, .find_cpu_continue
1309 nop
1310
1311 /*
1312 * find the strand this vcpu is ON, make sure it is idle
1313 * NOTE: currently this check is not necessary, more
1314 * likely when we have sub-strand scheduling
1315 */
1316 ! %g1 - this vcpu struct
1317 ! %g2 - curr vcpu in guest vcpu array
1318 ! %g3 - vcpu array idx
1319 ! %g4 - target vcpus struct
1320 STRAND_STRUCT(%g5) ! this strand
1321 ldx [%g4 + CPU_STRAND], %g6 ! vcpu->strand
1322 cmp %g5, %g6
1323 be,pn %xcc, .find_cpu_continue
1324 nop
1325
1326 ! check if the target strand is IDLE
1327 ldub [%g6 + STRAND_ID], %g6 ! vcpu->strand->id
1328 mov 1, %g5
1329 sllx %g5, %g6, %g6
1330 VCPU2ROOT_STRUCT(%g1, %g5)
1331 ldx [%g5 + CONFIG_STIDLE], %g5
1332 btst %g5, %g6
1333 bnz,pt %xcc, .found_a_cpu
1334 nop
1335
1336.find_cpu_continue:
1337 add %g2, GUEST_VCPUS_INCR, %g2
1338 inc %g3
1339 cmp %g3, NVCPUS
1340 bne,pn %xcc, .find_cpu_loop
1341 nop
1342
1343 ! If we got here, we didn't find a good tgt cpu
1344 ! do not send an erpt, exit the guest
1345
1346 ! HVCALL(guest_exit)
1347
1348 ba,a .skip_sending_erpt
1349
1350.found_a_cpu:
1351 ! %g4 - target vcpu struct
1352
1353 STRAND_STRUCT(%g1) ! this strand
1354
1355 ldx [%g4 + CPU_STRAND], %g3
1356
1357 /*
1358 * It is possible that the target STRANDs rerouted data is already in use.
1359 * We use the rerouted_addr field as a spinlock. The target strand
1360 * will set this to 0 after reading the error data allowing us
1361 * to re-use the rerouting fields.
1362 * See cpu_err_rerouted() below.
1363 *
1364 * %g3 &strands[target]
1365 */
1366 set ERR_INVALID_RA, %g6
1367 set STRAND_REROUTED_ADDR, %g5
1368 add %g3, %g5, %g5
13691: casx [%g5], %g0, %g6
1370 brnz,pn %g6, 1b
1371 nop
1372
1373 ! %g3 target strand struct
1374 ldub [%g1 + CPU_VID], %g6
1375 set STRAND_REROUTED_CPU, %g4
1376 stx %g6, [%g3 + %g4]
1377 GEN_SEQ_NUMBER(%g6, %g5)
1378 set STRAND_REROUTED_EHDL, %g4
1379 stx %g6, [%g3 + %g4]
1380 set SUN4V_CPU_RPRT, %g6
1381 set STRAND_REROUTED_ATTR, %g4
1382 stx %g6, [%g3 + %g4]
1383 GET_ERR_STICK(%g6)
1384 set STRAND_REROUTED_STICK, %g4
1385 stx %g6, [%g3 + %g4]
1386
1387 /*
1388 * Send a xcall to the target cpu so it can finish the work
1389 */
1390 ldub [%g2 + STRAND_ID], %g2 ! tgt strand id
1391 sllx %g2, INT_VEC_DIS_VCID_SHIFT, %g5
1392 or %g5, VECINTR_CPUINERR, %g5
1393 stxa %g5, [%g0]ASI_INTR_UDB_W
1394
1395.skip_sending_erpt:
1396
1397 RESUME_ALL_STRANDS(%g3, %g4, %g5, %g6)
1398
1399 /*
1400 * Clear the error report in_use field
1401 */
1402 GET_ERR_DIAG_BUF(%g4, %g5)
1403 brnz,a,pt %g4, 1f
1404 stub %g0, [%g4 + ERR_DIAG_RPRT_IN_USE]
14051:
1406 /*
1407 * Clear the sun4v report in_use field
1408 */
1409 GET_ERR_SUN4V_RPRT_BUF(%g4, %g5)
1410 brnz,a,pt %g4, 1f
1411 stub %g0, [%g4 + ERR_SUN4V_RPRT_IN_USE]
14121:
1413
1414 ! park myself
1415 STRAND_STRUCT(%g6)
1416 ldub [%g6 + STRAND_ID], %g6
1417 mov 1, %g5
1418 sllx %g5, %g6, %g6
1419 ROOT_STRUCT(%g2) ! %g2 config
1420 add %g2, CONFIG_STACTIVE, %g3
1421 ldx [%g3], %g4
1422 andn %g4, %g6, %g4 ! %g6 my strand
1423 stx %g4, [%g3] ! pull myself off from active CPUs
1424 add %g2, CONFIG_STIDLE, %g2
1425 ldx [%g2], %g3
1426 andn %g6, %g3, %g3 ! %g6 my strand
1427 st %g3, [%g2] ! remove myself from idle CPUs
1428
1429 ! idle this strand
1430 mov CMP_CORE_RUNNING_W1C, %g2
1431 stxa %g6, [%g2]ASI_CMP_CHIP
1432
1433 /*
1434 * If we get here someone else resumed this strand by mistake
1435 * hvabort to catch the mistake
1436 */
1437 ba hvabort
1438 rd %pc, %g1
1439
1440 SET_SIZE(strand_in_error)
1441
	/*
	 * dump_hvabort - snapshot hypervisor state into the diag buffer
	 * before an abort is reported.
	 *
	 * Captures, in the ERR_DIAG_ABORT_DATA area of the per-strand diag
	 * buffer: the aborting %pc, version info, current %cwp, every
	 * register window (%o/%i/%l for all NWINDOWS), the trap stack
	 * (%tt/%tpc/%tnpc/%tstate/%htstate for TL down to 1, if TL != 0),
	 * and all global-register sets (%g0-%g7 for every %gl level).
	 *
	 * In:     hypervisor globals; %g7 preserved via strand stack
	 * Out:    diag buffer populated; %cwp/%tl/%gl restored on exit
	 * Return: HVRET (caller's return address in %g7)
	 */
	ENTRY(dump_hvabort)

	STRAND_PUSH(%g7, %g2, %g3)		! save return address

	GET_ERR_DIAG_BUF(%g1, %g2)
	add	%g1, ERR_DIAG_ABORT_DATA, %g1	! %g1 = &abort data area

	! record the %pc stashed by the abort entry point
	STRAND_STRUCT(%g2)
	ldx	[%g2 + STRAND_ABORT_PC], %g3
	stx	%g3, [%g1 + ERR_ABORT_PC]

	! copy version info into the report
	add	%g1, ERR_ABORT_VERSION, %g2
	mov	ABORT_VERSION_INFO_SIZE, %g3
	HVCALL(dump_version)

	GET_ERR_CWP(%g3)
	stx	%g3, [%g1 + ERR_ABORT_CWP]

	! %g3 %cwp (kept live so it can be restored after the window walk)

	! store this strands register windows
	add	%g1, ERR_ABORT_REG_WINDOWS, %g2
	mov	NWINDOWS - 1, %g4		! %g4 = window being dumped
1:
	mulx	%g4, 24 * 8, %g5		! 24 regs (o+i+l) * 8 bytes/window
	add	%g5, %g2, %g5			! %g5 = slot for window %g4
	wrpr	%g4, %cwp			! switch to that window

	stx	%o0, [%g5 + (0 * 8)]
	stx	%o1, [%g5 + (1 * 8)]
	stx	%o2, [%g5 + (2 * 8)]
	stx	%o3, [%g5 + (3 * 8)]
	stx	%o4, [%g5 + (4 * 8)]
	stx	%o5, [%g5 + (5 * 8)]
	stx	%o6, [%g5 + (6 * 8)]
	stx	%o7, [%g5 + (7 * 8)]
	stx	%i0, [%g5 + (8 * 8)]
	stx	%i1, [%g5 + (9 * 8)]
	stx	%i2, [%g5 + (10 * 8)]
	stx	%i3, [%g5 + (11 * 8)]
	stx	%i4, [%g5 + (12 * 8)]
	stx	%i5, [%g5 + (13 * 8)]
	stx	%i6, [%g5 + (14 * 8)]
	stx	%i7, [%g5 + (15 * 8)]
	stx	%l0, [%g5 + (16 * 8)]
	stx	%l1, [%g5 + (17 * 8)]
	stx	%l2, [%g5 + (18 * 8)]
	stx	%l3, [%g5 + (19 * 8)]
	stx	%l4, [%g5 + (20 * 8)]
	stx	%l5, [%g5 + (21 * 8)]
	stx	%l6, [%g5 + (22 * 8)]
	stx	%l7, [%g5 + (23 * 8)]

	brgz,pt	%g4, 1b				! next lower window
	dec	%g4				! delay slot

	wrpr	%g3, %cwp	! restore %cwp

	! store the trap stack (levels TL..1); skip entirely when TL == 0
	rdpr	%tl, %g4
	brz,pn	%g4, .no_trap_stack
	mov	%g4, %g3			! delay: %g3 = saved TL
	add	%g1, ERR_ABORT_TRAP_REGS, %g2
1:
	mulx	%g4, ERR_TRAP_REGS_SIZE, %g5
	add	%g5, %g2, %g5			! %g5 = slot for level %g4
	wrpr	%g4, %tl			! select trap level

	rdpr	%tt, %g6
	stx	%g6, [%g5 + ERR_TT]
	rdpr	%tpc, %g6
	stx	%g6, [%g5 + ERR_TPC]
	rdpr	%tnpc, %g6
	stx	%g6, [%g5 + ERR_TNPC]
	rdpr	%tstate, %g6
	stx	%g6, [%g5 + ERR_TSTATE]
	rdhpr	%htstate, %g6
	stx	%g6, [%g5 + ERR_HTSTATE]

	dec	%g4
	brgz,pt	%g4, 1b
	nop

	wrpr	%g3, %tl	! restore %tl

.no_trap_stack:

	! now I have all those local registers to play with ....
	! (locals are untouched by %gl switches, unlike globals)
	mov	%g1, %l1			! preserve abort-data pointer
	GET_ERR_GL(%l7)				! %l7 = current %gl, to restore

	! store the global registers for every %gl level
	add	%g1, ERR_ABORT_GLOBAL_REGS, %l2
	mov	MAXGL - 1, %l4			! %l4 = level being dumped
1:
	mulx	%l4, 8 * 8, %l5			! 8 globals * 8 bytes per level
	add	%l5, %l2, %l5			! %l5 = slot for level %l4
	wrpr	%l4, %gl			! switch global set

	stx	%g0, [%l5 + (0 * 8)]
	stx	%g1, [%l5 + (1 * 8)]
	stx	%g2, [%l5 + (2 * 8)]
	stx	%g3, [%l5 + (3 * 8)]
	stx	%g4, [%l5 + (4 * 8)]
	stx	%g5, [%l5 + (5 * 8)]
	stx	%g6, [%l5 + (6 * 8)]
	stx	%g7, [%l5 + (7 * 8)]

	brgz,pt	%l4, 1b				! next lower level
	dec	%l4				! delay slot

	wrpr	%l7, %gl	! restore %gl
	mov	%l1, %g1			! recover abort-data pointer

	/*
	 * Do C/ASM specific bits
	 */
	GET_ERR_TABLE_ENTRY(%g3, %g2)
	lduw	[%g3 + ERR_FLAGS], %g3
	set	ERR_ABORT_ASM, %g2
	btst	%g3, %g2			! ERR_ABORT_ASM flag set ?
	bz,pn	%xcc, .c_dump_hvabort
	nop

.asm_dump_hvabort:
	/*
	 * This is an assembler-initiated abort
	 * fill in .....
	 */
	ba	.dump_hvabort_exit
	nop

.c_dump_hvabort:
	/*
	 * This is a C-initiated abort
	 * fill in .....
	 */
	ba	.dump_hvabort_exit
	nop

.dump_hvabort_exit:
	STRAND_POP(%g7, %g2)			! restore return address
	HVRET
	SET_SIZE(dump_hvabort)
1586
	/*
	 * hvabort - abort entry point for assembler-initiated failures.
	 *
	 * In:  %g1 = calling %pc (callers do "ba hvabort; rd %pc, %g1")
	 *
	 * Prints the failing %pc, stashes it in the strand struct for
	 * dump_hvabort, then tail-calls error_handler with %g1 pointing at
	 * sw_abort_errors[0] (relocation-adjusted).  Does not return.
	 */
	ENTRY_NP(hvabort)
	mov	%g1, %g6	! keep calling %pc clear of print macro clobbers
	HV_PRINT_NOTRAP("ABORT: Failure 0x");
	HV_PRINTX_NOTRAP(%g6)

	! stash the calling %pc
	STRAND_STRUCT(%g2)
	set	STRAND_ABORT_PC, %g3
	stx	%g6, [%g2 + %g3]

	! ASM abort errors use sw_abort_errors[0]
	setx	sw_abort_errors, %g2, %g3
	RELOC_OFFSET(%g2, %g4)
	ba	error_handler		! tail call
	sub	%g3, %g4, %g1		! delay: %g1 = relocated table entry

	SET_SIZE(hvabort)
1607
	/*
	 * c_hvabort - abort entry point for C-initiated failures.
	 *
	 * In:  %o7 = C caller's return address (serves as the abort %pc)
	 *
	 * Stashes the caller's %pc in the strand struct for dump_hvabort,
	 * then tail-calls error_handler with %g1 pointing at
	 * sw_abort_errors[1] (relocation-adjusted).  Does not return.
	 */
	ENTRY(c_hvabort)

	! stash the calling %pc
	STRAND_STRUCT(%g2)
	set	STRAND_ABORT_PC, %g3
	stx	%o7, [%g2 + %g3]

	setx	sw_abort_errors, %g2, %g3
	RELOC_OFFSET(%g2, %g4)
	sub	%g3, %g4, %g3		! %g3 = relocated &sw_abort_errors[0]

	! C abort errors use sw_abort_errors[1]
	set	ERROR_TABLE_ENTRY_SIZE, %g2
	ba	error_handler		! tail call
	add	%g3, %g2, %g1		! delay: %g1 = &sw_abort_errors[1]
	SET_SIZE(c_hvabort)