Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / legion / src / procs / sunsparc / libniagara2 / niagara2_error.c
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* OpenSPARC T2 Processor File: niagara2_error.c
5* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
6* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
7*
8* The above named program is free software; you can redistribute it and/or
9* modify it under the terms of the GNU General Public
10* License version 2 as published by the Free Software Foundation.
11*
12* The above named program is distributed in the hope that it will be
13* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
14* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15* General Public License for more details.
16*
17* You should have received a copy of the GNU General Public
18* License along with this work; if not, write to the Free Software
19* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
20*
21* ========== Copyright Header End ============================================
22*/
23/*
24 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
25 * Use is subject to license terms.
26 */
27#pragma ident "@(#)niagara2_error.c 1.8 07/02/28 SMI"
28
29#include <stdio.h>
30#include <stdlib.h>
31#include <unistd.h>
32#include <strings.h>
33
34#include "ss_common.h"
35#include "niagara2.h"
36
37
38#if ERROR_INJECTION
39/*
40 * This file contains Niagara 2 specific error injection routines.
41 */
/*
 * Populate the global ss_error_list[] table, indexed by error type, from a
 * compact initializer describing every injectable Niagara 2 error.  Must be
 * called once before any of the *_error_match() routines are used.
 */
void niagara2_init_error_list()
{
	int i;
	/*
	 * Setup the error list in terms of the following format:
	 *
	 * Error <number, name>, Trap <type, name>, Trap <priority>, Encoded <errtype>, CERER.<bit>
	 *
	 */
	ss_error_desc_t error_init_list[] = {
	{ E( ITTM ), T( instruction_access_MMU_error), 1, 1, B( ITTM, 61 ) },
	{ E( ITTP ), T( instruction_access_MMU_error), 2, 2, B( ITTP, 63 ) },
	{ E( ITDP ), T( instruction_access_MMU_error), 3, 3, B( ITDP, 62 ) },
	{ E( ITMU ), T( instruction_access_MMU_error), 4, 4, B( HWTWMU, 59) },
	{ E( ITL2U ), T( instruction_access_MMU_error), 5, 5, B( (char *)0,0) },
	{ E( ITL2ND ), T( instruction_access_MMU_error), 5, 6, B( (char *)0, 0) },
	{ E( ICL2U ), T( instruction_access_MMU_error), 6, 1, B( ICL2U, 54) },
	{ E( ICL2ND ), T( instruction_access_MMU_error), 6, 2, B( ICL2ND, 53) },
	{ E( IRFU ), T( internal_processor_error ), 1, 1, B( IRF, 52) },
	{ E( IRFC ), T( internal_processor_error ), 2, 2, B( IRF, 52) },
	{ E( FRFU ), T( internal_processor_error ), 3, 3, B( FRF, 50) },
	{ E( FRFC ), T( internal_processor_error ), 4, 4, B( FRF, 50) },
	{ E( SBDLC ), T( internal_processor_error ), 10, 5, B( SBDLC, 37) },
	{ E( SBDLU ), T( internal_processor_error ), 10, 6, B( SBDLU, 36) },
	{ E( MRAU ), T( internal_processor_error ), 11, 7, B( MRAU, 33) },
	{ E( TSAC ), T( internal_processor_error ), 11, 8, B( TSAC, 32) },
	{ E( TSAU ), T( internal_processor_error ), 11, 9, B( TSAU, 31) },
	{ E( SCAC ), T( internal_processor_error ), 11, 10, B( SCAC, 30) },
	{ E( SCAU ), T( internal_processor_error ), 11, 11, B( SCAU, 29) },
	{ E( TCCP ), T( internal_processor_error ), 11, 12, B( TCCP, 28) },
	{ E( TCCU ), T( internal_processor_error ), 11, 13, B( TCCU, 6) },
	{ E( DTTM ), T( data_access_MMU_error ), 5, 1, B( DTTM, 47) },
	{ E( DTTP ), T( data_access_MMU_error ), 6, 2, B( DTTP, 48) },
	{ E( DTDP ), T( data_access_MMU_error ), 7, 3, B( DTDP, 46) },
	{ E( DTMU ), T( data_access_MMU_error ), 8, 4, B( HWTWMU, 59) },
	{ E( DTL2U ), T( data_access_MMU_error ), 9, 5, B( (char *)0, 0) },
	{ E( DTL2ND ), T( data_access_MMU_error ), 9, 6, B( (char *)0, 0) },
	{ E( DCL2U ), T( data_access_error ), 11, 1, B( DCL2U, 39) },
	{ E( DCL2ND ), T( data_access_error ), 11, 2, B( DCL2ND, 38) },
	{ E( SOCU ), T( data_access_error ), 12, 4, B( (char *)0, 0) },
	{ E( ICVP ), T( hw_corrected_error ), Pri(11, 1), 1, B( ICVP, 18) },
	{ E( ICTP ), T( hw_corrected_error ), Pri(11, 2), 2, B( ICTP, 17) },
	{ E( ICTM ), T( hw_corrected_error ), Pri(11, 3), 3, B( ICTM, 16) },
	{ E( ICDP ), T( hw_corrected_error ), Pri(11, 4), 4, B( ICDP, 15) },
	{ E( DCVP ), T( hw_corrected_error ), Pri(12, 1), 5, B( DCVP, 14) },
	/*
	 * NOTE(review): DCVP above is also Pri(12, 1); by analogy with the
	 * ICVP/ICTP/ICTM/ICDP sequence, DCTP was possibly meant to be
	 * Pri(12, 2) — confirm against the T2 PRM before changing.
	 */
	{ E( DCTP ), T( hw_corrected_error ), Pri(12, 1), 6, B( DCTP, 13) },
	{ E( DCTM ), T( hw_corrected_error ), Pri(12, 3), 7, B( DCTM, 12) },
	{ E( DCDP ), T( hw_corrected_error ), Pri(12, 4), 8, B( DCDP, 11) },
	{ E( L2C ), T( hw_corrected_error ), 13, 9, B( (char *)0, 0) },
	{ E( SBDPC ), T( hw_corrected_error ), 14, 10, B( SBDPC, 10) },
	{ E( SOCC ), T( hw_corrected_error ), 15, 11, B( (char *)0, 0) },
	{ E( SBDPU ), T( sw_recoverable_error), 1, 6, B( SBDPU, 9) },
	{ E( TCCD ), T( sw_recoverable_error), 2, 14, B( TCCD, 7) },
	{ E( TCUD ), T( sw_recoverable_error), 2, 15, B( TCUD, 6) },
	{ E( MAMU ), T( sw_recoverable_error), 3, 7, B( MAMU, 8) },
	{ E( MAL2C ), T( sw_recoverable_error), 3, 8, B( MAL2C, 5) },
	{ E( MAL2U ), T( sw_recoverable_error), 3, 9, B( MAL2U, 4) },
	{ E( MAL2ND ), T( sw_recoverable_error), 3, 10, B( MAL2ND,3) },
	{ E( CWQL2C ), T( sw_recoverable_error), 4, 11, B( CWQL2C, 2) },
	{ E( CWQL2U ), T( sw_recoverable_error), 4, 12, B( CWQL2U, 1) },
	{ E( CWQL2ND ), T( sw_recoverable_error), 4, 13, B( CWQL2ND, 0) },
	{ E( L2C ), T( sw_recoverable_error), 5, 20, B( (char *)0, 0) },
	{ E( L2U ), T( sw_recoverable_error), 5, 16, B( (char *)0, 0) },
	{ E( L2ND ), T( sw_recoverable_error), 5, 17, B( (char *)0, 0) },
	{ E( ITL2C ), T( sw_recoverable_error), 6, 1, B( (char *)0, 0) },
	{ E( ICL2C ), T( sw_recoverable_error), 6, 2, B( (char *)0, 0) },
	{ E( DTL2C ), T( sw_recoverable_error), 6, 3, B( (char *)0, 0) },
	{ E( DCL2C ), T( sw_recoverable_error), 6, 4, B( (char *)0, 0) },
	{ E( SOCU ), T( sw_recoverable_error), 7, 19, B( (char *)0, 0) },
	/* sentinel: NONE terminates the scatter loop below */
	{ E( NONE ), 0, (char *)0, 0, 0, B( (char *)0, 0) }
	};

	/* scatter the compact list into ss_error_list[], indexed by error type */
	for (i = 0; error_init_list[i].error_type != NONE; i ++)
		ss_error_list[error_init_list[i].error_type] = error_init_list[i];
}
117
118
119void extract_error_type(error_conf_t *errorconfp)
120{
121 int i;
122
123 for (i = 1; i< ERROR_MAXNUM; i++) {
124 if (streq(lex.strp, ss_error_list[i].error_name)) {
125 errorconfp->type_namep = strdup(lex.strp);
126 errorconfp->type = ss_error_list[i].error_type;
127 return;
128 }
129 }
130
131 lex_fatal("unknown error type parsing error config");
132}
133
134
135void update_errflags(simcpu_t * sp)
136{
137 sp->errorp->check_xdcache = (find_errconf(sp, (LD|ST),
138 (IRFC|IRFU|FRFC|FRFU))) ? true : false;
139 sp->errorp->check_xicache = (find_errconf(sp, IFETCH,
140 (ICVP|ICTP|ICTM|ICDP))) ? true : false;
141 sp->errorp->check_dtlb = (find_errconf(sp, (LD|ST),
142 (DTTM|DTTP|DTDP|DTMU))) ? true : false;
143}
144
145
/*
 * If demap of tlb entry with parity error detected then remove error config
 */
void tlb_entry_error_match(simcpu_t *sp, ss_mmu_t *mmup, tlb_entry_t *tep)
{
	error_conf_t *ep;

	FIXME_WARNING(("tlb_entry_error_match() is not implemented!"));

#if ERROR_INJECTION_FIXME
	/*
	 * Disabled legacy implementation, kept for reference: when the tlb
	 * entry tracked in errorp->itep/dtep is demapped, drop the matching
	 * IMDU/DMDU error config and clear the tracking pointer.
	 * NOTE(review): IMDU/DMDU and sp->error_enabled look like Niagara 1
	 * era names — re-validate against the T2 error list before enabling.
	 */
	DBGERR( lprintf(sp->gid, "ss_tlb_insert(): errorp->itep=%x"
		" errorp->dtep=%x tep=%x\n",
		sp->errorp->itep, sp->errorp->dtep, tep); );

	if (sp->error_enabled) {
		/* IMMU side: the demapped entry is the one IMDU is tracking */
		if (sp->errorp->itep == tep && mmup->is_immu) {
			if ((ep = find_errconf(sp, IFETCH, IMDU)) == NULL)
				goto tlb_warning;
			/* removing the last config clears all flags */
			if (remove_errconf(sp, ep) == NULL)
				clear_errflags(sp); else update_errflags(sp);
			sp->errorp->itep = NULL;
			return;
		} else
		/* DMMU side: same handling for the DMDU-tracked entry */
		if (sp->errorp->dtep == tep && !mmup->is_immu) {
			if ((ep = find_errconf(sp, (LD|ST), DMDU)) == NULL)
				goto tlb_warning;
			if (remove_errconf(sp, ep) == NULL)
				clear_errflags(sp); else update_errflags(sp);
			sp->errorp->dtep = NULL;
			return;
		}
		return;

tlb_warning:	EXEC_WARNING(("tlb_entry_error_match(): tracking tlb"
		    " entry in error for non-existent error config"));
	}
#endif /* ERROR_INJECTION_FIXME */
}
184
185
186bool_t itlb_hit_error_match(simcpu_t *sp, tlb_entry_t *tep)
187{
188 error_t *errorp = sp->errorp;
189 error_conf_t *ep;
190
191 if (sp->error_check && (ep = find_errconf(sp, IFETCH, ITDP))) {
192 if (errorp->itep) {
193 DBGERR( lprintf(sp->gid, "ss_xic_miss(): "
194 " errorp->itep=%x, tep=%x\n", errorp->itep, tep); );
195 if ((tlb_entry_t *)errorp->itep == tep) {
196 ss_error_condition(sp, ep);
197 return true;
198 }
199 } else {
200 errorp->itep = tep;
201 ss_error_condition(sp, ep);
202 return true;
203 }
204 }
205
206 return false;
207}
208
209
/*
 * Decide whether a DTLB hit on the given entry should raise a configured
 * dtlb error for this load/store.  Currently stubbed out: the body is
 * compiled away and the function always returns false.
 */
bool_t dtlb_hit_error_match(simcpu_t *sp, int op, tlb_entry_t *tep, tpaddr_t va)
{
	error_t *errorp = sp->errorp;
	error_conf_t *ep;

	FIXME_WARNING(("dtlb_hit_error_match() is not implemented!"));

#if ERROR_INJECTION_FIXME

	if (sp->error_check == true && errorp->check_dtlb) {
		bool_t is_load, is_store;

		is_load = IS_V9_MA_LOAD(op);
		is_store = IS_V9_MA_STORE(op);

		if (is_load)
			ep = find_errconf(sp, LD, DMDU);
		else
		if (is_store) ep = find_errconf(sp, ST, DMSU);

		/*
		 * NOTE(review): ep is read uninitialized here when op is
		 * neither a load nor a store — initialize ep to NULL before
		 * re-enabling this code.
		 */
		if (ep) {
			if (errorp->dtep) {
				DBGERR( lprintf(sp->gid, "ss_memory_asi_access: "
					"errorp->dtep=%x, tep=%x\n", errorp->dtep,tep); );
				/* hit on the latched entry: fire again */
				if ((tlb_entry_t *)errorp->dtep == tep) {
					ss_error_condition(sp, ep);
					return true;
				}
			} else {
				/* first hit: latch entry and fault address */
				errorp->dtep = tep;
				errorp->addr = va;
				ss_error_condition(sp, ep);
				return true;
			}
		}
	}
#endif /* ERROR_INJECTION_FIXME */

	return false;
}
250
251
/*
 * Check an instruction-cache miss at physical address pa against the
 * configured ifetch error injections.  Currently stubbed out: the body is
 * compiled away and the function is a no-op.
 */
void xicache_error_match(simcpu_t *sp, tpaddr_t pa)
{
	error_t *errorp = sp->errorp;

	FIXME_WARNING(("xicache_error_match() is not implemented!"));

#if ERROR_INJECTION_FIXME
	if (sp->error_check == true && errorp->check_xicache) {
		ss_proc_t *npp;
		error_conf_t *ep;

		DBGERR( lprintf(sp->gid, "ss_xic_miss(): ifetch cache hit\n"); );

		ep = find_errconf(sp, IFETCH, ITC|IDC|LDAC|LDAU|DAC|DAU);
		npp = sp->config_procp->procp;

		/*
		 * NOTE(review): `bank` is used below but never declared in
		 * this scope — declare it (and re-check ITC/IDC/LDAC/etc.
		 * against the T2 error names) before re-enabling.
		 */
		if (ep) {
			switch(ep->type) {
			case ITC:
			case IDC:
				/* icache tag/data error: record address and fire */
				errorp->addr = pa;
				ss_error_condition(sp, ep);
				break;

			case LDAC:
			case LDAU:
				/* L2 load errors only fire when all banks enabled */
				for (bank=0; bank<npp->num_l2banks; bank++) {
					if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
				}
				errorp->addr = pa;
				ss_error_condition(sp, ep);
				break;

			case DAC:
				for (bank=0; bank<npp->num_l2banks; bank++) {
					if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
				}
				errorp->addr = pa;
				ss_error_condition(sp, ep);
				break;

			case DAU:
				for (bank=0; bank<npp->num_l2banks; bank++) {
					if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
				}
				errorp->addr = pa;
				ss_error_condition(sp, ep);
				break;

l2_disabled:			DBGERR( lprintf(sp->gid, "ss_xic_miss: No LDAC/LDAU Error"
				    " - L2 disabled\n"); );
				break;
			default:
				break;
			}
		}
	}

#endif /* ERROR_INJECTION_FIXME */

}
313
314
315bool_t l2dram_access_error_match(simcpu_t *sp, int op, tpaddr_t pa)
316{
317 error_t *errorp = sp->errorp;
318 ss_proc_t *npp = sp->config_procp->procp;
319 error_conf_t *ep;
320
321 FIXME_WARNING(("l2dram_access_error_match() is not implemented!"));
322
323 if (npp->error_check) {
324 l2c_t *l2p = npp->l2p;
325 bool_t is_load, is_store, is_atomic;
326 uint8_t bank;
327
328 is_load = IS_V9_MA_LOAD(op);
329 is_store = IS_V9_MA_STORE(op);
330 is_atomic = IS_V9_MA_ATOMIC(op);
331
332#if ERROR_INJECTION_FIXME
333 /* check if direct-map mode displacement flushing the error cacheline */
334 l2p = npp->l2p;
335 bank = (pa >> 6) & 0x3;
336 if (l2p->control[bank] & L2_DMMODE) {
337 if ((pa & L2_DM_MASK) == (npp->errorp->ldac_addr & L2_DM_MASK)) {
338 npp->errorp->ldac_addr = NULL;
339 ss_set_errcheck(npp);
340 goto npp_err_done;
341 }
342 if ((pa & L2_DM_MASK) == (npp->errorp->ldau_addr & L2_DM_MASK)) {
343 npp->errorp->ldac_addr = NULL;
344 ss_set_errcheck(npp);
345 goto npp_err_done;
346 }
347 }
348
349 /*
350 * when accessing cacheline with error: load or partial store
351 * causes LDAC or LDAU, store to line with correctible error clears it,
352 * store to uncorrectible causes a writeback error
353 */
354 if (pa == npp->errorp->ldac_addr) {
355 if (is_load ||
356 (is_store && (size == MA_Size8 || size == MA_Size16))) {
357 ep = new_errconf((is_load ? LD : ST), LDAC);
358 ep->npp = true;
359 goto lda_err;
360 } else if (is_store) {
361 npp->errorp->ldac_addr = NULL;
362 ss_set_errcheck(npp);
363 }
364 } else if (pa = npp->errorp->ldau_addr) {
365 if (is_load ||
366 (is_store && (size == MA_Size8 || size == MA_Size16))) {
367 ep = new_errconf((is_load ? LD : ST), LDAU);
368 ep->npp = true;
369 goto lda_err;
370 } else if (is_store) {
371 npp->errorp->ldau_addr = NULL;
372 ss_set_errcheck(npp);
373 }
374 }
375 }
376
377npp_err_done:
378
379 /* now check for errors to be generated from this thread's error list */
380 if (sp->error_check && errorp->check_xdcache) {
381 bool_t is_load, is_store, is_atomic;
382 uint8_t bank;
383 xicache_t * xicp;
384 xicache_instn_t * xip;
385 uint64_t xidx;
386 tvaddr_t xpc;
387
388 is_load = IS_V9_MA_LOAD(op);
389 is_store = IS_V9_MA_STORE(op);
390 is_atomic = IS_V9_MA_ATOMIC(op);
391
392 if (is_load) ep = find_errconf(sp, LD,
393 (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU));
394 else
395 if (is_store) ep = find_errconf(sp, ST,
396 (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU));
397
398 if (ep)
399 switch(ep->type) {
400 case IRC:
401 case IRU:
402 case FRC:
403 case FRU:
404 xicp = sp->xicachep;
405 xpc = sp->pc;
406 xidx = (xpc>>2) & XICACHE_NUM_INSTR_MASK;
407 xip = &xicp->instn[xidx];
408 errorp->reg = X_RS1(xip->rawi);
409 ss_error_condition(sp, ep);
410 return true;
411 case DTC:
412 case DDC:
413 errorp->addr = pa;
414 ss_error_condition(sp, ep);
415 return true;
416lda_err: case LDAU:
417 case LDAC:
418 l2p = npp->l2p;
419 for (bank=0; bank<npp->num_l2banks; bank++) {
420 if (l2p->control[bank] & L2_DIS) goto l2_disabled;
421 }
422 if (is_load) {
423 if (is_atomic) errorp->l2_write = L2_RW_bit;
424 errorp->addr = pa;
425 ss_error_condition(sp, ep);
426 return true;
427 } else
428 if (is_store && (size == MA_Size8 || size == MA_Size16)) {
429 errorp->l2_write = L2_RW_bit;
430 errorp->partial_st = true;
431 errorp->addr = pa;
432 ss_error_condition(sp, ep);
433 return true;
434 }
435 break;
436
437ldw_err: case LDWU:
438 case LDWC:
439 l2p = npp->l2p;
440 for (bank=0; bank<npp->num_l2banks; bank++) {
441 if (l2p->control[bank] & L2_DIS) goto l2_disabled;
442 }
443 if (is_store) {
444 errorp->addr = pa;
445 ss_error_condition(sp, ep);
446 return true;
447 }
448 break;
449
450 case DAC:
451 l2p = npp->l2p;
452 for (bank=0; bank<npp->num_l2banks; bank++) {
453 if (l2p->control[bank] & L2_DIS) goto l2_disabled;
454 }
455 if (ep->op == LD && is_load) {
456 if (is_atomic) errorp->l2_write = L2_RW_bit;
457 errorp->addr = pa;
458 ss_error_condition(sp, ep);
459 return true;
460 } else
461 if (ep->op == ST && is_store) {
462 if (size == MA_Size8 || size == MA_Size16)
463 errorp->partial_st = true;
464 errorp->l2_write = L2_RW_bit;
465 errorp->addr = pa;
466 ss_error_condition(sp, ep);
467 return true;
468 }
469 break;
470
471 case DAU:
472 l2p = npp->l2p;
473 for (bank=0; bank<npp->num_l2banks; bank++) {
474 if (l2p->control[bank] & L2_DIS) goto l2_disabled;
475 }
476 if (ep->op == LD && is_load) {
477 if (is_atomic) errorp->l2_write = L2_RW_bit;
478 errorp->addr = pa;
479 ss_error_condition(sp, ep);
480 return true;
481 } else
482 if (ep->op == ST && is_store) {
483 if (size == MA_Size8 || size == MA_Size16)
484 errorp->partial_st = true;
485 errorp->l2_write = L2_RW_bit;
486 errorp->addr = pa;
487 ss_error_condition(sp, ep);
488 return true;
489 }
490 break;
491
492l2_disabled: DBGERR( lprintf(sp->gid, "ss_memory_asi_access: "
493 "No LDAC/LDWC/LDAU/LDWU/DAC Error - L2 disabled\n"); );
494 break;
495 }
496#endif /* ERROR_INJECTION_FIXME */
497 }
498
499 return false;
500}
501
502
503bool_t tlb_data_access_error_match(simcpu_t *sp, ss_mmu_t *mmup, uint64_t idx)
504{
505 error_conf_t *ep;
506
507 FIXME_WARNING(("tlb_data_access_error_match() is not implemented!"));
508
509 if (sp->error_check == true && (ep = find_errconf(sp, ASI_LD, ITDP|DTDP))) {
510 if (ep->type == ITDP && mmup->is_immu) {
511 sp->errorp->tlb_idx[IMDU_IDX] = idx;
512 ss_error_condition(sp, ep);
513 return true;
514 } else
515 if (ep->type == DTDP && !mmup->is_immu) {
516 sp->errorp->tlb_idx[DMDU_IDX] = idx;
517 ss_error_condition(sp, ep);
518 return true;
519 }
520 }
521
522 return false;
523}
524
525
526bool_t tlb_tag_access_error_match(simcpu_t *sp, ss_mmu_t *mmup, uint64_t idx)
527{
528 error_conf_t *ep;
529
530 FIXME_WARNING(("tlb_tag_access_error_match() is not implemented!"));
531
532 if (sp->error_check == true && (ep = find_errconf(sp, ASI_LD, ITTP|DTTP))) {
533
534 if (ep->type == ITTP && mmup->is_immu) {
535 sp->errorp->tlb_idx[IMTU_IDX] = idx;
536 ss_error_condition(sp, ep);
537 return true;
538 } else
539 if (ep->type == DTTP && !mmup->is_immu) {
540 sp->errorp->tlb_idx[DMTU_IDX] = idx;
541 ss_error_condition(sp, ep);
542 return true;
543 }
544 }
545
546 return false;
547}
548
549
/*
 * Deliver the architectural effects of an injected error: set the relevant
 * error status/address registers (strand, L2 bank, or DRAM bank) and post
 * the corresponding trap if the matching enable bits are set.
 *
 * Only the ITDP case is live; everything under ERROR_INJECTION_FIXME is
 * carried-over Niagara 1 style code that is compiled out pending rework.
 */
void ss_error_condition(simcpu_t *sp, error_conf_t *ep)
{
	ss_strand_t * nsp;
	ss_proc_t * npp;
	l2c_t * l2p;
	mcu_bank_t * dbp;
	simcpu_t * esp;	/* strand actually receiving the trap (errorsteer) */
	sparcv9_cpu_t * v9p;
	sparcv9_trap_type_t tt;
	error_t * errorp;
	uint8_t bank,tid;
	int idx;

	v9p = sp->specificp;
	nsp = v9p->impl_specificp;
	npp = sp->config_procp->procp;
	errorp = sp->errorp;

	DBGERR( lprintf(sp->gid, "ss_error_condition() etype = %s\n", ep->type_namep); );

	switch (ep->type) {
	case ITDP:
		/* itlb data parity: gated by the CERER enable bit for ITDP */
		if (ep->op == IFETCH) {
			if (nsp->error.cerer & ss_error_list[ITDP].enable_bit) {
				nsp->error.isfsr = ss_error_list[ITDP].error_code;
				N_TPC(v9p, v9p->tl) = MMU_PC(sp->pc);
				tt = (sparcv9_trap_type_t)ss_error_list[ITDP].trap_type;
				v9p->post_precise_trap(sp, tt);
			}
		}
		return;
#if ERROR_INJECTION_FIXME
	/* integer register file, correctable: ECC_error trap if CEEN */
	case IRC:
		nsp->error.status = NA_IRC_bit;
		nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp)
			| I_SYND(IREG_FAKE_SYND_SINGLE));
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	/* integer register file, uncorrectable: internal_processor_error if NCEEN */
	case IRU:
		nsp->error.status = NA_IRU_bit;
		nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp)
			| I_SYND(IREG_FAKE_SYND_DOUBLE));
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_internal_processor_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	/* FP register file, correctable */
	case FRC:
		nsp->error.status = NA_FRC_bit;
		nsp->error.addr = (F_REG_NUM(errorp->reg) |
			EVEN_SYND(FREG_FAKE_SYND_SINGLE) | ODD_SYND(NULL));
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	/* FP register file, uncorrectable */
	case FRU:
		nsp->error.status = NA_FRU_bit;
		nsp->error.addr = (F_REG_NUM(errorp->reg) |
			EVEN_SYND(FREG_FAKE_SYND_DOUBLE) | ODD_SYND(NULL));
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_internal_processor_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	/* itlb tag parity on diagnostic access */
	case IMTU:
		nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit);
		nsp->error.addr = TLB_INDEX(errorp->tlb_idx[IMTU_IDX]);
		errorp->tlb_idx[IMTU_IDX] = NULL;
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	/* dtlb tag parity on diagnostic access */
	case DMTU:
		/*
		 * NOTE(review): this sets NA_IMTU_bit, same as the IMTU case
		 * above — looks like a copy-paste of the IMTU case; confirm
		 * whether NA_DMTU_bit was intended before enabling.
		 */
		nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit);
		nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMTU_IDX]);
		errorp->tlb_idx[DMTU_IDX] = NULL;
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	/* dtlb data parity: diagnostic (ASI_LD) vs normal access paths differ */
	case DMDU:
		if (ep->op == ASI_LD) {
			nsp->error.status = (NA_PRIV_bit|NA_DMDU_bit);
			nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMDU_IDX]);
			errorp->tlb_idx[DMDU_IDX] = NULL;
			if (nsp->error.enabled & NA_NCEEN) {
				tt = Sparcv9_trap_data_access_error;
				v9p->post_precise_trap(sp, tt);
			}
			if (remove_errconf(sp, ep) == NULL)
				clear_errflags(sp); else update_errflags(sp);
		} else {
			/* normal access: PRIV set only for privileged requesters */
			nsp->error.status = NA_DMDU_bit;
			nsp->error.status |= (ep->priv == V9_HyperPriv ||
				ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
			nsp->error.addr = MMU_VA(errorp->addr);
			if (nsp->error.enabled & NA_NCEEN) {
				tt = Sparcv9_trap_data_access_error;
				v9p->post_precise_trap(sp, tt);
			}
		}
		break;
	/* dtlb parity on store */
	case DMSU:
		nsp->error.status = NA_DMSU_bit;
		nsp->error.status |= (ep->priv == V9_HyperPriv ||
			ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = MMU_VA(errorp->addr);
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		break;
	/* icache tag (ITC) / data (IDC) parity share the delivery path */
	case ITC:
		nsp->error.status = NA_ITC_bit;
		goto icache_error;
	case IDC:
		nsp->error.status = NA_IDC_bit;
icache_error:	nsp->error.status |= (ep->priv == V9_HyperPriv ||
			ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = L1_PA(errorp->addr);
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	/* dcache tag (DTC) / data (DDC) parity share the delivery path */
	case DTC:
		nsp->error.status = NA_DTC_bit;
		goto dcache_error;
	case DDC:
		nsp->error.status = NA_DDC_bit;
dcache_error:	nsp->error.status |= (ep->priv == V9_HyperPriv ||
			ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = L1_PA(errorp->addr);
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case MAU:
		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
		break;
	/* L2 load correctable: latch bank status, trap if both enables set */
	case LDAC:
		bank = (errorp->addr >> 6) & 0x3;
		l2p = npp->l2p;
		tid = nsp->vcore_id;
		l2p->error_status[bank] = L2_LDAC_bit | L2_TID(tid) | L2_VEC_bit |
			L2_FAKE_SYND_SINGLE | errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if ((nsp->error.enabled & NA_CEEN) &&
		    (l2p->error_enable[bank] & L2_CEEN)) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		/* l2 corrected on partial store or atomic hit */
		if (errorp->l2_write) {
			npp->errorp->ldac_addr = NULL;
			ss_set_errcheck(npp);
		} else {
			int idx;

			/* l2 uncorrected on load/ifetch hit so make error proc-wide */
			npp->error_check = true;
			npp->errorp->ldac_addr = errorp->addr;
			/*
			 * NB: proper behavior is to flush all cpu xdcache's
			 * but there is no lock on the xdc so I didn't try it
			 */
			sp->xdcache_trans_flush_pending = true;
		}

		/* bit of a hack - some errorconf's aren't owned by sp's so free them */
		if (ep->npp) free(ep);
		else {
			if (remove_errconf(sp, ep) == NULL)
				clear_errflags(sp); else update_errflags(sp);
		}
		break;
	/* L2 writeback correctable: trap is steered to the ERRORSTEER strand */
	case LDWC:
		bank = (errorp->addr >> 6) & 0x3;
		l2p = npp->l2p;
		tid = nsp->vcore_id;
		l2p->error_status[bank] = L2_LDWC_bit | L2_TID(tid) | L2_VEC_bit |
			L2_FAKE_SYND_SINGLE | L2_RW_bit;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		tid = (l2p->control[bank] & L2_ERRORSTEER);
		v9p = npp->strand[STRANDID2IDX(npp, tid)];
		nsp = v9p->impl_specificp;
		esp = v9p->simp;
		if ((nsp->error.enabled & NA_CEEN) &&
		    (l2p->error_enable[bank] & L2_CEEN)) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(esp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case LDRC:
	case LDSC:
		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
		break;
	/* L2 load uncorrectable: latch, trap, and poison the diag arrays */
	case LDAU:
		bank = (errorp->addr >> 6) & 0x3;
		l2p = npp->l2p;
		tid = nsp->vcore_id;
		l2p->error_status[bank] = L2_LDAU_bit | L2_TID(tid) | L2_VEU_bit |
			L2_FAKE_SYND_DOUBLE | errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if (l2p->error_enable[bank] & L2_NCEEN) {
			nsp->error.status = NA_LDAU_bit;
			nsp->error.status |= (ep->priv == V9_HyperPriv ||
				ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
			nsp->error.addr = L1_PA(errorp->addr);
			if (nsp->error.enabled & NA_NCEEN) {
				tt = (ep->type == IFETCH)
					? Sparcv9_trap_instruction_access_error
					: Sparcv9_trap_data_access_error;
				v9p->post_precise_trap(sp, tt);
			}
		}
		/*
		 * store error info to cacheline for error handler diag access
		 * and to support direct-mapped mode displacement flushing
		 */
		/*
		 * NOTE(review): in the three idx computations below, `>>`
		 * binds tighter than `&`, so the shift applies to the mask
		 * rather than to the masked address — verify the intended
		 * grouping (likely `(addr & MASK) >> n`) before enabling.
		 */
		/* index stores to a 32bit word and its ECC+rsvd bits */
		idx = errorp->addr & (L2_WAY | L2_LINE | L2_BANK | L2_WORD) >> 2;
		/* put oddeven select bit low so data is in addr order */
		idx |= ((errorp->addr >> L2_ODDEVEN_SHIFT) & 1);
		l2p->diag_datap[idx] = ((0xabbadada << 7) | L2_FAKE_SYND_DOUBLE);

		/* index stores to a tag and its ECC+rsvd bits */
		idx = errorp->addr & (L2_WAY | L2_LINE | L2_BANK) >> 6;
		l2p->diag_tagp[idx] = (errorp->addr & L2_TAG) >> 12;

		/* index valid/dirty or alloc/used bits and parity */
		idx = errorp->addr & (L2_LINE | L2_BANK) >> 6;
		idx |= ((errorp->addr & L2_VDSEL) >> 10);
		l2p->diag_vuadp[idx] = 0xfff << 12; /* all lines valid/clean */

		/* uncorrectible error in l2 so make it proc-wide */
		npp->error_check = true;
		npp->errorp->ldau_addr = errorp->addr;
		sp->xdcache_trans_flush_pending = true;

		/* bit of a hack - some errorconf's aren't owned by sp's so free them */
		if (ep->npp) free(ep);
		else {
			if (remove_errconf(sp, ep) == NULL)
				clear_errflags(sp); else update_errflags(sp);
		}
		break;
	/* L2 writeback uncorrectable: steered trap, error becomes proc-wide */
	case LDWU:
		bank = (errorp->addr >> 6) & 0x3;
		l2p = npp->l2p;
		tid = nsp->vcore_id;
		l2p->error_status[bank] = L2_LDWU_bit | L2_TID(tid) | L2_VEU_bit |
			L2_FAKE_SYND_DOUBLE | L2_RW_bit;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if ((nsp->error.enabled & NA_NCEEN) &&
		    (l2p->error_enable[bank] & L2_NCEEN)) {
			tid = (l2p->control[bank] & L2_ERRORSTEER);
			v9p = npp->strand[STRANDID2IDX(npp, tid)];
			esp = v9p->simp;
			tt = (sparcv9_trap_type_t)N1_trap_data_error;
			v9p->post_precise_trap(esp, tt);
		}
		npp->error_check = true;
		npp->errorp->ldau_addr = errorp->addr;

		/* bit of a hack - some errorconf's aren't owned by sp's so free them */
		if (ep->npp) free(ep);
		else {
			if (remove_errconf(sp, ep) == NULL)
				clear_errflags(sp); else update_errflags(sp);
		}
		break;
	case LDRU:
	case LDSU:
	case LTC:
	case LVU:
	case LRU:
		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
		break;
	/* DRAM correctable: set DRAM and L2 bank status, possibly steer trap */
	case DAC:
		l2p = npp->l2p;
		bank = (errorp->addr >> 6) & 0x3;
		dbp = &(npp->mbankp[bank]);
		dbp->error_status = DRAM_DAC_bit | DRAM_FAKE_SYND_SINGLE;

		/* if store miss and L2 disabled then only set DRAM error status */
		/*
		 * NOTE(review): the loop below computes nothing that is used
		 * afterwards (bank is immediately recomputed) — the
		 * "L2 disabled" early-out it describes was never finished.
		 */
		if (ep->op == ST && !errorp->partial_st) {
			for (bank=0; bank<npp->num_l2banks; bank++) {
				if (l2p->control[bank] & L2_DIS)
					break;
			}
		}

		bank = (errorp->addr >> 6) & 0x3;
		tid = nsp->vcore_id;
		l2p->error_status[bank] = L2_DAC_bit | L2_TID(tid) | L2_VEC_bit |
			errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if ((nsp->error.enabled & NA_CEEN) &&
		    (l2p->error_enable[bank] & L2_CEEN)) {
			/*
			 * partial stores and odd-numbered cache lines
			 * redirected to errorsteer thread
			 */
			if (errorp->partial_st || (errorp->addr & 0x40)) {
				tid = (l2p->control[bank] & L2_ERRORSTEER);
				v9p = npp->strand[STRANDID2IDX(npp, tid)];
				esp = v9p->simp;
				l2p->error_status[bank] &= ~(errorp->l2_write);
				tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
				v9p->post_precise_trap(esp, tt);
			} else {
				tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
				v9p->post_precise_trap(sp, tt);
			}
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case DSC:
	/* DRAM uncorrectable: reported as LDAU per Table 12-4 of the PRM */
	case DAU:
		l2p = npp->l2p;
		bank = (errorp->addr >> 6) & 0x3;
		dbp = &(npp->mbankp[bank]);
		dbp->error_status = DRAM_DAU_bit | DRAM_FAKE_SYND_DOUBLE;
		tid = nsp->vcore_id;
		l2p->error_status[bank] = L2_DAU_bit | L2_TID(tid) | L2_VEU_bit |
			errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if (l2p->error_enable[bank] & L2_NCEEN) {
			nsp->error.status = NA_LDAU_bit; /* as per Table 12-4 of PRM */
			nsp->error.status |= (ep->priv == V9_HyperPriv ||
				ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
			/*
			 * partial stores and odd-numbered cache lines
			 * redirected to errorsteer thread
			 */
			if (errorp->partial_st || (errorp->addr & 0x40)) {
				tid = (l2p->control[bank] & L2_ERRORSTEER);
				v9p = npp->strand[STRANDID2IDX(npp, tid)];
				esp = v9p->simp;
				l2p->error_status[bank] &= ~(errorp->l2_write);
				/*
				 * set address to non-requested 16B block
				 * within the same 64B cache line
				 */
				if (!errorp->partial_st)
					errorp->addr = (errorp->addr & ~0x30) |
						(((errorp->addr & 0x30) + 0x10) % 0x40);
				nsp->error.addr = L1_PA(errorp->addr);
				tt = (sparcv9_trap_type_t)N1_trap_data_error;
				v9p->post_precise_trap(esp, tt);
				break;
			}
			nsp->error.addr = L1_PA(errorp->addr);
			if (nsp->error.enabled & NA_NCEEN) {
				tt = (ep->type == IFETCH)
					? Sparcv9_trap_instruction_access_error
					: Sparcv9_trap_data_access_error;
				v9p->post_precise_trap(sp, tt);
			}
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case DSU:
	case DBU9:
	case DRAM:
		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
		break;

	default:
		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		EXEC_WARNING(("Unspecified Error Type: %s\n", ep->type_namep));
		break;
#endif /* ERROR_INJECTION_FIXME */
	}

	FIXME_WARNING(("ss_error_condition() is not implemented!"));
}
959#endif /* ERROR_INJECTION */