/*
 * ========== Copyright Header Begin ==========================================
 *
 * OpenSPARC T2 Processor File: ss_err_trap.c
 * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
 *
 * The above named program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * The above named program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this work; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * ========== Copyright Header End ============================================
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident "@(#)ss_err_trap.c 1.17 07/02/28 SMI"

#if ERROR_TRAP_GEN /* { */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h> /* memcpy/memset */
#include <strings.h>
#include <thread.h>
#include <errno.h>

#include "ss_common.h"

#ifdef NIAGARA1
#include "niagara.h"
#endif

#ifdef NIAGARA2
#include "niagara2.h"
#endif

#ifdef ROCK
#include "rock.h"
#endif


#if (ERROR_TRAP_GEN > 1)
#define ERR_TRAP_VERBOSE(_s) do { _s } while (0)
#else
#define ERR_TRAP_VERBOSE(_s) do { } while (0)
#endif

/*
 * error file parsing functions
 */
static error_event_t *add_error_event(error_event_t *);
static error_asi_t *add_error_asi(error_asi_t *);
static void parse_error_event(error_event_t *);
static void parse_error_asi(error_asi_t *);
static void parse_error_def(ss_proc_t *);

/* temp head pointers for dynamic error file reload */
static error_event_t *temp_error_event_list_rootp = NULL;
static error_asi_t *temp_error_asi_list_rootp = NULL;

extern void debug_set_breakpoint(tvaddr_t bpaddr);

/*
 * Called exactly once for every error event which we want to inject
 * into the system.
 */
void
ss_inject_error_trap( simcpu_t * sp, char * err_name, sparcv9_trap_type_t trap_only_tt, int sp_intr_only)
{
    int idx, i;
    ss_error_entry_t *error_table;
    ss_sp_error_t *sp_error_table;
    uint64_t reg_val, mask_val;
    bool_t is_new_trap, is_ready;
    int target_cpu, strand_idx;
    simcpu_t *trap_sp;
    ss_proc_t *rpp;
    sparcv9_cpu_t *tv9p;
    sparcv9_cpu_t *v9p;
    ss_strand_t *tstrandp;
    ss_strand_t *rsp;

    rpp = (ss_proc_t *)(sp->config_procp->procp);
    v9p = (sparcv9_cpu_t *)(sp->specificp);
    rsp = v9p->impl_specificp;

    error_table = rpp->ss_err_state.err_event_tbl;
    sp_error_table = rpp->ss_err_state.sp_err_tbl;

    /*
     * Calling code must ensure that the ready_for_next_injection
     * flag has been changed from true to false before calling this
     * routine.
     * We may set it back to true in this routine based on whether
     * or not there is a trap to post.
     */
    ASSERT(rpp->ss_err_state.ready_for_next_injection == false);

    /*
     * Search for the specific error we are generating in
     * the global error table.
     */
    for (idx=0; error_table[idx].trap_type != INVALID_TRAP; idx++) {
        if (strcmp(error_table[idx].error_name, err_name) == 0) {
            break;
        }
    }

    if (strcmp(error_table[idx].error_name, END_ERR_STRING) == 0) {
        lprintf(sp->gid, "ERROR_TRAP_GEN: %s not found. Unable to inject\n", err_name);
        rpp->ss_err_state.ready_for_next_injection = true;
        return;
    }

    /*
     * Special case for injecting traps which are not associated
     * with any particular error.
     */
    if (strcmp(error_table[idx].error_name, TRAP_ERR_STRING) == 0) {
        error_table[idx].trap_type = trap_only_tt;
    }

ERR_TRAP_VERBOSE(
    lprintf(sp->gid, "ERROR_TRAP_GEN: Injecting:\n");
    lprintf(sp->gid, " name = %s\n", error_table[idx].error_name);
    if (error_table[idx].trap_target != TARGET_SP)
        lprintf(sp->gid, " trap_type = 0x%x\n", (int)error_table[idx].trap_type);

    lprintf(sp->gid, " trap_class = %d (precise=%d, deferred=%d, disrupting=%d, SP_intr=%d)\n",
        (int)error_table[idx].trap_class, PRECISE_TT, DEFERRED_TT, DISRUPTING_TT, SP_INTR);
    lprintf(sp->gid, " is_persistent = %d\n", (int)error_table[idx].is_persistent);
    lprintf(sp->gid, " trap_target = 0x%x\n", error_table[idx].trap_target);
);

    /*
     * First check the error recording value/mask
     * if applicable to this error.
     *
     * All bits which are specified in the mask must be turned on.
     *
     * Otherwise, we simply do nothing (return).
     */
    if (error_table[idx].error_record.eer_access) {
        /*
         * We are interested in the register value for the strand
         * that is encountering the error. i.e. the current CPU.
         */
        reg_val = error_table[idx].error_record.eer_access(sp, ASI_NA, ADDR_NA, true, 0, true);
        mask_val = error_table[idx].error_record.mask;
        if ((reg_val & mask_val) != mask_val) {
            lprintf(sp->gid, "ERROR_TRAP_GEN: Error Recording Register is OFF for %s\n",
                error_table[idx].error_name);
            rpp->ss_err_state.ready_for_next_injection = true;
            return;
        }
    }

    ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: Error Recording Register is ON for %s\n",
        error_table[idx].error_name););

    /*
     * Next we inject the ESR value as specified by the mask
     */
    if (error_table[idx].error_status.esr_inject) {
        /*
         * We inject the ESR of the strand that is encountering
         * the error. i.e. the current CPU.
         *
         * Return value tells us whether the injection of this
         * particular ESR mask will result in a new trap. As
         * opposed to simply turning on the "ME" bit for
         * instance.
         */
        is_new_trap = error_table[idx].error_status.esr_inject(sp, error_table[idx].error_status.err_inject_mask, &error_table[idx]);

        /* update global flag */
        rpp->ss_err_state.esrs_clear = false;
    } else {
        /*
         * if no ESR is associated with this error trap,
         * then we always post a new trap.
         */
        is_new_trap = true;
    }

    if (is_new_trap == false) {
        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: No new trap to generate\n"););
        rpp->ss_err_state.ready_for_next_injection = true;
        return;
    }

ERR_TRAP_VERBOSE(
    if (error_table[idx].trap_target != TARGET_SP)
        lprintf(sp->gid, "ERROR_TRAP_GEN: Error %s has been injected. Attempting to post TT 0x%x\n",
            error_table[idx].error_name, (int)error_table[idx].trap_type);
    else
        lprintf(sp->gid, "ERROR_TRAP_GEN: Error %s has been injected. Attempting to post SP interrupt\n",
            error_table[idx].error_name);
);

    /*
     * Target CPU for this error trap is specified by
     * error_table[idx].trap_target
     *
     * 'TARGET_MYSELF' means always post trap to
     * the CPU encountering the error. In this case,
     * that would be the current CPU.
     *
     * 'TARGET_SP' means post an interrupt to the
     * service processor.
     */
    target_cpu = error_table[idx].trap_target;
    if (target_cpu == TARGET_SP) {
        trap_sp = sp;
    } else {
        if (target_cpu == TARGET_MYSELF) {
            tv9p = v9p;
            trap_sp = sp;
        } else {
            strand_idx = STRANDID2IDX(rpp, target_cpu);

            ASSERT(VALIDIDX(rpp, strand_idx));

            tv9p = rpp->strand[strand_idx];
            trap_sp = tv9p->simp;
        }

        tstrandp = tv9p->impl_specificp;
    }

    /*
     * Verify that we are not allowing more than one
     * outstanding error trap injection at a time.
     */
    ASSERT(rpp->ss_err_state.inj_error_trap == NULL);

    /*
     * We must first check the error reporting registers. If
     * the trap is ready to be posted, then we will do so.
     * (The check below works for non-maskable error traps
     * too since the eer_access routines for those will be
     * NULL.)
     *
     * Otherwise, if the trap is not ready to be posted due
     * to one or more error reporting register settings, we
     * simply update this strand's inj_error_trap pointer
     * if it is a disrupting error trap, or do nothing for
     * precise and deferred error traps.
     *
     * ss_check_error_traps() will take care of re-checking the
     * error reporting registers after any CPU state change in
     * the future for the disrupting traps.
     */

    is_ready = true;

    if (error_table[idx].error_report.eer_access) {
        /*
         * We are interested in the register value for the strand
         * that will receive the trap, i.e. the target CPU, which
         * may or may not be the current CPU.
         */
        reg_val = error_table[idx].error_report.eer_access(trap_sp, ASI_NA, ADDR_NA, true, 0, true);
        mask_val = error_table[idx].error_report.mask;
        if ((reg_val & mask_val) != mask_val)
            is_ready = false;
    }

    ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: Error Reporting Register is %s for %s\n",
        is_ready ? "ON" : "OFF", error_table[idx].error_name););

    /*
     * All disrupting error traps are also conditioned by the PSTATE.IE
     * bit when HPSTATE.HPRIV is set.
     */
    if (error_table[idx].trap_class == DISRUPTING_TT) {
        /* ss_check_error_traps() will post the trap when ready */
        rpp->ss_err_state.inj_error_trap = &(error_table[idx]);
        rpp->ss_err_state.trap_target_gid = trap_sp->gid;

        if ((v9p->state == V9_HyperPriv) && !(v9p->pstate.int_enabled))
            is_ready = false;

        if (is_ready)
            tv9p->post_precise_trap(trap_sp, error_table[idx].trap_type);

        lprintf(sp->gid, "ERROR_TRAP_GEN: TT 0x%x is %s for %s\n",
            error_table[idx].trap_type, is_ready ? "posted" : "pending",
            error_table[idx].error_name);
    } else
    if (error_table[idx].trap_class == SP_INTR) {
        /* search for the error entry in the sp table */
        for (idx=0; strcmp(sp_error_table[idx].error_name, END_ERR_STRING) != 0; idx++) {
            if (strcmp(sp_error_table[idx].error_name, err_name) == 0) {
                break;
            }
        }
        if (strcmp(sp_error_table[idx].error_name, END_ERR_STRING) == 0) {
            lprintf(sp->gid, "ERROR_TRAP_GEN: %s not found in sp error table. Unable to inject\n", err_name);
            rpp->ss_err_state.ready_for_next_injection = true;
            return;
        }

        /*
         * Special case for injecting a service processor interrupt
         * not associated with any particular error.
         */
        if (strcmp(sp_error_table[idx].error_name, SP_INTR_ERR_STRING) == 0) {
            sp_error_table[idx].sp_intr = sp_intr_only;
        }

        if (is_ready) {
            /* Post the SP interrupt */
            if (sp->config_procp->proc_typep->sp_interrupt(sp, sp_error_table[idx].sp_intr,
                sp_error_table[idx].error_name))
                lprintf(sp->gid, "ERROR_TRAP_GEN: SP INTERRUPT 0x%x is posted for %s\n",
                    sp_error_table[idx].sp_intr, sp_error_table[idx].error_name);
            else
                lprintf(sp->gid, "ERROR_TRAP_GEN: SP INTERRUPT 0x%x failed or unsupported for %s\n",
                    sp_error_table[idx].sp_intr, sp_error_table[idx].error_name);
        } else {
            lprintf(sp->gid, "ERROR_TRAP_GEN: SP INTERRUPT 0x%x is dropped for %s\n",
                sp_error_table[idx].sp_intr, sp_error_table[idx].error_name);
        }
        rpp->ss_err_state.ready_for_next_injection = true;
        rpp->ss_err_state.inj_error_trap = NULL;
    } else {
        if (is_ready) {
            /* Post the trap */
            rpp->ss_err_state.inj_error_trap = &(error_table[idx]);
            rpp->ss_err_state.trap_target_gid = trap_sp->gid;
            tv9p->post_precise_trap(trap_sp, error_table[idx].trap_type);
        } else {
            /*
             * No traps to post or hold pending, so we can once
             * again allow injection of new errors.
             */
            rpp->ss_err_state.ready_for_next_injection = true;
        }

        lprintf(sp->gid, "ERROR_TRAP_GEN: TT 0x%x is %s for %s\n",
            error_table[idx].trap_type, is_ready ? "posted" : "dropped",
            error_table[idx].error_name);
    }

    return;
}

/*
 * If there is an error_asi_list associated with this error,
 * do the following for each node:
 *
 * -) Using the id field, check if this node is already in the
 *    permanent asi list (from a previous trigger of this error).
 *    if yes:
 *        ensure its access_cnt is reset using the value in
 *        the error_asi
 *    if no:
 *        copy this error_asi from the error_asi list into
 *        the head of the permanent asi_list
 */
void
update_error_asi_list(simcpu_t *sp)
{
    error_asi_t *easip;
    error_asi_t *temp_easip;
    bool_t found;
    ss_proc_t *rpp = (ss_proc_t *)(sp->config_procp->procp);
    error_asi_t *temp_asi_listp;

    temp_asi_listp = sp->eep->temp_error_asi_list_rootp;

    if (temp_asi_listp == NULL)
        return; /* nothing to do */

    pthread_mutex_lock(&rpp->ss_err_state.err_lock);

    /* For each node in the temp_asi_listp */
    for (temp_easip = temp_asi_listp; temp_easip != NULL;
        temp_easip = temp_easip->nextp) {

        found = false;

        /* compare against each node in the perm_asi_list */
        for (easip = rpp->ss_err_state.error_asi_list_rootp;
            easip != NULL; easip = easip->nextp) {

            if (easip->id == temp_easip->id) {

                /* found a match - re-arm this error_asi */
                found = true;
                easip->access_cnt = temp_easip->access_cnt;

                ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: Updating single-use ASI " \
                    "override associated with this error event (id-%u)\n",
                    easip->id););
            } /* if */

        } /* for */

        if (!found) {
            error_asi_t *new_error_asip = NULL;

            /*
             * create a new error_asi, copy in the data from the temp_asi
             * and add it to the head of the permanent list
             */
            new_error_asip = add_error_asi(new_error_asip);
            bcopy(temp_easip, new_error_asip, sizeof(error_asi_t));
            new_error_asip->nextp = rpp->ss_err_state.error_asi_list_rootp;
            rpp->ss_err_state.error_asi_list_rootp = new_error_asip;

            ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: Adding single-use ASI " \
                "override associated with this error event (id-%u)\n",
                new_error_asip->id););
        } /* if */

    } /* for */

    pthread_mutex_unlock(&rpp->ss_err_state.err_lock);

    ERR_TRAP_VERBOSE(
        lprintf(sp->gid, "ERROR_TRAP_GEN: Updated Error ASI Override list\n");
        dump_error_asi_list(sp->gid, rpp->ss_err_state.error_asi_list_rootp);
        lprintf(sp->gid, "ERROR_TRAP_GEN: END Error ASI Override list\n\n");
    );
}

/*
 * Returns TRUE if the triggered error has been injected.
 */
bool_t
trigger_error_trap(simcpu_t * sp)
{
    ss_proc_t *rpp;

    /*
     * Check for common case.
     */
    if ((sp->eep == NULL) || (sp->eep->ee_status != EE_TRIGGERED))
        return false;

    rpp = (ss_proc_t *)(sp->config_procp->procp);

    /*
     * Serialize the number of outstanding injections.
     */
    pthread_mutex_lock(&rpp->ss_err_state.injection_lock);

    if (rpp->ss_err_state.ready_for_next_injection == false) {
        pthread_mutex_unlock(&rpp->ss_err_state.injection_lock);
        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: Injection already in progress. Will try again later\n"););
        return false;
    } else {
        rpp->ss_err_state.ready_for_next_injection = false;
        pthread_mutex_unlock(&rpp->ss_err_state.injection_lock);
    }

    ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: CPU is ready for injection\n"););

    /*
     * If there are any user-defined ASI overrides associated
     * with this error, then add them to the head of the asi
     * override list. This needs to take into account that the
     * same error may trigger multiple times so we need to
     * update the asi list associated with this error if we
     * detect that this error has already triggered.
     */
    update_error_asi_list(sp);

    /*
     * Next we inject the actual error trap or SP interrupt.
     */
    if (sp->eep->options.bits.error_str) {
        ss_inject_error_trap(sp, sp->eep->error_str, 0, 0);
    } else if (sp->eep->options.bits.sp_intr) {
        ss_inject_error_trap(sp, SP_INTR_ERR_STRING, 0, sp->eep->sp_intr);
    } else
        ss_inject_error_trap(sp, TRAP_ERR_STRING, sp->eep->trap_num, 0);

    /*
     * Finally check to see whether this CPU should be watching
     * for any new error events to be triggered.
     *
     * trigger_cnt:
     *   >1 means trigger N times, 1 means trigger once
     */
    if (sp->eep->trigger_cnt > 1) {
        sp->eep->trigger_cnt -= 1;

        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: Error event trigger cnt = 0x%x. " \
            "re-arming existing triggers\n", sp->eep->trigger_cnt););

        sp->eep->ee_status = EE_ASSIGNED;
        sp->error_pending = true;
    } else {
        sp->eep->trigger_cnt = 0;
        /*
         * See if there are any other events which may need to
         * be triggered on this CPU.
         */
        check_pending_error_events(sp);
    }

    return true;
}

/*
 * Called every time the CPU state has been changed.
 */
void
ss_check_error_traps(simcpu_t * sp)
{
    ss_proc_t *rpp;
    sparcv9_cpu_t *v9p;
    ss_strand_t *rsp;
    ss_error_entry_t *ep;
    bool_t is_ready;
    uint64_t reg_val, mask_val, err_pending_mask;
    int idx;
    ss_error_entry_t *error_table;

    rpp = (ss_proc_t *)(sp->config_procp->procp);
    v9p = (sparcv9_cpu_t *)(sp->specificp);
    rsp = v9p->impl_specificp;

    error_table = rpp->ss_err_state.err_event_tbl;

    /*
     * If this strand's inj_error_trap pointer contains
     * a valid pointer, it means we are trying to inject
     * an error trap so we check to see if it is ready
     * to be posted.
     */
    ep = rpp->ss_err_state.inj_error_trap;
    if ((ep) && (rpp->ss_err_state.trap_target_gid == sp->gid)) {
        is_ready = true;

        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: TT 0x%x still pending for this CPU\n",
            ep->trap_type););

        if (ep->error_report.eer_access) {
            /*
             * We are interested in the register value for the
             * current strand.
             */
            reg_val = ep->error_report.eer_access(sp, ASI_NA, ADDR_NA, true, 0, true);
            mask_val = ep->error_report.mask;
            if ((reg_val & mask_val) != mask_val)
                is_ready = false;
        }
        /*
         * All disrupting error traps are also conditioned by the
         * PSTATE.IE bit when HPSTATE.HPRIV is set.
         */
        if ((ep->trap_class == DISRUPTING_TT) &&
            ((v9p->state == V9_HyperPriv) && !(v9p->pstate.int_enabled)))
            is_ready = false;

        if (is_ready) {
            lprintf(sp->gid, "ERROR_TRAP_GEN: Posting TT 0x%x to this CPU\n", ep->trap_type);
            v9p->post_precise_trap(sp, ep->trap_type);
        }

        return;
    }

    /*
     * Have any new error events been triggered on this CPU
     * and not yet injected?
     */
    if (trigger_error_trap(sp)) {
        /*
         * If that call resulted in injection of an error
         * event, then we are done for now.
         */
        return;
    }

    /*
     * Global assumptions about error handling on all SunSPARC
     * CPUs:
     *
     * 1) Only maskable error traps can be persistent.
     *
     *    By "persistent", we mean that the error trap keeps
     *    getting generated over and over again as long as the
     *    Error Status Register and Error Enabling Register
     *    conditions are satisfied. Otherwise, the trap handler
     *    won't be able to make forward progress.
     *
     * 2) All disrupting traps which are persistent will also
     *    be conditioned by the PSTATE.IE bit when
     *    HPSTATE.HPRIV is set.
     *
     * With the above rules in mind:
     *
     * Now it is time to search through our entire list of
     * error traps, and for each and every "persistent" error
     * type found which is targeted at the current CPU,
     * post a trap if the ESR and EER conditions for that
     * error are satisfied.
     */

    /*
     * performance optimization which might help
     * us in some cases.
     */
    if (rpp->ss_err_state.esrs_clear) return;

DBGERRTRAP( lprintf(sp->gid, "ERROR_TRAP_GEN: Checking error list for persistent error ESR bits\n"); );

    for (idx=0; error_table[idx].trap_type != INVALID_TRAP; idx++) {
        if ((error_table[idx].trap_target == rsp->vcore_id) &&
            (error_table[idx].is_persistent)) {
            /*
             * Check to see if the Error Status bit(s) for this error
             * are turned on. Note that all persistent error traps
             * must have a valid esr_access() routine.
             */
            ASSERT(error_table[idx].error_status.esr_access);

            reg_val = error_table[idx].error_status.esr_access(sp, ASI_NA, ADDR_NA, true, 0, true);
            /*
             * The err_pending_mask is all the bits that can indicate an error condition in an ESR.
             * It is used to retrieve the error bits from the ESR, which are then compared against
             * the err_inject_mask. If a match is found, it indicates that the error corresponding
             * to this err_inject_mask is still pending.
             */
            err_pending_mask = error_table[idx].error_status.err_pending_mask;
            mask_val = error_table[idx].error_status.err_inject_mask;

            if (err_pending_mask) {
                if ((reg_val & err_pending_mask) != mask_val)
                    goto ss_check_next_err;
            } else { /* if err_pending_mask is not set for an error entry */
                if ((reg_val & mask_val) != mask_val)
                    goto ss_check_next_err;
            }

DBGERRTRAP( lprintf(sp->gid, "ERROR_TRAP_GEN: ESR bit is set for persistent error (%s) " \
    "targeted at the current CPU\n", error_table[idx].error_name););

            /*
             * Check to see if the Error Reporting conditions are satisfied
             * for this strand.
             */
            if (error_table[idx].error_report.eer_access) {
                reg_val = error_table[idx].error_report.eer_access(sp, ASI_NA, ADDR_NA, true, 0, true);
                mask_val = error_table[idx].error_report.mask;
                if ((reg_val & mask_val) != mask_val)
                    goto ss_check_next_err;
            }

            /*
             * Check to see if the Error Recording conditions are satisfied
             * for this strand.
             */
            if (error_table[idx].error_record.eer_access) {
                reg_val = error_table[idx].error_record.eer_access(sp, ASI_NA, ADDR_NA, true, 0, true);
                mask_val = error_table[idx].error_record.mask;
                if ((reg_val & mask_val) != mask_val)
                    goto ss_check_next_err;
            }

            /*
             * All disrupting error traps are also conditioned by the
             * PSTATE.IE bit when HPSTATE.HPRIV is set.
             */
            if ((error_table[idx].trap_class == DISRUPTING_TT) &&
                ((v9p->state == V9_HyperPriv) && !(v9p->pstate.int_enabled)))
                goto ss_check_next_err;

            lprintf(sp->gid, "ERROR_TRAP_GEN: Posting TT 0x%x for persistent error (%s) " \
                "to the current CPU\n", error_table[idx].trap_type, error_table[idx].error_name);

            v9p->post_precise_trap(sp, error_table[idx].trap_type);

ss_check_next_err:;
        }
    }
DBGERRTRAP( lprintf(sp->gid, "ERROR_TRAP_GEN: Finished checking error list\n"); );
}

/*
 * Checks the global list of error events which were given to us
 * by the user. If we find an error event targeted at the
 * current CPU which has not yet been triggered, assign it to
 * this CPU and set up the trigger conditions accordingly.
 */
void
check_pending_error_events(simcpu_t * sp)
{
    ss_proc_t *rpp;
    sparcv9_cpu_t *v9p;
    ss_strand_t *rsp;
    error_event_t *eep;

    rpp = (ss_proc_t *)(sp->config_procp->procp);
    v9p = (sparcv9_cpu_t *)(sp->specificp);
    rsp = v9p->impl_specificp;

    sp->eep = NULL;
    eep = rpp->ss_err_state.error_event_list_rootp;

    for ( ; eep != NULL ; eep = eep->nextp) {
        if ((eep->target_cpuid == rsp->vcore_id) &&
            (eep->ee_status == EE_PARSED)) {
            break;
        }
    }

    if (eep) {
        /*
         * The following console output is a bit wordy
         * and inefficient, but this is not performance
         * critical code in any way.
         */
ERR_TRAP_VERBOSE(
        lprintf(sp->gid, "ERROR_TRAP_GEN: Setting up CPU triggers:\n");

        /*
         * Either the error_string or a trap number must have been parsed
         */
        if (eep->options.bits.sp_intr)
            lprintf(sp->gid, " SP interrupt = 0x%x\n", eep->sp_intr);
        else
        if (eep->options.bits.error_str)
            lprintf(sp->gid, " error_str = %s\n", eep->error_str);
        else
            lprintf(sp->gid, " trap num = 0x%x\n", eep->trap_num);
        if (eep->options.bits.sp_intr)
            lprintf(sp->gid, " targ_cpuid = SP\n");
        else
            lprintf(sp->gid, " targ_cpuid = 0x%llx\n", eep->target_cpuid);
        lprintf(sp->gid, " instr_cnt = 0x%llx (or later)\n", eep->instn_cnt);

        if (eep->options.bits.address)
            lprintf(sp->gid, " address = 0x%llx\n", eep->address.addr);
        else
            lprintf(sp->gid, " address = <ANY ADDRESS>\n");

        switch (eep->address.access) {
        case ERROR_ON_LOAD:
            lprintf(sp->gid, " access type = LOAD\n");
            break;
        case ERROR_ON_STORE:
            lprintf(sp->gid, " access type = STORE\n");
            break;
        default:
            lprintf(sp->gid, " access type = LOAD or STORE\n");
        }

        if (eep->options.bits.priv) {
            switch (eep->priv) {
            case V9_User:
                lprintf(sp->gid, " priv level = USER\n");
                break;
            case V9_Priv:
                lprintf(sp->gid, " priv level = PRIVILEGED\n");
                break;
            case V9_HyperPriv:
                lprintf(sp->gid, " priv level = HYPERPRIVILEGED\n");
                break;
            default:
                lprintf(sp->gid, " priv level = <ERROR IN PARSING>\n");
                fatal("Error in parsing for error_event. Unknown priv level specified");
            } /* switch */
        } else {
            lprintf(sp->gid, " priv level = <ANY PRIV LEVEL>\n");
        }

        if (eep->tl == ERROR_TL_NONE)
            lprintf(sp->gid, " trap level = <ANY TRAP LEVEL>\n");
        else
            lprintf(sp->gid, " trap level = %d\n", eep->tl);
); /* ERR_TRAP_VERBOSE */

        /*
         * setup to catch this error. We can trigger an error
         * when the following parameters are met:
         *
         * 1) on an instn_cnt - done
         * 2) on an address access (load, store or either) - done
         * 3) on an address access after an instn_cnt
         * 4) any address in Priv, Hpriv or User mode
         */
        sp->eep = eep;

        if (eep->options.bits.pc) {
            lprintf(sp->gid, "ERROR_TRAP_GEN: Setting breakpoint for %%pc=0x%llx\n", eep->pc);
            debug_set_breakpoint(eep->pc);
        }

        /*
         * Catch the case where we do not trigger the error until
         * we've reached a specified instn_cnt
         */
        if ((eep->instn_cnt == ERROR_INSTN_CNT_NONE) || (sp->cycle >= eep->instn_cnt)) {
            sp->error_cycle_reached = true;
        } else {
            sp->error_cycle_reached = false;
        }

        sp->error_cycle = eep->instn_cnt;

        sp->error_pending = true;

    } else {
        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: No more errors to trigger on this CPU\n"););
        sp->eep = NULL;
    }
}

/*
 * This routine is called from ss_take_exception as one of the last
 * steps before a CPU takes a trap.
 *
 * This allows us to compare the trap about to be taken against
 * the trap we are currently trying to inject. If it is a match,
 * then we know that our injection was successful.
 *
 * This is needed so that we don't lose error injection traps
 * which might otherwise get lost if it just so happened that
 * a higher priority trap (such as MMU miss) occurs at the same
 * time we are injecting an error trap.
 *
 * Note: It is very important that this routine be called before
 * the final call to ss_check_interrupts() in ss_take_exception().
 * Otherwise we could end up with two traps being delivered for
 * every error injected.
 */
void
ss_error_taking_trap( simcpu_t * sp, sparcv9_trap_type_t trap_type )
{
    ss_proc_t *rpp;
    ss_error_entry_t *ep;

    rpp = (ss_proc_t *)(sp->config_procp->procp);

    ep = rpp->ss_err_state.inj_error_trap;
    if ((ep) && (ep->trap_type == trap_type)) {
        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: TT 0x%x has been taken by this CPU\n", trap_type););
        rpp->ss_err_state.inj_error_trap = NULL;
        rpp->ss_err_state.ready_for_next_injection = true;
    }
}

/*
 * This routine checks the list of user provided
 * ASI overrides. Each ASI override can be specified
 * as being global (valid for all cpus) or only
 * valid for a specified set of cpuids (using a
 * 64bit mask).
 *
 * If we find a match, we record the value in
 * asi_reg on a store operation, or apply both of
 * our masks to the returned value if this is a
 * load operation.
 *
 * Each ASI override entry contains two masks:
 *   First we 'nand' the nand_mask
 *   Then we 'or' in the or_mask
 *
 * If "is_found" is true, it means we already have
 * a value (pointed to by "val") and so we would
 * just update that value based on our mask values.
 * Otherwise we use the value stored in asi_reg
 * and apply our masks on top of that.
 */
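
/*
 * Worked example (illustrative numbers only): with nand_mask = 0x00ff and
 * or_mask = 0x0001, a load that would have returned 0x1234 is rewritten to
 *
 *	(0x1234 & ~0x00ff) | 0x0001 == 0x1201
 *
 * i.e. the nand_mask clears bits and the or_mask then forces bits on.
 */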
bool_t
ss_check_user_asi_list(simcpu_t *sp, int asi, tvaddr_t addr, uint64_t *val, bool_t is_load, bool_t is_found)
{
    ss_proc_t *rpp;
    error_asi_t *easip;
    sparcv9_cpu_t *v9p;
    ss_strand_t *nsp;
    uint64_t vcpuid;
    bool_t found = false;
    bool_t ret;

    v9p = (sparcv9_cpu_t *)(sp->specificp);
    nsp = v9p->impl_specificp;
    rpp = (ss_proc_t *)(sp->config_procp->procp);
    vcpuid = nsp->vcore_id;

    pthread_mutex_lock(&rpp->ss_err_state.err_lock);

    /*
     * Search the ASI override list for a match
     */
    for (easip = rpp->ss_err_state.error_asi_list_rootp; easip != NULL;
        easip = easip->nextp) {

        /* see if there is a match */
        if ((easip->asi == asi) && (easip->access_cnt != 0) &&
            ((easip->va == addr) || (easip->va == ANY_ERR_VA))) {

            /*
             * Check that bit <vcpuid> of cpu_mask is set, i.e.
             * that this override applies to the current cpu.
             */
            if ((easip->cpu_mask & MASK64(vcpuid, vcpuid)) >> vcpuid)
                break;
        }
    }

    /*
     * If non NULL, we found a match
     */
    if (easip) {
        if (is_load) {
            uint64_t new_val;
            if (is_found) {
                /* ASI already has a value so use it */
                new_val = *val;
            } else {
                /*
                 * ASI was user-specified so we use the
                 * user specified value instead.
                 */
                new_val = (easip->asi_reg);
            }

            /* apply user-specified masks */
            new_val &= ~(easip->nand_mask);
            new_val |= easip->or_mask;

DBGERRTRAP( lprintf(sp->gid, "ERROR_TRAP_GEN: user override (load) " \
    "for ASI 0x%x VA 0x%llx original value=0x%llx nand_mask=0x%llx " \
    "or_mask=0x%llx. Returning 0x%llx\n", asi, addr, (is_found) ? *val : \
    easip->asi_reg, easip->nand_mask, easip->or_mask, new_val); );

            *val = new_val;

        } else {
            /*
             * Store
             */
            if (is_found) {
                DBGERRTRAP( lprintf(sp->gid, "ERROR_TRAP_GEN: user override (store) for " \
                    "ASI 0x%x VA 0x%llx. Treating as noop.\n", asi, addr); );
            } else {
                DBGERRTRAP( lprintf(sp->gid, "ERROR_TRAP_GEN: user override (store) for " \
                    "ASI 0x%x VA 0x%llx. Storing value 0x%llx.\n", asi, addr,
                    *val); );
                easip->asi_reg = *val;
            }
        }
        /*
         * If the access_cnt is set to all f's, it is permanent,
         * so don't decrement it.
         * If we get here with an access_cnt of 0, something bad has
         * happened, so stop!
         */
        if ((easip->access_cnt > 0) && (easip->access_cnt != UINT32_MAX)) {
            easip->access_cnt -= 1;
        } else if (easip->access_cnt == 0) {
            fatal("access_cnt should not be (%u) for a valid ASI (0x%x)",
                easip->access_cnt, easip->asi);
        }
        pthread_mutex_unlock(&rpp->ss_err_state.err_lock);
        return true;
    }

    pthread_mutex_unlock(&rpp->ss_err_state.err_lock);
    return false;
}

bool_t
ss_error_asi_access(simcpu_t * sp, maccess_t op, int regnum, int asi, bool_t is_load, tvaddr_t addr, uint64_t store_val)
{
    int idx;
    ss_err_reg_t *er;
    bool_t match_found = false;
    bool_t user_val = false;
    uint64_t val;
    bool_t legion_access = false; /* HV accessing the ASI */

    ss_proc_t *rpp;

    rpp = (ss_proc_t *)(sp->config_procp->procp);

DBGERRTRAP( lprintf(sp->gid, "ERROR_TRAP_GEN: Access (%s) to ASI 0x%x VA 0x%llx ...\n",
    is_load ? "load" : "store", asi, addr); );

    /*
     * Search through the global list of Error Enable / Error Status registers
     * for this particular ASI/VA.
     *
     * Obviously, not quite as efficient as having a big switch statement
     * based on the ASI number, but we only come into this code when the
     * normal ASI access routine has come up empty handed, i.e. it is only
     * used for error register ASI access.
     *
     * As such it allows us to push all the CPU specific PRM details down
     * into tables which can be defined by each CPU and verified against
     * the (moving target of a) PRM in one place, leaving this code
     * relatively stable and common for SunSPARC CPUs.
     */
    er = rpp->ss_err_state.err_reg_tbl;
    for (idx=0; er[idx].asi != INVALID_ASI; idx++) {
        if ((er[idx].asi == asi) && (er[idx].addr == addr)) {
            val = er[idx].reg_access(sp, asi, addr, is_load, store_val, legion_access);
            match_found = true;
            break;
        }
    }

    /*
     * Check user provided list of ASI/VA/value pairs.
     */
    if (!is_load)
        val = store_val;

    user_val = ss_check_user_asi_list(sp, asi, addr, &val, is_load, match_found);

    if (!match_found && !user_val) {
        lprintf(sp->gid, "ERROR_TRAP_GEN: Access to ASI 0x%x VA 0x%llx @ " \
            "pc=0x%llx failed.\n", asi, addr, sp->pc);
        lprintf(sp->gid, " add \"error_asi { ASI 0x%x; VA 0x%llx; }\" " \
            "to error conf file and try again.\n", asi, addr);
        return false;
    }

    /*
     * A match was found.
     */
    if (is_load) {
        ASSERT(MA_Ld == op);
        if (regnum != Reg_sparcv9_g0)
            sp->intreg[regnum] = val;

        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: ldxa from ASI 0x%x VA 0x%llx. returning 0x%llx\n",
            asi, addr, val););
    } else {
        ERR_TRAP_VERBOSE(lprintf(sp->gid, "ERROR_TRAP_GEN: stxa to ASI 0x%x VA 0x%llx. stored 0x%llx\n",
            asi, addr, store_val););
    }

    return true;
}

void
dump_error_event_list(int gid, error_event_t * eelp)
{
    while (eelp != NULL) {
        lprintf(-1, "\nerror_event { \n");
        if (eelp->options.bits.error_str)
            lprintf(-1, "\terror_str=%s \n", eelp->error_str);
        else
        if (eelp->options.bits.trap_num)
            lprintf(-1, "\ttrap num=0x%x \n", eelp->trap_num);
        else
            lprintf(-1, "\tSP interrupt=0x%x \n", eelp->sp_intr);

        if (eelp->options.bits.target_cpuid)
            lprintf(-1, "\ttarg_cpuid=0x%llx \n", eelp->target_cpuid);
        if (eelp->options.bits.instn_cnt)
            lprintf(-1, "\tinstn_cnt=0x%llx \n", eelp->instn_cnt);
        if (eelp->options.bits.pc)
            lprintf(-1, "\tpc=0x%llx \n", eelp->pc);
        if (eelp->options.bits.address)
            lprintf(-1, "\taddress=0x%llx, access=0x%x \n",
                eelp->address.addr, eelp->address.access);
        if (eelp->options.bits.priv)
            lprintf(-1, "\tpriv=0x%x \n", eelp->priv);
        if (eelp->options.bits.tl)
            lprintf(-1, "\ttl=0x%x \n", eelp->tl);
        if (eelp->options.bits.trigger_cnt)
            lprintf(-1, "\ttrigger_cnt=0x%x \n", eelp->trigger_cnt);

        if (eelp->temp_error_asi_list_rootp != NULL) {
            lprintf(-1, " --- START ASI list for this error event ---\n");
            dump_error_asi_list(gid, eelp->temp_error_asi_list_rootp);
            lprintf(-1, " --- END ASI list for this error event ---\n");
        }
        lprintf(-1, "} \n");
        eelp = eelp->nextp;
    }
}

void
dump_error_asi_list(int gid, error_asi_t * ealp)
{
    while (ealp != NULL) {

        if (ealp->va == ANY_ERR_VA) {
            lprintf(-1, "\tasi=0x%x va=<any addr> ", ealp->asi);
        } else {
            lprintf(-1, "\tasi=0x%x va=0x%llx ", ealp->asi, ealp->va);
        }

        lprintf(-1, "\t cpu_mask=0x%llx nand_mask=0x%llx or_mask=0x%llx access_cnt=0x%x " \
            "id=%u\n",
            ealp->cpu_mask, ealp->nand_mask, ealp->or_mask, ealp->access_cnt,
            ealp->id);

        ealp = ealp->nextp;
    }
}

void
dump_cpu_error_table(int gid, ss_error_entry_t * eep)
{
    int idx;
    lprintf(gid, " \t \t trap class \t \t trap target \n");
    lprintf(gid, " \t \t (precise=%d, \t \t (0xffffffff means\n", PRECISE_TT);
    lprintf(gid, " error \t trap\t deferred=%d, \t is_persistent\t not init or the \n", DEFERRED_TT);
    lprintf(gid, " name \t type\t disrupting=%d)\t (true/false) \t detecting CPU) \n", DISRUPTING_TT);
    lprintf(gid, " ===== \t ====\t ==============\t =============\t =================\n");
    for (idx=0; eep[idx].trap_type != INVALID_TRAP; idx++) {
        lprintf(gid, " %s \t 0x%x \t %d \t %s \t 0x%x\n",
            eep[idx].error_name, eep[idx].trap_type, eep[idx].trap_class,
            eep[idx].is_persistent ? "T" : "F", eep[idx].trap_target);
    }
}

void
dump_cpu_error_reg_table(int gid, ss_err_reg_t * erp)
{
    int idx;
    for (idx=0; erp[idx].asi != INVALID_ASI; idx++) {
        lprintf(gid, " ASI = 0x%x\tVA = 0x%llx\n",
            erp[idx].asi, erp[idx].addr);
    }
}

void
ss_error_event_parse(void * procp, bool_t is_reload)
{
    error_event_t *new_error_eventp;

    if (is_reload == false) { /* initial parse */
        new_error_eventp =
            add_error_event(((ss_proc_t *)procp)->ss_err_state.error_event_list_rootp);
        parse_error_event(new_error_eventp);
        if (((ss_proc_t *)procp)->ss_err_state.error_event_list_rootp == NULL)
            ((ss_proc_t *)procp)->ss_err_state.error_event_list_rootp = new_error_eventp;
    } else { /* dynamic reload */
        new_error_eventp =
            add_error_event(temp_error_event_list_rootp);
        parse_error_event(new_error_eventp);
        if (temp_error_event_list_rootp == NULL)
            temp_error_event_list_rootp = new_error_eventp;
    }
}

void
ss_error_asi_parse(void *procp, bool_t is_reload)
{
    error_asi_t *new_error_asip;
    ss_proc_t *rpp;

    rpp = (ss_proc_t *)(procp);

    if (is_reload == false) { /* initial parse */
        new_error_asip =
            add_error_asi(rpp->ss_err_state.error_asi_list_rootp);

        parse_error_asi(new_error_asip);

        if (rpp->ss_err_state.error_asi_list_rootp == NULL)
            rpp->ss_err_state.error_asi_list_rootp = new_error_asip;

    } else { /* dynamic reload */
        new_error_asip =
            add_error_asi(temp_error_asi_list_rootp);

        parse_error_asi(new_error_asip);

        if (temp_error_asi_list_rootp == NULL)
            temp_error_asi_list_rootp = new_error_asip;
    }
}

void
ss_error_parse_filename(void *procp)
{
    if ((((ss_proc_t *)procp)->ss_err_state.error_config_filep) != NULL)
        lex_fatal("error file already defined");
    lex_get(T_String);
    ((ss_proc_t *)procp)->ss_err_state.error_config_filep = Xstrdup(lex.strp);
    lex_get(T_S_Colon);
}

error_event_t *
add_error_event(error_event_t *p)
{
    while ((p != NULL) && (p->nextp != NULL)) {
        p = p->nextp;
    }
    if (p == NULL) {
        p = Xmalloc( sizeof(error_event_t) );
        p->trap_num = 0;
        p->sp_intr = 0;
        p->target_cpuid = 0;
        p->instn_cnt = 0;
        p->pc = 0;
        p->priv = V9_UnInitialised;
        p->tl = ERROR_TL_NONE;
        p->address.addr = 0x0;
        p->address.access = ERROR_ON_LOAD_OR_STORE;
        p->trigger_cnt = 1;
        p->options.all = 0; /* list of constraints per error event */
        p->temp_error_asi_list_rootp = NULL;
        p->nextp = NULL;
        return p;
    } else {
        p->nextp = add_error_event(p->nextp);
        return p->nextp;
    }
}

error_asi_t *
add_error_asi(error_asi_t *p)
{
    static uint_t id = 0;

    while ((p != NULL) && (p->nextp != NULL)) {
        p = p->nextp;
    }
    if (p == NULL) {
        p = Xmalloc( sizeof(error_asi_t) );
        p->va = 1;
        p->or_mask = 0;
        p->nand_mask = 0;
        p->access_cnt = UINT32_MAX;
        p->nextp = NULL;
        p->asi_reg = 0;
        p->id = id++;
        p->cpu_mask = UINT64_MAX;
        return p;
    } else {
        p->nextp = add_error_asi(p->nextp);
        return p->nextp;
    }
}

/*
 * This routine parses the error_event directive for each processor.
 * We parse the entire error_event and set the appropriate bits in
 * the option fields so we know which options the user specified and
 * which options they left out (we use default values for anything
 * that is not user specified).
 */
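
/*
 * For reference, a hypothetical error_event definition in the shape this
 * parser accepts (keyword set taken from the code below; the exact
 * number-assignment syntax is whatever parse_number_assign() consumes,
 * and all values here are made up):
 *
 *	error_event {
 *		error "some_error_name";
 *		target_cpuid 0x4;
 *		instn_cnt 0x1000;
 *		address { 0x40000 "load"; }
 *		priv "PRIV";
 *		trigger_cnt 2;
 *		error_asi { ASI 0x4c; VA 0x0; OR_MASK 0x1; }
 *	}
 *
 * Exactly one of error/trap/sp_intr must appear, plus at least one of
 * instn_cnt, pc or address (enforced at the end of parse_error_event()).
 */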
void
parse_error_event(error_event_t * eep)
{
    error_asi_t *temp_error_asi_listp;
    lexer_tok_t tok;

    temp_error_asi_listp = NULL;

    lex_get(T_L_Brace);
    do {
        tok = lex_get_token();
        if (tok == T_EOF) lex_fatal("unexpected EOF within error defn");
        if (tok == T_R_Brace) break;
        if (tok != T_Token) goto fail;

        if (streq(lex.strp,"error")) {
            if (eep->options.bits.sp_intr)
                lex_fatal("Cannot specify both error and sp_intr in same error_event");
            if (eep->options.bits.trap_num)
                lex_fatal("Cannot specify both error and trap in same error_event");
            lex_get(T_String);
            eep->error_str = Xstrdup(lex.strp);
            eep->options.bits.error_str = 1;
            lex_get(T_S_Colon);
        } else
        if (streq(lex.strp,"trap")) {
            if (eep->options.bits.error_str)
                lex_fatal("Cannot specify both trap and error in same error_event");
            if (eep->options.bits.sp_intr)
                lex_fatal("Cannot specify both trap and sp_intr in same error_event");
            eep->trap_num = parse_number_assign();
            eep->options.bits.trap_num = 1;
        } else
        if (streq(lex.strp,"sp_intr")) {
            if (eep->options.bits.error_str)
                lex_fatal("Cannot specify both sp_intr and error in same error_event");
            if (eep->options.bits.trap_num)
                lex_fatal("Cannot specify both sp_intr and trap in same error_event");
            eep->sp_intr = parse_number_assign();
            eep->options.bits.sp_intr = 1;
        } else
        if (streq(lex.strp,"target_cpuid")) {
            eep->target_cpuid = parse_number_assign();
            eep->options.bits.target_cpuid = 1;
        } else
        if (streq(lex.strp,"instn_cnt")) {
            eep->instn_cnt = parse_number_assign();
            eep->options.bits.instn_cnt = 1;
        } else
        if (streq(lex.strp,"pc")) {
            eep->pc = parse_number_assign();
            eep->options.bits.pc = 1;
        } else
        if (streq(lex.strp,"address")) {
            lex_get(T_L_Brace);
            eep->address.addr = parse_number_assign();
            eep->options.bits.address = 1;

            tok = lex_get_token();
            switch (tok) {
            case T_R_Brace: break;
            case T_String:
                if (streq(lex.strp,"load")) {
                    eep->address.access = ERROR_ON_LOAD;
                    eep->options.bits.access = 1;
                } else
                if (streq(lex.strp,"store")) {
                    eep->address.access = ERROR_ON_STORE;
                    eep->options.bits.access = 1;
                } else {
                    lex_fatal("address expected LOAD/STORE");
                }
                lex_get(T_S_Colon);
                lex_get(T_R_Brace);
                break;
            default:
fail:
                lex_fatal("unexpected token");
            }
        } else
        if (streq(lex.strp,"priv")) {
            eep->options.bits.priv = 1;
            lex_get(T_String);

            if (streq(lex.strp,"HPRIV"))
                eep->priv = V9_HyperPriv;
            else
            if (streq(lex.strp,"PRIV"))
                eep->priv = V9_Priv;
            else
            if (streq(lex.strp,"USER"))
                eep->priv = V9_User;
            else
                lex_fatal("priv expects HPRIV/PRIV/USER");
            lex_get(T_S_Colon);
        } else
        if (streq(lex.strp,"tl")) {
            eep->tl = parse_number_assign();
            eep->options.bits.tl = 1;
        } else
        if (streq(lex.strp,"trigger_cnt")) {
            eep->trigger_cnt = parse_number_assign();
            eep->options.bits.trigger_cnt = 1;
        } else
        if (streq(lex.strp,"error_asi")) {
            temp_error_asi_listp = add_error_asi(temp_error_asi_listp);

            /* default to 1 before we parse temp asi */
            temp_error_asi_listp->access_cnt = 1;
            parse_error_asi(temp_error_asi_listp);

            if (eep->temp_error_asi_list_rootp == NULL)
                eep->temp_error_asi_list_rootp =
                    temp_error_asi_listp;
        } else
            lex_fatal("unknown option");
    } while (1);

    /*
     * Make sure that there is at least one trap_num, error_str or
     * sp_intr specified in the error_event, AND either an instn_cnt,
     * a %pc value or an address.
     */
    if ((eep->options.bits.trap_num == 0) && (eep->options.bits.error_str == 0) &&
        (eep->options.bits.sp_intr == 0))
        lex_fatal("error_event needs an error, a trap or an sp_intr specified");

    if ((eep->options.bits.instn_cnt == 0) && (eep->options.bits.pc == 0) &&
        (eep->options.bits.address == 0))
        lex_fatal("error_event needs instn_cnt, pc or address to be specified");
}

void
parse_error_asi(error_asi_t *error_asi_listp)
{
    lexer_tok_t tok;
    lex_get(T_L_Brace);
    do {
        tok = lex_get_token();
        if (tok == T_EOF) lex_fatal("unexpected EOF within asi defn");
        if (tok == T_R_Brace) break;
        if (streq(lex.strp,"ASI")) {
            error_asi_listp->asi = parse_number_assign();
        } else
        if (streq(lex.strp,"VA")) {
            error_asi_listp->va = parse_number_assign();
        } else
        if (streq(lex.strp,"OR_MASK")) {
            error_asi_listp->or_mask = parse_number_assign();
        } else
        if (streq(lex.strp,"NAND_MASK")) {
            error_asi_listp->nand_mask = parse_number_assign();
        } else
        if (streq(lex.strp,"ACCESS_CNT")) {
            error_asi_listp->access_cnt = parse_number_assign();
        } else
        if (streq(lex.strp,"CPU_MASK")) {
            error_asi_listp->cpu_mask = parse_number_assign();
        } else
            lex_fatal("asi expects ASI/VA/OR_MASK/NAND_MASK/ACCESS_CNT/CPU_MASK - not %s",
                lex.strp);
    } while (1);
    if (error_asi_listp->asi == 0)
        lex_fatal("asi defn needs ASI");
}
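
/*
 * A hypothetical standalone error_asi override using the keywords parsed
 * above (values invented for illustration): for cpus 0-3, the first two
 * accesses to ASI 0x4c VA 0x18 have bit 0 forced on in the returned value:
 *
 *	error_asi {
 *		ASI 0x4c;
 *		VA 0x18;
 *		OR_MASK 0x1;
 *		ACCESS_CNT 0x2;
 *		CPU_MASK 0xf;
 *	}
 */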

/*
 * Dynamically load a user defined error file upon ~er
 */

void
ss_error_reload_file(config_proc_t * cp)
{
    char tempfilep[64];
    char buffer[8192]; /* big space */
    char *reload_filep;
    int count;
    int res;
    FILE *fp;

    reload_filep = ((ss_proc_t *)cp->procp)->ss_err_state.error_config_filep;

    if (reload_filep == NULL) {
        reload_filep = Xstrdup("reload.error.conf");
        lprintf(-1, "ERROR_TRAP_GEN: error_reload_file_name not defined - using %s as default\n",
            reload_filep);
    } else {
        lprintf(-1, "ERROR_TRAP_GEN: Using error_reload_file_name as specified (%s) \n",
            reload_filep);
    }

    /*
     * First step is to run the C pre-processor
     * over the config file.
     * Output from the pre-processor is piped
     * directly into the lexer.
     * This is complicated because the pre-processor also
     * outputs errors, and may fail.
     */

    /* FIXME:
     * Should probably do this properly with pipes etc.
     * but for now, just use a temp file in /tmp
     * for the sake of convenience.
     */
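
    /*
     * A pipe-based sketch of what the FIXME suggests (not wired in here;
     * init_lexer() would need to cope with the absence of a seekable
     * temp file):
     *
     *	snprintf(buffer, sizeof (buffer), "%s %s %s",
     *	    options.cpp_cmd, options.cpp_optionsp, reload_filep);
     *	fp = popen(buffer, "r");
     *	... parse from fp ...
     *	pclose(fp);
     */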

    for (count=0; count<4; count++) {
        sprintf(tempfilep, "/tmp/sim.err.cfg.%d.%02d", (int)getpid(), count);
        if (!file_exists(tempfilep)) break;
    }
    if (count == 4) fatal("Unable to create a temporary file for config pre-processing");

    errno = 0;

    sprintf(buffer, "%s %s %s > %s", options.cpp_cmd, options.cpp_optionsp,
        reload_filep, tempfilep);

    do {
DBG( printf("system(%s)\n", buffer); );
        res = system(buffer);
    } while (res==-1 && (errno==EAGAIN || errno==EINTR));

    if (res == -1)
        fatal("Failed trying to pre-process config file %s\n", reload_filep);

    printf("Exit status %d\n", res);

    fp = fopen_check(tempfilep, "r");

    init_lexer(reload_filep, fp, tempfilep);

    parse_error_def((ss_proc_t *)cp->procp);
    fclose(fp);
    unlink(tempfilep); /* clean up - remove temp file */

    ss_error_dump_active(cp);
}

/*
 * Dump error_event and error_asi lists upon ~ed
 */

void
ss_error_dump_active(config_proc_t * cp)
{
    lprintf(-1, "ERROR_TRAP_GEN: Error Event list:\n");
    dump_error_event_list(-1, ((ss_proc_t *)cp->procp)->ss_err_state.error_event_list_rootp);
    lprintf(-1, "ERROR_TRAP_GEN: END Error Event list:\n\n");

    lprintf(-1, "ERROR_TRAP_GEN: Error ASI Override list\n");
    dump_error_asi_list(-1, ((ss_proc_t *)cp->procp)->ss_err_state.error_asi_list_rootp);
    lprintf(-1, "ERROR_TRAP_GEN: END Error ASI Override list\n\n");
}

/*
 * Dump all supported (built-in) errors upon ~es
 */
void
ss_error_dump_supported(config_proc_t * cp)
{
    int idx = 0;
    ss_error_entry_t *er;

    er = ((ss_proc_t *)cp->procp)->ss_err_state.err_event_tbl;

    lprintf(-1, "ERROR_TRAP_GEN: Supported Errors list:\n");
    while (strcmp(er[idx].error_name, TRAP_ERR_STRING) != 0) {
        lprintf(-1, "%s\n", er[idx].error_name);
        idx++;
    }
    lprintf(-1, "ERROR_TRAP_GEN: End Supported Errors list:\n");
}

static void
parse_error_def(ss_proc_t * procp)
{
    lexer_tok_t tok;
    error_event_t *temp_error_eventp;
    error_asi_t *reload_error_asip;
    error_asi_t *perm_error_asip;
    error_asi_t *temp_error_asip;
    error_asi_t *end_node_perm_error_asip;

    pthread_mutex_lock(&procp->ss_err_state.err_lock);

    do {
        tok = lex_get_token();
        if (tok == T_EOF) break;
        if (tok != T_Token || (!streq(lex.strp,"error_event") && !streq(lex.strp,"error_asi")))
            lex_fatal("error_event or error_asi definition expected");
        if (streq(lex.strp,"error_asi")) {
            ss_error_asi_parse(procp, true);
        } else /* is error_event */
            ss_error_event_parse(procp, true);
    } while (1);
    lex_unget();
    if (temp_error_event_list_rootp != NULL) {
        temp_error_eventp = procp->ss_err_state.error_event_list_rootp;
        while (temp_error_eventp->nextp != NULL) {
            temp_error_eventp = temp_error_eventp->nextp;
        }
        temp_error_eventp->nextp = temp_error_event_list_rootp;
        temp_error_event_list_rootp = NULL;
    }
    if (temp_error_asi_list_rootp != NULL) {
        /*
         * Here we have parsed a new list of error_asis from the
         * reload file. There might be some error_asis here that
         * already existed in the permanent asi_list so we should
         * overwrite them instead of adding them to the list
         *
         * Need to check:
         * foreach node in the temp_error_asi_list_rootp
         *     check each node in the permanent list
         *     if there's a match, asi/va pair, update the one in
         *         the permanent list
         *     if there is no match, add it to the end of the
         *         permanent list.
         */

        perm_error_asip = procp->ss_err_state.error_asi_list_rootp;
        temp_error_asip = temp_error_asi_list_rootp;

        /*
         * find the last node of the perm_asi list
         */
        while (perm_error_asip->nextp != NULL) {
            perm_error_asip = perm_error_asip->nextp;
        }
        end_node_perm_error_asip = perm_error_asip;

        /*
         * now walk the temp list and see if each node
         * is in the perm list, if it is, replace it,
         * if not, then add it to the end of the perm_list
         */
        for (temp_error_asip = temp_error_asi_list_rootp; temp_error_asip != NULL;
            temp_error_asip = temp_error_asip->nextp) {

            ERR_TRAP_VERBOSE(lprintf(-1, "\nChecking temp error_asi id = %u",
                temp_error_asip->id););

            for (perm_error_asip = procp->ss_err_state.error_asi_list_rootp;
                perm_error_asip != NULL; perm_error_asip = perm_error_asip->nextp) {

                ERR_TRAP_VERBOSE(lprintf(-1, "\nagainst perm error_asi id = %u",
                    perm_error_asip->id););

                if ((temp_error_asip->asi == perm_error_asip->asi) &&
                    (temp_error_asip->va == perm_error_asip->va)) {
                    error_asi_t *saved_nextp;

                    ERR_TRAP_VERBOSE(lprintf(-1, "\n temp asi %u matches with perm asi %u",
                        temp_error_asip->id, perm_error_asip->id););

                    /*
                     * overwrite the contents of the perm asi with the new values
                     * taking care to preserve the nextp pointer in the perm asi.
                     */
                    saved_nextp = perm_error_asip->nextp;
                    bcopy(temp_error_asip, perm_error_asip, sizeof(error_asi_t));
                    perm_error_asip->nextp = saved_nextp;

                    break;

                } /* if */
            } /* for */

            if (perm_error_asip == NULL) {
                /*
                 * we got here because we didn't find a match
                 * so add this temp_asi to the end of the perm list
                 * by creating a new error_asi and bcopying in
                 * the data. We can then free the entire temp_list
                 * when we are done.
                 */
                error_asi_t *new_error_asip = NULL;

                new_error_asip = add_error_asi(new_error_asip);
                bcopy(temp_error_asip, new_error_asip, sizeof(error_asi_t));
                end_node_perm_error_asip->nextp = new_error_asip;
                new_error_asip->nextp = NULL;
                end_node_perm_error_asip = new_error_asip;
            }
        }

        /*
         * we have either updated the permanent asi list from
         * the contents of this new list (just parsed in from
         * a reload) or we have copied entries from this list
         * into the permanent list. Either way, we are done with
         * the temp_error_asi_list_rootp list so we can free it all.
         */
        for (temp_error_asip = temp_error_asi_list_rootp; temp_error_asip != NULL; ) {
            /* grab nextp before freeing the node we are standing on */
            error_asi_t *nextp = temp_error_asip->nextp;

            Xfree(temp_error_asip);
            temp_error_asip = nextp;
        }
        temp_error_asi_list_rootp = NULL;
    }

    pthread_mutex_unlock(&procp->ss_err_state.err_lock);
}

#endif /* } ERROR_TRAP_GEN */

int no_ss_error_trap_gen;