/*
* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: ss_err_trap.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* ========== Copyright Header End ============================================
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "@(#)ss_err_trap.c 1.17 07/02/28 SMI"
#if ERROR_TRAP_GEN /* { */
#include <string.h> /* memcpy/memset */
#define ERR_TRAP_VERBOSE(_s) do { _s } while (0)
#define ERR_TRAP_VERBOSE(_s) do { } while (0)
/* error file parsing functions */
static error_event_t
*add_error_event(error_event_t
*);
static error_asi_t
*add_error_asi(error_asi_t
*);
static void parse_error_event(error_event_t
*);
static void parse_error_asi(error_asi_t
*);
static void parse_error_def(ss_proc_t
*);
/* temp head pointers for dynamic error file reload */
static error_event_t
* temp_error_event_list_rootp
= NULL
;
static error_asi_t
* temp_error_asi_list_rootp
= NULL
;
extern void debug_set_breakpoint(tvaddr_t bpaddr
);
/* Called exactly once for every error event which we want to inject */
ss_inject_error_trap( simcpu_t
* sp
, char * err_name
, sparcv9_trap_type_t trap_only_tt
, int sp_intr_only
)
ss_error_entry_t
*error_table
;
ss_sp_error_t
*sp_error_table
;
uint64_t reg_val
, mask_val
;
bool_t is_new_trap
, is_ready
;
int target_cpu
, strand_idx
;
rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
rsp
= v9p
->impl_specificp
;
error_table
= rpp
->ss_err_state
.err_event_tbl
;
sp_error_table
= rpp
->ss_err_state
.sp_err_tbl
;
* Calling code must ensure that the ready_for_next_injection
* flag has been changed from true to false before calling this
* We may set it back to true in this routine based on whether
* or not there is a trap to post.
ASSERT(rpp
->ss_err_state
.ready_for_next_injection
== false);
* Search for the specific error we are generating in
* the global error table.
for (idx
=0; error_table
[idx
].trap_type
!= INVALID_TRAP
; idx
++) {
if (strcmp(error_table
[idx
].error_name
, err_name
) == 0) {
if (strcmp(error_table
[idx
].error_name
, END_ERR_STRING
) == 0) {
lprintf(sp
->gid
, "ERROR_TRAP_GEN: %s not found. Unable to inject\n", err_name
);
rpp
->ss_err_state
.ready_for_next_injection
= true;
* Special case for injecting traps which are not associated
* with any particular error.
if (strcmp(error_table
[idx
].error_name
, TRAP_ERR_STRING
) == 0) {
error_table
[idx
].trap_type
= trap_only_tt
;
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Injecting:\n" );
lprintf(sp
->gid
, " name = %s\n", error_table
[idx
].error_name
);
if (error_table
[idx
].trap_target
!= TARGET_SP
)
lprintf(sp
->gid
, " trap_type = 0x%x\n", (int)error_table
[idx
].trap_type
);
lprintf(sp
->gid
, " trap_class = %d (precise=%d, deferred=%d, disrupting=%d, SP_intr=%d)\n",
(int)error_table
[idx
].trap_class
, PRECISE_TT
, DEFERRED_TT
, DISRUPTING_TT
, SP_INTR
);
lprintf(sp
->gid
, " is_persistent = %d\n", (int)error_table
[idx
].is_persistent
);
lprintf(sp
->gid
, " trap_target = 0x%x\n", error_table
[idx
].trap_target
);
* First check the error recording value/mask
* if applicable to this error.
* All bits which are specified in the mask must be turned on.
* Otherwise, we simply do nothing (return).
if (error_table
[idx
].error_record
.eer_access
) {
* We are interested in the register value for the strand
* that is encountering the error. i.e. the current CPU.
reg_val
= error_table
[idx
].error_record
.eer_access(sp
, ASI_NA
, ADDR_NA
, true, 0, true);
mask_val
= error_table
[idx
].error_record
.mask
;
if ((reg_val
& mask_val
) != mask_val
) {
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Error Recording Register is OFF for %s\n",
error_table
[idx
].error_name
);
rpp
->ss_err_state
.ready_for_next_injection
= true;
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: Error Recording Register is ON for %s\n",
error_table
[idx
].error_name
););
* Next we inject the ESR value as specified by the mask
if (error_table
[idx
].error_status
.esr_inject
) {
* We inject the ESR of the strand that is encountering
* the error. i.e. the current CPU.
* Return value tells us whether the injection of this
* particular ESR mask will result in a new trap. As
* opposed to simply turning on the "ME" bit for
is_new_trap
= error_table
[idx
].error_status
.esr_inject(sp
, error_table
[idx
].error_status
.err_inject_mask
, &error_table
[idx
]);
rpp
->ss_err_state
.esrs_clear
= false;
* if no ESR is associated with this error trap,
* then we always post a new trap.
if (is_new_trap
== false) {
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: No new trap to generate\n"););
rpp
->ss_err_state
.ready_for_next_injection
= true;
if (error_table
[idx
].trap_target
!= TARGET_SP
)
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Error %s has been injected. Attempting to post TT 0x%x\n",
error_table
[idx
].error_name
, (int)error_table
[idx
].trap_type
);
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Error %s has been injected. Attempting to post SP interrupt\n",
error_table
[idx
].error_name
);
* Target CPU for this error trap is specified by
* error_table[idx].trap_target
* 'TARGET_MYSELF' means always post trap to
* the CPU encountering the error. In this case,
* that would be the current CPU.
* 'TARGET_SP' means post an interrupt to the
target_cpu
= error_table
[idx
].trap_target
;
if (target_cpu
== TARGET_SP
) {
if (target_cpu
== TARGET_MYSELF
) {
strand_idx
= STRANDID2IDX(rpp
, target_cpu
);
ASSERT(VALIDIDX(rpp
, strand_idx
));
tv9p
= rpp
->strand
[strand_idx
];
tstrandp
= tv9p
->impl_specificp
;
* Verify that we are not allowing more than one
* outstanding error trap injection at a time.
ASSERT(rpp
->ss_err_state
.inj_error_trap
== NULL
);
* We must first check the error reporting registers. If
* the trap is ready to be posted, then we will do so.
* (the check below works for non-maskable error traps
* too since the eer_access routines for those will be
* Otherwise, if the trap is not ready to be posted due
* to one or more error reporting register settings, we
* simply update this strand's inj_error_trap pointer
* if it is a disrupting error trap or do nothing for
* precise and disrupting error traps.
* ss_check_error_traps() will take care of re-checking the
* error reporting registers after any CPU state change in
* the future for the disrupting traps.
if (error_table
[idx
].error_report
.eer_access
) {
* We are interested in the register value for the strand
* that will recieve the trap. i.e. the target CPU which
* may or may not be the current CPU.
reg_val
= error_table
[idx
].error_report
.eer_access(trap_sp
, ASI_NA
, ADDR_NA
, true, 0, true);
mask_val
= error_table
[idx
].error_report
.mask
;
if ((reg_val
& mask_val
) != mask_val
)
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: Error Reporting Register is %s for %s\n",
is_ready
? "ON" : "OFF" , error_table
[idx
].error_name
););
* All disrupting error traps are also conditioned by the PSTATE.IE
* bit when HPSTATE.HPRIV is set.
if (error_table
[idx
].trap_class
== DISRUPTING_TT
) {
/* ss_check_error_traps() will post the trap when ready */
rpp
->ss_err_state
.inj_error_trap
= &(error_table
[idx
]);
rpp
->ss_err_state
.trap_target_gid
= trap_sp
->gid
;
if ((v9p
->state
== V9_HyperPriv
) && !(v9p
->pstate
.int_enabled
))
tv9p
->post_precise_trap(trap_sp
, error_table
[idx
].trap_type
);
lprintf(sp
->gid
, "ERROR_TRAP_GEN: TT 0x%x is %s for %s\n",
error_table
[idx
].trap_type
, is_ready
? "posted" : "pending" ,
error_table
[idx
].error_name
);
if (error_table
[idx
].trap_class
== SP_INTR
) {
/* search for the error entry in the sp table */
for (idx
=0; strcmp(sp_error_table
[idx
].error_name
, END_ERR_STRING
) != 0; idx
++) {
if (strcmp(sp_error_table
[idx
].error_name
, err_name
) == 0) {
if (strcmp(sp_error_table
[idx
].error_name
, END_ERR_STRING
) == 0) {
lprintf(sp
->gid
, "ERROR_TRAP_GEN: %s not found in sp error table. Unable to inject\n", err_name
);
rpp
->ss_err_state
.ready_for_next_injection
= true;
* Special case for injecting a service processor interrupt
* not associated with any particular error.
if (strcmp(sp_error_table
[idx
].error_name
, SP_INTR_ERR_STRING
) == 0) {
sp_error_table
[idx
].sp_intr
= sp_intr_only
;
/* Post the SP interrupt */
if (sp
->config_procp
->proc_typep
->sp_interrupt(sp
, sp_error_table
[idx
].sp_intr
,
sp_error_table
[idx
].error_name
))
lprintf(sp
->gid
, "ERROR_TRAP_GEN: SP INTERRUPT 0x%x is posted for %s\n",
sp_error_table
[idx
].sp_intr
, sp_error_table
[idx
].error_name
);
lprintf(sp
->gid
, "ERROR_TRAP_GEN: SP INTERRUPT 0x%x failed or unsupported for %s\n",
sp_error_table
[idx
].sp_intr
, sp_error_table
[idx
].error_name
);
lprintf(sp
->gid
, "ERROR_TRAP_GEN: SP INTERRUPT 0x%x is dropped for %s\n",
sp_error_table
[idx
].sp_intr
, sp_error_table
[idx
].error_name
);
rpp
->ss_err_state
.ready_for_next_injection
= true;
rpp
->ss_err_state
.inj_error_trap
= NULL
;
rpp
->ss_err_state
.inj_error_trap
= &(error_table
[idx
]);
rpp
->ss_err_state
.trap_target_gid
= trap_sp
->gid
;
tv9p
->post_precise_trap(trap_sp
, error_table
[idx
].trap_type
);
* No traps to post or hold pending, so we can once
* again allow injection of new errors.
rpp
->ss_err_state
.ready_for_next_injection
= true;
lprintf(sp
->gid
, "ERROR_TRAP_GEN: TT 0x%x is %s for %s\n",
error_table
[idx
].trap_type
, is_ready
? "posted" : "dropped" ,
error_table
[idx
].error_name
);
/*
* If there is an error_asi_list associated with this error
* do the following for each node:
* -) Using the id field, check if this node is already in the
*    permanent asi list (from a previous trigger of this error)
*    ensure it's access_cnt is reset using the value in
*    copy this error_asi from the error_asi list into
*    the head of the permanent asi_list
*/
update_error_asi_list(simcpu_t
*sp
)
ss_proc_t
*rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
error_asi_t
*temp_asi_listp
;
temp_asi_listp
= sp
->eep
->temp_error_asi_list_rootp
;
if (temp_asi_listp
== NULL
)
return; /* nothing to do */
pthread_mutex_lock(&rpp
->ss_err_state
.err_lock
);
/* For each node in the temp_asi_listp */
for (temp_easip
= temp_asi_listp
; temp_easip
!= NULL
;
temp_easip
= temp_easip
->nextp
) {
/* compare against each node in the perm_asi_list */
for (easip
= rpp
->ss_err_state
.error_asi_list_rootp
;
easip
!= NULL
; easip
= easip
->nextp
) {
if (easip
->id
== temp_easip
->id
) {
/* found a match - re-arm this error_asi */
easip
->access_cnt
= temp_easip
->access_cnt
;
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: Updating single-use ASI " \
"override associated with this error event (id-%u)\n",
error_asi_t
*new_error_asip
= NULL
;
* create a new error_asi, copy in the data from the temp_asi
* and add it to the head of the permanent list
new_error_asip
= add_error_asi(new_error_asip
);
bcopy(temp_easip
, new_error_asip
, sizeof(error_asi_t
));
new_error_asip
->nextp
= rpp
->ss_err_state
.error_asi_list_rootp
;
rpp
->ss_err_state
.error_asi_list_rootp
= new_error_asip
;
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: Adding single-use ASI " \
"override associated with this error event (id-%u)\n",
pthread_mutex_unlock(&rpp
->ss_err_state
.err_lock
);
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Updated Error ASI Override list\n");
dump_error_asi_list(sp
->gid
, rpp
->ss_err_state
.error_asi_list_rootp
);
lprintf(sp
->gid
, "ERROR_TRAP_GEN: END Error ASI Override list\n\n");
/* Returns TRUE if the triggered error has been injected. */
trigger_error_trap(simcpu_t
* sp
)
if ((sp
->eep
== NULL
) || (sp
->eep
->ee_status
!= EE_TRIGGERED
))
rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
* Serialize the number of outstanding injections.
pthread_mutex_lock(&rpp
->ss_err_state
.injection_lock
);
if (rpp
->ss_err_state
.ready_for_next_injection
== false) {
pthread_mutex_unlock(&rpp
->ss_err_state
.injection_lock
);
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: Injection already in progress. Will try again later\n"););
rpp
->ss_err_state
.ready_for_next_injection
= false;
pthread_mutex_unlock(&rpp
->ss_err_state
.injection_lock
);
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: CPU is ready for injection\n"););
* If there are any user-defined ASI overrides associated
* with this error, then add them to the head of the asi
* override list. This needs to take into account that the
* same error may trigger multiple times so we need to
* update the asi list associated with this error if we
* detect that this error has already triggered.
update_error_asi_list(sp
);
* Next we inject the actual error trap or SP interrupt.
if (sp
->eep
->options
.bits
.error_str
) {
ss_inject_error_trap(sp
, sp
->eep
->error_str
, 0, 0);
} else if (sp
->eep
->options
.bits
.sp_intr
) {
ss_inject_error_trap(sp
, SP_INTR_ERR_STRING
, 0, sp
->eep
->sp_intr
);
ss_inject_error_trap(sp
, TRAP_ERR_STRING
, sp
->eep
->trap_num
, 0);
* Finally check to see whether this CPU should be watching
* for any new error events to be triggered.
* >1 means trigger N times, 1 means trigger once
if (sp
->eep
->trigger_cnt
> 1 ) {
sp
->eep
->trigger_cnt
-= 1;
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: Error event trigger cnt = 0x%x. " \
"re-arming existing triggers\n", sp
->eep
->trigger_cnt
););
sp
->eep
->ee_status
= EE_ASSIGNED
;
sp
->error_pending
= true;
sp
->eep
->trigger_cnt
= 0;
* See if there are any other events which may need to
* be triggered on this CPU.
check_pending_error_events(sp
);
/* Called every time the CPU state has been changed. */
ss_check_error_traps(simcpu_t
* sp
)
uint64_t reg_val
, mask_val
, err_pending_mask
;
ss_error_entry_t
*error_table
;
rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
rsp
= v9p
->impl_specificp
;
error_table
= rpp
->ss_err_state
.err_event_tbl
;
* If this strand's inj_error_trap pointer contains
* a valid pointer, it means we are trying to inject
* an error trap so we check to see if it is ready
ep
= rpp
->ss_err_state
.inj_error_trap
;
if ((ep
) && (rpp
->ss_err_state
.trap_target_gid
== sp
->gid
)) {
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: TT 0x%x still pending for this CPU\n",
if (ep
->error_report
.eer_access
) {
* We are interested in the register value for the
reg_val
= ep
->error_report
.eer_access(sp
, ASI_NA
, ADDR_NA
, true, 0, true);
mask_val
= ep
->error_report
.mask
;
if ((reg_val
& mask_val
) != mask_val
)
* All disrupting error traps are also conditioned by the
* PSTATE.IE bit when HPSTATE.HPRIV is set.
if ((ep
->trap_class
== DISRUPTING_TT
) &&
((v9p
->state
== V9_HyperPriv
) && !(v9p
->pstate
.int_enabled
)))
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Posting TT 0x%x to this CPU\n", ep
->trap_type
);
v9p
->post_precise_trap(sp
, ep
->trap_type
);
* Have any new error events been triggered on this CPU
if (trigger_error_trap(sp
)) {
* If that call resulted in injection of an error
* event, then we are done for now.
* Global assumptions about error handling on all SunSPARC
* 1) Only maskable error traps can be persistent.
* By "persistent", we mean that the error trap keeps
* getting generated over and over again as long as the
* Error Status Register and Error Enabling Register
* conditions are satisfied. Otherwise, the trap handler
* won't be able to make forward progress.
* 2) All disrupting traps which are persistent will also
* be conditioned by the PSTATE.IE bit when
* With the above rules in mind:
* Now it is time to search through our entire list of
* error traps and for each and every "persistent" error
* type found which is targetted at the current CPU,
* post a trap if the ESR and EER conditions for that
* performance optimization which might help
if (rpp
->ss_err_state
.esrs_clear
) return;
DBGERRTRAP( lprintf(sp
->gid
, "ERROR_TRAP_GEN: Checking error list for persistent error ESR bits\n"); );
for (idx
=0; error_table
[idx
].trap_type
!= INVALID_TRAP
; idx
++) {
if ((error_table
[idx
].trap_target
== rsp
->vcore_id
) &&
(error_table
[idx
].is_persistent
)) {
* Check to see if the Error Status bit(s) for this error
* is turned on. Note that all persistent error traps
* must have a valid esr_access() routine.
ASSERT(error_table
[idx
].error_status
.esr_access
);
reg_val
= error_table
[idx
].error_status
.esr_access(sp
, ASI_NA
, ADDR_NA
, true, 0, true);
* The err_pending_mask is all the bits that can indicate an error condition in an ESR.
* It is used to retrieve the error bits from the ESR and then compared against the
* the err_inject_mask. If a match is found it indicates that the error corresponding
* to this err_inject_mask is still pending.
err_pending_mask
= error_table
[idx
].error_status
.err_pending_mask
;
mask_val
= error_table
[idx
].error_status
.err_inject_mask
;
if ((reg_val
& err_pending_mask
) != mask_val
)
} else { /* if err_pending_mask is not set for an error entry */
if ((reg_val
& mask_val
) != mask_val
)
DBGERRTRAP( lprintf(sp
->gid
, "ERROR_TRAP_GEN: ESR bit is set for persistent error (%s) " \
"targetted at the current CPU\n", error_table
[idx
].error_name
););
* Check to see if the Error Reporting conditions are satisfied
if (error_table
[idx
].error_report
.eer_access
) {
reg_val
= error_table
[idx
].error_report
.eer_access(sp
, ASI_NA
, ADDR_NA
, true, 0, true);
mask_val
= error_table
[idx
].error_report
.mask
;
if ((reg_val
& mask_val
) != mask_val
)
* Check to see if the Error Recording conditions are satisfied
if (error_table
[idx
].error_record
.eer_access
) {
reg_val
= error_table
[idx
].error_record
.eer_access(sp
, ASI_NA
, ADDR_NA
, true, 0, true);
mask_val
= error_table
[idx
].error_record
.mask
;
if ((reg_val
& mask_val
) != mask_val
)
* All disrupting error traps are also conditioned by the
* PSTATE.IE bit when HPSTATE.HPRIV is set.
if ((error_table
[idx
].trap_class
== DISRUPTING_TT
) &&
((v9p
->state
== V9_HyperPriv
) && !(v9p
->pstate
.int_enabled
)))
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Posting TT 0x%x for persistent error (%s) " \
"to the current CPU\n", error_table
[idx
].trap_type
, error_table
[idx
].error_name
);
v9p
->post_precise_trap(sp
, error_table
[idx
].trap_type
);
DBGERRTRAP( lprintf(sp
->gid
, "ERROR_TRAP_GEN: Finished checking error list\n"); );
/*
* Checks the global list of error events which were given to us
* by the user. If we find an error event targetted at the
* current CPU which has not yet been triggered, assign it to
* this CPU and set up the trigger conditions accordingly.
*/
check_pending_error_events(simcpu_t
* sp
)
rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
rsp
= v9p
->impl_specificp
;
eep
= rpp
->ss_err_state
.error_event_list_rootp
;
for ( ; eep
!= NULL
; eep
= eep
->nextp
) {
if ((eep
->target_cpuid
== rsp
->vcore_id
) &&
(eep
->ee_status
== EE_PARSED
)) {
* The following console output is a bit wordy
* and inefficient, but this is not performance
* critical code in any way.
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Setting up CPU triggers:\n");
* Either the error_string or a trap number must have been parsed
if (eep
->options
.bits
.sp_intr
)
lprintf(sp
->gid
, " SP interrupt = 0x%x\n", eep
->sp_intr
);
if (eep
->options
.bits
.error_str
)
lprintf(sp
->gid
, " error_str = %s\n", eep
->error_str
);
lprintf(sp
->gid
, " trap num = 0x%x\n", eep
->trap_num
);
if (eep
->options
.bits
.sp_intr
)
lprintf(sp
->gid
, " targ_cpuid = SP\n");
lprintf(sp
->gid
, " targ_cpuid = 0x%llx\n", eep
->target_cpuid
);
lprintf(sp
->gid
, " instr_cnt = 0x%llx (or later)\n", eep
->instn_cnt
);
if (eep
->options
.bits
.address
)
lprintf(sp
->gid
, " address = 0x%llx\n", eep
->address
.addr
);
lprintf(sp
->gid
, " address = <ANY ADDRESS>\n");
switch (eep
->address
.access
) {
lprintf(sp
->gid
, " access type = LOAD\n");
lprintf(sp
->gid
, " access type = STORE\n");
lprintf(sp
->gid
, " access type = LOAD or STORE\n");
if (eep
->options
.bits
.priv
) {
lprintf(sp
->gid
, " priv level = USER\n");
lprintf(sp
->gid
, " priv level = PRIVILEDGED\n");
lprintf(sp
->gid
, " priv level = HYPERPRIVILEDGED\n");
lprintf(sp
->gid
, " priv level = <ERROR IN PARSING>\n");
fatal("Error in parsing for error_Event. Unknown priv level specified");
lprintf(sp
->gid
, " priv level = <ANY PRIV LEVEL>\n");
if (eep
->tl
== ERROR_TL_NONE
)
lprintf(sp
->gid
, " trap level = <ANY TRAP LEVEL>\n");
lprintf(sp
->gid
, " trap level = %d\n", eep
->tl
);
); /* ERR_TRAP_VERBOSE */
* setup to catch this error. We can trigger an error
* when the following parameters are met:
* 1) on an instn_cnt - done
* 2) on an address access (load, store or either) - done
* 3) on an address access after an instn_cnt
* 4) any address in Priv, Hpriv or User mode
if (eep
->options
.bits
.pc
) {
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Setting breakpoint for %%pc=0x%llx\n", eep
->pc
);
debug_set_breakpoint(eep
->pc
);
* Catch the case where we do not trigger the error until
* we've reached a specified instn_cnt
if ((eep
->instn_cnt
== ERROR_INSTN_CNT_NONE
) || (sp
->cycle
>= eep
->instn_cnt
)) {
sp
->error_cycle_reached
= true;
sp
->error_cycle_reached
= false;
sp
->error_cycle
= eep
->instn_cnt
;
sp
->error_pending
= true;
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: No more errors to trigger on this CPU\n"););
/*
* This routine is called from ss_take_exception as one of the last
* steps before a CPU takes a trap.
* This allows us to compare the trap about to be taken against
* the trap we are currently trying to inject. If it is a match,
* then we know that our injection was successful.
* This is needed so that we don't lose error injection traps
* which might otherwise get lost if it just so happened that
* a higher priority trap (such as MMU miss) occurs at the same
* time we are injecting an error trap.
* Note: It is very important that this routine be called before
* the final call to ss_check_interrupts() in ss_take_exception().
* Otherwise we could end up with two traps being delivered for
*/
ss_error_taking_trap( simcpu_t
* sp
, sparcv9_trap_type_t trap_type
)
rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
ep
= rpp
->ss_err_state
.inj_error_trap
;
if ((ep
) && (ep
->trap_type
== trap_type
)) {
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: TT 0x%x has been taken by this CPU\n", trap_type
););
rpp
->ss_err_state
.inj_error_trap
= NULL
;
rpp
->ss_err_state
.ready_for_next_injection
= true;
/*
* This routine checks the list of user provided
* ASI overrides. Each ASI override can be specified
* as being global (valid for all cpus) or only
* valid for a specified set of cpuids (using a
* If we find a match, we record the value in
* asi_reg on a store operation, or apply both of
* our masks to the returned value if this is a
* Each ASI override entry contains two masks:
* First we 'nand' the nand_mask
* Then we 'or' in the or_mask
* If "is_found" is true, it means we already have
* a value (pointed to by "val") and so we would
* just update that value based on our mask values.
* Otherwise we use the value stored in asi_reg
* and apply our masks on top of that.
*/
ss_check_user_asi_list(simcpu_t
*sp
, int asi
, tvaddr_t addr
, uint64_t *val
, bool_t is_load
, bool_t is_found
)
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
nsp
= v9p
->impl_specificp
;;
rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
pthread_mutex_lock(&rpp
->ss_err_state
.err_lock
);
* Search the ASI override list for a match
for (easip
= rpp
->ss_err_state
.error_asi_list_rootp
; easip
!= NULL
;
/* see if there is a match */
if ((easip
->asi
== asi
) && (easip
->access_cnt
!= 0) &&
((easip
->va
== addr
) || (easip
->va
== ANY_ERR_VA
))) {
/* Check if this asi is valid for this cpu */
if ((easip
->cpu_mask
& MASK64(vcpuid
, vcpuid
)) >> vcpuid
)
* If non NULL, we found a match
/* ASI already has a value so use it */
* ASI was user-specified so we use the
* user specified value instead.
new_val
= (easip
->asi_reg
);
/* apply user-specified masks */
new_val
&= ~(easip
->nand_mask
);
new_val
|= easip
->or_mask
;
DBGERRTRAP( lprintf(sp
->gid
, "ERROR_TRAP_GEN: user override (load) " \
"for ASI 0x%x VA 0x%llx original value=0x%llx nand_mask=0x%llx " \
"or_mask=0x%llx. Returning 0x%llx\n", asi
, addr
, (is_found
) ? *val
: \
easip
->asi_reg
, easip
->nand_mask
, easip
->or_mask
, new_val
); );
DBGERRTRAP( lprintf(sp
->gid
, "ERROR_TRAP_GEN: user override (store) for " \
"ASI 0x%x VA 0x%llx. Treating as noop.\n", asi
, addr
); );
DBGERRTRAP( lprintf(sp
->gid
, "ERROR_TRAP_GEN: user override (store) for " \
"ASI 0x%x VA 0x%llx. Storing value 0x%llx.\n", asi
, addr
,
* If the access_cnt set to all f's, that means it's permanent
* If we get here with an acces_cnt of 0 - something bad has
if ((easip
->access_cnt
> 0) && (easip
->access_cnt
!= UINT32_MAX
)) {
} else if (easip
->access_cnt
== 0) {
fatal("access_cnt should not be (%u) for a valid ASI (0x%llx)",
easip
->access_cnt
, easip
);
pthread_mutex_unlock(&rpp
->ss_err_state
.err_lock
);
pthread_mutex_unlock(&rpp
->ss_err_state
.err_lock
);
ss_error_asi_access(simcpu_t
* sp
, maccess_t op
, int regnum
, int asi
, bool_t is_load
, tvaddr_t addr
, uint64_t store_val
)
bool_t match_found
= false;
bool_t legion_access
= false; /* HV accessing the ASI */
rpp
= (ss_proc_t
*)(sp
->config_procp
->procp
);
DBGERRTRAP( lprintf(sp
->gid
, "ERROR_TRAP_GEN: Access (%s) to ASI 0x%x VA 0x%llx ...\n",
is_load
? "load" : "store", asi
, addr
); );
* Search through global list of Error Enable / Error Status registers
* for this particular ASI/VA.
* Obviously, not quite as efficient as having a big switch statment
* based on the ASI number, but we only come into this code when the
* normal ASI access routine has come up empty handed. i.e. it is only
* used for error register ASI access.
* As such it allows us to push all the CPU specific PRM details down
* into tables which can be defined by each CPU and verified against
* the (moving target of a) PRM in one place, leaving this code
* relatively stable and common for SunSPARC CPUs.
er
= rpp
->ss_err_state
.err_reg_tbl
;
for (idx
=0; er
[idx
].asi
!= INVALID_ASI
; idx
++) {
if ( (er
[idx
].asi
== asi
) && (er
[idx
].addr
== addr
)) {
val
= er
[idx
].reg_access(sp
, asi
, addr
, is_load
, store_val
, legion_access
);
* Check user provided list of ASI/VA/value pairs.
user_val
= ss_check_user_asi_list(sp
, asi
, addr
, &val
, is_load
, match_found
);
if (!match_found
&& !user_val
) {
lprintf(sp
->gid
, "ERROR_TRAP_GEN: Access to ASI 0x%x VA 0x%llx @ " \
"pc=0x%llx failed.\n", asi
, addr
, sp
->pc
);
lprintf(sp
->gid
, " add \"error_asi { ASI 0x%x; VA 0x%x; }\" " \
"to error conf file and try again.\n", asi
, addr
);
if (regnum
!= Reg_sparcv9_g0
)
sp
->intreg
[regnum
] = val
;
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: ldxa from ASI 0x%x VA 0x%llx. returning 0x%llx\n",
ERR_TRAP_VERBOSE(lprintf(sp
->gid
, "ERROR_TRAP_GEN: stxa to ASI 0x%x VA 0x%llx. stored 0x%llx\n",
dump_error_event_list(int gid
, error_event_t
* eelp
)
lprintf(-1, "\nerror_event { \n");
if (eelp
->options
.bits
.error_str
)
lprintf(-1, "\terror_str=%s \n", eelp
->error_str
);
if (eelp
->options
.bits
.trap_num
)
lprintf(-1, "\ttrap num=0x%x \n", eelp
->trap_num
);
lprintf(-1, "\tSP interrupt=0x%x \n", eelp
->sp_intr
);
if (eelp
->options
.bits
.target_cpuid
)
lprintf(-1, "\ttarg_cpuid=0x%x \n", eelp
->target_cpuid
);
if (eelp
->options
.bits
.instn_cnt
)
lprintf(-1, "\tinstn_cnt=0x%x \n", eelp
->instn_cnt
);
if (eelp
->options
.bits
.pc
)
lprintf(-1, "\tpc=0x%x \n", eelp
->pc
);
if (eelp
->options
.bits
.address
)
lprintf(-1, "\taddress=0x%x, access=0x%x \n",
eelp
->address
.addr
, eelp
->address
.access
);
if (eelp
->options
.bits
.priv
)
lprintf(-1, "\tpriv=0x%x \n", eelp
->priv
);
if (eelp
->options
.bits
.tl
)
lprintf(-1, "\ttl=0x%x \n", eelp
->tl
);
if (eelp
->options
.bits
.trigger_cnt
)
lprintf(-1, "\ttrigger_cnt=0x%x \n", eelp
->trigger_cnt
);
if (eelp
->temp_error_asi_list_rootp
!= NULL
) {
lprintf(-1, " --- START ASI list for this error event ---\n");
dump_error_asi_list(gid
, eelp
->temp_error_asi_list_rootp
);
lprintf(-1, " --- END ASI list for this error event ---\n");
dump_error_asi_list(int gid
, error_asi_t
* ealp
)
if (ealp
->va
== ANY_ERR_VA
) {
lprintf(-1, "\tasi=0x%x va=<any addr> ", ealp
->asi
);
lprintf(-1, "\tasi=0x%x va=0x%x ", ealp
->asi
, ealp
->va
);
lprintf(-1, "\t cpu_mask=0x%llx nand_mask=0x%llx or_mask=0x%llx access_cnt=0x%x "\
ealp
->cpu_mask
, ealp
->nand_mask
, ealp
->or_mask
, ealp
->access_cnt
,
dump_cpu_error_table(int gid
, ss_error_entry_t
* eep
)
lprintf(gid
, " \t \t trap class \t \t trap target \n");
lprintf(gid
, " \t \t (precise=%d, \t \t (0xffffffff means\n", PRECISE_TT
);
lprintf(gid
, " error \t trap\t deferred=%d, \t is_persistent\t not init or the \n", DEFERRED_TT
);
lprintf(gid
, " name \t type\t disrupting=%d)\t (true/false) \t detecting CPU) \n", DISRUPTING_TT
);
lprintf(gid
, " ===== \t ====\t ==============\t =============\t =================\n");
for (idx
=0; eep
[idx
].trap_type
!= INVALID_TRAP
; idx
++) {
lprintf(gid
, " %s \t 0x%x \t %d \t %s \t 0x%x\n",
eep
[idx
].error_name
, eep
[idx
].trap_type
, eep
[idx
].trap_class
,
eep
[idx
].is_persistent
? "T" : "F", eep
[idx
].trap_target
);
dump_cpu_error_reg_table(int gid
, ss_err_reg_t
* erp
)
for (idx
=0; erp
[idx
].asi
!= INVALID_ASI
; idx
++) {
lprintf(gid
, " ASI = 0x%x\tVA = 0x%llx\n",
erp
[idx
].asi
, erp
[idx
].addr
);
ss_error_event_parse(void * procp
, bool_t is_reload
)
error_event_t
* new_error_eventp
;
if (is_reload
== false) { /* initial parse */
add_error_event(((ss_proc_t
*)procp
)->ss_err_state
.error_event_list_rootp
);
parse_error_event(new_error_eventp
);
if (((ss_proc_t
*)procp
)->ss_err_state
.error_event_list_rootp
== NULL
)
((ss_proc_t
*)procp
)->ss_err_state
.error_event_list_rootp
= new_error_eventp
;
} else { /* dynamic reload */
add_error_event(temp_error_event_list_rootp
);
parse_error_event(new_error_eventp
);
if (temp_error_event_list_rootp
== NULL
)
temp_error_event_list_rootp
= new_error_eventp
;
ss_error_asi_parse(void *procp
, bool_t is_reload
)
error_asi_t
* new_error_asip
;
rpp
= (ss_proc_t
*)(procp
);
if (is_reload
== false) { /* initial parse */
add_error_asi(rpp
->ss_err_state
.error_asi_list_rootp
);
parse_error_asi(new_error_asip
);
if (rpp
->ss_err_state
.error_asi_list_rootp
== NULL
)
rpp
->ss_err_state
.error_asi_list_rootp
= new_error_asip
;
} else { /* dynamic reload */
add_error_asi(temp_error_asi_list_rootp
);
parse_error_asi(new_error_asip
);
if (temp_error_asi_list_rootp
== NULL
)
temp_error_asi_list_rootp
= new_error_asip
;
ss_error_parse_filename(void *procp
)
if ((((ss_proc_t
*)procp
)->ss_err_state
.error_config_filep
) != NULL
)
lex_fatal("error file already defined");
((ss_proc_t
*)procp
)->ss_err_state
.error_config_filep
= Xstrdup(lex
.strp
);
add_error_event(error_event_t
*p
)
while ((p
!= NULL
) && (p
->nextp
!= NULL
)) {
p
= Xmalloc( sizeof(error_event_t
) );
p
->priv
= V9_UnInitialised
;
p
->address
.access
= ERROR_ON_LOAD_OR_STORE
;
p
->options
.all
= 0; /* list of constraints per error event */
p
->temp_error_asi_list_rootp
= NULL
;
p
->nextp
= add_error_event(p
->nextp
);
add_error_asi(error_asi_t
*p
)
while ((p
!= NULL
) && (p
->nextp
!= NULL
)) {
p
= Xmalloc( sizeof(error_asi_t
) );
p
->access_cnt
= UINT32_MAX
;
p
->cpu_mask
= UINT64_MAX
;
p
->nextp
= add_error_asi(p
->nextp
);
/*
* This routine parses the error_event directive for each processor.
* We parse the entire error_event and set the appropriate bits in
* the option fields so we know which options the user specified and
* which options they left out (we use default values for anything
* that is not user specified)
*/
/*
 * parse_error_event: parse the body of an "error_event { ... }"
 * directive into *eep. For every field the user supplies, the matching
 * bit in eep->options.bits is set so later code can distinguish
 * user-specified values from defaults. Mutually exclusive triggers
 * (error / trap / sp_intr) are rejected with lex_fatal.
 * NOTE(review): this extraction is garbled - lexer-advance calls,
 * braces and the else-if chain are missing; token text below is
 * preserved verbatim. Confirm structure against the pristine source.
 */
parse_error_event(error_event_t
* eep
)
error_asi_t
* temp_error_asi_listp
;
temp_error_asi_listp
= NULL
;
/* token loop: ends at '}', fatal on EOF or a non-token */
if (tok
== T_EOF
) lex_fatal("unexpected EOF within error defn");
if (tok
== T_R_Brace
) break;
if (tok
!= T_Token
) goto fail
;
/* "error = <name>" - named built-in error; excludes trap/sp_intr */
if (streq(lex
.strp
,"error")) {
if (eep
->options
.bits
.sp_intr
)
lex_fatal("Cannot specify both error and sp_intr in same error_event");
if (eep
->options
.bits
.trap_num
)
lex_fatal("Cannot specify both error and trap in same error_event");
eep
->error_str
= Xstrdup(lex
.strp
);
eep
->options
.bits
.error_str
= 1;
/* "trap = <num>" - raw trap number; excludes error/sp_intr */
if (streq(lex
.strp
,"trap")) {
if (eep
->options
.bits
.error_str
)
lex_fatal("Cannot specify both trap and error in same error_event");
if (eep
->options
.bits
.sp_intr
)
lex_fatal("Cannot specify both trap and sp_intr in same error_event");
eep
->trap_num
= parse_number_assign();
eep
->options
.bits
.trap_num
= 1;
/* "sp_intr = <num>" - service-processor interrupt; excludes error/trap */
if (streq(lex
.strp
,"sp_intr")) {
if (eep
->options
.bits
.error_str
)
lex_fatal("Cannot specify both sp_intr and error in same error_event");
if (eep
->options
.bits
.trap_num
)
lex_fatal("Cannot specify both sp_intr and trap in same error_event");
eep
->sp_intr
= parse_number_assign();
eep
->options
.bits
.sp_intr
= 1;
/* optional constraints: which cpu the event targets */
if (streq(lex
.strp
,"target_cpuid")) {
eep
->target_cpuid
= parse_number_assign();
eep
->options
.bits
.target_cpuid
= 1;
/* trigger after a given instruction count */
if (streq(lex
.strp
,"instn_cnt")) {
eep
->instn_cnt
= parse_number_assign();
eep
->options
.bits
.instn_cnt
= 1;
/* trigger at a given %pc */
if (streq(lex
.strp
,"pc")) {
eep
->pc
= parse_number_assign();
eep
->options
.bits
.pc
= 1;
/* trigger on access to a given address, optionally LOAD or STORE only */
if (streq(lex
.strp
,"address")) {
eep
->address
.addr
= parse_number_assign();
eep
->options
.bits
.address
= 1;
if (streq(lex
.strp
,"load")) {
eep
->address
.access
= ERROR_ON_LOAD
;
eep
->options
.bits
.access
= 1;
if (streq(lex
.strp
,"store")) {
eep
->address
.access
= ERROR_ON_STORE
;
eep
->options
.bits
.access
= 1;
lex_fatal("address expected LOAD/STORE");
lex_fatal("unexpected token");
/* privilege-level constraint: HPRIV / PRIV / USER */
if (streq(lex
.strp
,"priv")) {
eep
->options
.bits
.priv
= 1;
if (streq(lex
.strp
,"HPRIV"))
eep
->priv
= V9_HyperPriv
;
/* NOTE(review): PRIV/USER assignment bodies were lost in extraction */
if (streq(lex
.strp
,"PRIV"))
if (streq(lex
.strp
,"USER"))
lex_fatal("priv expects HPRIV/PRIV/USER");
/* trap-level constraint */
if (streq(lex
.strp
,"tl")) {
eep
->tl
= parse_number_assign();
eep
->options
.bits
.tl
= 1;
/* how many times the event may fire */
if (streq(lex
.strp
,"trigger_cnt")) {
eep
->trigger_cnt
= parse_number_assign();
eep
->options
.bits
.trigger_cnt
= 1;
/* nested error_asi override attached to this event */
if (streq(lex
.strp
,"error_asi")) {
temp_error_asi_listp
= add_error_asi(temp_error_asi_listp
);
/* default to 1 before we parse temp asi */
temp_error_asi_listp
->access_cnt
= 1;
parse_error_asi(temp_error_asi_listp
);
if (eep
->temp_error_asi_list_rootp
== NULL
)
eep
->temp_error_asi_list_rootp
=
/* NOTE(review): right-hand side of the assignment above was lost in
 * extraction - presumably temp_error_asi_listp; confirm. */
lex_fatal("unknown option");
/*
 * Make sure that there is at least one trap_num, error_str or
 * sp_intr specified in the error_event AND either an instn_cnt,
 * a %pc value or an address.
 */
if ((eep
->options
.bits
.trap_num
== 0) && (eep
->options
.bits
.error_str
== 0) &&
(eep
->options
.bits
.sp_intr
== 0))
lex_fatal("error_event needs an error, a trap or an sp_intr specified");
if ((eep
->options
.bits
.instn_cnt
== 0) && (eep
->options
.bits
.pc
== 0) &&
(eep
->options
.bits
.address
== 0))
lex_fatal("error_event needs instn_cnt, pc or address to be specified");
/*
 * parse_error_asi: parse the body of an "error_asi { ... }" directive
 * into *error_asi_listp. Recognised keys: ASI, VA, OR_MASK, NAND_MASK,
 * ACCESS_CNT, CPU_MASK. An ASI value is mandatory.
 * NOTE(review): extraction garbled - lexer-advance calls and braces are
 * missing; token text preserved verbatim.
 */
parse_error_asi(error_asi_t
*error_asi_listp
)
/* token loop: ends at '}', fatal on EOF */
if (tok
== T_EOF
) lex_fatal("unexpected EOF within asi defn");
if (tok
== T_R_Brace
) break;
if (streq(lex
.strp
,"ASI")) {
error_asi_listp
->asi
= parse_number_assign();
if (streq(lex
.strp
,"VA")) {
error_asi_listp
->va
= parse_number_assign();
/* bits to OR into the value returned by the ASI access */
if (streq(lex
.strp
,"OR_MASK")) {
error_asi_listp
->or_mask
= parse_number_assign();
/* bits to clear (NAND) from the returned value */
if (streq(lex
.strp
,"NAND_MASK")) {
error_asi_listp
->nand_mask
= parse_number_assign();
/* how many accesses the override stays active for */
if (streq(lex
.strp
,"ACCESS_CNT")) {
error_asi_listp
->access_cnt
= parse_number_assign();
/* which cpus the override applies to */
if (streq(lex
.strp
,"CPU_MASK")) {
error_asi_listp
->cpu_mask
= parse_number_assign();
lex_fatal("asi expects ASI/VA/OR_MASK/NAND_MASK/ACCESS_CNT/CPU_MASK - not %s",
/* an explicit (non-zero) ASI is required */
if (error_asi_listp
->asi
== 0)
lex_fatal("asi defn needs ASI");
/* Dynamically load a user-defined error file upon ~er */
/*
 * ss_error_reload_file: dynamically (re)load the user's error config
 * file for processor 'cp'. Runs the C pre-processor over the file into
 * a temp file, feeds that to the lexer, parses the definitions, then
 * dumps the resulting active lists. Falls back to "reload.error.conf"
 * if no file name was configured.
 * NOTE(review): extraction garbled - system() call, do-loop head and
 * some lprintf arguments are missing; token text preserved verbatim.
 */
ss_error_reload_file(config_proc_t
* cp
)
char buffer
[8192]; /* big space */
reload_filep
= ((ss_proc_t
*)cp
->procp
)->ss_err_state
.error_config_filep
;
if (reload_filep
== NULL
) {
reload_filep
= Xstrdup("reload.error.conf");
lprintf(-1, "ERROR_TRAP_GEN: error_reload_file_name not defined - using %s as default\n",
lprintf(-1, "ERROR_TRAP_GEN: Using error_reload_file_name as specified (%s) \n",
/*
 * First step is to run the C pre-processor.
 * Output from the pre-processor is piped
 * directly into the lexer.
 * This is complicated because the pre-processor also
 * outputs errors, and may fail.
 * Should probably do this properly with pipes etc.,
 * but for now just use a temp file in /tmp
 * for the sake of convenience.
 */
for (count
=0; count
<4; count
++) {
/* pick a pid-qualified temp name that does not exist yet */
sprintf(tempfilep
, "/tmp/sim.err.cfg.%d.%02d", (int)getpid(), count
);
if (!file_exists(tempfilep
)) break;
/* NOTE(review): loop runs count 0..3 but this tests count == 5, which
 * can never be true - likely should be 4 (or the bound 5); confirm. */
if (count
== 5) fatal("Unable to create a temporary file for config pre-processing");
/* build: <cpp_cmd> <cpp_options> <config file> > <temp file> */
sprintf(buffer
,"%s %s %s > %s", options
.cpp_cmd
, options
.cpp_optionsp
,
reload_filep
, tempfilep
);
DBG( printf("system(%s)\n", buffer
); );
/* retry the (elided) system() call on transient failures */
} while (res
==-1 && (errno
==EAGAIN
|| errno
==EINTR
));
fatal("Failed trying to pre-process config file %s\n", reload_filep
);
printf("Exit status %d\n", res
);
/* lex and parse the pre-processed output */
fp
= fopen_check(tempfilep
, "r");
init_lexer(reload_filep
, fp
, tempfilep
);
parse_error_def((ss_proc_t
*)cp
->procp
);
unlink(tempfilep
); /* clean up - remove temp file */
/* show the user what is now active */
ss_error_dump_active(cp
);
/* Dump the error_event and error_asi lists upon ~ed */
/*
 * ss_error_dump_active: print the processor's currently active
 * error_event list and error_asi override list to the log.
 */
ss_error_dump_active(config_proc_t
* cp
)
lprintf(-1, "ERROR_TRAP_GEN: Error Event list:\n");
dump_error_event_list(-1, ((ss_proc_t
*)cp
->procp
)->ss_err_state
.error_event_list_rootp
);
lprintf(-1, "ERROR_TRAP_GEN: END Error Event list:\n\n");
lprintf(-1, "ERROR_TRAP_GEN: Error ASI Override list\n");
dump_error_asi_list(-1, ((ss_proc_t
*)cp
->procp
)->ss_err_state
.error_asi_list_rootp
);
lprintf(-1, "ERROR_TRAP_GEN: END Error ASI Override list\n\n");
/* Dump all supported (built-in) errors upon ~es */
/*
 * ss_error_dump_supported: list every built-in error name from the
 * processor's err_event_tbl. The table is terminated by an entry whose
 * error_name equals TRAP_ERR_STRING.
 */
ss_error_dump_supported(config_proc_t
* cp
)
er
= ((ss_proc_t
*)cp
->procp
)->ss_err_state
.err_event_tbl
;
lprintf(-1, "ERROR_TRAP_GEN: Supported Errors list:\n");
/* walk until the TRAP_ERR_STRING sentinel entry */
while (strcmp(er
[idx
].error_name
, TRAP_ERR_STRING
) != 0) {
lprintf(-1, "%s\n", er
[idx
].error_name
);
lprintf(-1, "ERROR_TRAP_GEN: End Supported Errors list:\n");
/*
 * parse_error_def: top-level parse entry used on dynamic reload (~er).
 * Under the processor's err_lock it parses one or more error_event /
 * error_asi definitions into the temporary lists, then merges them into
 * the permanent per-processor lists: events are appended; asis replace
 * any permanent entry with a matching (asi, va) pair, otherwise they
 * are appended at the tail.
 * NOTE(review): extraction garbled - loop braces and some statements
 * are missing; token text preserved verbatim.
 */
parse_error_def(ss_proc_t
* procp
)
error_event_t
*temp_error_eventp
;
error_asi_t
*reload_error_asip
;
error_asi_t
*perm_error_asip
;
error_asi_t
*temp_error_asip
;
error_asi_t
*end_node_perm_error_asip
;
/* serialise against the running cpu threads touching these lists */
pthread_mutex_lock(&procp
->ss_err_state
.err_lock
);
if (tok
!= T_Token
|| (!streq(lex
.strp
,"error_event") && !streq(lex
.strp
,"error_asi")))
lex_fatal("error_event or error_asi definition expected");
/* dispatch on definition type; 'true' = dynamic-reload path */
if (streq(lex
.strp
,"error_asi")) {
ss_error_asi_parse(procp
, true);
} else /* is error_event */
ss_error_event_parse(procp
, true);
/* append freshly parsed events to the permanent event list */
if (temp_error_event_list_rootp
!= NULL
) {
temp_error_eventp
= procp
->ss_err_state
.error_event_list_rootp
;
/* find the tail of the permanent event list */
while (temp_error_eventp
->nextp
!= NULL
) {
temp_error_eventp
= temp_error_eventp
->nextp
;
temp_error_eventp
->nextp
= temp_error_event_list_rootp
;
temp_error_event_list_rootp
= NULL
;
if (temp_error_asi_list_rootp
!= NULL
) {
/*
 * Here we have parsed a new list of error_asis from the
 * reload file. There might be some error_asis here that
 * already existed in the permanent asi_list, so we should
 * overwrite them instead of adding them to the list:
 * for each node in temp_error_asi_list_rootp,
 * check each node in the permanent list;
 * if there's a match (asi/va pair), update the one in place;
 * if there is no match, add it to the end of the list.
 */
perm_error_asip
= procp
->ss_err_state
.error_asi_list_rootp
;
temp_error_asip
= temp_error_asi_list_rootp
;
/* find the last node of the perm_asi list */
while (perm_error_asip
->nextp
!= NULL
) {
perm_error_asip
= perm_error_asip
->nextp
;
end_node_perm_error_asip
= perm_error_asip
;
/*
 * now walk the temp list and see if each node
 * is in the perm list; if it is, replace it,
 * if not, then add it to the end of the perm_list
 */
for (temp_error_asip
= temp_error_asi_list_rootp
; temp_error_asip
!= NULL
;
temp_error_asip
= temp_error_asip
->nextp
) {
ERR_TRAP_VERBOSE(lprintf(-1, "\nChecking temp error_asi id = %u",
for (perm_error_asip
= procp
->ss_err_state
.error_asi_list_rootp
;
perm_error_asip
!= NULL
; perm_error_asip
= perm_error_asip
->nextp
) {
ERR_TRAP_VERBOSE(lprintf(-1, "\nagainst perm error_asi id = %u",
/* match on the (asi, va) pair */
if ((temp_error_asip
->asi
== perm_error_asip
->asi
) &&
(temp_error_asip
->va
== perm_error_asip
->va
)) {
error_asi_t
*saved_nextp
;
ERR_TRAP_VERBOSE(lprintf(-1, "\n temp asi %u matches with perm asi %u",
temp_error_asip
->id
, perm_error_asip
->id
););
/*
 * overwrite the contents of the perm asi with the new values,
 * taking care to preserve the nextp pointer in the perm asi.
 */
saved_nextp
= perm_error_asip
->nextp
;
bcopy(temp_error_asip
, perm_error_asip
, sizeof(error_asi_t
));
perm_error_asip
->nextp
= saved_nextp
;
/* inner loop ran off the end => no match was found */
if (perm_error_asip
== NULL
) {
/*
 * we got here because we didn't find a match,
 * so add this temp_asi to the end of the perm list
 * by creating a new error_asi and bcopying in
 * the data. We can then free the entire temp list.
 */
error_asi_t
*new_error_asip
= NULL
;
new_error_asip
= add_error_asi(new_error_asip
);
bcopy(temp_error_asip
, new_error_asip
, sizeof(error_asi_t
));
end_node_perm_error_asip
->nextp
= new_error_asip
;
new_error_asip
->nextp
= NULL
;
end_node_perm_error_asip
= new_error_asip
;
/*
 * we have either updated the permanent asi list from
 * the contents of this new list (just parsed in from
 * a reload) or we have copied entries from this list
 * into the permanent list. Either way, we are done with
 * the temp_error_asi_list_rootp list so we can free it all.
 */
for (temp_error_asip
= temp_error_asi_list_rootp
; temp_error_asip
!= NULL
;
temp_error_asip
= temp_error_asip
->nextp
) {
/* NOTE(review): the free() of each node was lost in extraction */
temp_error_asi_list_rootp
= NULL
;
pthread_mutex_unlock(&procp
->ss_err_state
.err_lock
);
#endif /* } ERROR_TRAP_GEN */
/*
 * Placeholder so this translation unit is non-empty when
 * ERROR_TRAP_GEN is not defined (ISO C forbids an empty TU).
 */
int no_ss_error_trap_gen
;