* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: SS_Strand.h
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* ========== Copyright Header End ============================================
#include "BL_BoundedArray.h"
#include "SS_Interrupt.h"
#include "SS_InstrCache.h"
#include "SS_AsiInfoTable.h"
#include "SS_Registers.h"
#include "SS_PidContext.h"
// Function-pointer types used by SS_Strand in place of virtual methods
// (the class deliberately has no vtable; see the class comment below).
// Each hook receives the current pc/npc and returns the next pc.

// Instruction-side MMU hook: (pc, npc, strand, instr, decode-cache tag).
typedef SS_Vaddr (*SS_InstMmu)( SS_Vaddr, SS_Vaddr, SS_Strand*, SS_Instr*, SS_InstrCache::Tag* );

// Trap delivery hook: (pc, npc, strand, instr, trap type).
typedef SS_Vaddr (*SS_TrapFun)( SS_Vaddr, SS_Vaddr, SS_Strand*, SS_Instr*, SS_Trap::Type );

// MMU-raised trap hook: like SS_TrapFun but also carries the faulting va.
typedef SS_Vaddr (*SS_MmuTrap)( SS_Vaddr, SS_Vaddr, SS_Strand*, SS_Instr*, SS_Vaddr, SS_Trap::Type );

// Data-side MMU hook: (pc, npc, strand, instr, va, mem). The exact
// semantics of the trailing "mem" argument are not visible here -- TODO confirm.
typedef SS_Vaddr (*SS_DataMmu)( SS_Vaddr, SS_Vaddr, SS_Strand*, SS_Instr*, SS_Vaddr, uint_t mem );

// Handler invoked for accesses through an invalid/unhandled ASI:
// (pc, npc, strand, instr, va).
typedef SS_Vaddr (*SS_InvalidAsi)( SS_Vaddr, SS_Vaddr, SS_Strand*, SS_Instr*, SS_Vaddr );
// Decoder entry used while instruction-va breakpoints are active; it
// matches the pc against the set breakpoints before normal decode
// (see the skip_break_inst_va / test_break_inst_va comments in SS_Strand).
SS_Vaddr
ss_break_inst_va_dec( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i );

// "Fetch 512" decode-cache fill routines, one per combination of
// {RAM (mem_), I/O (io_)} source x {run (fast), trc (tracing)} mode.
// Each takes pc/npc, the strand, the decode cache line being filled,
// the fetch va, and the TTE that translated it.
SS_Vaddr
mem_run_fetch512( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_Vaddr va, SS_Tte* tte );
SS_Vaddr
io_run_fetch512( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_Vaddr va, SS_Tte* tte );
SS_Vaddr
mem_trc_fetch512( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_Vaddr va, SS_Tte* tte );
SS_Vaddr
io_trc_fetch512( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_Vaddr va, SS_Tte* tte );
// ==================================================================================
// The SS_Strand class has no virtual methods and classes derived from SS_Strand
// can not have virtual methods either. Function pointers are used instead of virtual.
// The reason for non-virtual is avoidance of hidden speed penalties and avoiding
// an add on computing IRF register offsets: g0 is at offset 0 so we can check for
// g0 by comparing index against 0 (this saves setting a constant to compare against).
// Additionally the class has a very open public interface. This does not mean that
// all members can be accessed at will. On the contrary, all interfaces (SAM virtual cpu),
// verification PLI, PFE are expected to only use methods defined between the Public
// Interface Start and End markers below.
// ==================================================================================
// Construct a strand under node _parent with name _name. The caller
// supplies the execute dispatch table and the run (fast) and trc
// (tracing) memory-op tables; mem_err_detector provides the RAS memory
// hierarchy error-detection hooks kept by reference in the class.
SS_Strand( SS_Node& _parent,
           const char* _name,
           SS_Execute run_exe_table[],
           SS_Memop mem_run_table_init[][4],
           SS_Memop mem_trc_table_init[][4],
           SS_MemErrDetector& mem_err_detector );

// Perform a warm reset of the strand. The meaning of intp (presumably
// whether the reset trap/interrupt is delivered) is not visible from
// this declaration -- TODO confirm against the implementation.
void warm_reset(bool intp=true);
// run_step() and trc_step() execute count instruction in run (fast)
// and trc (slow) mode respectively. Both routines do not check tick
// and stick against tick_cmpr, stick_cmpr and the hstick_cmpr. This
// is handled separately by run_tick(). The correct order of calling
// is run_step() or trc_step() followed by run_tick().
uint64_t run_step( uint64_t count
);
uint64_t trc_step( uint64_t count
);
void run_tick( uint64_t incr
);
void add_tracer( SS_Tracer
* t
);
void del_tracer( SS_Tracer
* t
);
void snapshot( SS_SnapShot
& ss
);
const char* get_node_name() { return name
; }
void get_name( char* dst
);
SS_BreakPoint::Ident
break_on_inst_va( SS_Vaddr va
);
SS_BreakPoint::Ident
break_on_trap( uint_t tt
);
SS_BreakPoint::Ident
break_on_red_mode();
SS_BreakPoint::Error
break_enable( SS_BreakPoint::Ident id
);
SS_BreakPoint::Error
break_disable( SS_BreakPoint::Ident id
);
SS_BreakPoint::Error
break_delete( SS_BreakPoint::Ident id
);
void irq_launch( SS_Trap::Type
, bool do_time_out
=true );
// Translate virtual address va to a physical address through the
// frontend cnv2pa hook, using the default context. Returns 0 when
// cnv2pa reports a translation error (non-zero return).
SS_Paddr
va2pa( SS_Vaddr va )
{
    SS_Paddr phys;
    if ((cnv2pa)(this, Sam::Vcpu::TRANSLATE_VA_TO_PA, va, 0, 0, &phys))
        return 0;
    return phys;
}
// Translate virtual address va in the given context ctx to a physical
// address through the frontend cnv2pa hook. Returns 0 when cnv2pa
// reports a translation error (non-zero return).
SS_Paddr
va2pa( SS_Vaddr va, uint_t ctx )
{
    SS_Paddr phys;
    if ((cnv2pa)(this, Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX, va, ctx, 0, &phys))
        return 0;
    return phys;
}
// Translate virtual address va in context ctx and partition id pid to a
// physical address through the frontend cnv2pa hook. Returns 0 when
// cnv2pa reports a translation error (non-zero return).
SS_Paddr
va2pa( SS_Vaddr va, uint_t ctx, uint_t pid )
{
    SS_Paddr phys;
    if ((cnv2pa)(this, Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX_PID, va, ctx, pid, &phys))
        return 0;
    return phys;
}
// Translate real address ra to a physical address through the frontend
// cnv2pa hook. Returns 0 when cnv2pa reports a translation error
// (non-zero return).
SS_Paddr
ra2pa( SS_Vaddr ra )
{
    SS_Paddr phys;
    if ((cnv2pa)(this, Sam::Vcpu::TRANSLATE_RA_TO_PA, ra, 0, 0, &phys))
        return 0;
    return phys;
}
// Translate real address ra in partition id pid to a physical address
// through the frontend cnv2pa hook. Returns 0 when cnv2pa reports a
// translation error (non-zero return).
SS_Paddr
ra2pa( SS_Vaddr ra, uint_t pid )
{
    SS_Paddr phys;
    if ((cnv2pa)(this, Sam::Vcpu::TRANSLATE_RA_TO_PA_PID, ra, 0, pid, &phys))
        return 0;
    return phys;
}
enum Limits
// These are SunSparc limits, not implementation limits
MAX_WP
= 7, // Actual limit is 31, but that has never been used.
//======================================================================
// Do not add data members above here and do not change the layout
// of the members below here as we need to most critical state
// reachable with a single ld/st with immediate offset (4 Kb space).
// For that reason the SS_ ctr registers are sorted in 64, 32, 16 & 8
// bit sizes so that there are no unused gaps between the members
//======================================================================
// These accessors take a byte offset into the register file, not an index.
// The offset is measured from the strand object itself ((char*)this), which
// is what lets decoded code reach a register with one ld/st plus immediate
// offset -- see the layout comment above.
uint64_t& get_irf( int64_t ofs ) { return *(uint64_t*)((char*)this + ofs); }  // integer reg
uint32_t& get_frf( int64_t ofs ) { return *(uint32_t*)((char*)this + ofs); }  // single-precision fp reg
uint64_t& get_drf( int64_t ofs ) { return *(uint64_t*)((char*)this + ofs); }  // double-precision fp reg
// Convert a byte offset from the strand base (the form used by get_irf)
// into an integer-register index within irf, and the reverse.
static uint64_t reg_off2idx( int64_t ofs ) { return (ofs - offsetof(SS_Strand,irf)) / sizeof(uint64_t); }
static uint64_t reg_idx2off( int64_t idx ) { return idx * sizeof(uint64_t) + offsetof(SS_Strand,irf); }
static uint64_t freg_off2idx( int64_t ofs
)
uint64_t idx
= (ofs
-offsetof(SS_Strand
,drf
))/sizeof(uint32_t);
static uint64_t freg_idx2off( int64_t idx
)
return idx
*sizeof(uint32_t)+offsetof(SS_Strand
,drf
);
// set_fprs() sets the dl and du bits based on the ofs (rd). The ofs is
// from the this pointer to the frf member + rd index (scaled by 4(%f) or 8(%d)).
// The (32 * 4) bias and >> 7 shift turn the offset into the dl/du bit
// pattern that is OR-ed into the current fprs value.
void set_fprs( int64_t ofs ) { fprs = fprs() | ((ofs - (32 * 4)) >> 7); }
uint64_t irf
[32]; // %g, %o, %l, %i
// Floating point register file. Note that single precision registers
// are not listed here since they are aliased with double precision.
// The little endian platform such as AMD64 makes this aliasing a little
// Some processors generate helper sequences of Sparc lookalike
// opcodes that use temporary registers. The %g, %i, %l, %o regs
// need to be present at the same time as the %t (temps)
uint64_t trf
[8]; // Temporary registers
SS_HstickCmpr hstick_cmpr
;
SS_RstvAddr rstv_addr
; // Strand's PowerOnReset vector address
SS_Canrestore canrestore
;
SS_StrandId strand_id
; // The one and only strand id
SS_MaxWp max_wp
; // Strand's NWINDOWS - 1
SS_MaxGl max_gl
; // Strand's MAX_GL
SS_MaxTl max_tl
; // Strand's MAX_TL
SS_MaxPgl max_pgl
; // Strand's MAX_PGL
SS_MaxPtl max_ptl
; // Strand's MAX_PTL
SS_VaBits va_bits
; // Size of the SS_Vaddr in bits
SS_PaBits pa_bits
; // Size of the SS_Paddr in bits
SS_LsuCtr lsu_ctr
; // ToDo remove this as not all product have this ..
SS_InstCount inst_count
; // Number of instructions executed so far
SS_Fsr fsr
; // The real FSR of the simulated host.
SS_Fsr fsr_run
; // The FSR of the simulated host, excluding tem=0, aexc=0, cexc=0
SS_Fsr fsr_tem
; // The FSR.tem bits of the simulated host
SS_Fsr fsr_exc
; // The FSR.cexc and FSR.aexc of the simulated host
uint64_t fsr_cpu
; // The FSR of the host (save place for keeping fsr during fpop execution)
uint64_t fsr_tmp
; // The FSR after an fpop has executed (scratch)
bool running
; // True when the strand is running
bool halted
; // True when the strand is halted
// unhalt() is called to pull the strand out of halted mode.
// The new running mode is _running
void unhalt( bool _running
=true )
sim_state
.running(_running
);
SS_DecodeTable
* dec_table
; // Pointer to the decode tree
SS_Execute
* exe_table
; // Pointers to tables of execute functions
SS_Execute
* run_exe_table_ref
; // Ref to trace run table
SS_Memop (*mem_table
)[4]; // Pointers to tables of current memory access functions
SS_Memop (*mem_run_table_ref
)[4]; // Pointers to tables of fast memory access functions
SS_Memop (*mem_trc_table_ref
)[4]; // Pointers to tables of tracing memory access functions
BL_Memory
* memory
; // Pointer to RAM
SS_Io
* io
; // Pointer to I/O
uint64_t mmu_scratch
; // The MMU uses this as scratch space for enidan conversion
uint64_t mem_data
[8]; // For holding values loaded/stored to memory (avoid %sp adjustment code!)
uint8_t stpartial16
[16]; // Partial store 16 to partial store 8 byte mask conversion table
uint8_t stpartial32
[4]; // Partial store 32 to partial store 8 byte mask conversion table
uint64_t mem_mask
; // Mask used for partial stores ... iso %o6
BL_BoundedArray
<BL_EccBits
,32> irf_ecc
; // Ecc values for IRF RAS
SS_MemErrDetector
& mem_err_detector
; // RAS memory hierarchy error detector
// Returns all the I$ information for a paddr associated w/ this core
char* icache_info(SS_Paddr pa
)
return mem_err_detector
.icache_info(pa
, this);
// Returns all the I$ information for a set associated w/ this core
char* icache_set(uint_t set
)
return mem_err_detector
.icache_set(set
, this);
// Returns all the D$ information for a set associated w/ this core
char* dcache_set(uint_t set
)
return mem_err_detector
.dcache_set(set
, this);
// Returns all the L2$ information for a set associated w/ this core
char* l2cache_set(uint_t set
)
return mem_err_detector
.l2cache_set(set
, this);
// Returns all the L2$ information for a set associated w/ this core
char* l2cache_set(uint_t bank
, uint_t set
)
return mem_err_detector
.l2cache_set(bank
, set
, this);
uint32_t md
[2]; // Instruction breakpoint opcode match mask and match data
uint64_t mask_data
; // A union so that a core can update a strand atomically without locks
BL_BoundedArray
<InstBreakpoint
,2> inst_iw
;
SS_Vaddr inst_wp_va_mask
; // The bits that should be match for a watchpoint hit.
SS_Vaddr inst_wp_va_addr
; // Bit0 of addr is used to disable (1) or enable(0) watchpoint.
// inst_breakpoint_set() sets the mask and data value for instruction
// word breakpoints atomically.
void inst_breakpoint_set( uint_t index
, uint32_t mask
, uint32_t data
)
assert((data
&~ mask
) == 0); // Expect data bits not used in match to be zero'd
inst_iw
[index
].mask_data
= ib
.mask_data
;
// inst_breakpoint_hit() returns true when one of the instruction
bool inst_breakpoint_hit( uint32_t opc
)
return sim_state
.ib_enabled()
&& (inst_iw
[0].md
[0] && ((opc
& inst_iw
[0].md
[0]) == inst_iw
[0].md
[1]))
|| (inst_iw
[1].md
[0] && ((opc
& inst_iw
[1].md
[0]) == inst_iw
[1].md
[1]));
// inst_watchpoint_va_set() sets the instruction watchpoint on virtual
// addresses atomically. Note that bit 0 of the inst_wp_va_addr
// is used as enable (bit0=0) and disable (bit0=1).
void inst_watchpoint_va_set( SS_Vaddr mask
, SS_Vaddr addr
)
inst_wp_va_addr
= 1; // Force mismatch always
SS_Vaddr
inst_watchpoint_va_get()
// inst_watchpoint_va_hit() returns true when the va matches the
bool inst_watchpoint_va_hit( SS_Vaddr va
)
return (va
& inst_wp_va_mask
) == inst_wp_va_addr
;
// inst_watchpoint_va_near_hit() returns true when the va matches the
// set va watchpoints when the mask is applied first. This is used in
// decode cache situations to avoid caching cases that should go through
// the mmu to check for va watchpoint hits.
bool inst_watchpoint_va_near_hit( SS_Vaddr mask
, SS_Vaddr va
)
return (va
& inst_wp_va_mask
& mask
) == (inst_wp_va_addr
& (mask
+ 1));
// The inst_mmu can set the inst_dec to a decoder that is used just for a
// single cache line. The fetch 512 routines reset the inst_dec back to
// the save_dec. ss_trap() also resets the inst_dec back to save_dec.
// inst_dec and save_dec can change when breakpoints on instruction opcode
// are activated, either for front end use or for hardware purposes.
SS_Execute inst_dec
; // Pointer to the instruction decode function
SS_Execute save_dec
; // Always points at the true instruction decoder
SS_Tte
* fail_tte
; // Default tte that always mismatches
SS_Tte
* inst_tte
; // Current tte used by IMMU
SS_Tte
* trc_inst_tte
; // Same as inst_tte for tracing purpose
SS_Tte
* phys_tte_mem
; // The TTE used for MMU bypass, e.g. pa == pa, mode for RAM
SS_Tte
* phys_tte_io
; // The TTE used for MMU bypass, e.g. pa == pa, mode for I/O
SS_Tte
* phys_tte_mem_am
; // The TTE used for MMU bypass, e.g. pa == pa, mode for RAM in V8!?!
uint64_t mask_pstate_am
; // V8 addressing mode mask
SS_AsiReg inst_dft_asi
; // Default instruction ASI with priv mode
SS_AsiReg data_dft_asi
; // Default data ASI with priv mode
SS_PidContext inst_ctx
; // Keep track of partition id and context switching
SS_PidContext inst_ctx_pa
; // For pa2pa pid and ctx don't care
SS_PidContext inst_ctx_ra
; // For ra2pa ctx don't care
SS_PidContext inst_ctx_va
; // For va2pa we do care about pid and ctx
SS_PidContext data_ctx
; // Keep track of partition id and context switching
SS_Chain
* inst_tte_link
; // Inst TLB tte usage places
uint_t inst_tte_link_size
; // Inst TLB tte usage places size
SS_Chain phys_tte_link
; // Phys tte usage places (inst only)
SS_Chain
* data_tte_link
; // Data TLB tte usage places
uint_t data_tte_link_size
; // Data TLB tte usage places size
void (*sim_update
)( SS_Strand
* );
SS_InvalidAsi invalid_asi
;
const char* (*get_state_name
)( SS_Strand
*, SS_Registers::Index index
);
SS_Registers::Error (*get_state
)( SS_Strand
*, SS_Registers::Index index
, uint64_t* value
);
SS_Registers::Error (*set_state
)( SS_Strand
*, SS_Registers::Index index
, uint64_t value
);
// The change_running_from_snapshot can be set to signal to
// the change_running() routine that it was called after a
// restore from a dump (snapshot). change_running() should
// clear the flag when it sees it set.
bool change_running_from_snapshot
;
void (*change_running
)( SS_Strand
* );
bool trap_launch_ok( SS_Trap::Type tt
);
// In v8plus mode we have assembly routines that call trap. Trap is
// implemented in C. So in v8plus mode we have v9 assembly calling a
// v8plus piece of code. So we need to make a mode conversion between
// the assembly and the c code - note we don't want to recode the assembly.
// The same happens for a few other routines.
SS_Execute
* v8_exe_table
; // Pointers to tables of execute functions
SS_TrapFun v8_trap
; // v9 to v8plus convertor
SS_MmuTrap v8_inst_trap
; // ,,
SS_MmuTrap v8_data_trap
; // ,,
SS_DataMmu v8_data_mmu
; // ,,
SS_InvalidAsi v8_invalid_asi
; // ,,
SS_Execute v8_inst_dec
; // Pointer to the instruction decode function in v8 mode
// The internal_interrupt and external_interrupt routines deal with
// interrupts that come from crosscall or from devices. The external
// routine gets a 64 byte payload describing e.g. a sun4v interrupt,
// from which it derives the arguments to call the internal routine.
// The external routine creates an EXTERNAL_INTERRUPT signal that is
// posted with post_signal; this is to make it MP safe.
void (*internal_interrupt
)( SS_Strand
*, uint_t
, bool );
void (*external_interrupt
)( SS_Strand
*, uint64_t*, bool );
// TrapState is one entry on the trap stack.
BL_BoundedArray
<TrapState
,MAX_TL
+ 1> trap_state
;
BL_BoundedArray
<SS_BreakTrap
*,SS_Trap::MAX_TT
> break_trap
;
// Breakpoints on a specific trap
SS_BreakRedMode
*break_red_mode
; // Breakpoints on switch to red mode
SS_BreakInstVa
* break_inst_va
; // Breakpoints on instruction va
SS_BreakPoint
* break_points
; // The current set of breakpoints
SS_BreakPoint
* break_hit
;
// skip_break_inst_va is for the case when the pc matches in the inst_mmu
// routine rather than in the ss_break_inst_va_dec. In that case we get
// double hits ... this boolean is for ignoring the second hit.
// test_break_inst_va() returns true when at least one breakpoint
// matches the given va. When matching it will insert a break signal
// in the strand's signal queue that will cause exit of the loop.
bool test_break_inst_va( SS_Vaddr va
)
for (SS_BreakInstVa
* bp
= break_inst_va
; bp
; bp
= (SS_BreakInstVa
*)(bp
->link
))
if (bp
->break_inst_va(this,va
))
// near_break_inst_va() returns true when va matches when msk is applied
// first ... when the pc walks into a decode cacheline that has a breakpoint
bool near_break_inst_va( SS_Vaddr mask
, SS_Vaddr va
)
for (SS_BreakInstVa
* bp
= break_inst_va
; bp
; bp
= (SS_BreakInstVa
*)(bp
->link
))
if (bp
->check_inst_va(mask
,va
))
SS_AsiInfoTable asi_info
;
SS_InstrCache
* inst_cache
;
SS_Tracer
* trc_hook
; // Hook for putting different tracers, for trc_step.
SS_Strand
* run_next
; // Pointer to next strand that is in running mode
// wrf and grf (and Ecc) point to malloc'ed arrays and never move afterwards
uint64_t* wrf
; // The window registers
BL_EccBits
* wrf_ecc
; // ECC values for window regs
uint64_t* grf
; // The global registers
BL_EccBits
* grf_ecc
; // ECC values for global regs
BL_BoundedArray
<uint64_t,8> scratchpad
; // The privileged scratchpad registers
BL_BoundedArray
<uint64_t,8> hscratchpad
; // The hyper privileged scratchpad registers
SS_InstrCache
* inst_cache_va_nuc_user
; // Nucleus context inst cache for user
SS_InstrCache
* inst_cache_va_pri_user
; // Primary context inst cache for user
SS_InstrCache
* inst_cache_va_nuc_nuc_nuc_priv
; // Inst cache for va->pa priv code
SS_InstrCache
* inst_cache_va_nuc_nuc_sec_priv
; // Inst cache for va->pa priv code
SS_InstrCache
* inst_cache_va_nuc_pri_sec_priv
; // Inst cache for va->pa priv code
SS_InstrCache
* inst_cache_va_pri_priv
; // Inst cache for va->pa priv code
SS_InstrCache
* inst_cache_ra_nuc_user
; // Inst cache for user ra->pa code
SS_InstrCache
* inst_cache_ra_pri_user
; // Inst cache for user ra->pa code
SS_InstrCache
* inst_cache_ra_nuc_priv
; // Inst cache for priv ra->pa code
SS_InstrCache
* inst_cache_ra_pri_priv
; // Inst cache for priv ra->pa code
SS_InstrCache
* inst_cache_pa
; // Inst cache for hprv and red mode pa->pa
static void ss_sim_update( SS_Strand
* );
static SS_Vaddr
ss_trap( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
* s
, SS_Instr
* i
, SS_Trap::Type tt
);
static SS_AsiSpace::Error
scratchpad_ld64( SS_Node
*, void*, SS_Strand
*, SS_Vaddr
, uint64_t* );
static SS_AsiSpace::Error
scratchpad_st64( SS_Node
*, void*, SS_Strand
*, SS_Vaddr
, uint64_t );
static SS_AsiSpace::Error
lsu_ctr_st64 ( SS_Node
*, void*, SS_Strand
*, SS_Vaddr
, uint64_t );
// irq is the structure that holds all the disrupting traps until they are taken.
// flush_va() is for va watchpoint and va breakpoint detection
void flush_va( SS_Vaddr
);
// flush() is mainly useful for frontends to flush a particular pa
// when code has been written to memory throu the FPE
static const uint64_t RAS_TTE_POISON
= 1;
void flush( SS_Paddr
, bool for_ras
=false );
void setup_tte_link_tables();
void flush_tte( SS_Tlb
* tlb
, SS_Tte
* );
void post_signal( SS_Signal
* sgn
) { msg
.post_signal(sgn
); }
// The swig interface is a bit anoying for the get/set_state methods.
// So we provide SWIG (Python) two members to hold errors returned.
// This is a workaround, need to look into this to fix (see SWIG pointer.i file)
SS_Registers::Error reg_error
;
SS_AsiSpace::Error asi_error
;
// cnv2pa is for frontend purposes and translates a given va/ra with optional
// given context and optional given partition id to a physical address.
// The default context is primary_context[0] .
Sam::Vcpu::TranslateError (*cnv2pa
)( SS_Strand
*, Sam::Vcpu::TranslateMode
, SS_Vaddr
, uint64_t ctx
, uint64_t pid
, SS_Paddr
* pa
);
// Hooks for IRF/FRF ras support
SS_RasIrfSrc ras_rs1
; // IRF
SS_RasFrfSrc ras_frs1
; // Single precison FP
SS_RasDrfSrc ras_drs1
; // Double precison FP
// The ras_enable routine is used to turn on RAS features
void (*ras_enable
)( SS_Strand
*, char* cmd
);
static void default_ras_enable( SS_Strand
*, char* cmd
);
void (*run_perf
) ( SS_Strand
* s
, Sam::Vcpu::perfcntr which
, int64_t incr
);
//============================================================================
// For cosim mode we need a few hooks for irq, tlb and other syncing that goes
// on between the reference and device under test. All those hooks go here
//============================================================================
std::map
<SS_Registers::Index
,uint64_t> *ctr_sync
;
void (*irq_store
)( void* irq_sync
, SS_Trap::Type irq_type
, bool do_time_out
);
void (*inst_tlb_read
)( void* tlb_sync
);
void inst_tlb_set( SS_Tlb
* );
int (*inst_tlb_write
)( void* tlb_sync
);
void (*inst_tlb_lookup
)( void* tlb_sync
);
void (*data_tlb_read
)( void* tlb_sync
);
int (*data_tlb_write
)( void* tlb_sync
);
void (*data_tlb_lookup
)( void* tlb_sync
);
SS_Trap::Type (*inst_hwtw
)( SS_Strand
* strand
, SS_Vaddr va
, int_t entry
);
SS_Trap::Type (*data_hwtw
)( SS_Strand
* strand
, SS_Vaddr va
, uint8_t asi
, int_t entry
);
bool inst_mmu_error
; // Used in cosim to sync trap 0x71
bool data_mmu_error
; // Used in cosim to sync trap 0x72
void *asi_ext_obj
; // The object that have the external call back function
SS_AsiSpace::Error (*asi_ext_ld64_fp
)( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr va
, uint64_t* data
);
SS_AsiSpace::Error (*asi_ext_st64_fp
)( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr va
, uint64_t data
);
static SS_AsiSpace::Error
asi_ext_ld64( SS_Node
* a
, void* b
, SS_Strand
* s
, SS_Vaddr va
, uint64_t* data
);
static SS_AsiSpace::Error
asi_ext_st64( SS_Node
* a
, void* b
, SS_Strand
* s
, SS_Vaddr va
, uint64_t data
);
// Default implementation behind the run_perf function-pointer member
// (performance-counter update hook: strand, counter id, increment).
// FIX: the original declaration was qualified as "SS_Strand::ss_run_perf";
// an extra class qualification on an in-class member declaration is
// ill-formed C++ and is rejected by modern compilers (GCC "extra
// qualification" error). The qualifier is removed; the entity declared
// is unchanged.
static void ss_run_perf( SS_Strand* s,
                         Sam::Vcpu::perfcntr which,
                         int64_t incr );
static const char* ss_get_state_name( SS_Strand
*, SS_Registers::Index index
);
static SS_Registers::Error
ss_get_state( SS_Strand
*, SS_Registers::Index index
, uint64_t* value
);
static SS_Registers::Error
ss_set_state( SS_Strand
*, SS_Registers::Index index
, uint64_t value
);
inline void SS_Strand::cwp_save()/*{{{*/
uint64_t* q
= &wrf
[cwp()*16];
if (!sim_state
.ras_enabled())
BL_EccBits
* q_ecc
= &wrf_ecc
[cwp()*16];
q_ecc
[i
] = irf_ecc
[i
+ 16];
inline void SS_Strand::cwp_load()/*{{{*/
uint64_t* q
= &wrf
[cwp()*16];
BL_EccBits
* q_ecc
= &wrf_ecc
[cwp()*16];
if (!sim_state
.ras_enabled())
irf_ecc
[i
+ 16] = q_ecc
[i
];
irf_ecc
[i
+ 8] = q_ecc
[i
+ 8];
inline void SS_Strand::do_save_inst()/*{{{*/
uint64_t* q
= &wrf
[cwp()*16];
if (!sim_state
.ras_enabled())
irf
[i
+ 24] = irf
[i
+ 8];
cwp
= (cwp() < max_wp()) ? (cwp() + 1) : 0;
cansave
= cansave() ? (cansave() - 1) : max_wp();
canrestore
= (canrestore() < max_wp()) ? (canrestore() + 1) : 0;
BL_EccBits
* q_ecc
= &wrf_ecc
[cwp()*16];
q_ecc
[i
] = irf_ecc
[i
+ 16];
irf
[i
+ 24] = irf
[i
+ 8];
irf_ecc
[i
+ 24] = irf_ecc
[i
+ 8];
cwp
= (cwp() < max_wp()) ? (cwp() + 1) : 0;
cansave
= cansave() ? (cansave() - 1) : max_wp();
canrestore
= (canrestore() < max_wp()) ? (canrestore() + 1) : 0;
q_ecc
= &wrf_ecc
[cwp()*16];
irf_ecc
[i
+ 16] = q_ecc
[i
];
irf_ecc
[i
+ 8] = q_ecc
[i
+ 8];
inline void SS_Strand::do_restore_inst()/*{{{*/
uint64_t* q
= &wrf
[cwp()*16];
BL_EccBits
* q_ecc
= &wrf_ecc
[cwp()*16];
if (!sim_state
.ras_enabled())
cwp
= cwp() ? (cwp() - 1) : max_wp();
cansave
= (cansave() < max_wp()) ? (cansave() + 1) : 0;
canrestore
= canrestore() ? (canrestore() - 1) : max_wp();
irf
[i
+ 8] = irf
[i
+ 24];
q_ecc
[i
] = irf_ecc
[16 + i
];
cwp
= cwp() ? (cwp() - 1) : max_wp();
cansave
= (cansave() < max_wp()) ? (cansave() + 1) : 0;
canrestore
= canrestore() ? (canrestore() - 1) : max_wp();
q_ecc
= &wrf_ecc
[cwp()*16];
irf
[i
+ 8] = irf
[i
+ 24];
irf_ecc
[i
+ 8] = irf_ecc
[i
+ 24];
irf_ecc
[i
+ 24] = q_ecc
[i
+ 8];
irf_ecc
[i
+ 16] = q_ecc
[i
];
inline void SS_Strand::gl_save()/*{{{*/
uint64_t* q
= &grf
[gl() * 8];
if (sim_state
.ras_enabled())
BL_EccBits
* q_ecc
= &grf_ecc
[gl() * 8];
inline void SS_Strand::gl_load()/*{{{*/
uint64_t* q
= &grf
[gl() * 8];
if (sim_state
.ras_enabled())
BL_EccBits
* q_ecc
= &grf_ecc
[gl() * 8];