// ========== Copyright Header Begin ==========================================
// OpenSPARC T2 Processor File: N2_Strand.cc
// Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
// The above named program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public
// License version 2 as published by the Free Software Foundation.
// The above named program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// You should have received a copy of the GNU General Public
// License along with this work; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
// ========== Copyright Header End ============================================
#include "N2_Registers.h"
#include "BL_BitUtility.h"
inline SS_Vaddr
va_signext( SS_Vaddr va )/*{{{*/
{
  // va_signext() sign extends the given virtual address from VA_BITS to
  // 64 bits: shift bit VA_BITS-1 up into the sign position, then shift
  // back down so it is replicated through all the upper bits.
  const int shift = 64 - VA_BITS;
  return (va << shift) >> shift;
}
inline bool
va_inrange( SS_Vaddr va )/*{{{*/
{
  // va_inrange() checks that va does not fall in the so-called va-hole.
  // N2 only implements 48 (VA_BITS) bits of the 64 bit virtual (real)
  // address space, and virtual addresses are signed, so the upper 16 bits
  // must be a proper sign extension of bit 47. I.e. we check that
  //   0x0000000000000000 < va <= 0x00007fffffffffff
  // or
  //   0xffff800000000000 < va <= 0xffffffffffffffff
  SS_Vaddr top = va >> (VA_BITS - 1);
  return (top == SS_Vaddr(0)) || (top == -SS_Vaddr(1));
}
inline bool
pc_inrange( SS_Vaddr va )/*{{{*/
{
  // pc_inrange() checks the same range as va_inrange() with an additional
  // 0x20 bytes taken off the positive end of the range. I.e. we check that
  //   0x0000000000000000 < va <= 0x00007fffffffffdf
  // or
  //   0xffff800000000000 < va <= 0xffffffffffffffff
  SS_Vaddr pos = (va + SS_Vaddr(0x20)) >> (VA_BITS - 1);
  SS_Vaddr neg = va >> (VA_BITS - 1);
  return (pos == SS_Vaddr(0)) || (neg == -SS_Vaddr(1));
}
inline bool
pc_onrange( SS_Vaddr va )/*{{{*/
{
  // pc_onrange() checks the same range as pc_inrange() but instead with one
  // whole decode cache line (0x40 bytes) taken off the positive end of the
  // range. I.e. we check that
  //   0x0000000000000000 < va <= 0x00007fffffffffbf
  // or
  //   0xffff800000000000 < va <= 0xffffffffffffffff
  SS_Vaddr pos = (va + SS_Vaddr(SS_InstrCache::LINE_SIZE * 4)) >> (VA_BITS - 1);
  SS_Vaddr neg = va >> (VA_BITS - 1);
  return (pos == SS_Vaddr(0)) || (neg == -SS_Vaddr(1));
}
inline bool
pc_iorange( SS_Vaddr pc )/*{{{*/
{
  // I/O address ranges 0xff00000000:0xffffffffff and 0xa000000000:0xbfffffffff
  // are the only I/O regions from which instruction fetches are ok:
  // either the top byte of the PA is 0xff, or the top three bits are 0b101.
  const bool top_byte_ff  = (((pc >> (PA_BITS - 8)) & 0xff) == 0xff);
  const bool top_bits_101 = (((pc >> (PA_BITS - 3)) & 0x7)  == 0x5);
  return top_byte_ff || top_bits_101;
}
// n2_exe_real_range() is the exe trampoline installed on the trailing words of
// the last positive-range decode cache line (see n2_dec_real_range below):
// executing from here delivers an INSTRUCTION_REAL_RANGE trap via inst_trap.
extern "C" SS_Vaddr
n2_exe_real_range( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
* s
, SS_Instr
* i
)/*{{{*/
// First check if we have a UI breakpoint on the little address range
if (s
->test_break_inst_va(pc
))
// NOTE(review): the consequent of the breakpoint test above appears to be
// elided in this chunk — confirm against the upstream source.
N2_Strand
* n2
= (N2_Strand
*)s
;
// Record the fetch address (context 0) in the tag-access register before trapping.
n2
->inst_tag_update(0,pc
);
// NOTE(review): SS_Trap::SS_Trap:: below is a redundant injected-class-name
// qualifier; other call sites (e.g. n2_exe_va_watchpoint) use plain SS_Trap::.
return (s
->inst_trap
)(pc
,npc
,s
,i
,va_signext(pc
),SS_Trap::SS_Trap::INSTRUCTION_REAL_RANGE
);
// n2_exe_address_range() is the exe trampoline installed on the trailing words
// of the last positive-range decode cache line (see n2_dec_address_range):
// executing from here delivers an INSTRUCTION_ADDRESS_RANGE trap via inst_trap.
extern "C" SS_Vaddr
n2_exe_address_range( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
* s
, SS_Instr
* i
)/*{{{*/
// First check if we have a UI breakpoint on the little address range
if (s
->test_break_inst_va(pc
))
// NOTE(review): the consequent of the breakpoint test above appears to be
// elided in this chunk — confirm against the upstream source.
N2_Strand
* n2
= (N2_Strand
*)s
;
// Context 0 is the nucleus context when trapped (tl != 0), else primary context 0.
uint64_t ctxt0
= n2
->tl() ? 0 : n2
->primary_context
[0]();
// NOTE(review): ctxt0 is computed above but the visible tag update passes a
// literal 0 — presumably the original passes ctxt0 here; verify upstream.
n2
->inst_tag_update(0,pc
);
// NOTE(review): SS_Trap::SS_Trap:: below is a redundant injected-class-name
// qualifier; other call sites use plain SS_Trap::.
return (s
->inst_trap
)(pc
,npc
,s
,i
,va_signext(pc
),SS_Trap::SS_Trap::INSTRUCTION_ADDRESS_RANGE
);
// n2_exe_va_watchpoint() is the exe trampoline installed on the exact decode
// cache slot that matches the instruction VA watchpoint (see
// n2_dec_address_range / n2_dec_va_watchpoint): executing it delivers an
// INSTRUCTION_VA_WATCHPOINT trap (priority 2.5).
extern "C" SS_Vaddr
n2_exe_va_watchpoint( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
* s
, SS_Instr
* i
)/*{{{*/
// First check if we have a UI breakpoint on the watchpoint address
if (s
->test_break_inst_va(pc
))
// NOTE(review): the consequent of the breakpoint test above appears to be
// elided in this chunk — confirm against the upstream source.
N2_Strand
* n2
= (N2_Strand
*)s
;
// In 32bit mode (pstate.am) the pc is clipped to the 32bit address space.
SS_Vaddr va
= pc
& n2
->mask_pstate_am
;
// Context 0 is the nucleus context when trapped (tl != 0), else primary context 0.
uint64_t ctxt0
= n2
->tl() ? 0 : n2
->primary_context
[0]();
// Make sure we handle va watchpoint enable/disable properly. E.g
// when va watchpoint info is changed we flush the cache. We only
// expect to get here when they are enabled and when they should hit.
assert(n2
->inst_watchpoint_va_hit(va
));
return (n2
->inst_trap
)(pc
,npc
,s
,i
,va
,SS_Trap::INSTRUCTION_VA_WATCHPOINT
); // prio 2.5
// n2_dec_real_range() is a trampoline decoder used when a fetched cache line
// straddles the positive end of the real address range: it points the first
// half of the line at the normal breakpoint-aware decoder and the second half
// at n2_exe_real_range (which raises INSTRUCTION_REAL_RANGE), then executes pc.
extern "C" SS_Vaddr
n2_dec_real_range( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
*s
, SS_Instr
* i
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// Grab the decode cache line that holds pc.
SS_Instr
* line
= n2
->inst_cache
->pc_line(pc
);
// For the last cacheline of the positive range of the address space we
// have to be carefull what we do. For 0x7fffffffffc0 to 0x7fffffffdf
// we have to decode as normal, for 0x7fffffffe0 to 0x7fffffffff we have
// to throw real range traps. In either case make sure we check UI break
// NOTE(review): the declaration of loop index l appears to be elided in
// this chunk — confirm against the upstream source.
for (l
= 0; l
< (SS_InstrCache::LINE_SIZE
>> 1); l
++)
line
->line_index(l
)->exe
= ss_break_inst_va_dec
;
// Second half of the line: executing any of these words must trap.
for (; l
< SS_InstrCache::LINE_SIZE
; l
++)
line
->line_index(l
)->exe
= n2_exe_real_range
;
// Dispatch the current instruction through whichever handler was installed.
return (i
->exe
)(pc
,npc
,s
,i
);
// n2_dec_address_range() is a trampoline decoder used when a fetched cache
// line straddles the positive end of the virtual address range: the first half
// of the line decodes normally, the second half raises INSTRUCTION_ADDRESS_RANGE,
// and a matching VA watchpoint slot (higher priority) is patched in on top.
extern "C" SS_Vaddr
n2_dec_address_range( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
*s
, SS_Instr
* i
)/*{{{*/
// The half-line split below is hardwired to a 16-entry decode cache line.
assert(SS_InstrCache::LINE_SIZE
== 16);
// For the last cacheline of the positive range of the address space we
// have to be carefull what we do. For 0x7fffffffffc0 to 0x7fffffffdf
// we have to decode as normal, for 0x7fffffffe0 to 0x7fffffffff we have
// to throw address range traps. However the va watchpoint has to be
// checked before that as it has higher priority then address range trap.
// Make sure that we check UI breakpoints first though.
N2_Strand
* n2
= (N2_Strand
*)s
;
SS_Instr
* line
= n2
->inst_cache
->pc_line(pc
);
// NOTE(review): the declaration of loop index l appears to be elided in
// this chunk — confirm against the upstream source.
for (l
= 0; l
< (SS_InstrCache::LINE_SIZE
>> 1); l
++)
line
->line_index(l
)->exe
= ss_break_inst_va_dec
;
for (; l
< SS_InstrCache::LINE_SIZE
; l
++)
line
->line_index(l
)->exe
= n2_exe_address_range
;
// In 32bit mode (pstate.am) the pc is clipped to the 32bit address space.
SS_Vaddr va
= pc
& n2
->mask_pstate_am
;
// tm is the (negative) mask that aligns an address down to a decode line.
SS_Vaddr tm
= -(SS_InstrCache::LINE_SIZE
* 4);
// If va watchpoint checks are enabled and the they happen in this
// cache line then set exe of the location that causes the va watchpoint
// trap to the routine that does that.
if (n2
->inst_watchpoint_va_near_hit(tm
,va
))
l
= (n2
->inst_watchpoint_va_get() >> 2) & SS_InstrCache::LINE_MASK
;
line
->line_index(l
)->exe
= n2_exe_va_watchpoint
;
// Dispatch the current instruction through whichever handler was installed.
return (i
->exe
)(pc
,npc
,s
,i
);
// n2_dec_va_watchpoint() is a trampoline decoder used when a fetched cache
// line contains the instruction VA watchpoint address but pc itself did not
// hit it: restore the normal decoder for the whole line, then patch only the
// watchpoint slot with n2_exe_va_watchpoint, and execute pc.
extern "C" SS_Vaddr
n2_dec_va_watchpoint( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
*s
, SS_Instr
* i
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
SS_Instr
* line
= n2
->inst_cache
->pc_line(pc
);
// On an inst_mmu_va access we can cause a fetch of a cacheline that has
// a watchpoint enabled in it, but the pc that caused the fetch did not match.
// The mmu makes us trampoline through this routine. So set the decoders back
// to the normal decoders and insert a watchpoint trigger in the correct place.
// NOTE(review): the declaration of loop index l appears to be elided in
// this chunk — confirm against the upstream source.
for (l
=0; l
< SS_InstrCache::LINE_SIZE
; l
++)
line
->line_index(l
)->exe
= ss_break_inst_va_dec
;
// In 32bit mode (pstate.am) the pc is clipped to the 32bit address space.
SS_Vaddr va
= pc
& n2
->mask_pstate_am
;
// tm is the (negative) mask that aligns an address down to a decode line.
SS_Vaddr tm
= -(SS_InstrCache::LINE_SIZE
* 4);
// Make sure we only get here when watchpoint are truely enabled and fall
assert(n2
->inst_watchpoint_va_near_hit(tm
,va
));
// Word index of the watchpoint inside the line (instructions are 4 bytes).
l
= (n2
->inst_watchpoint_va_get() >> 2) & SS_InstrCache::LINE_MASK
;
line
->line_index(l
)->exe
= n2_exe_va_watchpoint
;
// Dispatch the current instruction through whichever handler was installed.
return (i
->exe
)(pc
,npc
,s
,i
);
N2_Strand::N2_Strand( N2_Core
& _core
, const char* _name
, uint_t _strand_id
)/*{{{*/
SS_Strand(_core
,_name
,run_exe_table
,mem_run_table
,mem_trc_table
,_core
.cpu
.mem_err_detector
),
trap_dae_inv_asi(SS_Trap::RESERVED
), // UNSUPPORTED_PAGE_SIZE ... an exception to the norm ...
dec_table
= &run_dec_xx_xxxxxx
;
exe_table
= run_exe_table
;
mem_table
= mem_run_table
;
v8_exe_table
= exe_table
;
exe_table
= v8_run_exe_table
;
get_state_name
= n2_get_state_name
;
get_state
= n2_get_state
;
set_state
= n2_set_state
;
data_wp_pa_mask
= (SS_Paddr(1) << pa_bits()) - 8;
data_wp_va_mask
= (SS_Vaddr(1) << va_bits()) - 8;
SS_Paddr io_mask
= SS_Paddr(1) << (pa_bits() - 1);
phys_tte_mem
->phys_mask
= ~(io_mask
- SS_Paddr(1));
phys_tte_mem
->phys_page
= SS_Paddr(0);
phys_tte_mem
->virt_mask
= io_mask
;
phys_tte_mem
->virt_page
= SS_Paddr(0);
phys_tte_mem_am
->phys_mask
= ~SS_Paddr(0) << 32;
phys_tte_mem_am
->phys_page
= SS_Paddr(0);
phys_tte_mem_am
->virt_mask
= io_mask
;
phys_tte_mem_am
->virt_page
= SS_Paddr(0);
phys_tte_io
->phys_mask
= ~(io_mask
- SS_Paddr(1));
phys_tte_io
->phys_page
= io_mask
;
phys_tte_io
->virt_mask
= io_mask
;
phys_tte_io
->virt_page
= io_mask
;
trap
= (SS_TrapFun
)ss_trap
;
inst_mmu
= (SS_InstMmu
)n2_inst_mmu_pa
;
inst_mmu_va
= (SS_InstMmu
)n2_inst_mmu_va
;
inst_mmu_ra
= (SS_InstMmu
)n2_inst_mmu_ra
;
inst_mmu_pa
= (SS_InstMmu
)n2_inst_mmu_pa
;
data_mmu
= (SS_DataMmu
)n2_data_mmu
;
inst_trap
= (SS_MmuTrap
)n2_inst_trap
;
data_trap
= (SS_MmuTrap
)n2_data_trap
;
invalid_asi
= (SS_InvalidAsi
)n2_invalid_asi
;
internal_interrupt
= n2_internal_interrupt
;
external_interrupt
= n2_external_interrupt
;
ras_enable
= n2_ras_enable
;
inst_tlb
= &core
.inst_tlb
;
data_tlb
= &core
.data_tlb
;
if ((strand_id() % N2_Model::NO_STRANDS_PER_CPU
) == 0)
core
.inst_tlb
.add_strand(this);
core
.data_tlb
.add_strand(this);
inst_hwtw
= n2_inst_hwtw
;
data_hwtw
= n2_data_hwtw
;
new(&tstate
) N2_Tstate(); // N2 uses one bit less for the gl field
new(&lsu_ctr
) N2_LsuCtr(); // ToDo: N2 need to have its own lsu ctr, should not be in SS_Strand.
new(&gl
) N2_Gl(); // N2 gl uses [3:0] instead of [2:0]
core_id
.max_core_id(0x3f);
core_id
.max_strand_id(0x7);
core_id
.core_id(strand_id());
core_intr_id
.intr_id_hi(0);
core_intr_id
.intr_id_lo(strand_id());
// the first strand of each node (i.e., cpu) should be in running state
sim_state
.running((strand_id() % N2_Model::NO_STRANDS_PER_CPU
) == 0);
// Set the trap priorities for some traps to the correct number
SS_Trap::table
[SS_Trap::ILLEGAL_INSTRUCTION
].priority
= 61;
SS_Trap::table
[SS_Trap::INSTRUCTION_BREAKPOINT
].priority
= 62;
// Add the N2 specific translating asi info to the asi_info table.
// Note block init stores are not valid for floating point stores.
SS_AsiInfo::Flags ldst
= SS_AsiInfo::QUAD_LOAD
| SS_AsiInfo::BLOCK_INIT
| SS_AsiInfo::CLASS_STX
| SS_AsiInfo::CLASS_ST
;
SS_AsiInfo::Flags ldst_p
= ldst
| SS_AsiInfo::PRIMARY
;
SS_AsiInfo::Flags ldst_s
= ldst
| SS_AsiInfo::SECONDARY
;
SS_AsiInfo::Flags ldst_n
= ldst
| SS_AsiInfo::NUCLEUS
| SS_AsiInfo::PRIVILEGED
;
SS_AsiInfo::Flags ldst_pl
= ldst_p
| SS_AsiInfo::LITTLE_ENDIAN
;
SS_AsiInfo::Flags ldst_sl
= ldst_s
| SS_AsiInfo::LITTLE_ENDIAN
;
SS_AsiInfo::Flags ldst_nl
= ldst_n
| SS_AsiInfo::LITTLE_ENDIAN
;
SS_AsiInfo::Flags ldst_aiup
= ldst_p
| SS_AsiInfo::AS_IF_USER
| SS_AsiInfo::PRIVILEGED
;
SS_AsiInfo::Flags ldst_aius
= ldst_s
| SS_AsiInfo::AS_IF_USER
| SS_AsiInfo::PRIVILEGED
;
SS_AsiInfo::Flags ldst_aiupl
= ldst_pl
| SS_AsiInfo::AS_IF_USER
| SS_AsiInfo::PRIVILEGED
;
SS_AsiInfo::Flags ldst_aiusl
= ldst_sl
| SS_AsiInfo::AS_IF_USER
| SS_AsiInfo::PRIVILEGED
;
ldst_p
= ldst_p
| SS_AsiInfo::BYPASS
;
ldst_s
= ldst_s
| SS_AsiInfo::BYPASS
;
ldst_n
= ldst_n
| SS_AsiInfo::BYPASS
;
ldst_pl
= ldst_pl
| SS_AsiInfo::BYPASS
;
ldst_sl
= ldst_sl
| SS_AsiInfo::BYPASS
;
ldst_nl
= ldst_nl
| SS_AsiInfo::BYPASS
;
asi_info
[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_PRIMARY
].set_flags(ldst_aiup
);
asi_info
[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_SECONDAY
].set_flags(ldst_aius
);
asi_info
[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_PRIMARY_LITTLE
].set_flags(ldst_aiupl
);
asi_info
[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_SECONDAY_LITTLE
].set_flags(ldst_aiusl
);
asi_info
[N2_Asi::ASI_NUCLEUS_BLOCK_INIT_ST_QUAD_LDD
].set_flags(ldst_n
);
asi_info
[N2_Asi::ASI_NUCLEUS_BLOCK_INIT_ST_QUAD_LDD_LITTLE
].set_flags(ldst_nl
);
asi_info
[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_PRIMARY
].set_flags(ldst_p
);
asi_info
[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_SECONDAY
].set_flags(ldst_s
);
asi_info
[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_PRIMARY_LITTLE
].set_flags(ldst_pl
);
asi_info
[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_SECONDAY_LITTLE
].set_flags(ldst_sl
);
// Map in all the strand specific non translating asi/va
// mapped registers or address ranges
SS_Node
* INST_SIDE
= (SS_Node
*)0; // Used in one of the two void* arguments of ASI access to
SS_Node
* DATA_SIDE
= (SS_Node
*)1; // distinguish between instruction side of data side
SS_Node
* VIRT_FLAG
= (SS_Node
*)0; // Used in one of the two void* arguments of ASI access to
SS_Node
* REAL_FLAG
= (SS_Node
*)1; // distinguish between virtual or real
// N2 PRM says that bit63 to bit48 of the va are ignored. So clip
// those of for all accesses.
asi_map
.set_mask((SS_Vaddr(1) << VA_BITS
) - 1);
asi_map
[0x20].add(0x00,0x18,this,&scratchpad
,
n2_scratchpad_ld64
,n2_scratchpad_st64
,
n2_scratchpad_ld64
,n2_scratchpad_st64
);
asi_map
[0x20].add(0x30,0x38,this,&scratchpad
,
n2_scratchpad_ld64
,n2_scratchpad_st64
,
n2_scratchpad_ld64
,n2_scratchpad_st64
);
asi_map
[0x21].add(0x008,this,&primary_context
[0],
SS_AsiCtrReg::ld64
,pri_ctx_st64
,
SS_AsiCtrReg::rd64
,pri_ctx_st64
);
asi_map
[0x21].add(0x010,this,&secondary_context
[0],
SS_AsiCtrReg::ld64
,sec_ctx_st64
,
SS_AsiCtrReg::rd64
,sec_ctx_st64
);
asi_map
[0x21].add(0x108,this,&primary_context
[1],
SS_AsiCtrReg::ld64
,pri_ctx_st64
,
SS_AsiCtrReg::rd64
,pri_ctx_st64
);
asi_map
[0x21].add(0x110,this,&secondary_context
[1],
SS_AsiCtrReg::ld64
,sec_ctx_st64
,
SS_AsiCtrReg::rd64
,sec_ctx_st64
);
asi_map
[0x25].add(0x3c0,0x3f8,this,0,
intr_queue_ld64
,intr_queue_st64
,
intr_queue_ld64
,intr_queue_st64
);
asi_map
[0x45].add(0x00,this,&lsu_ctr
,
SS_AsiCtrReg::ld64
,n2_lsu_ctr_st64
,
SS_AsiCtrReg::rd64
,n2_lsu_ctr_st64
);
asi_map
[0x48].add(0x00,0xf8,this,0,
asi_map
[0x49].add(0x00,0xf8,this,0,
asi_map
[0x4a].set_mask(0x38);
asi_map
[0x4a].add(0x00,0x1f8,this,0,
stb_access_ld64
,SS_AsiCtrReg::wr64
);
asi_map
[0x4c].add(0x00,this,&desr
,
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x4c].add(0x18,this,seter
);
asi_map
[0x4f].add(0x00,0x38,this,&scratchpad
,
n2_scratchpad_ld64
,n2_scratchpad_st64
,
n2_scratchpad_ld64
,n2_scratchpad_st64
);
asi_map
[0x50].add(0x00,this,&inst_tag_target
,
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x50].add(0x18,this,&inst_sfsr
,
SS_AsiCtrReg::ld64
,SS_AsiCtrReg::st64
,
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x50].add(0x30,this,&inst_tag_access
,
SS_AsiCtrReg::ld64
,tag_access_st64
,
SS_AsiCtrReg::rd64
,tag_access_st64
);
asi_map
[0x51].set_mask(0x38);
asi_map
[0x51].add(SS_VADDR_MIN
,SS_VADDR_MAX
,(SS_Node
*)0,0,
asi_map
[0x52].add(0x108,this,&real_range
[0],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x52].add(0x110,this,&real_range
[1],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x52].add(0x118,this,&real_range
[2],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x52].add(0x120,this,&real_range
[3],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x52].add(0x208,this,&physical_offset
[0],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x52].add(0x210,this,&physical_offset
[1],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x52].add(0x218,this,&physical_offset
[2],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x52].add(0x220,this,&physical_offset
[3],
SS_AsiCtrReg::ld64
,tsb_ra2pa_st64
,
SS_AsiCtrReg::rd64
,tsb_ra2pa_st64
);
asi_map
[0x53].add(SS_VADDR_MIN
,SS_VADDR_MAX
,this,0,
asi_map
[0x54].add(0x000,INST_SIDE
,VIRT_FLAG
,
asi_map
[0x54].add(0x400,INST_SIDE
,REAL_FLAG
,
asi_map
[0x54].add(0x010,this,&nucleus_tsb_config
[0],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x018,this,&nucleus_tsb_config
[1],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x020,this,&nucleus_tsb_config
[2],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x028,this,&nucleus_tsb_config
[3],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x030,this,&non_nucleus_tsb_config
[0],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x038,this,&non_nucleus_tsb_config
[1],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x040,this,&non_nucleus_tsb_config
[2],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x048,this,&non_nucleus_tsb_config
[3],
SS_AsiCtrReg::ld64
,tsb_config_st64
,
SS_AsiCtrReg::rd64
,tsb_config_st64
);
asi_map
[0x54].add(0x050,this,&inst_tsb_pointer
[0],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x058,this,&inst_tsb_pointer
[1],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x060,this,&inst_tsb_pointer
[2],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x068,this,&inst_tsb_pointer
[3],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x070,this,&data_tsb_pointer
[0],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x078,this,&data_tsb_pointer
[1],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x080,this,&data_tsb_pointer
[2],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x088,this,&data_tsb_pointer
[3],
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x54].add(0x090,this,&tw_control
,
SS_AsiCtrReg::ld64
,tw_control_st64
,
SS_AsiCtrReg::rd64
,tw_control_st64
);
asi_map
[0x55].add(0x000,0x1ff,INST_SIDE
,0,
tlb_data_access_ld64
,tlb_data_access_st64
,
tlb_data_access_ld64
,tlb_data_access_st64
);
asi_map
[0x55].add(0x400,0x5ff,INST_SIDE
,0,
tlb_data_access_ld64
,tlb_data_access_st64
,
tlb_data_access_ld64
,tlb_data_access_st64
);
asi_map
[0x56].add(0x00,0x7ff,INST_SIDE
,0,
asi_map
[0x57].add(SS_VADDR_MIN
,SS_VADDR_MAX
,this,0,
asi_map
[0x58].add(0x000,this,&data_tag_target
,
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x58].add(0x018,this,data_sfsr
);
asi_map
[0x58].add(0x020,this,&data_sfar
,
SS_AsiCtrReg::rd64
,SS_AsiCtrReg::wr64
);
asi_map
[0x58].add(0x030,this,&data_tag_access
,
SS_AsiCtrReg::ld64
,tag_access_st64
,
SS_AsiCtrReg::rd64
,tag_access_st64
);
asi_map
[0x58].add(0x038,this,&data_wp
,
SS_AsiCtrReg::ld64
,data_wp_st64
,
SS_AsiCtrReg::ld64
,data_wp_st64
);
asi_map
[0x58].add(0x040,this,hwtw_config
);
asi_map
[0x58].add(0x080,this,&partition_id
,
SS_AsiCtrReg::ld64
,partition_id_st64
,
SS_AsiCtrReg::rd64
,partition_id_st64
);
asi_map
[0x59].add(0x00,0x78,(SS_Node
*)0,0,
scratchpad_access_ld64
,0,
scratchpad_access_ld64
,0);
asi_map
[0x5a].add(0x00,0x10,(SS_Node
*)0,0,
asi_map
[0x5a].add(0x20,0x30,(SS_Node
*)0,0,
asi_map
[0x5b].set_mask(0x38);
asi_map
[0x5b].add(SS_VADDR_MIN
,SS_VADDR_MAX
,(SS_Node
*)0,0,
asi_map
[0x5c].add(0x000,DATA_SIDE
,VIRT_FLAG
,
asi_map
[0x5c].add(0x400,DATA_SIDE
,REAL_FLAG
,
asi_map
[0x5d].add(0x00,0x7ff,DATA_SIDE
,0,
tlb_data_access_ld64
,tlb_data_access_st64
,
tlb_data_access_ld64
,tlb_data_access_st64
);
asi_map
[0x5e].add(0x00,0x7ff,DATA_SIDE
,0,
asi_map
[0x5f].add(SS_VADDR_MIN
,SS_VADDR_MAX
,this,0,
asi_map
[0x63].add(0x00,this,&core_intr_id
,
asi_map
[0x63].add(0x10,this,&core_id
,
asi_map
[0x72].add(0x0,this,0,
intr_recv_ld64
,intr_recv_st64
,
intr_recv_ld64
,intr_recv_wr64
);
asi_map
[0x74].add(0x0,this,0,
// n2_get_state_name() resolves a register index to its printable name for an
// N2 strand; ASR_PCR/ASR_PIC are handled here, everything else falls through
// to the generic ss_get_state_name().
const char* N2_Strand::n2_get_state_name( SS_Strand
* s
, SS_Registers::Index index
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// NOTE(review): the switch head (and the case bodies) around the labels
// below appear to be elided in this chunk — confirm against upstream.
case SS_Registers::ASR_PCR
:
case SS_Registers::ASR_PIC
:
// Default: delegate to the common SS_Strand implementation.
return ss_get_state_name(s
,index
);
// n2_get_state() reads the value of a register by index for an N2 strand;
// ASR_PCR/ASR_PIC are handled here, everything else falls through to the
// generic ss_get_state().
SS_Registers::Error
N2_Strand::n2_get_state( SS_Strand
* s
, SS_Registers::Index index
, uint64_t* value
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// NOTE(review): the switch head (and the case bodies) around the labels
// below appear to be elided in this chunk — confirm against upstream.
case SS_Registers::ASR_PCR
:
case SS_Registers::ASR_PIC
:
// Default: delegate to the common SS_Strand implementation.
return ss_get_state(s
,index
,value
);
// n2_set_state() writes the value of a register by index for an N2 strand;
// ASR_PCR/ASR_PIC are handled here, everything else falls through to the
// generic ss_set_state().
SS_Registers::Error
N2_Strand::n2_set_state( SS_Strand
* s
, SS_Registers::Index index
, uint64_t value
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// NOTE(review): the switch head (and the case bodies) around the labels
// below appear to be elided in this chunk — confirm against upstream.
case SS_Registers::ASR_PCR
:
case SS_Registers::ASR_PIC
:
// Default: delegate to the common SS_Strand implementation.
return ss_set_state(s
,index
,value
);
void N2_Strand::snapshot( SS_SnapShot
& ss
)/*{{{*/
sprintf(ss
.tag
,"%s.%s",prefix
,pcr
.name()); pcr
.snapshot(ss
);
sprintf(ss
.tag
,"%s.%s",prefix
,pic
.name()); pic
.snapshot(ss
);
lsu_ctr
.snapshot(ss
,prefix
);
// fpu does not require snapshot.
core_intr_id
.snapshot(ss
,prefix
);
core_id
.snapshot(ss
,prefix
);
SS_AsiCtrReg::snapshot(ss
,primary_context
,2,prefix
,"pri");
SS_AsiCtrReg::snapshot(ss
,secondary_context
,2,prefix
,"sec");
inst_tag_target
.snapshot(ss
,prefix
,"inst");
inst_tag_access
.snapshot(ss
,prefix
,"inst");
data_tag_target
.snapshot(ss
,prefix
,"data");
data_tag_access
.snapshot(ss
,prefix
,"data");
partition_id
.snapshot(ss
,prefix
);
SS_AsiCtrReg::snapshot(ss
,real_range
,4,prefix
);
SS_AsiCtrReg::snapshot(ss
,physical_offset
,4,prefix
);
SS_AsiCtrReg::snapshot(ss
,nucleus_tsb_config
,4,prefix
,"nuc");
SS_AsiCtrReg::snapshot(ss
,non_nucleus_tsb_config
,4,prefix
,"non");
SS_AsiCtrReg::snapshot(ss
,inst_tsb_pointer
,4,prefix
,"inst");
SS_AsiCtrReg::snapshot(ss
,data_tsb_pointer
,4,prefix
,"data");
hwtw_config
.snapshot(ss
,prefix
);
tw_control
.snapshot(ss
,prefix
);
inst_sfsr
.snapshot(ss
,prefix
);
data_sfsr
.snapshot(ss
,prefix
);
data_sfar
.snapshot(ss
,prefix
);
data_wp
.snapshot(ss
,prefix
);
cpu_mondo_head
.snapshot(ss
,prefix
,"cpu_mondo_head");
cpu_mondo_tail
.snapshot(ss
,prefix
,"cpu_mondo_tail");
dev_mondo_head
.snapshot(ss
,prefix
,"dev_mondo_head");
dev_mondo_tail
.snapshot(ss
,prefix
,"dev_mondo_tail");
resumable_head
.snapshot(ss
,prefix
,"resumable_head");
resumable_tail
.snapshot(ss
,prefix
,"resumable_tail");
non_resumable_head
.snapshot(ss
,prefix
,"non_resumable_head");
non_resumable_tail
.snapshot(ss
,prefix
,"non_resumable_tail");
sprintf(ss
.tag
,"%s.intr_recv",prefix
); ss
.val(&intr_recv
);
intr_r
.snapshot(ss
,prefix
);
seter
.snapshot(ss
,prefix
);
desr
.snapshot(ss
,prefix
);
sprintf(ss
.tag
,"%s.tw_status",prefix
); ss
.val(&tw_status
);
for (int r
= 0; r
< 4; r
++)
N2_TsbConfig
& zc
= nucleus_tsb_config
[r
];
N2_TsbConfig
& nc
= non_nucleus_tsb_config
[r
];
tsb_config
[r
].update(zc
.valid(), zc
.tsb_base() << 13, zc
.tsb_size() + 9,
zc
.page_size() * 3 + 13, zc
.ra_not_pa(), zc
.use_context());
tsb_config
[r
+ 4].update(nc
.valid(), nc
.tsb_base() << 13, nc
.tsb_size() + 9,
nc
.page_size() * 3 + 13, nc
.ra_not_pa(), nc
.use_context());
N2_RealRange
& rr
= real_range
[r
];
N2_PhysicalOffset
& po
= physical_offset
[r
];
tsb_ra2pa
[r
].update(rr
.enable(),rr
.rpn_low(),rr
.rpn_high(),po
.ptv());
data_wp_st64(0,0,this,0,data_wp());
inst_ctx_ra
.set_pid(partition_id());
inst_ctx_va
.set_pid(partition_id());
inst_ctx_va
.set_pri_ctx0(primary_context
[0]());
inst_ctx_va
.set_pri_ctx1(primary_context
[1]());
data_ctx
.set_pid(partition_id());
data_ctx
.set_pri_ctx0(primary_context
[0]());
data_ctx
.set_pri_ctx1(primary_context
[1]());
data_ctx
.set_sec_ctx0(secondary_context
[0]());
data_ctx
.set_sec_ctx1(secondary_context
[1]());
n2_lsu_ctr_st64(0,&lsu_ctr
,this,0,lsu_ctr());
// n2_inst_mmu_pa() is the instruction-fetch MMU path used when translation is
// bypassed (PA==VA): it checks UI breakpoints, selects a preprogrammed
// physical TTE, validates I/O-space fetches, links the line tag onto the
// TTE's flush list, and dispatches to the memory fetch handler.
SS_Vaddr
N2_Strand::n2_inst_mmu_pa( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
* s
, SS_Instr
* line
, SS_InstrCache::Tag
* line_tag
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// tm is the (negative) mask that aligns an address down to a decode line.
SS_Vaddr tm
= -(SS_InstrCache::LINE_SIZE
* 4);
// io selects the memory-vs-I/O fetch handler: top PA bit set means I/O space.
int io
= (pc
>> (n2
->pa_bits() - 1)) & 1;
// First check for on or near UI breakpoints.
if (n2
->near_break_inst_va(tm
,pc
))
// Check if we hit the breakpoint. If so then exit now.
// Else fetch a line, decode, execute the instructions
// guided by the ss_break_inst_va_dec() routine.
if (n2
->test_break_inst_va(pc
))
// NOTE(review): the exit path for a breakpoint hit appears to be elided in
// this chunk — confirm against the upstream source.
s
->skip_break_inst_va
= true;
n2
->inst_dec
= ss_break_inst_va_dec
;
// Now select the TTE to be used for the PA to PA translation.
// In 32bit mode the pc gets clipped to 32bit address space.
// We use a preprogrammed TTE to handle this situation.
// NOTE(review): tte is used without a visible declaration, and the branch
// that would select phys_tte_mem/phys_tte_io instead of phys_tte_mem_am
// appears to be elided in this chunk — confirm against upstream.
tte
= n2
->phys_tte_mem_am
;
// Check the I/O regions that we can fetch from.
// NOTE(review): the guard (presumably an io && !pc_iorange(...) test, as in
// n2_inst_mmu_ra) around the trap below appears to be elided — confirm.
n2
->inst_tag_update(0,pc
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,pc
,SS_Trap::INSTRUCTION_ACCESS_ERROR
); // prio 4.0
// Normal 64bit address memory access.
// Set the TTE that is currently used for instruction fetch,
// update the decode cache tag and tte, and add the TTE to the
// linked lists of TTE to make flushing TTEs from the instruction
// caches an simepl task of walking the list of cachelines that
SS_Memop exe
= n2
->mem_table
[0][io
];
// tag is pc aligned down to the decode cache line.
SS_Vaddr tag
= pc
& -(SS_InstrCache::LINE_SIZE
* 4);
// Link this cache line onto the TTE's list so a TLB demap can flush it.
line_tag
->lnk
.insert_after(&n2
->inst_tte_link
[tte
->index
]);
return (exe
)(pc
,npc
,s
,line
,tag
,tte
);
SS_Vaddr
N2_Strand::n2_inst_mmu_ra( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
* s
, SS_Instr
* line
, SS_InstrCache::Tag
* line_tag
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
SS_Vaddr va
= pc
& n2
->mask_pstate_am
;
SS_Vaddr tm
= -(SS_InstrCache::LINE_SIZE
* 4);
// First check for on or near UI breakpoints.
if (n2
->near_break_inst_va(tm
,pc
))
// Check if we hit the breakpoint. If so then exit now.
// Else fetch a line, decode, execute the instructions
// guided by the ss_break_inst_va_dec() routine.
if (n2
->test_break_inst_va(pc
))
s
->skip_break_inst_va
= true;
n2
->inst_dec
= ss_break_inst_va_dec
;
// Handle real range address checks. We use trampoline
// decoding to get the corner cases right when pc is in the
// cacheline that should cause the trap but is not on it.
n2
->inst_tag_update(0,va
);
return (n2
->inst_trap
)(pc
,va_signext(npc
),s
,line
,va
,SS_Trap::INSTRUCTION_REAL_RANGE
); // prio 2.6
// The current pc is not on the va-edge (last 16 bytes of the
// possitve end of the address space). To make sure we get the
// traps for the real range we trampoline the decoder through
// n2_dec_real_range. Note after this mmu routine we fetch the
// cacheline and set the exe part of the instruction to inst_dec.
// So switching the decoder temporarily makes us trampoline into
// the decoder that knows about the real range traps.
n2
->inst_dec
= n2_dec_real_range
;
// Lookup the TLB and check that the TTE can be used for translation.
// E.g. check that no error conditions occured on the TLB lookup
// (inst_mmu_error is set in cosim mode only) and that the TLB lookup
bool tte_multi_hit
= false;
if ((n2
->inst_tte
!= n2
->fail_tte
) && n2
->inst_tte
->match_real(va
,n2
->partition_id()))
tte
= ((N2_Tlb
*)n2
->inst_tlb
)->lookup_ra2pa(s
,va
,n2
->partition_id(),&tte_multi_hit
);
assert(n2
->sim_state
.cosim());
n2
->inst_mmu_error
= false;
n2
->inst_tag_update(0,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR
); // Prio 2.7
n2
->inst_tag_update(0,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INST_REAL_TRANSLATION_MISS
); // prio 2.8
// ITLB Multi Hit, Tag and Data Parity Error Detection
else if (tte
->has_errors() || tte_multi_hit
)
N2_Core
& n2_core
= n2
->core
;
// Multiple hits are detected only if ittm bit is set in CERER
if (n2_core
.cerer
.ittm() && tte_multi_hit
)
// Tag Parity Errors in ITLB are detected only if the ITTP bit is set in CERER
else if (n2_core
.cerer
.ittp() && tte
->tag_parity_error())
// Data Parity Errors in ITLB are detected only if the ITDP bit is set in CERER
else if (n2_core
.cerer
.itdp() && tte
->data_parity_error())
// The error type is recorded in the ISFSR
n2
->inst_sfsr
.error_type(itlb_error_type
);
n2
->inst_tag_update(0,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR
); // Prio 2.7
int io
= (tte
->phys_page
>> (n2
->pa_bits() - 1)) & 1;
// Check the properties of the TTE: no fault, and check fetching
// from restricted I/O spaces. Note that we never fetch real addresses
// in user mode, so we don't have to check for privileged violation.
n2
->inst_tag_update(0,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::IAE_NFO_PAGE
); // prio 3.3
else if (io
&& !pc_iorange(tte
->trans(pc
)))
n2
->inst_tag_update(0,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INSTRUCTION_ACCESS_ERROR
); // prio 4.0
SS_Memop exe
= n2
->mem_table
[0][io
];
SS_Vaddr tag
= pc
& -(SS_InstrCache::LINE_SIZE
* 4);
// Set the TTE that is currently used for instruction fetch,
// update the decode cache tag and tte, and add the TTE to the
// linked lists of TTE to make flushing TTEs from the instruction
// caches an simepl task of walking the list of cachelines that
line_tag
->lnk
.insert_after(&n2
->inst_tte_link
[tte
->index
]);
return (exe
)(pc
,npc
,s
,line
,tag
,tte
);
SS_Vaddr
N2_Strand::n2_inst_mmu_va( SS_Vaddr pc
, SS_Vaddr npc
, SS_Strand
* s
, SS_Instr
* line
, SS_InstrCache::Tag
* line_tag
)/*{{{*/
bool tte_multi_hit
= false;
N2_Strand
* n2
= (N2_Strand
*)s
;
uint64_t ctxt0
= n2
->tl() ? 0 : n2
->primary_context
[0]();
uint64_t ctxt1
= n2
->tl() ? 0 : n2
->primary_context
[1]();
SS_Vaddr va
= pc
& n2
->mask_pstate_am
;
SS_Vaddr tm
= -(SS_InstrCache::LINE_SIZE
* 4);
// First check for on or near UI breakpoints.
if (n2
->near_break_inst_va(tm
,pc
))
// Check if we hit the breakpoint. If so then exit now.
// Else fetch a line, decode, execute the instructions
// guided by the ss_break_inst_va_dec() routine.
if (n2
->test_break_inst_va(pc
))
s
->skip_break_inst_va
= true;
n2
->inst_dec
= ss_break_inst_va_dec
;
// Handle va watchpoint and address range checks. For both
// exceptions we use trampoline decoding to get the corner cases
// right when pc is in the cacheline that should cause the traps.
if (n2
->inst_watchpoint_va_hit(va
))
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INSTRUCTION_VA_WATCHPOINT
); // prio 2.5
else if (!pc_onrange(va
))
n2
->inst_tag_update(ctxt0
,va
);
return (n2
->inst_trap
)(pc
,va_signext(npc
),s
,line
,va
,SS_Trap::INSTRUCTION_ADDRESS_RANGE
); // prio 2.6
// The current pc is not on the va-edge (last 16 bytes of the
// possitve end of the address space). To make sure we get the
// traps for the address range we trampoline the decoder through
// n2_dec_address_range. Note after this mmu routine we fetch the
// cacheline and set the exe part of the instruction to inst_dec.
// So switching the decoder temporarily makes us trampoline into
// the decoder that knows about the address range traps.
n2
->inst_dec
= n2_dec_address_range
;
else if (n2
->inst_watchpoint_va_near_hit(tm
,va
))
// The current pc did not match the va watchpoint address, however
// the va watchpoint is enabled and falls in the same cacheline.
// So trampoline through n2_dec_va_watchpoint to get the watchpoint
// trap on the correct pc.
n2
->inst_dec
= n2_dec_va_watchpoint
;
// Lookup the TLB and check that the TTE can be used for translation.
// E.g. check that no error conditions occured on the TLB lookup
// (inst_mmu_error is set in cosim mode only) and that the TLB lookup
// found a matching TTE. If we didn't find a TTE in the TLB then
// perform a hardware table walk to bring one in from the TSB(s).
if ((n2
->inst_tte
!= n2
->fail_tte
) && n2
->inst_tte
->match_virt(va
,ctxt0
,ctxt1
,n2
->partition_id()))
// Lookup va for both contexts. If both match then we have a multi
// hit case too. However, if both contexts are the same then this is ok.
bool tte_multi_hit1
= false;
tte0
= ((N2_Tlb
*)n2
->inst_tlb
)->lookup_va2pa(s
,va
,ctxt0
,n2
->partition_id(),&tte_multi_hit0
);
tte1
= ((N2_Tlb
*)n2
->inst_tlb
)->lookup_va2pa(s
,va
,ctxt1
,n2
->partition_id(),&tte_multi_hit1
);
tte
= tte0
? tte0
: tte1
;
tte_multi_hit
= ((tte0
!= 0) && (tte1
!= 0)) || tte_multi_hit0
|| tte_multi_hit1
;
assert(n2
->sim_state
.cosim());
n2
->inst_mmu_error
= false;
n2
->inst_tag_update(ctxt0
,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR
); // Prio 2.7
// ITLB Multi Hit, Tag and Data Parity Error Detection
if (tte
->has_errors() || tte_multi_hit
)
N2_Core
& n2_core
= n2
->core
;
// Multiple hits are detected only if ittm bit is set in CERER
if (n2_core
.cerer
.ittm() && tte_multi_hit
)
// Tag Parity Errors in ITLB are detected only if the ITTP bit is set in CERER
else if (n2_core
.cerer
.ittp() && tte
->tag_parity_error())
// Data Parity Errors in ITLB are detected only if the ITDP bit is set in CERER
else if (n2_core
.cerer
.itdp() && tte
->data_parity_error())
// The error type is recorded in the ISFSR
n2
->inst_sfsr
.error_type(itlb_error_type
);
n2
->inst_tag_update(ctxt0
,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR
); // Prio 2.7
if ((tte
= n2
->n2_inst_htw(va
,ctxt0
,ctxt1
)) == 0)
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,n2
->trap_htw
);
int io
= (tte
->phys_page
>> (n2
->pa_bits() - 1)) & 1;
// Check the properties of the TTE: privileged violation, no fault,
// and check fetching from restricted I/O spaces.
if (tte
->p() && (n2
->sim_state
.priv() == SS_USER
))
n2
->inst_tag_update(ctxt0
,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::IAE_PRIVILEGE_VIOLATION
); // prio 3.1
n2
->inst_tag_update(ctxt0
,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::IAE_NFO_PAGE
); // prio 3.3
else if (io
&& !pc_iorange(tte
->trans(pc
)))
n2
->inst_tag_update(ctxt0
,va
);
return (n2
->inst_trap
)(pc
,npc
,s
,line
,va
,SS_Trap::INSTRUCTION_ACCESS_ERROR
); // prio 4.0
SS_Memop exe
= n2
->mem_table
[0][io
];
// Set the TTE that is currently used for instruction fetch,
// update the decode cache tag and tte, and add the TTE to the
// linked lists of TTE to make flushing TTEs from the instruction
// caches an simepl task of walking the list of cachelines that
line_tag
->lnk
.insert_after(&n2
->inst_tte_link
[tte
->index
]);
return (exe
)(pc
,npc
,s
,line
,tag
,tte
);
// n2_data_mmu() performs the data-side MMU translation for a load/store/atomic:
// it resolves the context(s) from the ASI, handles hypervisor bypass (PA==VA),
// RA->PA real translation, and VA->PA translation through the DTLB (with
// hardware tablewalk on miss), raising the appropriate trap (with its PRM
// priority noted inline) on any violation.
// NOTE(review): brace/structure lines appear to have been lost in this
// extraction; the control-flow grouping implied below is inferred — confirm
// against upstream N2_Strand.cc before relying on nesting.
SS_Vaddr
N2_Strand::n2_data_mmu( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr va, uint_t mem )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
SS_AsiInfo asi_info = n2->asi_info[i->asi];
// Truncate the VA to 32 bits when PSTATE.AM is set (mask precomputed).
va &= n2->mask_pstate_am;
// Select context pair from the ASI: primary, secondary, or (implied) nucleus.
if (asi_info.is_primary())
ctxt0 = n2->primary_context[0]();
ctxt1 = n2->primary_context[1]();
else if (asi_info.is_secondary())
ctxt0 = n2->secondary_context[0]();
ctxt1 = n2->secondary_context[1]();
//assert(asi_info.is_nucleus());
// Hypervisor bypass ASIs: PA == VA, no TLB lookup. Bit pa_bits()-1 of the
// address selects the I/O space.
if ((n2->sim_state.priv() == SS_HPRV) && asi_info.is_bypass())
io = (va >> (n2->pa_bits() - 1)) & 1;
// Atomics and quad/block loads are not allowed from non-cacheable I/O space.
if (io && (i->is_atomic() || (i->is_read() && (asi_info.is_quad_load_asi() || asi_info.is_block_asi()))))
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NC_PAGE); // Prio 12.5
le = asi_info.is_little_endian();
tte = io ? n2->phys_tte_io : n2->phys_tte_mem;
// RA->PA mode: MMU disabled below hypervisor, or an explicit real ASI.
if (((n2->sim_state.priv() < SS_HPRV) && !n2->sim_state.data_mmu()) || asi_info.is_real()) // RA->PA
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::MEM_REAL_RANGE); // Prio 11.3
ctxt0 = 0; // set context field to 0 in case of RA->PA
(n2->data_tlb_read)(n2->tlb_sync);
n2->data_tlb_read_skip = true;
tte = ((N2_Tlb*)n2->data_tlb)->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);
// (cosim only) a pending mmu error forces the MMU_ERROR trap.
n2->data_mmu_error = false;
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
// DTLB Multi Hit, Tag and Data Parity Error Detection
if (tte->has_errors() || tte_multi_hit)
N2_Core& n2_core = n2->core;
// Multiple hits are detected only if DTTM bit is set in CERER
if (n2_core.cerer.dttm() && tte_multi_hit)
// Tag Parity Errors in DTLB are detected only if the DTTP bit is set in CERER
else if (n2_core.cerer.dttp() && tte->tag_parity_error())
// Data Parity Errors in DTLB are detected only if the DTDP bit is set in CERER
else if (n2_core.cerer.dtdp() && tte->data_parity_error())
// Error type and va are stored in DSFSR and DSFAR respectively
n2->data_sfsr.error_type(dtlb_error_type);
n2->data_sfar.error_addr(va);
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
// RA lookup missed the DTLB: real translation miss.
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_REAL_TRANSLATION_MISS); // Prio 12.3
// VA->PA path: watchpoint and VA-hole checks precede the TLB lookup.
if (n2->va_watchpoint_hit(i,va))
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::VA_WATCHPOINT); // Prio 11.2
else if (!va_inrange(va))
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::MEM_ADDRESS_RANGE); // Prio 11.3
(n2->data_tlb_read)(n2->tlb_sync);
n2->data_tlb_read_skip = true;
// Lookup va for both contexts. If both match then we have a multi
// hit case too. However, if both contexts are the same then this is ok.
bool tte_multi_hit1 = false;
tte0 = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit0);
tte1 = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit1);
tte = tte0 ? tte0 : tte1;
bool tte_multi_hit = ((tte0 != 0) && (tte1 != 0)) || tte_multi_hit0 || tte_multi_hit1;
// (cosim only) pending mmu error forces the MMU_ERROR trap.
n2->data_mmu_error = false;
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
// DTLB Multi Hit, Tag and Data Parity Error Detection
if (tte->has_errors() || tte_multi_hit)
N2_Core& n2_core = n2->core;
// Multiple hits are detected only if DTTM bit is set in CERER
if (n2_core.cerer.dttm() && (tte->multi_hit() || tte_multi_hit))
// Tag Parity Errors in DTLB are detected only if the DTTP bit is set in CERER
else if (n2_core.cerer.dttp() && tte->tag_parity_error())
// Data Parity Errors in DTLB are detected only if the DTDP bit is set in CERER
else if (n2_core.cerer.dtdp() && tte->data_parity_error())
// Error type and va are stored in DSFSR and DSFAR respectively
n2->data_sfsr.error_type(dtlb_error_type);
n2->data_sfar.error_addr(va);
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
// TLB miss path: cohere ops only trace; otherwise run the hardware tablewalk.
n2->data_tlb_read_skip = false;
// Output trace for cohere that mis tlb; they don't trap.
n2->trc_hook->mem_access(i->is_fetch() ? SS_Tracer::PREFETCH : SS_Tracer::FLUSH,va,0,0,0);
tte = n2->n2_data_htw(va,ctxt0,ctxt1);
// Tablewalk failed: raise whatever trap the walker recorded in trap_htw.
return (n2->data_trap)(pc,npc,s,i,va,n2->trap_htw);
io = (tte->phys_page >> (n2->pa_bits() - 1)) & 1;
// Check the TTE properties: privileged violation when we are in user
// mode or pretend to be user (as_is_user asi), atomic operations from
// i/o of non cacheable pages, no fault violations, side effects, or
// write operation to read only pages.
if (tte->p() && ((n2->sim_state.priv() == SS_USER) || asi_info.is_as_if_user()))
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_PRIVILEGE_VIOLATION); // Prio 12.4
else if ((io && (i->is_atomic() || (i->is_read() && (asi_info.is_quad_load_asi() || asi_info.is_block_asi())))) || (!tte->cp() && (i->is_atomic() || (i->is_read() && asi_info.is_quad_load_asi()))))
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NC_PAGE); // Prio 12.5
else if (tte->nfo() && !asi_info.is_nofault())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NFO_PAGE); // Prio 12.6
else if (tte->e() && asi_info.is_nofault())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_SO_PAGE); // Prio 12.6
else if (!tte->w() && i->is_write())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::FAST_DATA_ACCESS_PROTECTION); // Prio 12.7
// Effective endianness: ASI little-endian XOR the TTE invert-endian bit.
le = (asi_info.is_little_endian() ^ tte->ie());
// Check for pa watchpoints
SS_Paddr pa = tte->trans(va);
if (!i->is_cohere() && n2->pa_watchpoint_hit(i,pa))
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::PA_WATCHPOINT); // Prio 12.8
// All ra2pa and va2pa use TTEs that come from the TLB. We keep a linked list
// per TLB index of locations where a TTE is used such that we can efficiently
// update the decode caches. The pa2pa TTEs are not stored in the linked list.
if (!n2->data_wp_check && !pa2pa)
i->lnk.insert_after(&n2->data_tte_link[tte->index]);
// Set the lower bits of the TTE pointer to include le (little endian) and io (I/O)
// This used in the memory access part of the memory operation.
tte = i->set_tte(le,io << 1,tte);
// Call the routines that handle memory. We have a separate routines for
// memory and i/o, and big or little endian this to make the best of optimizers.
SS_Memop exe = n2->mem_table[mem][((long)tte & 3)];
n2->data_tlb_read_skip = false;
// In hyper privileged mode we do not cache the TTE when the ASI specifies
// that we need to translate (not bypass). Note the only ASIs that translate
// in hyper privileged mode are the AS_IF ASIs. This check makes decode caching
// perform better as we have to flush much less.
if (n2->data_wp_check || ((n2->sim_state.priv() == SS_HPRV) && !pa2pa))
return (exe)(pc,npc,s,i,va,tte);
// n2_invalid_asi() is the catch-all for lsu instructions whose ASI is invalid
// or whose privilege was wrong at decode time. It re-runs the trap checks in
// priority order (alignment 10.x, privileged action 11.1, watchpoint 11.2,
// address range 11.3, mmu error 12.2) so the correct trap is raised, ending
// with DAE_INVALID_ASI (12.1) when nothing higher-priority applies.
// NOTE(review): brace lines appear lost in this extraction; nesting below is
// inferred — confirm against upstream N2_Strand.cc.
SS_Vaddr
N2_Strand::n2_invalid_asi( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr ea )/*{{{*/
// All lsu instructions for which the privilege mode is wrong at decode time,
// or for which the used asi is not valid end up here. Go through the trap checking
// motion as we need to raise one with the correct priority.
N2_Strand* n2 = (N2_Strand*)s;
SS_AsiInfo ai = n2->asi_info[i->asi];
// Misaligned access? 8-byte FP loads/stores at a 4-byte boundary get their
// own LDDF/STDF alignment traps; everything else gets the generic one.
if (!i->is_cohere() && (ea & (i->len - 1)))
if ((i->len == 8) && (ea & 7) == 0x4)
if ((i->opc.get_op3() & 0x2f) == 0x23) // lddf & lddfa
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::LDDF_MEM_ADDRESS_NOT_ALIGNED); // Prio 10.1
else if (((i->opc.get_op3() & 0x2f) == 0x27)) // stdf & stdfa
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::STDF_MEM_ADDRESS_NOT_ALIGNED); // Prio 10.1
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_ADDRESS_NOT_ALIGNED); // Prio 10.2
// Insufficient privilege for this ASI.
else if (!i->is_cohere() && (s->sim_state.priv() < s->asi_info[i->asi].get_protection()))
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::PRIVILEGED_ACTION); // Prio 11.1
else if (ai.is_translating())
ea &= n2->mask_pstate_am;
// Hypervisor bypass: no range/mmu checks apply.
if ((n2->sim_state.priv() == SS_HPRV) && ai.is_bypass())
// RA->PA mode (mmu off below hypervisor, or real ASI): check the VA hole
// and any pending (cosim) mmu error.
else if (((n2->sim_state.priv() < SS_HPRV) && !n2->sim_state.data_mmu()) || ai.is_real())
if (!i->is_cohere() && !va_inrange(ea))
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_REAL_RANGE); // Prio 11.3
else if (n2->data_mmu_error)
n2->data_mmu_error = false;
n2->data_tag_update(0,ea);
return (n2->data_trap)(pc,npc,s,i,ea,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
// VA->PA mode: watchpoint, VA hole, then pending mmu error with the
// context pair selected by the ASI.
if (!i->is_cohere() && (n2->va_watchpoint_hit(i,ea)))
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::VA_WATCHPOINT); // Prio 11.2
if (!i->is_cohere() && !va_inrange(ea))
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_ADDRESS_RANGE); // Prio 11.3
else if (n2->data_mmu_error)
ctxt0 = n2->primary_context[0]();
ctxt1 = n2->primary_context[1]();
else if (ai.is_secondary())
ctxt0 = n2->secondary_context[0]();
ctxt1 = n2->secondary_context[1]();
n2->data_mmu_error = false;
n2->data_tag_update(ctxt0,ea);
return (n2->data_trap)(pc,npc,s,i,ea,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
// Nothing of higher priority applies: the ASI itself is invalid.
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::DAE_INVALID_ASI); // Prio 12.1
// inst_tag_update() refreshes the instruction-side MMU tag registers and the
// four ITSB pointers after an ITLB-related event for (context, va).
// tsb_config[0..3] serve context 0, tsb_config[4..7] serve non-zero contexts.
void N2_Strand::inst_tag_update( uint_t context, SS_Vaddr va )/*{{{*/
inst_tag_target.va(va >> 22);
inst_tag_access.context(context);
inst_tag_target.context(context);
SS_TsbConfig* tsb_cfg = context ? &tsb_config[4] : tsb_config;
for (int i=0; i < 4; i++)
inst_tsb_pointer[i] = tsb_cfg[i].index(va);
// data_tag_update() refreshes the data-side MMU tag registers and the four
// DTSB pointers after a DTLB-related event for (context, va). Mirrors
// inst_tag_update() for the data side.
void N2_Strand::data_tag_update( uint_t context, SS_Vaddr va )/*{{{*/
data_tag_target.va(va >> 22);
data_tag_access.context(context);
data_tag_target.context(context);
SS_TsbConfig* tsb_cfg = context ? &tsb_config[4] : tsb_config;
for (int i=0; i < 4; i++)
data_tsb_pointer[i] = tsb_cfg[i].index(va);
// n2_trap() forwards a non-memory trap to the generic strand trap handler.
// NOTE(review): the local `n2` is unused in the visible code — possibly used
// by lines lost in this extraction; confirm before removing.
SS_Vaddr
N2_Strand::n2_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Trap::Type tt )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
return SS_Strand::ss_trap(pc,npc,s,i,tt);
// n2_inst_trap() raises an instruction-side trap: invalidates the cached
// inst TTE, fixes up range/alignment traps (recording DSFAR), clears the
// decode cache line for mmu traps, traces, and dispatches to the trap handler.
// NOTE(review): the `case` labels below clearly belong to a switch(tt) whose
// header and braces were lost in this extraction — confirm grouping against
// upstream N2_Strand.cc.
SS_Vaddr
N2_Strand::n2_inst_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_Vaddr va, SS_Trap::Type tt )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
// Any cached instruction-fetch translation is now invalid.
n2->inst_tte = n2->fail_tte;
// Jmpl causes MEM_ADDRESS_RANGE Traps. When the mmu is in ra2pa mode
// the trap should be MEM_REAL_RANGE. We tunnel these traps through the
// inst_trap iso data_trap as they are treated differently that way :-)
case SS_Trap::MEM_ADDRESS_RANGE:
if (n2->inst_mmu == n2->inst_mmu_ra)
tt = SS_Trap::MEM_REAL_RANGE;
case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
n2->data_sfar = va_signext(va & n2->mask_pstate_am);
// Clear the decode cache line in case of inst mmu trap
case SS_Trap::MEM_ADDRESS_RANGE:
case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
n2->trc_hook->reg_value(N2_Registers::REG_DATA_SFAR,n2->data_sfar());
case SS_Trap::MEM_REAL_RANGE:
// this is coming from jmpl, no need to clear up decode cache
for (int i=0; i < SS_Instr::LINE_SIZE; i++)
line->line_index(i)->opc = 0;
n2->trc_hook->inst_trap(va);
return (n2->trap)(pc,npc,s,line,tt);
// n2_data_trap() raises a data-side trap: replays/suppresses the dtlb_read
// cosim message as needed, patches up TLBLOOKUP pli messages for diagnostic
// ASIs that trap, substitutes a deferred trap type (trap_dae_inv_asi), records
// DSFAR for faulting-address traps, traces, and dispatches to the trap handler.
// NOTE(review): the `case` labels below belong to switch statements (on the
// ASI number and on tt) whose headers/braces were lost in this extraction —
// confirm grouping against upstream N2_Strand.cc.
SS_Vaddr
N2_Strand::n2_data_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr va, SS_Trap::Type tt )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
if (n2->data_tlb_read_skip)
// Before the mmu does a tlb lookup it sets this flag as after the lookup
// hits and something is wrong we get here and should not do a tlb read again.
n2->data_tlb_read_skip = false;
else if ((n2->asi_info[i->asi].is_translating()) && ((n2->sim_state.priv() != SS_HPRV) || !(n2->asi_info[i->asi].is_bypass())))
// Only pull the dtlb_read message when the trap priority >= 10.0 (x10)
// ToDo only traps with priority >= 10.1 go through here ???
if (SS_Trap::table[tt].priority >= 100)
(n2->data_tlb_read)(n2->tlb_sync);
if ((tt == SS_Trap::DAE_INVALID_ASI) || (tt == SS_Trap::PRIVILEGED_ACTION))
// TLBLOOKUP pli messages are send out too early so we have to fix that up
// here when the supposed instruction actually traps and does not use the tlb.
// Ben M. : it looks like the the signals we look at to generate
// the ITLB LOOKUPs are in the ITLB and only happen after the ASI command
// is put on the ASI RING. But it looks like the DTLB signals do not go
// through the actual ASI ring but directory to the DTLB, and they assert
// even if we get the ASI invalid trap. So I think that code needs
// to be in place for DTLBLOOKUPs but not for ITLBLOOKUPs. ---> keep
// ITLBLOOKUP code around for a while longer. 6/12/07
case 0x53: // inst_tlb_probe
case 0x55: // inst_tlb_data_access
case 0x56: // inst_tlb_tag_read
// a corresponding ASI_REAS & TLBLOOKUP were sent already, have to
n2->asi_map.ld64(n2, i->asi, va, &popout);
(n2->inst_tlb_lookup)(n2->tlb_sync);
case 0x5d: // data_tlb_data_access
case 0x5e: // data_tlb_tag_read
n2->asi_map.ld64(n2, i->asi, va, &popout);
(n2->data_tlb_lookup)(n2->tlb_sync);
// Cohereing instructions don't cause a trap, so filter them out here
// iso having if i->cohere() tests all over the place. However, we
case SS_Trap::DATA_ACCESS_MMU_ERROR:
// For now, the UNSUPPORTED_PAGE_SIZE trap is the only trap that can
// happen during wrasi. We use a backdoor, e.g. raise the INVALID_ASI
// trap and detect here that an other trap should be raised. ToDo cleanup proper
if (n2->trap_dae_inv_asi != SS_Trap::RESERVED)
tt = n2->trap_dae_inv_asi;
n2->trap_dae_inv_asi = SS_Trap::RESERVED;
// Traps below record the (AM-masked, sign-extended) faulting VA in DSFAR.
case SS_Trap::DAE_INVALID_ASI:
case SS_Trap::DAE_PRIVILEGE_VIOLATION:
case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
case SS_Trap::LDDF_MEM_ADDRESS_NOT_ALIGNED:
case SS_Trap::STDF_MEM_ADDRESS_NOT_ALIGNED:
va &= n2->mask_pstate_am;
case SS_Trap::DAE_NC_PAGE:
case SS_Trap::DAE_NFO_PAGE:
case SS_Trap::DAE_SO_PAGE:
case SS_Trap::MEM_ADDRESS_RANGE:
case SS_Trap::MEM_REAL_RANGE:
case SS_Trap::FAST_DATA_ACCESS_PROTECTION:
//case SS_Trap::PRIVILEGED_ACTION: ... stxa 58/30 no DSFAR ? ToDo is this right as the PRM says we should update
case SS_Trap::VA_WATCHPOINT:
case SS_Trap::PA_WATCHPOINT:
n2->data_sfar = va_signext(va);
n2->trc_hook->data_trap(va);
return (n2->trap)(pc,npc,n2,i,tt);
// n2_inst_htw() performs the instruction-side hardware table walk: probe up
// to four enabled ITSBs for (va, ctxt0/ctxt1), optionally relocate RA->PA via
// the tsb_ra2pa ranges, insert a matching TTE into the ITLB, and on failure
// record the trap to raise in trap_htw (MMU_MISS variants when the walk is
// disabled/enabled, UNAUTH/INVALID_TSB_ENTRY on bad entries).
// NOTE(review): several structural lines (braces, the `if (ok` header that the
// orphan `&&` continuations below belong to, and some else arms) were lost in
// this extraction — confirm control flow against upstream N2_Strand.cc.
SS_Tte*
N2_Strand::n2_inst_htw( SS_Vaddr va, SS_Context ctxt0, SS_Context ctxt1 )/*{{{*/
SS_TsbConfig* tsb_cfg = ctxt0 ? &tsb_config[4] : &tsb_config[0];
bool htw_enabled = false;
inst_tag_update(ctxt0,va);
for (int n=4; n--; tsb_cfg++)
if (!tsb_cfg->is_valid())
// Fetch the candidate TTE (tag,data) pair from memory with one atomic 16B load.
SS_Paddr tsb_addr = tsb_cfg->index(va);
uint64_t tte_tag_data[2];
SS_MsyncMemory* msync_mem = (SS_MsyncMemory*)memory;
msync_mem->msync_info(this->strand_id(),tsb_addr,SS_Memory::HTW);
memory->ld128atomic(tsb_addr,tte_tag_data);
tsb_tte_tag = tte_tag_data[0];
tsb_tte_data = tte_tag_data[1];
trc_hook->hwop(SS_Tracer::LD_CODE, tsb_addr, 16, tte_tag_data);
// Match on VPN (page-size-dependent mask), clean reserved fields, and a
// page size compatible with this TSB's configured size.
SS_Vaddr vpn_mask = ~((SS_Vaddr(1) << (tsb_tte_data.size() * 3 + 13)) - SS_Vaddr(1));
&& (tsb_tte_tag.reserved0() == 0) && (tsb_tte_tag.reserved1() == 0)
&& tsb_cfg->match(va & vpn_mask,(tsb_tte_tag.va() << 22) & vpn_mask)
&& (tsb_cfg->get_page_size() <= tsb_tte_data.size()) && (tsb_tte_data.size() < 8))
ok = tsb_tte_tag.context() == 0;
else if (tsb_cfg->use_context())
if (tsb_cfg->use_context_0())
tsb_tte_tag.context(ctxt0);
tsb_tte_tag.context(ctxt1);
ok = tsb_tte_tag.context() == ctxt0;
// RA-based TSB: translate the TTE's RA range through the four real ranges.
if (tsb_cfg->is_ra_not_pa())
SS_Paddr ra_mask = (SS_Paddr(1) << (tsb_tte_data.size() * 3)) - SS_Paddr(1);
SS_Paddr ra_low = tsb_tte_data.pa() & ~ra_mask;
SS_Paddr ra_high = tsb_tte_data.pa() | ra_mask;
for (uint_t r=0; r < 4; r++)
SS_TsbRaToPa* r2p = &tsb_ra2pa[r];
if (r2p->valid && (r2p->rpn_beg <= ra_low) && (ra_high <= r2p->rpn_end))
trap_htw = SS_Trap::IAE_UNAUTH_ACCESS; // prio 2.9 (3.2 in Sun Sparc)
tsb_tte_data.pa(r2p->ppn_ofs + ra_low);
tsb_tte_data.pa_zero_ext(0);
(inst_tlb_write)(tlb_sync);
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
trap_htw = SS_Trap::IAE_UNAUTH_ACCESS; // Prio 2.9
trap_htw = SS_Trap::INSTRUCTION_INVALID_TSB_ENTRY; // Prio 2.9 (2.10 in Sun Sparc)
// Pages without execute permission cannot supply instruction fetches.
else if (!tsb_tte_data.x())
trap_htw = SS_Trap::IAE_UNAUTH_ACCESS;
tsb_tte_data.pa_zero_ext(0);
(inst_tlb_write)(tlb_sync);
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
// Check if one of the 8 tsb config is enabled. If so then hardware
// table walk is considered enabled.
tsb_cfg = ctxt0 ? &tsb_config[0] : &tsb_config[4];
for (int n=4; n--; tsb_cfg++)
trap_htw = SS_Trap::INSTRUCTION_ACCESS_MMU_MISS;
trap_htw = SS_Trap::FAST_INSTRUCTION_ACCESS_MMU_MISS;
// n2_data_htw() performs the data-side hardware table walk; structurally the
// data twin of n2_inst_htw(): probe up to four enabled DTSBs for
// (va, ctxt0/ctxt1), optionally relocate RA->PA, insert a matching TTE into
// the DTLB, and record trap_htw (DATA_INVALID_TSB_ENTRY, or the
// DATA/FAST_DATA_ACCESS_MMU_MISS variants) on failure.
// NOTE(review): structural lines (braces, the `if (ok` header for the orphan
// `&&` continuations, some else arms) were lost in this extraction — confirm
// control flow against upstream N2_Strand.cc.
SS_Tte*
N2_Strand::n2_data_htw( SS_Vaddr va, SS_Context ctxt0, SS_Context ctxt1 )/*{{{*/
SS_TsbConfig* tsb_cfg = ctxt0 ? &tsb_config[4] : &tsb_config[0];
bool htw_enabled = false;
data_tag_update(ctxt0,va);
for (int n=4; n--; tsb_cfg++)
if (!tsb_cfg->is_valid())
// Fetch the candidate TTE (tag,data) pair from memory with one atomic 16B load.
SS_Paddr tsb_addr = tsb_cfg->index(va);
uint64_t tte_tag_data[2];
SS_MsyncMemory* msync_mem = (SS_MsyncMemory*)memory;
msync_mem->msync_info(this->strand_id(),tsb_addr,SS_Memory::HTW);
memory->ld128atomic(tsb_addr,tte_tag_data);
tsb_tte_tag = tte_tag_data[0];
tsb_tte_data = tte_tag_data[1];
trc_hook->hwop(SS_Tracer::LD_DATA, tsb_addr, 16, tte_tag_data);
// Match on VPN (page-size-dependent mask), clean reserved fields, and a
// page size compatible with this TSB's configured size.
SS_Vaddr vpn_mask = ~((SS_Vaddr(1) << (tsb_tte_data.size() * 3 + 13)) - SS_Vaddr(1));
&& (tsb_tte_tag.reserved0() == 0) && (tsb_tte_tag.reserved1() == 0)
&& tsb_cfg->match(va & vpn_mask,(tsb_tte_tag.va() << 22) & vpn_mask)
&& (tsb_cfg->get_page_size() <= tsb_tte_data.size()) && (tsb_tte_data.size() < 8))
ok = tsb_tte_tag.context() == 0;
else if (tsb_cfg->use_context())
if (tsb_cfg->use_context_0())
tsb_tte_tag.context(ctxt0);
tsb_tte_tag.context(ctxt1);
ok = tsb_tte_tag.context() == ctxt0;
// RA-based TSB: translate the TTE's RA range through the four real ranges.
if (tsb_cfg->is_ra_not_pa())
SS_Paddr ra_mask = (SS_Paddr(1) << (tsb_tte_data.size() * 3)) - SS_Paddr(1);
SS_Paddr ra_low = tsb_tte_data.pa() & ~ra_mask;
SS_Paddr ra_high = tsb_tte_data.pa() | ra_mask;
for (uint_t r=0; r < 4; r++)
SS_TsbRaToPa* r2p = &tsb_ra2pa[r];
if (r2p->valid && (r2p->rpn_beg <= ra_low) && (ra_high <= r2p->rpn_end))
tsb_tte_data.pa(r2p->ppn_ofs + ra_low);
tsb_tte_data.pa_zero_ext(0);
(data_tlb_write)(tlb_sync);
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
trap_htw = SS_Trap::DATA_INVALID_TSB_ENTRY;
tsb_tte_data.pa_zero_ext(0);
(data_tlb_write)(tlb_sync);
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
// Check if one of the 8 tsb configs is enabled. If so then hardware
// table walk is considered enabled.
tsb_cfg = ctxt0 ? &tsb_config[0] : &tsb_config[4];
for (int n=4; n--; tsb_cfg++)
trap_htw = SS_Trap::DATA_ACCESS_MMU_MISS;
trap_htw = SS_Trap::FAST_DATA_ACCESS_MMU_MISS;
// n2_inst_hwtw() is the external entry for an instruction-side tablewalk:
// clears trap_htw then walks with the primary context pair or, on the other
// (structure-lost) path, with context 0/0 (real).
// NOTE(review): the branch selecting between the two n2_inst_htw calls was
// lost in this extraction — confirm against upstream N2_Strand.cc.
SS_Trap::Type
N2_Strand::n2_inst_hwtw( SS_Strand* strand, SS_Vaddr va, int_t entry )/*{{{*/
N2_Strand* n2 = (N2_Strand*)strand;
n2->trap_htw = SS_Trap::RESERVED;
tte = n2->n2_inst_htw(va_signext(va),n2->primary_context[0](),n2->primary_context[1]());
tte = n2->n2_inst_htw(va_signext(va),0,0);
// n2_data_hwtw() is the external entry for a data-side tablewalk: clears
// trap_htw then walks with the context pair selected by the given ASI
// (primary, secondary, or — fall-through — 0/0 for real/nucleus).
SS_Trap::Type
N2_Strand::n2_data_hwtw( SS_Strand* strand, SS_Vaddr va, uint8_t asi, int_t entry )/*{{{*/
N2_Strand* n2 = (N2_Strand*)strand;
SS_AsiInfo asi_info = n2->asi_info[asi];
n2->trap_htw = SS_Trap::RESERVED;
if (asi_info.is_primary())
tte = n2->n2_data_htw(va_signext(va),n2->primary_context[0](),n2->primary_context[1]());
else if (asi_info.is_secondary())
tte = n2->n2_data_htw(va_signext(va),n2->secondary_context[0](),n2->secondary_context[1]());
tte = n2->n2_data_htw(va_signext(va),0,0);
// n2_lsu_ctr_st64() handles a store to the LSU control register: mirrors the
// inst/data MMU enable bits into sim_state, and reprograms the VA/PA
// watchpoint from the bm/mode/re/we fields. Watchpoint addresses are 8-byte
// aligned, so bit0 is abused as an enable flag (0 = enabled, 1 = disabled).
SS_AsiSpace::Error
N2_Strand::n2_lsu_ctr_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
N2_LsuCtr* lc = (N2_LsuCtr*)_reg;
// Grab the inst and data mmu enable bits and store in the common
// enable flag for use in sim_update.
s->sim_state.inst_mmu(s->lsu_ctr.im());
s->sim_state.data_mmu(s->lsu_ctr.dm());
// Watchpoint address is always 8 bytes aligned so an offset value
// of 1 causes mismatch (= disabled), use that, e.g bit0=0 is enable.
n2->data_wp_va_addr |= 1;
n2->data_wp_pa_addr |= 1;
n2->data_wp_check = false;
n2->data_wp_bytes = lc->bm();
n2->data_wp_flags = (lc->re() ? SS_Instr::READ : 0) | (lc->we() ? SS_Instr::WRITE : 0);
// mode < 2 or empty byte mask: both watchpoints disabled;
// mode == 2: PA watchpoint armed; otherwise: VA watchpoint armed.
if ((lc->bm() == 0) || (lc->mode() < 2))
n2->data_wp_va_addr |= SS_Vaddr(1);
n2->data_wp_pa_addr |= SS_Paddr(1);
else if (lc->mode() == 2)
n2->data_wp_va_addr |= SS_Vaddr(1);
n2->data_wp_pa_addr &= ~SS_Paddr(1);
n2->data_wp_check = true;
n2->data_wp_va_addr &= ~SS_Vaddr(1);
n2->data_wp_pa_addr |= SS_Paddr(1);
n2->data_wp_check = true;
// Propagate the sim_state changes.
// data_wp_st64() handles a store to the data watchpoint address register:
// records the (sign-extended) address and rebuilds the byte-aligned VA/PA
// watchpoint compare values, preserving each address's bit0 enable flag.
SS_AsiSpace::Error
N2_Strand::data_wp_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
n2->data_wp.set(va_signext(data));
// Set new the watchpoint addresses and add enable bit0=0 or disable bit0=1
n2->data_wp_va_addr = (n2->data_wp.va() << 3) | (n2->data_wp_va_addr & SS_Vaddr(1));
n2->data_wp_pa_addr = (n2->data_wp.pa() << 3) | (n2->data_wp_pa_addr & SS_Paddr(1));
// tsb_config_st64() handles a store to one of the eight TSB configuration
// registers: rejects unsupported page sizes (deferred via trap_dae_inv_asi /
// NO_ASI backdoor), updates the decoded tsb_config entry, then recomputes all
// inst and data TSB pointers from the current tag access registers.
// NOTE(review): the argument list of the update() call and the surrounding
// page-size switch appear truncated in this extraction — confirm against
// upstream N2_Strand.cc.
SS_AsiSpace::Error
N2_Strand::tsb_config_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
N2_TsbConfig* tc = (N2_TsbConfig*)_reg;
uint64_t old_data = (*tc)();
// Unsupported page size: raise UNSUPPORTED_PAGE_SIZE through the
// DAE_INVALID_ASI backdoor (see n2_data_trap).
n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
return SS_AsiSpace::NO_ASI;
n2->tsb_config[(va - 0x10) >> 3].update(tc->valid(),
                                        tc->page_size() * 3 + 13,
// Recompute the four ITSB pointers for the current inst tag access context.
tsb_cfg = n2->inst_tag_access.context() ? &n2->tsb_config[4] : n2->tsb_config;
for (int i=0; i < 4; i++)
n2->inst_tsb_pointer[i] = tsb_cfg[i].index(n2->inst_tag_access.va() << 13);
// Recompute the four DTSB pointers for the current data tag access context.
tsb_cfg = n2->data_tag_access.context() ? &n2->tsb_config[4] : n2->tsb_config;
for (int i=0; i < 4; i++)
n2->data_tsb_pointer[i] = tsb_cfg[i].index(n2->data_tag_access.va() << 13);
// tsb_ra2pa_st64() handles a store to a real-range / physical-offset register:
// recomputes the cached RA->PA translation entry r (decoded from the register
// offset) used by the hardware tablewalkers.
SS_AsiSpace::Error
N2_Strand::tsb_ra2pa_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
SS_AsiCtrReg* cr = (SS_AsiCtrReg*)_reg;
uint_t r = ((va - 8) & 0x18) >> 3;
n2->tsb_ra2pa[r].update(n2->real_range[r].enable(),
                        n2->real_range[r].rpn_low(),
                        n2->real_range[r].rpn_high(),
                        n2->physical_offset[r].ptv());
// tag_access_st64() handles a store to the inst or data tag access register
// (selected by which register `_reg` points at) and refreshes the derived
// tag/TSB-pointer state via the matching *_tag_update().
SS_AsiSpace::Error
N2_Strand::tag_access_st64( SS_Node* , void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
if (_reg == &n2->inst_tag_access)
n2->inst_tag_access = data;
n2->inst_tag_update(n2->inst_tag_access.context(),n2->inst_tag_access.va() << 13);
n2->data_tag_access = data;
n2->data_tag_update(n2->data_tag_access.context(),n2->data_tag_access.va() << 13);
// tlb_data_in_st64() handles a store to the TLB data-in register: builds the
// TTE tag from the (inst or data, per d_or_i) tag access register, validates
// the page size (UNSUPPORTED_PAGE_SIZE via the NO_ASI backdoor), and inserts
// the TTE into the selected TLB at an auto-selected index.
// NOTE(review): the switch(size) body and branch structure were lost in this
// extraction — confirm against upstream N2_Strand.cc.
SS_AsiSpace::Error
N2_Strand::tlb_data_in_st64( SS_Node* d_or_i, void* r_or_v, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
N2_TagAccess* tag_access = d_or_i ? &n2->data_tag_access : &n2->inst_tag_access;
n2->tsb_tte_tag.context(tag_access->context());
n2->tsb_tte_tag.va(tag_access->va() >> (22 - 13));
n2->tsb_tte_tag.reserved0(0);
n2->tsb_tte_tag.reserved1(0);
n2->tsb_tte_data.pa_zero_ext(0);
switch (n2->tsb_tte_data.size())
n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
return SS_AsiSpace::NO_ASI;
idx = (n2->data_tlb_write)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->data_tlb;
idx = (n2->inst_tlb_write)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->inst_tlb;
// N2 has X (or EP bit) reserved as it has a dedicated ITLB. For completeness
// and uniformity across platforms we force the x bit to one.
tlb->insert_tsb_tte(n2,n2->partition_id(),n2->tsb_tte_tag(),n2->tsb_tte_data(),tag_access->va() << 13,idx,r_or_v != 0);
// tlb_data_access_ld64() handles a diagnostic load from the TLB data access
// register: fetches the indexed TTE from the selected (inst/data) TLB and
// re-encodes it in TSB TTE data format, recomputing the data parity bit
// (xor'ed with any injected error) into the top bit of the soft field.
SS_AsiSpace::Error
N2_Strand::tlb_data_access_ld64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
(n2->data_tlb_lookup)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->data_tlb;
(n2->inst_tlb_lookup)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->inst_tlb;
if (index.index() < tlb->size())
SS_Tte* tte = tlb->get(index.index() & (tlb->size() - 1));
// A diagnostic ASI access to ASI_ITLB_DATA_ACCESS_REG is supposed to calculate
// and set the data parity (irrespective of mode)
uint64_t dp = tte->nfo();
int data_parity = BL_BitUtility::calc_parity(dp);
// The parity obtained in the above step is calculated parity. It does not
// reflect if an error was injected or not. To set this correct, the
// calculated parity needs to be xor'ed with the injection mask
data_parity ^= tte->data_parity_error();
n2->tsb_tte_data.size(tte->page_size());
n2->tsb_tte_data.w(tte->w());
n2->tsb_tte_data.p(tte->p());
n2->tsb_tte_data.cp(tte->cp());
n2->tsb_tte_data.e(tte->e());
n2->tsb_tte_data.ie(tte->ie());
n2->tsb_tte_data.pa(tte->taddr() >> 13);
// Per N2 PRM v1.2 (Table 12-2), bits 61:56 of TTE
// is defined as the 'soft' field.
// (In VONK for some reason, this field has been named as sw1)
// Since, the data parity is stored in the most significant bit
// (bit 61) of the soft field, we need to left shift the data
n2->tsb_tte_data.sw1(data_parity << (N2_TsbTteData::WIDTH_SW1 - 1));
n2->tsb_tte_data.nfo(tte->nfo());
n2->tsb_tte_data.v(tte->valid_bit());
*data = n2->tsb_tte_data();
// tlb_data_access_st64() handles a diagnostic store to the TLB data access
// register: like tlb_data_in_st64() but inserts at the explicit index (and
// lock flag) decoded from the access address rather than an auto index.
// NOTE(review): the switch(size) body and branch structure were lost in this
// extraction — confirm against upstream N2_Strand.cc.
SS_AsiSpace::Error
N2_Strand::tlb_data_access_st64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
N2_TagAccess* tag_access = d_or_i ? &n2->data_tag_access : &n2->inst_tag_access;
n2->tsb_tte_tag.context(tag_access->context());
n2->tsb_tte_tag.va(tag_access->va() >> (22 - 13));
n2->tsb_tte_tag.reserved0(0);
n2->tsb_tte_tag.reserved1(0);
n2->tsb_tte_data.pa_zero_ext(0);
switch (n2->tsb_tte_data.size())
n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
return SS_AsiSpace::NO_ASI;
(n2->data_tlb_write)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->data_tlb;
(n2->inst_tlb_write)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->inst_tlb;
// N2 has X (or EP bit) reserved as it has a dedicated ITLB. For completeness
// and uniformity across platforms we force the x bit to one.
tlb->insert_tsb_tte(n2,n2->partition_id(),n2->tsb_tte_tag(),n2->tsb_tte_data(), tag_access->va() << 13,index.index(),index.flag() != 0);
// tlb_tag_read_ld64() handles a diagnostic load from the TLB tag read
// register: fetches the indexed TTE from the selected (inst/data) TLB and
// assembles the tag read format (pid, recomputed tag parity xor'ed with any
// injected error, real bit, VA/RA, context).
SS_AsiSpace::Error
N2_Strand::tlb_tag_read_ld64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
(n2->data_tlb_lookup)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->data_tlb;
(n2->inst_tlb_lookup)(n2->tlb_sync);
tlb = (N2_Tlb*)n2->inst_tlb;
SS_Tte* tte = tlb->get(index.index() & (tlb->size() - 1));
// A diagnostic ASI access to ASI_ITLB_TAG_READ_REG is supposed to calculate
// and set the tag's parity (irrespective of mode)
uint64_t tp = tte->pid();
int tag_parity = BL_BitUtility::calc_parity(tp);
// The parity obtained in the above step is calculated parity. It does not
// reflect if an error was injected or not. To set this correct, the
// calculated parity needs to be xor'ed with the injection mask
tag_parity ^= tte->tag_parity_error();
tag_read.pid(tte->pid());
tag_read.parity(tag_parity);
tag_read.real(tte->real_bit());
tag_read.va_ra(tte->tag() >> 13);
tag_read.context(tte->context());
// inst_tlb_demap_st64() handles a store to the ITLB demap register: updates
// the inst tag state and performs the requested demap operation (by page,
// by context, all, or real) on the ITLB for this partition.
// NOTE(review): the switch over the demap type (and context selection) was
// lost in this extraction — the calls below are alternative demap flavors,
// not a sequence; confirm against upstream N2_Strand.cc.
SS_AsiSpace::Error
N2_Strand::inst_tlb_demap_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
context = n2->primary_context[0]();
n2->inst_tag_update(context,va);
n2->inst_tag_update(0,va);
n2->inst_tag_update(0,va);
(n2->inst_tlb_write)(n2->tlb_sync);
N2_Tlb* tlb = (N2_Tlb*)n2->inst_tlb;
tlb->demap_real(n2,n2->partition_id(),demap.va() << 13);
tlb->demap_virt(n2,n2->partition_id(),context,demap.va() << 13);
tlb->demap_virt(n2,n2->partition_id(),context);
tlb->demap_all(n2,n2->partition_id());
tlb->demap_real(n2,n2->partition_id());
tlb->demap_virt(n2,n2->partition_id());
// data_tlb_demap_st64() handles a store to the DTLB demap register: the data
// twin of inst_tlb_demap_st64(), including the secondary-context selection.
// NOTE(review): the switch over the demap type (and context selection) was
// lost in this extraction — the calls below are alternative demap flavors,
// not a sequence; confirm against upstream N2_Strand.cc.
SS_AsiSpace::Error
N2_Strand::data_tlb_demap_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
context = n2->primary_context[0]();
n2->data_tag_update(context,va);
context = n2->secondary_context[0]();
n2->data_tag_update(0,va);
n2->data_tag_update(0,va);
n2->data_tag_update(0,va);
(n2->data_tlb_write)(n2->tlb_sync);
N2_Tlb* tlb = (N2_Tlb*)n2->data_tlb;
tlb->demap_real(n2,n2->partition_id(),demap.va() << 13);
tlb->demap_virt(n2,n2->partition_id(),context,demap.va() << 13);
tlb->demap_virt(n2,n2->partition_id(),context);
tlb->demap_all(n2,n2->partition_id());
tlb->demap_real(n2,n2->partition_id());
tlb->demap_virt(n2,n2->partition_id());
// partition_id_st64() handles a store to the partition identifier register
// and propagates the new pid into the cached inst (RA and VA) and data
// context structures.
SS_AsiSpace::Error
N2_Strand::partition_id_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
n2->partition_id.set(data);
n2->inst_ctx_ra.set_pid(n2->partition_id());
n2->inst_ctx_va.set_pid(n2->partition_id());
n2->data_ctx.set_pid(n2->partition_id());
// pri_ctx_st64() handles a store to a primary context register and mirrors
// the value into the cached inst and data context structures.
// NOTE(review): the branch selecting register 0 vs 1 (by va, presumably) was
// lost in this extraction — confirm against upstream N2_Strand.cc.
SS_AsiSpace::Error
N2_Strand::pri_ctx_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
n2->primary_context[0].set(data);
n2->inst_ctx_va.set_pri_ctx0(n2->primary_context[0]());
n2->data_ctx.set_pri_ctx0(n2->primary_context[0]());
n2->primary_context[1].set(data);
n2->inst_ctx_va.set_pri_ctx1(n2->primary_context[1]());
n2->data_ctx.set_pri_ctx1(n2->primary_context[1]());
// sec_ctx_st64() handles a store to a secondary context register and mirrors
// the value into the cached data context structure (secondary contexts are
// data-side only — no inst_ctx update).
// NOTE(review): the branch selecting register 0 vs 1 was lost in this
// extraction — confirm against upstream N2_Strand.cc.
SS_AsiSpace::Error
N2_Strand::sec_ctx_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
n2->secondary_context[0].set(data);
n2->data_ctx.set_sec_ctx0(n2->secondary_context[0]());
n2->secondary_context[1].set(data);
n2->data_ctx.set_sec_ctx1(n2->secondary_context[1]());
// inst_tlb_probe_ld64() handles a load from the ITLB probe register: decodes
// the probe address, looks the VA up in the ITLB (real or primary-context
// virtual — the selecting branch is lost in this extraction), and returns the
// translated PA in the probe data format.
SS_AsiSpace::Error
N2_Strand::inst_tlb_probe_ld64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr addr, uint64_t* data )/*{{{*/
N2_Strand* n2 = (N2_Strand*)s;
N2_ItlbProbeAddr itlb_addr;
N2_ItlbProbeData itlb_data;
SS_Vaddr va = va_signext((itlb_addr.va() << 13) & n2->mask_pstate_am);
(n2->inst_tlb_lookup)(n2->tlb_sync);
N2_Tlb* tlb = (N2_Tlb*)n2->inst_tlb;
tte = tlb->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);
ctxt0 = n2->primary_context[0]();
ctxt1 = n2->primary_context[1]();
tte = tlb->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit);
tte = tlb->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit);
// ToDo. Do we deal with multi hit detection here?
itlb_data.pa(tte->trans(va) >> 13);
// intr_queue_st64() handles stores to the interrupt queue head/tail
// ASI registers (cpu mondo, dev mondo, resumable, non-resumable).
// The case labels that select among these register writes (presumably
// a switch on the ASI VA offset) are not visible in this extract —
// confirm against the full source. Tail registers are writable only
// in hyperprivileged mode: a store while merely privileged returns
// NO_WRITE. After the register update, each queue's head is compared
// with its tail and the matching interrupt is raised (non-empty) or
// retracted (empty).
SS_AsiSpace::Error
N2_Strand::intr_queue_st64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr va
, uint64_t data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
n2
->cpu_mondo_head
= data
;
// Tail writes are rejected at privileged (non-hyper) level.
if (n2
->sim_state
.priv() == SS_Strand::SS_PRIV
)
return SS_AsiSpace::NO_WRITE
;
n2
->cpu_mondo_tail
= data
;
n2
->dev_mondo_head
= data
;
if (n2
->sim_state
.priv() == SS_Strand::SS_PRIV
)
return SS_AsiSpace::NO_WRITE
;
n2
->dev_mondo_tail
= data
;
n2
->resumable_head
= data
;
if (n2
->sim_state
.priv() == SS_Strand::SS_PRIV
)
return SS_AsiSpace::NO_WRITE
;
n2
->resumable_tail
= data
;
n2
->non_resumable_head
= data
;
if (n2
->sim_state
.priv() == SS_Strand::SS_PRIV
)
return SS_AsiSpace::NO_WRITE
;
n2
->non_resumable_tail
= data
;
assert(0); // asi mapping error
// Re-evaluate pending interrupts: head != tail means the queue has
// entries, so raise the corresponding trap; otherwise retract it.
if (n2
->cpu_mondo_head
.offset() != n2
->cpu_mondo_tail
.offset())
n2
->irq
.raise(n2
,SS_Interrupt::BIT_CPU_MONDO_TRAP
);
n2
->irq
.retract(SS_Interrupt::BIT_CPU_MONDO_TRAP
);
if (n2
->dev_mondo_head
.offset() != n2
->dev_mondo_tail
.offset())
n2
->irq
.raise(n2
,SS_Interrupt::BIT_DEV_MONDO_TRAP
);
n2
->irq
.retract(SS_Interrupt::BIT_DEV_MONDO_TRAP
);
if (n2
->resumable_head
.offset() != n2
->resumable_tail
.offset())
n2
->irq
.raise(n2
,SS_Interrupt::BIT_RESUMABLE_ERROR
);
n2
->irq
.retract(SS_Interrupt::BIT_RESUMABLE_ERROR
);
// intr_queue_ld64() handles loads from the interrupt queue head/tail
// ASI registers, dispatching on the ASI VA offset (0x3c0..0x3f8).
// The enclosing switch statement is not visible in this extract.
SS_AsiSpace::Error
N2_Strand::intr_queue_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr va
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
case 0x3c0: *data
= n2
->cpu_mondo_head(); break;
case 0x3c8: *data
= n2
->cpu_mondo_tail(); break;
case 0x3d0: *data
= n2
->dev_mondo_head(); break;
case 0x3d8: *data
= n2
->dev_mondo_tail(); break;
case 0x3e0: *data
= n2
->resumable_head(); break;
case 0x3e8: *data
= n2
->resumable_tail(); break;
case 0x3f0: *data
= n2
->non_resumable_head(); break;
case 0x3f8: *data
= n2
->non_resumable_tail(); break;
default: assert(0); // asi mapping error
// intr_recv_st64() handles a store to the interrupt receive ASI
// register: the stored value is ANDed into intr_recv, i.e. software
// can only clear pending interrupt-vector bits, never set them.
SS_AsiSpace::Error
N2_Strand::intr_recv_st64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
n2
->intr_recv
= data
& n2
->intr_recv
;
// intr_recv_wr64() write-access hook for the interrupt receive
// register. Only the strand cast is visible here; the remainder of
// the handler is in the full source.
SS_AsiSpace::Error
N2_Strand::intr_recv_wr64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// intr_recv_ld64() load hook for the interrupt receive register.
// Only the strand cast is visible here; presumably the handler
// returns intr_recv through *data — confirm against the full source.
SS_AsiSpace::Error
N2_Strand::intr_recv_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// intr_r_ld64() load from the incoming-vector ASI register: reading
// it clears the corresponding pending bit (intr_r.vector()) from the
// interrupt receive register.
SS_AsiSpace::Error
N2_Strand::intr_r_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
n2
->intr_recv
&= ~(uint64_t(1) << n2
->intr_r
.vector());
// desr_ld64() load from the disrupting error status register (DESR).
// The visible side effect is read-to-clear: the unmasked state is
// reset to 0 after the read.
SS_AsiSpace::Error
N2_Strand::desr_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
n2
->desr
.set_unmasked(0);
// stb_access_ld64() diagnostic load from the store buffer access ASI.
// When RAS is enabled the raw store-buffer value for the decoded
// entry is fetched, then re-packaged through the field-specific
// register view (data / ecc / control-parity / cam / pointer) that
// the VA's field selector chooses. The enclosing switch on the field
// selector is not visible in this extract.
SS_AsiSpace::Error
N2_Strand::stb_access_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr va
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
N2_StbAccessAddrFields addr
;
if (n2
->sim_state
.ras_enabled() )
// calculate index to look into the corresponding entry
*data
= n2
->stb
.get_stb_value(addr());
N2_StbAccessDaReg stb_access_da_reg
;
N2_StbAccessEccReg stb_access_ecc_reg
;
N2_StbAccessCtlReg stb_access_ctl_reg
;
N2_StbAccessCamReg stb_access_cam_reg
;
case N2_StbAccessAddrFields::DATA_FIELD
:
stb_access_da_reg
.set(*data
);
*data
= stb_access_da_reg();
case N2_StbAccessAddrFields::ECC_FIELD
:
stb_access_ecc_reg
.set(*data
);
*data
= stb_access_ecc_reg();
case N2_StbAccessAddrFields::CNTRL_PARITY_FIELD
:
stb_access_ctl_reg
.set(*data
);
*data
= stb_access_ctl_reg();
case N2_StbAccessAddrFields::CAM_FIELD
:
stb_access_cam_reg
.set(*data
);
*data
= stb_access_cam_reg();
case N2_StbAccessAddrFields::STB_POINTER_FIELD
:
// the value of "current store buffer pointer" is provided
// by test bench through follow-me scheme.
if (n2
->sim_state
.ras_enabled()) {
*data
= n2
->stb
.get_stb_pointer();
// This ASI, like other diagnostic array access ASIs is not checked for a
// legal VA. So, using a "reserved" value won't generate an exception, but
// will generate unpredictable data from a software perspective.
// I can tell you that for this specific case, all cases where va[8]==1
// will act the same as va[8:6]==100, which is to return the current store
//---> per Mark, we should treat the remaining cases as 0x4. testbench is
// expected to provide a follow-me value.
// irf_ecc_ld64() diagnostic load of integer register file ECC.
// Stubbed: always returns 0.
SS_AsiSpace::Error
N2_Strand::irf_ecc_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
*data
= 0; // For now return 0
// frf_ecc_ld64() diagnostic load of floating-point register file ECC.
// Stubbed: always returns 0.
SS_AsiSpace::Error
N2_Strand::frf_ecc_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
*data
= 0; // For now return 0
// tsa_access_ld64() diagnostic load of the trap stack array.
// Stubbed: always returns 0.
SS_AsiSpace::Error
N2_Strand::tsa_access_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
*data
= 0; // For now return 0
// mra_access_ld64() diagnostic load of the MMU register array.
// Stubbed: always returns 0.
SS_AsiSpace::Error
N2_Strand::mra_access_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
*data
= 0; // For now return 0
// tick_access_ld64() diagnostic load of the tick compare array.
// Returns tick_cmpr / stick_cmpr / hstick_cmpr depending on the
// decoded index (the enclosing switch is not visible here), or the
// stored ECC for that entry when RAS is enabled and the ECC view is
// selected.
SS_AsiSpace::Error
N2_Strand::tick_access_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr va
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// 1 - Return Data , 0 - Return ECC
*data
= n2
->tick_cmpr(); break;
*data
= n2
->stick_cmpr(); break;
*data
= n2
->hstick_cmpr(); break;
if (s
->sim_state
.ras_enabled())
ecc_obj
= n2
->tick_cmpr_array_ecc
[ta
.index()];
// tw_control_st64() handles a store to the hardware tablewalk control
// ASI register. It updates tw_control, then updates this strand's bit
// in the core-shared tw_status.stp field (set when tw_control.stp()
// is on, cleared otherwise) under the tw_status lock, since tw_status
// is shared by all strands of the core.
SS_AsiSpace::Error
N2_Strand::tw_control_st64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr
, uint64_t data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
n2
->tw_control
.set(data
);
// tw_status is core-shared state: guard the read-modify-write.
n2
->core
.tw_status
.lock();
if (n2
->tw_control
.stp())
// Set this strand's bit (strand_id modulo 8 within the core).
n2
->core
.tw_status
.stp(n2
->core
.tw_status
.stp() | (1 << (n2
->strand_id() & 7)));
n2
->core
.tw_status
.stp(n2
->core
.tw_status
.stp() &~ (1 << (n2
->strand_id() & 7)));
n2
->core
.tw_status
.unlock();
// n2_scratchpad_ld64() load from a scratchpad ASI register (_reg is
// the 8-entry register array; (va >> 3) & 7 selects the entry).
// With RAS enabled, the stored ECC for the entry is checked against
// the Hamming(64,8) syndrome of the current value; single-bit errors
// record SCAC and multi-bit errors SCAU in the data SFSR (gated by
// the corresponding CERER enables), the syndrome and index are
// captured in the data SFAR, and TRAP_IPE is returned. Otherwise the
// raw register value is returned.
SS_AsiSpace::Error
N2_Strand::n2_scratchpad_ld64( SS_Node
*, void* _reg
, SS_Strand
* s
, SS_Vaddr va
, uint64_t* data
)/*{{{*/
uint64_t* reg
= (uint64_t*)_reg
;
N2_Strand
* n2
= (N2_Strand
*)s
;
// If RAS enabled and appropriate error detection flags are set
// determine if there are any errors (injected or otherwise)
if (n2
->sim_state
.ras_enabled() )
N2_Core
& n2_core
= n2
->core
;
BL_EccBits ecc_obj
= n2
->sp_ecc
[(va
>> 3) & 7];
// Check if the ecc associated with this register is a valid ecc
// Syndrome is the difference between the stored and calculated ECC values
BL_Hamming_64_8_Synd syndrome
= BL_Hamming_64_8_Synd(reg
[(va
>> 3) & 7],ecc_obj
);
bool updateDsfar
= false;
// Errors are recorded only if the PSCCE bit is set in the SETER
// Correctable errors are detected only SCAC bit in CERER is set
if (n2_core
.cerer
.scac())
if (syndrome
.isSingleBitError())
n2
->data_sfsr
.error_type(N2_DataSfsr::SCAC
);
// Uncorrectable errors are detected only if SCAU bit in CERER is set
else if (n2_core
.cerer
.scau())
if (syndrome
.isDoubleBitError() || syndrome
.isMultipleBitError())
n2
->data_sfsr
.error_type(N2_DataSfsr::SCAU
);
/*unsigned long long native_add = 0;
unsigned long long intermediate_err_add = (native_add & ~0x7f8ULL) |
syndrome.getSyndrome() << 3;
unsigned long long error_add = (intermediate_err_add & ~0x7ULL) |
n2->data_sfar.error_addr(error_add);*/
unsigned long long error_add
= 0;
// Capture the syndrome and array index in DSFAR
// Store the syndrome in bits 3 thru 10 of DSFAR
error_add
= BL_BitUtility::set_subfield(error_add
,syndrome
.getSyndrome(),3,10);
// Store the scratchpad array index in bits 0 thru 2
error_add
= BL_BitUtility::set_subfield(error_add
,((va
>> 3) & 7),0,2);
n2
->data_sfar
.error_addr(error_add
);
return SS_AsiSpace::TRAP_IPE
;
// No (reportable) error: return the raw scratchpad value.
*data
= reg
[(va
>> 3) & 7];
// n2_scratchpad_st64() store to a scratchpad ASI register. With RAS
// enabled, the ECC check bits for the stored data are computed; if
// the error-inject register has ENE and SCAU set, the ECC is XORed
// with the inject mask (deliberately corrupting it) before being
// saved alongside the data in sp_ecc. The data itself is always
// written to the entry selected by (va >> 3) & 7.
SS_AsiSpace::Error
N2_Strand::n2_scratchpad_st64( SS_Node
*, void* _reg
, SS_Strand
* s
, SS_Vaddr va
, uint64_t data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// If RAS enabled and appropriate error injection flags are set
// Then inject error in check bits
if (n2
->sim_state
.ras_enabled() )
N2_Core
& n2_core
= n2
->core
;
BL_EccBits ecc_obj
= BL_Hamming_64_8_Synd::calc_check_bits(data
);
unsigned ecc
= ecc_obj
.get();
// Check if ENB and SCAU bits are set in N2 Error Injection Register
if ((n2_core
.error_inject
.ene() == 1) && (n2_core
.error_inject
.scau() == 1))
ecc
^= n2_core
.error_inject
.eccmask();
// Set back the corrputed ecc
n2
->sp_ecc
[(va
>> 3) & 7] = ecc_obj
;
uint64_t* reg
= (uint64_t*)_reg
;
reg
[(va
>> 3) & 7] = data
;
// scratchpad_access_ld64() diagnostic load from the scratchpad array:
// returns either the data value or, when RAS is enabled and the ECC
// view is selected, the stored ECC for the indexed entry. The decode
// of the index/view (spa) is not visible in this extract.
SS_AsiSpace::Error
N2_Strand::scratchpad_access_ld64( SS_Node
*, void*, SS_Strand
* s
, SS_Vaddr va
, uint64_t* data
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
// 1 - Return Data , 0 - Return ECC
*data
= n2
->scratchpad
[spa
.index()];
if(s
->sim_state
.ras_enabled())
ecc_obj
= n2
->sp_ecc
[spa
.index()];
/*static*/ void N2_Strand::n2_run_perf( SS_Strand
* s
, Sam::Vcpu::perfcntr which
, int64_t incr
)/*{{{*/
// run_perf() updates a pic value (pic0, or pic1) and sets the
// overflow bit accordingly, if trap on overflow are enabled (pcr.toe)
// and if overflow did occur it raises the appropriate trap.
N2_Strand
* n2
= (N2_Strand
*)s
;
// Count only when the current privilege level is selected by the
// PCR: st (privileged), ut (user), or ht (hyperprivileged).
if ((n2
->pcr
.st() && n2
->pstate
.priv())
|| (n2
->pcr
.ut() && ! n2
->pstate
.priv())
|| (n2
->pcr
.ht() && n2
->hpstate
.hpriv()))
// pic0 path: bump the low counter and trap on overflow if enabled.
uint64_t tmp
= n2
->pic
.l() + incr
;
if (n2
->pcr
.toe() & 0x1) /* bit-4 for pic0 ??? */
n2
->irq
.raise(s
,SS_Interrupt::BIT_INTERRUPT_LEVEL_15
);
// pic1 path: same for the high counter.
uint64_t tmp
= n2
->pic
.h() + incr
;
if (n2
->pcr
.toe() & 0x2) /* bit-5 for pic1 ??? */
n2
->irq
.raise(s
,SS_Interrupt::BIT_INTERRUPT_LEVEL_15
);
void N2_Strand::n2_internal_interrupt( SS_Strand
* s
, uint_t vector
, bool raise
)/*{{{*/
// Called by N2_Cpu when asi 0x73 (intr_w) is assigned,
// e.g. whenever one strand cross calls another strand
// Not used in cosim mode (interrupts come from the PLI there).
N2_Strand
* n2
= (N2_Strand
*)s
;
assert(!n2
->sim_state
.cosim());
// Set or clear the bit for this vector in the receive register;
// the raise/clear selection is handled in the full source.
n2
->intr_recv
|= (uint64_t(1) << vector
);
n2
->intr_recv
&= ~(uint64_t(1) << vector
);
// intr_update() re-evaluates the interrupt receive register: it scans
// vectors 63 down to 0 and raises BIT_INTERRUPT_VECTOR if any bit is
// pending, otherwise the interrupt is retracted.
void N2_Strand::intr_update()/*{{{*/
for (int l
=63; l
>= 0; l
--)
if (intr_recv
& (uint64_t(1) << l
))
irq
.raise(this,SS_Interrupt::BIT_INTERRUPT_VECTOR
);
irq
.retract(SS_Interrupt::BIT_INTERRUPT_VECTOR
);
// n2_cnv2pa() translates an address to a physical address for the
// debugger/frontend, dispatching on the requested TranslateMode
// (the enclosing switch is not visible in this extract). VA modes
// fill in default context/pid from the strand state and fall through
// to TLB va2pa lookups (inst then data TLB); RA modes use ra2pa
// lookups; PA_TO_PA only range-checks the address against pa_bits().
// Returns TRANSLATE_NO_TTE_FOUND when no TLB entry matches.
Sam::Vcpu::TranslateError
N2_Strand::n2_cnv2pa( SS_Strand
* s
, Sam::Vcpu::TranslateMode mode
, SS_Vaddr addr
, uint64_t ctx
, uint64_t pid
, SS_Paddr
* pa
)/*{{{*/
N2_Strand
* n2
= (N2_Strand
*)s
;
case Sam::Vcpu::TRANSLATE_VA_TO_PA
:
// Default context: primary context 0.
ctx
= n2
->primary_context
[0]();
case Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX
:
// Default pid: the strand's partition id.
pid
= n2
->partition_id();
case Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX_PID
:
tte
= ((N2_Tlb
*)n2
->inst_tlb
)->lookup_va2pa(s
,addr
,ctx
,pid
,&tte_multi_hit
);
tte
= ((N2_Tlb
*)n2
->data_tlb
)->lookup_va2pa(s
,addr
,ctx
,pid
,&tte_multi_hit
);
case Sam::Vcpu::TRANSLATE_RA_TO_PA
:
pid
= n2
->partition_id();
case Sam::Vcpu::TRANSLATE_RA_TO_PA_PID
:
tte
= ((N2_Tlb
*)n2
->inst_tlb
)->lookup_ra2pa(s
,addr
,pid
,&tte_multi_hit
);
tte
= ((N2_Tlb
*)n2
->data_tlb
)->lookup_ra2pa(s
,addr
,pid
,&tte_multi_hit
);
case Sam::Vcpu::TRANSLATE_PA_TO_PA
:
// Reject addresses with the top implemented PA bit set.
if ((addr
>> (n2
->pa_bits() - 1)) & 1)
return Sam::Vcpu::TRANSLATE_OK
;
return Sam::Vcpu::TRANSLATE_NO_TTE_FOUND
;
void N2_Strand::warm_reset(bool intp
)/*{{{*/
// because WMR/DBR is not exactly the same as POR, cannot call
// ss_trap(POWER_ON_RESET) directly.
//ss_trap(0,0,this,0,SS_Trap::POWER_ON_RESET);
//TODO if there are other registers that should be updated by wmr/dbr,
// but are not part of trap handling, then do it here.
// the warm_reset() is invoked by, at least, two places, one is reset_gen,
// where a RESET_GEN_WMR should be triggered, another is cosim pli-command
// "INTP 00 00", which signals a system-wide warm_reset, a RESET_GEN_WMR
// trap should not be triggered, as there will be an "INTP xx 01" followed,
// one for each strand, where RESET_GEN_WMR will be invoked.
irq_launch(SS_Trap::RESET_GEN_WMR
);
void N2_Strand::n2_external_interrupt( SS_Strand
* s
, uint64_t *payload
, bool raise
)/*{{{*/
// Called by external source such as NIU to signal
// a device interrupt to the strand. The N2 model is
// always raise the trap. It never sends retracts;
// we take care of that ourselves.
N2_Strand
* n2
= (N2_Strand
*)s
;
assert(!n2
->sim_state
.cosim());
// Can not use s->msg.make_signal here because the strand is the
// receiver of the signal, not the creator.
SS_Signal
*sgn
= SS_Signal::alloc(SS_Signal::EXTERNAL_INTERRUPT
);
// The interrupt type rides in word 7 of the mondo payload.
sgn
->irq_type
= payload
[7];
void N2_Strand::n2_ras_enable( SS_Strand
* s
, char* )/*{{{*/
// Ras enable set from frontend
N2_Strand
* n2
= (N2_Strand
*)s
;
// Switch the execute/memory dispatch tables to the RAS-aware
// (trace / error-checking) variants.
n2
->exe_table
= n2
->trc_exe_table
;
n2
->mem_table
= n2
->mem_ras_table
;
n2
->sim_state
.ras_enabled(1);
// Swap in the check-bit-tracking memory model and point the error
// detector at it.
assert(((N2_Model
*)n2
->model
)->ck_memory
);
s
->memory
= ((N2_Model
*)n2
->model
)->ck_memory
;
if (n2
->mem_err_detector
.memory
== NULL
)
n2
->mem_err_detector
.memory
= s
->memory
;
s
->flush_tte_all(); // bounce the decode caches
// fill_store_buffer_mem() adds the write transactions contained in a
// MemoryTransaction to the store buffer. It handles both large and small
// transactions by breaking large transactions in to doubleword chunks
// and mapping 4, 2, and 1 byte transactions to the correct "byte mark".
SS_Trap::Type
N2_Strand::fill_store_buffer_mem(const MemoryTransaction
&memXact
)/*{{{*/
// Only write transactions belong in the store buffer.
if (!memXact
.writeXact())
fprintf(stderr
,"N2_Strand::fillStoreBufferMem"
"bad xact access: %d", memXact
.access());
// Large transactions: one full-doubleword (0xff byte-mark) store
// buffer entry per 8-byte chunk.
for (int ndx
= 0; ndx
< memXact
.size()/sizeof(double); ++ndx
)
return stb
.fill_store_buffer(false, memXact
.paddr() + sizeof(double) * ndx
,
0xff, memXact
.getData(ndx
));
fprintf(stderr
,"N2_Strand::fill_store_buffer_mem"
"bad xact size: %d", memXact
.size());
// Small transactions: byteMarks selects the written bytes.
return stb
.fill_store_buffer(false, memXact
.paddr(), byteMarks
, memXact
.getData());
// fill_store_buffer_asi() adds an ASI store to the store buffer
// (ASI_ERROR_INJECT stores are treated specially; the branch taken
// for them is not visible in this extract).
SS_Trap::Type
N2_Strand::fill_store_buffer_asi(uint64_t addr
,
if (asi_num
!= N2_Asi::ASI_ERROR_INJECT
)
return stb
.fill_store_buffer(true, addr
, asi_num
, data
);
// check_store_buffer_RAWtrap() checks a read transaction against the
// store buffer for a read-after-write hazard and returns the trap
// (if any) the store buffer model reports.
SS_Trap::Type
N2_Strand::check_store_buffer_RAWtrap(const MemoryTransaction
&memXact
)/*{{{*/
fprintf(stderr
,"N2_Strand::check_store_buffer_RAWtrap"
"bad access type: %d", memXact
.access());
return stb
.check_store_buffer_RAWtrap(memXact
);
// flush_store_buffer() drains the strand's store buffer, delegating
// to the store buffer model and returning any trap it reports.
SS_Trap::Type
N2_Strand::flush_store_buffer()/*{{{*/
return stb
.flush_store_buffer();