// ========== Copyright Header Begin ==========================================
//
// OpenSPARC T2 Processor File: N2_Strand.cc
// Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
//
// The above named program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public
// License version 2 as published by the Free Software Foundation.
//
// The above named program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this work; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
//
// ========== Copyright Header End ============================================
#include <new>
#include "N2_Asi.h"
#include "N2_Registers.h"
#include "N2_Model.h"
#include "N2_Cpu.h"
#include "N2_Core.h"
#include "N2_Strand.h"
#include "N2_IrfEcc.h"
#include "N2_FrfEcc.h"
#include "N2_Tlb.h"
#include "BL_BitUtility.h"
#include "SS_CKMemory.h"
#include <stdio.h>
#include <stdlib.h>
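// N2 implements 48 virtual address bits and 40 physical address bits.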
enum
{
VA_BITS = 48,
PA_BITS = 40
};
inline SS_Vaddr va_signext( SS_Vaddr va ) /*{{{*/
// va_signext() sign extends the given virtual address from VA_BITS to 64 bits.
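//
// A worked example, assuming SS_Vaddr is a signed 64-bit type (so that the
// right shift is arithmetic and replicates bit 47):
//
//    va_signext(0x0000800000000000) == 0xffff800000000000
//    va_signext(0x00007fffffffffff) == 0x00007fffffffffff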
{
return (va << (64 - VA_BITS)) >> (64 - VA_BITS);
}
/*}}}*/
inline bool va_inrange( SS_Vaddr va ) /*{{{*/
//
// va_inrange() checks that the va does not fall in the so-called va-hole.
// N2 only implements 48 bits of the 64-bit virtual (real) address space.
// Virtual addresses are signed, so this means that the upper 16 bits
// are checked for proper sign extension of bit 47. So we check that
//
//    0x0000000000000000 <= va <= 0x00007fffffffffff
// or 0xffff800000000000 <= va <= 0xffffffffffffffff
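//
// With an arithmetic shift the test reduces to comparing the upper 17 bits
// against all zeroes or all ones; e.g. 0x00007fffffffffff >> 47 == 0 while
// 0xffff800000000000 >> 47 == -1.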
{
return ((va >> (VA_BITS - 1)) == SS_Vaddr(0))
|| ((va >> (VA_BITS - 1)) == -SS_Vaddr(1));
}
/*}}}*/
inline bool pc_inrange( SS_Vaddr va ) /*{{{*/
//
// pc_inrange() checks the same range as va_inrange() with an additional
// 0x20 bytes taken off the positive end of the range. So we check that
//
//    0x0000000000000000 <= va <= 0x00007fffffffffdf
// or 0xffff800000000000 <= va <= 0xffffffffffffffff
{
return (((va + SS_Vaddr(0x20)) >> (VA_BITS - 1)) == SS_Vaddr(0))
|| (( va >> (VA_BITS - 1)) == -SS_Vaddr(1));
}
/*}}}*/
inline bool pc_onrange( SS_Vaddr va ) /*{{{*/
//
// pc_onrange() checks the same range as pc_inrange() but instead with one
// additional decode cache line (0x40 bytes) taken off the positive end of
// the range. So we check that
//
//    0x0000000000000000 <= va <= 0x00007fffffffffbf
// or 0xffff800000000000 <= va <= 0xffffffffffffffff
{
return (((va + SS_Vaddr(SS_InstrCache::LINE_SIZE * 4)) >> (VA_BITS - 1)) == SS_Vaddr(0))
|| (( va >> (VA_BITS - 1)) == -SS_Vaddr(1));
}
/*}}}*/
inline bool pc_iorange( SS_Vaddr pc )/*{{{*/
{
  // I/O address ranges 0xff00000000:0xffffffffff and 0xa000000000:0xbfffffffff
  // are the only I/O regions from which instruction fetches are ok.
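  // The first term matches PA[39:32] == 0xff and the second PA[39:37] == 0b101;
  // e.g. pc == 0xff00001000 passes the first test, pc == 0xa000002000 the second.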
return (((pc >> (PA_BITS - 8)) & 0xff) == 0xff) || (((pc >> (PA_BITS - 3)) & 0x7) == 0x5);
}
/*}}}*/
extern "C" SS_Vaddr n2_exe_real_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i )/*{{{*/
{
  // First check if we have a UI breakpoint in this small range at the top
  // of the address space.
if (s->test_break_inst_va(pc))
return pc;
N2_Strand* n2 = (N2_Strand*)s;
n2->inst_tag_update(0,pc);
  return (s->inst_trap)(pc,npc,s,i,va_signext(pc),SS_Trap::INSTRUCTION_REAL_RANGE);
}
/*}}}*/
extern "C" SS_Vaddr n2_exe_address_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i )/*{{{*/
{
  // First check if we have a UI breakpoint in this small range at the top
  // of the address space.
if (s->test_break_inst_va(pc))
return pc;
N2_Strand* n2 = (N2_Strand*)s;
  uint64_t ctxt0 = n2->tl() ? 0 : n2->primary_context[0]();
  n2->inst_tag_update(ctxt0,pc);
  return (s->inst_trap)(pc,npc,s,i,va_signext(pc),SS_Trap::INSTRUCTION_ADDRESS_RANGE);
}
/*}}}*/
extern "C" SS_Vaddr n2_exe_va_watchpoint( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i )/*{{{*/
{
// First check if we have a UI breakpoint on the watchpoint address
if (s->test_break_inst_va(pc))
return pc;
N2_Strand* n2 = (N2_Strand*)s;
SS_Vaddr va = pc & n2->mask_pstate_am;
// Make sure we handle va watchpoint enable/disable properly. E.g
// when va watchpoint info is changed we flush the cache. We only
// expect to get here when they are enabled and when they should hit.
assert(n2->inst_watchpoint_va_hit(va));
return (n2->inst_trap)(pc,npc,s,i,va,SS_Trap::INSTRUCTION_VA_WATCHPOINT); // prio 2.5
}
/*}}}*/
extern "C" SS_Vaddr n2_dec_real_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand *s, SS_Instr* i )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_Instr* line = n2->inst_cache->pc_line(pc);
long l;
  // For the last cacheline of the positive range of the address space we
  // have to be careful what we do. For 0x7fffffffffc0 to 0x7fffffffffdf we
  // have to decode as normal, for 0x7fffffffffe0 to 0x7fffffffffff we have
  // to throw real range traps. In either case make sure we check UI break
  // points first.
for (l = 0; l < (SS_InstrCache::LINE_SIZE >> 1); l++)
line->line_index(l)->exe = ss_break_inst_va_dec;
for (; l < SS_InstrCache::LINE_SIZE; l++)
line->line_index(l)->exe = n2_exe_real_range;
return (i->exe)(pc,npc,s,i);
}
/*}}}*/
extern "C" SS_Vaddr n2_dec_address_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand *s, SS_Instr* i )/*{{{*/
{
assert(SS_InstrCache::LINE_SIZE == 16);
  // For the last cacheline of the positive range of the address space we
  // have to be careful what we do. For 0x7fffffffffc0 to 0x7fffffffffdf we
  // have to decode as normal, for 0x7fffffffffe0 to 0x7fffffffffff we have
  // to throw address range traps. However the va watchpoint has to be
  // checked before that as it has higher priority than the address range
  // trap. Make sure that we check UI breakpoints first though.
N2_Strand* n2 = (N2_Strand*)s;
SS_Instr* line = n2->inst_cache->pc_line(pc);
long l;
for (l = 0; l < (SS_InstrCache::LINE_SIZE >> 1); l++)
line->line_index(l)->exe = ss_break_inst_va_dec;
for (; l < SS_InstrCache::LINE_SIZE; l++)
line->line_index(l)->exe = n2_exe_address_range;
SS_Vaddr va = pc & n2->mask_pstate_am;
SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);
  // If va watchpoint checks are enabled and the watchpoint falls in this
  // cache line then set exe of the location that causes the va watchpoint
  // trap to the routine that does that.
if (n2->inst_watchpoint_va_near_hit(tm,va))
{
l = (n2->inst_watchpoint_va_get() >> 2) & SS_InstrCache::LINE_MASK;
line->line_index(l)->exe = n2_exe_va_watchpoint;
}
return (i->exe)(pc,npc,s,i);
}
/*}}}*/
extern "C" SS_Vaddr n2_dec_va_watchpoint( SS_Vaddr pc, SS_Vaddr npc, SS_Strand *s, SS_Instr* i )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_Instr* line = n2->inst_cache->pc_line(pc);
long l;
// On an inst_mmu_va access we can cause a fetch of a cacheline that has
// a watchpoint enabled in it, but the pc that caused the fetch did not match.
// The mmu makes us trampoline through this routine. So set the decoders back
// to the normal decoders and insert a watchpoint trigger in the correct place.
for (l=0; l < SS_InstrCache::LINE_SIZE; l++)
line->line_index(l)->exe = ss_break_inst_va_dec;
SS_Vaddr va = pc & n2->mask_pstate_am;
SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);
  // Make sure we only get here when watchpoints are truly enabled and fall
  // in this cacheline.
assert(n2->inst_watchpoint_va_near_hit(tm,va));
l = (n2->inst_watchpoint_va_get() >> 2) & SS_InstrCache::LINE_MASK;
line->line_index(l)->exe = n2_exe_va_watchpoint;
return (i->exe)(pc,npc,s,i);
}
/*}}}*/
N2_Strand::N2_Strand( N2_Core& _core, const char* _name, uint_t _strand_id )/*{{{*/
:
SS_Strand(_core,_name,run_exe_table,mem_run_table,mem_trc_table,_core.cpu.mem_err_detector),
core(_core),
trap_dae_inv_asi(SS_Trap::RESERVED), // UNSUPPORTED_PAGE_SIZE ... an exception to the norm ...
data_wp_pa_mask(1),
data_wp_pa_addr(0),
data_wp_va_mask(1),
data_wp_va_addr(0),
data_wp_bytes(0),
data_wp_flags(0),
data_wp_check(false),
stb(*this)
{
strand_id = _strand_id;
model = &core.cpu.model;
dec_table = &run_dec_xx_xxxxxx;
exe_table = run_exe_table;
mem_table = mem_run_table;
#ifdef ARCH_V8
v8_exe_table = exe_table;
exe_table = v8_run_exe_table;
#endif
get_state_name = n2_get_state_name;
get_state = n2_get_state;
set_state = n2_set_state;
va_bits = VA_BITS;
pa_bits = PA_BITS;
data_wp_pa_mask = (SS_Paddr(1) << pa_bits()) - 8;
data_wp_va_mask = (SS_Vaddr(1) << va_bits()) - 8;
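  // Preprogrammed bypass TTEs: bit pa_bits()-1 (bit 39) splits the physical
  // address space into a memory (0) and an I/O (1) half; phys_tte_mem_am is
  // used in pstate.am mode and clips translated addresses to 32 bits.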
SS_Paddr io_mask = SS_Paddr(1) << (pa_bits() - 1);
phys_tte_mem->phys_mask = ~(io_mask - SS_Paddr(1));
phys_tte_mem->phys_page = SS_Paddr(0);
phys_tte_mem->virt_mask = io_mask;
phys_tte_mem->virt_page = SS_Paddr(0);
phys_tte_mem_am->phys_mask = ~SS_Paddr(0) << 32;
phys_tte_mem_am->phys_page = SS_Paddr(0);
phys_tte_mem_am->virt_mask = io_mask;
phys_tte_mem_am->virt_page = SS_Paddr(0);
phys_tte_io->phys_mask = ~(io_mask - SS_Paddr(1));
phys_tte_io->phys_page = io_mask;
phys_tte_io->virt_mask = io_mask;
phys_tte_io->virt_page = io_mask;
trap = (SS_TrapFun)ss_trap;
inst_mmu = (SS_InstMmu)n2_inst_mmu_pa;
inst_mmu_va = (SS_InstMmu)n2_inst_mmu_va;
inst_mmu_ra = (SS_InstMmu)n2_inst_mmu_ra;
inst_mmu_pa = (SS_InstMmu)n2_inst_mmu_pa;
data_mmu = (SS_DataMmu)n2_data_mmu;
inst_trap = (SS_MmuTrap)n2_inst_trap;
data_trap = (SS_MmuTrap)n2_data_trap;
invalid_asi = (SS_InvalidAsi)n2_invalid_asi;
run_perf = n2_run_perf;
internal_interrupt = n2_internal_interrupt;
external_interrupt = n2_external_interrupt;
ras_enable = n2_ras_enable;
inst_tlb = &core.inst_tlb;
data_tlb = &core.data_tlb;
if ((strand_id() % N2_Model::NO_STRANDS_PER_CPU) == 0)
{
core.inst_tlb.add_strand(this);
core.data_tlb.add_strand(this);
}
setup_tte_link_tables();
tlb_entry = -1;
#ifdef COMPILE_FOR_COSIM
inst_hwtw = n2_inst_hwtw;
data_hwtw = n2_data_hwtw;
#endif
cnv2pa = n2_cnv2pa;
new(&tstate) N2_Tstate(); // N2 uses one bit less for the gl field
  new(&lsu_ctr) N2_LsuCtr(); // ToDo: N2 needs to have its own lsu ctr; it should not be in SS_Strand.
new(&gl) N2_Gl(); // N2 gl uses [3:0] instead of [2:0]
intr_recv = 0;
max_tl = 6;
max_gl = 3;
hver.maxwin(7);
hver.maxgl(3);
hver.maxtl(6);
hver.mask(0x20);
hver.impl(0x24);
hver.manuf(0x3e);
core_id.max_core_id(0x3f);
core_id.max_strand_id(0x7);
core_id.core_id(strand_id());
core_intr_id.intr_id_hi(0);
core_intr_id.intr_id_lo(strand_id());
// the first strand of each node (i.e., cpu) should be in running state
// at the beginning.
sim_state.running((strand_id() % N2_Model::NO_STRANDS_PER_CPU) == 0);
// Set the trap priorities for some traps to the correct number
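  // (Trap priorities are stored times ten, so 61 and 62 encode priorities
  // 6.1 and 6.2.)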
SS_Trap::table[SS_Trap::ILLEGAL_INSTRUCTION].priority = 61;
SS_Trap::table[SS_Trap::INSTRUCTION_BREAKPOINT].priority = 62;
// Add the N2 specific translating asi info to the asi_info table.
// Note block init stores are not valid for floating point stores.
SS_AsiInfo::Flags ldst = SS_AsiInfo::QUAD_LOAD | SS_AsiInfo::BLOCK_INIT | SS_AsiInfo::CLASS_STX | SS_AsiInfo::CLASS_ST;
SS_AsiInfo::Flags ldst_p = ldst | SS_AsiInfo::PRIMARY;
SS_AsiInfo::Flags ldst_s = ldst | SS_AsiInfo::SECONDARY;
SS_AsiInfo::Flags ldst_n = ldst | SS_AsiInfo::NUCLEUS | SS_AsiInfo::PRIVILEGED;
SS_AsiInfo::Flags ldst_pl = ldst_p | SS_AsiInfo::LITTLE_ENDIAN;
SS_AsiInfo::Flags ldst_sl = ldst_s | SS_AsiInfo::LITTLE_ENDIAN;
SS_AsiInfo::Flags ldst_nl = ldst_n | SS_AsiInfo::LITTLE_ENDIAN;
SS_AsiInfo::Flags ldst_aiup = ldst_p | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;
SS_AsiInfo::Flags ldst_aius = ldst_s | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;
SS_AsiInfo::Flags ldst_aiupl = ldst_pl | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;
SS_AsiInfo::Flags ldst_aiusl = ldst_sl | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;
ldst_p = ldst_p | SS_AsiInfo::BYPASS;
ldst_s = ldst_s | SS_AsiInfo::BYPASS;
ldst_n = ldst_n | SS_AsiInfo::BYPASS;
ldst_pl = ldst_pl | SS_AsiInfo::BYPASS;
ldst_sl = ldst_sl | SS_AsiInfo::BYPASS;
ldst_nl = ldst_nl | SS_AsiInfo::BYPASS;
asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_PRIMARY].set_flags(ldst_aiup);
asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_SECONDAY].set_flags(ldst_aius);
asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_PRIMARY_LITTLE].set_flags(ldst_aiupl);
asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_SECONDAY_LITTLE].set_flags(ldst_aiusl);
asi_info[N2_Asi::ASI_NUCLEUS_BLOCK_INIT_ST_QUAD_LDD].set_flags(ldst_n);
asi_info[N2_Asi::ASI_NUCLEUS_BLOCK_INIT_ST_QUAD_LDD_LITTLE].set_flags(ldst_nl);
asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_PRIMARY].set_flags(ldst_p);
asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_SECONDAY].set_flags(ldst_s);
asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_PRIMARY_LITTLE].set_flags(ldst_pl);
asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_SECONDAY_LITTLE].set_flags(ldst_sl);
// Map in all the strand specific non translating asi/va
// mapped registers or address ranges
SS_Node* INST_SIDE = (SS_Node*)0; // Used in one of the two void* arguments of ASI access to
  SS_Node* DATA_SIDE = (SS_Node*)1; // distinguish between instruction side or data side
SS_Node* VIRT_FLAG = (SS_Node*)0; // Used in one of the two void* arguments of ASI access to
SS_Node* REAL_FLAG = (SS_Node*)1; // distinguish between virtual or real
  // The N2 PRM says that bits 63 to 48 of the va are ignored. So clip
  // those off for all accesses.
asi_map.set_mask((SS_Vaddr(1) << VA_BITS) - 1);
asi_map[0x20].add(0x00,0x18,this,&scratchpad,
n2_scratchpad_ld64,n2_scratchpad_st64,
n2_scratchpad_ld64,n2_scratchpad_st64);
asi_map[0x20].add(0x30,0x38,this,&scratchpad,
n2_scratchpad_ld64,n2_scratchpad_st64,
n2_scratchpad_ld64,n2_scratchpad_st64);
asi_map[0x21].add(0x008,this,&primary_context[0],
SS_AsiCtrReg::ld64,pri_ctx_st64,
SS_AsiCtrReg::rd64,pri_ctx_st64);
asi_map[0x21].add(0x010,this,&secondary_context[0],
SS_AsiCtrReg::ld64,sec_ctx_st64,
SS_AsiCtrReg::rd64,sec_ctx_st64);
asi_map[0x21].add(0x108,this,&primary_context[1],
SS_AsiCtrReg::ld64,pri_ctx_st64,
SS_AsiCtrReg::rd64,pri_ctx_st64);
asi_map[0x21].add(0x110,this,&secondary_context[1],
SS_AsiCtrReg::ld64,sec_ctx_st64,
SS_AsiCtrReg::rd64,sec_ctx_st64);
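  // ASI 0x25 offsets 0x3c0..0x3f8 presumably map the interrupt queue head
  // and tail registers (cpu/dev mondo and (non)resumable error queues)
  // serviced by intr_queue_ld64/st64.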
asi_map[0x25].add(0x3c0,0x3f8,this,0,
intr_queue_ld64,intr_queue_st64,
intr_queue_ld64,intr_queue_st64);
asi_map[0x45].add(0x00,this,&lsu_ctr,
SS_AsiCtrReg::ld64,n2_lsu_ctr_st64,
SS_AsiCtrReg::rd64,n2_lsu_ctr_st64);
asi_map[0x48].add(0x00,0xf8,this,0,
irf_ecc_ld64,0,
irf_ecc_ld64,0);
asi_map[0x49].add(0x00,0xf8,this,0,
frf_ecc_ld64,0,
frf_ecc_ld64,0);
asi_map[0x4a].set_mask(0x38);
asi_map[0x4a].add(0x00,0x1f8,this,0,
stb_access_ld64,0,
stb_access_ld64,SS_AsiCtrReg::wr64);
asi_map[0x4c].add(0x00,this,&desr,
desr_ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x4c].add(0x18,this,seter);
asi_map[0x4f].add(0x00,0x38,this,&scratchpad,
n2_scratchpad_ld64,n2_scratchpad_st64,
n2_scratchpad_ld64,n2_scratchpad_st64);
asi_map[0x50].add(0x00,this,&inst_tag_target,
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x50].add(0x18,this,&inst_sfsr,
SS_AsiCtrReg::ld64,SS_AsiCtrReg::st64,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x50].add(0x30,this,&inst_tag_access,
SS_AsiCtrReg::ld64,tag_access_st64,
SS_AsiCtrReg::rd64,tag_access_st64);
asi_map[0x51].set_mask(0x38);
asi_map[0x51].add(SS_VADDR_MIN,SS_VADDR_MAX,(SS_Node*)0,0,
mra_access_ld64,0,
mra_access_ld64,0);
asi_map[0x52].add(0x108,this,&real_range[0],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x52].add(0x110,this,&real_range[1],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x52].add(0x118,this,&real_range[2],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x52].add(0x120,this,&real_range[3],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x52].add(0x208,this,&physical_offset[0],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x52].add(0x210,this,&physical_offset[1],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x52].add(0x218,this,&physical_offset[2],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x52].add(0x220,this,&physical_offset[3],
SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
SS_AsiCtrReg::rd64,tsb_ra2pa_st64);
asi_map[0x53].add(SS_VADDR_MIN,SS_VADDR_MAX,this,0,
inst_tlb_probe_ld64,0,
inst_tlb_probe_ld64,0);
asi_map[0x54].add(0x000,INST_SIDE,VIRT_FLAG,
0,tlb_data_in_st64,
0,tlb_data_in_st64);
asi_map[0x54].add(0x400,INST_SIDE,REAL_FLAG,
0,tlb_data_in_st64,
0,tlb_data_in_st64);
asi_map[0x54].add(0x010,this,&nucleus_tsb_config[0],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x018,this,&nucleus_tsb_config[1],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x020,this,&nucleus_tsb_config[2],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x028,this,&nucleus_tsb_config[3],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x030,this,&non_nucleus_tsb_config[0],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x038,this,&non_nucleus_tsb_config[1],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x040,this,&non_nucleus_tsb_config[2],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x048,this,&non_nucleus_tsb_config[3],
SS_AsiCtrReg::ld64,tsb_config_st64,
SS_AsiCtrReg::rd64,tsb_config_st64);
asi_map[0x54].add(0x050,this,&inst_tsb_pointer[0],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x058,this,&inst_tsb_pointer[1],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x060,this,&inst_tsb_pointer[2],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x068,this,&inst_tsb_pointer[3],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x070,this,&data_tsb_pointer[0],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x078,this,&data_tsb_pointer[1],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x080,this,&data_tsb_pointer[2],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x088,this,&data_tsb_pointer[3],
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x54].add(0x090,this,&tw_control,
SS_AsiCtrReg::ld64,tw_control_st64,
SS_AsiCtrReg::rd64,tw_control_st64);
asi_map[0x55].add(0x000,0x1ff,INST_SIDE,0,
tlb_data_access_ld64,tlb_data_access_st64,
tlb_data_access_ld64,tlb_data_access_st64);
asi_map[0x55].add(0x400,0x5ff,INST_SIDE,0,
tlb_data_access_ld64,tlb_data_access_st64,
tlb_data_access_ld64,tlb_data_access_st64);
asi_map[0x56].add(0x00,0x7ff,INST_SIDE,0,
tlb_tag_read_ld64,0,
tlb_tag_read_ld64,0);
asi_map[0x57].add(SS_VADDR_MIN,SS_VADDR_MAX,this,0,
0,inst_tlb_demap_st64,
0,inst_tlb_demap_st64);
asi_map[0x58].add(0x000,this,&data_tag_target,
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x58].add(0x018,this,data_sfsr);
asi_map[0x58].add(0x020,this,&data_sfar,
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);
asi_map[0x58].add(0x030,this,&data_tag_access,
SS_AsiCtrReg::ld64,tag_access_st64,
SS_AsiCtrReg::rd64,tag_access_st64);
asi_map[0x58].add(0x038,this,&data_wp,
SS_AsiCtrReg::ld64,data_wp_st64,
SS_AsiCtrReg::ld64,data_wp_st64);
asi_map[0x58].add(0x040,this,hwtw_config);
asi_map[0x58].add(0x080,this,&partition_id,
SS_AsiCtrReg::ld64,partition_id_st64,
SS_AsiCtrReg::rd64,partition_id_st64);
asi_map[0x59].add(0x00,0x78,(SS_Node*)0,0,
scratchpad_access_ld64,0,
scratchpad_access_ld64,0);
asi_map[0x5a].add(0x00,0x10,(SS_Node*)0,0,
tick_access_ld64,0,
tick_access_ld64,0);
asi_map[0x5a].add(0x20,0x30,(SS_Node*)0,0,
tick_access_ld64,0,
tick_access_ld64,0);
asi_map[0x5b].set_mask(0x38);
asi_map[0x5b].add(SS_VADDR_MIN,SS_VADDR_MAX,(SS_Node*)0,0,
tsa_access_ld64,0,
tsa_access_ld64,0);
asi_map[0x5c].add(0x000,DATA_SIDE,VIRT_FLAG,
0,tlb_data_in_st64,
0,tlb_data_in_st64);
asi_map[0x5c].add(0x400,DATA_SIDE,REAL_FLAG,
0,tlb_data_in_st64,
0,tlb_data_in_st64);
asi_map[0x5d].add(0x00,0x7ff,DATA_SIDE,0,
tlb_data_access_ld64,tlb_data_access_st64,
tlb_data_access_ld64,tlb_data_access_st64);
asi_map[0x5e].add(0x00,0x7ff,DATA_SIDE,0,
tlb_tag_read_ld64,0,
tlb_tag_read_ld64,0);
asi_map[0x5f].add(SS_VADDR_MIN,SS_VADDR_MAX,this,0,
0,data_tlb_demap_st64,
0,data_tlb_demap_st64);
asi_map[0x63].add(0x00,this,&core_intr_id,
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,0);
asi_map[0x63].add(0x10,this,&core_id,
SS_AsiCtrReg::ld64,0,
SS_AsiCtrReg::rd64,0);
asi_map[0x72].add(0x0,this,0,
intr_recv_ld64,intr_recv_st64,
intr_recv_ld64,intr_recv_wr64);
asi_map[0x74].add(0x0,this,0,
intr_r_ld64,0,
intr_r_ld64,0);
}
/*}}}*/
const char* N2_Strand::n2_get_state_name( SS_Strand* s, SS_Registers::Index index )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
switch (index)
{
case SS_Registers::ASR_PCR:
return n2->pcr.name();
case SS_Registers::ASR_PIC:
return n2->pic.name();
default:
return ss_get_state_name(s,index);
}
}
/*}}}*/
SS_Registers::Error N2_Strand::n2_get_state( SS_Strand* s, SS_Registers::Index index, uint64_t* value )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
switch (index)
{
case SS_Registers::ASR_PCR:
*value = n2->pcr();
break;
case SS_Registers::ASR_PIC:
*value = n2->pic();
break;
default:
return ss_get_state(s,index,value);
}
return SS_Registers::OK;
}
/*}}}*/
SS_Registers::Error N2_Strand::n2_set_state( SS_Strand* s, SS_Registers::Index index, uint64_t value )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
switch (index)
{
case SS_Registers::ASR_PCR:
n2->pcr.set(value);
break;
case SS_Registers::ASR_PIC:
n2->pic.set(value);
break;
default:
return ss_set_state(s,index,value);
}
return SS_Registers::OK;
}
/*}}}*/
void N2_Strand::snapshot( SS_SnapShot& ss )/*{{{*/
{
char prefix[32];
get_name(prefix);
SS_Strand::snapshot(ss);
sprintf(ss.tag,"%s.%s",prefix,pcr.name()); pcr.snapshot(ss);
sprintf(ss.tag,"%s.%s",prefix,pic.name()); pic.snapshot(ss);
lsu_ctr.snapshot(ss,prefix);
// fpu does not require snapshot.
core_intr_id.snapshot(ss,prefix);
core_id.snapshot(ss,prefix);
SS_AsiCtrReg::snapshot(ss,primary_context,2,prefix,"pri");
SS_AsiCtrReg::snapshot(ss,secondary_context,2,prefix,"sec");
inst_tag_target.snapshot(ss,prefix,"inst");
inst_tag_access.snapshot(ss,prefix,"inst");
data_tag_target.snapshot(ss,prefix,"data");
data_tag_access.snapshot(ss,prefix,"data");
partition_id.snapshot(ss,prefix);
SS_AsiCtrReg::snapshot(ss,real_range,4,prefix);
SS_AsiCtrReg::snapshot(ss,physical_offset,4,prefix);
SS_AsiCtrReg::snapshot(ss,nucleus_tsb_config,4,prefix,"nuc");
SS_AsiCtrReg::snapshot(ss,non_nucleus_tsb_config,4,prefix,"non");
SS_AsiCtrReg::snapshot(ss,inst_tsb_pointer,4,prefix,"inst");
SS_AsiCtrReg::snapshot(ss,data_tsb_pointer,4,prefix,"data");
hwtw_config.snapshot(ss,prefix);
tw_control.snapshot(ss,prefix);
inst_sfsr.snapshot(ss,prefix);
data_sfsr.snapshot(ss,prefix);
data_sfar.snapshot(ss,prefix);
data_wp.snapshot(ss,prefix);
cpu_mondo_head.snapshot(ss,prefix,"cpu_mondo_head");
cpu_mondo_tail.snapshot(ss,prefix,"cpu_mondo_tail");
dev_mondo_head.snapshot(ss,prefix,"dev_mondo_head");
dev_mondo_tail.snapshot(ss,prefix,"dev_mondo_tail");
resumable_head.snapshot(ss,prefix,"resumable_head");
resumable_tail.snapshot(ss,prefix,"resumable_tail");
non_resumable_head.snapshot(ss,prefix,"non_resumable_head");
non_resumable_tail.snapshot(ss,prefix,"non_resumable_tail");
sprintf(ss.tag,"%s.intr_recv",prefix); ss.val(&intr_recv);
intr_r.snapshot(ss,prefix);
seter.snapshot(ss,prefix);
desr.snapshot(ss,prefix);
sprintf(ss.tag,"%s.tw_status",prefix); ss.val(&tw_status);
if (ss.do_load())
{
for (int r = 0; r < 4; r++)
{
N2_TsbConfig& zc = nucleus_tsb_config[r];
N2_TsbConfig& nc = non_nucleus_tsb_config[r];
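      // Decode the config fields: tsb_base holds the TSB base in 8KB units
      // (hence << 13); tsb_size() + 9 and page_size() * 3 + 13 appear to be
      // the log2 entry count and page shift (8KB, 64KB, 512KB, 4MB, ...).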
tsb_config[r].update(zc.valid(), zc.tsb_base() << 13, zc.tsb_size() + 9,
zc.page_size() * 3 + 13, zc.ra_not_pa(), zc.use_context());
tsb_config[r + 4].update(nc.valid(), nc.tsb_base() << 13, nc.tsb_size() + 9,
nc.page_size() * 3 + 13, nc.ra_not_pa(), nc.use_context());
N2_RealRange& rr = real_range[r];
N2_PhysicalOffset& po = physical_offset[r];
tsb_ra2pa[r].update(rr.enable(),rr.rpn_low(),rr.rpn_high(),po.ptv());
}
data_wp_st64(0,0,this,0,data_wp());
inst_ctx_ra.set_pid(partition_id());
inst_ctx_va.set_pid(partition_id());
inst_ctx_va.set_pri_ctx0(primary_context[0]());
inst_ctx_va.set_pri_ctx1(primary_context[1]());
data_ctx.set_pid(partition_id());
data_ctx.set_pri_ctx0(primary_context[0]());
data_ctx.set_pri_ctx1(primary_context[1]());
data_ctx.set_sec_ctx0(secondary_context[0]());
data_ctx.set_sec_ctx1(secondary_context[1]());
n2_lsu_ctr_st64(0,&lsu_ctr,this,0,lsu_ctr());
}
}
/*}}}*/
SS_Vaddr N2_Strand::n2_inst_mmu_pa( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_InstrCache::Tag* line_tag )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_Tte* tte;
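  // tm is minus the decode cache line size in bytes (LINE_SIZE instructions
  // of 4 bytes each); it doubles as the mask that aligns a pc to its line.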
SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);
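  // Bit pa_bits()-1 (bit 39 on N2) of the physical address selects I/O (1)
  // versus memory (0).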
int io = (pc >> (n2->pa_bits() - 1)) & 1;
// First check for on or near UI breakpoints.
if (n2->near_break_inst_va(tm,pc))
{
// Check if we hit the breakpoint. If so then exit now.
// Else fetch a line, decode, execute the instructions
// guided by the ss_break_inst_va_dec() routine.
if (n2->test_break_inst_va(pc))
{
s->skip_break_inst_va = true;
return pc;
}
else
{
n2->inst_dec = ss_break_inst_va_dec;
}
}
// Now select the TTE to be used for the PA to PA translation.
if (n2->pstate.am())
{
// In 32bit mode the pc gets clipped to 32bit address space.
// We use a preprogrammed TTE to handle this situation.
tte = n2->phys_tte_mem_am;
}
else if (io)
{
// Check the I/O regions that we can fetch from.
if (!pc_iorange(pc))
{
n2->inst_tag_update(0,pc);
return (n2->inst_trap)(pc,npc,s,line,pc,SS_Trap::INSTRUCTION_ACCESS_ERROR); // prio 4.0
}
tte = n2->phys_tte_io;
}
else
{
// Normal 64bit address memory access.
tte = n2->phys_tte_mem;
}
  // Set the TTE that is currently used for instruction fetch,
  // update the decode cache tag and tte, and add the TTE to the
  // linked lists of TTEs to make flushing TTEs from the instruction
  // caches a simple task of walking the list of cachelines that
  // have been used.
SS_Memop exe = n2->mem_table[0][io];
SS_Vaddr tag = pc & -(SS_InstrCache::LINE_SIZE * 4);
n2->inst_tte = tte;
line_tag->tag = tag;
line_tag->tte = tte;
line_tag->lnk.unlink();
line_tag->lnk.insert_after(&n2->inst_tte_link[tte->index]);
return (exe)(pc,npc,s,line,tag,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_inst_mmu_ra( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_InstrCache::Tag* line_tag )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_Tte* tte;
SS_Vaddr va = pc & n2->mask_pstate_am;
SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);
// First check for on or near UI breakpoints.
if (n2->near_break_inst_va(tm,pc))
{
// Check if we hit the breakpoint. If so then exit now.
// Else fetch a line, decode, execute the instructions
// guided by the ss_break_inst_va_dec() routine.
if (n2->test_break_inst_va(pc))
{
s->skip_break_inst_va = true;
return pc;
}
else
{
n2->inst_dec = ss_break_inst_va_dec;
}
}
// Handle real range address checks. We use trampoline
// decoding to get the corner cases right when pc is in the
// cacheline that should cause the trap but is not on it.
if (!pc_onrange(va))
{
if (!pc_inrange(va))
{
n2->inst_tag_update(0,va);
return (n2->inst_trap)(pc,va_signext(npc),s,line,va,SS_Trap::INSTRUCTION_REAL_RANGE); // prio 2.6
}
    // The current pc is not on the va-edge (the last 0x20 bytes of the
    // positive end of the address space). To make sure we get the
    // traps for the real range we trampoline the decoder through
    // n2_dec_real_range. Note after this mmu routine we fetch the
    // cacheline and set the exe part of the instruction to inst_dec.
    // So switching the decoder temporarily makes us trampoline into
    // the decoder that knows about the real range traps.
n2->inst_dec = n2_dec_real_range;
}
  // Lookup the TLB and check that the TTE can be used for translation.
  // E.g. check that no error conditions occurred on the TLB lookup
  // (inst_mmu_error is set in cosim mode only) and that the TLB lookup
  // found a matching TTE.
bool tte_multi_hit = false;
if ((n2->inst_tte != n2->fail_tte) && n2->inst_tte->match_real(va,n2->partition_id()))
tte = n2->inst_tte;
else
tte = ((N2_Tlb*)n2->inst_tlb)->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);
if (n2->inst_mmu_error)
{
assert(n2->sim_state.cosim());
n2->inst_mmu_error = false;
n2->inst_tag_update(0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
}
else if (tte == 0)
{
n2->inst_tag_update(0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INST_REAL_TRANSLATION_MISS); // prio 2.8
}
// ITLB Multi Hit, Tag and Data Parity Error Detection
else if (tte->has_errors() || tte_multi_hit)
{
N2_Core& n2_core = n2->core;
int itlb_error_type = 0;
// Multiple hits are detected only if ittm bit is set in CERER
if (n2_core.cerer.ittm() && tte_multi_hit)
itlb_error_type = 1;
// Tag Parity Errors in ITLB are detected only if the ITTP bit is set in CERER
else if (n2_core.cerer.ittp() && tte->tag_parity_error())
itlb_error_type = 2;
// Data Parity Errors in ITLB are detected only if the ITDP bit is set in CERER
else if (n2_core.cerer.itdp() && tte->data_parity_error())
itlb_error_type = 3;
// The error type is recorded in the ISFSR
if (itlb_error_type > 0)
{
n2->inst_sfsr.error_type(itlb_error_type);
n2->inst_tag_update(0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
}
}
int io = (tte->phys_page >> (n2->pa_bits() - 1)) & 1;
// Check the properties of the TTE: no fault, and check fetching
// from restricted I/O spaces. Note that we never fetch real addresses
// in user mode, so we don't have to check for privileged violation.
if (tte->nfo())
{
n2->inst_tag_update(0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::IAE_NFO_PAGE); // prio 3.3
}
else if (io && !pc_iorange(tte->trans(pc)))
{
n2->inst_tag_update(0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_ERROR); // prio 4.0
}
SS_Memop exe = n2->mem_table[0][io];
SS_Vaddr tag = pc & -(SS_InstrCache::LINE_SIZE * 4);
  // Set the TTE that is currently used for instruction fetch,
  // update the decode cache tag and tte, and add the TTE to the
  // linked lists of TTEs to make flushing TTEs from the instruction
  // caches a simple task of walking the list of cachelines that
  // have been used.
n2->inst_tte = tte;
line_tag->tag = tag;
line_tag->tte = tte;
line_tag->lnk.unlink();
line_tag->lnk.insert_after(&n2->inst_tte_link[tte->index]);
return (exe)(pc,npc,s,line,tag,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_inst_mmu_va( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_InstrCache::Tag* line_tag )/*{{{*/
{
bool tte_multi_hit = false;
N2_Strand* n2 = (N2_Strand*)s;
SS_Tte* tte;
uint64_t ctxt0 = n2->tl() ? 0 : n2->primary_context[0]();
uint64_t ctxt1 = n2->tl() ? 0 : n2->primary_context[1]();
SS_Vaddr va = pc & n2->mask_pstate_am;
SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);
// First check for on or near UI breakpoints.
if (n2->near_break_inst_va(tm,pc))
{
// Check if we hit the breakpoint. If so then exit now.
// Else fetch a line, decode, execute the instructions
// guided by the ss_break_inst_va_dec() routine.
if (n2->test_break_inst_va(pc))
{
s->skip_break_inst_va = true;
return pc;
}
else
{
n2->inst_dec = ss_break_inst_va_dec;
}
}
// Handle va watchpoint and address range checks. For both
// exceptions we use trampoline decoding to get the corner cases
// right when pc is in the cacheline that should cause the traps.
if (n2->inst_watchpoint_va_hit(va))
{
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_VA_WATCHPOINT); // prio 2.5
}
else if (!pc_onrange(va))
{
if (!pc_inrange(va))
{
n2->inst_tag_update(ctxt0,va);
return (n2->inst_trap)(pc,va_signext(npc),s,line,va,SS_Trap::INSTRUCTION_ADDRESS_RANGE); // prio 2.6
}
    // The current pc is not on the va-edge (the last 0x20 bytes of the
    // positive end of the address space). To make sure we get the
    // traps for the address range we trampoline the decoder through
    // n2_dec_address_range. Note after this mmu routine we fetch the
    // cacheline and set the exe part of the instruction to inst_dec.
    // So switching the decoder temporarily makes us trampoline into
    // the decoder that knows about the address range traps.
n2->inst_dec = n2_dec_address_range;
}
else if (n2->inst_watchpoint_va_near_hit(tm,va))
{
// The current pc did not match the va watchpoint address, however
// the va watchpoint is enabled and falls in the same cacheline.
// So trampoline through n2_dec_va_watchpoint to get the watchpoint
// trap on the correct pc.
n2->inst_dec = n2_dec_va_watchpoint;
}
  // Lookup the TLB and check that the TTE can be used for translation.
  // E.g. check that no error conditions occurred on the TLB lookup
  // (inst_mmu_error is set in cosim mode only) and that the TLB lookup
  // found a matching TTE. If we didn't find a TTE in the TLB then
  // perform a hardware table walk to bring one in from the TSB(s).
if ((n2->inst_tte != n2->fail_tte) && n2->inst_tte->match_virt(va,ctxt0,ctxt1,n2->partition_id()))
tte = n2->inst_tte;
else
{
// Lookup va for both contexts. If both match then we have a multi
// hit case too. However, if both contexts are the same then this is ok.
bool tte_multi_hit0;
bool tte_multi_hit1 = false;
SS_Tte* tte0;
SS_Tte* tte1 = 0;
tte0 = ((N2_Tlb*)n2->inst_tlb)->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit0);
if (ctxt0 != ctxt1)
tte1 = ((N2_Tlb*)n2->inst_tlb)->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit1);
tte = tte0 ? tte0 : tte1;
tte_multi_hit = ((tte0 != 0) && (tte1 != 0)) || tte_multi_hit0 || tte_multi_hit1;
}
if (n2->inst_mmu_error)
{
assert(n2->sim_state.cosim());
n2->inst_mmu_error = false;
n2->inst_tag_update(ctxt0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
}
else if (tte)
{
// ITLB Multi Hit, Tag and Data Parity Error Detection
if (tte->has_errors() || tte_multi_hit)
{
N2_Core& n2_core = n2->core;
int itlb_error_type = 0;
// Multiple hits are detected only if ittm bit is set in CERER
if (n2_core.cerer.ittm() && tte_multi_hit)
itlb_error_type = 1;
// Tag Parity Errors in ITLB are detected only if the ITTP bit is set in CERER
else if (n2_core.cerer.ittp() && tte->tag_parity_error())
itlb_error_type = 2;
// Data Parity Errors in ITLB are detected only if the ITDP bit is set in CERER
else if (n2_core.cerer.itdp() && tte->data_parity_error())
itlb_error_type = 3;
// The error type is recorded in the ISFSR
if (itlb_error_type > 0)
{
n2->inst_sfsr.error_type(itlb_error_type);
n2->inst_tag_update(ctxt0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
}
}
}
else
{
if ((tte = n2->n2_inst_htw(va,ctxt0,ctxt1)) == 0)
return (n2->inst_trap)(pc,npc,s,line,va,n2->trap_htw);
}
int io = (tte->phys_page >> (n2->pa_bits() - 1)) & 1;
// Check the properties of the TTE: privileged violation, no fault,
// and check fetching from restricted I/O spaces.
if (tte->p() && (n2->sim_state.priv() == SS_USER))
{
n2->inst_tag_update(ctxt0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::IAE_PRIVILEGE_VIOLATION); // prio 3.1
}
else if (tte->nfo())
{
n2->inst_tag_update(ctxt0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::IAE_NFO_PAGE); // prio 3.3
}
else if (io && !pc_iorange(tte->trans(pc)))
{
n2->inst_tag_update(ctxt0,va);
return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_ERROR); // prio 4.0
}
SS_Memop exe = n2->mem_table[0][io];
SS_Vaddr tag = pc & tm;
  // Set the TTE that is currently used for instruction fetch,
  // update the decode cache tag and tte, and add the TTE to the
  // linked lists of TTEs to make flushing TTEs from the instruction
  // caches a simple task of walking the list of cachelines that
  // have been used.
n2->inst_tte = tte;
line_tag->tag = tag;
line_tag->tte = tte;
line_tag->lnk.unlink();
line_tag->lnk.insert_after(&n2->inst_tte_link[tte->index]);
return (exe)(pc,npc,s,line,tag,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_data_mmu( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr va, uint_t mem )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_Tte* tte;
int le;
uint64_t io;
bool pa2pa = false;
SS_AsiInfo asi_info = n2->asi_info[i->asi];
va &= n2->mask_pstate_am;
SS_Context ctxt0, ctxt1;
if (asi_info.is_primary())
{
ctxt0 = n2->primary_context[0]();
ctxt1 = n2->primary_context[1]();
}
else if (asi_info.is_secondary())
{
ctxt0 = n2->secondary_context[0]();
ctxt1 = n2->secondary_context[1]();
}
else
{
//assert(asi_info.is_nucleus());
ctxt0 = 0;
ctxt1 = 0;
}
if ((n2->sim_state.priv() == SS_HPRV) && asi_info.is_bypass())
{
io = (va >> (n2->pa_bits() - 1)) & 1;
if (io && (i->is_atomic()
|| (i->is_read() && (asi_info.is_quad_load_asi() || asi_info.is_block_asi()))))
{
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NC_PAGE); // Prio 12.5
}
le = asi_info.is_little_endian();
tte = io ? n2->phys_tte_io : n2->phys_tte_mem;
pa2pa = true;
}
else
{
if (((n2->sim_state.priv() < SS_HPRV) && !n2->sim_state.data_mmu()) || asi_info.is_real()) // RA->PA
{
if (!va_inrange(va))
{
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::MEM_REAL_RANGE); // Prio 11.3
}
else
{
bool tte_multi_hit;
ctxt0 = 0; // set context field to 0 in case of RA->PA
#ifdef COMPILE_FOR_COSIM
(n2->data_tlb_read)(n2->tlb_sync);
n2->data_tlb_read_skip = true;
#endif
tte = ((N2_Tlb*)n2->data_tlb)->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);
if (n2->data_mmu_error)
{
n2->data_mmu_error = false;
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
}
else if (tte)
{
// DTLB Multi Hit, Tag and Data Parity Error Detection
if (tte->has_errors() || tte_multi_hit)
{
N2_Core& n2_core = n2->core;
int dtlb_error_type = 0;
// Multiple hits are detected only if DTTM bit is set in CERER
if (n2_core.cerer.dttm() && tte_multi_hit)
dtlb_error_type = 1;
// Tag Parity Errors in DTLB are detected only if the DTTP bit is set in CERER
else if (n2_core.cerer.dttp() && tte->tag_parity_error())
dtlb_error_type = 2;
// Data Parity Errors in DTLB are detected only if the DTDP bit is set in CERER
else if (n2_core.cerer.dtdp() && tte->data_parity_error())
dtlb_error_type = 3;
// Error type and va are stored in DSFSR and DSFAR respectively
if (dtlb_error_type > 0)
{
n2->data_sfsr.error_type(dtlb_error_type);
n2->data_sfar.error_addr(va);
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
}
}
}
else
{
if (!i->is_cohere())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_REAL_TRANSLATION_MISS); // Prio 12.3
}
}
}
else // VA->PA
{
if (n2->va_watchpoint_hit(i,va))
{
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::VA_WATCHPOINT); // Prio 11.2
}
else if (!va_inrange(va))
{
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::MEM_ADDRESS_RANGE); // Prio 11.3
}
else
{
#ifdef COMPILE_FOR_COSIM
(n2->data_tlb_read)(n2->tlb_sync);
n2->data_tlb_read_skip = true;
#endif
// Lookup va for both contexts. If both match then we have a multi
// hit case too. However, if both contexts are the same then this is ok.
bool tte_multi_hit0;
bool tte_multi_hit1 = false;
SS_Tte* tte0;
SS_Tte* tte1 = 0;
tte0 = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit0);
if (ctxt0 != ctxt1)
tte1 = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit1);
tte = tte0 ? tte0 : tte1;
bool tte_multi_hit = ((tte0 != 0) && (tte1 != 0)) || tte_multi_hit0 || tte_multi_hit1;
if (n2->data_mmu_error)
{
n2->data_mmu_error = false;
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
}
else if (tte)
{
// DTLB Multi Hit, Tag and Data Parity Error Detection
if (tte->has_errors() || tte_multi_hit)
{
N2_Core& n2_core = n2->core;
int dtlb_error_type = 0;
// Multiple hits are detected only if DTTM bit is set in CERER
if (n2_core.cerer.dttm() && (tte->multi_hit() || tte_multi_hit))
dtlb_error_type = 1;
// Tag Parity Errors in DTLB are detected only if the DTTP bit is set in CERER
else if (n2_core.cerer.dttp() && tte->tag_parity_error())
dtlb_error_type = 2;
// Data Parity Errors in DTLB are detected only if the DTDP bit is set in CERER
else if (n2_core.cerer.dtdp() && tte->data_parity_error())
dtlb_error_type = 3;
// Error type and va are stored in DSFSR and DSFAR respectively
if (dtlb_error_type > 0)
{
n2->data_sfsr.error_type(dtlb_error_type);
n2->data_sfar.error_addr(va);
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
}
}
}
else
{
if (i->is_cohere())
{
#ifdef COMPILE_FOR_COSIM
n2->data_tlb_read_skip = false;
#endif
          // Output a trace for coheres that miss the tlb; they don't trap.
if (n2->trc_hook)
n2->trc_hook->mem_access(i->is_fetch() ? SS_Tracer::PREFETCH : SS_Tracer::FLUSH,va,0,0,0);
n2->npc = npc + 4;
return npc;
}
tte = n2->n2_data_htw(va,ctxt0,ctxt1);
if (tte == 0)
return (n2->data_trap)(pc,npc,s,i,va,n2->trap_htw);
}
}
}
io = (tte->phys_page >> (n2->pa_bits() - 1)) & 1;
  // Check the TTE properties: privileged violation when we are in user
  // mode or pretend to be user (as_if_user asi), atomic operations from
  // i/o or non cacheable pages, no fault violations, side effects, and
  // write operations to read only pages.
if (tte->p() && ((n2->sim_state.priv() == SS_USER) || asi_info.is_as_if_user()))
{
if (!i->is_cohere())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_PRIVILEGE_VIOLATION); // Prio 12.4
}
else if ((io && (i->is_atomic()
|| (i->is_read() && (asi_info.is_quad_load_asi() || asi_info.is_block_asi()))))
|| (!tte->cp() && (i->is_atomic()
|| (i->is_read() && asi_info.is_quad_load_asi()))))
{
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NC_PAGE); // Prio 12.5
}
else if (tte->nfo() && !asi_info.is_nofault())
{
if (!i->is_cohere())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NFO_PAGE); // Prio 12.6
}
else if (tte->e() && asi_info.is_nofault())
{
if (!i->is_cohere())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_SO_PAGE); // Prio 12.6
}
else if (!tte->w() && i->is_write())
{
if (!i->is_cohere())
n2->data_tag_update(ctxt0,va);
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::FAST_DATA_ACCESS_PROTECTION); // Prio 12.7
}
le = (asi_info.is_little_endian() ^ tte->ie());
}
// Check for pa watchpoints
SS_Paddr pa = tte->trans(va);
if (!i->is_cohere() && n2->pa_watchpoint_hit(i,pa))
{
return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::PA_WATCHPOINT); // Prio 12.8
}
// All ra2pa and va2pa use TTEs that come from the TLB. We keep a linked list
// per TLB index of locations where a TTE is used such that we can efficiently
// update the decode caches. The pa2pa TTEs are not stored in the linked list.
if (!n2->data_wp_check && !pa2pa)
{
i->lnk.unlink();
i->lnk.insert_after(&n2->data_tte_link[tte->index]);
}
  // Set the lower bits of the TTE pointer to include le (little endian, bit 0)
  // and io (I/O, bit 1). This is used in the memory access part of the memory
  // operation.
tte = i->set_tte(le,io << 1,tte);
  // Call the routines that handle memory. We have separate routines for
  // memory and i/o, and for big and little endian, to make the best of
  // the optimizers.
SS_Memop exe = n2->mem_table[mem][((long)tte & 3)];
#ifdef COMPILE_FOR_COSIM
n2->data_tlb_read_skip = false;
i->tte = n2->fail_tte;
#endif
  // In hyper privileged mode we do not cache the TTE when the ASI specifies
  // that we need to translate (not bypass). Note the only ASIs that translate
  // in hyper privileged mode are the AS_IF ASIs. This check makes decode
  // caching perform better as we have to flush much less.
if (n2->data_wp_check || ((n2->sim_state.priv() == SS_HPRV) && !pa2pa))
i->tte = n2->fail_tte;
return (exe)(pc,npc,s,i,va,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_invalid_asi( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr ea )/*{{{*/
{
  // All lsu instructions for which the privilege mode is wrong at decode time,
  // or for which the asi used is not valid, end up here. Go through the trap
  // checking motions as we need to raise one with the correct priority.
N2_Strand* n2 = (N2_Strand*)s;
SS_AsiInfo ai = n2->asi_info[i->asi];
if (!i->is_cohere() && (ea & (i->len - 1)))
{
if ((i->len == 8) && (ea & 7) == 0x4)
{
if ((i->opc.get_op3() & 0x2f) == 0x23) // lddf & lddfa
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::LDDF_MEM_ADDRESS_NOT_ALIGNED); // Prio 10.1
else if (((i->opc.get_op3() & 0x2f) == 0x27)) // stdf & stdfa
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::STDF_MEM_ADDRESS_NOT_ALIGNED); // Prio 10.1
}
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_ADDRESS_NOT_ALIGNED); // Prio 10.2
}
else if (!i->is_cohere() && (s->sim_state.priv() < s->asi_info[i->asi].get_protection()))
{
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::PRIVILEGED_ACTION); // Prio 11.1
}
else if (ai.is_translating())
{
ea &= n2->mask_pstate_am;
if ((n2->sim_state.priv() == SS_HPRV) && ai.is_bypass())
{
// PA->PA
}
else if (((n2->sim_state.priv() < SS_HPRV) && !n2->sim_state.data_mmu()) || ai.is_real())
{
// RA->PA
if (!i->is_cohere() && !va_inrange(ea))
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_REAL_RANGE); // Prio 11.3
else if (n2->data_mmu_error)
{
n2->data_mmu_error = false;
n2->data_tag_update(0,ea);
return (n2->data_trap)(pc,npc,s,i,ea,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
}
}
else
{
// VA->PA
if (!i->is_cohere() && (n2->va_watchpoint_hit(i,ea)))
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::VA_WATCHPOINT); // Prio 11.2
if (!i->is_cohere() && !va_inrange(ea))
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_ADDRESS_RANGE); // Prio 11.3
else if (n2->data_mmu_error)
{
SS_Context ctxt0, ctxt1;
if (ai.is_primary())
{
ctxt0 = n2->primary_context[0]();
ctxt1 = n2->primary_context[1]();
}
else if (ai.is_secondary())
{
ctxt0 = n2->secondary_context[0]();
ctxt1 = n2->secondary_context[1]();
}
else
{
assert(ai.is_nucleus());
ctxt0 = 0;
ctxt1 = 0;
}
n2->data_mmu_error = false;
n2->data_tag_update(ctxt0,ea);
return (n2->data_trap)(pc,npc,s,i,ea,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
}
}
}
else // non translating
{
}
return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::DAE_INVALID_ASI); // Prio 12.1
}
/*}}}*/
void N2_Strand::inst_tag_update( uint_t context, SS_Vaddr va )/*{{{*/
{
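  // Record the trapping va and context in the MMU tag access and tag target
  // registers (tag target keeps VA[63:22], hence the >> 22), then precompute
  // the four TSB pointers from the matching config set (nucleus set for
  // context 0, non-nucleus otherwise).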
va = va_signext(va);
inst_tag_access = va;
inst_tag_target.va(va >> 22);
inst_tag_access.context(context);
inst_tag_target.context(context);
SS_TsbConfig* tsb_cfg = context ? &tsb_config[4] : tsb_config;
for (int i=0; i < 4; i++)
inst_tsb_pointer[i] = tsb_cfg[i].index(va);
}
/*}}}*/
void N2_Strand::data_tag_update( uint_t context, SS_Vaddr va )/*{{{*/
{
va = va_signext(va);
data_tag_access = va;
data_tag_target.va(va >> 22);
data_tag_access.context(context);
data_tag_target.context(context);
SS_TsbConfig* tsb_cfg = context ? &tsb_config[4] : tsb_config;
for (int i=0; i < 4; i++)
data_tsb_pointer[i] = tsb_cfg[i].index(va);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Trap::Type tt )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
return SS_Strand::ss_trap(pc,npc,s,i,tt);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_inst_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_Vaddr va, SS_Trap::Type tt )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
n2->inst_tte = n2->fail_tte;
  // Jmpl causes MEM_ADDRESS_RANGE traps. When the mmu is in ra2pa mode
  // the trap should be MEM_REAL_RANGE. We tunnel these traps through
  // inst_trap instead of data_trap as they are treated differently that way :-)
switch (tt)
{
case SS_Trap::MEM_ADDRESS_RANGE:
if (n2->inst_mmu == n2->inst_mmu_ra)
tt = SS_Trap::MEM_REAL_RANGE;
case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
n2->data_sfar = va_signext(va & n2->mask_pstate_am);
break;
default:
break;
}
if (n2->trc_hook)
{
// Clear the decode cache line in case of inst mmu trap
switch (tt)
{
case SS_Trap::MEM_ADDRESS_RANGE:
case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
n2->trc_hook->reg_value(N2_Registers::REG_DATA_SFAR,n2->data_sfar());
break;
case SS_Trap::MEM_REAL_RANGE:
// this is coming from jmpl, no need to clear up decode cache
break;
default:
for (int i=0; i < SS_Instr::LINE_SIZE; i++)
line->line_index(i)->opc = 0;
break;
}
n2->trc_hook->inst_trap(va);
}
return (n2->trap)(pc,npc,s,line,tt);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_data_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr va, SS_Trap::Type tt )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
#ifdef COMPILE_FOR_COSIM
if (n2->data_tlb_read_skip)
{
    // Before the mmu does a tlb lookup it sets this flag; when the lookup
    // hits but something is wrong we get here and should not do a tlb read
    // again.
n2->data_tlb_read_skip = false;
}
else if ((n2->asi_info[i->asi].is_translating()) &&
((n2->sim_state.priv() != SS_HPRV) ||
!(n2->asi_info[i->asi].is_bypass())))
{
// Only pull the dtlb_read message when the trap priority >= 10.0 (x10)
// ToDo only traps with priority >= 10.1 go through here ???
if (SS_Trap::table[tt].priority >= 100)
{
(n2->data_tlb_read)(n2->tlb_sync);
}
}
if ((tt == SS_Trap::DAE_INVALID_ASI) || (tt == SS_Trap::PRIVILEGED_ACTION))
{
    // TLBLOOKUP pli messages are sent out too early so we have to fix that up
    // here when the supposed instruction actually traps and does not use the tlb.
    // Ben M. : it looks like the signals we look at to generate
    // the ITLB LOOKUPs are in the ITLB and only happen after the ASI command
    // is put on the ASI RING. But it looks like the DTLB signals do not go
    // through the actual ASI ring but directly to the DTLB, and they assert
    // even if we get the ASI invalid trap. So I think that code needs
    // to be in place for DTLBLOOKUPs but not for ITLBLOOKUPs. ---> keep
    // ITLBLOOKUP code around for a while longer. 6/12/07
switch (i->asi)
{
case 0x53: // inst_tlb_probe
case 0x55: // inst_tlb_data_access
case 0x56: // inst_tlb_tag_read
{
// a corresponding ASI_REAS & TLBLOOKUP were sent already, have to
// pop them out
uint64_t popout;
n2->asi_map.ld64(n2, i->asi, va, &popout);
(n2->inst_tlb_lookup)(n2->tlb_sync);
break;
}
case 0x5d: // data_tlb_data_access
case 0x5e: // data_tlb_tag_read
{
uint64_t popout;
n2->asi_map.ld64(n2, i->asi, va, &popout);
(n2->data_tlb_lookup)(n2->tlb_sync);
break;
}
}
}
#endif
  // Cohering instructions don't cause a trap, so filter them out here
  // instead of having i->is_cohere() tests all over the place. However, we
  // do still let DATA_ACCESS_MMU_ERROR through for them (see below).
if (i->is_cohere())
{
switch (tt)
{
case SS_Trap::DATA_ACCESS_MMU_ERROR:
break;
default:
n2->npc = npc + 4;
return npc;
}
}
i->tte = n2->fail_tte;
  // For now, the UNSUPPORTED_PAGE_SIZE trap is the only trap that can
  // happen during wrasi. We use a backdoor, e.g. raise the INVALID_ASI
  // trap and detect here that another trap should be raised. ToDo: clean
  // this up properly.
if (n2->trap_dae_inv_asi != SS_Trap::RESERVED)
{
tt = n2->trap_dae_inv_asi;
n2->trap_dae_inv_asi = SS_Trap::RESERVED;
}
switch (tt)
{
case SS_Trap::DAE_INVALID_ASI:
case SS_Trap::DAE_PRIVILEGE_VIOLATION:
case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
case SS_Trap::LDDF_MEM_ADDRESS_NOT_ALIGNED:
case SS_Trap::STDF_MEM_ADDRESS_NOT_ALIGNED:
va &= n2->mask_pstate_am;
case SS_Trap::DAE_NC_PAGE:
case SS_Trap::DAE_NFO_PAGE:
case SS_Trap::DAE_SO_PAGE:
case SS_Trap::MEM_ADDRESS_RANGE:
case SS_Trap::MEM_REAL_RANGE:
case SS_Trap::FAST_DATA_ACCESS_PROTECTION:
//case SS_Trap::PRIVILEGED_ACTION: ... stxa 58/30 no DSFAR ? ToDo is this right as the PRM says we should update
case SS_Trap::VA_WATCHPOINT:
case SS_Trap::PA_WATCHPOINT:
n2->data_sfar = va_signext(va);
break;
default:
break;
}
if (n2->trc_hook)
n2->trc_hook->data_trap(va);
return (n2->trap)(pc,npc,n2,i,tt);
}
/*}}}*/
SS_Tte* N2_Strand::n2_inst_htw( SS_Vaddr va, SS_Context ctxt0, SS_Context ctxt1 )/*{{{*/
{
SS_TsbConfig* tsb_cfg = ctxt0 ? &tsb_config[4] : &tsb_config[0];
bool htw_enabled = false;
tw_status = 1;
inst_tag_update(ctxt0,va);
for (int n=4; n--; tsb_cfg++)
{
if (!tsb_cfg->is_valid())
continue;
htw_enabled = true;
SS_Paddr tsb_addr = tsb_cfg->index(va);
uint64_t tte_tag_data[2];
#ifdef MEMORY_MSYNC
SS_MsyncMemory* msync_mem = (SS_MsyncMemory*)memory;
msync_mem->msync_info(this->strand_id(),tsb_addr,SS_Memory::HTW);
#endif
memory->ld128atomic(tsb_addr,tte_tag_data);
tsb_tte_tag = tte_tag_data[0];
tsb_tte_data = tte_tag_data[1];
if (trc_hook)
trc_hook->hwop(SS_Tracer::LD_CODE, tsb_addr, 16, tte_tag_data);
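    // vpn_mask clears the page offset bits for this TTE's page size:
    // size s covers 3*s+13 low bits, e.g. s == 0 masks an 8KB page and
    // s == 1 a 64KB page.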
SS_Vaddr vpn_mask = ~((SS_Vaddr(1) << (tsb_tte_data.size() * 3 + 13)) - SS_Vaddr(1));
if (tsb_tte_data.v()
&& (tsb_tte_tag.reserved0() == 0) && (tsb_tte_tag.reserved1() == 0)
&& tsb_cfg->match(va & vpn_mask,(tsb_tte_tag.va() << 22) & vpn_mask)
&& (tsb_cfg->get_page_size() <= tsb_tte_data.size()) && (tsb_tte_data.size() < 8))
{
bool ok;
if (ctxt0 == 0)
ok = tsb_tte_tag.context() == 0;
else if (tsb_cfg->use_context())
{
if (tsb_cfg->use_context_0())
tsb_tte_tag.context(ctxt0);
else
tsb_tte_tag.context(ctxt1);
ok = true;
}
else
ok = tsb_tte_tag.context() == ctxt0;
if (ok)
{
N2_Tlb* tlb;
if (tsb_cfg->is_ra_not_pa())
{
SS_Paddr ra_mask = (SS_Paddr(1) << (tsb_tte_data.size() * 3)) - SS_Paddr(1);
SS_Paddr ra_low = tsb_tte_data.pa() & ~ra_mask;
SS_Paddr ra_high = tsb_tte_data.pa() | ra_mask;
for (uint_t r=0; r < 4; r++)
{
SS_TsbRaToPa* r2p = &tsb_ra2pa[r];
if (r2p->valid && (r2p->rpn_beg <= ra_low) && (ra_high <= r2p->rpn_end))
{
if (!tsb_tte_data.x())
{
trap_htw = SS_Trap::IAE_UNAUTH_ACCESS; // prio 2.9 (3.2 in Sun Sparc)
tw_status = 0;
return 0;
}
tsb_tte_data.pa(r2p->ppn_ofs + ra_low);
tsb_tte_data.pa_zero_ext(0);
#ifdef COMPILE_FOR_COSIM
(inst_tlb_write)(tlb_sync);
#endif
tlb = (N2_Tlb*)inst_tlb;
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
tlb_entry = -1;
tw_status = 0;
return tte;
}
}
if (!tsb_tte_data.x())
trap_htw = SS_Trap::IAE_UNAUTH_ACCESS; // Prio 2.9
else
trap_htw = SS_Trap::INSTRUCTION_INVALID_TSB_ENTRY; // Prio 2.9 (2.10 in Sun Sparc)
tw_status = 0;
return 0;
}
else if (!tsb_tte_data.x())
{
trap_htw = SS_Trap::IAE_UNAUTH_ACCESS;
tw_status = 0;
return 0;
}
else
{
tsb_tte_data.pa_zero_ext(0);
#ifdef COMPILE_FOR_COSIM
(inst_tlb_write)(tlb_sync);
#endif
tlb = (N2_Tlb*)inst_tlb;
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
tlb_entry = -1;
tw_status = 0;
return tte;
}
}
}
}
  // Check the other four of the 8 tsb configs too. If any is valid then
  // hardware table walk is considered enabled.
tsb_cfg = ctxt0 ? &tsb_config[0] : &tsb_config[4];
for (int n=4; n--; tsb_cfg++)
{
if (tsb_cfg->is_valid())
{
htw_enabled = true;
break;
}
}
if (htw_enabled)
trap_htw = SS_Trap::INSTRUCTION_ACCESS_MMU_MISS;
else
trap_htw = SS_Trap::FAST_INSTRUCTION_ACCESS_MMU_MISS;
tw_status = 0;
return 0;
}
/*}}}*/
SS_Tte* N2_Strand::n2_data_htw( SS_Vaddr va, SS_Context ctxt0, SS_Context ctxt1 )/*{{{*/
{
SS_TsbConfig* tsb_cfg = ctxt0 ? &tsb_config[4] : &tsb_config[0];
bool htw_enabled = false;
tw_status = 1;
data_tag_update(ctxt0,va);
for (int n=4; n--; tsb_cfg++)
{
if (!tsb_cfg->is_valid())
continue;
htw_enabled = true;
SS_Paddr tsb_addr = tsb_cfg->index(va);
uint64_t tte_tag_data[2];
#ifdef MEMORY_MSYNC
SS_MsyncMemory* msync_mem = (SS_MsyncMemory*)memory;
msync_mem->msync_info(this->strand_id(),tsb_addr,SS_Memory::HTW);
#endif
memory->ld128atomic(tsb_addr,tte_tag_data);
tsb_tte_tag = tte_tag_data[0];
tsb_tte_data = tte_tag_data[1];
if (trc_hook)
trc_hook->hwop(SS_Tracer::LD_DATA, tsb_addr, 16, tte_tag_data);
SS_Vaddr vpn_mask = ~((SS_Vaddr(1) << (tsb_tte_data.size() * 3 + 13)) - SS_Vaddr(1));
if (tsb_tte_data.v()
&& (tsb_tte_tag.reserved0() == 0) && (tsb_tte_tag.reserved1() == 0)
&& tsb_cfg->match(va & vpn_mask,(tsb_tte_tag.va() << 22) & vpn_mask)
&& (tsb_cfg->get_page_size() <= tsb_tte_data.size()) && (tsb_tte_data.size() < 8))
{
bool ok;
if (ctxt0 == 0)
ok = tsb_tte_tag.context() == 0;
else if (tsb_cfg->use_context())
{
if (tsb_cfg->use_context_0())
tsb_tte_tag.context(ctxt0);
else
tsb_tte_tag.context(ctxt1);
ok = true;
}
else
ok = tsb_tte_tag.context() == ctxt0;
if (ok)
{
N2_Tlb* tlb;
if (tsb_cfg->is_ra_not_pa())
{
SS_Paddr ra_mask = (SS_Paddr(1) << (tsb_tte_data.size() * 3)) - SS_Paddr(1);
SS_Paddr ra_low = tsb_tte_data.pa() & ~ra_mask;
SS_Paddr ra_high = tsb_tte_data.pa() | ra_mask;
for (uint_t r=0; r < 4; r++)
{
SS_TsbRaToPa* r2p = &tsb_ra2pa[r];
if (r2p->valid && (r2p->rpn_beg <= ra_low) && (ra_high <= r2p->rpn_end))
{
tsb_tte_data.pa(r2p->ppn_ofs + ra_low);
tsb_tte_data.pa_zero_ext(0);
#ifdef COMPILE_FOR_COSIM
(data_tlb_write)(tlb_sync);
#endif
tlb = (N2_Tlb*)data_tlb;
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
tlb_entry = -1;
tw_status = 0;
return tte;
}
}
trap_htw = SS_Trap::DATA_INVALID_TSB_ENTRY;
tw_status = 0;
return 0;
}
else
{
tsb_tte_data.pa_zero_ext(0);
#ifdef COMPILE_FOR_COSIM
(data_tlb_write)(tlb_sync);
#endif
tlb = (N2_Tlb*)data_tlb;
SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
tlb_entry = -1;
tw_status = 0;
return tte;
}
}
}
}
tsb_cfg = ctxt0 ? &tsb_config[0] : &tsb_config[4];
for (int n=4; n--; tsb_cfg++)
{
if (tsb_cfg->is_valid())
{
htw_enabled = true;
break;
}
}
if (htw_enabled)
trap_htw = SS_Trap::DATA_ACCESS_MMU_MISS;
else
trap_htw = SS_Trap::FAST_DATA_ACCESS_MMU_MISS;
tw_status = 0;
return 0;
}
/*}}}*/
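// The two wrappers below are cosim entry points: they run the hardware
// table walk for a given TLB entry index and return the resulting trap
// (SS_Trap::RESERVED when the walk completed without trapping).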
#ifdef COMPILE_FOR_COSIM
SS_Trap::Type N2_Strand::n2_inst_hwtw( SS_Strand* strand, SS_Vaddr va, int_t entry )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)strand;
SS_Tte* tte;
n2->tlb_entry = entry;
n2->trap_htw = SS_Trap::RESERVED;
if (n2->tl() == 0)
tte = n2->n2_inst_htw(va_signext(va),n2->primary_context[0](),n2->primary_context[1]());
else
tte = n2->n2_inst_htw(va_signext(va),0,0);
return n2->trap_htw;
}
/*}}}*/
SS_Trap::Type N2_Strand::n2_data_hwtw( SS_Strand* strand, SS_Vaddr va, uint8_t asi, int_t entry )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)strand;
SS_Tte* tte;
SS_AsiInfo asi_info = n2->asi_info[asi];
n2->tlb_entry = entry;
n2->trap_htw = SS_Trap::RESERVED;
if (asi_info.is_primary())
tte = n2->n2_data_htw(va_signext(va),n2->primary_context[0](),n2->primary_context[1]());
else if (asi_info.is_secondary())
tte = n2->n2_data_htw(va_signext(va),n2->secondary_context[0](),n2->secondary_context[1]());
else
tte = n2->n2_data_htw(va_signext(va),0,0);
return n2->trap_htw;
}
/*}}}*/
#endif
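// Store to the LSU control register. Besides the MMU enable bits this
// decodes the watchpoint setup: mode 0/1 (or an empty byte mask)
// disables the watchpoint, mode 2 arms the PA watchpoint, and mode 3
// arms the VA watchpoint; see the bit0 enable/disable trick below.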
SS_AsiSpace::Error N2_Strand::n2_lsu_ctr_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
N2_LsuCtr* lc = (N2_LsuCtr*)_reg;
(*lc) = data;
// Grab the inst and data mmu enable bits and store them in the common
// enable flags for use in sim_update.
s->sim_state.inst_mmu(s->lsu_ctr.im());
s->sim_state.data_mmu(s->lsu_ctr.dm());
// The watchpoint address is always 8-byte aligned, so an offset value
// of 1 causes a mismatch (= disabled). We use that: bit0=0 means
// enabled, bit0=1 means disabled.
n2->data_wp_va_addr |= 1;
n2->data_wp_pa_addr |= 1;
n2->data_wp_check = false;
n2->data_wp_bytes = lc->bm();
n2->data_wp_flags = (lc->re() ? SS_Instr::READ : 0) | (lc->we() ? SS_Instr::WRITE : 0);
if ((lc->bm() == 0) || (lc->mode() < 2))
{
n2->data_wp_va_addr |= SS_Vaddr(1);
n2->data_wp_pa_addr |= SS_Paddr(1);
}
else if (lc->mode() == 2)
{
n2->data_wp_va_addr |= SS_Vaddr(1);
n2->data_wp_pa_addr &= ~SS_Paddr(1);
n2->data_wp_check = true;
}
else
{
n2->data_wp_va_addr &= ~SS_Vaddr(1);
n2->data_wp_pa_addr |= SS_Paddr(1);
n2->data_wp_check = true;
}
// Propagate the sim_state changes.
(s->sim_update)(s);
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::data_wp_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
n2->data_wp.set(va_signext(data));
// Set the new watchpoint addresses, preserving the enable (bit0=0) or
// disable (bit0=1) state.
n2->data_wp_va_addr = (n2->data_wp.va() << 3) | (n2->data_wp_va_addr & SS_Vaddr(1));
n2->data_wp_pa_addr = (n2->data_wp.pa() << 3) | (n2->data_wp_pa_addr & SS_Paddr(1));
return SS_AsiSpace::OK;
}
/*}}}*/
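// Store to a TSB configuration register. Only page size encodings
// 0, 1, 3 and 5 are accepted; with page size = 2^(13 + 3*n) bytes
// these correspond to 8KB, 64KB, 4MB and 256MB pages. Any other
// encoding restores the old value and raises UNSUPPORTED_PAGE_SIZE.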
SS_AsiSpace::Error N2_Strand::tsb_config_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
N2_TsbConfig* tc = (N2_TsbConfig*)_reg;
SS_TsbConfig* tsb_cfg;
uint64_t old_data = (*tc)();
(*tc) = data;
switch (tc->page_size())
{
case 0:
case 1:
case 3:
case 5:
break;
default:
(*tc) = old_data;
n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
return SS_AsiSpace::NO_ASI;
}
n2->tsb_config[(va - 0x10) >> 3].update(tc->valid(),
tc->tsb_base() << 13,
tc->tsb_size() + 9,
tc->page_size() * 3 + 13,
tc->ra_not_pa(),
tc->use_context());
tsb_cfg = n2->inst_tag_access.context() ? &n2->tsb_config[4] : n2->tsb_config;
for (int i=0; i < 4; i++)
n2->inst_tsb_pointer[i] = tsb_cfg[i].index(n2->inst_tag_access.va() << 13);
tsb_cfg = n2->data_tag_access.context() ? &n2->tsb_config[4] : n2->tsb_config;
for (int i=0; i < 4; i++)
n2->data_tsb_pointer[i] = tsb_cfg[i].index(n2->data_tag_access.va() << 13);
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tsb_ra2pa_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_AsiCtrReg* cr = (SS_AsiCtrReg*)_reg;
cr->set(data);
uint_t r = ((va - 8) & 0x18) >> 3;
n2->tsb_ra2pa[r].update(n2->real_range[r].enable(),
n2->real_range[r].rpn_low(),
n2->real_range[r].rpn_high(),
n2->physical_offset[r].ptv());
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tag_access_st64( SS_Node* , void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
data = va_signext(data);
if (_reg == &n2->inst_tag_access)
{
n2->inst_tag_access = data;
n2->inst_tag_update(n2->inst_tag_access.context(),n2->inst_tag_access.va() << 13);
}
else
{
n2->data_tag_access = data;
n2->data_tag_update(n2->data_tag_access.context(),n2->data_tag_access.va() << 13);
}
return SS_AsiSpace::OK;
}
/*}}}*/
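// Store to ASI_{I,D}TLB_DATA_IN: the TTE tag is rebuilt from the
// corresponding tag access register, the page size is validated, and
// the TTE is inserted into the TLB selected by d_or_i (r_or_v appears
// to flag a real rather than a virtual translation).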
SS_AsiSpace::Error N2_Strand::tlb_data_in_st64( SS_Node* d_or_i, void* r_or_v, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
N2_TagAccess* tag_access = d_or_i ? &n2->data_tag_access : &n2->inst_tag_access;
n2->tsb_tte_tag.context(tag_access->context());
n2->tsb_tte_tag.va(tag_access->va() >> (22 - 13));
n2->tsb_tte_tag.reserved0(0);
n2->tsb_tte_tag.reserved1(0);
n2->tsb_tte_data = data;
n2->tsb_tte_data.pa_zero_ext(0);
switch (n2->tsb_tte_data.size())
{
case 0:
case 1:
case 3:
case 5:
break;
default:
n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
return SS_AsiSpace::NO_ASI;
}
N2_Tlb* tlb;
int idx = -1;
if (d_or_i)
{
#ifdef COMPILE_FOR_COSIM
idx = (n2->data_tlb_write)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->data_tlb;
}
else
{
#ifdef COMPILE_FOR_COSIM
idx = (n2->inst_tlb_write)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->inst_tlb;
// N2 has the X (or EP) bit reserved as it has a dedicated ITLB. For
// completeness and uniformity across platforms we force the x bit to one.
n2->tsb_tte_data.x(1);
}
tlb->insert_tsb_tte(n2,n2->partition_id(),n2->tsb_tte_tag(),n2->tsb_tte_data(),tag_access->va() << 13,idx,r_or_v != 0);
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tlb_data_access_ld64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
{
N2_TlbIndex index;
N2_Strand* n2 = (N2_Strand*)s;
index = va;
N2_Tlb* tlb;
if (d_or_i)
{
#ifdef COMPILE_FOR_COSIM
(n2->data_tlb_lookup)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->data_tlb;
}
else
{
#ifdef COMPILE_FOR_COSIM
(n2->inst_tlb_lookup)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->inst_tlb;
}
if (index.index() < tlb->size())
{
SS_Tte* tte = tlb->get(index.index() & (tlb->size() - 1));
// A diagnostic ASI access to ASI_ITLB_DATA_ACCESS_REG is supposed to calculate
// and set the data parity (irrespective of mode)
uint64_t dp = tte->nfo();
dp ^= tte->taddr();
dp ^= tte->ie();
dp ^= tte->e();
dp ^= tte->cp();
dp ^= tte->p();
dp ^= tte->w();
dp ^= tte->page_size();
int data_parity = BL_BitUtility::calc_parity(dp);
// The parity obtained in the above step is the calculated parity. It does
// not reflect whether an error was injected. To correct this, the
// calculated parity needs to be xor'ed with the injection mask.
data_parity ^= tte->data_parity_error();
n2->tsb_tte_data.size(tte->page_size());
n2->tsb_tte_data.sw0(0);
n2->tsb_tte_data.w(tte->w());
n2->tsb_tte_data.x(1);
n2->tsb_tte_data.p(tte->p());
n2->tsb_tte_data.cv(0);
n2->tsb_tte_data.cp(tte->cp());
n2->tsb_tte_data.e(tte->e());
n2->tsb_tte_data.ie(tte->ie());
n2->tsb_tte_data.pa(tte->taddr() >> 13);
// Per N2 PRM v1.2 (Table 12-2), bits 61:56 of the TTE
// are defined as the 'soft' field.
// (In VONK, for some reason, this field has been named sw1.)
// Since the data parity is stored in the most significant bit
// (bit 61) of the soft field, we need to left shift the data
// parity by 5 positions.
n2->tsb_tte_data.sw1(data_parity << (N2_TsbTteData::WIDTH_SW1 - 1));
n2->tsb_tte_data.nfo(tte->nfo());
n2->tsb_tte_data.v(tte->valid_bit());
*data = n2->tsb_tte_data();
}
else
{
*data = 0;
}
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tlb_data_access_st64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_TlbIndex index;
N2_Strand* n2 = (N2_Strand*)s;
index = va;
N2_TagAccess* tag_access = d_or_i ? &n2->data_tag_access : &n2->inst_tag_access;
n2->tsb_tte_tag.context(tag_access->context());
n2->tsb_tte_tag.va(tag_access->va() >> (22 - 13));
n2->tsb_tte_tag.reserved0(0);
n2->tsb_tte_tag.reserved1(0);
n2->tsb_tte_data = data;
n2->tsb_tte_data.pa_zero_ext(0);
switch (n2->tsb_tte_data.size())
{
case 0:
case 1:
case 3:
case 5:
break;
default:
n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
return SS_AsiSpace::NO_ASI;
}
N2_Tlb* tlb;
if (d_or_i)
{
#ifdef COMPILE_FOR_COSIM
(n2->data_tlb_write)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->data_tlb;
}
else
{
#ifdef COMPILE_FOR_COSIM
(n2->inst_tlb_write)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->inst_tlb;
// N2 has the X (or EP) bit reserved as it has a dedicated ITLB. For
// completeness and uniformity across platforms we force the x bit to one.
n2->tsb_tte_data.x(1);
}
tlb->insert_tsb_tte(n2,n2->partition_id(),n2->tsb_tte_tag(),n2->tsb_tte_data(),
tag_access->va() << 13,index.index(),index.flag() != 0);
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tlb_tag_read_ld64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
{
N2_TagRead tag_read;
N2_TlbIndex index;
N2_Strand* n2 = (N2_Strand*)s;
index = va;
N2_Tlb* tlb;
if (d_or_i)
{
#ifdef COMPILE_FOR_COSIM
(n2->data_tlb_lookup)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->data_tlb;
}
else
{
#ifdef COMPILE_FOR_COSIM
(n2->inst_tlb_lookup)(n2->tlb_sync);
#endif
tlb = (N2_Tlb*)n2->inst_tlb;
}
SS_Tte* tte = tlb->get(index.index() & (tlb->size() - 1));
// A diagnostic ASI access to ASI_ITLB_TAG_READ_REG is supposed to calculate
// and set the tag's parity (irrespective of mode)
uint64_t tp = tte->pid();
tp ^= tte->real_bit();
tp ^= tte->tag();
tp ^= tte->context();
int tag_parity = BL_BitUtility::calc_parity(tp);
// The parity obtained in the above step is the calculated parity. It does
// not reflect whether an error was injected. To correct this, the
// calculated parity needs to be xor'ed with the injection mask.
tag_parity ^= tte->tag_parity_error();
tag_read.pid(tte->pid());
tag_read.parity(tag_parity);
tag_read.real(tte->real_bit());
tag_read.used(0);
tag_read.va_ra(tte->tag() >> 13);
tag_read.context(tte->context());
*data = tag_read();
return SS_AsiSpace::OK;
}
/*}}}*/
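// The demap stores for the I- and D-TLB below follow the same pattern.
// The demap type selects the scope: 0 demaps a single page (by va),
// 1 demaps a context, 2 demaps the whole partition, and 3 demaps all
// real or all virtual translations of the partition.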
SS_AsiSpace::Error N2_Strand::inst_tlb_demap_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
uint_t context;
N2_Demap demap;
demap = va_signext(va);
switch (demap.context())
{
case 0:
context = n2->primary_context[0]();
n2->inst_tag_update(context,va);
break;
case 2:
context = 0;
n2->inst_tag_update(0,va);
break;
default:
if (demap.type() < 2)
return SS_AsiSpace::OK;
n2->inst_tag_update(0,va);
}
#ifdef COMPILE_FOR_COSIM
(n2->inst_tlb_write)(n2->tlb_sync);
#endif
N2_Tlb* tlb = (N2_Tlb*)n2->inst_tlb;
switch (demap.type())
{
case 0:
if (demap.real())
tlb->demap_real(n2,n2->partition_id(),demap.va() << 13);
else
tlb->demap_virt(n2,n2->partition_id(),context,demap.va() << 13);
break;
case 1:
tlb->demap_virt(n2,n2->partition_id(),context);
break;
case 2:
tlb->demap_all(n2,n2->partition_id());
break;
case 3:
if (demap.real())
tlb->demap_real(n2,n2->partition_id());
else
tlb->demap_virt(n2,n2->partition_id());
break;
}
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::data_tlb_demap_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
uint_t context;
N2_Demap demap;
demap = va_signext(va);
switch (demap.context())
{
case 0:
context = n2->primary_context[0]();
n2->data_tag_update(context,va);
break;
case 1:
context = n2->secondary_context[0]();
n2->data_tag_update(0,va);
break;
case 2:
context = 0;
n2->data_tag_update(0,va);
break;
default:
if (demap.type() < 2)
return SS_AsiSpace::OK;
n2->data_tag_update(0,va);
}
#ifdef COMPILE_FOR_COSIM
(n2->data_tlb_write)(n2->tlb_sync);
#endif
N2_Tlb* tlb = (N2_Tlb*)n2->data_tlb;
switch (demap.type())
{
case 0:
if (demap.real())
tlb->demap_real(n2,n2->partition_id(),demap.va() << 13);
else
tlb->demap_virt(n2,n2->partition_id(),context,demap.va() << 13);
break;
case 1:
tlb->demap_virt(n2,n2->partition_id(),context);
break;
case 2:
tlb->demap_all(n2,n2->partition_id());
break;
case 3:
if (demap.real())
tlb->demap_real(n2,n2->partition_id());
else
tlb->demap_virt(n2,n2->partition_id());
break;
}
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::partition_id_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
n2->partition_id.set(data);
n2->inst_ctx_ra.set_pid(n2->partition_id());
n2->inst_ctx_va.set_pid(n2->partition_id());
n2->data_ctx.set_pid(n2->partition_id());
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::pri_ctx_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
if ((va & 0x100) == 0)
{
n2->primary_context[0].set(data);
n2->inst_ctx_va.set_pri_ctx0(n2->primary_context[0]());
n2->data_ctx.set_pri_ctx0(n2->primary_context[0]());
}
n2->primary_context[1].set(data);
n2->inst_ctx_va.set_pri_ctx1(n2->primary_context[1]());
n2->data_ctx.set_pri_ctx1(n2->primary_context[1]());
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::sec_ctx_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
if ((va & 0x100) == 0)
{
n2->secondary_context[0].set(data);
n2->data_ctx.set_sec_ctx0(n2->secondary_context[0]());
}
n2->secondary_context[1].set(data);
n2->data_ctx.set_sec_ctx1(n2->secondary_context[1]());
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::inst_tlb_probe_ld64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr addr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_Tte* tte;
bool tte_multi_hit;
N2_ItlbProbeAddr itlb_addr;
N2_ItlbProbeData itlb_data;
itlb_addr = addr;
SS_Vaddr va = va_signext((itlb_addr.va() << 13) & n2->mask_pstate_am);
#ifdef COMPILE_FOR_COSIM
(n2->inst_tlb_lookup)(n2->tlb_sync);
#endif
N2_Tlb* tlb = (N2_Tlb*)n2->inst_tlb;
if (itlb_addr.real())
{
tte = tlb->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);
}
else
{
uint64_t ctxt0, ctxt1;
if (n2->tl() == 0)
{
ctxt0 = n2->primary_context[0]();
ctxt1 = n2->primary_context[1]();
}
else
{
ctxt0 = 0;
ctxt1 = 0;
}
tte = tlb->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit);
if (tte == 0)
tte = tlb->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit);
// ToDo. Do we deal with multi hit detection here?
}
if (tte)
{
itlb_data.v(1);
itlb_data.mh(0);
itlb_data.tp(0);
itlb_data.dp(0);
itlb_data.pa(tte->trans(va) >> 13);
}
else
{
itlb_data = 0;
}
*data = itlb_data();
return SS_AsiSpace::OK;
}
/*}}}*/
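// Interrupt queue head/tail registers (cpu mondo, dev mondo, resumable
// and non-resumable error). The tail registers are read-only in
// privileged mode; after every update the head/tail comparison raises
// or retracts the cpu mondo, dev mondo and resumable error interrupts.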
SS_AsiSpace::Error N2_Strand::intr_queue_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
switch (va)
{
case 0x3c0:
n2->cpu_mondo_head = data;
break;
case 0x3c8:
if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
return SS_AsiSpace::NO_WRITE;
n2->cpu_mondo_tail = data;
break;
case 0x3d0:
n2->dev_mondo_head = data;
break;
case 0x3d8:
if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
return SS_AsiSpace::NO_WRITE;
n2->dev_mondo_tail = data;
break;
case 0x3e0:
n2->resumable_head = data;
break;
case 0x3e8:
if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
return SS_AsiSpace::NO_WRITE;
n2->resumable_tail = data;
break;
case 0x3f0:
n2->non_resumable_head = data;
break;
case 0x3f8:
if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
return SS_AsiSpace::NO_WRITE;
n2->non_resumable_tail = data;
break;
default:
assert(0); // asi mapping error
}
if (n2->cpu_mondo_head.offset() != n2->cpu_mondo_tail.offset())
n2->irq.raise(n2,SS_Interrupt::BIT_CPU_MONDO_TRAP);
else
n2->irq.retract(SS_Interrupt::BIT_CPU_MONDO_TRAP);
if (n2->dev_mondo_head.offset() != n2->dev_mondo_tail.offset())
n2->irq.raise(n2,SS_Interrupt::BIT_DEV_MONDO_TRAP);
else
n2->irq.retract(SS_Interrupt::BIT_DEV_MONDO_TRAP);
if (n2->resumable_head.offset() != n2->resumable_tail.offset())
n2->irq.raise(n2,SS_Interrupt::BIT_RESUMABLE_ERROR);
else
n2->irq.retract(SS_Interrupt::BIT_RESUMABLE_ERROR);
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::intr_queue_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
switch (va)
{
case 0x3c0: *data = n2->cpu_mondo_head(); break;
case 0x3c8: *data = n2->cpu_mondo_tail(); break;
case 0x3d0: *data = n2->dev_mondo_head(); break;
case 0x3d8: *data = n2->dev_mondo_tail(); break;
case 0x3e0: *data = n2->resumable_head(); break;
case 0x3e8: *data = n2->resumable_tail(); break;
case 0x3f0: *data = n2->non_resumable_head(); break;
case 0x3f8: *data = n2->non_resumable_tail(); break;
default: assert(0); // asi mapping error
}
return SS_AsiSpace::OK;
}
/*}}}*/
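// A store to the interrupt receive register can only clear pending
// bits (the written value is and-ed with the current intr_recv); the
// wr64 variant below overwrites the register.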
SS_AsiSpace::Error N2_Strand::intr_recv_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
n2->intr_recv = data & n2->intr_recv;
n2->intr_update();
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::intr_recv_wr64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
n2->intr_recv = data;
n2->intr_update();
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::intr_recv_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
*data = n2->intr_recv;
return SS_AsiSpace::OK;
}
/*}}}*/
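// Reading the incoming interrupt vector register returns the highest
// pending vector and clears its bit in intr_recv.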
SS_AsiSpace::Error N2_Strand::intr_r_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
*data= n2->intr_r();
n2->intr_recv &= ~(uint64_t(1) << n2->intr_r.vector());
n2->intr_update();
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::desr_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
*data= n2->desr();
n2->desr.set_unmasked(0);
return SS_AsiSpace::OK;
}
/*}}}*/
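// Diagnostic load from the store buffer access ASI: the va selects
// both the entry and which field (data, ECC, control/parity, CAM,
// or the current store buffer pointer) is returned.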
SS_AsiSpace::Error N2_Strand::stb_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
N2_StbAccessAddrFields addr;
addr.set(va);
if (n2->sim_state.ras_enabled() )
{
// calculate index to look into the corresponding entry
*data = n2->stb.get_stb_value(addr());
}
N2_StbAccessDaReg stb_access_da_reg;
N2_StbAccessEccReg stb_access_ecc_reg;
N2_StbAccessCtlReg stb_access_ctl_reg;
N2_StbAccessCamReg stb_access_cam_reg;
switch (addr.field())
{
case N2_StbAccessAddrFields::DATA_FIELD:
stb_access_da_reg.set(*data);
*data = stb_access_da_reg();
break;
case N2_StbAccessAddrFields::ECC_FIELD:
stb_access_ecc_reg.set(*data);
*data = stb_access_ecc_reg();
break;
case N2_StbAccessAddrFields::CNTRL_PARITY_FIELD:
stb_access_ctl_reg.set(*data);
*data = stb_access_ctl_reg();
break;
case N2_StbAccessAddrFields::CAM_FIELD:
stb_access_cam_reg.set(*data);
*data = stb_access_cam_reg();
break;
case N2_StbAccessAddrFields::STB_POINTER_FIELD:
// The value of the "current store buffer pointer" is provided
// by the test bench through the follow-me scheme, except in RAS
// mode, where it is read from the store buffer model.
if (n2->sim_state.ras_enabled()) {
*data = n2->stb.get_stb_pointer();
}
break;
default:
// Reserved values.
// ---
// This ASI, like other diagnostic array access ASIs, is not checked for a
// legal VA. So using a "reserved" value won't generate an exception, but
// will generate unpredictable data from a software perspective.
// I can tell you that for this specific case, all cases where va[8]==1
// will act the same as va[8:6]==100, which is to return the current store
// buffer pointer.
// ---Mark
//---> Per Mark, we should treat the remaining cases as 0x4; the testbench
// is expected to provide a follow-me value.
break;
}
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::irf_ecc_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
*data = 0; // For now return 0
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::frf_ecc_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
*data = 0; // For now return 0
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tsa_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
*data = 0; // For now return 0
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::mra_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
*data = 0; // For now return 0
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tick_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
N2_TickAccess ta;
ta = va;
// 1 - Return Data , 0 - Return ECC
if (ta.data_np())
{
switch (ta.index())
{
case 0:
*data = n2->tick_cmpr(); break;
case 1:
*data = n2->stick_cmpr(); break;
case 2:
*data = n2->hstick_cmpr(); break;
default:
assert(0);
}
}
else
{
if (s->sim_state.ras_enabled())
{
BL_EccBits ecc_obj;
ecc_obj = n2->tick_cmpr_array_ecc[ta.index()];
*data = ecc_obj.get();
}
else
*data = 0;
}
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tw_control_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
n2->tw_control.set(data);
n2->core.tw_status.lock();
if (n2->tw_control.stp())
n2->core.tw_status.stp(n2->core.tw_status.stp() | (1 << (n2->strand_id() & 7)));
else
n2->core.tw_status.stp(n2->core.tw_status.stp() &~ (1 << (n2->strand_id() & 7)));
n2->core.tw_status.unlock();
return SS_AsiSpace::OK;
}
/*}}}*/
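// Scratchpad loads check the stored ECC when RAS is enabled: a
// BL_Hamming_64_8_Synd syndrome over the data and its check bits
// classifies the access as clean, single-bit (SCAC) or multi-bit
// (SCAU) error, and on error the syndrome and array index are
// captured in the DSFAR before signalling a trap.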
SS_AsiSpace::Error N2_Strand::n2_scratchpad_ld64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
{
assert(va < 64);
uint64_t* reg = (uint64_t*)_reg;
N2_Strand* n2 = (N2_Strand*)s;
// If RAS enabled and appropriate error detection flags are set
// determine if there are any errors (injected or otherwise)
if (n2->sim_state.ras_enabled() )
{
N2_Core& n2_core = n2->core;
BL_EccBits ecc_obj = n2->sp_ecc[(va >> 3) & 7];
// Check if the ecc associated with this register is a valid ecc
if (ecc_obj.valid())
{
// Syndrome is the difference between the stored and calculated ECC values
BL_Hamming_64_8_Synd syndrome = BL_Hamming_64_8_Synd(reg[(va >> 3) & 7],ecc_obj);
bool updateDsfar = false;
// Errors are recorded only if the PSCCE bit is set in the SETER
if (n2->seter.pscce())
{
// Correctable errors are detected only if the SCAC bit in CERER is set
if (n2_core.cerer.scac())
{
if (syndrome.isSingleBitError())
{
n2->data_sfsr.error_type(N2_DataSfsr::SCAC);
updateDsfar = true;
}
}
// Uncorrectable errors are detected only if the SCAU bit in CERER is set
else if (n2_core.cerer.scau())
{
if (syndrome.isDoubleBitError() || syndrome.isMultipleBitError())
{
n2->data_sfsr.error_type(N2_DataSfsr::SCAU);
updateDsfar = true;
}
}
if (updateDsfar)
{
unsigned long long error_add = 0;
// Capture the syndrome and array index in DSFAR
// Store the syndrome in bits 3 thru 10 of DSFAR
error_add = BL_BitUtility::set_subfield(error_add,syndrome.getSyndrome(),3,10);
// Store the scratchpad array index in bits 0 thru 2
error_add = BL_BitUtility::set_subfield(error_add,((va >> 3) & 7),0,2);
n2->data_sfar.error_addr(error_add);
return SS_AsiSpace::TRAP_IPE;
}
}
}
}
*data = reg[(va >> 3) & 7];
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::n2_scratchpad_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
{
assert(va < 64);
N2_Strand* n2 = (N2_Strand*)s;
// If RAS enabled and appropriate error injection flags are set
// Then inject error in check bits
if (n2->sim_state.ras_enabled() )
{
N2_Core& n2_core = n2->core;
BL_EccBits ecc_obj = BL_Hamming_64_8_Synd::calc_check_bits(data);
unsigned ecc = ecc_obj.get();
// Check if ENB and SCAU bits are set in N2 Error Injection Register
if ((n2_core.error_inject.ene() == 1) && (n2_core.error_inject.scau() == 1))
ecc ^= n2_core.error_inject.eccmask();
// Set back the corrupted ECC.
ecc_obj.set(ecc);
n2->sp_ecc[(va >> 3) & 7] = ecc_obj;
}
uint64_t* reg = (uint64_t*)_reg;
reg[(va >> 3) & 7] = data;
return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::scratchpad_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
N2_ScratchpadAccess spa;
spa = va;
// 1 - Return Data , 0 - Return ECC
if (spa.data_np())
*data = n2->scratchpad[spa.index()];
else
{
BL_EccBits ecc_obj(0);
if(s->sim_state.ras_enabled())
ecc_obj = n2->sp_ecc[spa.index()];
*data = ecc_obj.get();
}
return SS_AsiSpace::OK;
}
/*}}}*/
/*static*/ void N2_Strand::n2_run_perf( SS_Strand* s, Sam::Vcpu::perfcntr which, int64_t incr )/*{{{*/
//
// n2_run_perf() updates a pic value (pic0 or pic1) and sets the
// overflow bit accordingly; if traps on overflow are enabled (pcr.toe)
// and an overflow did occur, it raises the appropriate trap.
//
{
N2_Strand* n2 = (N2_Strand*)s;
if ((n2->pcr.st() && n2->pstate.priv())
|| (n2->pcr.ut() && ! n2->pstate.priv())
|| (n2->pcr.ht() && n2->hpstate.hpriv()))
{
switch (which)
{
case Sam::Vcpu::PIC0:
{
uint64_t tmp = n2->pic.l() + incr;
if (tmp > 0xffffffffull)
{
n2->pcr.ov0(1);
if (n2->pcr.toe() & 0x1) /* bit-4 for pic0 ??? */
{
n2->irq.raise(s,SS_Interrupt::BIT_INTERRUPT_LEVEL_15);
}
}
n2->pic.l(tmp);
}
break;
case Sam::Vcpu::PIC1:
{
uint64_t tmp = n2->pic.h() + incr;
if (tmp > 0xffffffffull)
{
n2->pcr.ov1(1);
if (n2->pcr.toe() & 0x2) /* bit-5 for pic1 ??? */
{
n2->irq.raise(s,SS_Interrupt::BIT_INTERRUPT_LEVEL_15);
}
}
n2->pic.h(tmp);
}
break;
}
}
}
/*}}}*/
void N2_Strand::n2_internal_interrupt( SS_Strand* s, uint_t vector, bool raise )/*{{{*/
{
// Called by N2_Cpu when asi 0x73 (intr_w) is assigned,
// e.g. whenever one strand cross-calls another strand.
N2_Strand* n2 = (N2_Strand*)s;
assert(!n2->sim_state.cosim());
if (raise)
n2->intr_recv |= (uint64_t(1) << vector);
else
n2->intr_recv &= ~(uint64_t(1) << vector);
n2->intr_update();
}
/*}}}*/
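// intr_update() scans intr_recv from vector 63 down to 0, latches the
// highest pending vector in intr_r, and raises or retracts the vector
// interrupt accordingly.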
void N2_Strand::intr_update()/*{{{*/
{
intr_r.vector(0);
if (intr_recv)
{
for (int l=63; l >= 0; l--)
{
if (intr_recv & (uint64_t(1) << l))
{
intr_r.vector(l);
break;
}
}
irq.raise(this,SS_Interrupt::BIT_INTERRUPT_VECTOR);
}
else
irq.retract(SS_Interrupt::BIT_INTERRUPT_VECTOR);
}
/*}}}*/
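// n2_cnv2pa() translates an address for the frontend. Depending on the
// mode it fills in the default context and/or partition id, then tries
// a va->pa or ra->pa lookup in the I-TLB first and the D-TLB second;
// PA->PA simply picks the I/O or memory pass-through TTE based on the
// top physical address bit.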
Sam::Vcpu::TranslateError N2_Strand::n2_cnv2pa( SS_Strand* s, Sam::Vcpu::TranslateMode mode, SS_Vaddr addr, uint64_t ctx, uint64_t pid, SS_Paddr* pa )/*{{{*/
{
N2_Strand* n2 = (N2_Strand*)s;
SS_Tte* tte = 0;
bool tte_multi_hit;
switch (mode)
{
case Sam::Vcpu::TRANSLATE_VA_TO_PA:
ctx = n2->primary_context[0]();
// fall through
case Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX:
pid = n2->partition_id();
// fall through
case Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX_PID:
tte = ((N2_Tlb*)n2->inst_tlb)->lookup_va2pa(s,addr,ctx,pid,&tte_multi_hit);
if (tte == 0)
tte = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,addr,ctx,pid,&tte_multi_hit);
break;
case Sam::Vcpu::TRANSLATE_RA_TO_PA:
pid = n2->partition_id();
// fall through
case Sam::Vcpu::TRANSLATE_RA_TO_PA_PID:
tte = ((N2_Tlb*)n2->inst_tlb)->lookup_ra2pa(s,addr,pid,&tte_multi_hit);
if (tte == 0)
tte = ((N2_Tlb*)n2->data_tlb)->lookup_ra2pa(s,addr,pid,&tte_multi_hit);
break;
case Sam::Vcpu::TRANSLATE_PA_TO_PA:
if ((addr >> (n2->pa_bits() - 1)) & 1)
tte = n2->phys_tte_io;
else
tte = n2->phys_tte_mem;
break;
default:
assert(0);
// fall out
}
if (tte)
{
*pa = tte->trans(addr);
return Sam::Vcpu::TRANSLATE_OK;
}
else
return Sam::Vcpu::TRANSLATE_NO_TTE_FOUND;
}
/*}}}*/
void N2_Strand::warm_reset(bool intp)/*{{{*/
{
// Because WMR/DBR is not exactly the same as POR, we cannot call
// ss_trap(POWER_ON_RESET) directly.
//ss_trap(0,0,this,0,SS_Trap::POWER_ON_RESET);
//TODO: if there are other registers that should be updated by wmr/dbr,
// but are not part of trap handling, then do it here.
lsu_ctr = 0;
seter = 0;
// warm_reset() is invoked from at least two places: one is reset_gen,
// where a RESET_GEN_WMR trap should be triggered; the other is the cosim
// pli-command "INTP 00 00", which signals a system-wide warm reset. In
// that case a RESET_GEN_WMR trap should not be triggered, as it will be
// followed by an "INTP xx 01" for each strand, where RESET_GEN_WMR will
// be invoked.
if (intp)
irq_launch(SS_Trap::RESET_GEN_WMR);
return;
}
/*}}}*/
void N2_Strand::n2_external_interrupt( SS_Strand* s, uint64_t *payload, bool raise )/*{{{*/
{
// Called by an external source such as the NIU to signal
// a device interrupt to the strand. The N2 model always
// raises the trap; it never sends retracts. We take care
// of that ourselves.
N2_Strand* n2 = (N2_Strand*)s;
assert(!n2->sim_state.cosim());
// Cannot use s->msg.make_signal here because the strand is the
// receiver of the signal, not the creator.
SS_Signal *sgn = SS_Signal::alloc(SS_Signal::EXTERNAL_INTERRUPT);
sgn->irq_type = payload[7];
sgn->irq_raise = true;
n2->post_signal(sgn);
}
/*}}}*/
void N2_Strand::n2_ras_enable( SS_Strand* s, char* )/*{{{*/
{
// RAS enable set from the frontend.
N2_Strand* n2 = (N2_Strand*)s;
n2->exe_table = n2->trc_exe_table;
n2->mem_table = n2->mem_ras_table;
n2->sim_state.ras_enabled(1);
assert(((N2_Model*)n2->model)->ck_memory);
s->memory = ((N2_Model*)n2->model)->ck_memory;
if (n2->mem_err_detector.memory == NULL)
n2->mem_err_detector.memory = s->memory;
s->flush_tte_all(); // bounce the decode caches
}
/*}}}*/
// fill_store_buffer_mem() adds the write transactions contained in a
// MemoryTransaction to the store buffer. It handles both large and small
// transactions by breaking large transactions into doubleword chunks
// and mapping 4-, 2-, and 1-byte transactions to the correct "byte marks".
SS_Trap::Type N2_Strand::fill_store_buffer_mem(const MemoryTransaction &memXact)/*{{{*/
{
if (!memXact.writeXact())
{
fprintf(stderr,"N2_Strand::fillStoreBufferMem"
"bad xact access: %d", memXact.access());
}
if (memXact.size() >= 8)
{
// Fill one doubleword chunk at a time, reporting the first trap.
for (int ndx = 0; ndx < int(memXact.size()/sizeof(uint64_t)); ++ndx)
{
SS_Trap::Type trap = stb.fill_store_buffer(false, memXact.paddr() + sizeof(uint64_t) * ndx,
0xff, memXact.getData(ndx));
if (trap != SS_Trap::NO_TRAP)
return trap;
}
} else
{
uint8_t byteMarks;
switch (memXact.size())
{
case sizeof(int):
byteMarks = 0xf;
break;
case sizeof(short):
byteMarks = 0x3;
break;
case sizeof(char):
byteMarks = 0x1;
break;
default:
fprintf(stderr,"N2_Strand::fill_store_buffer_mem"
"bad xact size: %d", memXact.size());
}
return stb.fill_store_buffer(false, memXact.paddr(), byteMarks, memXact.getData());
}
return SS_Trap::NO_TRAP;
}
/*}}}*/
SS_Trap::Type N2_Strand::fill_store_buffer_asi(uint64_t addr,
uint8_t asi_num,
uint64_t data)/*{{{*/
{
if (asi_num != N2_Asi::ASI_ERROR_INJECT)
{
return stb.fill_store_buffer(true, addr, asi_num, data);
}
return SS_Trap::NO_TRAP;
}
/*}}}*/
SS_Trap::Type N2_Strand::check_store_buffer_RAWtrap(const MemoryTransaction &memXact)/*{{{*/
{
if (!memXact.readXact())
{
fprintf(stderr,"N2_Strand::check_store_buffer_RAWtrap"
"bad access type: %d", memXact.access());
}
return stb.check_store_buffer_RAWtrap(memXact);
}
/*}}}*/
SS_Trap::Type N2_Strand::flush_store_buffer()/*{{{*/
{
return stb.flush_store_buffer();
}
/*}}}*/