Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / sam-t2 / sam / cpus / vonk / n2 / lib / cpu / src / N2_Strand.cc
CommitLineData
920dae64
AT
1// ========== Copyright Header Begin ==========================================
2//
3// OpenSPARC T2 Processor File: N2_Strand.cc
4// Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
5// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
6//
7// The above named program is free software; you can redistribute it and/or
8// modify it under the terms of the GNU General Public
9// License version 2 as published by the Free Software Foundation.
10//
11// The above named program is distributed in the hope that it will be
12// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14// General Public License for more details.
15//
16// You should have received a copy of the GNU General Public
17// License along with this work; if not, write to the Free Software
18// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
19//
20// ========== Copyright Header End ============================================
21
22#include <new>
23#include "N2_Asi.h"
24#include "N2_Registers.h"
25#include "N2_Model.h"
26#include "N2_Cpu.h"
27#include "N2_Core.h"
28#include "N2_Strand.h"
29#include "N2_IrfEcc.h"
30#include "N2_FrfEcc.h"
31#include "N2_Tlb.h"
32#include "BL_BitUtility.h"
33#include "SS_CKMemory.h"
34#include <stdio.h>
35#include <stdlib.h>
36
// Implemented address widths of the N2 strand. N2 implements 48 of the
// 64 virtual (real) address bits and a 40 bit physical address space;
// these feed va_bits()/pa_bits() in the constructor below.
enum
{
  VA_BITS = 48,
  PA_BITS = 40
};
42
43inline SS_Vaddr va_signext( SS_Vaddr va ) /*{{{*/
44// va_signext() sign extends the given virtual address from VA_BITS to 64 bits.
45{
46 return (va << (64 - VA_BITS)) >> (64 - VA_BITS);
47}
48/*}}}*/
49
50inline bool va_inrange( SS_Vaddr va ) /*{{{*/
51//
52// va_inrange() checks if the va is not in the what is called va-hole.
53// N2 only implement 48bit of the 64bit virtual (real) address space.
54// Virtual addresses are signed so this means that the upper 16 bits
55// are checked for proper sign extendion if bit47. So we check that
56//
57// 0x0000000000000000 < va <= 0x00007fffffffffff
58// or 0xffff800000000000 < va <= 0xffffffffffffffff
59{
60 return ((va >> (VA_BITS - 1)) == SS_Vaddr(0))
61 || ((va >> (VA_BITS - 1)) == -SS_Vaddr(1));
62}
63/*}}}*/
64inline bool pc_inrange( SS_Vaddr va ) /*{{{*/
65//
66// pc_inrange() checks the same range as va_inrange() with an additional
67// 0x20 bytes taken of the positive end of the range. So we check that
68//
69// 0x0000000000000000 < va <= 0x00007fffffffffdf
70// or 0xffff800000000000 < va <= 0xffffffffffffffff
71{
72 return (((va + SS_Vaddr(0x20)) >> (VA_BITS - 1)) == SS_Vaddr(0))
73 || (( va >> (VA_BITS - 1)) == -SS_Vaddr(1));
74}
75/*}}}*/
76inline bool pc_onrange( SS_Vaddr va ) /*{{{*/
77//
78// pc_onrange() checks the same range as pc_inrange() but instead with one
79// additional decode cache line (0x40 bytes) taken of the positive end of
80// the range. So we check that
81//
82// 0x0000000000000000 < va <= 0x00007fffffffffbf
83// or 0xffff800000000000 < va <= 0xffffffffffffffff
84{
85 return (((va + SS_Vaddr(SS_InstrCache::LINE_SIZE * 4)) >> (VA_BITS - 1)) == SS_Vaddr(0))
86 || (( va >> (VA_BITS - 1)) == -SS_Vaddr(1));
87}
88/*}}}*/
89
90inline bool pc_iorange( SS_Vaddr pc )/*{{{*/
91{
92 // I/O address ranges 0xff00000000:0xffffffffff and 0xa000000000:0xbfffffffff
93 // are the only I/O regions from which instructions fetches are ok.
94
95 return (((pc >> (PA_BITS - 8)) & 0xff) == 0xff) || (((pc >> (PA_BITS - 3)) & 0x7) == 0x5);
96}
97/*}}}*/
98
99extern "C" SS_Vaddr n2_exe_real_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i )/*{{{*/
100{
101 // First check if we have a UI breakpoint on the little address range
102
103 if (s->test_break_inst_va(pc))
104 return pc;
105
106 N2_Strand* n2 = (N2_Strand*)s;
107 n2->inst_tag_update(0,pc);
108 return (s->inst_trap)(pc,npc,s,i,va_signext(pc),SS_Trap::SS_Trap::INSTRUCTION_REAL_RANGE);
109}
110/*}}}*/
111extern "C" SS_Vaddr n2_exe_address_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i )/*{{{*/
112{
113 // First check if we have a UI breakpoint on the little address range
114
115 if (s->test_break_inst_va(pc))
116 return pc;
117
118 N2_Strand* n2 = (N2_Strand*)s;
119 uint64_t ctxt0 = n2->tl() ? 0 : n2->primary_context[0]();
120 n2->inst_tag_update(0,pc);
121 return (s->inst_trap)(pc,npc,s,i,va_signext(pc),SS_Trap::SS_Trap::INSTRUCTION_ADDRESS_RANGE);
122}
123/*}}}*/
124extern "C" SS_Vaddr n2_exe_va_watchpoint( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i )/*{{{*/
125{
126 // First check if we have a UI breakpoint on the watchpoint address
127
128 if (s->test_break_inst_va(pc))
129 return pc;
130
131 N2_Strand* n2 = (N2_Strand*)s;
132 SS_Vaddr va = pc & n2->mask_pstate_am;
133 uint64_t ctxt0 = n2->tl() ? 0 : n2->primary_context[0]();
134
135 // Make sure we handle va watchpoint enable/disable properly. E.g
136 // when va watchpoint info is changed we flush the cache. We only
137 // expect to get here when they are enabled and when they should hit.
138
139 assert(n2->inst_watchpoint_va_hit(va));
140
141 return (n2->inst_trap)(pc,npc,s,i,va,SS_Trap::INSTRUCTION_VA_WATCHPOINT); // prio 2.5
142}
143/*}}}*/
144
145extern "C" SS_Vaddr n2_dec_real_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand *s, SS_Instr* i )/*{{{*/
146{
147 N2_Strand* n2 = (N2_Strand*)s;
148 SS_Instr* line = n2->inst_cache->pc_line(pc);
149 long l;
150
151 // For the last cacheline of the positive range of the address space we
152 // have to be carefull what we do. For 0x7fffffffffc0 to 0x7fffffffdf
153 // we have to decode as normal, for 0x7fffffffe0 to 0x7fffffffff we have
154 // to throw real range traps. In either case make sure we check UI break
155 // points first.
156
157 for (l = 0; l < (SS_InstrCache::LINE_SIZE >> 1); l++)
158 line->line_index(l)->exe = ss_break_inst_va_dec;
159
160 for (; l < SS_InstrCache::LINE_SIZE; l++)
161 line->line_index(l)->exe = n2_exe_real_range;
162
163 return (i->exe)(pc,npc,s,i);
164}
165/*}}}*/
166extern "C" SS_Vaddr n2_dec_address_range( SS_Vaddr pc, SS_Vaddr npc, SS_Strand *s, SS_Instr* i )/*{{{*/
167{
168 assert(SS_InstrCache::LINE_SIZE == 16);
169
170 // For the last cacheline of the positive range of the address space we
171 // have to be carefull what we do. For 0x7fffffffffc0 to 0x7fffffffdf
172 // we have to decode as normal, for 0x7fffffffe0 to 0x7fffffffff we have
173 // to throw address range traps. However the va watchpoint has to be
174 // checked before that as it has higher priority then address range trap.
175 // Make sure that we check UI breakpoints first though.
176
177 N2_Strand* n2 = (N2_Strand*)s;
178 SS_Instr* line = n2->inst_cache->pc_line(pc);
179 long l;
180
181 for (l = 0; l < (SS_InstrCache::LINE_SIZE >> 1); l++)
182 line->line_index(l)->exe = ss_break_inst_va_dec;
183 for (; l < SS_InstrCache::LINE_SIZE; l++)
184 line->line_index(l)->exe = n2_exe_address_range;
185
186 SS_Vaddr va = pc & n2->mask_pstate_am;
187 SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);
188
189 // If va watchpoint checks are enabled and the they happen in this
190 // cache line then set exe of the location that causes the va watchpoint
191 // trap to the routine that does that.
192
193 if (n2->inst_watchpoint_va_near_hit(tm,va))
194 {
195 l = (n2->inst_watchpoint_va_get() >> 2) & SS_InstrCache::LINE_MASK;
196 line->line_index(l)->exe = n2_exe_va_watchpoint;
197 }
198
199 return (i->exe)(pc,npc,s,i);
200}
201/*}}}*/
202extern "C" SS_Vaddr n2_dec_va_watchpoint( SS_Vaddr pc, SS_Vaddr npc, SS_Strand *s, SS_Instr* i )/*{{{*/
203{
204 N2_Strand* n2 = (N2_Strand*)s;
205 SS_Instr* line = n2->inst_cache->pc_line(pc);
206 long l;
207
208 // On an inst_mmu_va access we can cause a fetch of a cacheline that has
209 // a watchpoint enabled in it, but the pc that caused the fetch did not match.
210 // The mmu makes us trampoline through this routine. So set the decoders back
211 // to the normal decoders and insert a watchpoint trigger in the correct place.
212
213 for (l=0; l < SS_InstrCache::LINE_SIZE; l++)
214 line->line_index(l)->exe = ss_break_inst_va_dec;
215
216 SS_Vaddr va = pc & n2->mask_pstate_am;
217 SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);
218
219 // Make sure we only get here when watchpoint are truely enabled and fall
220 // in this cacheline.
221
222 assert(n2->inst_watchpoint_va_near_hit(tm,va));
223
224 l = (n2->inst_watchpoint_va_get() >> 2) & SS_InstrCache::LINE_MASK;
225 line->line_index(l)->exe = n2_exe_va_watchpoint;
226
227 return (i->exe)(pc,npc,s,i);
228}
229/*}}}*/
230
// N2_Strand constructor: wires one hardware strand into the generic
// SS_Strand framework. It installs the N2 specific dispatch tables and
// mmu/trap/asi handler entry points, preprograms the bypass TTEs used
// for physical instruction fetch, configures the hardware version and
// core id registers, overrides two trap priorities, and finally builds
// the complete non-translating ASI/VA register map for this strand.
N2_Strand::N2_Strand( N2_Core& _core, const char* _name, uint_t _strand_id )/*{{{*/
 :
 SS_Strand(_core,_name,run_exe_table,mem_run_table,mem_trc_table,_core.cpu.mem_err_detector),
 core(_core),
 trap_dae_inv_asi(SS_Trap::RESERVED), // UNSUPPORTED_PAGE_SIZE ... an exception to the norm ...
 data_wp_pa_mask(1),
 data_wp_pa_addr(0),
 data_wp_va_mask(1),
 data_wp_va_addr(0),
 data_wp_bytes(0),
 data_wp_flags(0),
 data_wp_check(false),
 stb(*this)
{
 strand_id = _strand_id;
 model = &core.cpu.model;

 // Install the N2 flavour of the decode/execute/memory dispatch tables.

 dec_table = &run_dec_xx_xxxxxx;
 exe_table = run_exe_table;
 mem_table = mem_run_table;

#ifdef ARCH_V8
 v8_exe_table = exe_table;
 exe_table = v8_run_exe_table;
#endif

 get_state_name = n2_get_state_name;
 get_state = n2_get_state;
 set_state = n2_set_state;

 // Implemented address widths (48 bit virtual, 40 bit physical); the
 // watchpoint masks keep all implemented bits except the low 3 (8 byte
 // aligned watchpoints).

 va_bits = VA_BITS;
 pa_bits = PA_BITS;

 data_wp_pa_mask = (SS_Paddr(1) << pa_bits()) - 8;
 data_wp_va_mask = (SS_Vaddr(1) << va_bits()) - 8;

 // Preprogrammed TTEs used by n2_inst_mmu_pa for bypass (physical)
 // instruction fetch: memory below the I/O boundary (top pa bit clear),
 // 32bit clipped memory for pstate.am mode, and I/O space above the
 // boundary.

 SS_Paddr io_mask = SS_Paddr(1) << (pa_bits() - 1);

 phys_tte_mem->phys_mask = ~(io_mask - SS_Paddr(1));
 phys_tte_mem->phys_page = SS_Paddr(0);
 phys_tte_mem->virt_mask = io_mask;
 phys_tte_mem->virt_page = SS_Paddr(0);

 phys_tte_mem_am->phys_mask = ~SS_Paddr(0) << 32;
 phys_tte_mem_am->phys_page = SS_Paddr(0);
 phys_tte_mem_am->virt_mask = io_mask;
 phys_tte_mem_am->virt_page = SS_Paddr(0);

 phys_tte_io->phys_mask = ~(io_mask - SS_Paddr(1));
 phys_tte_io->phys_page = io_mask;
 phys_tte_io->virt_mask = io_mask;
 phys_tte_io->virt_page = io_mask;

 // N2 specific mmu, trap and invalid-asi handler entry points.

 trap = (SS_TrapFun)ss_trap;
 inst_mmu = (SS_InstMmu)n2_inst_mmu_pa;
 inst_mmu_va = (SS_InstMmu)n2_inst_mmu_va;
 inst_mmu_ra = (SS_InstMmu)n2_inst_mmu_ra;
 inst_mmu_pa = (SS_InstMmu)n2_inst_mmu_pa;
 data_mmu = (SS_DataMmu)n2_data_mmu;
 inst_trap = (SS_MmuTrap)n2_inst_trap;
 data_trap = (SS_MmuTrap)n2_data_trap;
 invalid_asi = (SS_InvalidAsi)n2_invalid_asi;

 run_perf = n2_run_perf;

 internal_interrupt = n2_internal_interrupt;
 external_interrupt = n2_external_interrupt;
 ras_enable = n2_ras_enable;

 // The TLBs belong to the core; only the first strand of each cpu
 // registers itself with them here.

 inst_tlb = &core.inst_tlb;
 data_tlb = &core.data_tlb;

 if ((strand_id() % N2_Model::NO_STRANDS_PER_CPU) == 0)
 {
   core.inst_tlb.add_strand(this);
   core.data_tlb.add_strand(this);
 }

 setup_tte_link_tables();

 tlb_entry = -1;
#ifdef COMPILE_FOR_COSIM
 inst_hwtw = n2_inst_hwtw;
 data_hwtw = n2_data_hwtw;
#endif

 cnv2pa = n2_cnv2pa;

 // Re-construct a few inherited registers in place with their N2
 // specific layouts (placement new keeps the storage in SS_Strand).

 new(&tstate) N2_Tstate(); // N2 uses one bit less for the gl field
 new(&lsu_ctr) N2_LsuCtr(); // ToDo: N2 need to have its own lsu ctr, should not be in SS_Strand.
 new(&gl) N2_Gl(); // N2 gl uses [3:0] instead of [2:0]

 intr_recv = 0;

 // Hardware version and id registers: maxtl 6, maxgl 3, maxwin 7,
 // impl 0x24, manuf 0x3e, mask 0x20.

 max_tl = 6;
 max_gl = 3;

 hver.maxwin(7);
 hver.maxgl(3);
 hver.maxtl(6);
 hver.mask(0x20);
 hver.impl(0x24);
 hver.manuf(0x3e);

 core_id.max_core_id(0x3f);
 core_id.max_strand_id(0x7);
 core_id.core_id(strand_id());

 core_intr_id.intr_id_hi(0);
 core_intr_id.intr_id_lo(strand_id());

 // the first strand of each node (i.e., cpu) should be in running state
 // at the beginning.
 sim_state.running((strand_id() % N2_Model::NO_STRANDS_PER_CPU) == 0);

 // Set the trap priorities for some traps to the correct number

 SS_Trap::table[SS_Trap::ILLEGAL_INSTRUCTION].priority = 61;
 SS_Trap::table[SS_Trap::INSTRUCTION_BREAKPOINT].priority = 62;

 // Add the N2 specific translating asi info to the asi_info table.
 // Note block init stores are not valid for floating point stores.

 SS_AsiInfo::Flags ldst = SS_AsiInfo::QUAD_LOAD | SS_AsiInfo::BLOCK_INIT | SS_AsiInfo::CLASS_STX | SS_AsiInfo::CLASS_ST;

 SS_AsiInfo::Flags ldst_p = ldst | SS_AsiInfo::PRIMARY;
 SS_AsiInfo::Flags ldst_s = ldst | SS_AsiInfo::SECONDARY;
 SS_AsiInfo::Flags ldst_n = ldst | SS_AsiInfo::NUCLEUS | SS_AsiInfo::PRIVILEGED;

 SS_AsiInfo::Flags ldst_pl = ldst_p | SS_AsiInfo::LITTLE_ENDIAN;
 SS_AsiInfo::Flags ldst_sl = ldst_s | SS_AsiInfo::LITTLE_ENDIAN;
 SS_AsiInfo::Flags ldst_nl = ldst_n | SS_AsiInfo::LITTLE_ENDIAN;

 SS_AsiInfo::Flags ldst_aiup = ldst_p | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;
 SS_AsiInfo::Flags ldst_aius = ldst_s | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;
 SS_AsiInfo::Flags ldst_aiupl = ldst_pl | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;
 SS_AsiInfo::Flags ldst_aiusl = ldst_sl | SS_AsiInfo::AS_IF_USER | SS_AsiInfo::PRIVILEGED;

 ldst_p = ldst_p | SS_AsiInfo::BYPASS;
 ldst_s = ldst_s | SS_AsiInfo::BYPASS;
 ldst_n = ldst_n | SS_AsiInfo::BYPASS;
 ldst_pl = ldst_pl | SS_AsiInfo::BYPASS;
 ldst_sl = ldst_sl | SS_AsiInfo::BYPASS;
 ldst_nl = ldst_nl | SS_AsiInfo::BYPASS;

 asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_PRIMARY].set_flags(ldst_aiup);
 asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_SECONDAY].set_flags(ldst_aius);
 asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_PRIMARY_LITTLE].set_flags(ldst_aiupl);
 asi_info[N2_Asi::ASI_AS_IF_USER_BLOCK_INIT_ST_QUAD_LDD_SECONDAY_LITTLE].set_flags(ldst_aiusl);
 asi_info[N2_Asi::ASI_NUCLEUS_BLOCK_INIT_ST_QUAD_LDD].set_flags(ldst_n);
 asi_info[N2_Asi::ASI_NUCLEUS_BLOCK_INIT_ST_QUAD_LDD_LITTLE].set_flags(ldst_nl);
 asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_PRIMARY].set_flags(ldst_p);
 asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_SECONDAY].set_flags(ldst_s);
 asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_PRIMARY_LITTLE].set_flags(ldst_pl);
 asi_info[N2_Asi::ASI_BLOCK_INIT_ST_QUAD_LDD_SECONDAY_LITTLE].set_flags(ldst_sl);

 // Map in all the strand specific non translating asi/va
 // mapped registers or address ranges. Each add() takes two pairs of
 // hooks as written below: a (load,store) pair and a second pair that is
 // labeled rd64/wr64 elsewhere in this file.

 SS_Node* INST_SIDE = (SS_Node*)0; // Used in one of the two void* arguments of ASI access to
 SS_Node* DATA_SIDE = (SS_Node*)1; // distinguish between instruction side of data side
 SS_Node* VIRT_FLAG = (SS_Node*)0; // Used in one of the two void* arguments of ASI access to
 SS_Node* REAL_FLAG = (SS_Node*)1; // distinguish between virtual or real

 // N2 PRM says that bit63 to bit48 of the va are ignored. So clip
 // those of for all accesses.

 asi_map.set_mask((SS_Vaddr(1) << VA_BITS) - 1);

 // ASI 0x20: scratchpad registers (two windows).

 asi_map[0x20].add(0x00,0x18,this,&scratchpad,
                   n2_scratchpad_ld64,n2_scratchpad_st64,
                   n2_scratchpad_ld64,n2_scratchpad_st64);

 asi_map[0x20].add(0x30,0x38,this,&scratchpad,
                   n2_scratchpad_ld64,n2_scratchpad_st64,
                   n2_scratchpad_ld64,n2_scratchpad_st64);

 // ASI 0x21: MMU primary/secondary context registers, both copies.

 asi_map[0x21].add(0x008,this,&primary_context[0],
                   SS_AsiCtrReg::ld64,pri_ctx_st64,
                   SS_AsiCtrReg::rd64,pri_ctx_st64);

 asi_map[0x21].add(0x010,this,&secondary_context[0],
                   SS_AsiCtrReg::ld64,sec_ctx_st64,
                   SS_AsiCtrReg::rd64,sec_ctx_st64);

 asi_map[0x21].add(0x108,this,&primary_context[1],
                   SS_AsiCtrReg::ld64,pri_ctx_st64,
                   SS_AsiCtrReg::rd64,pri_ctx_st64);

 asi_map[0x21].add(0x110,this,&secondary_context[1],
                   SS_AsiCtrReg::ld64,sec_ctx_st64,
                   SS_AsiCtrReg::rd64,sec_ctx_st64);

 // ASI 0x25: interrupt queue head/tail registers.

 asi_map[0x25].add(0x3c0,0x3f8,this,0,
                   intr_queue_ld64,intr_queue_st64,
                   intr_queue_ld64,intr_queue_st64);

 asi_map[0x45].add(0x00,this,&lsu_ctr,
                   SS_AsiCtrReg::ld64,n2_lsu_ctr_st64,
                   SS_AsiCtrReg::rd64,n2_lsu_ctr_st64);

 // ASI 0x48/0x49: integer and floating point register file ECC (read only).

 asi_map[0x48].add(0x00,0xf8,this,0,
                   irf_ecc_ld64,0,
                   irf_ecc_ld64,0);

 asi_map[0x49].add(0x00,0xf8,this,0,
                   frf_ecc_ld64,0,
                   frf_ecc_ld64,0);

 // ASI 0x4a: store buffer access diagnostics.

 asi_map[0x4a].set_mask(0x38);
 asi_map[0x4a].add(0x00,0x1f8,this,0,
                   stb_access_ld64,0,
                   stb_access_ld64,SS_AsiCtrReg::wr64);

 // ASI 0x4c: error status/enable registers.

 asi_map[0x4c].add(0x00,this,&desr,
                   desr_ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x4c].add(0x18,this,seter);

 // ASI 0x4f: hypervisor scratchpad registers.

 asi_map[0x4f].add(0x00,0x38,this,&scratchpad,
                   n2_scratchpad_ld64,n2_scratchpad_st64,
                   n2_scratchpad_ld64,n2_scratchpad_st64);

 // ASI 0x50: instruction MMU tag target, sfsr and tag access.

 asi_map[0x50].add(0x00,this,&inst_tag_target,
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x50].add(0x18,this,&inst_sfsr,
                   SS_AsiCtrReg::ld64,SS_AsiCtrReg::st64,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x50].add(0x30,this,&inst_tag_access,
                   SS_AsiCtrReg::ld64,tag_access_st64,
                   SS_AsiCtrReg::rd64,tag_access_st64);

 // ASI 0x51: MMU register array diagnostic access (read only).

 asi_map[0x51].set_mask(0x38);
 asi_map[0x51].add(SS_VADDR_MIN,SS_VADDR_MAX,(SS_Node*)0,0,
                   mra_access_ld64,0,
                   mra_access_ld64,0);

 // ASI 0x52: real range and physical offset registers of the hardware
 // tablewalker RA to PA translation.

 asi_map[0x52].add(0x108,this,&real_range[0],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 asi_map[0x52].add(0x110,this,&real_range[1],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 asi_map[0x52].add(0x118,this,&real_range[2],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 asi_map[0x52].add(0x120,this,&real_range[3],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 asi_map[0x52].add(0x208,this,&physical_offset[0],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 asi_map[0x52].add(0x210,this,&physical_offset[1],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 asi_map[0x52].add(0x218,this,&physical_offset[2],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 asi_map[0x52].add(0x220,this,&physical_offset[3],
                   SS_AsiCtrReg::ld64,tsb_ra2pa_st64,
                   SS_AsiCtrReg::rd64,tsb_ra2pa_st64);

 // ASI 0x53: instruction TLB probe (read only).

 asi_map[0x53].add(SS_VADDR_MIN,SS_VADDR_MAX,this,0,
                   inst_tlb_probe_ld64,0,
                   inst_tlb_probe_ld64,0);

 // ASI 0x54: instruction TLB data-in (virtual at 0x000, real at 0x400),
 // TSB config, TSB pointers and tablewalk control.

 asi_map[0x54].add(0x000,INST_SIDE,VIRT_FLAG,
                   0,tlb_data_in_st64,
                   0,tlb_data_in_st64);

 asi_map[0x54].add(0x400,INST_SIDE,REAL_FLAG,
                   0,tlb_data_in_st64,
                   0,tlb_data_in_st64);

 asi_map[0x54].add(0x010,this,&nucleus_tsb_config[0],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x018,this,&nucleus_tsb_config[1],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x020,this,&nucleus_tsb_config[2],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x028,this,&nucleus_tsb_config[3],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x030,this,&non_nucleus_tsb_config[0],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x038,this,&non_nucleus_tsb_config[1],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x040,this,&non_nucleus_tsb_config[2],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x048,this,&non_nucleus_tsb_config[3],
                   SS_AsiCtrReg::ld64,tsb_config_st64,
                   SS_AsiCtrReg::rd64,tsb_config_st64);

 asi_map[0x54].add(0x050,this,&inst_tsb_pointer[0],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x058,this,&inst_tsb_pointer[1],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x060,this,&inst_tsb_pointer[2],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x068,this,&inst_tsb_pointer[3],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x070,this,&data_tsb_pointer[0],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x078,this,&data_tsb_pointer[1],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x080,this,&data_tsb_pointer[2],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x088,this,&data_tsb_pointer[3],
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x54].add(0x090,this,&tw_control,
                   SS_AsiCtrReg::ld64,tw_control_st64,
                   SS_AsiCtrReg::rd64,tw_control_st64);

 // ASI 0x55/0x56: instruction TLB data access and tag read.

 asi_map[0x55].add(0x000,0x1ff,INST_SIDE,0,
                   tlb_data_access_ld64,tlb_data_access_st64,
                   tlb_data_access_ld64,tlb_data_access_st64);
 asi_map[0x55].add(0x400,0x5ff,INST_SIDE,0,
                   tlb_data_access_ld64,tlb_data_access_st64,
                   tlb_data_access_ld64,tlb_data_access_st64);

 asi_map[0x56].add(0x00,0x7ff,INST_SIDE,0,
                   tlb_tag_read_ld64,0,
                   tlb_tag_read_ld64,0);

 // ASI 0x57: instruction TLB demap (write only).

 asi_map[0x57].add(SS_VADDR_MIN,SS_VADDR_MAX,this,0,
                   0,inst_tlb_demap_st64,
                   0,inst_tlb_demap_st64);

 // ASI 0x58: data MMU tag target, sfsr/sfar, tag access, watchpoint,
 // hardware tablewalk config and partition id.

 asi_map[0x58].add(0x000,this,&data_tag_target,
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x58].add(0x018,this,data_sfsr);

 asi_map[0x58].add(0x020,this,&data_sfar,
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,SS_AsiCtrReg::wr64);

 asi_map[0x58].add(0x030,this,&data_tag_access,
                   SS_AsiCtrReg::ld64,tag_access_st64,
                   SS_AsiCtrReg::rd64,tag_access_st64);

 // NOTE(review): unlike the sibling entries the second (debugger side)
 // read hook here is ld64 rather than rd64 -- looks deliberate but
 // confirm against the other register maps before changing.
 asi_map[0x58].add(0x038,this,&data_wp,
                   SS_AsiCtrReg::ld64,data_wp_st64,
                   SS_AsiCtrReg::ld64,data_wp_st64);

 asi_map[0x58].add(0x040,this,hwtw_config);

 asi_map[0x58].add(0x080,this,&partition_id,
                   SS_AsiCtrReg::ld64,partition_id_st64,
                   SS_AsiCtrReg::rd64,partition_id_st64);

 // ASI 0x59: hypervisor scratchpad diagnostic access (read only).

 asi_map[0x59].add(0x00,0x78,(SS_Node*)0,0,
                   scratchpad_access_ld64,0,
                   scratchpad_access_ld64,0);

 // ASI 0x5a: tick diagnostic access (read only).

 asi_map[0x5a].add(0x00,0x10,(SS_Node*)0,0,
                   tick_access_ld64,0,
                   tick_access_ld64,0);

 asi_map[0x5a].add(0x20,0x30,(SS_Node*)0,0,
                   tick_access_ld64,0,
                   tick_access_ld64,0);

 // ASI 0x5b: trap stack array diagnostic access (read only).

 asi_map[0x5b].set_mask(0x38);
 asi_map[0x5b].add(SS_VADDR_MIN,SS_VADDR_MAX,(SS_Node*)0,0,
                   tsa_access_ld64,0,
                   tsa_access_ld64,0);

 // ASI 0x5c..0x5f: data TLB data-in, data access, tag read and demap,
 // mirroring the instruction side entries above.

 asi_map[0x5c].add(0x000,DATA_SIDE,VIRT_FLAG,
                   0,tlb_data_in_st64,
                   0,tlb_data_in_st64);

 asi_map[0x5c].add(0x400,DATA_SIDE,REAL_FLAG,
                   0,tlb_data_in_st64,
                   0,tlb_data_in_st64);

 asi_map[0x5d].add(0x00,0x7ff,DATA_SIDE,0,
                   tlb_data_access_ld64,tlb_data_access_st64,
                   tlb_data_access_ld64,tlb_data_access_st64);

 asi_map[0x5e].add(0x00,0x7ff,DATA_SIDE,0,
                   tlb_tag_read_ld64,0,
                   tlb_tag_read_ld64,0);

 asi_map[0x5f].add(SS_VADDR_MIN,SS_VADDR_MAX,this,0,
                   0,data_tlb_demap_st64,
                   0,data_tlb_demap_st64);

 // ASI 0x63: core interrupt id and core id (read only).

 asi_map[0x63].add(0x00,this,&core_intr_id,
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,0);

 asi_map[0x63].add(0x10,this,&core_id,
                   SS_AsiCtrReg::ld64,0,
                   SS_AsiCtrReg::rd64,0);

 // ASI 0x72/0x74: interrupt receive and incoming vector registers.

 asi_map[0x72].add(0x0,this,0,
                   intr_recv_ld64,intr_recv_st64,
                   intr_recv_ld64,intr_recv_wr64);

 asi_map[0x74].add(0x0,this,0,
                   intr_r_ld64,0,
                   intr_r_ld64,0);
}
/*}}}*/
677
678const char* N2_Strand::n2_get_state_name( SS_Strand* s, SS_Registers::Index index )/*{{{*/
679{
680 N2_Strand* n2 = (N2_Strand*)s;
681 switch (index)
682 {
683 case SS_Registers::ASR_PCR:
684 return n2->pcr.name();
685 case SS_Registers::ASR_PIC:
686 return n2->pic.name();
687 default:
688 return ss_get_state_name(s,index);
689 }
690}
691/*}}}*/
692SS_Registers::Error N2_Strand::n2_get_state( SS_Strand* s, SS_Registers::Index index, uint64_t* value )/*{{{*/
693{
694 N2_Strand* n2 = (N2_Strand*)s;
695 switch (index)
696 {
697 case SS_Registers::ASR_PCR:
698 *value = n2->pcr();
699 break;
700 case SS_Registers::ASR_PIC:
701 *value = n2->pic();
702 break;
703 default:
704 return ss_get_state(s,index,value);
705 }
706 return SS_Registers::OK;
707}
708/*}}}*/
709SS_Registers::Error N2_Strand::n2_set_state( SS_Strand* s, SS_Registers::Index index, uint64_t value )/*{{{*/
710{
711 N2_Strand* n2 = (N2_Strand*)s;
712 switch (index)
713 {
714 case SS_Registers::ASR_PCR:
715 n2->pcr.set(value);
716 break;
717 case SS_Registers::ASR_PIC:
718 n2->pic.set(value);
719 break;
720 default:
721 return ss_set_state(s,index,value);
722 }
723 return SS_Registers::OK;
724}
725/*}}}*/
726
// Dump or restore all N2 specific strand state through the SS_SnapShot
// stream. The base class state goes first, then each N2 register tagged
// with this strand's name prefix. The emission order is the on-disk
// format: do not reorder entries. On load the derived lookup structures
// (tsb_config, tsb_ra2pa, watchpoint state, mmu context caches) are
// rebuilt from the just-restored architectural registers.
void N2_Strand::snapshot( SS_SnapShot& ss )/*{{{*/
{
  char prefix[32];
  get_name(prefix);

  SS_Strand::snapshot(ss);

  sprintf(ss.tag,"%s.%s",prefix,pcr.name()); pcr.snapshot(ss);
  sprintf(ss.tag,"%s.%s",prefix,pic.name()); pic.snapshot(ss);
  lsu_ctr.snapshot(ss,prefix);
  // fpu does not require snapshot.
  core_intr_id.snapshot(ss,prefix);
  core_id.snapshot(ss,prefix);
  SS_AsiCtrReg::snapshot(ss,primary_context,2,prefix,"pri");
  SS_AsiCtrReg::snapshot(ss,secondary_context,2,prefix,"sec");
  inst_tag_target.snapshot(ss,prefix,"inst");
  inst_tag_access.snapshot(ss,prefix,"inst");
  data_tag_target.snapshot(ss,prefix,"data");
  data_tag_access.snapshot(ss,prefix,"data");
  partition_id.snapshot(ss,prefix);
  SS_AsiCtrReg::snapshot(ss,real_range,4,prefix);
  SS_AsiCtrReg::snapshot(ss,physical_offset,4,prefix);
  SS_AsiCtrReg::snapshot(ss,nucleus_tsb_config,4,prefix,"nuc");
  SS_AsiCtrReg::snapshot(ss,non_nucleus_tsb_config,4,prefix,"non");
  SS_AsiCtrReg::snapshot(ss,inst_tsb_pointer,4,prefix,"inst");
  SS_AsiCtrReg::snapshot(ss,data_tsb_pointer,4,prefix,"data");
  hwtw_config.snapshot(ss,prefix);
  tw_control.snapshot(ss,prefix);
  inst_sfsr.snapshot(ss,prefix);
  data_sfsr.snapshot(ss,prefix);
  data_sfar.snapshot(ss,prefix);
  data_wp.snapshot(ss,prefix);
  cpu_mondo_head.snapshot(ss,prefix,"cpu_mondo_head");
  cpu_mondo_tail.snapshot(ss,prefix,"cpu_mondo_tail");
  dev_mondo_head.snapshot(ss,prefix,"dev_mondo_head");
  dev_mondo_tail.snapshot(ss,prefix,"dev_mondo_tail");
  resumable_head.snapshot(ss,prefix,"resumable_head");
  resumable_tail.snapshot(ss,prefix,"resumable_tail");
  non_resumable_head.snapshot(ss,prefix,"non_resumable_head");
  non_resumable_tail.snapshot(ss,prefix,"non_resumable_tail");
  sprintf(ss.tag,"%s.intr_recv",prefix); ss.val(&intr_recv);
  intr_r.snapshot(ss,prefix);
  seter.snapshot(ss,prefix);
  desr.snapshot(ss,prefix);
  sprintf(ss.tag,"%s.tw_status",prefix); ss.val(&tw_status);

  if (ss.do_load())
  {
    // Rebuild the derived (non-architectural) lookup state from the
    // restored registers.

    for (int r = 0; r < 4; r++)
    {
      N2_TsbConfig& zc = nucleus_tsb_config[r];
      N2_TsbConfig& nc = non_nucleus_tsb_config[r];

      // Decode the packed TSB config fields into the flat tsb_config
      // entries: base is 8k aligned (<< 13), size field is log2 - 9,
      // page size field encodes 13 + 3*n bits.
      tsb_config[r].update(zc.valid(), zc.tsb_base() << 13, zc.tsb_size() + 9,
                           zc.page_size() * 3 + 13, zc.ra_not_pa(), zc.use_context());
      tsb_config[r + 4].update(nc.valid(), nc.tsb_base() << 13, nc.tsb_size() + 9,
                               nc.page_size() * 3 + 13, nc.ra_not_pa(), nc.use_context());

      N2_RealRange& rr = real_range[r];
      N2_PhysicalOffset& po = physical_offset[r];
      tsb_ra2pa[r].update(rr.enable(),rr.rpn_low(),rr.rpn_high(),po.ptv());
    }

    // Replay the register store hooks so their side effects (watchpoint
    // state, mmu context caches, lsu control) are reapplied.

    data_wp_st64(0,0,this,0,data_wp());

    inst_ctx_ra.set_pid(partition_id());
    inst_ctx_va.set_pid(partition_id());
    inst_ctx_va.set_pri_ctx0(primary_context[0]());
    inst_ctx_va.set_pri_ctx1(primary_context[1]());

    data_ctx.set_pid(partition_id());
    data_ctx.set_pri_ctx0(primary_context[0]());
    data_ctx.set_pri_ctx1(primary_context[1]());
    data_ctx.set_sec_ctx0(secondary_context[0]());
    data_ctx.set_sec_ctx1(secondary_context[1]());

    n2_lsu_ctr_st64(0,&lsu_ctr,this,0,lsu_ctr());
  }
}
/*}}}*/
807
// Instruction fetch in physical (bypass) address mode: no TLB lookup is
// done; a preprogrammed TTE is selected by address class (32bit clipped
// memory for pstate.am, I/O space when the top pa bit is set, plain
// memory otherwise), then the decode cache line is filled through the
// selected memory op.
SS_Vaddr N2_Strand::n2_inst_mmu_pa( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_InstrCache::Tag* line_tag )/*{{{*/
{
  N2_Strand* n2 = (N2_Strand*)s;
  SS_Tte* tte;
  SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);   // mask down to cacheline base

  // Top implemented pa bit set means this is an I/O space access.
  int io = (pc >> (n2->pa_bits() - 1)) & 1;

  // First check for on or near UI breakpoints.

  if (n2->near_break_inst_va(tm,pc))
  {
    // Check if we hit the breakpoint. If so then exit now.
    // Else fetch a line, decode, execute the instructions
    // guided by the ss_break_inst_va_dec() routine.

    if (n2->test_break_inst_va(pc))
    {
      s->skip_break_inst_va = true;
      return pc;
    }
    else
    {
      n2->inst_dec = ss_break_inst_va_dec;
    }
  }

  // Now select the TTE to be used for the PA to PA translation.

  if (n2->pstate.am())
  {
    // In 32bit mode the pc gets clipped to 32bit address space.
    // We use a preprogrammed TTE to handle this situation.

    tte = n2->phys_tte_mem_am;
  }
  else if (io)
  {
    // Check the I/O regions that we can fetch from.

    if (!pc_iorange(pc))
    {
      n2->inst_tag_update(0,pc);
      return (n2->inst_trap)(pc,npc,s,line,pc,SS_Trap::INSTRUCTION_ACCESS_ERROR); // prio 4.0
    }
    tte = n2->phys_tte_io;
  }
  else
  {
    // Normal 64bit address memory access.

    tte = n2->phys_tte_mem;
  }

  // Set the TTE that is currently used for instruction fetch,
  // update the decode cache tag and tte, and add the TTE to the
  // linked lists of TTEs to make flushing TTEs from the instruction
  // caches a simple task of walking the list of cachelines that
  // have been used.

  SS_Memop exe = n2->mem_table[0][io];
  SS_Vaddr tag = pc & -(SS_InstrCache::LINE_SIZE * 4);

  n2->inst_tte = tte;

  line_tag->tag = tag;
  line_tag->tte = tte;
  line_tag->lnk.unlink();
  line_tag->lnk.insert_after(&n2->inst_tte_link[tte->index]);

  return (exe)(pc,npc,s,line,tag,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_inst_mmu_ra( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_InstrCache::Tag* line_tag )/*{{{*/
//
// n2_inst_mmu_ra() performs the instruction fetch translation when the
// strand fetches with real addresses (RA->PA mode). In priority order it
// checks UI breakpoints, the real address range, does the ITLB ra2pa
// lookup, detects ITLB multi-hit/parity errors, checks the TTE attribute
// bits, and finally updates the decode cache line tag before dispatching
// to the fetch routine selected from mem_table. Every trap exit updates
// the I-MMU tag access registers (context 0 for real addresses) first.
{
    N2_Strand* n2 = (N2_Strand*)s;
    SS_Tte* tte;
    SS_Vaddr va = pc & n2->mask_pstate_am;          // apply pstate.am 32-bit masking
    SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);  // decode cache line alignment mask

    // First check for on or near UI breakpoints.

    if (n2->near_break_inst_va(tm,pc))
    {
        // Check if we hit the breakpoint. If so then exit now.
        // Else fetch a line, decode, execute the instructions
        // guided by the ss_break_inst_va_dec() routine.

        if (n2->test_break_inst_va(pc))
        {
            s->skip_break_inst_va = true;
            return pc;
        }
        else
        {
            n2->inst_dec = ss_break_inst_va_dec;
        }
    }

    // Handle real range address checks. We use trampoline
    // decoding to get the corner cases right when pc is in the
    // cacheline that should cause the trap but is not on it.

    if (!pc_onrange(va))
    {
        if (!pc_inrange(va))
        {
            n2->inst_tag_update(0,va);
            return (n2->inst_trap)(pc,va_signext(npc),s,line,va,SS_Trap::INSTRUCTION_REAL_RANGE); // prio 2.6
        }

        // The current pc is not on the va-edge (last 16 bytes of the
        // positive end of the address space). To make sure we get the
        // traps for the real range we trampoline the decoder through
        // n2_dec_real_range. Note after this mmu routine we fetch the
        // cacheline and set the exe part of the instruction to inst_dec.
        // So switching the decoder temporarily makes us trampoline into
        // the decoder that knows about the real range traps.

        n2->inst_dec = n2_dec_real_range;
    }

    // Lookup the TLB and check that the TTE can be used for translation.
    // E.g. check that no error conditions occurred on the TLB lookup
    // (inst_mmu_error is set in cosim mode only) and that the TLB lookup
    // found a matching TTE. The cached n2->inst_tte is tried first to
    // avoid a full TLB lookup on repeated fetches from the same page.

    bool tte_multi_hit = false;

    if ((n2->inst_tte != n2->fail_tte) && n2->inst_tte->match_real(va,n2->partition_id()))
        tte = n2->inst_tte;
    else
        tte = ((N2_Tlb*)n2->inst_tlb)->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);

    if (n2->inst_mmu_error)
    {
        assert(n2->sim_state.cosim());
        n2->inst_mmu_error = false;
        n2->inst_tag_update(0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
    }
    else if (tte == 0)
    {
        n2->inst_tag_update(0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INST_REAL_TRANSLATION_MISS); // prio 2.8
    }
    // ITLB Multi Hit, Tag and Data Parity Error Detection
    else if (tte->has_errors() || tte_multi_hit)
    {
        N2_Core& n2_core = n2->core;
        int itlb_error_type = 0;

        // Multiple hits are detected only if ittm bit is set in CERER
        if (n2_core.cerer.ittm() && tte_multi_hit)
            itlb_error_type = 1;
        // Tag Parity Errors in ITLB are detected only if the ITTP bit is set in CERER
        else if (n2_core.cerer.ittp() && tte->tag_parity_error())
            itlb_error_type = 2;
        // Data Parity Errors in ITLB are detected only if the ITDP bit is set in CERER
        else if (n2_core.cerer.itdp() && tte->data_parity_error())
            itlb_error_type = 3;

        // The error type is recorded in the ISFSR
        if (itlb_error_type > 0)
        {
            n2->inst_sfsr.error_type(itlb_error_type);
            n2->inst_tag_update(0,va);
            return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
        }
    }

    // The top implemented physical address bit selects the I/O space.

    int io = (tte->phys_page >> (n2->pa_bits() - 1)) & 1;

    // Check the properties of the TTE: no fault, and check fetching
    // from restricted I/O spaces. Note that we never fetch real addresses
    // in user mode, so we don't have to check for privileged violation.

    if (tte->nfo())
    {
        n2->inst_tag_update(0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::IAE_NFO_PAGE); // prio 3.3
    }
    else if (io && !pc_iorange(tte->trans(pc)))
    {
        n2->inst_tag_update(0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_ERROR); // prio 4.0
    }

    SS_Memop exe = n2->mem_table[0][io];
    SS_Vaddr tag = pc & -(SS_InstrCache::LINE_SIZE * 4);

    // Set the TTE that is currently used for instruction fetch,
    // update the decode cache tag and tte, and add the TTE to the
    // linked lists of TTE to make flushing TTEs from the instruction
    // caches a simple task of walking the list of cachelines that
    // have been used.

    n2->inst_tte = tte;

    line_tag->tag = tag;
    line_tag->tte = tte;
    line_tag->lnk.unlink();
    line_tag->lnk.insert_after(&n2->inst_tte_link[tte->index]);

    return (exe)(pc,npc,s,line,tag,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_inst_mmu_va( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_InstrCache::Tag* line_tag )/*{{{*/
//
// n2_inst_mmu_va() performs the instruction fetch translation when the
// strand fetches with virtual addresses (VA->PA mode). In priority order
// it checks UI breakpoints, the VA watchpoint, the virtual address range,
// then does the ITLB va2pa lookup for both primary contexts (falling back
// to a hardware table walk on a miss), detects ITLB multi-hit/parity
// errors, checks the TTE attribute bits, and finally updates the decode
// cache line tag before dispatching to the fetch routine.
{
    bool tte_multi_hit = false;
    N2_Strand* n2 = (N2_Strand*)s;
    SS_Tte* tte;

    // At trap level > 0 instruction fetch uses the nucleus context (0).

    uint64_t ctxt0 = n2->tl() ? 0 : n2->primary_context[0]();
    uint64_t ctxt1 = n2->tl() ? 0 : n2->primary_context[1]();

    SS_Vaddr va = pc & n2->mask_pstate_am;          // apply pstate.am 32-bit masking
    SS_Vaddr tm = -(SS_InstrCache::LINE_SIZE * 4);  // decode cache line alignment mask

    // First check for on or near UI breakpoints.

    if (n2->near_break_inst_va(tm,pc))
    {
        // Check if we hit the breakpoint. If so then exit now.
        // Else fetch a line, decode, execute the instructions
        // guided by the ss_break_inst_va_dec() routine.

        if (n2->test_break_inst_va(pc))
        {
            s->skip_break_inst_va = true;
            return pc;
        }
        else
        {
            n2->inst_dec = ss_break_inst_va_dec;
        }
    }

    // Handle va watchpoint and address range checks. For both
    // exceptions we use trampoline decoding to get the corner cases
    // right when pc is in the cacheline that should cause the traps.

    if (n2->inst_watchpoint_va_hit(va))
    {
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_VA_WATCHPOINT); // prio 2.5
    }
    else if (!pc_onrange(va))
    {
        if (!pc_inrange(va))
        {
            n2->inst_tag_update(ctxt0,va);
            return (n2->inst_trap)(pc,va_signext(npc),s,line,va,SS_Trap::INSTRUCTION_ADDRESS_RANGE); // prio 2.6
        }

        // The current pc is not on the va-edge (last 16 bytes of the
        // positive end of the address space). To make sure we get the
        // traps for the address range we trampoline the decoder through
        // n2_dec_address_range. Note after this mmu routine we fetch the
        // cacheline and set the exe part of the instruction to inst_dec.
        // So switching the decoder temporarily makes us trampoline into
        // the decoder that knows about the address range traps.

        n2->inst_dec = n2_dec_address_range;
    }
    else if (n2->inst_watchpoint_va_near_hit(tm,va))
    {
        // The current pc did not match the va watchpoint address, however
        // the va watchpoint is enabled and falls in the same cacheline.
        // So trampoline through n2_dec_va_watchpoint to get the watchpoint
        // trap on the correct pc.

        n2->inst_dec = n2_dec_va_watchpoint;
    }

    // Lookup the TLB and check that the TTE can be used for translation.
    // E.g. check that no error conditions occurred on the TLB lookup
    // (inst_mmu_error is set in cosim mode only) and that the TLB lookup
    // found a matching TTE. If we didn't find a TTE in the TLB then
    // perform a hardware table walk to bring one in from the TSB(s).

    if ((n2->inst_tte != n2->fail_tte) && n2->inst_tte->match_virt(va,ctxt0,ctxt1,n2->partition_id()))
        tte = n2->inst_tte;
    else
    {
        // Lookup va for both contexts. If both match then we have a multi
        // hit case too. However, if both contexts are the same then this is ok.

        bool tte_multi_hit0;
        bool tte_multi_hit1 = false;
        SS_Tte* tte0;
        SS_Tte* tte1 = 0;

        tte0 = ((N2_Tlb*)n2->inst_tlb)->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit0);
        if (ctxt0 != ctxt1)
            tte1 = ((N2_Tlb*)n2->inst_tlb)->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit1);

        tte = tte0 ? tte0 : tte1;
        tte_multi_hit = ((tte0 != 0) && (tte1 != 0)) || tte_multi_hit0 || tte_multi_hit1;
    }

    if (n2->inst_mmu_error)
    {
        assert(n2->sim_state.cosim());
        n2->inst_mmu_error = false;
        n2->inst_tag_update(ctxt0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
    }
    else if (tte)
    {
        // ITLB Multi Hit, Tag and Data Parity Error Detection
        if (tte->has_errors() || tte_multi_hit)
        {
            N2_Core& n2_core = n2->core;
            int itlb_error_type = 0;

            // Multiple hits are detected only if ittm bit is set in CERER
            if (n2_core.cerer.ittm() && tte_multi_hit)
                itlb_error_type = 1;
            // Tag Parity Errors in ITLB are detected only if the ITTP bit is set in CERER
            else if (n2_core.cerer.ittp() && tte->tag_parity_error())
                itlb_error_type = 2;
            // Data Parity Errors in ITLB are detected only if the ITDP bit is set in CERER
            else if (n2_core.cerer.itdp() && tte->data_parity_error())
                itlb_error_type = 3;

            // The error type is recorded in the ISFSR
            if (itlb_error_type > 0)
            {
                n2->inst_sfsr.error_type(itlb_error_type);
                n2->inst_tag_update(ctxt0,va);
                return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_MMU_ERROR); // Prio 2.7
            }
        }
    }
    else
    {
        // TLB miss: hardware table walk. On failure trap_htw holds the
        // trap type selected by n2_inst_htw().

        if ((tte = n2->n2_inst_htw(va,ctxt0,ctxt1)) == 0)
            return (n2->inst_trap)(pc,npc,s,line,va,n2->trap_htw);
    }

    // The top implemented physical address bit selects the I/O space.

    int io = (tte->phys_page >> (n2->pa_bits() - 1)) & 1;

    // Check the properties of the TTE: privileged violation, no fault,
    // and check fetching from restricted I/O spaces.

    if (tte->p() && (n2->sim_state.priv() == SS_USER))
    {
        n2->inst_tag_update(ctxt0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::IAE_PRIVILEGE_VIOLATION); // prio 3.1
    }
    else if (tte->nfo())
    {
        n2->inst_tag_update(ctxt0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::IAE_NFO_PAGE); // prio 3.3
    }
    else if (io && !pc_iorange(tte->trans(pc)))
    {
        n2->inst_tag_update(ctxt0,va);
        return (n2->inst_trap)(pc,npc,s,line,va,SS_Trap::INSTRUCTION_ACCESS_ERROR); // prio 4.0
    }

    SS_Memop exe = n2->mem_table[0][io];
    SS_Vaddr tag = pc & tm;

    // Set the TTE that is currently used for instruction fetch,
    // update the decode cache tag and tte, and add the TTE to the
    // linked lists of TTE to make flushing TTEs from the instruction
    // caches a simple task of walking the list of cachelines that
    // have been used.

    n2->inst_tte = tte;

    line_tag->tag = tag;
    line_tag->tte = tte;
    line_tag->lnk.unlink();
    line_tag->lnk.insert_after(&n2->inst_tte_link[tte->index]);

    return (exe)(pc,npc,s,line,tag,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_data_mmu( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr va, uint_t mem )/*{{{*/
//
// n2_data_mmu() performs the data access translation for loads, stores,
// atomics and cohere (flush/prefetch) operations. Three translation modes
// are handled:
//   PA->PA : hyperprivileged bypass ASIs, no TLB involved;
//   RA->PA : D-MMU disabled or real ASIs, DTLB ra2pa lookup;
//   VA->PA : DTLB va2pa lookup for both contexts, with hardware table
//            walk on a miss.
// After translation the TTE attribute bits are checked in trap priority
// order, the pa watchpoint is tested, and the access is dispatched to
// mem_table[mem][le|io] with the endianness/I/O bits folded into the low
// bits of the TTE pointer. Trap exits update the D-MMU tag access
// registers first (except for cohere ops, which must not).
{
    N2_Strand* n2 = (N2_Strand*)s;
    SS_Tte* tte;
    int le;                     // little endian access flag
    uint64_t io;                // I/O space flag (top PA bit)
    bool pa2pa = false;         // true for hyperprivileged bypass accesses

    SS_AsiInfo asi_info = n2->asi_info[i->asi];

    va &= n2->mask_pstate_am;   // apply pstate.am 32-bit masking

    // Select the context pair based on the ASI (primary/secondary/nucleus).

    SS_Context ctxt0, ctxt1;
    if (asi_info.is_primary())
    {
        ctxt0 = n2->primary_context[0]();
        ctxt1 = n2->primary_context[1]();
    }
    else if (asi_info.is_secondary())
    {
        ctxt0 = n2->secondary_context[0]();
        ctxt1 = n2->secondary_context[1]();
    }
    else
    {
        //assert(asi_info.is_nucleus());
        ctxt0 = 0;
        ctxt1 = 0;
    }

    if ((n2->sim_state.priv() == SS_HPRV) && asi_info.is_bypass())
    {
        // PA->PA: bypass translation, use the fixed phys mem/io TTEs.

        io = (va >> (n2->pa_bits() - 1)) & 1;

        if (io && (i->is_atomic()
            || (i->is_read() && (asi_info.is_quad_load_asi() || asi_info.is_block_asi()))))
        {
            n2->data_tag_update(ctxt0,va);
            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NC_PAGE); // Prio 12.5
        }

        le = asi_info.is_little_endian();
        tte = io ? n2->phys_tte_io : n2->phys_tte_mem;
        pa2pa = true;
    }
    else
    {

        if (((n2->sim_state.priv() < SS_HPRV) && !n2->sim_state.data_mmu()) || asi_info.is_real()) // RA->PA
        {
            if (!va_inrange(va))
            {
                return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::MEM_REAL_RANGE); // Prio 11.3
            }
            else
            {
                bool tte_multi_hit;

                ctxt0 = 0; // set context field to 0 in case of RA->PA

#ifdef COMPILE_FOR_COSIM
                (n2->data_tlb_read)(n2->tlb_sync);
                n2->data_tlb_read_skip = true;
#endif
                tte = ((N2_Tlb*)n2->data_tlb)->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);

                if (n2->data_mmu_error)
                {
                    n2->data_mmu_error = false;
                    n2->data_tag_update(ctxt0,va);
                    return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
                }
                else if (tte)
                {
                    // DTLB Multi Hit, Tag and Data Parity Error Detection
                    if (tte->has_errors() || tte_multi_hit)
                    {
                        N2_Core& n2_core = n2->core;
                        int dtlb_error_type = 0;

                        // Multiple hits are detected only if DTTM bit is set in CERER
                        if (n2_core.cerer.dttm() && tte_multi_hit)
                            dtlb_error_type = 1;
                        // Tag Parity Errors in DTLB are detected only if the DTTP bit is set in CERER
                        else if (n2_core.cerer.dttp() && tte->tag_parity_error())
                            dtlb_error_type = 2;
                        // Data Parity Errors in DTLB are detected only if the DTDP bit is set in CERER
                        else if (n2_core.cerer.dtdp() && tte->data_parity_error())
                            dtlb_error_type = 3;

                        // Error type and va are stored in DSFSR and DSFAR respectively
                        if (dtlb_error_type > 0)
                        {
                            n2->data_sfsr.error_type(dtlb_error_type);
                            n2->data_sfar.error_addr(va);
                            n2->data_tag_update(ctxt0,va);
                            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
                        }
                    }
                }
                else
                {
                    if (!i->is_cohere())
                        n2->data_tag_update(ctxt0,va);
                    return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_REAL_TRANSLATION_MISS); // Prio 12.3
                }
            }
        }
        else // VA->PA
        {
            if (n2->va_watchpoint_hit(i,va))
            {
                return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::VA_WATCHPOINT); // Prio 11.2
            }
            else if (!va_inrange(va))
            {
                return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::MEM_ADDRESS_RANGE); // Prio 11.3
            }
            else
            {
#ifdef COMPILE_FOR_COSIM
                (n2->data_tlb_read)(n2->tlb_sync);
                n2->data_tlb_read_skip = true;
#endif

                // Lookup va for both contexts. If both match then we have a multi
                // hit case too. However, if both contexts are the same then this is ok.

                bool tte_multi_hit0;
                bool tte_multi_hit1 = false;
                SS_Tte* tte0;
                SS_Tte* tte1 = 0;

                tte0 = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit0);
                if (ctxt0 != ctxt1)
                    tte1 = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit1);

                tte = tte0 ? tte0 : tte1;
                bool tte_multi_hit = ((tte0 != 0) && (tte1 != 0)) || tte_multi_hit0 || tte_multi_hit1;

                if (n2->data_mmu_error)
                {
                    n2->data_mmu_error = false;
                    n2->data_tag_update(ctxt0,va);
                    return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
                }
                else if (tte)
                {
                    // DTLB Multi Hit, Tag and Data Parity Error Detection
                    if (tte->has_errors() || tte_multi_hit)
                    {
                        N2_Core& n2_core = n2->core;
                        int dtlb_error_type = 0;

                        // Multiple hits are detected only if DTTM bit is set in CERER
                        if (n2_core.cerer.dttm() && (tte->multi_hit() || tte_multi_hit))
                            dtlb_error_type = 1;
                        // Tag Parity Errors in DTLB are detected only if the DTTP bit is set in CERER
                        else if (n2_core.cerer.dttp() && tte->tag_parity_error())
                            dtlb_error_type = 2;
                        // Data Parity Errors in DTLB are detected only if the DTDP bit is set in CERER
                        else if (n2_core.cerer.dtdp() && tte->data_parity_error())
                            dtlb_error_type = 3;

                        // Error type and va are stored in DSFSR and DSFAR respectively
                        if (dtlb_error_type > 0)
                        {
                            n2->data_sfsr.error_type(dtlb_error_type);
                            n2->data_sfar.error_addr(va);
                            n2->data_tag_update(ctxt0,va);
                            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
                        }
                    }
                }
                else
                {
                    if (i->is_cohere())
                    {
#ifdef COMPILE_FOR_COSIM
                        n2->data_tlb_read_skip = false;
#endif
                        // Output trace for cohere that mis tlb; they don't trap.
                        if (n2->trc_hook)
                            n2->trc_hook->mem_access(i->is_fetch() ? SS_Tracer::PREFETCH : SS_Tracer::FLUSH,va,0,0,0);

                        n2->npc = npc + 4;
                        return npc;
                    }

                    // TLB miss: hardware table walk. On failure trap_htw
                    // holds the trap type selected by n2_data_htw().

                    tte = n2->n2_data_htw(va,ctxt0,ctxt1);

                    if (tte == 0)
                        return (n2->data_trap)(pc,npc,s,i,va,n2->trap_htw);
                }
            }
        }

        io = (tte->phys_page >> (n2->pa_bits() - 1)) & 1;

        // Check the TTE properties: privileged violation when we are in user
        // mode or pretend to be user (as_is_user asi), atomic operations from
        // i/o of non cacheable pages, no fault violations, side effects, or
        // write operation to read only pages.

        if (tte->p() && ((n2->sim_state.priv() == SS_USER) || asi_info.is_as_if_user()))
        {
            if (!i->is_cohere())
                n2->data_tag_update(ctxt0,va);
            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_PRIVILEGE_VIOLATION); // Prio 12.4
        }
        else if ((io && (i->is_atomic()
            || (i->is_read() && (asi_info.is_quad_load_asi() || asi_info.is_block_asi()))))
            || (!tte->cp() && (i->is_atomic()
            || (i->is_read() && asi_info.is_quad_load_asi()))))
        {
            n2->data_tag_update(ctxt0,va);
            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NC_PAGE); // Prio 12.5
        }
        else if (tte->nfo() && !asi_info.is_nofault())
        {
            if (!i->is_cohere())
                n2->data_tag_update(ctxt0,va);
            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_NFO_PAGE); // Prio 12.6
        }
        else if (tte->e() && asi_info.is_nofault())
        {
            if (!i->is_cohere())
                n2->data_tag_update(ctxt0,va);
            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::DAE_SO_PAGE); // Prio 12.6
        }
        else if (!tte->w() && i->is_write())
        {
            if (!i->is_cohere())
                n2->data_tag_update(ctxt0,va);
            return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::FAST_DATA_ACCESS_PROTECTION); // Prio 12.7
        }

        // ASI endianness xor TTE invert-endianness gives the access order.

        le = (asi_info.is_little_endian() ^ tte->ie());
    }

    // Check for pa watchpoints

    SS_Paddr pa = tte->trans(va);

    if (!i->is_cohere() && n2->pa_watchpoint_hit(i,pa))
    {
        return (n2->data_trap)(pc,npc,s,i,va,SS_Trap::PA_WATCHPOINT); // Prio 12.8
    }

    // All ra2pa and va2pa use TTEs that come from the TLB. We keep a linked list
    // per TLB index of locations where a TTE is used such that we can efficiently
    // update the decode caches. The pa2pa TTEs are not stored in the linked list.

    if (!n2->data_wp_check && !pa2pa)
    {
        i->lnk.unlink();
        i->lnk.insert_after(&n2->data_tte_link[tte->index]);
    }

    // Set the lower bits of the TTE pointer to include le (little endian) and io (I/O)
    // This used in the memory access part of the memory operation.

    tte = i->set_tte(le,io << 1,tte);

    // Call the routines that handle memory. We have a separate routines for
    // memory and i/o, and big or little endian this to make the best of optimizers.

    SS_Memop exe = n2->mem_table[mem][((long)tte & 3)];

#ifdef COMPILE_FOR_COSIM
    n2->data_tlb_read_skip = false;
    i->tte = n2->fail_tte;
#endif

    // In hyper privileged mode we do not cache the TTE when the ASI specifies
    // that we need to translate (not bypass). Note the only ASIs that translate
    // in hyper privileged mode are the AS_IF ASIs. This check makes decode caching
    // perform better as we have to flush much less.

    if (n2->data_wp_check || ((n2->sim_state.priv() == SS_HPRV) && !pa2pa))
        i->tte = n2->fail_tte;

    return (exe)(pc,npc,s,i,va,tte);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_invalid_asi( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr ea )/*{{{*/
//
// n2_invalid_asi() is the trap funnel for lsu instructions whose ASI is
// invalid, or whose privilege mode was wrong at decode time. The higher
// priority traps (alignment 10.1/10.2, privileged action 11.1, watchpoint
// 11.2, address range 11.3, mmu error 12.2) must still be checked in
// order before DAE_INVALID_ASI (12.1 relative to the remaining checks)
// can be raised.
{
    // All lsu instructions for which the privilege mode is wrong at decode time,
    // or for which the used asi is not valid end up here. Go through the trap checking
    // motion as we need to raise one with the correct priority.

    N2_Strand* n2 = (N2_Strand*)s;
    SS_AsiInfo ai = n2->asi_info[i->asi];

    if (!i->is_cohere() && (ea & (i->len - 1)))
    {
        // Misaligned access: lddf/stdf on a 4-byte boundary get their own
        // trap types, everything else gets the generic alignment trap.

        if ((i->len == 8) && (ea & 7) == 0x4)
        {
            if ((i->opc.get_op3() & 0x2f) == 0x23) // lddf & lddfa
                return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::LDDF_MEM_ADDRESS_NOT_ALIGNED); // Prio 10.1
            else if (((i->opc.get_op3() & 0x2f) == 0x27)) // stdf & stdfa
                return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::STDF_MEM_ADDRESS_NOT_ALIGNED); // Prio 10.1
        }
        return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_ADDRESS_NOT_ALIGNED); // Prio 10.2
    }
    else if (!i->is_cohere() && (s->sim_state.priv() < s->asi_info[i->asi].get_protection()))
    {
        return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::PRIVILEGED_ACTION); // Prio 11.1
    }
    else if (ai.is_translating())
    {
        ea &= n2->mask_pstate_am;   // apply pstate.am 32-bit masking

        if ((n2->sim_state.priv() == SS_HPRV) && ai.is_bypass())
        {
            // PA->PA: no further translation related traps apply.
        }
        else if (((n2->sim_state.priv() < SS_HPRV) && !n2->sim_state.data_mmu()) || ai.is_real())
        {
            // RA->PA

            if (!i->is_cohere() && !va_inrange(ea))
                return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_REAL_RANGE); // Prio 11.3
            else if (n2->data_mmu_error)
            {
                n2->data_mmu_error = false;
                n2->data_tag_update(0,ea);
                return (n2->data_trap)(pc,npc,s,i,ea,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
            }
        }
        else
        {
            // VA->PA

            if (!i->is_cohere() && (n2->va_watchpoint_hit(i,ea)))
                return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::VA_WATCHPOINT); // Prio 11.2

            if (!i->is_cohere() && !va_inrange(ea))
                return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::MEM_ADDRESS_RANGE); // Prio 11.3
            else if (n2->data_mmu_error)
            {
                // Select the context pair for the tag update from the ASI.

                SS_Context ctxt0, ctxt1;
                if (ai.is_primary())
                {
                    ctxt0 = n2->primary_context[0]();
                    ctxt1 = n2->primary_context[1]();
                }
                else if (ai.is_secondary())
                {
                    ctxt0 = n2->secondary_context[0]();
                    ctxt1 = n2->secondary_context[1]();
                }
                else
                {
                    assert(ai.is_nucleus());
                    ctxt0 = 0;
                    ctxt1 = 0;
                }
                n2->data_mmu_error = false;
                n2->data_tag_update(ctxt0,ea);
                return (n2->data_trap)(pc,npc,s,i,ea,SS_Trap::DATA_ACCESS_MMU_ERROR); // Prio 12.2
            }
        }
    }
    else // non translating
    {
        // Non-translating ASIs have no additional checks here.
    }

    return (s->data_trap)(pc,npc,s,i,ea,SS_Trap::DAE_INVALID_ASI); // Prio 12.1
}
/*}}}*/
1560
1561void N2_Strand::inst_tag_update( uint_t context, SS_Vaddr va )/*{{{*/
1562{
1563 va = va_signext(va);
1564 inst_tag_access = va;
1565 inst_tag_target.va(va >> 22);
1566 inst_tag_access.context(context);
1567 inst_tag_target.context(context);
1568
1569 SS_TsbConfig* tsb_cfg = context ? &tsb_config[4] : tsb_config;
1570 for (int i=0; i < 4; i++)
1571 inst_tsb_pointer[i] = tsb_cfg[i].index(va);
1572}
1573/*}}}*/
1574void N2_Strand::data_tag_update( uint_t context, SS_Vaddr va )/*{{{*/
1575{
1576 va = va_signext(va);
1577 data_tag_access = va;
1578 data_tag_target.va(va >> 22);
1579 data_tag_access.context(context);
1580 data_tag_target.context(context);
1581
1582 SS_TsbConfig* tsb_cfg = context ? &tsb_config[4] : tsb_config;
1583 for (int i=0; i < 4; i++)
1584 data_tsb_pointer[i] = tsb_cfg[i].index(va);
1585}
1586/*}}}*/
1587
1588SS_Vaddr N2_Strand::n2_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Trap::Type tt )/*{{{*/
1589{
1590 N2_Strand* n2 = (N2_Strand*)s;
1591 uint_t context;
1592
1593 return SS_Strand::ss_trap(pc,npc,s,i,tt);
1594}
1595/*}}}*/
SS_Vaddr N2_Strand::n2_inst_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* line, SS_Vaddr va, SS_Trap::Type tt )/*{{{*/
//
// n2_inst_trap() is the common exit path for instruction fetch related
// traps. It invalidates the cached fetch TTE, fixes up jmpl range traps
// for ra2pa mode, records the faulting address in the DSFAR for the
// range/alignment cases, notifies the tracer (clearing the decode cache
// line for true fetch traps), and finally raises the trap.
{
    N2_Strand* n2 = (N2_Strand*)s;

    n2->inst_tte = n2->fail_tte;

    // Jmpl causes MEM_ADDRESS_RANGE Traps. When the mmu is in ra2pa mode
    // the trap should be MEM_REAL_RANGE. We tunnel these traps through the
    // inst_trap iso data_trap as they are treated differently that way :-)

    switch (tt)
    {
    case SS_Trap::MEM_ADDRESS_RANGE:
        if (n2->inst_mmu == n2->inst_mmu_ra)
            tt = SS_Trap::MEM_REAL_RANGE;
        // note: intentional fallthrough - both range traps record the DSFAR
    case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
        n2->data_sfar = va_signext(va & n2->mask_pstate_am);
        break;
    default:
        break;
    }

    if (n2->trc_hook)
    {
        // Clear the decode cache line in case of inst mmu trap
        switch (tt)
        {
        case SS_Trap::MEM_ADDRESS_RANGE:
        case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
            n2->trc_hook->reg_value(N2_Registers::REG_DATA_SFAR,n2->data_sfar());
            break;
        case SS_Trap::MEM_REAL_RANGE:
            // this is coming from jmpl, no need to clear up decode cache
            break;
        default:
            for (int i=0; i < SS_Instr::LINE_SIZE; i++)
                line->line_index(i)->opc = 0;
            break;
        }

        n2->trc_hook->inst_trap(va);
    }

    return (n2->trap)(pc,npc,s,line,tt);
}
/*}}}*/
SS_Vaddr N2_Strand::n2_data_trap( SS_Vaddr pc, SS_Vaddr npc, SS_Strand* s, SS_Instr* i, SS_Vaddr va, SS_Trap::Type tt )/*{{{*/
//
// n2_data_trap() is the common exit path for data access related traps.
// In cosim mode it first reconciles the pli message streams (tlb read /
// tlb lookup) with the fact that this access traps. Cohere operations
// are filtered out (they do not trap except for mmu errors), the decode
// cache TTE is invalidated, the backdoor trap override from wrasi is
// applied, the DSFAR is updated for the trap types that report a fault
// address, and finally the trap is raised.
{
    N2_Strand* n2 = (N2_Strand*)s;

#ifdef COMPILE_FOR_COSIM
    if (n2->data_tlb_read_skip)
    {
        // Before the mmu does a tlb lookup it sets this flag as after the lookup
        // hits and something is wrong we get here and should not do a tlb read again.

        n2->data_tlb_read_skip = false;
    }
    else if ((n2->asi_info[i->asi].is_translating()) &&
             ((n2->sim_state.priv() != SS_HPRV) ||
              !(n2->asi_info[i->asi].is_bypass())))
    {
        // Only pull the dtlb_read message when the trap priority >= 10.0 (x10)
        // ToDo only traps with priority >= 10.1 go through here ???

        if (SS_Trap::table[tt].priority >= 100)
        {
            (n2->data_tlb_read)(n2->tlb_sync);
        }
    }

    if ((tt == SS_Trap::DAE_INVALID_ASI) || (tt == SS_Trap::PRIVILEGED_ACTION))
    {
        // TLBLOOKUP pli messages are send out too early so we have to fix that up
        // here when the supposed instruction actually traps and does not use the tlb.

        // Ben M. : it looks like the the signals we look at to generate
        // the ITLB LOOKUPs are in the ITLB and only happen after the ASI command
        // is put on the ASI RING. But it looks like the DTLB signals do not go
        // through the actual ASI ring but directory to the DTLB, and they assert
        // even if we get the ASI invalid trap. So I think that code needs
        // to be in place for DTLBLOOKUPs but not for ITLBLOOKUPs. ---> keep
        // ITLBLOOKUP code around for a while longer. 6/12/07
        switch (i->asi)
        {
        case 0x53: // inst_tlb_probe
        case 0x55: // inst_tlb_data_access
        case 0x56: // inst_tlb_tag_read
            {
                // a corresponding ASI_REAS & TLBLOOKUP were sent already, have to
                // pop them out
                uint64_t popout;
                n2->asi_map.ld64(n2, i->asi, va, &popout);
                (n2->inst_tlb_lookup)(n2->tlb_sync);
                break;
            }
        case 0x5d: // data_tlb_data_access
        case 0x5e: // data_tlb_tag_read
            {
                uint64_t popout;
                n2->asi_map.ld64(n2, i->asi, va, &popout);
                (n2->data_tlb_lookup)(n2->tlb_sync);
                break;
            }
        }
    }
#endif

    // Cohereing instructions don't cause a trap, so filter them out here
    // iso having if i->cohere() tests all over the place. However, we
    // do let DATA_ACCESS_MMU_ERROR through for cohere operations.

    if (i->is_cohere())
    {
        switch (tt)
        {
        case SS_Trap::DATA_ACCESS_MMU_ERROR:
            break;
        default:
            n2->npc = npc + 4;
            return npc;
        }
    }

    i->tte = n2->fail_tte;

    // For now, the UNSUPPORTED_PAGE_SIZE trap is the only trap that can
    // happen during wrasi. We use a backdoor, e.g. raise the INVALID_ASI
    // trap and detect here that an other trap should be raised. ToDo cleanup proper

    if (n2->trap_dae_inv_asi != SS_Trap::RESERVED)
    {
        tt = n2->trap_dae_inv_asi;
        n2->trap_dae_inv_asi = SS_Trap::RESERVED;
    }

    // Record the faulting address in the DSFAR for the trap types that
    // report it. The first group applies pstate.am masking first and
    // intentionally falls through into the second group.

    switch (tt)
    {
    case SS_Trap::DAE_INVALID_ASI:
    case SS_Trap::DAE_PRIVILEGE_VIOLATION:
    case SS_Trap::MEM_ADDRESS_NOT_ALIGNED:
    case SS_Trap::LDDF_MEM_ADDRESS_NOT_ALIGNED:
    case SS_Trap::STDF_MEM_ADDRESS_NOT_ALIGNED:
        va &= n2->mask_pstate_am;
        // note: intentional fallthrough
    case SS_Trap::DAE_NC_PAGE:
    case SS_Trap::DAE_NFO_PAGE:
    case SS_Trap::DAE_SO_PAGE:
    case SS_Trap::MEM_ADDRESS_RANGE:
    case SS_Trap::MEM_REAL_RANGE:
    case SS_Trap::FAST_DATA_ACCESS_PROTECTION:
    //case SS_Trap::PRIVILEGED_ACTION: ... stxa 58/30 no DSFAR ? ToDo is this right as the PRM says we should update
    case SS_Trap::VA_WATCHPOINT:
    case SS_Trap::PA_WATCHPOINT:
        n2->data_sfar = va_signext(va);
        break;
    default:
        break;
    }

    if (n2->trc_hook)
        n2->trc_hook->data_trap(va);

    return (n2->trap)(pc,npc,n2,i,tt);
}
/*}}}*/
1761
SS_Tte* N2_Strand::n2_inst_htw( SS_Vaddr va, SS_Context ctxt0, SS_Context ctxt1 )/*{{{*/
//
// n2_inst_htw() performs the instruction hardware table walk after an
// ITLB miss. It scans the four TSB configs selected by the context
// (ctxt0 == 0 uses tsb_config[0..3], non-zero uses tsb_config[4..7]),
// reads TTE tag/data pairs atomically from memory, validates them, and
// on a match inserts the TTE into the ITLB and returns it. On failure it
// returns 0 with trap_htw set to the trap the caller should raise.
// tw_status is 1 while the walk is in progress, 0 when done.
{
    SS_TsbConfig* tsb_cfg = ctxt0 ? &tsb_config[4] : &tsb_config[0];
    bool htw_enabled = false;

    tw_status = 1;
    inst_tag_update(ctxt0,va);

    for (int n=4; n--; tsb_cfg++)
    {
        if (!tsb_cfg->is_valid())
            continue;

        htw_enabled = true;

        SS_Paddr tsb_addr = tsb_cfg->index(va);

        // Read the 16 byte TTE tag/data pair atomically from the TSB.

        uint64_t tte_tag_data[2];
#ifdef MEMORY_MSYNC
        SS_MsyncMemory* msync_mem = (SS_MsyncMemory*)memory;
        msync_mem->msync_info(this->strand_id(),tsb_addr,SS_Memory::HTW);
#endif
        memory->ld128atomic(tsb_addr,tte_tag_data);
        tsb_tte_tag = tte_tag_data[0];
        tsb_tte_data = tte_tag_data[1];

        if (trc_hook)
            trc_hook->hwop(SS_Tracer::LD_CODE, tsb_addr, 16, tte_tag_data);

        // Mask off the in-page va bits for the configured page size
        // before comparing against the tag.

        SS_Vaddr vpn_mask = ~((SS_Vaddr(1) << (tsb_tte_data.size() * 3 + 13)) - SS_Vaddr(1));

        if (tsb_tte_data.v()
            && (tsb_tte_tag.reserved0() == 0) && (tsb_tte_tag.reserved1() == 0)
            && tsb_cfg->match(va & vpn_mask,(tsb_tte_tag.va() << 22) & vpn_mask)
            && (tsb_cfg->get_page_size() <= tsb_tte_data.size()) && (tsb_tte_data.size() < 8))
        {
            // Check the context: nucleus requires tag context 0, otherwise
            // either the config supplies the context (use_context) or the
            // tag context must equal ctxt0.

            bool ok;

            if (ctxt0 == 0)
                ok = tsb_tte_tag.context() == 0;
            else if (tsb_cfg->use_context())
            {
                if (tsb_cfg->use_context_0())
                    tsb_tte_tag.context(ctxt0);
                else
                    tsb_tte_tag.context(ctxt1);
                ok = true;
            }
            else
                ok = tsb_tte_tag.context() == ctxt0;

            if (ok)
            {
                N2_Tlb* tlb;

                if (tsb_cfg->is_ra_not_pa())
                {
                    // The TSB holds real addresses: translate RA to PA
                    // through the four RA-to-PA range registers.

                    SS_Paddr ra_mask = (SS_Paddr(1) << (tsb_tte_data.size() * 3)) - SS_Paddr(1);
                    SS_Paddr ra_low = tsb_tte_data.pa() & ~ra_mask;
                    SS_Paddr ra_high = tsb_tte_data.pa() | ra_mask;

                    for (uint_t r=0; r < 4; r++)
                    {
                        SS_TsbRaToPa* r2p = &tsb_ra2pa[r];
                        if (r2p->valid && (r2p->rpn_beg <= ra_low) && (ra_high <= r2p->rpn_end))
                        {
                            if (!tsb_tte_data.x())
                            {
                                trap_htw = SS_Trap::IAE_UNAUTH_ACCESS; // prio 2.9 (3.2 in Sun Sparc)
                                tw_status = 0;
                                return 0;
                            }
                            tsb_tte_data.pa(r2p->ppn_ofs + ra_low);
                            tsb_tte_data.pa_zero_ext(0);

#ifdef COMPILE_FOR_COSIM
                            (inst_tlb_write)(tlb_sync);
#endif
                            tlb = (N2_Tlb*)inst_tlb;
                            SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
                            tlb_entry = -1;

                            tw_status = 0;
                            return tte;
                        }
                    }

                    // No RA-to-PA range covered the real address.

                    if (!tsb_tte_data.x())
                        trap_htw = SS_Trap::IAE_UNAUTH_ACCESS; // Prio 2.9
                    else
                        trap_htw = SS_Trap::INSTRUCTION_INVALID_TSB_ENTRY; // Prio 2.9 (2.10 in Sun Sparc)
                    tw_status = 0;
                    return 0;
                }
                else if (!tsb_tte_data.x())
                {
                    // Fetch from a non executable page.

                    trap_htw = SS_Trap::IAE_UNAUTH_ACCESS;
                    tw_status = 0;
                    return 0;
                }
                else
                {
                    tsb_tte_data.pa_zero_ext(0);

#ifdef COMPILE_FOR_COSIM
                    (inst_tlb_write)(tlb_sync);
#endif
                    tlb = (N2_Tlb*)inst_tlb;
                    SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
                    tlb_entry = -1;

                    tw_status = 0;
                    return tte;
                }
            }
        }
    }

    // Check if one of the 8 tsb config is enabled. If so then hardware
    // table walk is considered enabled. Note: this deliberately scans the
    // OTHER four configs (the first four were already checked in the loop
    // above via htw_enabled).

    tsb_cfg = ctxt0 ? &tsb_config[0] : &tsb_config[4];
    for (int n=4; n--; tsb_cfg++)
    {
        if (tsb_cfg->is_valid())
        {
            htw_enabled = true;
            break;
        }
    }

    if (htw_enabled)
        trap_htw = SS_Trap::INSTRUCTION_ACCESS_MMU_MISS;
    else
        trap_htw = SS_Trap::FAST_INSTRUCTION_ACCESS_MMU_MISS;

    tw_status = 0;
    return 0;
}
/*}}}*/
SS_Tte* N2_Strand::n2_data_htw( SS_Vaddr va, SS_Context ctxt0, SS_Context ctxt1 )/*{{{*/
//
// n2_data_htw() performs a data-side hardware table walk for virtual
// address va. ctxt0/ctxt1 are the two context registers in effect for the
// access (both zero for nucleus context). On a TSB hit the TTE is inserted
// into the data TLB and returned; otherwise 0 is returned and trap_htw is
// set to the trap the walk raises. tw_status is held at 1 for the duration
// of the walk and cleared on every exit path.
//
{
  // Context zero walks the first group of four TSB configurations,
  // non-zero contexts walk the second group.
  SS_TsbConfig* tsb_cfg = ctxt0 ? &tsb_config[4] : &tsb_config[0];
  bool htw_enabled = false;

  tw_status = 1;
  data_tag_update(ctxt0,va);

  for (int n=4; n--; tsb_cfg++)
  {
    if (!tsb_cfg->is_valid())
      continue;

    htw_enabled = true;

    SS_Paddr tsb_addr = tsb_cfg->index(va);

    // Fetch the candidate TTE (tag word followed by data word) from the
    // TSB with a 128-bit atomic load, as the hardware does.
    uint64_t tte_tag_data[2];
#ifdef MEMORY_MSYNC
    SS_MsyncMemory* msync_mem = (SS_MsyncMemory*)memory;
    msync_mem->msync_info(this->strand_id(),tsb_addr,SS_Memory::HTW);
#endif
    memory->ld128atomic(tsb_addr,tte_tag_data);
    tsb_tte_tag = tte_tag_data[0];
    tsb_tte_data = tte_tag_data[1];

    if (trc_hook)
      trc_hook->hwop(SS_Tracer::LD_DATA, tsb_addr, 16, tte_tag_data);


    // Mask covering the VPN bits for the page size encoded in the TTE
    // (page size field counts in steps of 3 address bits above 13).
    SS_Vaddr vpn_mask = ~((SS_Vaddr(1) << (tsb_tte_data.size() * 3 + 13)) - SS_Vaddr(1));

    // Hit requires: valid TTE, clean reserved fields, VPN match (tag va
    // field holds va >> 22, hence the << 22), and a TTE page size at least
    // the configured size and within the legal encoding range.
    if (tsb_tte_data.v()
        && (tsb_tte_tag.reserved0() == 0) && (tsb_tte_tag.reserved1() == 0)
        && tsb_cfg->match(va & vpn_mask,(tsb_tte_tag.va() << 22) & vpn_mask)
        && (tsb_cfg->get_page_size() <= tsb_tte_data.size()) && (tsb_tte_data.size() < 8))
    {
      bool ok;

      // Context check: nucleus requires a zero TTE context; when the TSB
      // is configured to supply the context (use_context) the TTE context
      // is overwritten from ctxt0/ctxt1; otherwise the contexts must match.
      if (ctxt0 == 0)
        ok = tsb_tte_tag.context() == 0;
      else if (tsb_cfg->use_context())
      {
        if (tsb_cfg->use_context_0())
          tsb_tte_tag.context(ctxt0);
        else
          tsb_tte_tag.context(ctxt1);
        ok = true;
      }
      else
        ok = tsb_tte_tag.context() == ctxt0;

      if (ok)
      {
        N2_Tlb* tlb;

        if (tsb_cfg->is_ra_not_pa())
        {
          // TTE holds a real address: translate RA to PA through one of
          // the four real-range registers; the whole page must fall
          // inside the range.
          SS_Paddr ra_mask = (SS_Paddr(1) << (tsb_tte_data.size() * 3)) - SS_Paddr(1);
          SS_Paddr ra_low = tsb_tte_data.pa() & ~ra_mask;
          SS_Paddr ra_high = tsb_tte_data.pa() | ra_mask;

          for (uint_t r=0; r < 4; r++)
          {
            SS_TsbRaToPa* r2p = &tsb_ra2pa[r];
            if (r2p->valid && (r2p->rpn_beg <= ra_low) && (ra_high <= r2p->rpn_end))
            {
              tsb_tte_data.pa(r2p->ppn_ofs + ra_low);
              tsb_tte_data.pa_zero_ext(0);

#ifdef COMPILE_FOR_COSIM
              (data_tlb_write)(tlb_sync);
#endif
              tlb = (N2_Tlb*)data_tlb;
              SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
              tlb_entry = -1;

              tw_status = 0;
              return tte;
            }
          }
          // RA not covered by any enabled real range.
          trap_htw = SS_Trap::DATA_INVALID_TSB_ENTRY;
          tw_status = 0;
          return 0;
        }
        else
        {
          // TTE already holds a physical address; insert directly.
          tsb_tte_data.pa_zero_ext(0);

#ifdef COMPILE_FOR_COSIM
          (data_tlb_write)(tlb_sync);
#endif
          tlb = (N2_Tlb*)data_tlb;
          SS_Tte* tte = tlb->insert_tsb_tte(this,partition_id(),tsb_tte_tag(),tsb_tte_data(),va,tlb_entry,0);
          tlb_entry = -1;

          tw_status = 0;
          return tte;
        }
      }
    }
  }

  // Miss in all walked TSBs. Re-scan to decide between the hardware-walk
  // miss trap and the fast (software) miss trap.
  // NOTE(review): this selection walks the OPPOSITE config group from the
  // one walked above (ctxt0 non-zero selects group 0 here but group 4 at
  // the top). The instruction walker does the same at this point — confirm
  // this inversion is intended and not a copy/paste slip.
  tsb_cfg = ctxt0 ? &tsb_config[0] : &tsb_config[4];
  for (int n=4; n--; tsb_cfg++)
  {
    if (tsb_cfg->is_valid())
    {
      htw_enabled = true;
      break;
    }
  }

  if (htw_enabled)
    trap_htw = SS_Trap::DATA_ACCESS_MMU_MISS;
  else
    trap_htw = SS_Trap::FAST_DATA_ACCESS_MMU_MISS;

  tw_status = 0;
  return 0;
}
/*}}}*/
2023
2024#ifdef COMPILE_FOR_COSIM
2025SS_Trap::Type N2_Strand::n2_inst_hwtw( SS_Strand* strand, SS_Vaddr va, int_t entry )/*{{{*/
2026{
2027 N2_Strand* n2 = (N2_Strand*)strand;
2028 SS_Tte* tte;
2029
2030 n2->tlb_entry = entry;
2031 n2->trap_htw = SS_Trap::RESERVED;
2032
2033 if (n2->tl() == 0)
2034 tte = n2->n2_inst_htw(va_signext(va),n2->primary_context[0](),n2->primary_context[1]());
2035 else
2036 tte = n2->n2_inst_htw(va_signext(va),0,0);
2037
2038 return n2->trap_htw;
2039}
2040/*}}}*/
2041SS_Trap::Type N2_Strand::n2_data_hwtw( SS_Strand* strand, SS_Vaddr va, uint8_t asi, int_t entry )/*{{{*/
2042{
2043 N2_Strand* n2 = (N2_Strand*)strand;
2044 SS_Tte* tte;
2045 SS_AsiInfo asi_info = n2->asi_info[asi];
2046
2047 n2->tlb_entry = entry;
2048 n2->trap_htw = SS_Trap::RESERVED;
2049
2050 if (asi_info.is_primary())
2051 tte = n2->n2_data_htw(va_signext(va),n2->primary_context[0](),n2->primary_context[1]());
2052 else if (asi_info.is_secondary())
2053 tte = n2->n2_data_htw(va_signext(va),n2->secondary_context[0](),n2->secondary_context[1]());
2054 else
2055 tte = n2->n2_data_htw(va_signext(va),0,0);
2056
2057 return n2->trap_htw;
2058}
2059/*}}}*/
2060#endif
2061
2062SS_AsiSpace::Error N2_Strand::n2_lsu_ctr_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
2063{
2064 N2_Strand* n2 = (N2_Strand*)s;
2065 N2_LsuCtr* lc = (N2_LsuCtr*)_reg;
2066 (*lc) = data;
2067
2068 // Grab the inst and data mmu enable bits and store in the common
2069 // enable flag for use in sim_update.
2070
2071 s->sim_state.inst_mmu(s->lsu_ctr.im());
2072 s->sim_state.data_mmu(s->lsu_ctr.dm());
2073
2074 // Watchpoint address is always 8 bytes aligned so an offset value
2075 // of 1 causes mismatch (= disabled), use that, e.g bit0=0 is enable,
2076 // bit0=1 is disable.
2077
2078 n2->data_wp_va_addr |= 1;
2079 n2->data_wp_pa_addr |= 1;
2080 n2->data_wp_check = false;
2081 n2->data_wp_bytes = lc->bm();
2082 n2->data_wp_flags = (lc->re() ? SS_Instr::READ : 0) | (lc->we() ? SS_Instr::WRITE : 0);
2083
2084 if ((lc->bm() == 0) || (lc->mode() < 2))
2085 {
2086 n2->data_wp_va_addr |= SS_Vaddr(1);
2087 n2->data_wp_pa_addr |= SS_Paddr(1);
2088 }
2089 else if (lc->mode() == 2)
2090 {
2091 n2->data_wp_va_addr |= SS_Vaddr(1);
2092 n2->data_wp_pa_addr &= ~SS_Paddr(1);
2093 n2->data_wp_check = true;
2094 }
2095 else
2096 {
2097 n2->data_wp_va_addr &= ~SS_Vaddr(1);
2098 n2->data_wp_pa_addr |= SS_Paddr(1);
2099 n2->data_wp_check = true;
2100 }
2101
2102 // Propagete the sim_state changes.
2103
2104 (s->sim_update)(s);
2105
2106 return SS_AsiSpace::OK;
2107}
2108/*}}}*/
2109SS_AsiSpace::Error N2_Strand::data_wp_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
2110{
2111 N2_Strand* n2 = (N2_Strand*)s;
2112 n2->data_wp.set(va_signext(data));
2113
2114 // Set new the watchpoint addresses and add enable bit0=0 or disable bit0=1
2115
2116 n2->data_wp_va_addr = (n2->data_wp.va() << 3) | (n2->data_wp_va_addr & SS_Vaddr(1));
2117 n2->data_wp_pa_addr = (n2->data_wp.pa() << 3) | (n2->data_wp_pa_addr & SS_Paddr(1));
2118 return SS_AsiSpace::OK;
2119}
2120/*}}}*/
2121
2122SS_AsiSpace::Error N2_Strand::tsb_config_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2123{
2124 N2_Strand* n2 = (N2_Strand*)s;
2125 N2_TsbConfig* tc = (N2_TsbConfig*)_reg;
2126 SS_TsbConfig* tsb_cfg;
2127
2128 uint64_t old_data = (*tc)();
2129 (*tc) = data;
2130
2131 switch (tc->page_size())
2132 {
2133 case 0:
2134 case 1:
2135 case 3:
2136 case 5:
2137 break;
2138 default:
2139 (*tc) = old_data;
2140 n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
2141 return SS_AsiSpace::NO_ASI;
2142 }
2143
2144 n2->tsb_config[(va - 0x10) >> 3].update(tc->valid(),
2145 tc->tsb_base() << 13,
2146 tc->tsb_size() + 9,
2147 tc->page_size() * 3 + 13,
2148 tc->ra_not_pa(),
2149 tc->use_context());
2150
2151 tsb_cfg = n2->inst_tag_access.context() ? &n2->tsb_config[4] : n2->tsb_config;
2152 for (int i=0; i < 4; i++)
2153 n2->inst_tsb_pointer[i] = tsb_cfg[i].index(n2->inst_tag_access.va() << 13);
2154
2155 tsb_cfg = n2->data_tag_access.context() ? &n2->tsb_config[4] : n2->tsb_config;
2156 for (int i=0; i < 4; i++)
2157 n2->data_tsb_pointer[i] = tsb_cfg[i].index(n2->data_tag_access.va() << 13);
2158
2159 return SS_AsiSpace::OK;
2160}
2161/*}}}*/
2162SS_AsiSpace::Error N2_Strand::tsb_ra2pa_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2163{
2164 N2_Strand* n2 = (N2_Strand*)s;
2165 SS_AsiCtrReg* cr = (SS_AsiCtrReg*)_reg;
2166 cr->set(data);
2167 uint_t r = ((va - 8) & 0x18) >> 3;
2168 n2->tsb_ra2pa[r].update(n2->real_range[r].enable(),
2169 n2->real_range[r].rpn_low(),
2170 n2->real_range[r].rpn_high(),
2171 n2->physical_offset[r].ptv());
2172 return SS_AsiSpace::OK;
2173}
2174/*}}}*/
2175SS_AsiSpace::Error N2_Strand::tag_access_st64( SS_Node* , void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2176{
2177 N2_Strand* n2 = (N2_Strand*)s;
2178 data = va_signext(data);
2179 if (_reg == &n2->inst_tag_access)
2180 {
2181 n2->inst_tag_access = data;
2182 n2->inst_tag_update(n2->inst_tag_access.context(),n2->inst_tag_access.va() << 13);
2183 }
2184 else
2185 {
2186 n2->data_tag_access = data;
2187 n2->data_tag_update(n2->data_tag_access.context(),n2->data_tag_access.va() << 13);
2188 }
2189 return SS_AsiSpace::OK;
2190}
2191/*}}}*/
2192
SS_AsiSpace::Error N2_Strand::tlb_data_in_st64( SS_Node* d_or_i, void* r_or_v, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
//
// Store handler for the i/d TLB data-in registers. The stored value is a
// TTE data word; the TTE tag is built from the current tag access register
// of the targeted MMU. d_or_i selects the data TLB (non-null) or the
// instruction TLB (null); r_or_v non-null flags a real (rather than
// virtual) translation insert.
//
{
  N2_Strand* n2 = (N2_Strand*)s;

  N2_TagAccess* tag_access = d_or_i ? &n2->data_tag_access : &n2->inst_tag_access;

  // The tag's va field corresponds to va >> 22 while the tag access
  // register's va field corresponds to va >> 13, hence the shift.
  n2->tsb_tte_tag.context(tag_access->context());
  n2->tsb_tte_tag.va(tag_access->va() >> (22 - 13));
  n2->tsb_tte_tag.reserved0(0);
  n2->tsb_tte_tag.reserved1(0);
  n2->tsb_tte_data = data;
  n2->tsb_tte_data.pa_zero_ext(0);

  // Only page size encodings 0, 1, 3 and 5 are legal; anything else traps.
  switch (n2->tsb_tte_data.size())
  {
    case 0:
    case 1:
    case 3:
    case 5:
      break;
    default:
      n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
      return SS_AsiSpace::NO_ASI;
  }

  N2_Tlb* tlb;
  int idx = -1;  // entry index; cosim replaces this with the testbench-chosen entry

  if (d_or_i)
  {
#ifdef COMPILE_FOR_COSIM
    idx = (n2->data_tlb_write)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->data_tlb;
  }
  else
  {
#ifdef COMPILE_FOR_COSIM
    idx = (n2->inst_tlb_write)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->inst_tlb;

    // N2 has X (or EP bit) reserved as it has a dedicated ITLB. For completeness
    // and uniformity across platforms we force the x bit to one.

    n2->tsb_tte_data.x(1);
  }

  tlb->insert_tsb_tte(n2,n2->partition_id(),n2->tsb_tte_tag(),n2->tsb_tte_data(),tag_access->va() << 13,idx,r_or_v != 0);

  return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tlb_data_access_ld64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
//
// Diagnostic load of a TLB entry's data word through the i/d TLB data
// access registers. The entry index is encoded in va. d_or_i selects the
// data TLB (non-null) or instruction TLB (null). Out-of-range indices
// read as zero.
//
{
  N2_TlbIndex index;
  N2_Strand* n2 = (N2_Strand*)s;

  index = va;

  N2_Tlb* tlb;

  if (d_or_i)
  {
#ifdef COMPILE_FOR_COSIM
    (n2->data_tlb_lookup)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->data_tlb;
  }
  else
  {
#ifdef COMPILE_FOR_COSIM
    (n2->inst_tlb_lookup)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->inst_tlb;
  }

  if (index.index() < tlb->size())
  {
    SS_Tte* tte = tlb->get(index.index() & (tlb->size() - 1));

    // A diagnostic ASI access to ASI_ITLB_DATA_ACCESS_REG is supposed to calculate
    // and set the data parity (irrespective of mode)
    uint64_t dp = tte->nfo();
    dp ^= tte->taddr();
    dp ^= tte->ie();
    dp ^= tte->e();
    dp ^= tte->cp();
    dp ^= tte->p();
    dp ^= tte->w();
    dp ^= tte->page_size();
    int data_parity = BL_BitUtility::calc_parity(dp);
    // The parity obtained in the above step is calculated parity. It does not
    // reflect if an error was injected or not. To set this correct, the
    // calculated parity needs to be xor'ed with the injection mask
    data_parity ^= tte->data_parity_error();

    // Reassemble the architectural TTE data word from the decoded TTE
    // fields (sw0 and cv always read as zero; x always reads as one).
    n2->tsb_tte_data.size(tte->page_size());
    n2->tsb_tte_data.sw0(0);
    n2->tsb_tte_data.w(tte->w());
    n2->tsb_tte_data.x(1);
    n2->tsb_tte_data.p(tte->p());
    n2->tsb_tte_data.cv(0);
    n2->tsb_tte_data.cp(tte->cp());
    n2->tsb_tte_data.e(tte->e());
    n2->tsb_tte_data.ie(tte->ie());
    n2->tsb_tte_data.pa(tte->taddr() >> 13);
    // Per N2 PRM v1.2 (Table 12-2), bits 61:56 of TTE
    // is defined as the 'soft' field.
    // (In VONK for some reason, this field has been named as sw1)
    // Since, the data parity is stored in the most significant bit
    // (bit 61) of the soft field, we need to left shift the data
    // parity by 5 positions
    n2->tsb_tte_data.sw1(data_parity << (N2_TsbTteData::WIDTH_SW1 - 1));
    n2->tsb_tte_data.nfo(tte->nfo());
    n2->tsb_tte_data.v(tte->valid_bit());

    *data = n2->tsb_tte_data();
  }
  else
  {
    *data = 0;
  }

  return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tlb_data_access_st64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
//
// Diagnostic store through the i/d TLB data access registers. Like
// tlb_data_in_st64 the stored value is a TTE data word and the tag comes
// from the tag access register, but here the target entry index and the
// real/virtual flag are taken from the va encoding instead.
//
{
  N2_TlbIndex index;
  N2_Strand* n2 = (N2_Strand*)s;

  index = va;

  N2_TagAccess* tag_access = d_or_i ? &n2->data_tag_access : &n2->inst_tag_access;

  // Tag va field corresponds to va >> 22, tag access va to va >> 13.
  n2->tsb_tte_tag.context(tag_access->context());
  n2->tsb_tte_tag.va(tag_access->va() >> (22 - 13));
  n2->tsb_tte_tag.reserved0(0);
  n2->tsb_tte_tag.reserved1(0);
  n2->tsb_tte_data = data;
  n2->tsb_tte_data.pa_zero_ext(0);

  // Only page size encodings 0, 1, 3 and 5 are legal; anything else traps.
  switch (n2->tsb_tte_data.size())
  {
    case 0:
    case 1:
    case 3:
    case 5:
      break;
    default:
      n2->trap_dae_inv_asi = SS_Trap::UNSUPPORTED_PAGE_SIZE;
      return SS_AsiSpace::NO_ASI;
  }

  N2_Tlb* tlb;

  if (d_or_i)
  {
#ifdef COMPILE_FOR_COSIM
    (n2->data_tlb_write)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->data_tlb;
  }
  else
  {
#ifdef COMPILE_FOR_COSIM
    (n2->inst_tlb_write)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->inst_tlb;

    // N2 has X (or EP bit) reserved as it has a dedicated ITLB. For completeness
    // and uniformity across platforms we force the x bit to one.

    n2->tsb_tte_data.x(1);
  }

  tlb->insert_tsb_tte(n2,n2->partition_id(),n2->tsb_tte_tag(),n2->tsb_tte_data(),
                      tag_access->va() << 13,index.index(),index.flag() != 0);

  return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::tlb_tag_read_ld64( SS_Node* d_or_i, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
//
// Diagnostic load of a TLB entry's tag word through the i/d TLB tag read
// registers. The entry index is encoded in va; d_or_i selects the data
// TLB (non-null) or instruction TLB (null).
//
// NOTE(review): unlike tlb_data_access_ld64 there is no
// index.index() < tlb->size() bounds check here; the index is only masked
// to the TLB size. Confirm out-of-range diagnostic reads are meant to
// alias into the TLB rather than read as zero.
//
{
  N2_TagRead tag_read;
  N2_TlbIndex index;
  N2_Strand* n2 = (N2_Strand*)s;

  index = va;

  N2_Tlb* tlb;

  if (d_or_i)
  {
#ifdef COMPILE_FOR_COSIM
    (n2->data_tlb_lookup)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->data_tlb;
  }
  else
  {
#ifdef COMPILE_FOR_COSIM
    (n2->inst_tlb_lookup)(n2->tlb_sync);
#endif
    tlb = (N2_Tlb*)n2->inst_tlb;
  }

  SS_Tte* tte = tlb->get(index.index() & (tlb->size() - 1));

  // A diagnostic ASI access to ASI_ITLB_TAG_READ_REG is supposed to calculate
  // and set the tag's parity (irrespective of mode)
  uint64_t tp = tte->pid();
  tp ^= tte->real_bit();
  tp ^= tte->tag();
  tp ^= tte->context();
  int tag_parity = BL_BitUtility::calc_parity(tp);
  // The parity obtained in the above step is calculated parity. It does not
  // reflect if an error was injected or not. To set this correct, the
  // calculated parity needs to be xor'ed with the injection mask
  tag_parity ^= tte->tag_parity_error();

  // Assemble the architectural tag read word from the decoded TTE fields
  // (the used bit always reads as zero in this model).
  tag_read.pid(tte->pid());
  tag_read.parity(tag_parity);
  tag_read.real(tte->real_bit());
  tag_read.used(0);
  tag_read.va_ra(tte->tag() >> 13);
  tag_read.context(tte->context());

  *data = tag_read();

  return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::inst_tlb_demap_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
//
// Store handler for the ITLB demap ASI. The demap operation (type), the
// context selector, the real bit and the page va are all encoded in the
// store address. Type 0 = demap page, 1 = demap context, 2 = demap all,
// 3 = demap real/virtual all.
//
{
  N2_Strand* n2 = (N2_Strand*)s;
  uint_t context;
  N2_Demap demap;

  demap = va_signext(va);

  // Resolve the context selector: 0 = primary, 2 = nucleus. (The ITLB has
  // no secondary context.) Other selector values only proceed for demap
  // ops that do not use a context (type >= 2); note that for those paths
  // 'context' is left unset, which is safe since the type 2/3 calls below
  // do not pass it.
  switch (demap.context())
  {
    case 0:
      context = n2->primary_context[0]();
      n2->inst_tag_update(context,va);
      break;
    case 2:
      context = 0;
      n2->inst_tag_update(0,va);
      break;
    default:
      if (demap.type() < 2)
        return SS_AsiSpace::OK;
      n2->inst_tag_update(0,va);
  }

#ifdef COMPILE_FOR_COSIM
  (n2->inst_tlb_write)(n2->tlb_sync);
#endif
  N2_Tlb* tlb = (N2_Tlb*)n2->inst_tlb;

  // Perform the selected demap; the real bit chooses between the real
  // address and virtual address variants where applicable.
  switch (demap.type())
  {
    case 0:
      if (demap.real())
        tlb->demap_real(n2,n2->partition_id(),demap.va() << 13);
      else
        tlb->demap_virt(n2,n2->partition_id(),context,demap.va() << 13);
      break;
    case 1:
      tlb->demap_virt(n2,n2->partition_id(),context);
      break;
    case 2:
      tlb->demap_all(n2,n2->partition_id());
      break;
    case 3:
      if (demap.real())
        tlb->demap_real(n2,n2->partition_id());
      else
        tlb->demap_virt(n2,n2->partition_id());
      break;
  }
  return SS_AsiSpace::OK;
}
/*}}}*/
SS_AsiSpace::Error N2_Strand::data_tlb_demap_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
//
// Store handler for the DTLB demap ASI. Same encoding as the ITLB demap,
// but the DTLB additionally supports the secondary context (selector 1).
// Type 0 = demap page, 1 = demap context, 2 = demap all, 3 = demap
// real/virtual all.
//
{
  N2_Strand* n2 = (N2_Strand*)s;
  uint_t context;
  N2_Demap demap;

  demap = va_signext(va);

  // Resolve the context selector: 0 = primary, 1 = secondary, 2 = nucleus.
  // NOTE(review): case 1 passes 0 to data_tag_update while case 0 passes
  // the resolved context — confirm the secondary-context tag update is
  // meant to use 0 here.
  switch (demap.context())
  {
    case 0:
      context = n2->primary_context[0]();
      n2->data_tag_update(context,va);
      break;
    case 1:
      context = n2->secondary_context[0]();
      n2->data_tag_update(0,va);
      break;
    case 2:
      context = 0;
      n2->data_tag_update(0,va);
      break;
    default:
      if (demap.type() < 2)
        return SS_AsiSpace::OK;
      n2->data_tag_update(0,va);
  }

#ifdef COMPILE_FOR_COSIM
  (n2->data_tlb_write)(n2->tlb_sync);
#endif
  N2_Tlb* tlb = (N2_Tlb*)n2->data_tlb;

  // Perform the selected demap; the real bit chooses between the real
  // address and virtual address variants where applicable.
  switch (demap.type())
  {
    case 0:
      if (demap.real())
        tlb->demap_real(n2,n2->partition_id(),demap.va() << 13);
      else
        tlb->demap_virt(n2,n2->partition_id(),context,demap.va() << 13);
      break;
    case 1:
      tlb->demap_virt(n2,n2->partition_id(),context);
      break;
    case 2:
      tlb->demap_all(n2,n2->partition_id());
      break;
    case 3:
      if (demap.real())
        tlb->demap_real(n2,n2->partition_id());
      else
        tlb->demap_virt(n2,n2->partition_id());
      break;
  }
  return SS_AsiSpace::OK;
}
/*}}}*/
2537SS_AsiSpace::Error N2_Strand::partition_id_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2538{
2539 N2_Strand* n2 = (N2_Strand*)s;
2540
2541 n2->partition_id.set(data);
2542
2543 n2->inst_ctx_ra.set_pid(n2->partition_id());
2544 n2->inst_ctx_va.set_pid(n2->partition_id());
2545
2546 n2->data_ctx.set_pid(n2->partition_id());
2547
2548 return SS_AsiSpace::OK;
2549}
2550/*}}}*/
2551SS_AsiSpace::Error N2_Strand::pri_ctx_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2552{
2553 N2_Strand* n2 = (N2_Strand*)s;
2554
2555 if ((va & 0x100) == 0)
2556 {
2557 n2->primary_context[0].set(data);
2558 n2->inst_ctx_va.set_pri_ctx0(n2->primary_context[0]());
2559 n2->data_ctx.set_pri_ctx0(n2->primary_context[0]());
2560 }
2561
2562 n2->primary_context[1].set(data);
2563 n2->inst_ctx_va.set_pri_ctx1(n2->primary_context[1]());
2564 n2->data_ctx.set_pri_ctx1(n2->primary_context[1]());
2565
2566 return SS_AsiSpace::OK;
2567}
2568/*}}}*/
2569SS_AsiSpace::Error N2_Strand::sec_ctx_st64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2570{
2571 N2_Strand* n2 = (N2_Strand*)s;
2572
2573 if ((va & 0x100) == 0)
2574 {
2575 n2->secondary_context[0].set(data);
2576 n2->data_ctx.set_sec_ctx0(n2->secondary_context[0]());
2577 }
2578
2579 n2->secondary_context[1].set(data);
2580 n2->data_ctx.set_sec_ctx1(n2->secondary_context[1]());
2581
2582 return SS_AsiSpace::OK;
2583}
2584/*}}}*/
SS_AsiSpace::Error N2_Strand::inst_tlb_probe_ld64( SS_Node* _cpu, void*, SS_Strand* s, SS_Vaddr addr, uint64_t* data )/*{{{*/
//
// Load handler for the ITLB probe ASI. The probe address encodes the page
// va and a real bit. The result word reports whether a matching entry was
// found and, on a hit, its physical page number. Multi-hit, tag parity
// and data parity flags are always reported as zero here.
//
{
  N2_Strand* n2 = (N2_Strand*)s;
  SS_Tte* tte;
  bool tte_multi_hit;

  N2_ItlbProbeAddr itlb_addr;
  N2_ItlbProbeData itlb_data;

  itlb_addr = addr;

  // Rebuild the probe va; mask_pstate_am clips it when pstate.am is set.
  SS_Vaddr va = va_signext((itlb_addr.va() << 13) & n2->mask_pstate_am);

#ifdef COMPILE_FOR_COSIM
  (n2->inst_tlb_lookup)(n2->tlb_sync);
#endif
  N2_Tlb* tlb = (N2_Tlb*)n2->inst_tlb;

  if (itlb_addr.real())
  {
    tte = tlb->lookup_ra2pa(s,va,n2->partition_id(),&tte_multi_hit);
  }
  else
  {
    // Virtual probe: at TL == 0 try both primary contexts in turn,
    // otherwise probe with the nucleus context.
    uint64_t ctxt0, ctxt1;

    if (n2->tl() == 0)
    {
      ctxt0 = n2->primary_context[0]();
      ctxt1 = n2->primary_context[1]();
    }
    else
    {
      ctxt0 = 0;
      ctxt1 = 0;
    }

    tte = tlb->lookup_va2pa(s,va,ctxt0,n2->partition_id(),&tte_multi_hit);
    if (tte == 0)
      tte = tlb->lookup_va2pa(s,va,ctxt1,n2->partition_id(),&tte_multi_hit);

    // ToDo. Do we deal with multi hit detection here?
  }

  if (tte)
  {
    itlb_data.v(1);
    itlb_data.mh(0);
    itlb_data.tp(0);
    itlb_data.dp(0);
    itlb_data.pa(tte->trans(va) >> 13);
  }
  else
  {
    itlb_data = 0;
  }

  *data = itlb_data();

  return SS_AsiSpace::OK;
}
/*}}}*/
2647
2648SS_AsiSpace::Error N2_Strand::intr_queue_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2649{
2650 N2_Strand* n2 = (N2_Strand*)s;
2651
2652 switch (va)
2653 {
2654 case 0x3c0:
2655 n2->cpu_mondo_head = data;
2656 break;
2657 case 0x3c8:
2658 if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
2659 return SS_AsiSpace::NO_WRITE;
2660 n2->cpu_mondo_tail = data;
2661 break;
2662 case 0x3d0:
2663 n2->dev_mondo_head = data;
2664 break;
2665 case 0x3d8:
2666 if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
2667 return SS_AsiSpace::NO_WRITE;
2668 n2->dev_mondo_tail = data;
2669 break;
2670 case 0x3e0:
2671 n2->resumable_head = data;
2672 break;
2673 case 0x3e8:
2674 if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
2675 return SS_AsiSpace::NO_WRITE;
2676 n2->resumable_tail = data;
2677 break;
2678 case 0x3f0:
2679 n2->non_resumable_head = data;
2680 break;
2681 case 0x3f8:
2682 if (n2->sim_state.priv() == SS_Strand::SS_PRIV)
2683 return SS_AsiSpace::NO_WRITE;
2684 n2->non_resumable_tail = data;
2685 break;
2686 default:
2687 assert(0); // asi mapping error
2688 }
2689
2690 if (n2->cpu_mondo_head.offset() != n2->cpu_mondo_tail.offset())
2691 n2->irq.raise(n2,SS_Interrupt::BIT_CPU_MONDO_TRAP);
2692 else
2693 n2->irq.retract(SS_Interrupt::BIT_CPU_MONDO_TRAP);
2694
2695 if (n2->dev_mondo_head.offset() != n2->dev_mondo_tail.offset())
2696 n2->irq.raise(n2,SS_Interrupt::BIT_DEV_MONDO_TRAP);
2697 else
2698 n2->irq.retract(SS_Interrupt::BIT_DEV_MONDO_TRAP);
2699
2700 if (n2->resumable_head.offset() != n2->resumable_tail.offset())
2701 n2->irq.raise(n2,SS_Interrupt::BIT_RESUMABLE_ERROR);
2702 else
2703 n2->irq.retract(SS_Interrupt::BIT_RESUMABLE_ERROR);
2704
2705 return SS_AsiSpace::OK;
2706}
2707/*}}}*/
2708SS_AsiSpace::Error N2_Strand::intr_queue_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
2709{
2710 N2_Strand* n2 = (N2_Strand*)s;
2711 switch (va)
2712 {
2713 case 0x3c0: *data = n2->cpu_mondo_head(); break;
2714 case 0x3c8: *data = n2->cpu_mondo_tail(); break;
2715 case 0x3d0: *data = n2->dev_mondo_head(); break;
2716 case 0x3d8: *data = n2->dev_mondo_tail(); break;
2717 case 0x3e0: *data = n2->resumable_head(); break;
2718 case 0x3e8: *data = n2->resumable_tail(); break;
2719 case 0x3f0: *data = n2->non_resumable_head(); break;
2720 case 0x3f8: *data = n2->non_resumable_tail(); break;
2721 default: assert(0); // asi mapping error
2722 }
2723 return SS_AsiSpace::OK;
2724}
2725/*}}}*/
2726SS_AsiSpace::Error N2_Strand::intr_recv_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
2727{
2728 N2_Strand* n2 = (N2_Strand*)s;
2729 n2->intr_recv = data & n2->intr_recv;
2730 n2->intr_update();
2731 return SS_AsiSpace::OK;
2732}
2733/*}}}*/
2734SS_AsiSpace::Error N2_Strand::intr_recv_wr64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
2735{
2736 N2_Strand* n2 = (N2_Strand*)s;
2737 n2->intr_recv = data;
2738 n2->intr_update();
2739 return SS_AsiSpace::OK;
2740}
2741/*}}}*/
2742SS_AsiSpace::Error N2_Strand::intr_recv_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
2743{
2744 N2_Strand* n2 = (N2_Strand*)s;
2745 *data = n2->intr_recv;
2746 return SS_AsiSpace::OK;
2747}
2748/*}}}*/
2749SS_AsiSpace::Error N2_Strand::intr_r_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
2750{
2751 N2_Strand* n2 = (N2_Strand*)s;
2752 *data= n2->intr_r();
2753 n2->intr_recv &= ~(uint64_t(1) << n2->intr_r.vector());
2754 n2->intr_update();
2755 return SS_AsiSpace::OK;
2756}
2757/*}}}*/
2758SS_AsiSpace::Error N2_Strand::desr_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
2759{
2760 N2_Strand* n2 = (N2_Strand*)s;
2761 *data= n2->desr();
2762 n2->desr.set_unmasked(0);
2763 return SS_AsiSpace::OK;
2764}
2765/*}}}*/
SS_AsiSpace::Error N2_Strand::stb_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
//
// Diagnostic load of the store buffer arrays. The va encodes which field
// (data, ecc, control/parity, cam, pointer) and which entry is read.
// NOTE(review): when RAS modeling is disabled *data is not written here
// for most fields — the value is expected to be supplied by the testbench
// through the follow-me scheme (see the comments below); confirm callers
// never rely on *data being initialized outside that scheme.
//
{
  N2_Strand* n2 = (N2_Strand*)s;
  N2_StbAccessAddrFields addr;
  addr.set(va);
  if (n2->sim_state.ras_enabled() )
  {
    // calculate index to look into the corresponding entry
    *data = n2->stb.get_stb_value(addr());
  }

  N2_StbAccessDaReg stb_access_da_reg;
  N2_StbAccessEccReg stb_access_ecc_reg;
  N2_StbAccessCtlReg stb_access_ctl_reg;
  N2_StbAccessCamReg stb_access_cam_reg;

  // Filter the raw value through the layout of the selected field's
  // register so only its architected bits are returned.
  switch (addr.field())
  {
    case N2_StbAccessAddrFields::DATA_FIELD:
      stb_access_da_reg.set(*data);
      *data = stb_access_da_reg();
      break;
    case N2_StbAccessAddrFields::ECC_FIELD:
      stb_access_ecc_reg.set(*data);
      *data = stb_access_ecc_reg();
      break;
    case N2_StbAccessAddrFields::CNTRL_PARITY_FIELD:
      stb_access_ctl_reg.set(*data);
      *data = stb_access_ctl_reg();
      break;
    case N2_StbAccessAddrFields::CAM_FIELD:
      stb_access_cam_reg.set(*data);
      *data = stb_access_cam_reg();
      break;
    case N2_StbAccessAddrFields::STB_POINTER_FIELD:
      // the value of "current store buffer pointer" is provided
      // by test bench through follow-me scheme.
      // or not, in RAS mode.
      if (n2->sim_state.ras_enabled()) {
        *data = n2->stb.get_stb_pointer();
      }
      break;
    default:
      // reserved values.
      // ---
      // This ASI, like other diagnostic array access ASIs is not checked for a
      // legal VA. So, using a "reserved" value won't generate an exception, but
      // will generate unpredictable data from a software perspective.
      // I can tell you that for this specific case, all cases where va[8]==1
      // will act the same as va[8:6]==100, which is to return the current store
      // buffer pointer.
      // ---Mark
      //---> per Mark, we should treat the remaining cases as 0x4. testbench is
      // expected to provide a follow-me value.
      break;
  }
  return SS_AsiSpace::OK;
}
/*}}}*/
2825SS_AsiSpace::Error N2_Strand::irf_ecc_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
2826{
2827 N2_Strand* n2 = (N2_Strand*)s;
2828 *data = 0; // For now return 0
2829 return SS_AsiSpace::OK;
2830}
2831/*}}}*/
2832SS_AsiSpace::Error N2_Strand::frf_ecc_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
2833{
2834 N2_Strand* n2 = (N2_Strand*)s;
2835 *data = 0; // For now return 0
2836 return SS_AsiSpace::OK;
2837}
2838/*}}}*/
2839SS_AsiSpace::Error N2_Strand::tsa_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
2840{
2841 N2_Strand* n2 = (N2_Strand*)s;
2842 *data = 0; // For now return 0
2843 return SS_AsiSpace::OK;
2844}
2845/*}}}*/
2846SS_AsiSpace::Error N2_Strand::mra_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t* data )/*{{{*/
2847{
2848 N2_Strand* n2 = (N2_Strand*)s;
2849 *data = 0; // For now return 0
2850 return SS_AsiSpace::OK;
2851}
2852/*}}}*/
2853SS_AsiSpace::Error N2_Strand::tick_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
2854{
2855 N2_Strand* n2 = (N2_Strand*)s;
2856 N2_TickAccess ta;
2857 ta = va;
2858 // 1 - Return Data , 0 - Return ECC
2859 if (ta.data_np())
2860 {
2861 switch (ta.index())
2862 {
2863 case 0:
2864 *data = n2->tick_cmpr(); break;
2865 case 1:
2866 *data = n2->stick_cmpr(); break;
2867 case 2:
2868 *data = n2->hstick_cmpr(); break;
2869 default:
2870 assert(0);
2871 }
2872 }
2873 else
2874 {
2875 if (s->sim_state.ras_enabled())
2876 {
2877 BL_EccBits ecc_obj;
2878 ecc_obj = n2->tick_cmpr_array_ecc[ta.index()];
2879 *data = ecc_obj.get();
2880 }
2881 else
2882 *data = 0;
2883 }
2884 return SS_AsiSpace::OK;
2885}
2886/*}}}*/
2887SS_AsiSpace::Error N2_Strand::tw_control_st64( SS_Node*, void*, SS_Strand* s, SS_Vaddr, uint64_t data )/*{{{*/
2888{
2889 N2_Strand* n2 = (N2_Strand*)s;
2890 n2->tw_control.set(data);
2891 n2->core.tw_status.lock();
2892 if (n2->tw_control.stp())
2893 n2->core.tw_status.stp(n2->core.tw_status.stp() | (1 << (n2->strand_id() & 7)));
2894 else
2895 n2->core.tw_status.stp(n2->core.tw_status.stp() &~ (1 << (n2->strand_id() & 7)));
2896 n2->core.tw_status.unlock();
2897 return SS_AsiSpace::OK;
2898}
2899/*}}}*/
2900
SS_AsiSpace::Error N2_Strand::n2_scratchpad_ld64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
//
// Load handler for the (hyper)privileged scratchpad registers (_reg points
// at the backing array of eight 64-bit values; va bits [5:3] select the
// entry). With RAS modeling enabled the stored ECC is checked against the
// register value and a detected error is reported via DSFSR/DSFAR and a
// precise error trap.
//
{
  assert(va < 64);
  uint64_t* reg = (uint64_t*)_reg;
  N2_Strand* n2 = (N2_Strand*)s;
  // If RAS enabled and appropriate error detection flags are set
  // determine if there are any errors (injected or otherwise)
  if (n2->sim_state.ras_enabled() )
  {
    N2_Core& n2_core = n2->core;
    BL_EccBits ecc_obj = n2->sp_ecc[(va >> 3) & 7];

    // Check if the ecc associated with this register is a valid ecc
    if (ecc_obj.valid())
    {
      // Syndrome is the difference between the stored and calculated ECC values
      BL_Hamming_64_8_Synd syndrome = BL_Hamming_64_8_Synd(reg[(va >> 3) & 7],ecc_obj);
      bool updateDsfar = false;
      // Errors are recorded only if the PSCCE bit is set in the SETER
      if (n2->seter.pscce())
      {
        // Correctable errors are detected only SCAC bit in CERER is set
        // NOTE(review): because of the else-if below, uncorrectable (SCAU)
        // detection is skipped entirely whenever CERER.scac is set —
        // confirm a double-bit error with both bits enabled should not be
        // reported.
        if (n2_core.cerer.scac())
        {
          if (syndrome.isSingleBitError())
          {
            n2->data_sfsr.error_type(N2_DataSfsr::SCAC);
            updateDsfar = true;
          }
        }
        // Uncorrectable errors are detected only if SCAU bit in CERER is set
        else if (n2_core.cerer.scau())
        {
          if (syndrome.isDoubleBitError() || syndrome.isMultipleBitError())
          {
            n2->data_sfsr.error_type(N2_DataSfsr::SCAU);
            updateDsfar = true;
          }
        }
        if (updateDsfar)
        {
          /*unsigned long long native_add = 0;
          unsigned long long intermediate_err_add = (native_add & ~0x7f8ULL) |
          syndrome.getSyndrome() << 3;
          unsigned long long error_add = (intermediate_err_add & ~0x7ULL) |
          ((va >> 3) & 7);
          n2->data_sfar.error_addr(error_add);*/

          unsigned long long error_add = 0;
          // Capture the syndrome and array index in DSFAR
          // Store the syndrome in bits 3 thru 10 of DSFAR
          error_add = BL_BitUtility::set_subfield(error_add,syndrome.getSyndrome(),3,10);
          // Store the scratchpad array index in bits 0 thru 2
          error_add = BL_BitUtility::set_subfield(error_add,((va >> 3) & 7),0,2);
          n2->data_sfar.error_addr(error_add);
          return SS_AsiSpace::TRAP_IPE;
        }
      }
    }
  }
  *data = reg[(va >> 3) & 7];
  return SS_AsiSpace::OK;
}
/*}}}*/
2965
2966SS_AsiSpace::Error N2_Strand::n2_scratchpad_st64( SS_Node*, void* _reg, SS_Strand* s, SS_Vaddr va, uint64_t data )/*{{{*/
2967{
2968 assert(va < 64);
2969
2970 N2_Strand* n2 = (N2_Strand*)s;
2971 // If RAS enabled and appropriate error injection flags are set
2972 // Then inject error in check bits
2973 if (n2->sim_state.ras_enabled() )
2974 {
2975 N2_Core& n2_core = n2->core;
2976 BL_EccBits ecc_obj = BL_Hamming_64_8_Synd::calc_check_bits(data);
2977 unsigned ecc = ecc_obj.get();
2978 // Check if ENB and SCAU bits are set in N2 Error Injection Register
2979 if ((n2_core.error_inject.ene() == 1) && (n2_core.error_inject.scau() == 1))
2980 ecc ^= n2_core.error_inject.eccmask();
2981 // Set back the corrputed ecc
2982 ecc_obj.set(ecc);
2983 n2->sp_ecc[(va >> 3) & 7] = ecc_obj;
2984 }
2985
2986 uint64_t* reg = (uint64_t*)_reg;
2987 reg[(va >> 3) & 7] = data;
2988 return SS_AsiSpace::OK;
2989}
2990/*}}}*/
2991
2992SS_AsiSpace::Error N2_Strand::scratchpad_access_ld64( SS_Node*, void*, SS_Strand* s, SS_Vaddr va, uint64_t* data )/*{{{*/
2993{
2994 N2_Strand* n2 = (N2_Strand*)s;
2995 N2_ScratchpadAccess spa;
2996 spa = va;
2997 // 1 - Return Data , 0 - Return ECC
2998 if (spa.data_np())
2999 *data = n2->scratchpad[spa.index()];
3000 else
3001 {
3002 BL_EccBits ecc_obj(0);
3003 if(s->sim_state.ras_enabled())
3004 ecc_obj = n2->sp_ecc[spa.index()];
3005 *data = ecc_obj.get();
3006 }
3007 return SS_AsiSpace::OK;
3008}
3009/*}}}*/
3010
/*static*/ void N2_Strand::n2_run_perf( SS_Strand* s, Sam::Vcpu::perfcntr which, int64_t incr )/*{{{*/
//
// run_perf() updates a pic value (pic0, or pic1) and sets the
// overflow bit accordingly, if trap on overflow are enabled (pcr.toe)
// and if overflow did occur it raises the appropriate trap.
//
// Counting only happens when the current privilege mode is selected by
// the pcr st/ut/ht bits. Each pic half is a 32-bit counter: exceeding
// 0xffffffff sets the matching overflow bit and, when the corresponding
// toe bit is set, raises a level-15 interrupt. The raw sum is written
// back through the pic field setter (presumably truncating to 32 bits —
// the field width is defined elsewhere; confirm).
//
{
  N2_Strand* n2 = (N2_Strand*)s;

  if ((n2->pcr.st() && n2->pstate.priv())
      || (n2->pcr.ut() && ! n2->pstate.priv())
      || (n2->pcr.ht() && n2->hpstate.hpriv()))
  {
    switch (which)
    {
      case Sam::Vcpu::PIC0:
      {
        uint64_t tmp = n2->pic.l() + incr;
        if (tmp > 0xffffffffull)
        {
          n2->pcr.ov0(1);
          if (n2->pcr.toe() & 0x1) /* bit-4 for pic0 ??? */
          {
            n2->irq.raise(s,SS_Interrupt::BIT_INTERRUPT_LEVEL_15);
          }
        }
        n2->pic.l(tmp);
      }
      break;
      case Sam::Vcpu::PIC1:
      {
        uint64_t tmp = n2->pic.h() + incr;
        if (tmp > 0xffffffffull)
        {
          n2->pcr.ov1(1);
          if (n2->pcr.toe() & 0x2) /* bit-5 for pic1 ??? */
          {
            n2->irq.raise(s,SS_Interrupt::BIT_INTERRUPT_LEVEL_15);
          }
        }
        n2->pic.h(tmp);
      }
      break;
    }
  }
}
/*}}}*/
3058
3059void N2_Strand::n2_internal_interrupt( SS_Strand* s, uint_t vector, bool raise )/*{{{*/
3060{
3061 // Called by N2_Cpu when asi 0x73 (intr_w) is assigned,
3062 // e.g. whenever one strand cross calls another strand
3063
3064 N2_Strand* n2 = (N2_Strand*)s;
3065
3066 assert(!n2->sim_state.cosim());
3067
3068 if (raise)
3069 n2->intr_recv |= (uint64_t(1) << vector);
3070 else
3071 n2->intr_recv &= ~(uint64_t(1) << vector);
3072
3073 n2->intr_update();
3074}
3075/*}}}*/
3076void N2_Strand::intr_update()/*{{{*/
3077{
3078 intr_r.vector(0);
3079 if (intr_recv)
3080 {
3081 for (int l=63; l >= 0; l--)
3082 {
3083 if (intr_recv & (uint64_t(1) << l))
3084 {
3085 intr_r.vector(l);
3086 break;
3087 }
3088 }
3089
3090 irq.raise(this,SS_Interrupt::BIT_INTERRUPT_VECTOR);
3091 }
3092 else
3093 irq.retract(SS_Interrupt::BIT_INTERRUPT_VECTOR);
3094}
3095/*}}}*/
3096
3097Sam::Vcpu::TranslateError N2_Strand::n2_cnv2pa( SS_Strand* s, Sam::Vcpu::TranslateMode mode, SS_Vaddr addr, uint64_t ctx, uint64_t pid, SS_Paddr* pa )/*{{{*/
3098{
3099 N2_Strand* n2 = (N2_Strand*)s;
3100 SS_Tte* tte = 0;
3101 bool tte_multi_hit;
3102
3103 switch (mode)
3104 {
3105 case Sam::Vcpu::TRANSLATE_VA_TO_PA:
3106 ctx = n2->primary_context[0]();
3107 // fall through
3108 case Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX:
3109 pid = n2->partition_id();
3110 // fall through
3111 case Sam::Vcpu::TRANSLATE_VA_TO_PA_CTX_PID:
3112 tte = ((N2_Tlb*)n2->inst_tlb)->lookup_va2pa(s,addr,ctx,pid,&tte_multi_hit);
3113 if (tte == 0)
3114 tte = ((N2_Tlb*)n2->data_tlb)->lookup_va2pa(s,addr,ctx,pid,&tte_multi_hit);
3115 break;
3116 case Sam::Vcpu::TRANSLATE_RA_TO_PA:
3117 pid = n2->partition_id();
3118 // fall through
3119 case Sam::Vcpu::TRANSLATE_RA_TO_PA_PID:
3120 tte = ((N2_Tlb*)n2->inst_tlb)->lookup_ra2pa(s,addr,pid,&tte_multi_hit);
3121 if (tte == 0)
3122 tte = ((N2_Tlb*)n2->data_tlb)->lookup_ra2pa(s,addr,pid,&tte_multi_hit);
3123 break;
3124 case Sam::Vcpu::TRANSLATE_PA_TO_PA:
3125 if ((addr >> (n2->pa_bits() - 1)) & 1)
3126 tte = n2->phys_tte_io;
3127 else
3128 tte = n2->phys_tte_mem;
3129 break;
3130 default:
3131 assert(0);
3132 // fall out
3133 }
3134
3135 if (tte)
3136 {
3137 *pa = tte->trans(addr);
3138 return Sam::Vcpu::TRANSLATE_OK;
3139 }
3140 else
3141 return Sam::Vcpu::TRANSLATE_NO_TTE_FOUND;
3142}
3143/*}}}*/
3144
3145void N2_Strand::warm_reset(bool intp)/*{{{*/
3146{
3147 // because WMR/DBR is not exactly the same as POR, cannot call
3148 // ss_trap(POWER_ON_RESET) directly.
3149 //ss_trap(0,0,this,0,SS_Trap::POWER_ON_RESET);
3150
3151 //TODO if there are other registers that should be updated by wmr/dbr,
3152 // but are not part of trap handling, then do it here.
3153 lsu_ctr = 0;
3154 seter = 0;
3155
3156 // the warm_reset() is invoked by, at least, two places, one is reset_gen,
3157 // where a RESET_GEN_WMR should be triggered, another is cosim pli-command
3158 // "INTP 00 00", which signals a system-wide warm_reset, a RESET_GEN_WMR
3159 // trap should bot be triggered, as there will be an "INTP xx 01" followed,
3160 // one for each strand, where RESET_GEN_WMR will be invoked.
3161 if (intp)
3162 irq_launch(SS_Trap::RESET_GEN_WMR);
3163 return;
3164}
3165/*}}}*/
3166
3167void N2_Strand::n2_external_interrupt( SS_Strand* s, uint64_t *payload, bool raise )/*{{{*/
3168{
3169 // Called by external source such as NIU to signal
3170 // a device interrupt to the strand. The N2 model is
3171 // always raise the trap. It never sends retracts;
3172 // we take care of that ourselves.
3173
3174 N2_Strand* n2 = (N2_Strand*)s;
3175
3176 assert(!n2->sim_state.cosim());
3177
3178 // Can not use s->msg.make_signal here because the strand is the
3179 // receiver of the signal, not the creator.
3180
3181 SS_Signal *sgn = SS_Signal::alloc(SS_Signal::EXTERNAL_INTERRUPT);
3182 sgn->irq_type = payload[7];
3183 sgn->irq_raise = true;
3184 n2->post_signal(sgn);
3185}
3186/*}}}*/
3187
3188void N2_Strand::n2_ras_enable( SS_Strand* s, char* )/*{{{*/
3189{
3190 // Ras enable set from frontend
3191
3192 N2_Strand* n2 = (N2_Strand*)s;
3193 n2->exe_table = n2->trc_exe_table;
3194 n2->mem_table = n2->mem_ras_table;
3195
3196 n2->sim_state.ras_enabled(1);
3197
3198 assert(((N2_Model*)n2->model)->ck_memory);
3199 s->memory = ((N2_Model*)n2->model)->ck_memory;
3200 if (n2->mem_err_detector.memory == NULL)
3201 n2->mem_err_detector.memory = s->memory;
3202
3203 s->flush_tte_all(); // bounce the decode caches
3204}
3205/*}}}*/
3206
3207// fill_store_buffer_mem() adds the write transactions contained in a
3208// MemoryTransaction to the store buffer. It handles both large and small
3209// transactions by breaking large transactions in to doubleword chunks
3210// and mapping 4, 2, and 1 byte transactions to the correct "byte mark".
3211SS_Trap::Type N2_Strand::fill_store_buffer_mem(const MemoryTransaction &memXact)/*{{{*/
3212{
3213 if (!memXact.writeXact())
3214 {
3215 fprintf(stderr,"N2_Strand::fillStoreBufferMem"
3216 "bad xact access: %d", memXact.access());
3217 }
3218
3219 if (memXact.size() >= 8)
3220 {
3221 for (int ndx = 0; ndx < memXact.size()/sizeof(double); ++ndx)
3222 {
3223 return stb.fill_store_buffer(false, memXact.paddr() + sizeof(double) * ndx,
3224 0xff, memXact.getData(ndx));
3225
3226 }
3227 } else
3228 {
3229 uint8_t byteMarks;
3230 switch (memXact.size())
3231 {
3232 case sizeof(int):
3233 byteMarks = 0xf;
3234 break;
3235
3236 case sizeof(short):
3237 byteMarks = 0x3;
3238 break;
3239
3240 case sizeof(char):
3241 byteMarks = 0x1;
3242 break;
3243
3244 default:
3245 fprintf(stderr,"N2_Strand::fill_store_buffer_mem"
3246 "bad xact size: %d", memXact.size());
3247 }
3248 return stb.fill_store_buffer(false, memXact.paddr(), byteMarks, memXact.getData());
3249 }
3250 return SS_Trap::NO_TRAP;
3251}
3252/*}}}*/
3253
3254SS_Trap::Type N2_Strand::fill_store_buffer_asi(uint64_t addr,
3255 uint8_t asi_num,
3256 uint64_t data)/*{{{*/
3257{
3258 if (asi_num != N2_Asi::ASI_ERROR_INJECT)
3259 {
3260 return stb.fill_store_buffer(true, addr, asi_num, data);
3261 }
3262 return SS_Trap::NO_TRAP;
3263}
3264/*}}}*/
3265
3266SS_Trap::Type N2_Strand::check_store_buffer_RAWtrap(const MemoryTransaction &memXact)/*{{{*/
3267{
3268 if (!memXact.readXact())
3269 {
3270 fprintf(stderr,"N2_Strand::check_store_buffer_RAWtrap"
3271 "bad access type: %d", memXact.access());
3272 }
3273 return stb.check_store_buffer_RAWtrap(memXact);
3274}
3275/*}}}*/
3276
SS_Trap::Type N2_Strand::flush_store_buffer()/*{{{*/
// Thin delegate: flush this strand's store buffer and return the trap
// result reported by the store buffer object (SS_Trap type).
{
 return stb.flush_store_buffer();
}
/*}}}*/