Commit | Line | Data |
---|---|---|
920dae64 AT |
1 | /* |
2 | * ========== Copyright Header Begin ========================================== | |
3 | * | |
4 | * OpenSPARC T2 Processor File: simcore.h | |
5 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. | |
6 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES. | |
7 | * | |
8 | * The above named program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public | |
10 | * License version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * The above named program is distributed in the hope that it will be | |
13 | * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public | |
18 | * License along with this work; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. | |
20 | * | |
21 | * ========== Copyright Header End ============================================ | |
22 | */ | |
23 | /* | |
24 | * Copyright 2007 Sun Microsystems, Inc. All rights reserved. | |
25 | * Use is subject to license terms. | |
26 | */ | |
27 | ||
28 | #ifndef _SIMCORE_H_ | |
29 | #define _SIMCORE_H_ | |
30 | ||
31 | #pragma ident "@(#)simcore.h 1.48 07/02/13 SMI" | |
32 | ||
33 | #ifdef __cplusplus | |
34 | extern "C" { | |
35 | #endif | |
36 | ||
37 | ||
38 | /* | |
39 | * # # | |
40 | * ## # #### ##### ###### | |
41 | * # # # # # # # | |
42 | * # # # # # # ##### | |
43 | * # # # # # # # | |
44 | * # ## # # # # | |
45 | * # # #### # ###### | |
46 | * | |
47 | * If you think you need to edit this file you are probably wrong. | |
48 | * Please talk with Ash to discuss what you want to do before trying | |
49 | * to hack in here. So hands off ! | |
50 | */ | |
51 | ||
52 | #include <pthread.h> | |
53 | ||
/*
 * Opaque forward typedefs for the configuration and simulation
 * structures.  Only pointers are passed around in this header;
 * the struct bodies are defined in other headers / C files.
 */
typedef struct CONFIG_PROC config_proc_t;
typedef struct CONFIG_DEV config_dev_t;
typedef struct CONFIG_ADDR config_addr_t;
typedef struct PROC_TYPE proc_type_t;
typedef struct DEV_TYPE dev_type_t;
typedef struct DOMAIN domain_t;
typedef struct SERVICE_PROC service_proc_t;
typedef struct SYSTEM system_t;
typedef struct ERROR_CONFIG error_conf_t;
typedef struct REG_MAP reg_map_t;
64 | ||
65 | ||
66 | #include "list.h" | |
67 | ||
typedef enum MACCESS maccess_t;	/* memory access op code; see enum MACCESS below */
69 | ||
70 | /* | |
71 | * Internal structures for execution data cache simulation | |
72 | */ | |
73 | ||
74 | #include "xdcache.h" | |
75 | ||
76 | ||
77 | ||
78 | /* | |
79 | * Op fields for memory access operations | |
80 | * | |
81 | * Note: These field values are used by the core for embedding | |
82 | * in decoded instructions. However, processor modules may also | |
83 | * choose additional specific encodings that are restricted to | |
84 | * those modules only and are defined elsewhere. | |
85 | * | |
86 | * Care should be taken when modifying these codes so as not to | |
87 | * tread on values defined elsewhere (values >= 0x100 && < 0xfff0). | |
88 | * | |
89 | * Processor specific modules should use the value | |
90 | * MA_Non_Gen_Base as the base for their specific definitions. | |
91 | * | |
92 | * The generic core values reside in no more than 8 bits | |
93 | * and are used to define basic load, store and cas operations. | |
94 | */ | |
95 | ||
/*
 * Memory access op codes.  The low 4 bits (MA_Size_Mask) hold the
 * access size, the remaining bits the operation class; the composite
 * codes below (MA_ldu8 etc.) OR a size and a class together.
 */
enum MACCESS {
	MA_Size_Mask	= 0xf,		/* mask to extract the size field */

	MA_Size8	= 0x0,		/* 1 byte */
	MA_Size16	= 0x1,		/* 2 bytes */
	MA_Size32	= 0x2,		/* 4 bytes */
	MA_Size64	= 0x3,		/* 8 bytes */
	MA_Size128	= 0x4,		/* 16 bytes */
	MA_Size256	= 0x5,		/* 32 bytes */
	MA_Size512	= 0x6,		/* 64 bytes */

	MA_Op_MaskGeneric = 0xf0,	/* mask for the generic op class only */
	MA_Op_Mask	= 0xfff0,	/* mask covering processor specific classes too */

	/* Generic operation classes: */
	MA_Ld		= 0x00,
	MA_LdSigned	= 0x10,
	MA_LdDouble	= 0x20,		/* SPARC only ? */
	MA_LdFloat	= 0x30,
	MA_St		= 0x40,
	MA_StFloat	= 0x50,
	MA_CAS		= 0x60,		/* compare and swap */
	MA_StDouble	= 0x70,		/* SPARC only ? */
	MA_LdSt		= 0x80,		/* atomic load-store (see MA_ldstub) */
	MA_Swap		= 0x90,		/* atomic swap (see MA_swap) */

	MA_Non_Gen_Base	= 0x100,	/* first non-generic usable value */
	MA_Non_Gen_Skip	= 0x10,		/* skip value for non-generic codes */

	/* Composite codes = size | class: */
	MA_ldu8		= MA_Size8 | MA_Ld,
	MA_ldu16	= MA_Size16 | MA_Ld,
	MA_ldu32	= MA_Size32 | MA_Ld,
	MA_ldu64	= MA_Size64 | MA_Ld,

	MA_lddu64	= MA_Size64 | MA_LdDouble,	/* loads 64 bit value 64 bit aligned */
	MA_stdu64	= MA_Size64 | MA_StDouble,

	MA_lddu128	= MA_Size128 | MA_LdDouble,	/* loads 2x64 bit
							   values atomically
							   128 bit aligned */

	MA_lds8		= MA_Size8 | MA_LdSigned,
	MA_lds16	= MA_Size16 | MA_LdSigned,
	MA_lds32	= MA_Size32 | MA_LdSigned,
	MA_lds64	= MA_Size64 | MA_LdSigned,

	MA_st8		= MA_Size8 | MA_St,
	MA_st16		= MA_Size16 | MA_St,
	MA_st32		= MA_Size32 | MA_St,
	MA_st64		= MA_Size64 | MA_St,

	MA_ldfp8	= MA_Size8 | MA_LdFloat,
	MA_ldfp16	= MA_Size16 | MA_LdFloat,
	MA_ldfp32	= MA_Size32 | MA_LdFloat,
	MA_ldfp64	= MA_Size64 | MA_LdFloat,
#ifdef PROCESSOR_SUPPORTS_QUADFP
	MA_ldfp128	= MA_Size128 | MA_LdFloat,
#endif

	MA_stfp8	= MA_Size8 | MA_StFloat,
	MA_stfp16	= MA_Size16 | MA_StFloat,
	MA_stfp32	= MA_Size32 | MA_StFloat,
	MA_stfp64	= MA_Size64 | MA_StFloat,
#ifdef PROCESSOR_SUPPORTS_QUADFP
	MA_stfp128	= MA_Size128 | MA_StFloat,
#endif

	MA_cas32	= MA_Size32 | MA_CAS,
	MA_cas64	= MA_Size64 | MA_CAS,

	/* Are these generic ? FIXME */
	MA_ldstub	= MA_Size8 | MA_LdSt,
	MA_swap		= MA_Size32 | MA_Swap
};
169 | ||
170 | /* | |
171 | * These macros must be passed an op masked with MA_Op_Mask. | |
172 | * It is intended that they be included as part of a architecture | |
173 | * specific form, hence the underscore prefix. | |
174 | */ | |
/*
 * Classification helpers for op codes already masked with MA_Op_Mask.
 * The atomic classes (LdSt, CAS, Swap) count as both load and store.
 * Underscore-prefixed because architecture specific wrappers are
 * expected to do their own masking and then use these.
 */
#define _IS_MA_ATOMIC(_op) \
	((_op) == MA_LdSt || (_op) == MA_CAS || (_op) == MA_Swap)
#define _IS_MA_LOAD(_op) \
	((_op) == MA_Ld || (_op) == MA_LdSigned || \
	(_op) == MA_LdDouble || (_op) == MA_LdFloat || \
	_IS_MA_ATOMIC(_op))
#define _IS_MA_STORE(_op) \
	((_op) == MA_St || (_op) == MA_StDouble || \
	(_op) == MA_StFloat || \
	_IS_MA_ATOMIC(_op))
185 | ||
186 | ||
187 | ||
188 | /* | |
189 | * Data structures passed as pointers, but private to the | |
190 | * simulator core. | |
191 | */ | |
192 | ||
/* Handler used to execute a decoded instruction.
 * NOTE(review): the empty parens make this an unprototyped (K&R)
 * function pointer type — confirm the intended call signature
 * before tightening it to a full prototype. */
typedef void *(*instn_exec_t)();


/* Opaque typedefs for the execution instruction cache internals
 * (see xicache headers) and for breakpoint bookkeeping. */
typedef struct XICACHE_INSTN xicache_instn_t;
typedef struct XICACHE_LINE xicache_line_t;
typedef struct XICACHE xicache_t;
typedef union DECODED_INSTN decoded_instn_t;
typedef struct BP_INFO bp_info_t;
typedef struct BREAKPOINT breakpoint_t;
202 | ||
203 | ||
/* Sigh : used as a hack for breakpoints -
 * try and remove this - FIXME */
typedef LIST_DEF( simcpu_list_t, simcpu_t );	/* list type from list.h */

/* Global simcpu list used by the breakpoint hack above. */
extern simcpu_list_t simcpu_list;
209 | ||
210 | ||
211 | ||
212 | ||
213 | ||
214 | /* | |
215 | * Simulator core CPU structure .. | |
216 | * defines NINT integer registers, NFP floating point | |
217 | * registers etc ... | |
218 | */ | |
219 | ||
#define NINT	32	/* number of integer registers in simcpu_t */
#define NFP_128	16	/* FP register file viewed as 16 x 128-bit ... */
#define NFP_64	32	/* ... or as 32 x 64-bit ... */
#define NFP_32	64	/* ... or as 64 x 32-bit (see the fpreg union) */
224 | ||
/*
 * Stashed per-simcpu error injection state (used under
 * ERROR_INJECTION; see simcpu_t.errorp).
 * NOTE(review): the typedef name "error_t" collides with the glibc
 * "error_t" exposed under _GNU_SOURCE — confirm the build never
 * mixes the two.
 */
typedef struct ERROR {
	void *		itep;		/* track itlb entry with error until demapped */
	void *		dtep;		/* track dtlb entry with error until demapped */
	uint8_t		tlb_idx[4];	/* tlb entry with parity error, indexed by: */
#define IMDU_IDX 0
#define IMTU_IDX 1
#define DMDU_IDX 2
#define DMTU_IDX 3
	uint64_t	l2_write;	/* write to l2 on partial store or atomic hit */
	uint64_t	partial_st;	/* partial store l2 access */
	uint8_t		reg;		/* register number with ECC error */
	tvaddr_t	addr;		/* pa or va in error */
	bool_t		check_xicache;	/* check error conditions on xicache access */
					/* (original comment said "xdcache" - copy/paste) */
	bool_t		check_dtlb;	/* check error conditions on dtlb access */
	bool_t		check_xdcache;	/* check error conditions on xdcache access */
	error_conf_t *	errlistp;	/* errors to post for this simcpu */
} error_t;
242 | ||
/*
 * Per-cpu scheduling state bits held in simcpu_t.state_bits.
 * A cpu is runnable only when no state bit at all is set.
 */
#define STATE_DISABLED	0x1LL	/* cpu administratively disabled */
#define STATE_PARKED	0x2LL	/* cpu temporarily parked */

#define RUNNABLE(_sp)		(0 == (_sp)->state_bits)
#define DISABLED(_sp)		(0 != ((_sp)->state_bits & STATE_DISABLED))
#define SET_DISABLED(_sp)	((_sp)->state_bits |= STATE_DISABLED)
#define CLR_DISABLED(_sp)	((_sp)->state_bits &= ~STATE_DISABLED)
#define PARKED(_sp)		(0 != ((_sp)->state_bits & STATE_PARKED))
#define SET_PARKED(_sp)		((_sp)->state_bits |= STATE_PARKED)
#define CLR_PARKED(_sp)		((_sp)->state_bits &= ~STATE_PARKED)
253 | ||
254 | #if ERROR_TRAP_GEN /* { */ | |
255 | /* | |
256 | * Error Trap Gen Stuff | |
257 | */ | |
258 | ||
/* Which access type can trigger an error event. */
typedef enum {
	ERROR_ON_LOAD,
	ERROR_ON_STORE,
	ERROR_ON_LOAD_OR_STORE
} ee_access_t;

/* Lifecycle state of an error event. */
typedef enum {
	EE_PARSED,	/* user input has been completely parsed */
	EE_ASSIGNED,	/* assigned to an sp (watching for trigger) */
	EE_TRIGGERED	/* error event trigger conditions met */
} ee_status_t;

/* Sentinels meaning "no constraint given" for the matching fields. */
#define ERROR_TL_NONE 0xffffffff
#define ERROR_INSTN_CNT_NONE UINT64_MAX

/*
 * One bit per user-specifiable option of an error_event_t; the
 * "all" view lets every option bit be tested/cleared at once.
 * NOTE(review): bit-field layout is compiler dependent — keep the
 * field order exactly as is.
 */
typedef union {
	uint64_t all;			/* used to test/clear all options */
	struct {
		uint64_t opts_pad:53;	/* unused padding up to 64 bits */
		uint64_t trigger_cnt:1;
		uint64_t tl:1;
		uint64_t priv:1;
		uint64_t access:1;
		uint64_t address:1;
		uint64_t pc:1;
		uint64_t instn_cnt:1;
		uint64_t target_cpuid:1;
		uint64_t error_str:1;
		uint64_t trap_num:1;
		uint64_t sp_intr:1;
	} bits;
} error_event_options_t;
291 | ||
/*
 * A user-specified error event: the trigger conditions plus the
 * trap/interrupt data to apply once they are met.  Fields not
 * supplied by the user are flagged via "options".
 */
typedef struct error_event {
	uint64_t trap_num;
	int sp_intr;
	char *error_str;
	int64_t target_cpuid;
	int64_t instn_cnt;
	int64_t pc;
	struct {
		ee_access_t access;	/* load / store / either */
		uint64_t addr;
	} address;
	int priv;
	int tl;
	ee_status_t ee_status;		/* parsed / assigned / triggered */
	int trigger_cnt;
	error_event_options_t options;	/* which fields above were supplied */
	struct error_asi *temp_error_asi_list_rootp;
	struct error_event *nextp;	/* singly linked list */
} error_event_t;

/*
 * Per-ASI override record: nand_mask/or_mask are applied to the
 * register value (nand first, then or) for error simulation.
 */
typedef struct error_asi {
	int asi;
	uint64_t va;		/* 0x1 means all VAs */
	uint64_t nand_mask;	/* applied to register before or_mask */
	uint64_t or_mask;
	uint32_t access_cnt;
	uint64_t asi_reg;
	uint32_t id;		/* unique id for each error_asi */
	uint64_t cpu_mask;	/* bitmap of valid cpus for this error_asi */
	struct error_asi *nextp;	/* singly linked list */
} error_asi_t;
323 | #endif /* ERROR_TRAP_GEN } */ | |
324 | ||
325 | ||
/*
 * Index adjustment for the 32-bit view of the FP register union:
 * on little-endian hosts adjacent 32-bit halves are swapped
 * (presumably to preserve target/big-endian register numbering —
 * confirm against fpreg users).
 */
#if HOST_CPU_LITTLE_ENDIAN
#define FP_32_INDX(_i) ((_i) ^ 1)
#else
#define FP_32_INDX(_i) (_i)
#endif
331 | ||
/*
 * Simulator core CPU structure: one instance per simulated cpu.
 * The register files are deliberately the first elements (fast,
 * fixed offsets for the execution loop).
 */
struct SIMCPU {
	uint64_t intreg[NINT];	/* Register file info always first elements ! */
	union {			/* FP registers: three views of the same storage */
		ieee_fp32_t s32[NFP_32];
		ieee_fp64_t s64[NFP_64];
#ifdef PROCESSOR_SUPPORTS_QUADFP
		ieee_fp128_t s128[NFP_128];	/* largest FP type to ensure correct alignment */
#endif
	} fpreg;

	tvaddr_t pc;		/* Program counter */
	tvaddr_t npc;		/* Next PC if used by target */

	uint64_t miscreg[8];	/* misc 64 bit registers - target dependent usage */
				/* for target=sparcv9: */
				/* misc[0] = CCR */

	uint64_t scratch64;	/* emergency scratch */


	/*
	 * Different reasons why we should be paying attention
	 *
	 * To optimize the 'any attention asserted' check, we
	 * use a union to map the different reasons to bytes
	 * within one 64-bit word that can be tested with one read.
	 */
	volatile union {
		uint64_t _attention;	/* non-zero if any simulator exception to consider */
		struct {
			uint8_t _exec_loop_reset;
			uint8_t _sync_pending;
			uint8_t _async_event;	/* set if another entity changed simcpu_t state */
			uint8_t _exception_pending;
			uint8_t _xdcache_trans_flush_pending;
			uint8_t _xicache_trans_flush_pending;
			uint8_t _xicache_instn_flush_pending;
		} _subatt;
	} _att;

/* Short-hand accessors for the attention sub-fields above. */
#define attention _att._attention
#define exec_loop_reset _att._subatt._exec_loop_reset
#define sync_pending _att._subatt._sync_pending
#define async_event _att._subatt._async_event
#define exception_pending _att._subatt._exception_pending
#define xdcache_trans_flush_pending _att._subatt._xdcache_trans_flush_pending
#define xicache_trans_flush_pending _att._subatt._xicache_trans_flush_pending
#define xicache_instn_flush_pending _att._subatt._xicache_instn_flush_pending

/*
 * The error caught by this ASSERT is if more uint8_t's are added to _subatt
 * such that it becomes larger than _attention - which would break the
 * combined attention check.
 */
#define ATTENTION_SANITY_CHECK do { \
	ASSERT(sizeof (((simcpu_t *)NULL)->_att) == sizeof (((simcpu_t *)NULL)->attention)); \
	} while (0)

	volatile uint64_t state_bits;	/* collection of state bits (STATE_*) */

	exec_thread_t * etp;	/* pointer to exec_thread that this scheduler list is on */
	simcpu_t * headp;	/* pointer to the head simcpu of the scheduler list */
	simcpu_t * nextp;	/* next in scheduler list */

	xicache_t * xicachep;	/* pointer to local execution cache */

	/* cpu specific handler for an xicache miss */
	void (*xic_miss)(simcpu_t *, xicache_line_t *, tvaddr_t addr);

	config_addr_t * xic_miss_addrp;	/* cpu specific cache of address used to */
					/* satisfy last XC miss - only used by sp->xic_miss */

	int gid;	/* global simcpu id */

	/* How this simulator cpu corresponds to the
	 * specific CPU defined.
	 */
	config_proc_t * config_procp;
	void * specificp;	/* target specific data pointer */
	void (*decodemep)(simcpu_t *, xicache_instn_t *);	/* decodeme function for this cpu */

	simcycle_t cycle;		/* current cycle count */
	simcycle_t cycle_target;	/* cycle at which cycle_target_match is called */
	void (*cycle_target_match)(simcpu_t * sp);
	simcycle_t cycle_quantum_start;	/* cycle value at start of current quantum (see ICOUNT) */
	simcycle_t total_instr;		/* instns completed before current quantum (see ICOUNT) */

#if ERROR_INJECTION
	error_t * errorp;	/* pointer to flags and stashed error values */
	bool_t error_enabled;	/* cycle match happened and errors to post */
	bool_t error_check;	/* first-level flag to check error conditions */
	uint8_t error_priv;	/* privilege level to post error */
#endif

#if ERROR_TRAP_GEN /* { */
	simcycle_t error_cycle;		/* cycle at which to inject the error */
	bool_t error_pending;
	bool_t error_cycle_reached;
	error_event_t *eep;		/* event currently assigned to this sp */
#endif /* ERROR_TRAP_GEN } */

#if PERFORMANCE_CHECK /* { */
	/* FIXME: Hack - to go away */
	/* Reporting interval; NOTE(review): value is 10M, not the
	 * 1M the original comment claimed. */
#define PERF_CYCLE_GAP 10000000
	hrtime_t last_hrtime;
	hrtime_t total_hrtime;
	simcycle_t perf_target;
	uint64_t prev_icount;
	uint64_t xdc_hits, xdc_misses, xdc_flushes;
	uint64_t prev_xdc_hits, prev_xdc_misses, prev_xdc_flushes;
	uint64_t xic_hits, xic_misses, xic_flushes;
	uint64_t prev_xic_hits, prev_xic_misses, prev_xic_flushes;
#endif /* } */


	/*
	 * Misc stuff to (potentially go away) FIXME
	 */

#if ENABLE_MAGIC_TRAPS /* { */
	/* This for the magic trap that enables us to count instructions */
	simcycle_t magic_count;
#endif /* } */

	/*
	 * additional cache structures
	 */

	bp_info_t * bp_infop;	/* breakpoint bookkeeping */

	uint32_t tagstate;	/* xcache tag state (XCACHE_TAGSTATE_* bits) */

	void *debug;

	/*
	 * xdcache is large - keep at the end to optimize structure
	 * offsets.
	 */
	xdcache_t xdc;
};
472 | ||
473 | ||
474 | ||
475 | /* | |
476 | * DBGELMIN will execute when DBG_EL_MIN or DBG_EL are set in debug_bits. | |
477 | * DBGEL will only execute if DBG_EL is set in debug_bits. | |
478 | */ | |
#ifndef NDEBUG
/* Bit assignments within the global debug_bits word. */
#define DBG_EL		0x2LL
#define DBG_EL_MIN	0x4LL
#define DBG_ILLINST	0x10LL
#define DBG_DECODE	0x40000000LL
#define DBG_EXEC_LOOP	0x100000000LL
#define DBG_XCACHE	0x20000000000LL

/* Each DBGxxx(s) executes statement list s only when the matching
 * bit(s) are set in debug_bits. */
#define DBGEL(s) do { if (debug_bits & (DBG_EL)) { s } } while (0)
#define DBGELMIN(s) do { if (debug_bits & (DBG_EL_MIN|DBG_EL)) { s } } while (0)
#define DBGILLINST(s) do { if (debug_bits & (DBG_ILLINST)) { s } } while (0)
#define DBGDECODE(s) do { if (debug_bits & DBG_DECODE) { s } } while (0)	/* debug instn decode */
#define DBGEXECLOOP(s) do { if (debug_bits & DBG_EXEC_LOOP) { s } } while (0)	/* debug exec_loop */
#define DBGXCACHE(s) do { if (debug_bits & DBG_XCACHE) { s } } while (0)	/* debug x[di]cache */
#else
/* Release build (NDEBUG): all debug macros compile to nothing. */
#define DBGEL(s) do { } while (0)
#define DBGELMIN(s) do { } while (0)
#define DBGILLINST(s) do { } while (0)
#define DBGDECODE(s) do { } while (0)
#define DBGEXECLOOP(s) do { } while (0)
#define DBGXCACHE(s) do { } while (0)
#endif
501 | ||
#define DFT_EXEC_QUANTUM 8192		/* default for options.quantum */
#define EXEC_QUANTUM (options.quantum)	/* runtime-selected execution quantum */

/* Flag a cpu so the exec loop notices a pending synchronization. */
#define set_sync_pending(_sp) do { (_sp)->sync_pending = true; } while (0)
506 | ||
507 | ||
/* Allocate a simcpu for processor cfp; specificp becomes the
 * target specific data pointer (simcpu_t.specificp). */
extern simcpu_t * sim_cpu_alloc(
	config_proc_t * cfp,
	void * specificp);

/* Service processor (SP) status. */
typedef struct {
	bool_t mode;		/* true if SP defined in config file */
	volatile bool_t poweron;	/* true if SP has signalled a poweron */
} sp_info_t;
516 | ||
517 | ||
/* Global simulator status flags. */
struct SIMSTATUS {
	volatile bool_t running;	/* start/stop execution */
	volatile bool_t initialized;	/* true when legion init complete */
	sp_info_t sp;			/* service proc info */
};


/* Simulator core control entry points (defined in the core). */
extern void init_simstatus(void);
extern void simcore_start(void);
extern void simcore_stop(void);
extern void simcore_cpu_enable(simcpu_t *sp);
extern void simcore_cpu_disable(simcpu_t *sp);
extern void simcore_cpu_state_park(simcpu_t *sp);
extern void simcore_cpu_state_unpark(simcpu_t *sp);
532 | ||
/* One host execution thread driving a list of assigned simcpus. */
struct EXEC_THREAD {
	uint_t id;		/* Id for the exec_thread */
	simcpu_t * allp;	/* pointer to all cpus assigned */

	uint_t nsimcpus;	/* number of simcpus assigned to this thread */
};
539 | ||
/* Main execution loops run by an exec_thread. */
extern void exec_loop(exec_thread_t *);
extern void exec_loop_dh(exec_thread_t *);

/* Get the number of instructions executed by simcpu_t */
/* Avoids incrementing both cycle and total_instr in the loop */
#define ICOUNT(_sp) ((_sp)->total_instr + \
	((_sp)->cycle - (_sp)->cycle_quantum_start))

/* Presumably cancels a pending cycle_target match for sp —
 * confirm in the implementation. */
extern void cycle_target_off(simcpu_t *sp);

#if !defined(NDEBUG) /* { */
/* Debug builds only: update the global debug_bits at runtime. */
void simcore_update_debug_bits(uint64_t newval);
#endif /* } */
553 | ||
/*
 * xcache tag state: a 3-bit field held in simcpu_t.tagstate at
 * bit offset _XCACHE_TAGSTATE_SHIFT.
 */
#define _XCACHE_TAGSTATE_SHIFT	6
#define XCACHE_TAGSTATE_MASK	(7 << _XCACHE_TAGSTATE_SHIFT)
#define XCACHE_TAGSTATE_PHYS	(0 << _XCACHE_TAGSTATE_SHIFT)
#define XCACHE_TAGSTATE_TLN	(1 << _XCACHE_TAGSTATE_SHIFT)
#define XCACHE_TAGSTATE_TL0	(2 << _XCACHE_TAGSTATE_SHIFT)
#define XCACHE_TAGSTATE_TLN_U	(3 << _XCACHE_TAGSTATE_SHIFT)
#define XCACHE_TAGSTATE_TL0_U	(4 << _XCACHE_TAGSTATE_SHIFT)

/* Recomputes sp->tagstate (presumably from current trap level /
 * privilege — confirm in the implementation). */
extern void xcache_set_tagstate(simcpu_t * sp);
563 | ||
564 | #ifdef __cplusplus | |
565 | } | |
566 | #endif | |
567 | ||
568 | #endif /* _SIMCORE_H_ */ |