/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: execinstns.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"@(#)execinstns.c	1.15	07/01/09 SMI"

#include <stdio.h>
#include <stdlib.h>

#include <assert.h>

#include "basics.h"
#include "fatal.h"
#include "allocate.h"
#include "simcore.h"
#include "config.h"
#include "bswap.h"
#include "xicache.h"
#include "xdcache.h"
#ifndef FP_DECODE_DISABLED
#include "tsparcv9.h"
#include "tsparcv9internal.h"
#endif	/* FP_DECODE_DISABLED */

#include "execinstns.h"	/* autogenerated definitions of instns */


	/*
	 * Core simulator execution instructions.
	 *
	 * CPU specific instruction ops are typically in
	 * $SRCDIR/proc/processorinstns.c
	 */




#define IMPL( _n ) \
	void decoded_impl_##_n(simcpu_t *sp, xicache_instn_t * xcip) {
#define ENDI	sp->pc = sp->npc; sp->npc += 4; }	/* SPARC !! FIXME */
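
/*
 * For illustration only (this assumes the operand accessors Rdest,
 * Rsrc1, Simm16 etc. used below are macros supplied by the
 * autogenerated execinstns.h / per-cpu headers): a body written as
 *
 *	IMPL( add_imm )
 *		Rdest = Rsrc1 + Simm16;
 *	ENDI
 *
 * expands to roughly
 *
 *	void decoded_impl_add_imm(simcpu_t *sp, xicache_instn_t *xcip)
 *	{
 *		Rdest = Rsrc1 + Simm16;
 *		sp->pc = sp->npc;
 *		sp->npc += 4;
 *	}
 *
 * i.e. every decoded op is a leaf function that performs its effect
 * and then advances pc/npc by one 4-byte SPARC instruction.
 */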


	/* Support for load and store operations */

#if ERROR_TRAP_GEN /* { */

#define ERROR_CHECK_TRIGGER(_addr1, _addr2, _access)	do { \
	sparcv9_cpu_t *v9p = (sparcv9_cpu_t *)(sp->specificp); \
	error_event_t *eep = (error_event_t *)(sp->eep); \
	\
	if ((sp->error_pending == false) || \
	    (sp->error_cycle_reached == false) || \
	    (sp->eep->options.bits.pc)) { \
		goto skip_error; \
	} \
	\
	/* Check if error event has a load or store specified */ \
	if ((eep->address.access == ERROR_ON_LOAD_OR_STORE) || \
	    (_access == eep->address.access)) { \
	\
		/* Check if error event has an address specified */ \
		if ((eep->address.addr == 0x0) || \
		    (eep->address.addr == (_addr1 + _addr2))) { \
	\
			/* Check if error event has a priv level specified */ \
			if ((eep->priv == V9_UnInitialised) || \
			    (eep->priv == v9p->state)) { \
	\
				/* Check if error event has a trap level specified */ \
				if ((eep->tl == ERROR_TL_NONE) || (eep->tl == v9p->tl)) { \
	\
					lprintf(sp->gid, "ERROR_TRAP_GEN: TRIGGER: %s @ " \
					    "pc=0x%llx addr 0x%llx @ cycle=0x%llx priv=%d " \
					    "(user=%d, priv=%d, hyperpriv=%d) tl=%d " \
					    "error = %s\n", \
					    (_access == ERROR_ON_LOAD) ? "LOAD" : "STORE", \
					    sp->pc, (_addr1 + _addr2), sp->cycle, v9p->state, \
					    V9_User, V9_Priv, V9_HyperPriv, v9p->tl, \
					    eep->error_str ? eep->error_str : "trap-only"); \
	\
					eep->ee_status = EE_TRIGGERED; \
	\
					sp->error_pending = false; \
	\
					sp->config_procp->proc_typep->trigger_error_trap(sp); \
	\
				} /* tl */ \
			} /* priv */ \
		} /* address */ \
	} /* load/store */ \
skip_error:; \
	} while (0)

#else	/* } ERROR_TRAP_GEN { */
#define ERROR_CHECK_TRIGGER(_addr1, _addr2, _access)	do { } while (0)
#endif	/* ERROR_TRAP_GEN } */


#define LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype)	do { \
	ERROR_CHECK_TRIGGER(_addr1, _addr2, ERROR_ON_LOAD); \
	_LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype); \
	} while (0)

#define STORE_OP(_op, _addr1, _addr2, _dest, _accesstype)	do { \
	ERROR_CHECK_TRIGGER(_addr1, _addr2, ERROR_ON_STORE); \
	_STORE_OP(_op, _addr1, _addr2, _dest, _accesstype); \
	} while (0)
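
/*
 * Usage sketch (taken from the integer ops further below): an
 * instruction body calls the wrapper directly, e.g.
 *
 *	LOAD_OP( MA_ldu32, Rsrc1, Simm16, Rdest, uint32_t );
 *
 * which first gives the error-injection machinery a chance to
 * trigger on the effective address (Rsrc1 + Simm16) and only then
 * performs the actual access via _LOAD_OP.
 */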


#if HOST_CPU_BIG_ENDIAN
#define	HOST_MA_stfp64(_v)	_v
#define	HOST_MA_stfp32(_v)	_v
#define	HOST_MA_ldfp64(_v)	_v
#define	HOST_MA_ldfp32(_v)	_v
#define	HOST_MA_st8(_v)		_v
#define	HOST_MA_st16(_v)	_v
#define	HOST_MA_st32(_v)	_v
#define	HOST_MA_st64(_v)	_v
#define	HOST_MA_lds8(_v)	_v
#define	HOST_MA_ldu8(_v)	_v
#define	HOST_MA_lds16(_v)	_v
#define	HOST_MA_lds32(_v)	_v
#define	HOST_MA_ldu16(_v)	_v
#define	HOST_MA_ldu32(_v)	_v
#define	HOST_MA_ldu64(_v)	_v
#endif
#if HOST_CPU_LITTLE_ENDIAN
#define	HOST_MA_stfp64(_v)	BSWAP_64(_v)
#define	HOST_MA_stfp32(_v)	BSWAP_32(_v)
#define	HOST_MA_ldfp64(_v)	BSWAP_64(_v)
#define	HOST_MA_ldfp32(_v)	BSWAP_32(_v)
#define	HOST_MA_st8(_v)		_v
#define	HOST_MA_st16(_v)	BSWAP_16(_v)
#define	HOST_MA_st32(_v)	BSWAP_32(_v)
#define	HOST_MA_st64(_v)	BSWAP_64(_v)
#define	HOST_MA_lds8(_v)	_v
#define	HOST_MA_ldu8(_v)	_v
#define	HOST_MA_lds16(_v)	((uint64_t)(sint16_t)(BSWAP_16(_v)))
#define	HOST_MA_lds32(_v)	((uint64_t)(sint32_t)(BSWAP_32(_v)))
#define	HOST_MA_ldu16(_v)	BSWAP_16(_v)
#define	HOST_MA_ldu32(_v)	BSWAP_32(_v)
#define	HOST_MA_ldu64(_v)	BSWAP_64(_v)
#endif
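
/*
 * Worked example (a sketch of the little-endian case): simulated
 * memory holds guest data in SPARC (big-endian) byte order, so a
 * guest 16-bit value 0xff80 sits in host memory as the bytes
 * 0xff, 0x80 and is fetched by the host as 0x80ff.  HOST_MA_lds16
 * byte-swaps that back to 0xff80 and sign-extends it via
 * (uint64_t)(sint16_t), yielding 0xffffffffffffff80 (-128) in the
 * 64-bit destination register.  On a big-endian host every one of
 * these macros is the identity.
 */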

#define _LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype)	do { \
	tvaddr_t addr; \
	tvaddr_t chk; \
	long ridx; \
	xdcache_line_t * xlp; \
	\
	addr = (_addr1) + (_addr2); \
	\
	/* miss if not aligned or tag miss */ \
	ridx = (addr >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; \
	xlp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + \
	    ridx); \
	chk = (addr & (XDCACHE_TAG_MASK|XDCACHE_ALIGN_MASK)) | \
	    XDCACHE_READ_PERM; \
	chk |= sp->tagstate; \
	chk ^= xlp->tag; \
	chk &= XDCACHE_READ_PERM | XDCACHE_TAG_MASK | \
	    XCACHE_TAGSTATE_MASK | ((1<<(_op & MA_Size_Mask))-1); \
	if (chk != (tvaddr_t)0) { \
		XDC_MISS(sp); \
		sp->xdc.miss(sp, (uint64_t *)&(_dest), addr, _op); \
		return; \
	} \
	XDC_HIT(sp); \
	\
	_dest = HOST_##_op((uint64_t)*(_accesstype *)(addr + xlp->offset)); \
	} while (0)
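
/*
 * Note on the combined check above (assuming, as the mask usage
 * suggests, that the low bits of _op encode log2 of the access size
 * in bytes): chk folds the line tag, the access permission bit, the
 * tagstate and the low address bits into one word, so a single
 * compare against zero covers all four miss causes.  For example, an
 * MA_ldu32 access has (1 << (_op & MA_Size_Mask)) - 1 == 0x3, so any
 * set bit in addr & 0x3 (a misaligned word) leaves chk non-zero and
 * diverts the access to the slow-path sp->xdc.miss() handler,
 * exactly as a tag, permission or tagstate mismatch would.
 */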



#define _STORE_OP(_op, _addr1, _addr2, _dest, _accesstype)	do { \
	tvaddr_t addr; \
	tvaddr_t chk; \
	long ridx; \
	xdcache_line_t * xlp; \
	\
	addr = (_addr1) + (_addr2); \
	\
	/* miss if not aligned or tag miss */ \
	ridx = (addr >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; \
	xlp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + \
	    ridx); \
	chk = (addr & (XDCACHE_TAG_MASK|XDCACHE_ALIGN_MASK)) | \
	    XDCACHE_WRITE_PERM; \
	chk |= sp->tagstate; \
	chk ^= xlp->tag; \
	chk &= XDCACHE_WRITE_PERM | XDCACHE_TAG_MASK | \
	    XCACHE_TAGSTATE_MASK | ((1<<(_op & MA_Size_Mask))-1); \
	if (chk != (tvaddr_t)0) { \
		XDC_MISS(sp); \
		sp->xdc.miss(sp, (uint64_t *)&(_dest), addr, _op); \
		return; \
	} \
	XDC_HIT(sp); \
	\
	*(_accesstype *)(addr + xlp->offset) = HOST_##_op((_accesstype)(_dest)); \
	} while (0)




/*-----------------------------------------------------------*/

	/*
	** Misc pseudo instructions
	*/

IMPL( noop )
ENDI


IMPL( zero_reg )
	Rdest = 0;
ENDI

IMPL( move_reg )
	Rdest = Rsrc1;
ENDI

IMPL( move_uimm )
	Rdest = Uimm32;
ENDI

IMPL( move_simm )
	Rdest = Simm32;
ENDI

/*-----------------------------------------------------------*/

	/*
	** Arithmetic instructions
	*/

IMPL( add_imm )
	Rdest = Rsrc1 + Simm16;
ENDI

IMPL( add_rrr )
	Rdest = Rsrc1 + Rsrc2;
ENDI

/* IMPL( sub_imm ) handled by add_imm by negating the immediate ! */

IMPL( sub_rrr )
	Rdest = Rsrc1 - Rsrc2;
ENDI


/*-----------------------------------------------------------*/

	/*
	** Logic instructions
	*/

IMPL( and_imm )
	Rdest = Rsrc1 & Simm16;
ENDI

IMPL( and_rrr )
	Rdest = Rsrc1 & Rsrc2;
ENDI


IMPL( andn_rrr )
	Rdest = Rsrc1 & ~Rsrc2;
ENDI


IMPL( or_imm )
	Rdest = Rsrc1 | Simm16;
ENDI

IMPL( or_rrr )
	Rdest = Rsrc1 | Rsrc2;
ENDI


IMPL( orn_rrr )
	Rdest = Rsrc1 | ~Rsrc2;
ENDI


IMPL( xor_imm )
	Rdest = Rsrc1 ^ Simm16;
ENDI

IMPL( xor_rrr )
	Rdest = Rsrc1 ^ Rsrc2;
ENDI


IMPL( xnor_rrr )
	Rdest = ~(Rsrc1 ^ Rsrc2);
ENDI




/*-----------------------------------------------------------*/



	/*
	 * Shift instruction implementations
	 */


IMPL( sll_imm )
	Rdest = Rsrc1 << Simm16;
ENDI

IMPL( sll64_rrr )
	Rdest = Rsrc1 << (Rsrc2 & 0x3f);
ENDI

IMPL( sll32_rrr )
	Rdest = Rsrc1 << (Rsrc2 & 0x1f);
ENDI



	/*
	 * The SPARC compiler and gcc both generate poor code
	 * for these .. redo in assembler eventually
	 */

IMPL( srl32_imm )
	Rdest = (uint64_t)(((uint32_t)Rsrc1) >> Simm16);
ENDI


IMPL( srl64_imm )
	Rdest = Rsrc1 >> Simm16;
ENDI


IMPL( srl32_rrr )
	uint32_t temp;
	temp = (uint32_t)Rsrc1;
	temp >>= Rsrc2 & 0x1f;
	Rdest = (uint64_t)temp;
ENDI

IMPL( srl64_rrr )
	Rdest = Rsrc1 >> (Rsrc2 & 0x3f);
ENDI



IMPL( sra32_imm )
	Rdest = (sint64_t)(((sint32_t)Rsrc1) >> Simm16);
ENDI


IMPL( sra64_imm )
	SRdest = SRsrc1 >> Simm16;
ENDI


IMPL( sra32_rrr )
	sint32_t temp;
	temp = (sint32_t)Rsrc1;
	temp >>= Rsrc2 & 0x1f;
	Rdest = (sint64_t)temp;
ENDI

IMPL( sra64_rrr )
	Rdest = (uint64_t)(SRsrc1 >> (Rsrc2 & 0x3f));
ENDI



	/*
	 * Multiply operations
	 */

IMPL( mul_imm )
	Rdest = Rsrc1 * Simm16;
ENDI


IMPL( mul_rrr )
	Rdest = Rsrc1 * Rsrc2;
ENDI





	/*
	 * Load and store operations ...
	 *
	 * Eventually better managed in assembler
	 */

IMPL( ldu8_imm )
	LOAD_OP( MA_ldu8, Rsrc1, Simm16, Rdest, uint8_t);
ENDI

IMPL( ldu16_imm )
	LOAD_OP( MA_ldu16, Rsrc1, Simm16, Rdest, uint16_t);
ENDI

IMPL( ldu32_imm )
	LOAD_OP( MA_ldu32, Rsrc1, Simm16, Rdest, uint32_t);
ENDI

IMPL( ld64_imm )
	LOAD_OP( MA_ldu64, Rsrc1, Simm16, Rdest, uint64_t);
ENDI


IMPL( lds8_imm )
	LOAD_OP( MA_lds8, Rsrc1, Simm16, Rdest, sint8_t);
ENDI

IMPL( lds16_imm )
	LOAD_OP( MA_lds16, Rsrc1, Simm16, Rdest, sint16_t);
ENDI

IMPL( lds32_imm )
	LOAD_OP( MA_lds32, Rsrc1, Simm16, Rdest, sint32_t);
ENDI


IMPL( st8_imm )
	STORE_OP( MA_st8, Rsrc1, Simm16, Rdest, uint8_t);
ENDI

IMPL( st16_imm )
	STORE_OP( MA_st16, Rsrc1, Simm16, Rdest, uint16_t);
ENDI

IMPL( st32_imm )
	STORE_OP( MA_st32, Rsrc1, Simm16, Rdest, uint32_t);
ENDI

IMPL( st64_imm )
	STORE_OP( MA_st64, Rsrc1, Simm16, Rdest, uint64_t);
ENDI


IMPL( ldu8_rrr )
	LOAD_OP( MA_ldu8, Rsrc1, Rsrc2, Rdest, uint8_t);
ENDI

IMPL( ldu16_rrr )
	LOAD_OP( MA_ldu16, Rsrc1, Rsrc2, Rdest, uint16_t);
ENDI

IMPL( ldu32_rrr )
	LOAD_OP( MA_ldu32, Rsrc1, Rsrc2, Rdest, uint32_t);
ENDI

IMPL( ld64_rrr )
	LOAD_OP( MA_ldu64, Rsrc1, Rsrc2, Rdest, uint64_t);
ENDI


IMPL( lds8_rrr )
	LOAD_OP( MA_lds8, Rsrc1, Rsrc2, Rdest, sint8_t);
ENDI

IMPL( lds16_rrr )
	LOAD_OP( MA_lds16, Rsrc1, Rsrc2, Rdest, sint16_t);
ENDI

IMPL( lds32_rrr )
	LOAD_OP( MA_lds32, Rsrc1, Rsrc2, Rdest, sint32_t);
ENDI


IMPL( st8_rrr )
	STORE_OP( MA_st8, Rsrc1, Rsrc2, Rdest, uint8_t);
ENDI

IMPL( st16_rrr )
	STORE_OP( MA_st16, Rsrc1, Rsrc2, Rdest, uint16_t);
ENDI

IMPL( st32_rrr )
	STORE_OP( MA_st32, Rsrc1, Rsrc2, Rdest, uint32_t);
ENDI

IMPL( st64_rrr )
	STORE_OP( MA_st64, Rsrc1, Rsrc2, Rdest, uint64_t);
ENDI


	/*
	 * Versions for floating point access
	 */
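
/*
 * Each FP access below first verifies (unless FP_DECODE_DISABLED is
 * defined, in which case the decode stage is assumed to have done
 * the check already) that the FPU is enabled for this strand; if it
 * is not, the op posts a precise fp_disabled trap and abandons the
 * access without touching memory.
 */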

IMPL( ldfp32_imm )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	LOAD_OP( MA_ldfp32, Rsrc1, Simm16, F32dest, ieee_fp32_t);
ENDI

IMPL( ldfp64_imm )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	LOAD_OP( MA_ldfp64, Rsrc1, Simm16, F64dest, ieee_fp64_t);
ENDI

IMPL( ldfp32_rrr )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	LOAD_OP( MA_ldfp32, Rsrc1, Rsrc2, F32dest, ieee_fp32_t);
ENDI

IMPL( ldfp64_rrr )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	LOAD_OP( MA_ldfp64, Rsrc1, Rsrc2, F64dest, ieee_fp64_t);
ENDI


IMPL( stfp32_imm )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	STORE_OP( MA_stfp32, Rsrc1, Simm16, F32dest, ieee_fp32_t);
ENDI

IMPL( stfp64_imm )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	STORE_OP( MA_stfp64, Rsrc1, Simm16, F64dest, ieee_fp64_t);
ENDI

IMPL( stfp32_rrr )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	STORE_OP( MA_stfp32, Rsrc1, Rsrc2, F32dest, ieee_fp32_t);
ENDI

IMPL( stfp64_rrr )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	STORE_OP( MA_stfp64, Rsrc1, Rsrc2, F64dest, ieee_fp64_t);
ENDI

#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */

IMPL( ldfp128_imm )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	LOAD_OP( MA_ldfp128, Rsrc1, Simm16, Rdest, ieee_fp128_t);
ENDI

IMPL( ldfp128_rrr )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	LOAD_OP( MA_ldfp128, Rsrc1, Rsrc2, Rdest, ieee_fp128_t);
ENDI

IMPL( stfp128_imm )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	STORE_OP( MA_stfp128, Rsrc1, Simm16, Rdest, ieee_fp128_t);
ENDI

IMPL( stfp128_rrr )
#ifndef FP_DECODE_DISABLED
	sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
	if (!v9p->fpu_on) {
		v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
		return;
	}
#endif	/* FP_DECODE_DISABLED */
	STORE_OP( MA_stfp128, Rsrc1, Rsrc2, Rdest, ieee_fp128_t);
ENDI

#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */