/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: execinstns.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "@(#)execinstns.c 1.15 07/01/09 SMI"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "basics.h"
#include "fatal.h"
#include "allocate.h"
#include "simcore.h"
#include "config.h"
#include "bswap.h"
#include "xicache.h"
#include "xdcache.h"
#ifndef FP_DECODE_DISABLED
#include "tsparcv9.h"
#include "tsparcv9internal.h"
#endif /* FP_DECODE_DISABLED */
#include "execinstns.h" /* autogenerated definitions of instns */
/*
* Core simulator execution instructions.
*
 * CPU-specific instruction ops are typically in
* $SRCDIR/proc/processorinstns.c
*/
#define IMPL( _n ) \
void decoded_impl_##_n(simcpu_t *sp, xicache_instn_t * xcip) {
#define ENDI sp->pc = sp->npc; sp->npc += 4; } /* SPARC !! FIXME */
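/*
 * For illustration only (not part of the build): given the IMPL/ENDI
 * macros above, an instruction body such as
 *
 *	IMPL( add_rrr )
 *		Rdest = Rsrc1 + Rsrc2;
 *	ENDI
 *
 * expands to roughly the following, assuming Rdest, Rsrc1 and Rsrc2
 * are accessor macros (pulled in via execinstns.h) that resolve
 * register fields out of the decoded instruction xcip:
 *
 *	void decoded_impl_add_rrr(simcpu_t *sp, xicache_instn_t *xcip)
 *	{
 *		Rdest = Rsrc1 + Rsrc2;
 *		sp->pc = sp->npc;
 *		sp->npc += 4;
 *	}
 */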
/* Support for load and store operations */
#if ERROR_TRAP_GEN /* { */
#define ERROR_CHECK_TRIGGER(_addr1, _addr2, _access) do { \
sparcv9_cpu_t *v9p = (sparcv9_cpu_t *)(sp->specificp); \
error_event_t *eep = (error_event_t *)(sp->eep); \
\
if ((sp->error_pending == false) || \
(sp->error_cycle_reached == false) || \
(sp->eep->options.bits.pc)) { \
goto skip_error; \
} \
\
/* Check if error event has a load or store specified */ \
if ((eep->address.access == ERROR_ON_LOAD_OR_STORE) || \
(_access == eep->address.access)) { \
\
/* Check if error event has an address specified */ \
if ((eep->address.addr == 0x0) || \
(eep->address.addr == (_addr1 + _addr2))) { \
\
/* Check if error event has a priv level specified */ \
if ((eep->priv == V9_UnInitialised) || \
(eep->priv == v9p->state)) { \
\
/* Check if error event has a trap level specified */ \
if ((eep->tl == ERROR_TL_NONE) || (eep->tl == v9p->tl)) { \
\
lprintf(sp->gid, "ERROR_TRAP_GEN: TRIGGER: %s @ " \
"pc=0x%llx addr 0x%llx @ cycle=0x%llx priv=%d " \
"(user=%d, priv=%d, hyperpriv=%d) tl=%d " \
"error = %s\n", \
(_access == ERROR_ON_LOAD) ? "LOAD" : "STORE", \
sp->pc, (_addr1 + _addr2), sp->cycle, v9p->state, \
V9_User, V9_Priv, V9_HyperPriv, v9p->tl, \
eep->error_str ? eep->error_str : "trap-only"); \
\
eep->ee_status = EE_TRIGGERED; \
\
sp->error_pending = false; \
\
sp->config_procp->proc_typep->trigger_error_trap(sp); \
\
} /* tl */ \
} /* priv */ \
} /* address */ \
} /* load/store */ \
skip_error:; \
} while (0)
#else /* } ERROR_TRAP_GEN { */
#define ERROR_CHECK_TRIGGER(_addr1, _addr2, _access) do { } while (0)
#endif /* ERROR_TRAP_GEN } */
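/*
 * Summary of the trigger conditions above: the check is skipped entirely
 * unless an error is pending, its trigger cycle has been reached, and the
 * event is not a pc-based one. The event then fires only if every
 * specified field matches the current access, where each field also has
 * a wildcard value: access ERROR_ON_LOAD_OR_STORE matches both loads and
 * stores, address 0x0 matches any address, priv V9_UnInitialised matches
 * any privilege state, and tl ERROR_TL_NONE matches any trap level. On a
 * full match the event is marked EE_TRIGGERED, error_pending is cleared,
 * and the processor-specific trigger_error_trap() is invoked. When
 * ERROR_TRAP_GEN is not configured the macro compiles away entirely.
 */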
#define LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
ERROR_CHECK_TRIGGER(_addr1, _addr2, ERROR_ON_LOAD); \
_LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype); \
} while (0)
#define STORE_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
ERROR_CHECK_TRIGGER(_addr1, _addr2, ERROR_ON_STORE); \
_STORE_OP(_op, _addr1, _addr2, _dest, _accesstype); \
} while (0)
#if HOST_CPU_BIG_ENDIAN
#define HOST_MA_stfp64(_v) _v
#define HOST_MA_stfp32(_v) _v
#define HOST_MA_ldfp64(_v) _v
#define HOST_MA_ldfp32(_v) _v
#define HOST_MA_st8(_v) _v
#define HOST_MA_st16(_v) _v
#define HOST_MA_st32(_v) _v
#define HOST_MA_st64(_v) _v
#define HOST_MA_lds8(_v) _v
#define HOST_MA_ldu8(_v) _v
#define HOST_MA_lds16(_v) _v
#define HOST_MA_lds32(_v) _v
#define HOST_MA_ldu16(_v) _v
#define HOST_MA_ldu32(_v) _v
#define HOST_MA_ldu64(_v) _v
#endif
#if HOST_CPU_LITTLE_ENDIAN
#define HOST_MA_stfp64(_v) BSWAP_64(_v)
#define HOST_MA_stfp32(_v) BSWAP_32(_v)
#define HOST_MA_ldfp64(_v) BSWAP_64(_v)
#define HOST_MA_ldfp32(_v) BSWAP_32(_v)
#define HOST_MA_st8(_v) _v
#define HOST_MA_st16(_v) BSWAP_16(_v)
#define HOST_MA_st32(_v) BSWAP_32(_v)
#define HOST_MA_st64(_v) BSWAP_64(_v)
#define HOST_MA_lds8(_v) _v
#define HOST_MA_ldu8(_v) _v
#define HOST_MA_lds16(_v) ((uint64_t)(sint16_t)(BSWAP_16(_v)))
#define HOST_MA_lds32(_v) ((uint64_t)(sint32_t)(BSWAP_32(_v)))
#define HOST_MA_ldu16(_v) BSWAP_16(_v)
#define HOST_MA_ldu32(_v) BSWAP_32(_v)
#define HOST_MA_ldu64(_v) BSWAP_64(_v)
#endif
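/*
 * The simulated SPARC memory image is kept in target (big-endian) byte
 * order, so little-endian hosts must byte-swap on every access, and the
 * signed load variants must swap before sign-extending. Worked example
 * (assuming BSWAP_16 swaps the low two bytes): the target stores the
 * 16-bit value 0xfffe (-2) as the bytes ff fe; a little-endian host
 * reading those bytes as a uint16_t sees 0xfeff; BSWAP_16 restores
 * 0xfffe; the (sint16_t) cast then sign-extends to the full 64-bit
 * result 0xfffffffffffffffe.
 */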
#define _LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
tvaddr_t addr; \
tvaddr_t chk; \
long ridx; \
xdcache_line_t * xlp; \
\
addr = (_addr1) + (_addr2); \
\
/* miss if not aligned or tag miss */ \
ridx = (addr >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; \
xlp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + \
ridx); \
chk = (addr & (XDCACHE_TAG_MASK|XDCACHE_ALIGN_MASK)) | \
XDCACHE_READ_PERM; \
chk |= sp->tagstate; \
chk ^= xlp->tag; \
chk &= XDCACHE_READ_PERM | XDCACHE_TAG_MASK | \
XCACHE_TAGSTATE_MASK | ((1<<(_op & MA_Size_Mask))-1); \
if (chk != (tvaddr_t)0) { \
XDC_MISS(sp); \
sp->xdc.miss(sp, (uint64_t *)&(_dest), addr, _op); \
return; \
} \
XDC_HIT(sp); \
\
_dest = HOST_##_op((uint64_t)*(_accesstype *)(addr + xlp->offset)); \
} while (0)
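/*
 * The hit test above folds four checks into a single XOR-and-mask
 * compare against the cached line tag: the address tag bits, the
 * required permission bit, the current tagstate (context), and
 * alignment. The ((1<<(_op & MA_Size_Mask))-1) term keeps the low
 * address bits in the comparison; assuming line tags store those bits
 * as zero, any misaligned access leaves them set in chk and forces a
 * miss. Note that the early return on a miss skips the ENDI pc/npc
 * update, so the slow-path xdc.miss() handler is responsible for
 * completing (or trapping) the access and advancing the pc itself.
 */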
#define _STORE_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
tvaddr_t addr; \
tvaddr_t chk; \
long ridx; \
xdcache_line_t * xlp; \
\
addr = (_addr1) + (_addr2); \
\
/* miss if not aligned or tag miss */ \
ridx = (addr >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; \
xlp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + \
ridx); \
chk = (addr & (XDCACHE_TAG_MASK|XDCACHE_ALIGN_MASK)) | \
XDCACHE_WRITE_PERM; \
chk |= sp->tagstate; \
chk ^= xlp->tag; \
chk &= XDCACHE_WRITE_PERM | XDCACHE_TAG_MASK | \
XCACHE_TAGSTATE_MASK | ((1<<(_op & MA_Size_Mask))-1); \
if (chk != (tvaddr_t)0) { \
XDC_MISS(sp); \
sp->xdc.miss(sp, (uint64_t *)&(_dest), addr, _op); \
return; \
} \
XDC_HIT(sp); \
\
*(_accesstype *)(addr + xlp->offset) = HOST_##_op((_accesstype)(_dest)); \
} while (0)
/*-----------------------------------------------------------*/
/*
** Misc pseudo instructions
*/
IMPL( noop )
ENDI
IMPL( zero_reg )
Rdest = 0;
ENDI
IMPL( move_reg )
Rdest = Rsrc1;
ENDI
IMPL( move_uimm )
Rdest = Uimm32;
ENDI
IMPL( move_simm )
Rdest = Simm32;
ENDI
/*-----------------------------------------------------------*/
/*
** Arithmetic instructions
*/
IMPL( add_imm )
Rdest = Rsrc1 + Simm16;
ENDI
IMPL( add_rrr )
Rdest = Rsrc1 + Rsrc2;
ENDI
/* IMPL( sub_imm ) handled by add_imm by negating the immediate ! */
IMPL( sub_rrr )
Rdest = Rsrc1 - Rsrc2;
ENDI
/*-----------------------------------------------------------*/
/*
** Logic instructions
*/
IMPL( and_imm )
Rdest = Rsrc1 & Simm16;
ENDI
IMPL( and_rrr )
Rdest = Rsrc1 & Rsrc2;
ENDI
IMPL( andn_rrr )
Rdest = Rsrc1 & ~Rsrc2;
ENDI
IMPL( or_imm )
Rdest = Rsrc1 | Simm16;
ENDI
IMPL( or_rrr )
Rdest = Rsrc1 | Rsrc2;
ENDI
IMPL( orn_rrr )
Rdest = Rsrc1 | ~Rsrc2;
ENDI
IMPL( xor_imm )
Rdest = Rsrc1 ^ Simm16;
ENDI
IMPL( xor_rrr )
Rdest = Rsrc1 ^ Rsrc2;
ENDI
IMPL( xnor_rrr )
Rdest = ~(Rsrc1 ^ Rsrc2);
ENDI
/*-----------------------------------------------------------*/
/*
* Shift instruction implementations
*/
IMPL( sll_imm )
Rdest = Rsrc1 << Simm16;
ENDI
IMPL( sll64_rrr )
Rdest = Rsrc1 << (Rsrc2 & 0x3f);
ENDI
IMPL( sll32_rrr )
Rdest = Rsrc1 << (Rsrc2 & 0x1f);
ENDI
/*
 * Both the Sun SPARC compiler and gcc generate poor code
 * for these; redo in assembler eventually.
 */
IMPL( srl32_imm )
Rdest = (uint64_t)(((uint32_t)Rsrc1) >> Simm16);
ENDI
IMPL( srl64_imm )
Rdest = Rsrc1 >> Simm16;
ENDI
IMPL( srl32_rrr )
uint32_t temp;
temp = (uint32_t)Rsrc1;
temp >>= Rsrc2 & 0x1f;
Rdest = (uint64_t)temp;
ENDI
IMPL( srl64_rrr )
Rdest = Rsrc1 >> (Rsrc2 & 0x3f);
ENDI
IMPL( sra32_imm )
Rdest = (sint64_t)(((sint32_t)Rsrc1) >> Simm16);
ENDI
IMPL( sra64_imm )
SRdest = SRsrc1 >> Simm16;
ENDI
IMPL( sra32_rrr )
sint32_t temp;
temp = (sint32_t)Rsrc1;
temp >>= Rsrc2 & 0x1f;
Rdest = (sint64_t)temp;
ENDI
IMPL( sra64_rrr )
Rdest = (uint64_t)(SRsrc1 >> (Rsrc2 & 0x3f));
ENDI
/*
* Multiply operations
*/
IMPL( mul_imm )
Rdest = Rsrc1 * Simm16;
ENDI
IMPL( mul_rrr )
Rdest = Rsrc1 * Rsrc2;
ENDI
/*
 * Load and store operations ...
 *
 * Eventually these would be better implemented in assembler.
 */
IMPL( ldu8_imm )
LOAD_OP( MA_ldu8, Rsrc1, Simm16, Rdest, uint8_t);
ENDI
IMPL( ldu16_imm )
LOAD_OP( MA_ldu16, Rsrc1, Simm16, Rdest, uint16_t);
ENDI
IMPL( ldu32_imm )
LOAD_OP( MA_ldu32, Rsrc1, Simm16, Rdest, uint32_t);
ENDI
IMPL( ld64_imm )
LOAD_OP( MA_ldu64, Rsrc1, Simm16, Rdest, uint64_t);
ENDI
IMPL( lds8_imm )
LOAD_OP( MA_lds8, Rsrc1, Simm16, Rdest, sint8_t);
ENDI
IMPL( lds16_imm )
LOAD_OP( MA_lds16, Rsrc1, Simm16, Rdest, sint16_t);
ENDI
IMPL( lds32_imm )
LOAD_OP( MA_lds32, Rsrc1, Simm16, Rdest, sint32_t);
ENDI
IMPL( st8_imm )
STORE_OP( MA_st8, Rsrc1, Simm16, Rdest, uint8_t);
ENDI
IMPL( st16_imm )
STORE_OP( MA_st16, Rsrc1, Simm16, Rdest, uint16_t);
ENDI
IMPL( st32_imm )
STORE_OP( MA_st32, Rsrc1, Simm16, Rdest, uint32_t);
ENDI
IMPL( st64_imm )
STORE_OP( MA_st64, Rsrc1, Simm16, Rdest, uint64_t);
ENDI
IMPL( ldu8_rrr )
LOAD_OP( MA_ldu8, Rsrc1, Rsrc2, Rdest, uint8_t);
ENDI
IMPL( ldu16_rrr )
LOAD_OP( MA_ldu16, Rsrc1, Rsrc2, Rdest, uint16_t);
ENDI
IMPL( ldu32_rrr )
LOAD_OP( MA_ldu32, Rsrc1, Rsrc2, Rdest, uint32_t);
ENDI
IMPL( ld64_rrr )
LOAD_OP( MA_ldu64, Rsrc1, Rsrc2, Rdest, uint64_t);
ENDI
IMPL( lds8_rrr )
LOAD_OP( MA_lds8, Rsrc1, Rsrc2, Rdest, sint8_t);
ENDI
IMPL( lds16_rrr )
LOAD_OP( MA_lds16, Rsrc1, Rsrc2, Rdest, sint16_t);
ENDI
IMPL( lds32_rrr )
LOAD_OP( MA_lds32, Rsrc1, Rsrc2, Rdest, sint32_t);
ENDI
IMPL( st8_rrr )
STORE_OP( MA_st8, Rsrc1, Rsrc2, Rdest, uint8_t);
ENDI
IMPL( st16_rrr )
STORE_OP( MA_st16, Rsrc1, Rsrc2, Rdest, uint16_t);
ENDI
IMPL( st32_rrr )
STORE_OP( MA_st32, Rsrc1, Rsrc2, Rdest, uint32_t);
ENDI
IMPL( st64_rrr )
STORE_OP( MA_st64, Rsrc1, Rsrc2, Rdest, uint64_t);
ENDI
/*
* Versions for floating point access
*/
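/*
 * Unless FP_DECODE_DISABLED is defined, every FP access below first
 * checks that the FPU is enabled for the current context and, if not,
 * posts a precise fp_disabled trap instead of performing the access.
 * When FP_DECODE_DISABLED is defined these run-time checks are compiled
 * out, presumably because the check is made once at decode time instead.
 */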
IMPL( ldfp32_imm )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp32, Rsrc1, Simm16, F32dest, ieee_fp32_t);
ENDI
IMPL( ldfp64_imm )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp64, Rsrc1, Simm16, F64dest, ieee_fp64_t);
ENDI
IMPL( ldfp32_rrr )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp32, Rsrc1, Rsrc2, F32dest, ieee_fp32_t);
ENDI
IMPL( ldfp64_rrr )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp64, Rsrc1, Rsrc2, F64dest, ieee_fp64_t);
ENDI
IMPL( stfp32_imm )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp32, Rsrc1, Simm16, F32dest, ieee_fp32_t);
ENDI
IMPL( stfp64_imm )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp64, Rsrc1, Simm16, F64dest, ieee_fp64_t);
ENDI
IMPL( stfp32_rrr )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp32, Rsrc1, Rsrc2, F32dest, ieee_fp32_t);
ENDI
IMPL( stfp64_rrr )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp64, Rsrc1, Rsrc2, F64dest, ieee_fp64_t);
ENDI
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
IMPL( ldfp128_imm )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp128, Rsrc1, Simm16, Rdest, ieee_fp128_t);
ENDI
IMPL( ldfp128_rrr )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp128, Rsrc1, Rsrc2, Rdest, ieee_fp128_t);
ENDI
IMPL( stfp128_imm )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp128, Rsrc1, Simm16, Rdest, ieee_fp128_t);
ENDI
IMPL( stfp128_rrr )
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp128, Rsrc1, Rsrc2, Rdest, ieee_fp128_t);
ENDI
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */