/*
* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: execinstns.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* ========== Copyright Header End ============================================
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "@(#)execinstns.c 1.15 07/01/09 SMI"
#ifndef FP_DECODE_DISABLED
#include "tsparcv9internal.h"
#endif /* FP_DECODE_DISABLED */
#include "execinstns.h" /* autogenerated definitions of instns */
/*
* Core simulator execution instructions.
* CPU-specific instruction ops are typically in
* $SRCDIR/proc/processorinstns.c
*/
/*
 * NOTE(review): the line below looks like the tail of the IMPL(_n) macro
 * (the "#define IMPL(_n) static void ..." header line is missing from this
 * copy) — it opens the per-instruction executor body that ENDI closes.
 * Confirm against the original execinstns.c.
 */
void decoded_impl_##_n(simcpu_t *sp, xicache_instn_t * xcip) {
/* ENDI terminates an IMPL(...) body: retire the instruction by advancing
 * pc to npc and npc past one 4-byte SPARC instruction, then close the brace
 * opened by IMPL. */
#define ENDI sp->pc = sp->npc; sp->npc += 4; } /* SPARC !! FIXME */
/* Support for load and store operations */
#if ERROR_TRAP_GEN /* { */
/*
 * ERROR_CHECK_TRIGGER(_addr1, _addr2, _access)
 *
 * Error-injection hook invoked on every load/store.  It matches the pending
 * error event (sp->eep) against this access — access kind, effective address
 * (_addr1 + _addr2), privilege level, and trap level — and, on a full match,
 * logs the trigger, marks the event EE_TRIGGERED, clears error_pending, and
 * calls the processor's trigger_error_trap() handler.
 *
 * NOTE(review): this copy of the macro appears truncated: the nested ifs are
 * never closed (no closing braces or "} while (0)"), and the lprintf argument
 * list is garbled — the format string abuts the (_access == ERROR_ON_LOAD)
 * conditional with no "%s\n" / comma separator.  Also, the outer condition
 * triggers when error_pending is *false*, which looks inverted.  Restore this
 * macro from the original execinstns.c before building with ERROR_TRAP_GEN.
 */
#define ERROR_CHECK_TRIGGER(_addr1, _addr2, _access) do { \
sparcv9_cpu_t *v9p = (sparcv9_cpu_t *)(sp->specificp); \
error_event_t *eep = (error_event_t *)(sp->eep); \
if ((sp->error_pending == false) || \
(sp->error_cycle_reached == false) || \
(sp->eep->options.bits.pc)) { \
/* Check if error event has a load or store specified */ \
if ((eep->address.access == ERROR_ON_LOAD_OR_STORE) || \
(_access == eep->address.access)) { \
/* Check if error event has an address specified */ \
if ((eep->address.addr == 0x0) || \
(eep->address.addr == (_addr1 + _addr2))) { \
/* Check if error event has a priv level specified */ \
if ((eep->priv == V9_UnInitialised) || \
(eep->priv == v9p->state)) { \
/* Check if error event has a trap level specified */ \
if ((eep->tl == ERROR_TL_NONE) || (eep->tl == v9p->tl)) { \
lprintf(sp->gid, "ERROR_TRAP_GEN: TRIGGER: %s @ " \
"pc=0x%llx addr 0x%llx @ cycle=0x%llx priv=%d " \
"(user=%d, priv=%d, hyperpriv=%d) tl=%d " \
(_access == ERROR_ON_LOAD) ? "LOAD" : "STORE", \
sp->pc, (_addr1 + _addr2), sp->cycle, v9p->state, \
V9_User, V9_Priv, V9_HyperPriv, v9p->tl, \
eep->error_str ? eep->error_str : "trap-only"); \
eep->ee_status = EE_TRIGGERED; \
sp->error_pending = false; \
sp->config_procp->proc_typep->trigger_error_trap(sp); \
#else /* } ERROR_TRAP_GEN { */
/* Error-trap generation disabled: the check compiles away to a no-op. */
#define ERROR_CHECK_TRIGGER(_addr1, _addr2, _access) do { } while (0)
#endif /* ERROR_TRAP_GEN } */
/*
 * Checked load: run the error-injection trigger check for this access,
 * then perform the actual cached load via _LOAD_OP.
 * Wrapped in do { ... } while (0) so the macro behaves as a single
 * statement in any context (the while(0) terminator was missing in the
 * copy under review, leaving the do-block unclosed).
 */
#define LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
ERROR_CHECK_TRIGGER(_addr1, _addr2, ERROR_ON_LOAD); \
_LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype); \
} while (0)
/*
 * Checked store: run the error-injection trigger check for this access,
 * then perform the actual cached store via _STORE_OP.
 * Wrapped in do { ... } while (0) so the macro behaves as a single
 * statement in any context (the while(0) terminator was missing in the
 * copy under review, leaving the do-block unclosed).
 */
#define STORE_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
ERROR_CHECK_TRIGGER(_addr1, _addr2, ERROR_ON_STORE); \
_STORE_OP(_op, _addr1, _addr2, _dest, _accesstype); \
} while (0)
/*
 * Host byte-order adaptation for memory-access values.
 *
 * Simulated SPARC memory is kept in target (big-endian) byte order.  On a
 * little-endian host every multi-byte load/store value must be byte-swapped;
 * on a big-endian host the macros are the identity.
 *
 * NOTE(review): in the copy under review both variants were defined
 * unconditionally, so the BSWAP set silently redefined the identity set and
 * the #if HOST_CPU_LITTLE_ENDIAN block was never terminated.  Restructured
 * into a single #if/#else/#endif so each name is defined exactly once.
 */
#if HOST_CPU_LITTLE_ENDIAN

/* Little-endian host: swap to/from the target's big-endian order. */
#define HOST_MA_stfp64(_v) BSWAP_64(_v)
#define HOST_MA_stfp32(_v) BSWAP_32(_v)
#define HOST_MA_ldfp64(_v) BSWAP_64(_v)
#define HOST_MA_ldfp32(_v) BSWAP_32(_v)
#define HOST_MA_st8(_v) _v
#define HOST_MA_st16(_v) BSWAP_16(_v)
#define HOST_MA_st32(_v) BSWAP_32(_v)
#define HOST_MA_st64(_v) BSWAP_64(_v)
#define HOST_MA_lds8(_v) _v
#define HOST_MA_ldu8(_v) _v
/* Signed loads: swap first, then sign-extend the swapped value to 64 bits. */
#define HOST_MA_lds16(_v) ((uint64_t)(sint16_t)(BSWAP_16(_v)))
#define HOST_MA_lds32(_v) ((uint64_t)(sint32_t)(BSWAP_32(_v)))
#define HOST_MA_ldu16(_v) BSWAP_16(_v)
#define HOST_MA_ldu32(_v) BSWAP_32(_v)
#define HOST_MA_ldu64(_v) BSWAP_64(_v)

#else /* big-endian host: values are already in target byte order */

#define HOST_MA_stfp64(_v) _v
#define HOST_MA_stfp32(_v) _v
#define HOST_MA_ldfp64(_v) _v
#define HOST_MA_ldfp32(_v) _v
#define HOST_MA_st8(_v) _v
#define HOST_MA_st16(_v) _v
#define HOST_MA_st32(_v) _v
#define HOST_MA_st64(_v) _v
#define HOST_MA_lds8(_v) _v
#define HOST_MA_ldu8(_v) _v
#define HOST_MA_lds16(_v) _v
#define HOST_MA_lds32(_v) _v
#define HOST_MA_ldu16(_v) _v
#define HOST_MA_ldu32(_v) _v
#define HOST_MA_ldu64(_v) _v

#endif /* HOST_CPU_LITTLE_ENDIAN */
/*
 * _LOAD_OP: fast-path load through the per-cpu xdcache.  Computes the
 * effective address, indexes the cache line array, and builds a combined
 * tag/permission/alignment check word "chk"; on mismatch it takes the
 * miss path (sp->xdc.miss), otherwise it reads through the cached
 * host-address offset and byte-order-adjusts via HOST_##_op.
 *
 * NOTE(review): this copy of the macro is truncated — the continuation
 * operands at the ends of the "xlp = ... +" and "chk = ... |" lines are
 * missing, as are the else-branch structure, closing braces, and the
 * "} while (0)" terminator.  Restore from the original execinstns.c.
 */
#define _LOAD_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
addr = (_addr1) + (_addr2); \
/* miss if not aligned or tag miss */ \
ridx = (addr >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; \
xlp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + \
chk = (addr & (XDCACHE_TAG_MASK|XDCACHE_ALIGN_MASK)) | \
chk &= XDCACHE_READ_PERM | XDCACHE_TAG_MASK | \
XCACHE_TAGSTATE_MASK | ((1<<(_op & MA_Size_Mask))-1); \
if (chk != (tvaddr_t)0) { \
sp->xdc.miss(sp, (uint64_t *)&(_dest), addr, _op); \
_dest = HOST_##_op((uint64_t)*(_accesstype *)(addr + xlp->offset)); \
/*
 * _STORE_OP: fast-path store through the per-cpu xdcache.  Mirrors
 * _LOAD_OP but checks XDCACHE_WRITE_PERM and writes the byte-order-adjusted
 * value (HOST_##_op) through the cached host-address offset.
 *
 * NOTE(review): truncated in this copy in the same way as _LOAD_OP —
 * missing continuation operands, else-branch structure, closing braces,
 * and the "} while (0)" terminator.  Restore from the original source.
 */
#define _STORE_OP(_op, _addr1, _addr2, _dest, _accesstype) do { \
addr = (_addr1) + (_addr2); \
/* miss if not aligned or tag miss */ \
ridx = (addr >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; \
xlp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + \
chk = (addr & (XDCACHE_TAG_MASK|XDCACHE_ALIGN_MASK)) | \
chk &= XDCACHE_WRITE_PERM | XDCACHE_TAG_MASK | \
XCACHE_TAGSTATE_MASK | ((1<<(_op & MA_Size_Mask))-1); \
if (chk != (tvaddr_t)0) { \
sp->xdc.miss(sp, (uint64_t *)&(_dest), addr, _op); \
*(_accesstype *)(addr + xlp->offset) = HOST_##_op((_accesstype)(_dest)); \
/*-----------------------------------------------------------*/
/* Misc pseudo instructions */
/*-----------------------------------------------------------*/
/* Arithmetic instructions */
/* IMPL( sub_imm ) handled by add_imm by negating the immediate ! */
/*-----------------------------------------------------------*/
/*
 * NOTE(review): instruction-body fragment — computes Rdest = ~(Rsrc1 ^ Rsrc2),
 * i.e. an XNOR of the two source registers.  The IMPL(...) header and ENDI
 * footer that wrap each body are missing from this copy — confirm against
 * the original execinstns.c before building.
 */
Rdest
= ~(Rsrc1
^ Rsrc2
);
/*-----------------------------------------------------------*/
/* Shift instruction implementations */
/*
 * NOTE(review): left-shift instruction-body fragments (IMPL/ENDI wrappers
 * missing in this copy).  First fragment masks the count to 6 bits
 * (64-bit shift); second masks it to 5 bits (32-bit shift).
 */
Rdest
= Rsrc1
<< (Rsrc2
& 0x3f);
/* 32-bit variant: only the low 5 bits of Rsrc2 are used as the count. */
Rdest
= Rsrc1
<< (Rsrc2
& 0x1f);
/*
* The SPARC compiler and gcc generate poor code for these shifts;
* redo them in assembler eventually.
*/
/*
 * NOTE(review): right-shift instruction-body fragments (IMPL/ENDI wrappers
 * missing in this copy).
 */
/* Logical right shift of the low 32 bits by an immediate; zero-extends. */
Rdest
= (uint64_t)(((uint32_t)Rsrc1
) >> Simm16
);
/* 64-bit logical right shift; count is Rsrc2 masked to 6 bits. */
Rdest
= Rsrc1
>> (Rsrc2
& 0x3f);
/* Arithmetic right shift of the low 32 bits by an immediate; the sint32_t
 * cast makes the shift sign-propagating before widening to 64 bits. */
Rdest
= (sint64_t
)(((sint32_t
)Rsrc1
) >> Simm16
);
/* 64-bit arithmetic right shift by an immediate (signed register views). */
SRdest
= SRsrc1
>> Simm16
;
/* 64-bit arithmetic right shift; count is Rsrc2 masked to 6 bits. */
Rdest
= (uint64_t)(SRsrc1
>> (Rsrc2
& 0x3f));
/*
* Load and store operations ...
* eventually better managed in assembler.
*/
/*
 * Integer loads/stores, register + signed-immediate addressing.
 * NOTE(review): these are instruction-body fragments; the IMPL(...)/ENDI
 * wrappers are missing from this copy — confirm against the original.
 */
/* Unsigned loads (8/16/32/64): unsigned access type zero-extends into Rdest. */
LOAD_OP( MA_ldu8
, Rsrc1
, Simm16
, Rdest
, uint8_t);
LOAD_OP( MA_ldu16
, Rsrc1
, Simm16
, Rdest
, uint16_t);
LOAD_OP( MA_ldu32
, Rsrc1
, Simm16
, Rdest
, uint32_t);
LOAD_OP( MA_ldu64
, Rsrc1
, Simm16
, Rdest
, uint64_t);
/* Signed loads (8/16/32): signed access type sign-extends into Rdest. */
LOAD_OP( MA_lds8
, Rsrc1
, Simm16
, Rdest
, sint8_t
);
LOAD_OP( MA_lds16
, Rsrc1
, Simm16
, Rdest
, sint16_t
);
LOAD_OP( MA_lds32
, Rsrc1
, Simm16
, Rdest
, sint32_t
);
/* Stores of the low 8/16/32/64 bits of Rdest. */
STORE_OP( MA_st8
, Rsrc1
, Simm16
, Rdest
, uint8_t);
STORE_OP( MA_st16
, Rsrc1
, Simm16
, Rdest
, uint16_t);
STORE_OP( MA_st32
, Rsrc1
, Simm16
, Rdest
, uint32_t);
STORE_OP( MA_st64
, Rsrc1
, Simm16
, Rdest
, uint64_t);
/*
 * Integer loads/stores, register + register addressing (Rsrc1 + Rsrc2).
 * NOTE(review): instruction-body fragments; the IMPL(...)/ENDI wrappers
 * are missing from this copy — confirm against the original.
 */
/* Unsigned loads (8/16/32/64): unsigned access type zero-extends into Rdest. */
LOAD_OP( MA_ldu8
, Rsrc1
, Rsrc2
, Rdest
, uint8_t);
LOAD_OP( MA_ldu16
, Rsrc1
, Rsrc2
, Rdest
, uint16_t);
LOAD_OP( MA_ldu32
, Rsrc1
, Rsrc2
, Rdest
, uint32_t);
LOAD_OP( MA_ldu64
, Rsrc1
, Rsrc2
, Rdest
, uint64_t);
/* Signed loads (8/16/32): signed access type sign-extends into Rdest. */
LOAD_OP( MA_lds8
, Rsrc1
, Rsrc2
, Rdest
, sint8_t
);
LOAD_OP( MA_lds16
, Rsrc1
, Rsrc2
, Rdest
, sint16_t
);
LOAD_OP( MA_lds32
, Rsrc1
, Rsrc2
, Rdest
, sint32_t
);
/* Stores of the low 8/16/32/64 bits of Rdest. */
STORE_OP( MA_st8
, Rsrc1
, Rsrc2
, Rdest
, uint8_t);
STORE_OP( MA_st16
, Rsrc1
, Rsrc2
, Rdest
, uint16_t);
STORE_OP( MA_st32
, Rsrc1
, Rsrc2
, Rdest
, uint32_t);
STORE_OP( MA_st64
, Rsrc1
, Rsrc2
, Rdest
, uint64_t);
/* Versions for floating point access */
/*
 * Floating-point loads/stores (32- and 64-bit), in reg+imm and reg+reg
 * addressing forms.  Each op is preceded by a fragment that posts
 * Sparcv9_trap_fp_disabled via the per-cpu sparcv9 state.
 *
 * NOTE(review): the trap fragments below appear truncated — as written each
 * would declare v9p and post the fp_disabled trap unconditionally; the
 * enclosing conditional (presumably a test of the FPU-enabled state) and
 * the IMPL(...)/ENDI wrappers around each body are missing from this copy.
 * Confirm against the original execinstns.c.
 */
/* 32-bit FP load, reg + immediate addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp32
, Rsrc1
, Simm16
, F32dest
, ieee_fp32_t
);
/* 64-bit FP load, reg + immediate addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp64
, Rsrc1
, Simm16
, F64dest
, ieee_fp64_t
);
/* 32-bit FP load, reg + reg addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp32
, Rsrc1
, Rsrc2
, F32dest
, ieee_fp32_t
);
/* 64-bit FP load, reg + reg addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp64
, Rsrc1
, Rsrc2
, F64dest
, ieee_fp64_t
);
/* 32-bit FP store, reg + immediate addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp32
, Rsrc1
, Simm16
, F32dest
, ieee_fp32_t
);
/* 64-bit FP store, reg + immediate addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp64
, Rsrc1
, Simm16
, F64dest
, ieee_fp64_t
);
/* 32-bit FP store, reg + reg addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp32
, Rsrc1
, Rsrc2
, F32dest
, ieee_fp32_t
);
/* 64-bit FP store, reg + reg addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp64
, Rsrc1
, Rsrc2
, F64dest
, ieee_fp64_t
);
/*
 * 128-bit (quad) floating-point loads/stores, compiled only for processors
 * that support quad FP.
 *
 * NOTE(review): same truncation as the 32/64-bit FP section above — the
 * fp_disabled trap fragments lack their enclosing conditional and the
 * IMPL(...)/ENDI wrappers are missing from this copy.  Confirm against the
 * original execinstns.c.
 */
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
/* 128-bit FP load, reg + immediate addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp128
, Rsrc1
, Simm16
, Rdest
, ieee_fp128_t
);
/* 128-bit FP load, reg + reg addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
LOAD_OP( MA_ldfp128
, Rsrc1
, Rsrc2
, Rdest
, ieee_fp128_t
);
/* 128-bit FP store, reg + immediate addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp128
, Rsrc1
, Simm16
, Rdest
, ieee_fp128_t
);
/* 128-bit FP store, reg + reg addressing. */
#ifndef FP_DECODE_DISABLED
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
STORE_OP( MA_stfp128
, Rsrc1
, Rsrc2
, Rdest
, ieee_fp128_t
);
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */