/*
 * Initial commit of OpenSPARC T2 architecture model.
 * [OpenSPARC-T2-SAM] / legion / src / procs / sparcv9 / sparcv9instns.c
 */
/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: sparcv9instns.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "@(#)sparcv9instns.c 1.64 07/03/19 SMI"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "basics.h"
#include "fatal.h"
#include "allocate.h"
#include "simcore.h"
#include "config.h"
#include "xicache.h"
#include "xdcache.h"
#include "tsparcv9.h"
#include "tsparcv9internal.h"
#include "sparcv9regs.h"
#include "sparcv9cc.h"
#include "magictraps.h"
#include "hostnative_asm.h" /* autogenerated from hostnative_asm.S */
#include "hostnative.h" /* autogenerated from hostnative.c */
#include "sparcv9decode.h"
#include "fpsim.h"
/*
 * Bail out of an FPU instruction whose non-native (simulated) form is
 * not written yet: record FTT = unimplemented_FPop in the FSR and post
 * a precise fp_exception_other trap.  Expands to multiple statements
 * ending in `return', so it is only usable at the tail of an
 * instruction implementation body.
 */
#define FPU_NOT_IMPLEMENTED(_which) \
FIXME_WARNING(("non-native implementation of " _which \
" not yet completed")); \
sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; \
sp->v9_fsr_ctrl |= SPARCv9_FTT_unimplemented_FPop << V9_FSR_FTT_SHIFT; \
v9p->post_precise_trap(sp, Sparcv9_trap_fp_exception_other); \
return;
/*
 * Implementation(s) of pre-decoded sparcv9 specific instructions.
 */
/*
 * IMPL(_n)/ENDI bracket each instruction implementation.  ENDI advances
 * the PC via NEXT_INSTN; implementations that update the PC themselves
 * (branches) close with ENDDEF instead.
 */
#define IMPL( _n ) void decoded_impl_sparcv9_##_n (simcpu_t *sp, xicache_instn_t * xcip) {
#define ENDI NEXT_INSTN(sp); ENDDEF
#define ENDDEF }
#define _U (1 << V9_fcc_u)
#define _G (1 << V9_fcc_g)
#define _L (1 << V9_fcc_l)
#define _E (1 << V9_fcc_e)
/*
 * Lookup table for FBfcc/FMOVcc style conditions: indexed by the 4-bit
 * fp condition field, each entry is a bitmask of the fcc states
 * (unordered/greater/less/equal) for which the condition holds.
 */
static uint8_t sparcv9_fcc_magic[16] = {
0, /* never */
_U|_G|_L, /* NE */
_G|_L, /* LG */
_U|_L, /* UL */
_L, /* L */
_U|_G, /* UG */
_G, /* G */
_U, /* U */
_U|_G|_L|_E, /* always */
_E, /* E */
_U|_E, /* UE */
_G|_E, /* GE */
_U|_G|_E, /* UGE */
_L|_E, /* LE */
_U|_L|_E, /* ULE */
_G|_L|_E /* O */
};
#undef _U
#undef _G
#undef _L
#undef _E
/*
 * Some FPU instructions that can never cause exceptions still
 * update the FSR - so just clear current exceptions and trap type.
 */
#define FP_CLEAR_CEXC_FTT(_sp) do { \
(_sp)->v9_fsr_exc &= ~V9_FSR_CEXC_MASK; \
(_sp)->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; \
} while (0)
/* ------------------------------------------------------------ */
/*
* Instruction: sparcv9_add_co_imm
*/
/*
 * Integer add family: ADDcc (co = carry-out), ADDC (ci = carry-in),
 * ADDCcc (cico), plus _rd0 variants whose destination is %g0 (flags
 * only, result discarded).  The flag boilerplate shared below:
 *   v = bitwise signed-overflow predicate: (s1 & s2 & ~d) | (~s1 & ~s2 & d)
 *   c = bitwise carry-out predicate:       (s1 & s2) | (~d & (s1 | s2))
 * Bit 63 of v/c/d supplies the xcc flags, bit 31 the icc flags; Z is
 * computed from the full 64-bit result (xcc) and its low 32 bits (icc).
 * NOTE(review): some *_imm forms use Simm16 directly while others use
 * (int64_t)(int32_t)Simm16 - presumably Simm16 already expands to a
 * sign-extended 64-bit value, making the cast redundant; confirm
 * against the decode macros in sparcv9decode.h.
 */
#if !defined(HAS_NATIVE_sparcv9_add_co_imm) /* { */
IMPL(add_co_imm)
int64_t s1 = Rsrc1;
int64_t s2 = Simm16;
int64_t d;
uint64_t v, c;
d = s1 + s2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_co_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_add_co_rrr) /* { */
IMPL(add_co_rrr)
uint64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
d = Rsrc1 + Rsrc2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_co_imm_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_add_co_imm_rd0) /* { */
IMPL(add_co_imm_rd0)
int64_t s1 = Rsrc1, s2 = (int64_t)(int32_t)Simm16, d;
uint64_t v, c;
d = s1 + s2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_co_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_add_co_rrr_rd0) /* { */
IMPL(add_co_rrr_rd0)
int64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
d = s1 + s2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_ci_imm
 */
#if !defined(HAS_NATIVE_sparcv9_add_ci_imm) /* { */
IMPL(add_ci_imm)
int64_t s1 = Rsrc1, s2 = (int64_t)(int32_t)Simm16, d;
d = s1 + s2 + (sp->v9_ccr & 1); /* + icc.c carry-in (CCR bit 0) */
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_ci_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_add_ci_rrr) /* { */
IMPL(add_ci_rrr)
int64_t s1 = Rsrc1, s2 = Rsrc2, d;
d = s1 + s2 + (sp->v9_ccr & 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_cico_imm
 */
#if !defined(HAS_NATIVE_sparcv9_add_cico_imm) /* { */
IMPL(add_cico_imm)
int64_t s1 = Rsrc1, s2 = (int64_t)(int32_t)Simm16, d;
uint64_t v, c;
d = s1 + s2 + (sp->v9_ccr & 1);
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_cico_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_add_cico_rrr) /* { */
IMPL(add_cico_rrr)
int64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
d = s1 + s2 + (sp->v9_ccr & 1);
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_cico_imm_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_add_cico_imm_rd0) /* { */
IMPL(add_cico_imm_rd0)
int64_t s1 = Rsrc1, s2 = (int64_t)(int32_t)Simm16, d;
uint64_t v, c;
d = s1 + s2 + (sp->v9_ccr & 1);
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_add_cico_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_add_cico_rrr_rd0) /* { */
IMPL(add_cico_rrr_rd0)
int64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
d = s1 + s2 + (sp->v9_ccr & 1);
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
* Instruction: sparcv9_sub_co_imm
*/
/*
 * Integer subtract family: SUBcc (co), SUBC (ci = borrow-in), SUBCcc
 * (cico), plus _rd0 (%g0-destination, flags-only) variants.  The
 * subtract flag boilerplate mirrors the add family with the borrow
 * forms of the predicates:
 *   v = (s1 & ~s2 & ~d) | (~s1 & s2 & d)   signed overflow
 *   c = (~s1 & s2) | (d & (~s1 | s2))      borrow-out
 * Bit 63 feeds xcc, bit 31 feeds icc; Z from the 64-bit result (xcc)
 * and its low 32 bits (icc).  CCR bit 0 (icc.c) is the borrow-in.
 */
#if !defined(HAS_NATIVE_sparcv9_sub_co_imm) /* { */
IMPL(sub_co_imm)
int64_t s1 = Rsrc1;
int64_t s2 = Simm16;
int64_t d;
uint64_t v, c;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_co_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_sub_co_rrr) /* { */
IMPL(sub_co_rrr)
int64_t s1 = Rsrc1;
int64_t s2 = Rsrc2;
int64_t d;
uint64_t v, c;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_co_imm_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_sub_co_imm_rd0) /* { */
IMPL(sub_co_imm_rd0)
int64_t s1 = Rsrc1;
int64_t s2 = Simm16;
int64_t d;
uint64_t v, c;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_co_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_sub_co_rrr_rd0) /* { */
IMPL(sub_co_rrr_rd0)
int64_t s1 = Rsrc1;
int64_t s2 = Rsrc2;
int64_t d;
uint64_t v, c;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_ci_imm
 */
#if !defined(HAS_NATIVE_sparcv9_sub_ci_imm) /* { */
IMPL(sub_ci_imm)
int64_t s1 = Rsrc1, s2 = (int64_t)(int32_t)Simm16, d;
d = s1 - s2 - (sp->v9_ccr & 1); /* - icc.c borrow-in (CCR bit 0) */
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_ci_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_sub_ci_rrr) /* { */
IMPL(sub_ci_rrr)
int64_t s1 = Rsrc1, s2 = Rsrc2, d;
d = s1 - s2 - (sp->v9_ccr & 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_cico_imm
 */
#if !defined(HAS_NATIVE_sparcv9_sub_cico_imm) /* { */
IMPL(sub_cico_imm)
int64_t s1 = Rsrc1, s2 = (int64_t)(int32_t)Simm16, d;
uint64_t v, c;
d = s1 - s2 - (sp->v9_ccr & 1);
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_cico_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_sub_cico_rrr) /* { */
IMPL(sub_cico_rrr)
int64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
d = s1 - s2 - (sp->v9_ccr & 1);
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
Rdest = d;
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_cico_imm_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_sub_cico_imm_rd0) /* { */
IMPL(sub_cico_imm_rd0)
int64_t s1 = Rsrc1, s2 = (int64_t)(int32_t)Simm16, d;
uint64_t v, c;
d = s1 - s2 - (sp->v9_ccr & 1);
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_sub_cico_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_sub_cico_rrr_rd0) /* { */
IMPL(sub_cico_rrr_rd0)
int64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
d = s1 - s2 - (sp->v9_ccr & 1);
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
ENDI
#endif /* } */
/*
 * Logical instructions with condition codes (ANDcc/ANDNcc/ORcc/ORNcc/
 * XORcc/XNORcc).  LOGIC_OP evaluates _op once into `result', sets N/Z
 * for both icc and xcc from it (V and C are cleared, since the whole
 * CCR is overwritten by the plain assignment), then runs _res - either
 * `Rdest = result' to write the destination, or nothing for the _rd0
 * (%g0 destination, flags-only) forms.  _res deliberately references
 * `result', which is only in scope inside this macro's block.
 */
#define LOGIC_OP(_res, _op) do { \
uint64_t result; \
result = _op; \
sp->v9_ccr = V9_xcc_n((result>>63)&1) \
| V9_xcc_z(result==0LL) \
| V9_icc_n((result>>31)&1) \
| V9_icc_z((result&MASK64(31,0))==0LL); \
_res; \
} while (0)
/*
 * Instruction: sparcv9_and_cc_imm
 */
#if !defined(HAS_NATIVE_sparcv9_and_cc_imm) /* { */
IMPL(and_cc_imm)
LOGIC_OP(Rdest = result, Rsrc1 & Simm16);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_and_cc_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_and_cc_rrr) /* { */
IMPL(and_cc_rrr)
LOGIC_OP(Rdest = result, Rsrc1 & Rsrc2);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_and_cc_imm_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_and_cc_imm_rd0) /* { */
IMPL(and_cc_imm_rd0)
LOGIC_OP(/*nada*/, Rsrc1 & Simm16);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_and_cc_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_and_cc_rrr_rd0) /* { */
IMPL(and_cc_rrr_rd0)
LOGIC_OP(/*nada*/, Rsrc1 & Rsrc2);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_andn_cc_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_andn_cc_rrr) /* { */
IMPL(andn_cc_rrr)
LOGIC_OP(Rdest=result, (Rsrc1 & ~(Rsrc2)) );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_andn_cc_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_andn_cc_rrr_rd0) /* { */
IMPL(andn_cc_rrr_rd0)
LOGIC_OP(/*nada*/, (Rsrc1 & ~(Rsrc2)) );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_or_cc_imm
 */
#if !defined(HAS_NATIVE_sparcv9_or_cc_imm) /* { */
IMPL(or_cc_imm)
LOGIC_OP(Rdest=result, Rsrc1 | Simm16 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_or_cc_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_or_cc_rrr) /* { */
IMPL(or_cc_rrr)
LOGIC_OP(Rdest=result, Rsrc1 | Rsrc2 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_or_cc_imm_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_or_cc_imm_rd0) /* { */
IMPL(or_cc_imm_rd0)
LOGIC_OP(/*nada*/, Rsrc1 | Simm16 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_or_cc_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_or_cc_rrr_rd0) /* { */
IMPL(or_cc_rrr_rd0)
LOGIC_OP(/*nada*/, Rsrc1 | Rsrc2 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_orn_cc_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_orn_cc_rrr) /* { */
IMPL(orn_cc_rrr)
LOGIC_OP(Rdest=result, (Rsrc1 | ~(Rsrc2)) );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_orn_cc_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_orn_cc_rrr_rd0) /* { */
IMPL(orn_cc_rrr_rd0)
LOGIC_OP(/*nada*/, (Rsrc1 | ~(Rsrc2)) );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_xor_cc_imm
 */
#if !defined(HAS_NATIVE_sparcv9_xor_cc_imm) /* { */
IMPL(xor_cc_imm)
LOGIC_OP(Rdest=result, Rsrc1 ^ Simm16 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_xor_cc_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_xor_cc_rrr) /* { */
IMPL(xor_cc_rrr)
LOGIC_OP(Rdest=result, Rsrc1 ^ Rsrc2 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_xor_cc_imm_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_xor_cc_imm_rd0) /* { */
IMPL(xor_cc_imm_rd0)
LOGIC_OP(/*nada*/, Rsrc1 ^ Simm16 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_xor_cc_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_xor_cc_rrr_rd0) /* { */
IMPL(xor_cc_rrr_rd0)
LOGIC_OP(/*nada*/, Rsrc1 ^ Rsrc2 );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_xnor_cc_rrr
 */
#if !defined(HAS_NATIVE_sparcv9_xnor_cc_rrr) /* { */
IMPL(xnor_cc_rrr)
LOGIC_OP(Rdest=result, ~(Rsrc1 ^ Rsrc2) );
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_xnor_cc_rrr_rd0
 */
#if !defined(HAS_NATIVE_sparcv9_xnor_cc_rrr_rd0) /* { */
IMPL(xnor_cc_rrr_rd0)
LOGIC_OP(/*nada*/, ~(Rsrc1 ^ Rsrc2) );
ENDI
#endif /* } */
/*
* Instruction: sparcv9_bne_icc
*/
/*
 * Conditional branches on icc (32-bit) and xcc (64-bit) condition
 * codes, non-annulled forms.  Taken branch: the delayed-branch target
 * (Rpc + SBRoffset32) becomes the new npc while pc advances to the old
 * npc, so the delay-slot instruction executes next; the early `return'
 * skips ENDI's sequential PC advance.  Untaken branch: fall through to
 * ENDI, which steps pc/npc sequentially (the delay slot still runs).
 * Simple conditions (ne/e/cc/cs/pos/neg/vc/vs) test a single CCR flag
 * mask directly; compound conditions (g/le/ge/l/gu/leu) extract the
 * 4-bit flag nibble and index the sparcv9_cc_magic truth table.
 */
#if !defined(HAS_NATIVE_sparcv9_bne_icc) /* { */
IMPL(bne_icc)
if (!(sp->v9_ccr & V9_icc_z_mask)) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_be_icc
 */
#if !defined(HAS_NATIVE_sparcv9_be_icc) /* { */
IMPL(be_icc)
if (sp->v9_ccr & V9_icc_z_mask) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bg_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bg_icc) /* { */
IMPL(bg_icc)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_g] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_ble_icc
 */
#if !defined(HAS_NATIVE_sparcv9_ble_icc) /* { */
IMPL(ble_icc)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_le] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bge_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bge_icc) /* { */
IMPL(bge_icc)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_ge] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bl_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bl_icc) /* { */
IMPL(bl_icc)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_l] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bgu_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bgu_icc) /* { */
IMPL(bgu_icc)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_gu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bleu_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bleu_icc) /* { */
IMPL(bleu_icc)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_leu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bcc_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bcc_icc) /* { */
IMPL(bcc_icc)
if ( !(sp->v9_ccr & V9_icc_c_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bcs_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bcs_icc) /* { */
IMPL(bcs_icc)
if ( sp->v9_ccr & V9_icc_c_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bpos_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bpos_icc) /* { */
IMPL(bpos_icc)
if ( !(sp->v9_ccr & V9_icc_n_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bneg_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bneg_icc) /* { */
IMPL(bneg_icc)
if ( sp->v9_ccr & V9_icc_n_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bvc_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bvc_icc) /* { */
IMPL(bvc_icc)
if ( !(sp->v9_ccr & V9_icc_v_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bvs_icc
 */
#if !defined(HAS_NATIVE_sparcv9_bvs_icc) /* { */
IMPL(bvs_icc)
if ( sp->v9_ccr & V9_icc_v_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bne_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bne_xcc) /* { */
IMPL(bne_xcc)
if (!(sp->v9_ccr & V9_xcc_z_mask)) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_be_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_be_xcc) /* { */
IMPL(be_xcc)
if (sp->v9_ccr & V9_xcc_z_mask) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bg_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bg_xcc) /* { */
IMPL(bg_xcc)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_g] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_ble_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_ble_xcc) /* { */
IMPL(ble_xcc)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_le] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bge_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bge_xcc) /* { */
IMPL(bge_xcc)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_ge] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bl_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bl_xcc) /* { */
IMPL(bl_xcc)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_l] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bgu_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bgu_xcc) /* { */
IMPL(bgu_xcc)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_gu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bleu_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bleu_xcc) /* { */
IMPL(bleu_xcc)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_leu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bcc_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bcc_xcc) /* { */
IMPL(bcc_xcc)
if ( !(sp->v9_ccr & V9_xcc_c_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bcs_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bcs_xcc) /* { */
IMPL(bcs_xcc)
if ( sp->v9_ccr & V9_xcc_c_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bpos_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bpos_xcc) /* { */
IMPL(bpos_xcc)
if ( !(sp->v9_ccr & V9_xcc_n_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bneg_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bneg_xcc) /* { */
IMPL(bneg_xcc)
if ( sp->v9_ccr & V9_xcc_n_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bvc_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bvc_xcc) /* { */
IMPL(bvc_xcc)
if ( !(sp->v9_ccr & V9_xcc_v_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_bvs_xcc
 */
#if !defined(HAS_NATIVE_sparcv9_bvs_xcc) /* { */
IMPL(bvs_xcc)
if ( sp->v9_ccr & V9_xcc_v_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
* Instruction: sparcv9_bne_icc_an
*/
/*
 * Annulled (",a") conditional branches on icc/xcc.  Taken: identical
 * to the non-annulled forms (delay slot executes).  Untaken: the
 * annul bit cancels the delay-slot instruction, so execution resumes
 * at npc+4 with npc+8 following - hence the explicit
 *   Rpc = Rnpc + 4; Rnpc = Rnpc + 8;
 * These bodies close with ENDDEF (not ENDI) because the PC is updated
 * explicitly on both paths.
 */
#if !defined(HAS_NATIVE_sparcv9_bne_icc_an) /* { */
IMPL(bne_icc_an)
if (!(sp->v9_ccr & V9_icc_z_mask)) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_be_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_be_icc_an) /* { */
IMPL(be_icc_an)
if (sp->v9_ccr & V9_icc_z_mask) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bg_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bg_icc_an) /* { */
IMPL(bg_icc_an)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_g] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_ble_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_ble_icc_an) /* { */
IMPL(ble_icc_an)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_le] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bge_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bge_icc_an) /* { */
IMPL(bge_icc_an)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_ge] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bl_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bl_icc_an) /* { */
IMPL(bl_icc_an)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_l] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bgu_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bgu_icc_an) /* { */
IMPL(bgu_icc_an)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_gu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bleu_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bleu_icc_an) /* { */
IMPL(bleu_icc_an)
int cc = V9_ext_icc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_leu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bcc_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bcc_icc_an) /* { */
IMPL(bcc_icc_an)
if ( !(sp->v9_ccr & V9_icc_c_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bcs_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bcs_icc_an) /* { */
IMPL(bcs_icc_an)
if ( sp->v9_ccr & V9_icc_c_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bpos_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bpos_icc_an) /* { */
IMPL(bpos_icc_an)
if ( !(sp->v9_ccr & V9_icc_n_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bneg_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bneg_icc_an) /* { */
IMPL(bneg_icc_an)
if ( sp->v9_ccr & V9_icc_n_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bvc_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bvc_icc_an) /* { */
IMPL(bvc_icc_an)
if ( !(sp->v9_ccr & V9_icc_v_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bvs_icc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bvs_icc_an) /* { */
IMPL(bvs_icc_an)
if ( sp->v9_ccr & V9_icc_v_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bne_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bne_xcc_an) /* { */
IMPL(bne_xcc_an)
if (!(sp->v9_ccr & V9_xcc_z_mask)) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_be_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_be_xcc_an) /* { */
IMPL(be_xcc_an)
if (sp->v9_ccr & V9_xcc_z_mask) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bg_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bg_xcc_an) /* { */
IMPL(bg_xcc_an)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_g] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_ble_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_ble_xcc_an) /* { */
IMPL(ble_xcc_an)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_le] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bge_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bge_xcc_an) /* { */
IMPL(bge_xcc_an)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_ge] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bl_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bl_xcc_an) /* { */
IMPL(bl_xcc_an)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_l] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bgu_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bgu_xcc_an) /* { */
IMPL(bgu_xcc_an)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_gu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bleu_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bleu_xcc_an) /* { */
IMPL(bleu_xcc_an)
int cc = V9_ext_xcc(sp->v9_ccr);
if ( (sparcv9_cc_magic[cond_leu] >> cc) &1 ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bcc_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bcc_xcc_an) /* { */
IMPL(bcc_xcc_an)
if ( !(sp->v9_ccr & V9_xcc_c_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bcs_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bcs_xcc_an) /* { */
IMPL(bcs_xcc_an)
if ( sp->v9_ccr & V9_xcc_c_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bpos_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bpos_xcc_an) /* { */
IMPL(bpos_xcc_an)
if ( !(sp->v9_ccr & V9_xcc_n_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bneg_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bneg_xcc_an) /* { */
IMPL(bneg_xcc_an)
if ( sp->v9_ccr & V9_xcc_n_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bvc_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bvc_xcc_an) /* { */
IMPL(bvc_xcc_an)
if ( !(sp->v9_ccr & V9_xcc_v_mask) ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_bvs_xcc_an
 */
#if !defined(HAS_NATIVE_sparcv9_bvs_xcc_an) /* { */
IMPL(bvs_xcc_an)
if ( sp->v9_ccr & V9_xcc_v_mask ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Branch-on-register (BPr) instructions, non-annulled: compare the
 * full 64-bit Rsrc1 against zero (signed where the mnemonic implies
 * it) and branch to pc + SBRreg_off32 if the test holds, executing
 * the delay slot.  When the test fails, no pc update is done here:
 * ENDI presumably performs the sequential NEXT_INSTN advance —
 * confirm against the macro definition.
 */
/*
 * Instruction: sparcv9_brz
 */
#if !defined(HAS_NATIVE_sparcv9_brz) /* { */
IMPL(brz)
if (0ULL == Rsrc1) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_brlez
 */
#if !defined(HAS_NATIVE_sparcv9_brlez) /* { */
IMPL(brlez)
/* SRsrc1 is the signed view of the source register. */
if (SRsrc1 <= 0LL) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_brlz
 */
#if !defined(HAS_NATIVE_sparcv9_brlz) /* { */
IMPL(brlz)
if (SRsrc1 < 0LL) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_brnz
 */
#if !defined(HAS_NATIVE_sparcv9_brnz) /* { */
IMPL(brnz)
if (SRsrc1 != 0LL) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_brgz
 */
#if !defined(HAS_NATIVE_sparcv9_brgz) /* { */
IMPL(brgz)
if (SRsrc1 > 0LL) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_brgez
 */
#if !defined(HAS_NATIVE_sparcv9_brgez) /* { */
IMPL(brgez)
if (SRsrc1 >= 0LL) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Branch-on-register instructions with the annul bit set: same
 * register tests as the brz..brgez group above, but when the branch
 * is not taken the delay-slot instruction is skipped explicitly
 * (pc <- npc + 4, npc <- npc + 8) rather than left to ENDI.
 */
/*
 * Instruction: sparcv9_brz_an
 */
#if !defined(HAS_NATIVE_sparcv9_brz_an) /* { */
IMPL(brz_an)
if ( Rsrc1 == 0LL ) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_brlez_an
 */
#if !defined(HAS_NATIVE_sparcv9_brlez_an) /* { */
IMPL(brlez_an)
if ( SRsrc1 <= 0LL ) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_brlz_an
 */
#if !defined(HAS_NATIVE_sparcv9_brlz_an) /* { */
IMPL(brlz_an)
if ( SRsrc1 < 0LL ) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_brnz_an
 */
#if !defined(HAS_NATIVE_sparcv9_brnz_an) /* { */
IMPL(brnz_an)
if ( SRsrc1 != 0LL ) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_brgz_an
 */
#if !defined(HAS_NATIVE_sparcv9_brgz_an) /* { */
IMPL(brgz_an)
if ( SRsrc1 > 0LL ) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_brgez_an
 */
#if !defined(HAS_NATIVE_sparcv9_brgez_an) /* { */
IMPL(brgez_an)
if ( SRsrc1 >= 0LL ) {
tvaddr_t tpc = Rpc + SBRreg_off32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Floating-point arithmetic, non-native fallback path.  These are
 * stubs: each checks that the FPU is enabled, then posts an
 * unimplemented-FPop trap via the FPU_NOT_IMPLEMENTED macro (see
 * file head), so a guest FP emulation handler can take over.
 * faddd and fmuld additionally short-circuit the all-zero operand
 * cases so trivial arithmetic works without trapping.
 */
/*
 * Instruction: sparcv9_fadds
 */
#if !defined(HAS_NATIVE_fadds) /* { */
IMPL(fadds)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fadds")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_faddd
 */
#if !defined(HAS_NATIVE_faddd) /* { */
IMPL(faddd)
FP_EXEC_FPU_ON_CHECK;
/* Bit-pattern compare: only +0.0 matches, so this shortcut is
 * exact for +0 + +0; anything else (incl. -0) traps below. */
if (F64src1 == 0 && F64src2 == 0)
F64dest = 0;
else {
FPU_NOT_IMPLEMENTED("faddd")
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fsubs
 */
#if !defined(HAS_NATIVE_fsubs) /* { */
IMPL(fsubs)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fsubs")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fsubd
 */
#if !defined(HAS_NATIVE_fsubd) /* { */
IMPL(fsubd)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fsubd")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fmuls
 */
#if !defined(HAS_NATIVE_fmuls) /* { */
IMPL(fmuls)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fmuls")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fmuld
 */
#if !defined(HAS_NATIVE_fmuld) /* { */
IMPL(fmuld)
FP_EXEC_FPU_ON_CHECK;
/* NOTE(review): this shortcut yields +0 whenever either operand
 * is +0, which loses the result sign for (-x * +0) and skips the
 * inf * 0 invalid-operation case — acceptable only as a simulator
 * fast path; confirm intended semantics. */
if (F64src1 == 0 || F64src2 == 0)
F64dest = 0;
else {
FPU_NOT_IMPLEMENTED("fmuld")
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fdivs
 */
#if !defined(HAS_NATIVE_fdivs) /* { */
IMPL(fdivs)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fdivs")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fdivd
 */
#if !defined(HAS_NATIVE_fdivd) /* { */
IMPL(fdivd)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fdivd")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fsmuld
 */
#if !defined(HAS_NATIVE_fsmuld) /* { */
IMPL(fsmuld)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fsmuld")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fstod
 */
#if !defined(HAS_NATIVE_fstod) /* { */
IMPL(fstod)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fstod")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fdtos
 */
#if !defined(HAS_NATIVE_fdtos) /* { */
IMPL(fdtos)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fdtos")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fsqrts
 */
#if !defined(HAS_NATIVE_fsqrts) /* { */
IMPL(fsqrts)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fsqrts")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fsqrtd
 */
#if !defined(HAS_NATIVE_fsqrtd) /* { */
IMPL(fsqrtd)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fsqrtd")
ENDDEF
#endif /* } */
/*
 * FP register-to-register moves.  Pure bit copies (no rounding, no
 * exceptions), so the current exception / trap-type fields of the
 * FSR are cleared afterwards.
 */
/*
 * Instruction: sparcv9_fmovs
 */
#if !defined(HAS_NATIVE_sparcv9_fmovs) /* { */
IMPL(fmovs)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1;
FP_CLEAR_CEXC_FTT(sp);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fmovd
 */
#if !defined(HAS_NATIVE_sparcv9_fmovd) /* { */
IMPL(fmovd)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1;
FP_CLEAR_CEXC_FTT(sp);
ENDI
#endif /* } */
/*
 * Conditional FP moves on the integer condition codes (FMOVcc) and
 * on the FP condition codes (FMOVfcc).  MOVCC_cc selects icc vs
 * xcc (integer) or which fccN field (FP); the move happens only
 * when the magic-table bit for MOVCC_cond is set for the selected
 * flag nibble.
 */
IMPL( fmovscc )
int ccr;
FP_EXEC_FPU_ON_CHECK;
ccr = sp->v9_ccr;
/* Non-zero cc field selects xcc, held in the high nibble of CCR. */
if (MOVCC_cc) ccr>>=4;
if ( (sparcv9_cc_magic[MOVCC_cond] >> (ccr & 0xf)) &1 ) {
F32dest = F32src1;
}
NEXT_INSTN(sp);
ENDDEF
IMPL( fmovsfcc )
int ccr;
FP_EXEC_FPU_ON_CHECK;
if (MOVCC_cc == 0)
ccr = V9_FSR_FCC0(sp->v9_fsr_ctrl);
else
ccr = V9_FSR_FCCN(sp->v9_fsr_ctrl, MOVCC_cc);
if ( (sparcv9_fcc_magic[MOVCC_cond] >> ccr) & 1 ) {
F32dest = F32src1;
}
NEXT_INSTN(sp);
ENDDEF
IMPL( fmovdcc )
int ccr;
FP_EXEC_FPU_ON_CHECK;
ccr = sp->v9_ccr;
if (MOVCC_cc) ccr>>=4;
if ( (sparcv9_cc_magic[MOVCC_cond] >> (ccr & 0xf)) &1 ) {
F64dest = F64src1;
}
NEXT_INSTN(sp);
ENDDEF
/*
 * FMOVr: conditional FP move on the signed 64-bit contents of an
 * integer register (SRsrc1).  One variant per register condition,
 * in single (F32) and double (F64) flavours.
 */
IMPL( fmovrs_z )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 == 0)
F32dest = F32src2;
ENDI
IMPL( fmovrs_lez )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 <= 0)
F32dest = F32src2;
ENDI
IMPL( fmovrs_lz )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 < 0)
F32dest = F32src2;
ENDI
IMPL( fmovrs_nz )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 != 0)
F32dest = F32src2;
ENDI
IMPL( fmovrs_gz )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 > 0)
F32dest = F32src2;
ENDI
IMPL( fmovrs_gez )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 >= 0)
F32dest = F32src2;
ENDI
IMPL( fmovrd_z )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 == 0)
F64dest = F64src2;
ENDI
IMPL( fmovrd_lez )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 <= 0)
F64dest = F64src2;
ENDI
IMPL( fmovrd_lz )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 < 0)
F64dest = F64src2;
ENDI
IMPL( fmovrd_nz )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 != 0)
F64dest = F64src2;
ENDI
IMPL( fmovrd_gz )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 > 0)
F64dest = F64src2;
ENDI
IMPL( fmovrd_gez )
FP_EXEC_FPU_ON_CHECK;
if (SRsrc1 >= 0)
F64dest = F64src2;
ENDI
/* FMOVfcc, double flavour: move F64src1 when the selected fccN
 * field satisfies MOVCC_cond (table driven, as fmovsfcc above). */
IMPL( fmovdfcc )
int ccr;
FP_EXEC_FPU_ON_CHECK;
if (MOVCC_cc == 0)
ccr = V9_FSR_FCC0(sp->v9_fsr_ctrl);
else
ccr = V9_FSR_FCCN(sp->v9_fsr_ctrl, MOVCC_cc);
if ( (sparcv9_fcc_magic[MOVCC_cond] >> ccr) & 1 ) {
F64dest = F64src1;
}
NEXT_INSTN(sp);
ENDDEF
/*
 * FP sign manipulation: negate flips the sign bit, abs clears it.
 * Pure bit operations — no exceptions possible, so CEXC/FTT are
 * cleared.
 */
/*
 * Instruction: sparcv9_fnegs
 */
#if !defined(HAS_NATIVE_sparcv9_fnegs) /* { */
IMPL(fnegs)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 ^ (1u << 31);
FP_CLEAR_CEXC_FTT(sp);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fnegd
 */
#if !defined(HAS_NATIVE_sparcv9_fnegd) /* { */
IMPL(fnegd)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1 ^ (1ull << 63);
FP_CLEAR_CEXC_FTT(sp);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fabss
 */
#if !defined(HAS_NATIVE_sparcv9_fabss) /* { */
IMPL(fabss)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 & 0x7fffffffu;
FP_CLEAR_CEXC_FTT(sp);
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fabsd
 */
#if !defined(HAS_NATIVE_sparcv9_fabsd) /* { */
IMPL(fabsd)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1 & 0x7fffffffffffffffull;
FP_CLEAR_CEXC_FTT(sp);
ENDI
#endif /* } */
/*
 * FP <-> integer conversion instructions, non-native fallback:
 * all stubs that post the unimplemented-FPop trap (see the
 * FPU_NOT_IMPLEMENTED macro in the file head).
 */
/*
 * Instruction: sparcv9_fstoi
 */
#if !defined(HAS_NATIVE_fstoi) /* { */
IMPL(fstoi)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fstoi")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fitos
 */
#if !defined(HAS_NATIVE_fitos) /* { */
IMPL(fitos)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fitos")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fdtoi
 */
#if !defined(HAS_NATIVE_fdtoi) /* { */
IMPL(fdtoi)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fdtoi")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fitod
 */
#if !defined(HAS_NATIVE_fitod) /* { */
IMPL(fitod)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fitod")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fstox
 */
#if !defined(HAS_NATIVE_fstox) /* { */
IMPL(fstox)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fstox")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fxtos
 */
#if !defined(HAS_NATIVE_fxtos) /* { */
IMPL(fxtos)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fxtos")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fdtox
 */
#if !defined(HAS_NATIVE_fdtox) /* { */
IMPL(fdtox)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fdtox")
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fxtod
 */
#if !defined(HAS_NATIVE_fxtod) /* { */
IMPL(fxtod)
FP_EXEC_FPU_ON_CHECK;
FPU_NOT_IMPLEMENTED("fxtod")
ENDDEF
#endif /* } */
/*
 * FP register logical operations.
 * Bitwise ops on the raw 32-/64-bit FP register contents.  The
 * "not1"/"not2" suffix says which operand is complemented; the
 * "X" variants take a single source.
 */
IMPL(fzerod)
FP_EXEC_FPU_ON_CHECK;
F64dest = 0;
ENDI
IMPL(fzeros)
FP_EXEC_FPU_ON_CHECK;
F32dest = 0;
ENDI
IMPL(foned)
FP_EXEC_FPU_ON_CHECK;
F64dest = (uint64_t)-1;
ENDI
IMPL(fones)
FP_EXEC_FPU_ON_CHECK;
F32dest = (uint32_t)-1;
ENDI
IMPL(fandd)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1 & F64src2;
ENDI
IMPL(fandnot1d)
FP_EXEC_FPU_ON_CHECK;
F64dest = ~F64src1 & F64src2;
ENDI
IMPL(fandnot1s)
FP_EXEC_FPU_ON_CHECK;
F32dest = ~F32src1 & F32src2;
ENDI
IMPL(fandnot2d)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1 & ~F64src2;
ENDI
IMPL(fandnot2s)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 & ~F32src2;
ENDI
IMPL(fands)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 & F32src2;
ENDI
IMPL(fnandd)
FP_EXEC_FPU_ON_CHECK;
F64dest = ~(F64src1 & F64src2);
ENDI
IMPL(fnands)
FP_EXEC_FPU_ON_CHECK;
F32dest = ~(F32src1 & F32src2);
ENDI
IMPL(fnord)
FP_EXEC_FPU_ON_CHECK;
F64dest = ~(F64src1 | F64src2);
ENDI
IMPL(fnors)
FP_EXEC_FPU_ON_CHECK;
F32dest = ~(F32src1 | F32src2);
ENDI
IMPL(fnotXd)
FP_EXEC_FPU_ON_CHECK;
F64dest = ~F64src1;
ENDI
IMPL(fnotXs)
FP_EXEC_FPU_ON_CHECK;
F32dest = ~F32src1;
ENDI
IMPL(ford)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1 | F64src2;
ENDI
IMPL(fornot1d)
FP_EXEC_FPU_ON_CHECK;
F64dest = ~F64src1 | F64src2;
ENDI
IMPL(fornot1s)
FP_EXEC_FPU_ON_CHECK;
F32dest = ~F32src1 | F32src2;
ENDI
IMPL(fornot2d)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1 | ~F64src2;
ENDI
IMPL(fornot2s)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 | ~F32src2;
ENDI
IMPL(fors)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 | F32src2;
ENDI
IMPL(fsrcXd)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1;
ENDI
IMPL(fsrcXs)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1;
ENDI
IMPL(fxnord)
FP_EXEC_FPU_ON_CHECK;
F64dest = ~(F64src1 ^ F64src2);
ENDI
IMPL(fxnors)
FP_EXEC_FPU_ON_CHECK;
F32dest = ~(F32src1 ^ F32src2);
ENDI
IMPL(fxord)
FP_EXEC_FPU_ON_CHECK;
F64dest = F64src1 ^ F64src2;
ENDI
IMPL(fxors)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 ^ F32src2;
ENDI
/*
 * FP register fixed-point partitioned add and subtract.
 * Each lane is computed independently with wraparound (no
 * saturation).  The loop shifts the accumulator right and ORs the
 * new lane into the top bits, so after all iterations every lane
 * sits in its original position.
 */
IMPL(fpadd16)
FP_EXEC_FPU_ON_CHECK;
uint16_t s1, s2, d;
uint64_t fs1, fs2, res;
uint_t i;
fs1 = F64src1;
fs2 = F64src2;
res = 0;
for (i = 0; i < 4; i++) {
s1 = (uint16_t) fs1;
s2 = (uint16_t) fs2;
d = s1 + s2;
res >>= 16;
res |= (uint64_t)d << 48;
fs1 >>= 16;
fs2 >>= 16;
}
F64dest = res;
ENDI
IMPL(fpadd16s)
FP_EXEC_FPU_ON_CHECK;
uint16_t s1, s2, d;
uint32_t fs1, fs2, res;
uint_t i;
fs1 = F32src1;
fs2 = F32src2;
res = 0;
for (i = 0; i < 2; i++) {
s1 = (uint16_t) fs1;
s2 = (uint16_t) fs2;
d = s1 + s2;
res >>= 16;
res |= (uint32_t)d << 16;
fs1 >>= 16;
fs2 >>= 16;
}
F32dest = res;
ENDI
IMPL(fpadd32)
FP_EXEC_FPU_ON_CHECK;
uint32_t s1, s2, d;
uint64_t fs1, fs2, res;
uint_t i;
fs1 = F64src1;
fs2 = F64src2;
res = 0;
for (i = 0; i < 2; i++) {
s1 = (uint32_t) fs1;
s2 = (uint32_t) fs2;
d = s1 + s2;
res >>= 32;
res |= (uint64_t)d << 32;
fs1 >>= 32;
fs2 >>= 32;
}
F64dest = res;
ENDI
IMPL(fpadd32s)
FP_EXEC_FPU_ON_CHECK;
/* Single 32-bit lane: plain modular add. */
F32dest = F32src1 + F32src2;
ENDI
IMPL(fpsub16)
FP_EXEC_FPU_ON_CHECK;
uint16_t s1, s2, d;
uint64_t fs1, fs2, res;
uint_t i;
fs1 = F64src1;
fs2 = F64src2;
res = 0;
for (i = 0; i < 4; i++) {
s1 = (uint16_t) fs1;
s2 = (uint16_t) fs2;
d = s1 - s2;
res >>= 16;
res |= (uint64_t)d << 48;
fs1 >>= 16;
fs2 >>= 16;
}
F64dest = res;
ENDI
IMPL(fpsub16s)
FP_EXEC_FPU_ON_CHECK;
uint16_t s1, s2, d;
uint32_t fs1, fs2, res;
uint_t i;
fs1 = F32src1;
fs2 = F32src2;
res = 0;
for (i = 0; i < 2; i++) {
s1 = (uint16_t) fs1;
s2 = (uint16_t) fs2;
d = s1 - s2;
res >>= 16;
res |= (uint32_t)d << 16;
fs1 >>= 16;
fs2 >>= 16;
}
F32dest = res;
ENDI
IMPL(fpsub32)
FP_EXEC_FPU_ON_CHECK;
uint32_t s1, s2, d;
uint64_t fs1, fs2, res;
uint_t i;
fs1 = F64src1;
fs2 = F64src2;
res = 0;
for (i = 0; i < 2; i++) {
s1 = (uint32_t) fs1;
s2 = (uint32_t) fs2;
d = s1 - s2;
res >>= 32;
res |= (uint64_t)d << 32;
fs1 >>= 32;
fs2 >>= 32;
}
F64dest = res;
ENDI
IMPL(fpsub32s)
FP_EXEC_FPU_ON_CHECK;
F32dest = F32src1 - F32src2;
ENDI
/* ------------------------------------------------------------ */
/*
 * Basic branch instructions
 */
/* Note: special case - normally taken branches */
/* always execute their delay slots - not the always case ! */
IMPL( bralways_ds_annul ) /* branch always annul delay slot */
/* ba,a: jump straight to the target; delay slot never runs. */
tvaddr_t xpc;
xpc = Rpc + SBRoffset32;
Rpc = xpc;
Rnpc = xpc + 4;
ENDDEF
IMPL( bralways_ds ) /* branch always executing delay slot */
tvaddr_t xpc;
xpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = xpc;
ENDDEF
IMPL( brnever_ds_annul ) /* branch never, but annul delay slot */
/* bn,a: skip over the delay-slot instruction at npc. */
tvaddr_t xpc;
xpc = Rnpc + 4;
Rpc = xpc;
Rnpc = xpc + 4;
ENDDEF
/* ------------------------------------------------------------ */
/*
 * Call instruction + Jump and links ...
 *
 * call writes the call-site pc to %o7; jmpl writes it to Rdest.
 * All variants check the VA-hole (only meaningful with 64-bit
 * addressing, i.e. when pstate.addr_mask is clear) before
 * committing any state, and jmpl targets must be 4-byte aligned.
 */
IMPL( call )
tvaddr_t tpc, opc;
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
opc = Rpc;
tpc = opc + (sint64_t)Simm32;
tpc &= ~0x3;
if (!v9p->pstate.addr_mask && v9p->check_vahole(sp, tpc)) return;
Rpc = Rnpc;
Rnpc = tpc;
/* With 32-bit addressing the saved pc is truncated to 32 bits. */
if (v9p->pstate.addr_mask) opc &= MASK64(31,0); /* FIXME: SV9_ID125 ? */
IReg( Reg_sparcv9_o7 ) = opc;
ENDDEF
IMPL( jmpl_imm )
tvaddr_t opc, npc;
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
npc = Rsrc1 + Simm16;
if ((npc & 3) != 0) {
v9p->post_precise_trap(sp,
Sparcv9_trap_mem_address_not_aligned);
return;
}
if (!v9p->pstate.addr_mask && v9p->check_vahole(sp, Rsrc1 + Simm16)) return;
/* aligned check with xicache operation - maybe here instead ? FIXME*/
opc = Rpc; /* stash incase Rdest is same reg as Rsrc */
Rpc = Rnpc;
Rnpc = npc;
if (v9p->pstate.addr_mask) opc &= MASK64(31,0); /* FIXME: SV9_ID125 ? */
Rdest = opc;
ENDDEF
IMPL( jmpl_imm_rd0 ) /* fast track normal return-from-call instructions */
/* aligned check with xicache operation - maybe here instead ? FIXME*/
tvaddr_t npc;
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
npc = Rsrc1 + Simm16;
if ((npc & 3) != 0) {
v9p->post_precise_trap(sp,
Sparcv9_trap_mem_address_not_aligned);
return;
}
if (!v9p->pstate.addr_mask && v9p->check_vahole(sp, Rsrc1 + Simm16)) return;
Rpc = Rnpc;
Rnpc = npc;
ENDDEF
IMPL( jmpl_rrr )
tvaddr_t opc, npc;
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
npc = Rsrc1 + Rsrc2;
if ((npc & 3) != 0) {
v9p->post_precise_trap(sp,
Sparcv9_trap_mem_address_not_aligned);
return;
}
if (!v9p->pstate.addr_mask && v9p->check_vahole(sp, Rsrc1 + Rsrc2)) return;
/* aligned check with xicache operation - maybe here instead ? FIXME*/
opc = Rpc; /* stash incase Rdest is same reg as Rsrc */
Rpc = Rnpc;
Rnpc = npc;
if (v9p->pstate.addr_mask) opc &= MASK64(31,0); /* FIXME: SV9_ID125 ? */
Rdest = opc;
ENDDEF
IMPL( jmpl_rrr_rd0 ) /* fast track normal return-from-call instructions */
/* aligned check with xicache operation - maybe here instead ? FIXME*/
tvaddr_t npc;
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
npc = Rsrc1 + Rsrc2;
if ((npc & 3) != 0) {
v9p->post_precise_trap(sp,
Sparcv9_trap_mem_address_not_aligned);
return;
}
if (!v9p->pstate.addr_mask && v9p->check_vahole(sp, Rsrc1 + Rsrc2)) return;
Rpc = Rnpc;
Rnpc = npc;
ENDDEF
/* ------------------------------------------------------------ */
/*
 * Specialist maths instructions
 */
/* 64-bit unsigned divide (UDIVX): delegated to a shared helper. */
IMPL( udiv64_imm )
sparcv9_udiv64( sp, Rdest_num, Rsrc1, Simm16 );
ENDDEF
IMPL( udiv64_rrr )
sparcv9_udiv64( sp, Rdest_num, Rsrc1, Rsrc2 );
ENDDEF
/*
 * 64-bit signed divide (SDIVX).  Traps on divide-by-zero; the one
 * overflowing case, INT64_MIN / -1, is handled explicitly because
 * the host '/' would be undefined behavior there.
 */
IMPL(sdiv64_imm)
if (Simm16 == 0) {
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
if (!Zero_Reg(Rdest_num)) {
if ((uint64_t)Rsrc1 == (1ULL << 63) && (int64_t)Simm16 == -1)
Rdest = 0x7fffffffffffffffULL;
else
Rdest = (int64_t)Rsrc1 / (int64_t)Simm16;
}
ENDI
IMPL(sdiv64_rrr)
if (Rsrc2 == 0) {
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
if (!Zero_Reg(Rdest_num)) {
if ((uint64_t)Rsrc1 == (1ULL << 63) && (int64_t)Rsrc2 == -1)
Rdest = 0x7fffffffffffffffULL;
else
Rdest = (int64_t)Rsrc1 / (int64_t)Rsrc2;
}
ENDI
/*
 * 32-bit signed divide (SDIV, immediate divisor): divides the
 * 64-bit value Y:Rsrc1<31:0> by the sign-extended immediate and
 * saturates the quotient to the signed 32-bit range.  Traps on
 * divide-by-zero; INT64_MIN / -1 is special-cased since host
 * division would overflow (UB).
 *
 * Fix: the overflow check previously compared against (1ull << 31),
 * forcing an unsigned comparison in which any negative quotient was
 * promoted to a huge unsigned value and wrongly saturated to
 * 0x7fffffff.  Use the signed constant (1ll << 31), matching
 * sdiv_cc_imm.
 */
IMPL(sdiv_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
int64_t foo;
if (Simm16 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
if (!Zero_Reg(Rdest_num)) {
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
if ((uint64_t)foo == (1ULL << 63) && (int32_t)Simm16 == -1)
foo = 0x7fffffff;
else
foo = foo / (int32_t)Simm16;
/* Signed comparison: negative quotients must not saturate high. */
if (foo >= (1ll << 31))
foo = (1ll << 31) - 1;
else if (foo <= (int64_t)0xffffffff7fffffffULL)
foo = 0x80000000LL;
Rdest = (int32_t)foo;
}
ENDI
/*
 * 32-bit unsigned divide (UDIV, immediate divisor): divides the
 * 64-bit value Y:Rsrc1<31:0> by the (sign-extended, then treated
 * as unsigned 32-bit) immediate; quotient saturates at 2^32 - 1.
 */
IMPL(udiv_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
uint64_t foo;
if (Simm16 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
if (!Zero_Reg(Rdest_num)) {
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
/* Unsigned 64/32 division; quotient can exceed 32 bits. */
foo = ((uint64_t)foo) / ((uint32_t)(int32_t)Simm16);
if (foo >= (1ull << 32))
foo = (1ull << 32) - 1;
Rdest = (uint32_t)foo;
}
ENDI
/*
 * 32-bit signed divide (SDIV, register divisor): divides the
 * 64-bit value Y:Rsrc1<31:0> by Rsrc2<31:0> (signed) and saturates
 * the quotient to the signed 32-bit range.  Traps on divide-by-zero;
 * INT64_MIN / -1 is special-cased since host division would
 * overflow (UB).
 *
 * Fix: the overflow check previously compared against (1ull << 31),
 * forcing an unsigned comparison in which any negative quotient was
 * promoted to a huge unsigned value and wrongly saturated to
 * 0x7fffffff.  Use the signed constant (1ll << 31), matching
 * sdiv_cc_rrr.
 */
IMPL(sdiv_rrr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
int64_t foo;
if ((int32_t)Rsrc2 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
if (!Zero_Reg(Rdest_num)) {
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
if ((uint64_t)foo == (1ULL << 63) && (int32_t)Rsrc2 == -1)
foo = 0x7fffffff;
else
foo = foo / (int32_t)Rsrc2;
/* Signed comparison: negative quotients must not saturate high. */
if (foo >= (1ll << 31))
foo = (1ll << 31) - 1;
else if (foo <= (int64_t)0xffffffff7fffffffULL)
foo = 0x80000000LL;
Rdest = (int32_t)foo;
}
ENDI
/*
 * 32-bit unsigned divide (UDIV, register divisor): divides the
 * 64-bit value Y:Rsrc1<31:0> by Rsrc2<31:0> (unsigned); quotient
 * saturates at 2^32 - 1.
 *
 * Fix: the division was written as (int64_t)foo / (uint32_t)Rsrc2,
 * which the usual arithmetic conversions make a *signed* division
 * (uint32_t widens to int64_t).  When Y's high bit is set the
 * dividend goes negative and the quotient is wrong.  Use an
 * unsigned division, matching udiv_imm and udiv_cc_imm.
 */
IMPL(udiv_rrr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
uint64_t foo;
if ((uint32_t)Rsrc2 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
if (!Zero_Reg(Rdest_num)) {
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
foo = ((uint64_t)foo) / ((uint32_t)Rsrc2);
if (foo >= (1ull << 32))
foo = (1ull << 32) - 1;
Rdest = (uint32_t)foo;
}
ENDI
/*
 * SDIVcc (immediate divisor): as sdiv_imm, but also sets CCR.
 * icc.v is set when the quotient saturated; n/z are derived from
 * the final (sign-extended 32-bit) result for both icc and xcc.
 * The CCR update happens even when the destination is %g0.
 */
IMPL(sdiv_cc_imm)
sparcv9_cpu_t *v9p = (sparcv9_cpu_t*)(sp->specificp);
int64_t foo;
int v = 0;
if (Simm16 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
/* INT64_MIN / -1 would be UB on the host: saturate explicitly. */
if ((uint64_t)foo == (1ULL << 63) && (int32_t)Simm16 == -1) {
foo = 0x7fffffff;
v = (1 << 1); /* icc.v */
} else
foo = foo / (int32_t)Simm16;
if (foo >= (1ll << 31)) {
foo = (1ll << 31) - 1;
v = (1 << 1); /* icc.v */
} else if (foo <= (int64_t)0xffffffff7fffffffULL) {
foo = 0x80000000LL;
v = (1 << 1); /* icc.v */
}
foo = (int32_t)foo;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0) | /* xcc.z */
v;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
/*
 * UDIVcc (immediate divisor): as udiv_imm, but also sets CCR.
 * icc.v is set when the quotient saturated at 2^32 - 1; n/z are
 * derived from the final 32-bit result for both icc and xcc.
 */
IMPL(udiv_cc_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
uint64_t foo;
int v = 0;
if (Simm16 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
foo = ((uint64_t)foo) / ((uint32_t)(int32_t)Simm16);
if (foo >= (1ull << 32)) {
foo = (1ull << 32) - 1;
v = (1 << 1); /* icc.v */
}
foo = (uint32_t)foo;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0) | /* xcc.z */
v;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
/*
 * SDIVcc (register divisor): as sdiv_rrr, but also sets CCR.
 * icc.v is set when the quotient saturated; n/z are derived from
 * the final (sign-extended 32-bit) result for both icc and xcc.
 */
IMPL(sdiv_cc_rrr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
int64_t foo;
int v = 0;
if ((int32_t)Rsrc2 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
/* INT64_MIN / -1 would be UB on the host: saturate explicitly. */
if ((uint64_t)foo == (1ULL << 63) && (int32_t)Rsrc2 == -1) {
foo = 0x7fffffff;
v = (1 << 1); /* icc.v */
} else
foo = foo / (int32_t)Rsrc2;
if (foo >= (1ll << 31)) {
foo = (1ll << 31) - 1;
v = (1 << 1); /* icc.v */
} else if (foo <= (int64_t)0xffffffff7fffffffULL) {
foo = 0x80000000LL;
v = (1 << 1); /* icc.v */
}
foo = (int32_t)foo;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0) | /* xcc.z */
v;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
/*
 * UDIVcc (register divisor): divides the 64-bit value
 * Y:Rsrc1<31:0> by Rsrc2<31:0> (unsigned), saturating at
 * 2^32 - 1, and sets CCR (icc.v on saturation; n/z from the final
 * 32-bit result for both icc and xcc).
 *
 * Fix: the division was written as (int64_t)foo / (uint32_t)Rsrc2,
 * which the usual arithmetic conversions make a *signed* division
 * (uint32_t widens to int64_t).  When Y's high bit is set the
 * dividend goes negative and both the quotient and the resulting
 * condition codes are wrong.  Use an unsigned division, matching
 * udiv_cc_imm.
 */
IMPL(udiv_cc_rrr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
uint64_t foo;
int v = 0;
if ((uint32_t)Rsrc2 == 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
return;
}
foo = (((int64_t)sp->v9_y) << 32) | (uint32_t)Rsrc1;
foo = ((uint64_t)foo) / ((uint32_t)Rsrc2);
if (foo >= (1ull << 32)) {
foo = (1ull << 32) - 1;
v = (1 << 1); /* icc.v */
}
foo = (uint32_t)foo;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0) | /* xcc.z */
v;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
/*
 * 32x32 -> 64 multiplies (SMUL/UMUL, immediate operand).  The high
 * 32 bits of the product go to the Y register, the full 64-bit
 * product to Rdest.  The _cc variants additionally set icc/xcc
 * n and z from the product (v and c are left clear).
 */
IMPL(smul_imm)
uint64_t foo;
foo = (int64_t)(int32_t)Rsrc1 * (int64_t)(int32_t)Simm16;
sp->v9_y = foo >> 32;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
IMPL(umul_imm)
uint64_t foo;
foo = (uint64_t)(uint32_t)Rsrc1 * (uint64_t)(uint32_t)(int32_t)Simm16;
sp->v9_y = foo >> 32;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
IMPL(smul_cc_imm)
uint64_t foo;
foo = (int64_t)(int32_t)Rsrc1 * (int64_t)(int32_t)Simm16;
sp->v9_y = foo >> 32;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
IMPL(umul_cc_imm)
uint64_t foo;
foo = (uint64_t)(uint32_t)Rsrc1 * (uint64_t)(uint32_t)(int32_t)Simm16;
sp->v9_y = foo >> 32;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
/*
 * MULScc: one step of the V8 iterative multiply.  Builds one add
 * operand from (icc.n xor icc.v) shifted into bit 31 over
 * Rsrc1 >> 1; the other operand is Rsrc2/Simm16 if Y's LSB is set,
 * else zero.  Y shifts right by one, and the icc flags are
 * recomputed bitwise from the 32-bit add (xcc is preserved).
 */
IMPL(mulscc_rrr)
uint64_t foo;
uint32_t s1, s2;
uint32_t d;
uint32_t v, c;
foo = (((int64_t)Rsrc1) << 32) | (uint32_t)sp->v9_y;
/* icc.n xor icc.v */
s1 = ((sp->v9_ccr >> 3) & 1) ^ ((sp->v9_ccr >> 1) & 1);
s1 = (s1 << 31) | (((uint32_t)Rsrc1) >> 1);
if (foo & 1)
s2 = (uint32_t)Rsrc2;
else
s2 = 0;
foo >>= 1;
sp->v9_y = (uint32_t)foo;
d = s1 + s2;
/* Overflow / carry derived bitwise from operand and result signs. */
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = (sp->v9_ccr & V9_xcc_mask);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = (uint32_t)d;
ENDI
IMPL(mulscc_imm)
uint64_t foo;
uint32_t s1, s2;
uint32_t d;
uint32_t v, c;
foo = (((int64_t)Rsrc1) << 32) | (uint32_t)sp->v9_y;
/* icc.n xor icc.v */
s1 = ((sp->v9_ccr >> 3) & 1) ^ ((sp->v9_ccr >> 1) & 1);
s1 = (s1 << 31) | (((uint32_t)Rsrc1) >> 1);
if (foo & 1)
s2 = (uint32_t)Simm16;
else
s2 = 0;
foo >>= 1;
sp->v9_y = (uint32_t)foo;
d = s1 + s2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
sp->v9_ccr = (sp->v9_ccr & V9_xcc_mask);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = (uint32_t)d;
ENDI
/*
 * Register-operand forms of the 32x32 -> 64 multiplies; identical
 * semantics to the _imm variants above with Rsrc2 as the second
 * operand.
 */
IMPL(smul_rrr)
uint64_t foo;
foo = (int64_t)(int32_t)Rsrc1 * (int64_t)(int32_t)Rsrc2;
sp->v9_y = foo >> 32;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
IMPL(umul_rrr)
uint64_t foo;
foo = (uint64_t)(uint32_t)Rsrc1 * (uint64_t)(uint32_t)Rsrc2;
sp->v9_y = foo >> 32;
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
IMPL(smul_cc_rrr)
uint64_t foo;
foo = (int64_t)(int32_t)Rsrc1 * (int64_t)(int32_t)Rsrc2;
sp->v9_y = foo >> 32;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
IMPL(umul_cc_rrr)
uint64_t foo;
foo = (uint64_t)(uint32_t)Rsrc1 * (uint64_t)(uint32_t)Rsrc2;
sp->v9_y = foo >> 32;
sp->v9_ccr = ((foo & (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo == 0) ? (1 << 2) : 0) | /* icc.z */
((foo & (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo == 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num))
Rdest = foo;
ENDI
/* ------------------------------------------------------------ */
/*
 * trap instructions
 */
IMPL( trap_imm_fast )
/* Unconditional Tcc: user mode sees trap numbers 0-0x7f, more
 * privileged modes 0-0xff.  Simulator magic traps may hijack the
 * trap number instead of posting a real trap. */
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
int tn = Simm16;
if (V9_User==v9p->state)
tn &= 0x7f;
else
tn &= 0xff;
if (SS_MAGIC_TRAP_CC(TRAP_cc) && SS_MAGIC_TRAP(sp, tn)) {
NEXT_INSTN(sp);
return;
}
v9p->post_precise_trap(sp, tn+Sparcv9_trap_trap_instruction);
ENDDEF
IMPL( trapcc_imm )
/* Conditional traps delegate to a shared helper. */
sparcv9_trapcc( sp, Rsrc1 + Simm16, TRAP_cc, TRAP_cond );
ENDDEF
IMPL( trapcc_rr )
sparcv9_trapcc( sp, Rsrc1 + Rsrc2, TRAP_cc, TRAP_cond );
ENDDEF
/* ------------------------------------------------------------ */
/*
 * Conditional moves into integer registers: MOVcc on the integer
 * condition codes, MOVfcc on the FP condition codes, and MOVr on
 * the signed contents of an integer register.
 */
IMPL( movcc_imm )
int ccr;
ccr = sp->v9_ccr;
/* Non-zero cc field selects xcc, held in the high nibble of CCR. */
if (MOVCC_cc) ccr>>=4;
if ( (sparcv9_cc_magic[MOVCC_cond] >> (ccr & 0xf)) &1 ) {
Rdest = Simm16;
}
NEXT_INSTN(sp);
ENDDEF
IMPL( movcc_rr )
int ccr;
ccr = sp->v9_ccr;
if (MOVCC_cc) ccr>>=4;
if ( (sparcv9_cc_magic[MOVCC_cond] >> (ccr & 0xf)) &1 ) {
Rdest = Rsrc2;
}
NEXT_INSTN(sp);
ENDDEF
IMPL( movfcc_imm )
int ccr;
FP_EXEC_FPU_ON_CHECK;
if (MOVCC_cc == 0)
ccr = V9_FSR_FCC0(sp->v9_fsr_ctrl);
else
ccr = V9_FSR_FCCN(sp->v9_fsr_ctrl, MOVCC_cc);
if ( (sparcv9_fcc_magic[MOVCC_cond] >> ccr) & 1 ) {
Rdest = Simm16;
}
NEXT_INSTN(sp);
ENDDEF
IMPL( movfcc_rr )
int ccr;
FP_EXEC_FPU_ON_CHECK;
if (MOVCC_cc == 0)
ccr = V9_FSR_FCC0(sp->v9_fsr_ctrl);
else
ccr = V9_FSR_FCCN(sp->v9_fsr_ctrl, MOVCC_cc);
if ( (sparcv9_fcc_magic[MOVCC_cond] >> ccr) & 1 ) {
Rdest = Rsrc2;
}
NEXT_INSTN(sp);
ENDDEF
IMPL( movr_imm_z )
if (Rsrc1 == 0)
Rdest = Simm16;
ENDI
IMPL( movr_imm_lez )
if (SRsrc1 <= 0)
Rdest = Simm16;
ENDI
IMPL( movr_imm_lz )
if (SRsrc1 < 0)
Rdest = Simm16;
ENDI
IMPL( movr_imm_nz )
if (SRsrc1 != 0)
Rdest = Simm16;
ENDI
IMPL( movr_imm_gz )
if (SRsrc1 > 0)
Rdest = Simm16;
ENDI
IMPL( movr_imm_gez )
if (SRsrc1 >= 0)
Rdest = Simm16;
ENDI
IMPL( movr_rr_z )
if (Rsrc1 == 0)
Rdest = Rsrc2;
ENDI
IMPL( movr_rr_lez )
if (SRsrc1 <= 0)
Rdest = Rsrc2;
ENDI
IMPL( movr_rr_lz )
if (SRsrc1 < 0)
Rdest = Rsrc2;
ENDI
IMPL( movr_rr_nz )
if (SRsrc1 != 0)
Rdest = Rsrc2;
ENDI
IMPL( movr_rr_gz )
if (SRsrc1 > 0)
Rdest = Rsrc2;
ENDI
IMPL( movr_rr_gez )
if (SRsrc1 >= 0)
Rdest = Rsrc2;
ENDI
/*
 * POPC: population count.  V9 requires rs1 == %g0; anything else
 * is an illegal instruction.  The count uses the Kernighan trick
 * (val &= val - 1 clears the lowest set bit per iteration).
 */
IMPL( popc_imm )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
int64_t cnt;
int64_t val;
if (Rsrc1_num != 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
if (!Zero_Reg(Rdest_num)) {
/* Immediate is sign-extended to 64 bits before counting. */
val = (int64_t)(int32_t)Simm16;
for (cnt = 0; val != 0; val &= val-1)
cnt++;
Rdest = cnt;
}
ENDI
IMPL( popc_rrr )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
int64_t cnt;
uint64_t val;
if (Rsrc1_num != 0) {
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
if (!Zero_Reg(Rdest_num)) {
val = Rsrc2;
for (cnt = 0; val != 0; val &= val-1)
cnt++;
Rdest = cnt;
}
ENDI
/* ------------------------------------------------------------ */
/*
 * Specialist instructions ...
 * ... typically implementation dependent
 * ... so use the provided callbacks to the actual device
 *
 * RDasr/WRasr, RDPR/WRPR and the hyperprivileged equivalents all
 * funnel through per-implementation callbacks.  Note the write
 * forms compute rs1 XOR rs2/imm, per the V9 WRasr/WRPR definition.
 */
IMPL( read_state_reg )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->read_state_reg( sp, Rdest_num, Rsrc1_num );
ENDDEF
IMPL( write_state_reg_imm )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->write_state_reg( sp, Rdest_num, Rsrc1 ^ Simm16 );
ENDDEF
IMPL( write_state_reg_rrr )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->write_state_reg( sp, Rdest_num, Rsrc1 ^ Rsrc2 );
ENDDEF
IMPL( read_priv_reg )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->read_priv_reg( sp, Rdest_num, Rsrc1_num );
ENDDEF
IMPL( write_priv_reg_imm )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->write_priv_reg( sp, Rdest_num, Rsrc1 ^ Simm16 );
ENDDEF
IMPL( write_priv_reg_rrr )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->write_priv_reg( sp, Rdest_num, Rsrc1 ^ Rsrc2 );
ENDDEF
IMPL( read_hyper_priv_reg )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->read_hyp_priv_reg( sp, Rdest_num, Rsrc1_num );
ENDDEF
IMPL( write_hyper_priv_reg_imm )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->write_hyp_priv_reg( sp, Rdest_num, Rsrc1 ^ Simm16 );
ENDDEF
IMPL( write_hyper_priv_reg_rrr )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->write_hyp_priv_reg( sp, Rdest_num, Rsrc1 ^ Rsrc2 );
ENDDEF
/*
 * Memory barriers are no-ops here: Legion executes instructions
 * one at a time with no reordering to fence.  SAVE/RESTORE
 * delegate window rotation (and spill/fill traps) to shared
 * helpers.
 */
IMPL( stbar )
/* Nothing to do in legion?? */
NEXT_INSTN(sp);
ENDDEF
IMPL( membar )
/* Nothing to do in legion?? */
NEXT_INSTN(sp);
ENDDEF
IMPL( save_imm )
sparcv9_save_instr(sp, Rdest_num, Rsrc1 + Simm16);
ENDDEF
IMPL( save_rrr )
sparcv9_save_instr(sp, Rdest_num, Rsrc1 + Rsrc2);
ENDDEF
IMPL( restore_imm )
sparcv9_restore_instr(sp, Rdest_num, Rsrc1 + Simm16);
ENDDEF
IMPL( restore_rrr )
sparcv9_restore_instr(sp, Rdest_num, Rsrc1 + Rsrc2);
ENDDEF
/*
 * Privileged register-window management (SAVED, RESTORED,
 * ALLCLEAN, OTHERW, NORMALW, INVALW, FLUSHW).  All are privileged:
 * user mode gets a privileged_opcode trap.  Window counters move
 * in modular arithmetic over nwins (INC_MOD/DEC_MOD); otherwin
 * takes the decrement in preference to canrestore/cansave when
 * non-zero.
 */
IMPL( saved )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (V9_User == v9p->state) {
v9p->post_precise_trap(sp, Sparcv9_trap_privileged_opcode);
return;
}
v9p->cansave = INC_MOD(v9p->cansave, v9p->nwins);
if (v9p->otherwin == 0)
v9p->canrestore = DEC_MOD(v9p->canrestore, v9p->nwins);
else
v9p->otherwin = DEC_MOD(v9p->otherwin, v9p->nwins);
NEXT_INSTN(sp);
ENDDEF
IMPL( restored )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (V9_User == v9p->state) {
v9p->post_precise_trap(sp, Sparcv9_trap_privileged_opcode);
return;
}
v9p->canrestore = INC_MOD(v9p->canrestore, v9p->nwins);
if (v9p->otherwin == 0)
v9p->cansave = DEC_MOD(v9p->cansave, v9p->nwins);
else
v9p->otherwin = DEC_MOD(v9p->otherwin, v9p->nwins);
/* cleanwin grows by one per fill, capped at nwins - 1. */
if (v9p->cleanwin < (v9p->nwins-1))
v9p->cleanwin = v9p->cleanwin + 1;
NEXT_INSTN(sp);
ENDDEF
IMPL( allclean )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (V9_User == v9p->state) {
v9p->post_precise_trap(sp, Sparcv9_trap_privileged_opcode);
return;
}
v9p->cleanwin = v9p->nwins - 1;
NEXT_INSTN(sp);
ENDDEF
IMPL( otherw )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (V9_User == v9p->state) {
v9p->post_precise_trap(sp, Sparcv9_trap_privileged_opcode);
return;
}
if (v9p->otherwin != 0) {
EXEC_WARNING(("(@pc=0x%llx) "
"otherw executed with otherwin != 0",
sp->pc));
}
v9p->otherwin = v9p->canrestore;
v9p->canrestore = 0;
NEXT_INSTN(sp);
ENDDEF
IMPL( normalw )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (V9_User == v9p->state) {
v9p->post_precise_trap(sp, Sparcv9_trap_privileged_opcode);
return;
}
if (v9p->canrestore != 0) {
EXEC_WARNING(("(@pc=0x%llx) "
"normalw executed with canrestore != 0",
sp->pc));
}
v9p->canrestore = v9p->otherwin ;
v9p->otherwin = 0;
NEXT_INSTN(sp);
ENDDEF
IMPL( invalw )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (V9_User == v9p->state) {
v9p->post_precise_trap(sp, Sparcv9_trap_privileged_opcode);
return;
}
v9p->cansave = v9p->nwins - 2;
v9p->canrestore = 0;
v9p->otherwin = 0;
NEXT_INSTN(sp);
ENDDEF
IMPL( flushw )
/* FLUSHW: if any window besides the current one is occupied
 * (cansave < nwins - 2), post a spill trap so software empties
 * the windows; otherwise it is a no-op. */
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
sparcv9_trap_type_t tt;
if (v9p->cansave == (v9p->nwins-2)) {
NEXT_INSTN(sp);
return;
}
if (v9p->otherwin != 0) {
tt = Sparcv9_trap_spill_0_other | (v9p->wstate_other<<2);
} else {
tt = Sparcv9_trap_spill_0_normal | (v9p->wstate_normal<<2);
}
v9p->post_precise_trap(sp, tt);
ENDDEF
/*
 * Return
 * RETURN: VA-hole check first, then a shared helper performs the
 * combined control transfer + window restore.
 */
IMPL( return_imm )
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!v9p->pstate.addr_mask && v9p->check_vahole(sp, Rsrc1 + Simm16)) return;
sparcv9_return_instr(sp, Rsrc1 + Simm16);
ENDDEF
IMPL( return_rrr )
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!v9p->pstate.addr_mask && v9p->check_vahole(sp, Rsrc1 + Rsrc2)) return;
sparcv9_return_instr(sp, Rsrc1 + Rsrc2);
ENDDEF
/* ------------------------------------------------------------ */
/*
 * Done / Retry
 */
IMPL( done_retry )
sparcv9_cpu_t * v9p;
v9p = (sparcv9_cpu_t *)(sp->specificp);
v9p->done_retry(sp, (bool_t)Misc32); /* true if done instruction */
ENDDEF
/* ------------------------------------------------------------ */
/*
 * Instruction cache flushing
 *
 * Since Legion does not have a pipeline to clear, there
 * is nothing to do here for processors that do not
 * translate the VA.
 *
 * Processors that do more are those that do not maintain
 * instruction cache coherency in hardware. They will have
 * processor specific versions of these implementations.
 */
IMPL(iflush_rr)
/* Address computed (doubleword aligned) but intentionally unused
 * in this generic no-op implementation. */
tvaddr_t va;
va = (Rsrc1 + Rsrc2) & ~(tvaddr_t)7;
NEXT_INSTN(sp);
ENDDEF
IMPL(iflush_imm)
tvaddr_t va;
va = (Rsrc1 + Simm16) & ~(tvaddr_t)7;
NEXT_INSTN(sp);
ENDDEF
/* ------------------------------------------------------------ */
/*
* ASI loads and stores
*/
/* immediate forms */
IMPL(asi_reg_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->asi_access( sp, ASI_op, ASI_Rdest, sp->v9_asi, Rsrc1, Simm16, USE_ASI_REG );
ENDDEF
IMPL(asi_reg)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->asi_access( sp, ASI_op, ASI_Rdest, sp->v9_asi, Rsrc1, Rsrc2, USE_ASI_REG );
ENDDEF
IMPL(asi_num)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->asi_access( sp, ASI_op, ASI_Rdest, ASI_num, Rsrc1, Rsrc2, NO_FLAG );
ENDDEF
IMPL(memop_rrr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->asi_access( sp, ASI_op, ASI_Rdest, V9_ASI_IMPLICIT, Rsrc1, Rsrc2, NO_FLAG );
ENDDEF
IMPL(memop_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->asi_access( sp, ASI_op, ASI_Rdest, V9_ASI_IMPLICIT, Rsrc1, Simm16, NO_FLAG );
ENDDEF
/*
 * 64-bit FP load/store with ASI, register + immediate form: the i=1
 * encoding sources the ASI from the %asi register.
 * NOTE(review): passes NO_FLAG where asi_reg_imm passes USE_ASI_REG for
 * the same %asi-register case -- confirm the flag difference is intended.
 */
IMPL(fp64asi_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
/* FP-disabled trap check is skipped entirely when FP_DECODE_DISABLED */
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, ASI_op, ASI_Rdest, sp->v9_asi, Rsrc1, Simm16, NO_FLAG );
ENDDEF
/*
 * 64-bit FP load/store with ASI, register + register form: the ASI
 * comes from the instruction's imm_asi field (ASI_num).
 */
IMPL(fp64asi_rrr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
/* FP-disabled trap check is skipped entirely when FP_DECODE_DISABLED */
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, ASI_op, ASI_Rdest, ASI_num, Rsrc1, Rsrc2, NO_FLAG );
ENDDEF
/*
 * LDFSR / LDXFSR / STFSR / STXFSR, register + immediate address forms.
 * All funnel through asi_access with the implicit ASI and a NULL rd
 * (the FSR itself is the operand); the FP-disabled check is compiled
 * out when FP_DECODE_DISABLED is defined.
 */
IMPL(ldfsr_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_LdFSR|MA_Size32, NULL, V9_ASI_IMPLICIT, Rsrc1, Simm16, NO_FLAG );
ENDDEF
IMPL(ldxfsr_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_LdXFSR|MA_Size64, NULL, V9_ASI_IMPLICIT, Rsrc1, Simm16, NO_FLAG );
ENDDEF
IMPL(stfsr_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_StFSR|MA_Size32, NULL, V9_ASI_IMPLICIT, Rsrc1, Simm16, NO_FLAG );
ENDDEF
IMPL(stxfsr_imm)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_StXFSR|MA_Size64, NULL, V9_ASI_IMPLICIT, Rsrc1, Simm16, NO_FLAG );
ENDDEF
/*
 * LDFSR / LDXFSR / STFSR / STXFSR, register + register address forms.
 * Identical to the immediate forms above except the address offset is
 * Rsrc2 instead of Simm16.
 */
IMPL(ldfsr_rr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_LdFSR|MA_Size32, NULL, V9_ASI_IMPLICIT, Rsrc1, Rsrc2, NO_FLAG );
ENDDEF
IMPL(ldxfsr_rr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_LdXFSR|MA_Size64, NULL, V9_ASI_IMPLICIT, Rsrc1, Rsrc2, NO_FLAG );
ENDDEF
IMPL(stfsr_rr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_StFSR|MA_Size32, NULL, V9_ASI_IMPLICIT, Rsrc1, Rsrc2, NO_FLAG );
ENDDEF
IMPL(stxfsr_rr)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#ifndef FP_DECODE_DISABLED
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
#endif /* FP_DECODE_DISABLED */
v9p->asi_access( sp, MA_V9_StXFSR|MA_Size64, NULL, V9_ASI_IMPLICIT, Rsrc1, Rsrc2, NO_FLAG );
ENDDEF
/* ------------------------------------------------------------ */
/* Floating point branches. */
/*
* Instruction: sparcv9_fbule_fcc0
*/
/*
 * FBfcc conditional branches on fcc0, non-annulled forms.  Shared
 * pattern: if the condition matches the fcc0 field of the FSR, the
 * branch target (Rpc + SBRoffset32) becomes the new npc and we return
 * immediately; otherwise control falls through to ENDI.  Some forms
 * latch fcc0 into `cc` before the FPU-enabled check; the read has no
 * side effects, so the ordering difference is harmless.
 */
#if !defined(HAS_NATIVE_sparcv9_fbule_fcc0) /* { */
IMPL(fbule_fcc0)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCC0(sp->v9_fsr_ctrl) != V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbg_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbg_fcc0) /* { */
IMPL(fbg_fcc0)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fblg_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fblg_fcc0) /* { */
IMPL(fblg_fcc0)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_l || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fble_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fble_fcc0) /* { */
IMPL(fble_fcc0)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_e || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbge_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbge_fcc0) /* { */
IMPL(fbge_fcc0)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_g || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbne_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbne_fcc0) /* { */
IMPL(fbne_fcc0)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc != V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbug_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbug_fcc0) /* { */
IMPL(fbug_fcc0)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbul_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbul_fcc0) /* { */
IMPL(fbul_fcc0)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbue_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbue_fcc0) /* { */
IMPL(fbue_fcc0)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbe_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbe_fcc0) /* { */
IMPL(fbe_fcc0)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbo_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbo_fcc0) /* { */
IMPL(fbo_fcc0)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) != V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbu_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbu_fcc0) /* { */
IMPL(fbu_fcc0)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbuge_fcc0
 */
#if !defined(HAS_NATIVE_sparcv9_fbuge_fcc0) /* { */
IMPL(fbuge_fcc0)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) != V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbl_fcc0
 * NOTE(review): the guard below tests HAS_NATIVE_sparcv9_fbvs_fcc0 while
 * the implementation is fbl_fcc0 -- confirm which native symbol the
 * build actually defines before relying on the native override path.
 */
#if !defined(HAS_NATIVE_sparcv9_fbvs_fcc0) /* { */
IMPL(fbl_fcc0)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
* Instruction: sparcv9_fbule_fccN
*/
/*
 * FBfcc conditional branches on fcc1/fcc2/fcc3 (selected by SBRfcc),
 * non-annulled forms.  Same taken/fall-through pattern as the fcc0
 * family above, but the condition field is read via V9_FSR_FCCN.
 */
#if !defined(HAS_NATIVE_sparcv9_fbule_fccN) /* { */
IMPL(fbule_fccN)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) != V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbg_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbg_fccN) /* { */
IMPL(fbg_fccN)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fblg_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fblg_fccN) /* { */
IMPL(fblg_fccN)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_l || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fble_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fble_fccN) /* { */
IMPL(fble_fccN)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_e || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbge_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbge_fccN) /* { */
IMPL(fbge_fccN)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_g || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbne_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbne_fccN) /* { */
IMPL(fbne_fccN)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc != V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbug_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbug_fccN) /* { */
IMPL(fbug_fccN)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbul_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbul_fccN) /* { */
IMPL(fbul_fccN)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbue_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbue_fccN) /* { */
IMPL(fbue_fccN)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbe_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbe_fccN) /* { */
IMPL(fbe_fccN)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbo_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbo_fccN) /* { */
IMPL(fbo_fccN)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) != V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbu_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbu_fccN) /* { */
IMPL(fbu_fccN)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbuge_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbuge_fccN) /* { */
IMPL(fbuge_fccN)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) != V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
 * Instruction: sparcv9_fbl_fccN
 */
#if !defined(HAS_NATIVE_sparcv9_fbl_fccN) /* { */
IMPL(fbl_fccN)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
ENDI
#endif /* } */
/*
* Instruction: sparcv9_fbule_fcc0_an
*/
/*
 * FBfcc,a conditional branches on fcc0 (annul-bit set).  Taken path is
 * the same as the non-annulled forms; on the not-taken path the delay
 * slot is annulled by stepping pc/npc past it (pc = npc + 4,
 * npc = npc + 8) before falling through to ENDDEF.
 */
#if !defined(HAS_NATIVE_sparcv9_fbule_fcc0_an) /* { */
IMPL(fbule_fcc0_an)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCC0(sp->v9_fsr_ctrl) != V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbg_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbg_fcc0_an) /* { */
IMPL(fbg_fcc0_an)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fblg_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fblg_fcc0_an) /* { */
IMPL(fblg_fcc0_an)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_l || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fble_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fble_fcc0_an) /* { */
IMPL(fble_fcc0_an)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_e || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbge_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbge_fcc0_an) /* { */
IMPL(fbge_fcc0_an)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_g || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbne_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbne_fcc0_an) /* { */
IMPL(fbne_fcc0_an)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc != V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbug_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbug_fcc0_an) /* { */
IMPL(fbug_fcc0_an)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbul_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbul_fcc0_an) /* { */
IMPL(fbul_fcc0_an)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbue_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbue_fcc0_an) /* { */
IMPL(fbue_fcc0_an)
int cc = V9_FSR_FCC0(sp->v9_fsr_ctrl);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbe_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbe_fcc0_an) /* { */
IMPL(fbe_fcc0_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbo_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbo_fcc0_an) /* { */
IMPL(fbo_fcc0_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) != V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbu_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbu_fcc0_an) /* { */
IMPL(fbu_fcc0_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbuge_fcc0_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbuge_fcc0_an) /* { */
IMPL(fbuge_fcc0_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) != V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbl_fcc0_an
 * NOTE(review): the guard below tests HAS_NATIVE_sparcv9_fbvs_fcc0_an
 * while the implementation is fbl_fcc0_an -- confirm which native
 * symbol the build actually defines.
 */
#if !defined(HAS_NATIVE_sparcv9_fbvs_fcc0_an) /* { */
IMPL(fbl_fcc0_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCC0(sp->v9_fsr_ctrl) == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
* Instruction: sparcv9_fbule_fccN_an
*/
/*
 * FBfcc,a conditional branches on fcc1/fcc2/fcc3 (annul-bit set).
 * Same pattern as the fcc0_an family: taken -> branch and return,
 * not taken -> annul the delay slot by advancing pc/npc past it.
 */
#if !defined(HAS_NATIVE_sparcv9_fbule_fccN_an) /* { */
IMPL(fbule_fccN_an)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) != V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbg_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbg_fccN_an) /* { */
IMPL(fbg_fccN_an)
FP_EXEC_FPU_ON_CHECK;
if (V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_g) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fblg_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fblg_fccN_an) /* { */
IMPL(fblg_fccN_an)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_l || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fble_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fble_fccN_an) /* { */
IMPL(fble_fccN_an)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_e || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbge_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbge_fccN_an) /* { */
IMPL(fbge_fccN_an)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_g || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbne_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbne_fccN_an) /* { */
IMPL(fbne_fccN_an)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc != V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbug_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbug_fccN_an) /* { */
IMPL(fbug_fccN_an)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_g ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbul_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbul_fccN_an) /* { */
IMPL(fbul_fccN_an)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbue_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbue_fccN_an) /* { */
IMPL(fbue_fccN_an)
int cc = V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc);
FP_EXEC_FPU_ON_CHECK;
if ( cc == V9_fcc_u || cc == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbe_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbe_fccN_an) /* { */
IMPL(fbe_fccN_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_e ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbo_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbo_fccN_an) /* { */
IMPL(fbo_fccN_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) != V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbu_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbu_fccN_an) /* { */
IMPL(fbu_fccN_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_u ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbuge_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbuge_fccN_an) /* { */
IMPL(fbuge_fccN_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) != V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * Instruction: sparcv9_fbl_fccN_an
 */
#if !defined(HAS_NATIVE_sparcv9_fbl_fccN_an) /* { */
IMPL(fbl_fccN_an)
FP_EXEC_FPU_ON_CHECK;
if ( V9_FSR_FCCN(sp->v9_fsr_ctrl, SBRfcc) == V9_fcc_l ) {
tvaddr_t tpc = Rpc + SBRoffset32;
Rpc = Rnpc;
Rnpc = tpc;
return;
}
Rpc = Rnpc + 4;
Rnpc = Rnpc + 8;
ENDDEF
#endif /* } */
/*
 * ILLTRAP: unconditionally posts an illegal_instruction trap.
 */
IMPL(illtrap)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
ENDDEF
/* ------------------------------------------------------------ */
/* Miscellaneous stuff ... not real instructions, but executed that way */
/*
 * Handler for undecodable opcodes: optionally disassembles and logs the
 * offending word (rate-limited per opcode group), then posts an
 * illegal_instruction trap.
 */
IMPL(illegal_instruction)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#define IBUF 160
char ibuf[IBUF];
/*
 * Hack to limit spewing of warnings about invalid instructions. Each
 * opcode (as determined by bits 24-19) has a limit of 40 (actually
 * INV_INST_LIMIT) error reports. Feel free to replace this code with
 * something better.
 */
#define INV_INST_LIMIT 40
#define INV_INST_SIZE 64
#define OP_EXTR(inst) (((inst) >> 19) & 0x3f)
/* statics init to zero */
static int invalid_instruction_count[INV_INST_SIZE];
DBGILLINST(
sparcv9_idis(ibuf, IBUF, FE_INSTN(xcip->rawi), sp->pc);
if (invalid_instruction_count[OP_EXTR(FE_INSTN(xcip->rawi))]++ < INV_INST_LIMIT) {
lprintf(sp->gid, "illegal instruction pc=0x%llx "
"instn=%08x: %s\n", sp->pc, FE_INSTN(xcip->rawi), ibuf);
}
);
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
#undef IBUF
ENDDEF
/*
 * Unimplemented FPop: if the FPU is disabled post fp_disabled first;
 * otherwise record FTT = unimplemented_FPop in the FSR and post an
 * fp_exception_other trap (debug builds also log a disassembly).
 */
IMPL(fp_unimplemented_instruction)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#define IBUF 160
char ibuf[IBUF];
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
DBGILLINST(
sparcv9_idis(ibuf, IBUF, FE_INSTN(xcip->rawi), sp->pc);
lprintf(sp->gid, "unimplemented fp op pc=0x%llx instn=%08x: %s\n", sp->pc, FE_INSTN(xcip->rawi), ibuf);
);
sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
sp->v9_fsr_ctrl |= SPARCv9_FTT_unimplemented_FPop << V9_FSR_FTT_SHIFT;
v9p->post_precise_trap(sp, Sparcv9_trap_fp_exception_other);
#undef IBUF
ENDDEF
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
/*
 * Invalid FP register encoding (quad-FP capable processors only):
 * mirrors fp_unimplemented_instruction but records
 * FTT = invalid_fp_register before posting fp_exception_other.
 */
IMPL(fp_invalidreg_instruction)
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
#define IBUF 160
char ibuf[IBUF];
if (!v9p->fpu_on) {
v9p->post_precise_trap(sp, Sparcv9_trap_fp_disabled);
return;
}
DBGILLINST(
sparcv9_idis(ibuf, IBUF, FE_INSTN(xcip->rawi), sp->pc);
lprintf(sp->gid, "invalid fp register pc=0x%llx instn=%08x: %s\n", sp->pc, FE_INSTN(xcip->rawi), ibuf);
);
sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
sp->v9_fsr_ctrl |= SPARCv9_FTT_invalid_fp_register << V9_FSR_FTT_SHIFT;
v9p->post_precise_trap(sp, Sparcv9_trap_fp_exception_other);
#undef IBUF
ENDDEF
#endif /* } */
/*
 * Non-native fallbacks for FCMP{s,d} / FCMPE{s,d} on fcc0..fcc3.
 * Each stub checks the FPU is enabled and then signals an
 * unimplemented FPop via FPU_NOT_IMPLEMENTED (see macro at top of
 * file); real comparisons are only available via the native variants.
 */
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc0) /* { */
IMPL(fcmps_fcc0)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmps_fcc0")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc1) /* { */
IMPL(fcmps_fcc1)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmps_fcc1")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc2) /* { */
IMPL(fcmps_fcc2)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmps_fcc2")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc3) /* { */
IMPL(fcmps_fcc3)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmps_fcc3")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc0) /* { */
IMPL(fcmpd_fcc0)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpd_fcc0")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc1) /* { */
IMPL(fcmpd_fcc1)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpd_fcc1")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc2) /* { */
IMPL(fcmpd_fcc2)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpd_fcc2")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc3) /* { */
IMPL(fcmpd_fcc3)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpd_fcc3")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc0) /* { */
IMPL(fcmpes_fcc0)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpes_fcc0")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc1) /* { */
IMPL(fcmpes_fcc1)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpes_fcc1")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc2) /* { */
IMPL(fcmpes_fcc2)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpes_fcc2")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc3) /* { */
IMPL(fcmpes_fcc3)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmpes_fcc3")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc0) /* { */
IMPL(fcmped_fcc0)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmped_fcc0")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc1) /* { */
IMPL(fcmped_fcc1)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmped_fcc1")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc2) /* { */
IMPL(fcmped_fcc2)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmped_fcc2")
ENDDEF
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc3) /* { */
IMPL(fcmped_fcc3)
FP_EXEC_FPU_ON_CHECK
FPU_NOT_IMPLEMENTED("fcmped_fcc3")
ENDDEF
#endif /* } */
/*
* Tagged add and subtract.
*/
/*
 * TADDcc / TSUBcc and their trap-on-overflow (TV) forms.
 * v and c are the classic bitwise overflow/carry derivations: bit 31
 * and bit 63 of v give the 32-bit and 64-bit overflow, likewise for c
 * and carry.  icc_v additionally folds in the tag check: either of the
 * low two bits of s1|s2 being set marks a tagged-arithmetic overflow.
 * The TV forms post a tag_overflow trap instead of setting CCR when
 * icc_v is non-zero.
 */
#if !defined(HAS_NATIVE_sparcv9_tadd_co_imm) /* { */
IMPL(tadd_co_imm)
int64_t s1 = Rsrc1;
int64_t s2 = Simm16;
int64_t d;
uint64_t v, c;
uint32_t icc_v;
d = s1 + s2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_tadd_co_rrr) /* { */
IMPL(tadd_co_rrr)
uint64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
uint32_t icc_v;
d = Rsrc1 + Rsrc2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_tadd_co_tv_imm) /* { */
IMPL(tadd_co_tv_imm)
int64_t s1 = Rsrc1;
int64_t s2 = Simm16;
int64_t d;
uint64_t v, c;
uint32_t icc_v;
d = s1 + s2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
if (icc_v) {
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->post_precise_trap(sp, Sparcv9_trap_tag_overflow);
return;
}
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_tadd_co_tv_rrr) /* { */
IMPL(tadd_co_tv_rrr)
uint64_t s1 = Rsrc1, s2 = Rsrc2, d;
uint64_t v, c;
uint32_t icc_v;
d = Rsrc1 + Rsrc2;
v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
c = (s1 & s2) | (~d & (s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
if (icc_v) {
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->post_precise_trap(sp, Sparcv9_trap_tag_overflow);
return;
}
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_tsub_co_imm) /* { */
IMPL(tsub_co_imm)
int64_t s1 = Rsrc1;
int64_t s2 = Simm16;
int64_t d;
uint64_t v, c;
uint32_t icc_v;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_tsub_co_rrr) /* { */
IMPL(tsub_co_rrr)
int64_t s1 = Rsrc1;
int64_t s2 = Rsrc2;
int64_t d;
uint64_t v, c;
uint32_t icc_v;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_tsub_co_tv_imm) /* { */
IMPL(tsub_co_tv_imm)
int64_t s1 = Rsrc1;
int64_t s2 = Simm16;
int64_t d;
uint64_t v, c;
uint32_t icc_v;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
if (icc_v) {
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->post_precise_trap(sp, Sparcv9_trap_tag_overflow);
return;
}
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
#if !defined(HAS_NATIVE_sparcv9_tsub_co_tv_rrr) /* { */
IMPL(tsub_co_tv_rrr)
int64_t s1 = Rsrc1;
int64_t s2 = Rsrc2;
int64_t d;
uint64_t v, c;
uint32_t icc_v;
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
icc_v = ((v >> 31) & 1) | (((s1 | s2) >> 1) & 1) | ((s1 | s2) & 1);
if (icc_v) {
sparcv9_cpu_t * v9p = (sparcv9_cpu_t*)(sp->specificp);
v9p->post_precise_trap(sp, Sparcv9_trap_tag_overflow);
return;
}
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v(icc_v);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
#endif /* } */
/*
 * SIAM: Set Interval Arithmetic Mode.  Writes GSR.IM and GSR.IRND
 * from the low 3 bits of the immediate with a single shifted store.
 * NOTE(review): this relies on the V9_GSR_IM bit sitting immediately
 * above the V9_GSR_IRND field — confirm against sparcv9regs.h.
 */
IMPL(siam)
uint64_t mode;
FP_EXEC_FPU_ON_CHECK;
mode = Simm16;
sp->v9_gsr = (sp->v9_gsr & ~(V9_GSR_IM_MASK|V9_GSR_IRND_MASK)) |
((mode & 7) << V9_GSR_IRND_SHIFT);
ENDI
/*
 * SIR: Software Initiated Reset.  Illegal in user and privileged
 * states (posts illegal_instruction); otherwise posts the
 * software_initiated_reset trap.
 */
IMPL( sir )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (V9_User == v9p->state || V9_Priv == v9p->state) {
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
v9p->post_precise_trap(sp, Sparcv9_trap_software_initiated_reset);
ENDDEF
IMPL( alignaddr )
uint64_t d;
FP_EXEC_FPU_ON_CHECK;
d = Rsrc1 + Rsrc2;
/* gsr.align = lower 3 bits */
sp->v9_gsr &= ~MASK64(2,0);
sp->v9_gsr |= (d & MASK64(2,0));
d &= ~MASK64(2,0); /* zero lower 3 bits */
if (!Zero_Reg(Rdest_num)) {
Rdest = d;
}
ENDI
/*
 * ALIGNADDRESS_LITTLE (VIS1): like alignaddr, but GSR.align receives
 * the two's complement (mod 8) of the low 3 bits of the address sum.
 * FIX: (0x8 - x) evaluates to 8 when x == 0, and ORing 8 into the GSR
 * set bit 3 (the low bit of GSR.scale) instead of leaving align = 0;
 * the value must be masked back into the 3-bit align field.
 */
IMPL( alignaddrl )
uint64_t d;
FP_EXEC_FPU_ON_CHECK;
d = Rsrc1 + Rsrc2;
/* gsr.align = two's complement of lower 3 bits (kept to 3 bits) */
sp->v9_gsr &= ~MASK64(2,0);
sp->v9_gsr |= ((0x8 - (d & MASK64(2,0))) & MASK64(2,0));
d &= ~MASK64(2,0); /* zero lower 3 bits */
if (!Zero_Reg(Rdest_num)) {
Rdest = d;
}
ENDI
IMPL( bmask )
uint64_t d;
FP_EXEC_FPU_ON_CHECK;
d = Rsrc1 + Rsrc2;
if (!Zero_Reg(Rdest_num)) {
Rdest = d;
}
/* gsr.mask = lower 32 bits */
sp->v9_gsr &= ~MASK64(63,32);
sp->v9_gsr |= (d << 32);
ENDI
/*
 * BSHUFFLE (VIS2): permute bytes of the rs1:rs2 pair under GSR.mask.
 * Each 4-bit nibble of GSR.mask selects one source byte (0-7 from
 * rs1, 8-15 from rs2) for the corresponding destination byte; the
 * most significant nibble drives the most significant result byte.
 * The shift-left/shift-right split below moves the selected byte
 * from its source position (byte 'byte') to destination position
 * 'idx' without losing bits in either direction.
 */
IMPL( bshuffle )
uint64_t d = 0x0ull;
uint32_t mask;
uint_t idx, byte;
FP_EXEC_FPU_ON_CHECK;
mask = (uint32_t) (sp->v9_gsr >> 32); /* GSR.mask */
for (idx = 0; idx < 8; idx++) {
byte = (mask >> (28 - (idx*4)) & MASK64(3,0)); /* selector nibble for dest byte idx */
if (byte < 8) {
/* source byte comes from rs1 */
if (idx <= byte)
d |= ((F64src1 & MASK64(63-(8*byte),56-(8*byte)))<<((byte-idx)*8));
else
d |= ((F64src1 & MASK64(63-(8*byte),56-(8*byte)))>>((idx-byte)*8));
} else {
/* selectors 8-15 index into rs2 */
byte -= 8;
if (idx <= byte)
d |= ((F64src2 & MASK64(63-(8*byte),56-(8*byte)))<<((byte-idx)*8));
else
d |= ((F64src2 & MASK64(63-(8*byte),56-(8*byte)))>>((idx-byte)*8));
}
}
F64dest = d;
ENDI
IMPL( faligndata )
uint64_t d;
FP_EXEC_FPU_ON_CHECK;
/* align data based on GSR.align field */
d = (F64src1 << ((sp->v9_gsr & MASK64(2,0)) * 8));
if ((sp->v9_gsr & MASK64(2,0)) != 0) { /* prevent Rsrc2 >> 64 */
d |= (F64src2 >> ((8 - (sp->v9_gsr & MASK64(2,0))) * 8));
}
F64dest = d;
ENDI
IMPL( fpack32 )
uint64_t s1, s2;
uint_t gsr_scale;
uint64_t d;
int64_t i;
FP_EXEC_FPU_ON_CHECK;
s1 = F64src1;
s2 = F64src2;
gsr_scale = (sp->v9_gsr >> 3) & 0x1f;
d = 0;
i = (s2 >> 32) & 0xffffffff;
SIGN_EXT(i, 32);
i <<= gsr_scale;
i >>= 23;
if (i > 0xff)
i = 0xff;
else
if (i < 0)
i = 0;
d |= (i & 0xff) << 32;
i = (s2 >> 0) & 0xffffffff;
SIGN_EXT(i, 32);
i <<= gsr_scale;
i >>= 23;
if (i > 0xff)
i = 0xff;
else
if (i < 0)
i = 0;
d |= (i & 0xff) << 0;
s1 <<= 8;
d |= s1 & 0xffffff00ffffff00ull;
F64dest = d;
ENDI
IMPL( fpack16 )
uint64_t s2;
uint_t gsr_scale;
int64_t i;
uint32_t d;
FP_EXEC_FPU_ON_CHECK;
s2 = F64src2;
/* fpack16 ignores gsr.scale[4] */
gsr_scale = (sp->v9_gsr >> 3) & 0x0f;
d = 0;
i = (s2 >> 48) & 0xffff;
SIGN_EXT(i, 16);
i <<= gsr_scale;
i >>= 7;
if (i > 0xff)
i = 0xff;
else
if (i < 0)
i = 0;
d |= (i & 0xff) << 24;
i = (s2 >> 32) & 0xffff;
SIGN_EXT(i, 16);
i <<= gsr_scale;
i >>= 7;
if (i > 0xff)
i = 0xff;
else
if (i < 0)
i = 0;
d |= (i & 0xff) << 16;
i = (s2 >> 16) & 0xffff;
SIGN_EXT(i, 16);
i <<= gsr_scale;
i >>= 7;
if (i > 0xff)
i = 0xff;
else
if (i < 0)
i = 0;
d |= (i & 0xff) << 8;
i = (s2 >> 0) & 0xffff;
SIGN_EXT(i, 16);
i <<= gsr_scale;
i >>= 7;
if (i > 0xff)
i = 0xff;
else
if (i < 0)
i = 0;
d |= (i & 0xff) << 0;
F32dest = d;
ENDI
IMPL( fpackfix )
uint64_t s2;
uint_t gsr_scale;
int64_t i;
uint32_t d;
FP_EXEC_FPU_ON_CHECK;
s2 = F64src2;
gsr_scale = (sp->v9_gsr >> 3) & 0x1f;
d = 0;
i = (s2 >> 32) & 0xffffffffull;
SIGN_EXT(i, 32);
i <<= gsr_scale;
i >>= 16;
if (i > 0x7fff)
i = 0x7fff;
else
if (i < -0x8000)
i = -0x8000;
d |= (i & 0xffff) << 16;
i = (s2 >> 0) & 0xffffffffull;
SIGN_EXT(i, 32);
i <<= gsr_scale;
i >>= 16;
if (i > 0x7fff)
i = 0x7fff;
else
if (i < -0x8000)
i = -0x8000;
d |= (i & 0xffff) << 0;
F32dest = d;
ENDI
IMPL( pdist )
uint64_t s1, s2;
uint64_t d;
uint_t idx;
FP_EXEC_FPU_ON_CHECK;
s1 = F64src1;
s2 = F64src2;
d = 0;
for (idx = 0; idx < 8; idx++) {
if ((s1 & 0xff) > (s2 & 0xff))
d += (s1 & 0xff) - (s2 & 0xff);
else
d += (s2 & 0xff) - (s1 & 0xff);
s1 >>= 8;
s2 >>= 8;
}
F64dest += d;
ENDI
IMPL( pdistn )
uint64_t s1, s2;
uint64_t d;
uint_t idx;
FP_EXEC_FPU_ON_CHECK;
s1 = F64src1;
s2 = F64src2;
d = 0;
for (idx = 0; idx < 8; idx++) {
if ((s1 & 0xff) > (s2 & 0xff))
d += (s1 & 0xff) - (s2 & 0xff);
else
d += (s2 & 0xff) - (s1 & 0xff);
s1 >>= 8;
s2 >>= 8;
}
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
IMPL( fpmerge )
uint64_t s1, s2;
FP_EXEC_FPU_ON_CHECK;
s1 = F32src1;
s2 = F32src2;
F64dest = ((s1 & 0xff000000) << 32) |
((s1 & 0xff0000) << 24) |
((s1 & 0xff00) << 16) |
((s1 & 0xff) << 8) |
((s2 & 0xff000000) << 24) |
((s2 & 0xff0000) << 16) |
((s2 & 0xff00) << 8) |
((s2 & 0xff) << 0);
ENDI
IMPL( fexpand )
uint64_t s2;
FP_EXEC_FPU_ON_CHECK;
s2 = F32src2;
F64dest = ((s2 & 0xff000000) << 28) |
((s2 & 0xff0000) << 20) |
((s2 & 0xff00) << 12) |
((s2 & 0xff) << 4);
ENDI
/*
 * ARRAY16 (VIS1): 3-D blocked-array addressing.  Interleaves the X/Y/Z
 * coordinate fields packed in rs1 into a blocked memory offset; rs2
 * gives the power-of-two array size n (clamped to 5).  The final
 * shift left by 1 scales the element index for 16-bit elements.
 * NOTE(review): field positions below are taken verbatim from this
 * implementation — verify against the VIS ARRAY16 field layout.
 */
IMPL( array16 )
uint64_t s1, n, d;
s1 = Rsrc1;
n = Rsrc2;
n &= 7;
if (n > 5) /* architectural clamp on the size field */
n = 5;
d = 0;
/* low-order interleave of the coordinate lower bits */
d |= (s1 >> 11) & 3;
d |= ((s1 >> 33) & 3) << 2;
d |= ((s1 >> 55) & 1) << 4;
/* middle coordinate fields */
d |= ((s1 >> 13) & 0xf) << 5;
d |= ((s1 >> 35) & 0xf) << 9;
d |= ((s1 >> 56) & 0xf) << 13;
if (n != 0) {
/* size-dependent upper coordinate fields */
d |= ((s1 >> 17) & ((1<<n)-1)) << 17;
d |= ((s1 >> 39) & ((1<<n)-1)) << (17+n);
}
d |= ((s1 >> 60) & 0xf) << (17+(2*n));
d <<= 1; /* scale for 2-byte elements */
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
/*
 * ARRAY32 (VIS1): identical interleave to array16 above, but the
 * final element-index scale is a shift left by 2 (4-byte elements).
 */
IMPL( array32 )
uint64_t s1, n, d;
s1 = Rsrc1;
n = Rsrc2;
n &= 7;
if (n > 5) /* architectural clamp on the size field */
n = 5;
d = 0;
d |= (s1 >> 11) & 3;
d |= ((s1 >> 33) & 3) << 2;
d |= ((s1 >> 55) & 1) << 4;
d |= ((s1 >> 13) & 0xf) << 5;
d |= ((s1 >> 35) & 0xf) << 9;
d |= ((s1 >> 56) & 0xf) << 13;
if (n != 0) {
d |= ((s1 >> 17) & ((1<<n)-1)) << 17;
d |= ((s1 >> 39) & ((1<<n)-1)) << (17+n);
}
d |= ((s1 >> 60) & 0xf) << (17+(2*n));
d <<= 2; /* scale for 4-byte elements */
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
/*
 * ARRAY8 (VIS1): identical interleave to array16/array32 above, with
 * no final scaling shift (1-byte elements).
 */
IMPL( array8 )
uint64_t s1, n, d;
s1 = Rsrc1;
n = Rsrc2;
n &= 7;
if (n > 5) /* architectural clamp on the size field */
n = 5;
d = 0;
d |= (s1 >> 11) & 3;
d |= ((s1 >> 33) & 3) << 2;
d |= ((s1 >> 55) & 1) << 4;
d |= ((s1 >> 13) & 0xf) << 5;
d |= ((s1 >> 35) & 0xf) << 9;
d |= ((s1 >> 56) & 0xf) << 13;
if (n != 0) {
d |= ((s1 >> 17) & ((1<<n)-1)) << 17;
d |= ((s1 >> 39) & ((1<<n)-1)) << (17+n);
}
d |= ((s1 >> 60) & 0xf) << (17+(2*n));
if (!Zero_Reg(Rdest_num))
Rdest = d;
ENDI
/*
* sim_edge: simulates all 12 EDGE opcodes:
edge8cc,edge8lcc,edge16cc,edge16lcc,edge32cc,edge32lcc (VIS1)
edge8n, edge8ln, edge16n, edge16ln, edge32n, edge32ln (VIS2)
No code is provided here for setting the condition code for
the cc-setting variants (opcode bit5=0). It must be computed
externally to this function by computing (op1 - op2) and
setting the condition code bits as for the SUBCC instruction.
*/
static uint64_t /* returns edge result */
sim_edge (
uint32_t instr, /* instruction opcode */
uint64_t op1, /* operand1 */
uint64_t op2, /* operand2 */
bool_t am_flag /* pstate.AM */
)
{
uint64_t res_mask; /* result mask: 8/4/2 bits */
uint64_t ledge; /* left edge */
uint64_t redge; /* right edge: only computed when op1/op2 address the same 8-byte block */
/* convert opcode bits 8:7 (8/16/32) to shift counts */
uint64_t sh_edg = (instr >> 7) & 3; /* edge shift (0/1/2) */
uint64_t sh_res = 8 >> sh_edg; /* result shift (8/4/2) */
uint64_t lsize = (op1 & 7) >> sh_edg; /* left edge size */
uint64_t rsize = (op2 & 7) >> sh_edg; /* right edge size */
/* compare address bits for equality */
uint64_t adr_diff = op1 ^ op2;
/* in 32 bit mode ignore miscompares in bits 63:32 */
if(am_flag) /* test AM bit */
{
adr_diff = (uint32_t)adr_diff;
}
adr_diff >>= 3; /* address-equality flag for later: 0 iff same 8-byte block */
/* BIG or LITTLE endian instruction variant? (opcode bit 6) */
if(instr & 0x40) /* 1=little-endian */
{
/* 0xFF = starting left mask (byte0) */
/* 0x1FE = starting right mask (byte1) and extra ones */
res_mask = (0xFFu << sh_res) >> 8;
ledge = (0xFFu << lsize) & res_mask;
if(0==adr_diff)
{
/* both edges fall in the same block: intersect left and right masks */
redge = ((0x1FEu << rsize) >> 8) & res_mask;
ledge &= redge;
}
}
else /* 0=big-endian */
{
/* 0xFF = starting left mask (byte0) */
/* 0x7F80 = starting right mask (byte0) and extra ones */
res_mask = (0xFF00u >> sh_res) & 0xFF;
int rjust = 8 - sh_res; /* right-justify the big-endian masks */
ledge = ((0xFFu >> lsize) & res_mask) >> rjust;
if(0==adr_diff)
{
/* both edges fall in the same block: intersect left and right masks */
redge = ((0x7F80u >> rsize) & res_mask) >> rjust;
ledge &= redge;
}
}
return ledge;
/* Don't forget to calculate the CC from op1-op2 */
}
/*
 * Set the integer condition codes for the cc-setting EDGE variants.
 * Architecturally the same as SUBcc on (op1 - op2): icc/xcc N, Z, V
 * and C are updated, the subtraction result is discarded.
 * FIX: s2 was initialized from op1 (copy/paste error), so d was
 * always 0 — Z was always set and N/V/C never were.
 */
static void
sim_edge_cc(simcpu_t *sp, uint64_t op1, uint64_t op2)
{
int64_t s1 = op1;
int64_t s2 = op2;
int64_t d;
uint64_t v, c; /* bitwise signed-overflow and borrow vectors */
d = s1 - s2;
v = (s1 & ~s2 & ~d) | (~s1 & s2 & d);
c = (~s1 & s2) | (d & (~s1 | s2));
sp->v9_ccr = V9_xcc_v((v >> 63) & 1);
sp->v9_ccr |= V9_icc_v((v >> 31) & 1);
sp->v9_ccr |= V9_xcc_c((c >> 63) & 1);
sp->v9_ccr |= V9_icc_c((c >> 31) & 1);
sp->v9_ccr |= V9_xcc_n((d >> 63) & 1);
sp->v9_ccr |= V9_icc_n((d >> 31) & 1);
sp->v9_ccr |= V9_xcc_z(d ? 0 : 1);
sp->v9_ccr |= V9_icc_z((d & MASK64(31,0)) ? 0 : 1);
}
IMPL( edge16 )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
sim_edge_cc(sp, Rsrc1, Rsrc2);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge16l )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
sim_edge_cc(sp, Rsrc1, Rsrc2);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge16ln )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge16n )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge32 )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
sim_edge_cc(sp, Rsrc1, Rsrc2);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge32l )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
sim_edge_cc(sp, Rsrc1, Rsrc2);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge32ln )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge32n )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge8 )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
sim_edge_cc(sp, Rsrc1, Rsrc2);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge8l )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
sim_edge_cc(sp, Rsrc1, Rsrc2);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge8ln )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
IMPL( edge8n )
sparcv9_cpu_t * v9p = (sparcv9_cpu_t *)(sp->specificp);
if (!Zero_Reg(Rdest_num))
Rdest = sim_edge(xcip->rawi, Rsrc1, Rsrc2,
v9p->pstate.addr_mask);
ENDI
/*
 * FCMPxx16: partitioned compare of the four signed 16-bit fields of
 * F64src1 against F64src2; bit i of the integer result is set when
 * the comparison holds for field i (field 0 = least significant).
 * FIX: the writeback guard was inverted — "if (Zero_Reg(...))" wrote
 * the result only when rd WAS %g0; every other writeback in this
 * file uses !Zero_Reg, so rd was never updated for real registers.
 */
#define FCMPx16(_cond_) \
int32_t s1, s2; \
uint64_t fs1, fs2, res; \
uint_t i; \
fs1 = F64src1; \
fs2 = F64src2; \
res = 0; \
for (i = 0; i < 4; i++) { \
s1 = fs1 & 0xffff; \
s2 = fs2 & 0xffff; \
SIGN_EXT(s1, 16); \
SIGN_EXT(s2, 16); \
if (s1 _cond_ s2) \
res |= 1 << i; \
fs1 >>= 16; \
fs2 >>= 16; \
} \
if (!Zero_Reg(Rdest_num)) \
Rdest = res;
/* FCMPEQ16 (VIS1): per-halfword signed equality compare */
IMPL( fcmpeq16 )
FP_EXEC_FPU_ON_CHECK;
FCMPx16(==)
ENDI
/* FCMPGT16 (VIS1): per-halfword signed greater-than compare */
IMPL( fcmpgt16 )
FP_EXEC_FPU_ON_CHECK;
FCMPx16(>)
ENDI
/* FCMPLE16 (VIS1): per-halfword signed less-or-equal compare */
IMPL( fcmple16 )
FP_EXEC_FPU_ON_CHECK;
FCMPx16(<=)
ENDI
/* FCMPNE16 (VIS1): per-halfword signed inequality compare */
IMPL( fcmpne16 )
FP_EXEC_FPU_ON_CHECK;
FCMPx16(!=)
ENDI
/*
 * FCMPxx32: partitioned compare of the two signed 32-bit fields of
 * F64src1 against F64src2; bit i of the integer result is set when
 * the comparison holds for field i (field 0 = least significant).
 * FIX: same inverted Zero_Reg writeback guard as FCMPx16 — the
 * result must be written when rd is NOT %g0.
 */
#define FCMPx32(_cond_) \
int64_t s1, s2; \
uint64_t fs1, fs2, res; \
uint_t i; \
fs1 = F64src1; \
fs2 = F64src2; \
res = 0; \
for (i = 0; i < 2; i++) { \
s1 = fs1 & 0xffffffff; \
s2 = fs2 & 0xffffffff; \
SIGN_EXT(s1, 32); \
SIGN_EXT(s2, 32); \
if (s1 _cond_ s2) \
res |= 1 << i; \
fs1 >>= 32; \
fs2 >>= 32; \
} \
if (!Zero_Reg(Rdest_num)) \
Rdest = res;
/* FCMPEQ32 (VIS1): per-word signed equality compare */
IMPL( fcmpeq32 )
FP_EXEC_FPU_ON_CHECK;
FCMPx32(==)
ENDI
/* FCMPGT32 (VIS1): per-word signed greater-than compare */
IMPL( fcmpgt32 )
FP_EXEC_FPU_ON_CHECK;
FCMPx32(>)
ENDI
/* FCMPLE32 (VIS1): per-word signed less-or-equal compare */
IMPL( fcmple32 )
FP_EXEC_FPU_ON_CHECK;
FCMPx32(<=)
ENDI
/* FCMPNE32 (VIS1): per-word signed inequality compare */
IMPL( fcmpne32 )
FP_EXEC_FPU_ON_CHECK;
FCMPx32(!=)
ENDI
/*
 * FMUL8x16 (VIS1): multiply each unsigned 8-bit element of the
 * 32-bit rs1 by the corresponding signed 16-bit element of the
 * 64-bit rs2; each 24-bit product is rounded (+0x80) and its upper
 * 16 bits form the corresponding result halfword.
 */
IMPL( fmul8x16 )
FP_EXEC_FPU_ON_CHECK;
uint16_t s1;
int32_t s2;
int32_t d;
uint32_t fs1;
uint64_t fs2, res;
uint_t i;
fs1 = F32src1;
fs2 = F64src2;
res = 0;
for (i = 0; i < 4; i++) {
s1 = (uint8_t) fs1; /* next unsigned 8-bit element of rs1 */
s2 = (int16_t) fs2; /* next signed 16-bit element of rs2 */
d = s1 * s2;
d += 0x80; /* round to nearest */
d >>= 8; /* keep upper 16 bits of the 24-bit product */
res >>= 16; /* shift result down; insert new halfword at the top */
res |= (uint64_t)d << 48;
fs1 >>= 8;
fs2 >>= 16;
}
F64dest = res;
ENDI
/*
 * FMUL8x16AL (VIS1): multiply each unsigned 8-bit element of rs1 by
 * the single signed LOWER 16-bit halfword of the 32-bit rs2; same
 * rounding (+0x80, >>8) as fmul8x16.
 */
IMPL( fmul8x16al )
FP_EXEC_FPU_ON_CHECK;
uint16_t s1;
int32_t s2;
int32_t d;
uint32_t fs1, fs2;
uint64_t res;
uint_t i;
fs1 = F32src1;
fs2 = F32src2;
res = 0;
s2 = (int16_t) fs2; /* the one (lower) multiplier for all elements */
for (i = 0; i < 4; i++) {
s1 = (uint8_t) fs1;
d = s1 * s2;
d += 0x80; /* round to nearest */
d >>= 8; /* keep upper 16 bits of the 24-bit product */
res >>= 16;
res |= (uint64_t)d << 48;
fs1 >>= 8;
}
F64dest = res;
ENDI
/*
 * FMUL8x16AU (VIS1): multiply each unsigned 8-bit element of rs1 by
 * the single signed UPPER 16-bit halfword of the 32-bit rs2; same
 * rounding (+0x80, >>8) as fmul8x16.
 */
IMPL( fmul8x16au )
FP_EXEC_FPU_ON_CHECK;
uint16_t s1;
int32_t s2;
int32_t d;
uint32_t fs1, fs2;
uint64_t res;
uint_t i;
fs1 = F32src1;
fs2 = F32src2;
res = 0;
s2 = (int16_t) (fs2 >> 16); /* the one (upper) multiplier for all elements */
for (i = 0; i < 4; i++) {
s1 = (uint8_t) fs1;
d = s1 * s2;
d += 0x80; /* round to nearest */
d >>= 8; /* keep upper 16 bits of the 24-bit product */
res >>= 16;
res |= (uint64_t)d << 48;
fs1 >>= 8;
}
F64dest = res;
ENDI
/*
 * FMUL8SUx16 (VIS1): multiply the SIGNED upper 8 bits of each 16-bit
 * partition of rs1 by the corresponding signed 16-bit element of
 * rs2; round (+0x80) and keep the upper 16 bits of each product.
 */
IMPL( fmul8sux16 )
FP_EXEC_FPU_ON_CHECK;
int16_t s1;
int32_t s2;
int32_t d;
uint64_t fs1, fs2, res;
uint_t i;
fs1 = F64src1;
fs2 = F64src2;
res = 0;
fs1 >>= 8; /* position the upper byte of partition 0 at bit 0 */
for (i = 0; i < 4; i++) {
s1 = (int8_t) fs1; /* signed upper byte of this partition */
s2 = (int16_t) fs2;
d = s1 * s2;
d += 0x80; /* round to nearest */
d >>= 8; /* keep upper 16 bits of the product */
res >>= 16;
res |= (uint64_t)d << 48;
fs1 >>= 16;
fs2 >>= 16;
}
F64dest = res;
ENDI
/*
 * FMUL8ULx16 (VIS1): multiply the UNSIGNED lower 8 bits of each
 * 16-bit partition of rs1 by the corresponding signed 16-bit element
 * of rs2; round (+0x8000) and keep the (sign-extended) bits above
 * bit 16 of each product.
 */
IMPL( fmul8ulx16 )
FP_EXEC_FPU_ON_CHECK;
uint16_t s1;
int32_t s2;
int32_t d;
uint64_t fs1, fs2, res;
uint_t i;
fs1 = F64src1;
fs2 = F64src2;
res = 0;
for (i = 0; i < 4; i++) {
s1 = (uint8_t) fs1; /* unsigned lower byte of this partition */
s2 = (int16_t) fs2;
d = s1 * s2;
d += 0x8000; /* round to nearest at bit 16 */
d >>= 16; /* keep the high-order (sign-extended) bits */
res >>= 16;
res |= (uint64_t)d << 48;
fs1 >>= 16;
fs2 >>= 16;
}
F64dest = res;
ENDI
/*
 * FMULD8SUx16 (VIS1): multiply the SIGNED upper 8 bits of each 16-bit
 * partition of the 32-bit rs1 by the corresponding signed 16-bit
 * element of rs2, producing two 32-bit results (product shifted left
 * 8 to align with fmuld8ulx16 for summation).
 */
IMPL( fmuld8sux16 )
FP_EXEC_FPU_ON_CHECK;
int16_t s1;
int32_t s2;
int32_t d;
uint32_t fs1, fs2;
uint64_t res;
uint_t i;
fs1 = F32src1;
fs2 = F32src2;
res = 0;
fs1 >>= 8; /* position the upper byte of partition 0 at bit 0 */
for (i = 0; i < 2; i++) {
s1 = (int8_t) fs1; /* signed upper byte of this partition */
s2 = (int16_t) fs2;
d = s1 * s2;
d <<= 8; /* align the 24-bit product in 32 bits */
res >>= 32;
res |= (uint64_t)d << 32;
fs1 >>= 16;
fs2 >>= 16;
}
F64dest = res;
ENDI
/*
 * FMULD8ULx16 (VIS1): multiply the UNSIGNED lower 8 bits of each
 * 16-bit partition of the 32-bit rs1 by the corresponding signed
 * 16-bit element of rs2, producing two sign-extended 32-bit results.
 */
IMPL( fmuld8ulx16 )
FP_EXEC_FPU_ON_CHECK;
uint16_t s1;
int32_t s2;
int32_t d;
uint32_t fs1, fs2;
uint64_t res;
uint_t i;
fs1 = F32src1;
fs2 = F32src2;
res = 0;
for (i = 0; i < 2; i++) {
s1 = (uint8_t) fs1; /* unsigned lower byte of this partition */
s2 = (int16_t) fs2;
d = s1 * s2;
SIGN_EXT(d, 24); /* sign-extend the 24-bit product to 32 bits */
res >>= 32;
res |= (uint64_t)d << 32;
fs1 >>= 16;
fs2 >>= 16;
}
F64dest = res;
ENDI
/*************************************************************/
/*
* Floating point status register(s) ...
*
* sp->v9_fsr_ctrl holds all the FP control bits (only)
* tem, cexc and aexc fields are zeroed.
* This value is used for setup before execution .. and holds
* the current control bits and the current condition codes.
* The error and trap enable bits should always be zeroed.
* sp->v9_fsr_exc holds the exception bits (current and accumulated).
* in the same bit positions as they occur in the fsr.
* sp->v9_fsr_tem holds the trap enable bits NOTE: these bits are shifted
* down to start at bit0 in this register - this is to make
* masking with fsr_exc one instruction faster in the
* fpop execution common case.
*/
/*
 * Write only the lower 32 bits of the FSR (STFSR semantics): the
 * upper half is re-supplied from the current control state and the
 * combined value goes through the full v9_set_fsr() path.
 */
void v9_set_fsr_lower(simcpu_t * sp, uint64_t val)
{
	uint64_t upper = sp->v9_fsr_ctrl & ~MASK64(31,0);
	v9_set_fsr(sp, upper | (val & MASK64(31,0)));
}
/*
 * Write the full 64-bit FSR, splitting it into the three internal
 * fields described above (v9_fsr_ctrl / v9_fsr_tem / v9_fsr_exc).
 * The current ftt field is preserved (the OR of the saved FTT bits
 * below overrides whatever the caller wrote there), and reserved
 * bits are masked off per-chip before the split.
 */
void v9_set_fsr(simcpu_t * sp, uint64_t val)
{
uint64_t oldval;
DBGFSR( oldval = v9_get_fsr(sp); );
#ifdef NIAGARA1
val &= V9_FSR_REG_MASK;
#else /* NIAGARA1 */
val &= V9_FSR_REG_MASK | V9_FSR_NS_MASK; /* non-N1 chips also keep the NS bit */
#endif /* NIAGARA1 */
val |= sp->v9_fsr_ctrl & V9_FSR_FTT_MASK; /* ftt is not writable here */
sp->v9_fsr_ctrl = val & ~(V9_FSR_TEM_MASK | V9_FSR_AEXC_MASK | V9_FSR_CEXC_MASK);
sp->v9_fsr_tem = (val & V9_FSR_TEM_MASK)>>V9_FSR_TEM_BIT; /* tem kept shifted to bit 0 */
sp->v9_fsr_exc = val & (V9_FSR_AEXC_MASK | V9_FSR_CEXC_MASK);
DBGFSR( lprintf(sp->gid, "v9_set_fsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), oldval); );
}
/*
 * Reassemble the architectural FSR from the three internal fields:
 * control/cc bits, the trap-enable mask (stored shifted to bit 0),
 * and the exception bits.
 */
uint64_t v9_get_fsr(simcpu_t * sp)
{
	uint64_t fsr = sp->v9_fsr_ctrl;
	fsr |= sp->v9_fsr_tem << V9_FSR_TEM_BIT;
	fsr |= sp->v9_fsr_exc;
	return fsr;
}
/*
 * Update the FSR after an fpop completes: only the ftt field and the
 * current/accrued exception bits change; all other control bits are
 * left untouched.
 */
void v9_set_fsr_fp_op (simcpu_t * sp, uint64_t val)
{
	uint64_t oldval;
	DBGFSR( oldval = v9_get_fsr(sp); );
	sp->v9_fsr_ctrl = (sp->v9_fsr_ctrl & ~V9_FSR_FTT_MASK) | (val & V9_FSR_FTT_MASK);
	sp->v9_fsr_exc = val & (V9_FSR_AEXC_MASK | V9_FSR_CEXC_MASK);
	DBGFSR( lprintf(sp->gid, "v9_set_fsr_fp_op: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), oldval); );
}
/*-------------------------- OLD CODE ------------------------*/
#if 0 /* { */
--
-- /**/
-- /* We still get crappy code from these from the Sun*/
-- /* compiler - eventually well re-write these entirely*/
-- /* as host native routines in assembler ...*/
-- /**/
--
-- /* BTW: cico = condition codes in+out ... not just*/
-- /* carry in+carry out ... note addcc sets *all* cond codes*/
--
-- IMPL( add_ci_imm )
-- Rdest = Rsrc1 + Simm16 + (Rccr & 1LL);
-- ENDI
--
-- IMPL( add_co_imm )
-- Rdest = il_add_co( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( add_cico_imm )
-- Rdest = il_add_cico( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( add_co_imm_rd0 )
-- (void)il_add_co( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( add_cico_imm_rd0 )
-- (void)il_add_cico( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( add_ci_rrr )
-- Rdest = Rsrc1 + Rsrc2 + (Rccr & 1LL);
-- ENDI
--
-- IMPL( add_co_rrr )
-- Rdest = il_add_co( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( add_cico_rrr )
-- Rdest = il_add_cico( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( add_co_rrr_rd0 )
-- (void)il_add_co( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( add_cico_rrr_rd0 )
-- (void)il_add_cico( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
--
--
--
-- IMPL( sub_ci_imm )
-- Rdest = Rsrc1 - Simm16 - (Rccr & 1LL);
-- ENDI
--
-- IMPL( sub_co_imm )
-- Rdest = il_sub_co( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( sub_co_imm_rd0 )
-- (void)il_sub_co( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( sub_cico_imm )
-- Rdest = il_sub_cico( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( sub_cico_imm_rd0 )
-- (void)il_sub_cico( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( sub_ci_rrr )
-- Rdest = Rsrc1 - Rsrc2 - (Rccr & 1LL);
-- ENDI
--
-- IMPL( sub_co_rrr )
-- Rdest = il_sub_co( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( sub_co_rrr_rd0 )
-- (void)il_sub_co( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( sub_cico_rrr )
-- Rdest = il_sub_cico( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( sub_cico_rrr_rd0 )
-- (void)il_sub_cico( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
--
-- /**/
-- /* Logic instructions*/
-- /**/
--
-- IMPL( and_imm )
-- Rdest = Rsrc1 & Simm16;
-- ENDI
--
-- IMPL( and_cc_imm )
-- Rdest = il_and_cc( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( and_cc_imm_rd0 )
-- (void)il_and_cc( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( and_rrr )
-- Rdest = Rsrc1 & Rsrc2;
-- ENDI
--
-- IMPL( and_cc_rrr )
-- Rdest = il_and_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( and_cc_rrr_rd0 )
-- (void)il_and_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
-- IMPL( andn_rrr )
-- Rdest = Rsrc1 & ~Rsrc2;
-- ENDI
--
-- IMPL( andn_cc_rrr )
-- Rdest = il_andn_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( andn_cc_rrr_rd0 )
-- (void)il_andn_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
--
-- IMPL( or_imm )
-- Rdest = Rsrc1 | Simm16;
-- ENDI
--
-- IMPL( or_cc_imm )
-- Rdest = il_or_cc( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( or_cc_imm_rd0 )
-- (void)il_or_cc( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( or_rrr )
-- Rdest = Rsrc1 | Rsrc2;
-- ENDI
--
-- IMPL( or_cc_rrr )
-- Rdest = il_or_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( or_cc_rrr_rd0 )
-- (void)il_or_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
--
-- IMPL( orn_rrr )
-- Rdest = Rsrc1 | ~Rsrc2;
-- ENDI
--
-- IMPL( orn_cc_rrr )
-- Rdest = il_orn_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( orn_cc_rrr_rd0 )
-- (void)il_orn_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
--
-- IMPL( xor_imm )
-- Rdest = Rsrc1 ^ Simm16;
-- ENDI
--
-- IMPL( xor_cc_imm )
-- Rdest = il_xor_cc( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( xor_cc_imm_rd0 )
-- (void)il_xor_cc( Rsrc1, Simm16, &Rccr );
-- ENDI
--
-- IMPL( xor_rrr )
-- Rdest = Rsrc1 ^ Rsrc2;
-- ENDI
--
-- IMPL( xor_cc_rrr )
-- Rdest = il_xor_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( xor_cc_rrr_rd0 )
-- (void)il_xor_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
--
-- IMPL( xnor_rrr )
-- Rdest = ~(Rsrc1 ^ Rsrc2);
-- ENDI
--
-- IMPL( xnor_cc_rrr )
-- Rdest = il_xnor_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
-- IMPL( xnor_cc_rrr_rd0 )
-- (void)il_xnor_cc( Rsrc1, Rsrc2, &Rccr );
-- ENDI
--
--
--
--
-- /* branch with no annulled delay slot */
-- IMPL( brcond_ds )
-- uint64_t xpc;
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(Rccr)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* branch with annulled delay slot */
-- IMPL( brcond_ds_annul )
-- uint64_t xpc;
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(Rccr)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- cpup->annul = TRUE;
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* branch with no annulled delay slot */
-- IMPL( br_g_le_ds )
-- uint64_t xpc;
-- uint64_t func;
--
-- func = Rccr;
-- func = ((func>>1) ^ (func>>3))|(func>>2); /* get Z|(N^V) in bits 0,4 */
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(func)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* branch with annulled delay slot */
-- IMPL( br_g_le_ds_annul )
-- uint64_t xpc;
-- uint64_t func;
--
-- func = Rccr;
-- func = ((func>>1) ^ (func>>3))|(func>>2); /* get Z|(N^V) in bits 0,4 */
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(func)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- cpup->annul = TRUE;
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* branch with no annulled delay slot */
-- IMPL( br_ge_l_ds )
-- uint64_t xpc;
-- uint64_t func;
--
-- func = Rccr;
-- func = (func>>1) ^ (func>>3); /* get (N^V) in bits 0,4 */
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(func)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* branch with annulled delay slot */
-- IMPL( br_ge_l_ds_annul )
-- uint64_t xpc;
-- uint64_t func;
--
-- func = Rccr;
-- func = (func>>1) ^ (func>>3); /* get (N^V) in bits 0,4 */
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(func)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- cpup->annul = TRUE;
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* branch with no annulled delay slot */
-- IMPL( br_gu_leu_ds )
-- uint64_t xpc;
-- uint64_t func;
--
-- func = Rccr;
-- func = (func>>2) | func; /* get (Z|C) in bits 0,4 */
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(func)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* branch with annulled delay slot */
-- IMPL( br_gu_leu_ds_annul )
-- uint64_t xpc;
-- uint64_t func;
--
-- func = Rccr;
-- func = (func>>2) | func; /* get (Z|C) in bits 0,4 */
--
-- cpup->cti_executed = TRUE;
--
-- if (COMPUTE_TAKEN(func)) {
-- xpc = Rpc + SBRoffset32;
-- } else {
-- cpup->annul = TRUE;
-- xpc = Rnpc + 4;
-- }
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
--
-- IMPL( bralways_ds )
-- uint64_t xpc;
-- cpup->cti_executed = TRUE;
--
-- xpc = Rpc + SBRoffset32;
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
-- /* FIXME: prob not worth burning an instn on its own
-- * combine with bralways_ds above, and carry annul
-- * bit in the decoded form
-- */
-- IMPL( bralways_ds_annul )
-- uint64_t xpc;
-- cpup->cti_executed = TRUE;
--
-- cpup->annul = TRUE;
-- xpc = Rpc + SBRoffset32;
-- Rpc = Rnpc;
-- Rnpc = xpc;
-- ENDI
--
--
--
-- IMPL( brnever_ds_annul )
-- cpup->cti_executed = TRUE;
--
-- cpup->annul = TRUE;
-- Rpc = Rnpc;
-- Rnpc += 4;
-- ENDI
--
--
#endif /* } */