* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: sparcv9instns.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
#pragma ident "@(#)sparcv9instns.c 1.64 07/03/19 SMI"
#include "tsparcv9internal.h"
#include "hostnative_asm.h" /* autogenerated from hostnative_asm.S */
#include "hostnative.h" /* autogenerated from hostnative.c */
#include "sparcv9decode.h"
/*
 * FPU_NOT_IMPLEMENTED(_which): fallback for FPU instructions that have
 * no non-native (software) implementation yet.  Emits a FIXME warning,
 * clears the FSR trap-type (ftt) field, sets it to
 * SPARCv9_FTT_unimplemented_FPop, and posts a precise
 * fp_exception_other trap.
 * NOTE(review): the expansion references `sp` and `v9p`, which must be
 * in scope at every use site; it is a bare multi-statement expansion
 * (not wrapped in do { } while (0)), so it must not be used as the body
 * of an unbraced if/else.
 */
#define FPU_NOT_IMPLEMENTED(_which) \
FIXME_WARNING(("non-native implementation of " _which \
" not yet completed")); \
sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; \
sp->v9_fsr_ctrl |= SPARCv9_FTT_unimplemented_FPop << V9_FSR_FTT_SHIFT; \
v9p->post_precise_trap(sp, Sparcv9_trap_fp_exception_other); \
* Implementation(s) of pre-decoded sparcv9 specific instructions.
/*
 * IMPL(_n): opens the definition of a pre-decoded instruction
 * implementation — expands to the header (and opening brace) of
 * `void decoded_impl_sparcv9_<_n>(simcpu_t *sp, xicache_instn_t *xcip)`.
 * Each IMPL(...) must be closed by a matching ENDI below.
 */
#define IMPL( _n ) void decoded_impl_sparcv9_##_n (simcpu_t *sp, xicache_instn_t * xcip) {
/*
 * ENDI: closes an IMPL(...) body — advances the simulated PC with
 * NEXT_INSTN(sp), then ends the definition via ENDDEF.
 * NOTE(review): ENDDEF is defined elsewhere (presumably it supplies the
 * closing brace for the function opened by IMPL) — confirm in the
 * project headers.
 */
#define ENDI NEXT_INSTN(sp); ENDDEF
/*
 * Single-bit masks for the four SPARC V9 floating-point condition
 * codes — unordered, greater, less, equal — used to build the
 * sparcv9_fcc_magic lookup table that follows: each table entry ORs
 * together the fcc states under which a given fcc-based condition
 * is true.
 * NOTE(review): identifiers of the form _U (underscore + uppercase)
 * are reserved to the implementation by the C standard; they are kept
 * here because the table initializer depends on them, but renaming
 * (e.g. FCC_U) would be safer if all uses are updated together.
 */
#define _U (1 << V9_fcc_u)
#define _G (1 << V9_fcc_g)
#define _L (1 << V9_fcc_l)
#define _E (1 << V9_fcc_e)
static uint8_t sparcv9_fcc_magic
[16] = {
_U
|_G
|_L
|_E
, /* always */
* Some FPU instructions that can never cause exceptions still
* update the FSR - so just clear current exceptions and trap type.
#define FP_CLEAR_CEXC_FTT(_sp) do { \
(_sp)->v9_fsr_exc &= ~V9_FSR_CEXC_MASK; \
(_sp)->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; \
/* ------------------------------------------------------------ */
* Instruction: sparcv9_add_co_imm
#if !defined(HAS_NATIVE_sparcv9_add_co_imm) /* { */
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_add_co_rrr
#if !defined(HAS_NATIVE_sparcv9_add_co_rrr) /* { */
uint64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_add_co_imm_rd0
#if !defined(HAS_NATIVE_sparcv9_add_co_imm_rd0) /* { */
int64_t s1
= Rsrc1
, s2
= (int64_t)(int32_t)Simm16
, d
;
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_add_co_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_add_co_rrr_rd0) /* { */
int64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_add_ci_imm
#if !defined(HAS_NATIVE_sparcv9_add_ci_imm) /* { */
int64_t s1
= Rsrc1
, s2
= (int64_t)(int32_t)Simm16
, d
;
d
= s1
+ s2
+ (sp
->v9_ccr
& 1);
* Instruction: sparcv9_add_ci_rrr
#if !defined(HAS_NATIVE_sparcv9_add_ci_rrr) /* { */
int64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
d
= s1
+ s2
+ (sp
->v9_ccr
& 1);
* Instruction: sparcv9_add_cico_imm
#if !defined(HAS_NATIVE_sparcv9_add_cico_imm) /* { */
int64_t s1
= Rsrc1
, s2
= (int64_t)(int32_t)Simm16
, d
;
d
= s1
+ s2
+ (sp
->v9_ccr
& 1);
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_add_cico_rrr
#if !defined(HAS_NATIVE_sparcv9_add_cico_rrr) /* { */
int64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
d
= s1
+ s2
+ (sp
->v9_ccr
& 1);
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_add_cico_imm_rd0
#if !defined(HAS_NATIVE_sparcv9_add_cico_imm_rd0) /* { */
int64_t s1
= Rsrc1
, s2
= (int64_t)(int32_t)Simm16
, d
;
d
= s1
+ s2
+ (sp
->v9_ccr
& 1);
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_add_cico_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_add_cico_rrr_rd0) /* { */
int64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
d
= s1
+ s2
+ (sp
->v9_ccr
& 1);
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_co_imm
#if !defined(HAS_NATIVE_sparcv9_sub_co_imm) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_co_rrr
#if !defined(HAS_NATIVE_sparcv9_sub_co_rrr) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_co_imm_rd0
#if !defined(HAS_NATIVE_sparcv9_sub_co_imm_rd0) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_co_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_sub_co_rrr_rd0) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_ci_imm
#if !defined(HAS_NATIVE_sparcv9_sub_ci_imm) /* { */
int64_t s1
= Rsrc1
, s2
= (int64_t)(int32_t)Simm16
, d
;
d
= s1
- s2
- (sp
->v9_ccr
& 1);
* Instruction: sparcv9_sub_ci_rrr
#if !defined(HAS_NATIVE_sparcv9_sub_ci_rrr) /* { */
int64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
d
= s1
- s2
- (sp
->v9_ccr
& 1);
* Instruction: sparcv9_sub_cico_imm
#if !defined(HAS_NATIVE_sparcv9_sub_cico_imm) /* { */
int64_t s1
= Rsrc1
, s2
= (int64_t)(int32_t)Simm16
, d
;
d
= s1
- s2
- (sp
->v9_ccr
& 1);
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_cico_rrr
#if !defined(HAS_NATIVE_sparcv9_sub_cico_rrr) /* { */
int64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
d
= s1
- s2
- (sp
->v9_ccr
& 1);
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_cico_imm_rd0
#if !defined(HAS_NATIVE_sparcv9_sub_cico_imm_rd0) /* { */
int64_t s1
= Rsrc1
, s2
= (int64_t)(int32_t)Simm16
, d
;
d
= s1
- s2
- (sp
->v9_ccr
& 1);
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
* Instruction: sparcv9_sub_cico_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_sub_cico_rrr_rd0) /* { */
int64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
d
= s1
- s2
- (sp
->v9_ccr
& 1);
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
#define LOGIC_OP(_res, _op) do { \
sp->v9_ccr = V9_xcc_n((result>>63)&1) \
| V9_xcc_z(result==0LL) \
| V9_icc_n((result>>31)&1) \
| V9_icc_z((result&MASK64(31,0))==0LL); \
* Instruction: sparcv9_and_cc_imm
#if !defined(HAS_NATIVE_sparcv9_and_cc_imm) /* { */
LOGIC_OP(Rdest
= result
, Rsrc1
& Simm16
);
* Instruction: sparcv9_and_cc_rrr
#if !defined(HAS_NATIVE_sparcv9_and_cc_rrr) /* { */
LOGIC_OP(Rdest
= result
, Rsrc1
& Rsrc2
);
* Instruction: sparcv9_and_cc_imm_rd0
#if !defined(HAS_NATIVE_sparcv9_and_cc_imm_rd0) /* { */
LOGIC_OP(/*nada*/, Rsrc1
& Simm16
);
* Instruction: sparcv9_and_cc_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_and_cc_rrr_rd0) /* { */
LOGIC_OP(/*nada*/, Rsrc1
& Rsrc2
);
* Instruction: sparcv9_andn_cc_rrr
#if !defined(HAS_NATIVE_sparcv9_andn_cc_rrr) /* { */
LOGIC_OP(Rdest
=result
, (Rsrc1
& ~(Rsrc2
)) );
* Instruction: sparcv9_andn_cc_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_andn_cc_rrr_rd0) /* { */
LOGIC_OP(/*nada*/, (Rsrc1
& ~(Rsrc2
)) );
* Instruction: sparcv9_or_cc_imm
#if !defined(HAS_NATIVE_sparcv9_or_cc_imm) /* { */
LOGIC_OP(Rdest
=result
, Rsrc1
| Simm16
);
* Instruction: sparcv9_or_cc_rrr
#if !defined(HAS_NATIVE_sparcv9_or_cc_rrr) /* { */
LOGIC_OP(Rdest
=result
, Rsrc1
| Rsrc2
);
* Instruction: sparcv9_or_cc_imm_rd0
#if !defined(HAS_NATIVE_sparcv9_or_cc_imm_rd0) /* { */
LOGIC_OP(/*nada*/, Rsrc1
| Simm16
);
* Instruction: sparcv9_or_cc_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_or_cc_rrr_rd0) /* { */
LOGIC_OP(/*nada*/, Rsrc1
| Rsrc2
);
* Instruction: sparcv9_orn_cc_rrr
#if !defined(HAS_NATIVE_sparcv9_orn_cc_rrr) /* { */
LOGIC_OP(Rdest
=result
, (Rsrc1
| ~(Rsrc2
)) );
* Instruction: sparcv9_orn_cc_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_orn_cc_rrr_rd0) /* { */
LOGIC_OP(/*nada*/, (Rsrc1
| ~(Rsrc2
)) );
* Instruction: sparcv9_xor_cc_imm
#if !defined(HAS_NATIVE_sparcv9_xor_cc_imm) /* { */
LOGIC_OP(Rdest
=result
, Rsrc1
^ Simm16
);
* Instruction: sparcv9_xor_cc_rrr
#if !defined(HAS_NATIVE_sparcv9_xor_cc_rrr) /* { */
LOGIC_OP(Rdest
=result
, Rsrc1
^ Rsrc2
);
* Instruction: sparcv9_xor_cc_imm_rd0
#if !defined(HAS_NATIVE_sparcv9_xor_cc_imm_rd0) /* { */
LOGIC_OP(/*nada*/, Rsrc1
^ Simm16
);
* Instruction: sparcv9_xor_cc_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_xor_cc_rrr_rd0) /* { */
LOGIC_OP(/*nada*/, Rsrc1
^ Rsrc2
);
* Instruction: sparcv9_xnor_cc_rrr
#if !defined(HAS_NATIVE_sparcv9_xnor_cc_rrr) /* { */
LOGIC_OP(Rdest
=result
, ~(Rsrc1
^ Rsrc2
) );
* Instruction: sparcv9_xnor_cc_rrr_rd0
#if !defined(HAS_NATIVE_sparcv9_xnor_cc_rrr_rd0) /* { */
LOGIC_OP(/*nada*/, ~(Rsrc1
^ Rsrc2
) );
* Instruction: sparcv9_bne_icc
#if !defined(HAS_NATIVE_sparcv9_bne_icc) /* { */
if (!(sp
->v9_ccr
& V9_icc_z_mask
)) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_be_icc
#if !defined(HAS_NATIVE_sparcv9_be_icc) /* { */
if (sp
->v9_ccr
& V9_icc_z_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bg_icc
#if !defined(HAS_NATIVE_sparcv9_bg_icc) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_g
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_ble_icc
#if !defined(HAS_NATIVE_sparcv9_ble_icc) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_le
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bge_icc
#if !defined(HAS_NATIVE_sparcv9_bge_icc) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_ge
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bl_icc
#if !defined(HAS_NATIVE_sparcv9_bl_icc) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_l
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bgu_icc
#if !defined(HAS_NATIVE_sparcv9_bgu_icc) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_gu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bleu_icc
#if !defined(HAS_NATIVE_sparcv9_bleu_icc) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_leu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcc_icc
#if !defined(HAS_NATIVE_sparcv9_bcc_icc) /* { */
if ( !(sp
->v9_ccr
& V9_icc_c_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcs_icc
#if !defined(HAS_NATIVE_sparcv9_bcs_icc) /* { */
if ( sp
->v9_ccr
& V9_icc_c_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bpos_icc
#if !defined(HAS_NATIVE_sparcv9_bpos_icc) /* { */
if ( !(sp
->v9_ccr
& V9_icc_n_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bneg_icc
#if !defined(HAS_NATIVE_sparcv9_bneg_icc) /* { */
if ( sp
->v9_ccr
& V9_icc_n_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvc_icc
#if !defined(HAS_NATIVE_sparcv9_bvc_icc) /* { */
if ( !(sp
->v9_ccr
& V9_icc_v_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvs_icc
#if !defined(HAS_NATIVE_sparcv9_bvs_icc) /* { */
if ( sp
->v9_ccr
& V9_icc_v_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bne_xcc
#if !defined(HAS_NATIVE_sparcv9_bne_xcc) /* { */
if (!(sp
->v9_ccr
& V9_xcc_z_mask
)) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_be_xcc
#if !defined(HAS_NATIVE_sparcv9_be_xcc) /* { */
if (sp
->v9_ccr
& V9_xcc_z_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bg_xcc
#if !defined(HAS_NATIVE_sparcv9_bg_xcc) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_g
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_ble_xcc
#if !defined(HAS_NATIVE_sparcv9_ble_xcc) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_le
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bge_xcc
#if !defined(HAS_NATIVE_sparcv9_bge_xcc) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_ge
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bl_xcc
#if !defined(HAS_NATIVE_sparcv9_bl_xcc) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_l
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bgu_xcc
#if !defined(HAS_NATIVE_sparcv9_bgu_xcc) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_gu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bleu_xcc
#if !defined(HAS_NATIVE_sparcv9_bleu_xcc) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_leu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcc_xcc
#if !defined(HAS_NATIVE_sparcv9_bcc_xcc) /* { */
if ( !(sp
->v9_ccr
& V9_xcc_c_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcs_xcc
#if !defined(HAS_NATIVE_sparcv9_bcs_xcc) /* { */
if ( sp
->v9_ccr
& V9_xcc_c_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bpos_xcc
#if !defined(HAS_NATIVE_sparcv9_bpos_xcc) /* { */
if ( !(sp
->v9_ccr
& V9_xcc_n_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bneg_xcc
#if !defined(HAS_NATIVE_sparcv9_bneg_xcc) /* { */
if ( sp
->v9_ccr
& V9_xcc_n_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvc_xcc
#if !defined(HAS_NATIVE_sparcv9_bvc_xcc) /* { */
if ( !(sp
->v9_ccr
& V9_xcc_v_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvs_xcc
#if !defined(HAS_NATIVE_sparcv9_bvs_xcc) /* { */
if ( sp
->v9_ccr
& V9_xcc_v_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bne_icc_an
#if !defined(HAS_NATIVE_sparcv9_bne_icc_an) /* { */
if (!(sp
->v9_ccr
& V9_icc_z_mask
)) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_be_icc_an
#if !defined(HAS_NATIVE_sparcv9_be_icc_an) /* { */
if (sp
->v9_ccr
& V9_icc_z_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bg_icc_an
#if !defined(HAS_NATIVE_sparcv9_bg_icc_an) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_g
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_ble_icc_an
#if !defined(HAS_NATIVE_sparcv9_ble_icc_an) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_le
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bge_icc_an
#if !defined(HAS_NATIVE_sparcv9_bge_icc_an) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_ge
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bl_icc_an
#if !defined(HAS_NATIVE_sparcv9_bl_icc_an) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_l
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bgu_icc_an
#if !defined(HAS_NATIVE_sparcv9_bgu_icc_an) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_gu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bleu_icc_an
#if !defined(HAS_NATIVE_sparcv9_bleu_icc_an) /* { */
int cc
= V9_ext_icc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_leu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcc_icc_an
#if !defined(HAS_NATIVE_sparcv9_bcc_icc_an) /* { */
if ( !(sp
->v9_ccr
& V9_icc_c_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcs_icc_an
#if !defined(HAS_NATIVE_sparcv9_bcs_icc_an) /* { */
if ( sp
->v9_ccr
& V9_icc_c_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bpos_icc_an
#if !defined(HAS_NATIVE_sparcv9_bpos_icc_an) /* { */
if ( !(sp
->v9_ccr
& V9_icc_n_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bneg_icc_an
#if !defined(HAS_NATIVE_sparcv9_bneg_icc_an) /* { */
if ( sp
->v9_ccr
& V9_icc_n_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvc_icc_an
#if !defined(HAS_NATIVE_sparcv9_bvc_icc_an) /* { */
if ( !(sp
->v9_ccr
& V9_icc_v_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvs_icc_an
#if !defined(HAS_NATIVE_sparcv9_bvs_icc_an) /* { */
if ( sp
->v9_ccr
& V9_icc_v_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bne_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bne_xcc_an) /* { */
if (!(sp
->v9_ccr
& V9_xcc_z_mask
)) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_be_xcc_an
#if !defined(HAS_NATIVE_sparcv9_be_xcc_an) /* { */
if (sp
->v9_ccr
& V9_xcc_z_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bg_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bg_xcc_an) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_g
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_ble_xcc_an
#if !defined(HAS_NATIVE_sparcv9_ble_xcc_an) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_le
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bge_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bge_xcc_an) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_ge
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bl_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bl_xcc_an) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_l
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bgu_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bgu_xcc_an) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_gu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bleu_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bleu_xcc_an) /* { */
int cc
= V9_ext_xcc(sp
->v9_ccr
);
if ( (sparcv9_cc_magic
[cond_leu
] >> cc
) &1 ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcc_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bcc_xcc_an) /* { */
if ( !(sp
->v9_ccr
& V9_xcc_c_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bcs_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bcs_xcc_an) /* { */
if ( sp
->v9_ccr
& V9_xcc_c_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bpos_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bpos_xcc_an) /* { */
if ( !(sp
->v9_ccr
& V9_xcc_n_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bneg_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bneg_xcc_an) /* { */
if ( sp
->v9_ccr
& V9_xcc_n_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvc_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bvc_xcc_an) /* { */
if ( !(sp
->v9_ccr
& V9_xcc_v_mask
) ) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_bvs_xcc_an
#if !defined(HAS_NATIVE_sparcv9_bvs_xcc_an) /* { */
if ( sp
->v9_ccr
& V9_xcc_v_mask
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_brz
#if !defined(HAS_NATIVE_sparcv9_brz) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brlez
#if !defined(HAS_NATIVE_sparcv9_brlez) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brlz
#if !defined(HAS_NATIVE_sparcv9_brlz) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brnz
#if !defined(HAS_NATIVE_sparcv9_brnz) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brgz
#if !defined(HAS_NATIVE_sparcv9_brgz) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brgez
#if !defined(HAS_NATIVE_sparcv9_brgez) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brz_an
#if !defined(HAS_NATIVE_sparcv9_brz_an) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brlez_an
#if !defined(HAS_NATIVE_sparcv9_brlez_an) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brlz_an
#if !defined(HAS_NATIVE_sparcv9_brlz_an) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brnz_an
#if !defined(HAS_NATIVE_sparcv9_brnz_an) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brgz_an
#if !defined(HAS_NATIVE_sparcv9_brgz_an) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_brgez_an
#if !defined(HAS_NATIVE_sparcv9_brgez_an) /* { */
tvaddr_t tpc
= Rpc
+ SBRreg_off32
;
* Instruction: sparcv9_fadds
#if !defined(HAS_NATIVE_fadds) /* { */
FPU_NOT_IMPLEMENTED("fadds")
* Instruction: sparcv9_faddd
#if !defined(HAS_NATIVE_faddd) /* { */
if (F64src1
== 0 && F64src2
== 0)
FPU_NOT_IMPLEMENTED("faddd")
* Instruction: sparcv9_fsubs
#if !defined(HAS_NATIVE_fsubs) /* { */
FPU_NOT_IMPLEMENTED("fsubs")
* Instruction: sparcv9_fsubd
#if !defined(HAS_NATIVE_fsubd) /* { */
FPU_NOT_IMPLEMENTED("fsubd")
* Instruction: sparcv9_fmuls
#if !defined(HAS_NATIVE_fmuls) /* { */
FPU_NOT_IMPLEMENTED("fmuls")
* Instruction: sparcv9_fmuld
#if !defined(HAS_NATIVE_fmuld) /* { */
if (F64src1
== 0 || F64src2
== 0)
FPU_NOT_IMPLEMENTED("fmuld")
* Instruction: sparcv9_fdivs
#if !defined(HAS_NATIVE_fdivs) /* { */
FPU_NOT_IMPLEMENTED("fdivs")
* Instruction: sparcv9_fdivd
#if !defined(HAS_NATIVE_fdivd) /* { */
FPU_NOT_IMPLEMENTED("fdivd")
* Instruction: sparcv9_fsmuld
#if !defined(HAS_NATIVE_fsmuld) /* { */
FPU_NOT_IMPLEMENTED("fsmuld")
* Instruction: sparcv9_fstod
#if !defined(HAS_NATIVE_fstod) /* { */
FPU_NOT_IMPLEMENTED("fstod")
* Instruction: sparcv9_fdtos
#if !defined(HAS_NATIVE_fdtos) /* { */
FPU_NOT_IMPLEMENTED("fdtos")
* Instruction: sparcv9_fsqrts
#if !defined(HAS_NATIVE_fsqrts) /* { */
FPU_NOT_IMPLEMENTED("fsqrts")
* Instruction: sparcv9_fsqrtd
#if !defined(HAS_NATIVE_fsqrtd) /* { */
FPU_NOT_IMPLEMENTED("fsqrtd")
* Instruction: sparcv9_fmovs
#if !defined(HAS_NATIVE_sparcv9_fmovs) /* { */
* Instruction: sparcv9_fmovd
#if !defined(HAS_NATIVE_sparcv9_fmovd) /* { */
if ( (sparcv9_cc_magic
[MOVCC_cond
] >> (ccr
& 0xf)) &1 ) {
ccr
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
ccr
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, MOVCC_cc
);
if ( (sparcv9_fcc_magic
[MOVCC_cond
] >> ccr
) & 1 ) {
if ( (sparcv9_cc_magic
[MOVCC_cond
] >> (ccr
& 0xf)) &1 ) {
ccr
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
ccr
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, MOVCC_cc
);
if ( (sparcv9_fcc_magic
[MOVCC_cond
] >> ccr
) & 1 ) {
* Instruction: sparcv9_fnegs
#if !defined(HAS_NATIVE_sparcv9_fnegs) /* { */
F32dest
= F32src1
^ (1u << 31);
* Instruction: sparcv9_fnegd
#if !defined(HAS_NATIVE_sparcv9_fnegd) /* { */
F64dest
= F64src1
^ (1ull << 63);
* Instruction: sparcv9_fabss
#if !defined(HAS_NATIVE_sparcv9_fabss) /* { */
F32dest
= F32src1
& 0x7fffffffu
;
* Instruction: sparcv9_fabsd
#if !defined(HAS_NATIVE_sparcv9_fabsd) /* { */
F64dest
= F64src1
& 0x7fffffffffffffffull
;
* Instruction: sparcv9_fstoi
#if !defined(HAS_NATIVE_fstoi) /* { */
FPU_NOT_IMPLEMENTED("fstoi")
* Instruction: sparcv9_fitos
#if !defined(HAS_NATIVE_fitos) /* { */
FPU_NOT_IMPLEMENTED("fitos")
* Instruction: sparcv9_fdtoi
#if !defined(HAS_NATIVE_fdtoi) /* { */
FPU_NOT_IMPLEMENTED("fdtoi")
* Instruction: sparcv9_fitod
#if !defined(HAS_NATIVE_fitod) /* { */
FPU_NOT_IMPLEMENTED("fitod")
* Instruction: sparcv9_fstox
#if !defined(HAS_NATIVE_fstox) /* { */
FPU_NOT_IMPLEMENTED("fstox")
* Instruction: sparcv9_fxtos
#if !defined(HAS_NATIVE_fxtos) /* { */
FPU_NOT_IMPLEMENTED("fxtos")
* Instruction: sparcv9_fdtox
#if !defined(HAS_NATIVE_fdtox) /* { */
FPU_NOT_IMPLEMENTED("fdtox")
* Instruction: sparcv9_fxtod
#if !defined(HAS_NATIVE_fxtod) /* { */
FPU_NOT_IMPLEMENTED("fxtod")
* FP register logical operations.
F64dest
= F64src1
& F64src2
;
F64dest
= ~F64src1
& F64src2
;
F32dest
= ~F32src1
& F32src2
;
F64dest
= F64src1
& ~F64src2
;
F32dest
= F32src1
& ~F32src2
;
F32dest
= F32src1
& F32src2
;
F64dest
= ~(F64src1
& F64src2
);
F32dest
= ~(F32src1
& F32src2
);
F64dest
= ~(F64src1
| F64src2
);
F32dest
= ~(F32src1
| F32src2
);
F64dest
= F64src1
| F64src2
;
F64dest
= ~F64src1
| F64src2
;
F32dest
= ~F32src1
| F32src2
;
F64dest
= F64src1
| ~F64src2
;
F32dest
= F32src1
| ~F32src2
;
F32dest
= F32src1
| F32src2
;
F64dest
= ~(F64src1
^ F64src2
);
F32dest
= ~(F32src1
^ F32src2
);
F64dest
= F64src1
^ F64src2
;
F32dest
= F32src1
^ F32src2
;
* FP register fixed-point partitioned add and subtract.
for (i
= 0; i
< 4; i
++) {
res
|= (uint64_t)d
<< 48;
for (i
= 0; i
< 2; i
++) {
res
|= (uint32_t)d
<< 16;
for (i
= 0; i
< 2; i
++) {
res
|= (uint64_t)d
<< 32;
F32dest
= F32src1
+ F32src2
;
for (i
= 0; i
< 4; i
++) {
res
|= (uint64_t)d
<< 48;
for (i
= 0; i
< 2; i
++) {
res
|= (uint32_t)d
<< 16;
for (i
= 0; i
< 2; i
++) {
res
|= (uint64_t)d
<< 32;
F32dest
= F32src1
- F32src2
;
/* ------------------------------------------------------------ */
* Basic branch instructions
/* Note: special case - normally taken branches */
/* always execute their delay slots - not the always case ! */
IMPL( bralways_ds_annul
) /* branch always annul delay slot */
IMPL( bralways_ds
) /* branch always executing delay slot */
IMPL( brnever_ds_annul
) /* branch never, but annul delay slot */
/* ------------------------------------------------------------ */
* Call instruction + Jump and links ...
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
tpc
= opc
+ (sint64_t
)Simm32
;
if (!v9p
->pstate
.addr_mask
&& v9p
->check_vahole(sp
, tpc
)) return;
if (v9p
->pstate
.addr_mask
) opc
&= MASK64(31,0); /* FIXME: SV9_ID125 ? */
IReg( Reg_sparcv9_o7
) = opc
;
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
,
Sparcv9_trap_mem_address_not_aligned
);
if (!v9p
->pstate
.addr_mask
&& v9p
->check_vahole(sp
, Rsrc1
+ Simm16
)) return;
/* aligned check with xicache operation - maybe here instead ? FIXME*/
opc
= Rpc
; /* stash incase Rdest is same reg as Rsrc */
if (v9p
->pstate
.addr_mask
) opc
&= MASK64(31,0); /* FIXME: SV9_ID125 ? */
IMPL( jmpl_imm_rd0
) /* fast track normal return-from-call instructions */
/* aligned check with xicache operation - maybe here instead ? FIXME*/
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
,
Sparcv9_trap_mem_address_not_aligned
);
if (!v9p
->pstate
.addr_mask
&& v9p
->check_vahole(sp
, Rsrc1
+ Simm16
)) return;
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
,
Sparcv9_trap_mem_address_not_aligned
);
if (!v9p
->pstate
.addr_mask
&& v9p
->check_vahole(sp
, Rsrc1
+ Rsrc2
)) return;
/* aligned check with xicache operation - maybe here instead ? FIXME*/
opc
= Rpc
; /* stash incase Rdest is same reg as Rsrc */
if (v9p
->pstate
.addr_mask
) opc
&= MASK64(31,0); /* FIXME: SV9_ID125 ? */
IMPL( jmpl_rrr_rd0
) /* fast track normal return-from-call instructions */
/* aligned check with xicache operation - maybe here instead ? FIXME*/
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
,
Sparcv9_trap_mem_address_not_aligned
);
if (!v9p
->pstate
.addr_mask
&& v9p
->check_vahole(sp
, Rsrc1
+ Rsrc2
)) return;
/* ------------------------------------------------------------ */
* Specialist maths instructions
sparcv9_udiv64( sp
, Rdest_num
, Rsrc1
, Simm16
);
sparcv9_udiv64( sp
, Rdest_num
, Rsrc1
, Rsrc2
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
if (!Zero_Reg(Rdest_num
)) {
if ((uint64_t)Rsrc1
== (1ULL << 63) && (int64_t)Simm16
== -1)
Rdest
= 0x7fffffffffffffffULL
;
Rdest
= (int64_t)Rsrc1
/ (int64_t)Simm16
;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
if (!Zero_Reg(Rdest_num
)) {
if ((uint64_t)Rsrc1
== (1ULL << 63) && (int64_t)Rsrc2
== -1)
Rdest
= 0x7fffffffffffffffULL
;
Rdest
= (int64_t)Rsrc1
/ (int64_t)Rsrc2
;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
if (!Zero_Reg(Rdest_num
)) {
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
if ((uint64_t)foo
== (1ULL << 63) && (int32_t)Simm16
== -1)
foo
= foo
/ (int32_t)Simm16
;
else if (foo
<= (int64_t)0xffffffff7fffffffULL
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
if (!Zero_Reg(Rdest_num
)) {
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
foo
= ((uint64_t)foo
) / ((uint32_t)(int32_t)Simm16
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if ((int32_t)Rsrc2
== 0) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
if (!Zero_Reg(Rdest_num
)) {
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
if ((uint64_t)foo
== (1ULL << 63) && (int32_t)Rsrc2
== -1)
foo
= foo
/ (int32_t)Rsrc2
;
else if (foo
<= (int64_t)0xffffffff7fffffffULL
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if ((uint32_t)Rsrc2
== 0) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
if (!Zero_Reg(Rdest_num
)) {
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
foo
= (int64_t)foo
/ (uint32_t)Rsrc2
;
sparcv9_cpu_t
*v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
if ((uint64_t)foo
== (1ULL << 63) && (int32_t)Simm16
== -1) {
v
= (1 << 1); /* icc.v */
foo
= foo
/ (int32_t)Simm16
;
if (foo
>= (1ll << 31)) {
v
= (1 << 1); /* icc.v */
} else if (foo
<= (int64_t)0xffffffff7fffffffULL
) {
v
= (1 << 1); /* icc.v */
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0) | /* xcc.z */
if (!Zero_Reg(Rdest_num
))
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
foo
= ((uint64_t)foo
) / ((uint32_t)(int32_t)Simm16
);
if (foo
>= (1ull << 32)) {
v
= (1 << 1); /* icc.v */
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0) | /* xcc.z */
if (!Zero_Reg(Rdest_num
))
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if ((int32_t)Rsrc2
== 0) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
if ((uint64_t)foo
== (1ULL << 63) && (int32_t)Rsrc2
== -1) {
v
= (1 << 1); /* icc.v */
foo
= foo
/ (int32_t)Rsrc2
;
if (foo
>= (1ll << 31)) {
v
= (1 << 1); /* icc.v */
} else if (foo
<= (int64_t)0xffffffff7fffffffULL
) {
v
= (1 << 1); /* icc.v */
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0) | /* xcc.z */
if (!Zero_Reg(Rdest_num
))
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if ((uint32_t)Rsrc2
== 0) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_division_by_zero
);
foo
= (((int64_t)sp
->v9_y
) << 32) | (uint32_t)Rsrc1
;
foo
= ((int64_t)foo
) / ((uint32_t)Rsrc2
);
if (foo
>= (1ull << 32)) {
v
= (1 << 1); /* icc.v */
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0) | /* xcc.z */
if (!Zero_Reg(Rdest_num
))
foo
= (int64_t)(int32_t)Rsrc1
* (int64_t)(int32_t)Simm16
;
if (!Zero_Reg(Rdest_num
))
foo
= (uint64_t)(uint32_t)Rsrc1
* (uint64_t)(uint32_t)(int32_t)Simm16
;
if (!Zero_Reg(Rdest_num
))
foo
= (int64_t)(int32_t)Rsrc1
* (int64_t)(int32_t)Simm16
;
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num
))
foo
= (uint64_t)(uint32_t)Rsrc1
* (uint64_t)(uint32_t)(int32_t)Simm16
;
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num
))
foo
= (((int64_t)Rsrc1
) << 32) | (uint32_t)sp
->v9_y
;
s1
= ((sp
->v9_ccr
>> 3) & 1) ^ ((sp
->v9_ccr
>> 1) & 1);
s1
= (s1
<< 31) | (((uint32_t)Rsrc1
) >> 1);
sp
->v9_y
= (uint32_t)foo
;
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= (sp
->v9_ccr
& V9_xcc_mask
);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
foo
= (((int64_t)Rsrc1
) << 32) | (uint32_t)sp
->v9_y
;
s1
= ((sp
->v9_ccr
>> 3) & 1) ^ ((sp
->v9_ccr
>> 1) & 1);
s1
= (s1
<< 31) | (((uint32_t)Rsrc1
) >> 1);
sp
->v9_y
= (uint32_t)foo
;
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
sp
->v9_ccr
= (sp
->v9_ccr
& V9_xcc_mask
);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
foo
= (int64_t)(int32_t)Rsrc1
* (int64_t)(int32_t)Rsrc2
;
if (!Zero_Reg(Rdest_num
))
foo
= (uint64_t)(uint32_t)Rsrc1
* (uint64_t)(uint32_t)Rsrc2
;
if (!Zero_Reg(Rdest_num
))
foo
= (int64_t)(int32_t)Rsrc1
* (int64_t)(int32_t)Rsrc2
;
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num
))
foo
= (uint64_t)(uint32_t)Rsrc1
* (uint64_t)(uint32_t)Rsrc2
;
sp
->v9_ccr
= ((foo
& (1ull << 31)) ? (1 << 3) : 0) | /* icc.n */
(((uint32_t)foo
== 0) ? (1 << 2) : 0) | /* icc.z */
((foo
& (1ull << 63)) ? (1 << 7) : 0) | /* xcc.n */
((foo
== 0) ? (1 << 6) : 0); /* xcc.z */
if (!Zero_Reg(Rdest_num
))
/* ------------------------------------------------------------ */
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (SS_MAGIC_TRAP_CC(TRAP_cc
) && SS_MAGIC_TRAP(sp
, tn
)) {
v9p
->post_precise_trap(sp
, tn
+Sparcv9_trap_trap_instruction
);
sparcv9_trapcc( sp
, Rsrc1
+ Simm16
, TRAP_cc
, TRAP_cond
);
sparcv9_trapcc( sp
, Rsrc1
+ Rsrc2
, TRAP_cc
, TRAP_cond
);
/* ------------------------------------------------------------ */
if ( (sparcv9_cc_magic
[MOVCC_cond
] >> (ccr
& 0xf)) &1 ) {
if ( (sparcv9_cc_magic
[MOVCC_cond
] >> (ccr
& 0xf)) &1 ) {
ccr
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
ccr
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, MOVCC_cc
);
if ( (sparcv9_fcc_magic
[MOVCC_cond
] >> ccr
) & 1 ) {
ccr
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
ccr
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, MOVCC_cc
);
if ( (sparcv9_fcc_magic
[MOVCC_cond
] >> ccr
) & 1 ) {
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_illegal_instruction
);
if (!Zero_Reg(Rdest_num
)) {
val
= (int64_t)(int32_t)Simm16
;
for (cnt
= 0; val
!= 0; val
&= val
-1)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_illegal_instruction
);
if (!Zero_Reg(Rdest_num
)) {
for (cnt
= 0; val
!= 0; val
&= val
-1)
/* ------------------------------------------------------------ */
* Specialist instructions ...
* ... typically implementation dependent
* ... so use the provided callbacks to the actual device
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->read_state_reg( sp
, Rdest_num
, Rsrc1_num
);
IMPL( write_state_reg_imm
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->write_state_reg( sp
, Rdest_num
, Rsrc1
^ Simm16
);
IMPL( write_state_reg_rrr
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->write_state_reg( sp
, Rdest_num
, Rsrc1
^ Rsrc2
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->read_priv_reg( sp
, Rdest_num
, Rsrc1_num
);
IMPL( write_priv_reg_imm
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->write_priv_reg( sp
, Rdest_num
, Rsrc1
^ Simm16
);
IMPL( write_priv_reg_rrr
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->write_priv_reg( sp
, Rdest_num
, Rsrc1
^ Rsrc2
);
IMPL( read_hyper_priv_reg
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->read_hyp_priv_reg( sp
, Rdest_num
, Rsrc1_num
);
IMPL( write_hyper_priv_reg_imm
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->write_hyp_priv_reg( sp
, Rdest_num
, Rsrc1
^ Simm16
);
IMPL( write_hyper_priv_reg_rrr
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->write_hyp_priv_reg( sp
, Rdest_num
, Rsrc1
^ Rsrc2
);
/* Nothing to do in legion?? */
/* Nothing to do in legion?? */
sparcv9_save_instr(sp
, Rdest_num
, Rsrc1
+ Simm16
);
sparcv9_save_instr(sp
, Rdest_num
, Rsrc1
+ Rsrc2
);
sparcv9_restore_instr(sp
, Rdest_num
, Rsrc1
+ Simm16
);
sparcv9_restore_instr(sp
, Rdest_num
, Rsrc1
+ Rsrc2
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (V9_User
== v9p
->state
) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_privileged_opcode
);
v9p
->cansave
= INC_MOD(v9p
->cansave
, v9p
->nwins
);
v9p
->canrestore
= DEC_MOD(v9p
->canrestore
, v9p
->nwins
);
v9p
->otherwin
= DEC_MOD(v9p
->otherwin
, v9p
->nwins
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (V9_User
== v9p
->state
) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_privileged_opcode
);
v9p
->canrestore
= INC_MOD(v9p
->canrestore
, v9p
->nwins
);
v9p
->cansave
= DEC_MOD(v9p
->cansave
, v9p
->nwins
);
v9p
->otherwin
= DEC_MOD(v9p
->otherwin
, v9p
->nwins
);
if (v9p
->cleanwin
< (v9p
->nwins
-1))
v9p
->cleanwin
= v9p
->cleanwin
+ 1;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (V9_User
== v9p
->state
) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_privileged_opcode
);
v9p
->cleanwin
= v9p
->nwins
- 1;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (V9_User
== v9p
->state
) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_privileged_opcode
);
if (v9p
->otherwin
!= 0) {
EXEC_WARNING(("(@pc=0x%llx) "
"otherw executed with otherwin != 0",
v9p
->otherwin
= v9p
->canrestore
;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (V9_User
== v9p
->state
) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_privileged_opcode
);
if (v9p
->canrestore
!= 0) {
EXEC_WARNING(("(@pc=0x%llx) "
"normalw executed with canrestore != 0",
v9p
->canrestore
= v9p
->otherwin
;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (V9_User
== v9p
->state
) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_privileged_opcode
);
v9p
->cansave
= v9p
->nwins
- 2;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (v9p
->cansave
== (v9p
->nwins
-2)) {
if (v9p
->otherwin
!= 0) {
tt
= Sparcv9_trap_spill_0_other
| (v9p
->wstate_other
<<2);
tt
= Sparcv9_trap_spill_0_normal
| (v9p
->wstate_normal
<<2);
v9p
->post_precise_trap(sp
, tt
);
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!v9p
->pstate
.addr_mask
&& v9p
->check_vahole(sp
, Rsrc1
+ Simm16
)) return;
sparcv9_return_instr(sp
, Rsrc1
+ Simm16
);
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!v9p
->pstate
.addr_mask
&& v9p
->check_vahole(sp
, Rsrc1
+ Rsrc2
)) return;
sparcv9_return_instr(sp
, Rsrc1
+ Rsrc2
);
/* ------------------------------------------------------------ */
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->done_retry(sp
, (bool_t
)Misc32
); /* true if done instruction */
/* ------------------------------------------------------------ */
* Instruction cache flushing
* Since Legion does not have a pipeline to clear, there
* is nothing to do here for processors that do not
* Processors that do more are those that do not maintain
* instruction cache coherency in hardware. They will have
* processor specific versions of these implementations.
va
= (Rsrc1
+ Rsrc2
) & ~(tvaddr_t
)7;
va
= (Rsrc1
+ Simm16
) & ~(tvaddr_t
)7;
/* ------------------------------------------------------------ */
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->asi_access( sp
, ASI_op
, ASI_Rdest
, sp
->v9_asi
, Rsrc1
, Simm16
, USE_ASI_REG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->asi_access( sp
, ASI_op
, ASI_Rdest
, sp
->v9_asi
, Rsrc1
, Rsrc2
, USE_ASI_REG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->asi_access( sp
, ASI_op
, ASI_Rdest
, ASI_num
, Rsrc1
, Rsrc2
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->asi_access( sp
, ASI_op
, ASI_Rdest
, V9_ASI_IMPLICIT
, Rsrc1
, Rsrc2
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->asi_access( sp
, ASI_op
, ASI_Rdest
, V9_ASI_IMPLICIT
, Rsrc1
, Simm16
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, ASI_op
, ASI_Rdest
, sp
->v9_asi
, Rsrc1
, Simm16
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, ASI_op
, ASI_Rdest
, ASI_num
, Rsrc1
, Rsrc2
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_LdFSR
|MA_Size32
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Simm16
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_LdXFSR
|MA_Size64
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Simm16
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_StFSR
|MA_Size32
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Simm16
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_StXFSR
|MA_Size64
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Simm16
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_LdFSR
|MA_Size32
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Rsrc2
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_LdXFSR
|MA_Size64
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Rsrc2
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_StFSR
|MA_Size32
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Rsrc2
, NO_FLAG
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
#ifndef FP_DECODE_DISABLED
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
v9p
->asi_access( sp
, MA_V9_StXFSR
|MA_Size64
, NULL
, V9_ASI_IMPLICIT
, Rsrc1
, Rsrc2
, NO_FLAG
);
/* ------------------------------------------------------------ */
/* Floating point branches. */
* Instruction: sparcv9_fbule_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbule_fcc0) /* { */
if (V9_FSR_FCC0(sp
->v9_fsr_ctrl
) != V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbg_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbg_fcc0) /* { */
if (V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fblg_fcc0
#if !defined(HAS_NATIVE_sparcv9_fblg_fcc0) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_l
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fble_fcc0
#if !defined(HAS_NATIVE_sparcv9_fble_fcc0) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_e
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbge_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbge_fcc0) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_g
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbne_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbne_fcc0) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbug_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbug_fcc0) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbul_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbul_fcc0) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbue_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbue_fcc0) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbe_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbe_fcc0) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbo_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbo_fcc0) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) != V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbu_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbu_fcc0) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbuge_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbuge_fcc0) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) != V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbvs_fcc0
#if !defined(HAS_NATIVE_sparcv9_fbvs_fcc0) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbule_fccN
#if !defined(HAS_NATIVE_sparcv9_fbule_fccN) /* { */
if (V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) != V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbg_fccN
#if !defined(HAS_NATIVE_sparcv9_fbg_fccN) /* { */
if (V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fblg_fccN
#if !defined(HAS_NATIVE_sparcv9_fblg_fccN) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_l
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fble_fccN
#if !defined(HAS_NATIVE_sparcv9_fble_fccN) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_e
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbge_fccN
#if !defined(HAS_NATIVE_sparcv9_fbge_fccN) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_g
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbne_fccN
#if !defined(HAS_NATIVE_sparcv9_fbne_fccN) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbug_fccN
#if !defined(HAS_NATIVE_sparcv9_fbug_fccN) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbul_fccN
#if !defined(HAS_NATIVE_sparcv9_fbul_fccN) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbue_fccN
#if !defined(HAS_NATIVE_sparcv9_fbue_fccN) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbe_fccN
#if !defined(HAS_NATIVE_sparcv9_fbe_fccN) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbo_fccN
#if !defined(HAS_NATIVE_sparcv9_fbo_fccN) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) != V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbu_fccN
#if !defined(HAS_NATIVE_sparcv9_fbu_fccN) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbuge_fccN
#if !defined(HAS_NATIVE_sparcv9_fbuge_fccN) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) != V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbl_fccN
#if !defined(HAS_NATIVE_sparcv9_fbl_fccN) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbule_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbule_fcc0_an) /* { */
if (V9_FSR_FCC0(sp
->v9_fsr_ctrl
) != V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbg_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbg_fcc0_an) /* { */
if (V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fblg_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fblg_fcc0_an) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_l
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fble_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fble_fcc0_an) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_e
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbge_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbge_fcc0_an) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_g
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbne_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbne_fcc0_an) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbug_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbug_fcc0_an) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbul_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbul_fcc0_an) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbue_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbue_fcc0_an) /* { */
int cc
= V9_FSR_FCC0(sp
->v9_fsr_ctrl
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbe_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbe_fcc0_an) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbo_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbo_fcc0_an) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) != V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbu_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbu_fcc0_an) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbuge_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbuge_fcc0_an) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) != V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbvs_fcc0_an
#if !defined(HAS_NATIVE_sparcv9_fbvs_fcc0_an) /* { */
if ( V9_FSR_FCC0(sp
->v9_fsr_ctrl
) == V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbule_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbule_fccN_an) /* { */
if (V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) != V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbg_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbg_fccN_an) /* { */
if (V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fblg_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fblg_fccN_an) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_l
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fble_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fble_fccN_an) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_e
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbge_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbge_fccN_an) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_g
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbne_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbne_fccN_an) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbug_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbug_fccN_an) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_g
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbul_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbul_fccN_an) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbue_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbue_fccN_an) /* { */
int cc
= V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
);
if ( cc
== V9_fcc_u
|| cc
== V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbe_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbe_fccN_an) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_e
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbo_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbo_fccN_an) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) != V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbu_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbu_fccN_an) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_u
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbuge_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbuge_fccN_an) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) != V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
* Instruction: sparcv9_fbl_fccN_an
#if !defined(HAS_NATIVE_sparcv9_fbl_fccN_an) /* { */
if ( V9_FSR_FCCN(sp
->v9_fsr_ctrl
, SBRfcc
) == V9_fcc_l
) {
tvaddr_t tpc
= Rpc
+ SBRoffset32
;
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_illegal_instruction
);
/* ------------------------------------------------------------ */
/* Miscellaneous stuff ... not real instructions, but executed that way */
IMPL(illegal_instruction
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
* Hack to limit spewing of warnings about invalid instructions. Each
* opcode (as determined by bits 24-19) has a limit of 40 (actually
* INV_INST_LIMIT) error reports. Feel free to replace this code with
#define INV_INST_LIMIT 40
#define OP_EXTR(inst) (((inst) >> 19) & 0x3f)
/* statics init to zero */
static int invalid_instruction_count
[INV_INST_SIZE
];
sparcv9_idis(ibuf
, IBUF
, FE_INSTN(xcip
->rawi
), sp
->pc
);
if (invalid_instruction_count
[OP_EXTR(FE_INSTN(xcip
->rawi
))]++ < INV_INST_LIMIT
) {
lprintf(sp
->gid
, "illegal instruction pc=0x%llx "
"instn=%08x: %s\n", sp
->pc
, FE_INSTN(xcip
->rawi
), ibuf
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_illegal_instruction
);
IMPL(fp_unimplemented_instruction
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
sparcv9_idis(ibuf
, IBUF
, FE_INSTN(xcip
->rawi
), sp
->pc
);
lprintf(sp
->gid
, "unimplemented fp op pc=0x%llx instn=%08x: %s\n", sp
->pc
, FE_INSTN(xcip
->rawi
), ibuf
);
sp
->v9_fsr_ctrl
&= ~V9_FSR_FTT_MASK
;
sp
->v9_fsr_ctrl
|= SPARCv9_FTT_unimplemented_FPop
<< V9_FSR_FTT_SHIFT
;
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_exception_other
);
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
IMPL(fp_invalidreg_instruction
)
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
sparcv9_idis(ibuf
, IBUF
, FE_INSTN(xcip
->rawi
), sp
->pc
);
lprintf(sp
->gid
, "invalid fp register pc=0x%llx instn=%08x: %s\n", sp
->pc
, FE_INSTN(xcip
->rawi
), ibuf
);
sp
->v9_fsr_ctrl
&= ~V9_FSR_FTT_MASK
;
sp
->v9_fsr_ctrl
|= SPARCv9_FTT_invalid_fp_register
<< V9_FSR_FTT_SHIFT
;
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_exception_other
);
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc0) /* { */
FPU_NOT_IMPLEMENTED("fcmps_fcc0")
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc1) /* { */
FPU_NOT_IMPLEMENTED("fcmps_fcc1")
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc2) /* { */
FPU_NOT_IMPLEMENTED("fcmps_fcc2")
#if !defined(HAS_NATIVE_sparcv9_fcmps_fcc3) /* { */
FPU_NOT_IMPLEMENTED("fcmps_fcc3")
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc0) /* { */
FPU_NOT_IMPLEMENTED("fcmpd_fcc0")
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc1) /* { */
FPU_NOT_IMPLEMENTED("fcmpd_fcc1")
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc2) /* { */
FPU_NOT_IMPLEMENTED("fcmpd_fcc2")
#if !defined(HAS_NATIVE_sparcv9_fcmpd_fcc3) /* { */
FPU_NOT_IMPLEMENTED("fcmpd_fcc3")
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc0) /* { */
FPU_NOT_IMPLEMENTED("fcmpes_fcc0")
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc1) /* { */
FPU_NOT_IMPLEMENTED("fcmpes_fcc1")
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc2) /* { */
FPU_NOT_IMPLEMENTED("fcmpes_fcc2")
#if !defined(HAS_NATIVE_sparcv9_fcmpes_fcc3) /* { */
FPU_NOT_IMPLEMENTED("fcmpes_fcc3")
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc0) /* { */
FPU_NOT_IMPLEMENTED("fcmped_fcc0")
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc1) /* { */
FPU_NOT_IMPLEMENTED("fcmped_fcc1")
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc2) /* { */
FPU_NOT_IMPLEMENTED("fcmped_fcc2")
#if !defined(HAS_NATIVE_sparcv9_fcmped_fcc3) /* { */
FPU_NOT_IMPLEMENTED("fcmped_fcc3")
* Tagged add and subtract.
#if !defined(HAS_NATIVE_sparcv9_tadd_co_imm) /* { */
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
#if !defined(HAS_NATIVE_sparcv9_tadd_co_rrr) /* { */
uint64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
#if !defined(HAS_NATIVE_sparcv9_tadd_co_tv_imm) /* { */
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_tag_overflow
);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
#if !defined(HAS_NATIVE_sparcv9_tadd_co_tv_rrr) /* { */
uint64_t s1
= Rsrc1
, s2
= Rsrc2
, d
;
v
= (s1
& s2
& ~d
) | (~s1
& ~s2
& d
);
c
= (s1
& s2
) | (~d
& (s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_tag_overflow
);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
#if !defined(HAS_NATIVE_sparcv9_tsub_co_imm) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
#if !defined(HAS_NATIVE_sparcv9_tsub_co_rrr) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
#if !defined(HAS_NATIVE_sparcv9_tsub_co_tv_imm) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_tag_overflow
);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
#if !defined(HAS_NATIVE_sparcv9_tsub_co_tv_rrr) /* { */
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
icc_v
= ((v
>> 31) & 1) | (((s1
| s2
) >> 1) & 1) | ((s1
| s2
) & 1);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_tag_overflow
);
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v(icc_v
);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
if (!Zero_Reg(Rdest_num
))
sp
->v9_gsr
= (sp
->v9_gsr
& ~(V9_GSR_IM_MASK
|V9_GSR_IRND_MASK
)) |
((mode
& 7) << V9_GSR_IRND_SHIFT
);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (V9_User
== v9p
->state
|| V9_Priv
== v9p
->state
) {
v9p
->post_precise_trap(sp
, Sparcv9_trap_illegal_instruction
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_software_initiated_reset
);
/* gsr.align = lower 3 bits */
sp
->v9_gsr
&= ~MASK64(2,0);
sp
->v9_gsr
|= (d
& MASK64(2,0));
d
&= ~MASK64(2,0); /* zero lower 3 bits */
if (!Zero_Reg(Rdest_num
)) {
/* gsr.align = two's complement of lower 3 bits */
sp
->v9_gsr
&= ~MASK64(2,0);
sp
->v9_gsr
|= (0x8 - (d
& MASK64(2,0)));
d
&= ~MASK64(2,0); /* zero lower 3 bits */
if (!Zero_Reg(Rdest_num
)) {
if (!Zero_Reg(Rdest_num
)) {
/* gsr.mask = lower 32 bits */
sp
->v9_gsr
&= ~MASK64(63,32);
mask
= (uint32_t) (sp
->v9_gsr
>> 32);
for (idx
= 0; idx
< 8; idx
++) {
byte
= (mask
>> (28 - (idx
*4)) & MASK64(3,0));
d
|= ((F64src1
& MASK64(63-(8*byte
),56-(8*byte
)))<<((byte
-idx
)*8));
d
|= ((F64src1
& MASK64(63-(8*byte
),56-(8*byte
)))>>((idx
-byte
)*8));
d
|= ((F64src2
& MASK64(63-(8*byte
),56-(8*byte
)))<<((byte
-idx
)*8));
d
|= ((F64src2
& MASK64(63-(8*byte
),56-(8*byte
)))>>((idx
-byte
)*8));
/* align data based on GSR.align field */
d
= (F64src1
<< ((sp
->v9_gsr
& MASK64(2,0)) * 8));
if ((sp
->v9_gsr
& MASK64(2,0)) != 0) { /* prevent Rsrc2 >> 64 */
d
|= (F64src2
>> ((8 - (sp
->v9_gsr
& MASK64(2,0))) * 8));
gsr_scale
= (sp
->v9_gsr
>> 3) & 0x1f;
i
= (s2
>> 32) & 0xffffffff;
i
= (s2
>> 0) & 0xffffffff;
d
|= s1
& 0xffffff00ffffff00ull
;
/* fpack16 ignores gsr.scale[4] */
gsr_scale
= (sp
->v9_gsr
>> 3) & 0x0f;
gsr_scale
= (sp
->v9_gsr
>> 3) & 0x1f;
i
= (s2
>> 32) & 0xffffffffull
;
i
= (s2
>> 0) & 0xffffffffull
;
for (idx
= 0; idx
< 8; idx
++) {
if ((s1
& 0xff) > (s2
& 0xff))
d
+= (s1
& 0xff) - (s2
& 0xff);
d
+= (s2
& 0xff) - (s1
& 0xff);
for (idx
= 0; idx
< 8; idx
++) {
if ((s1
& 0xff) > (s2
& 0xff))
d
+= (s1
& 0xff) - (s2
& 0xff);
d
+= (s2
& 0xff) - (s1
& 0xff);
if (!Zero_Reg(Rdest_num
))
F64dest
= ((s1
& 0xff000000) << 32) |
((s1
& 0xff0000) << 24) |
((s2
& 0xff000000) << 24) |
((s2
& 0xff0000) << 16) |
F64dest
= ((s2
& 0xff000000) << 28) |
((s2
& 0xff0000) << 20) |
d
|= ((s1
>> 33) & 3) << 2;
d
|= ((s1
>> 55) & 1) << 4;
d
|= ((s1
>> 13) & 0xf) << 5;
d
|= ((s1
>> 35) & 0xf) << 9;
d
|= ((s1
>> 56) & 0xf) << 13;
d
|= ((s1
>> 17) & ((1<<n
)-1)) << 17;
d
|= ((s1
>> 39) & ((1<<n
)-1)) << (17+n
);
d
|= ((s1
>> 60) & 0xf) << (17+(2*n
));
if (!Zero_Reg(Rdest_num
))
d
|= ((s1
>> 33) & 3) << 2;
d
|= ((s1
>> 55) & 1) << 4;
d
|= ((s1
>> 13) & 0xf) << 5;
d
|= ((s1
>> 35) & 0xf) << 9;
d
|= ((s1
>> 56) & 0xf) << 13;
d
|= ((s1
>> 17) & ((1<<n
)-1)) << 17;
d
|= ((s1
>> 39) & ((1<<n
)-1)) << (17+n
);
d
|= ((s1
>> 60) & 0xf) << (17+(2*n
));
if (!Zero_Reg(Rdest_num
))
d
|= ((s1
>> 33) & 3) << 2;
d
|= ((s1
>> 55) & 1) << 4;
d
|= ((s1
>> 13) & 0xf) << 5;
d
|= ((s1
>> 35) & 0xf) << 9;
d
|= ((s1
>> 56) & 0xf) << 13;
d
|= ((s1
>> 17) & ((1<<n
)-1)) << 17;
d
|= ((s1
>> 39) & ((1<<n
)-1)) << (17+n
);
d
|= ((s1
>> 60) & 0xf) << (17+(2*n
));
if (!Zero_Reg(Rdest_num
))
/*
 * sim_edge: simulates all 12 EDGE opcodes:
 *	edge8cc, edge8lcc, edge16cc, edge16lcc, edge32cc, edge32lcc (VIS1)
 *	edge8n,  edge8ln,  edge16n,  edge16ln,  edge32n,  edge32ln  (VIS2)
 * No code is provided here for setting the condition codes for
 * the cc-setting variants (opcode bit 5 = 0). They must be computed
 * externally to this function by computing (op1 - op2) and
 * setting the condition code bits as for the SUBCC instruction.
 */
static uint64_t /* returns edge result */
uint32_t instr
, /* instruction opcode */
uint64_t op1
, /* operand1 */
uint64_t op2
, /* operand2 */
bool_t am_flag
/* pstate.AM */
uint64_t res_mask
; /* result mask: 8/4/2 bits */
uint64_t ledge
; /* left edge */
uint64_t redge
; /* right edge */
/* convert opcode bits 8:7 (8/16/32) to shift counts */
uint64_t sh_edg
= (instr
>> 7) & 3; /* edge shift (0/1/2) */
uint64_t sh_res
= 8 >> sh_edg
; /* result shift (8/4/2) */
uint64_t lsize
= (op1
& 7) >> sh_edg
; /* left edge size */
uint64_t rsize
= (op2
& 7) >> sh_edg
; /* right edge size */
/* compare address bits for equality */
uint64_t adr_diff
= op1
^ op2
;
/* in 32 bit mode ignore miscompares in bits 63:32 */
if(am_flag
) /* test AM bit */
adr_diff
= (uint32_t)adr_diff
;
adr_diff
>>= 3; /* address-equality flag for later */
/* BIG or LITTLE endian instruction variant? (opcode bit 6) */
if(instr
& 0x40) /* 1=little-endian */
/* 0xFF = starting left mask (byte0) */
/* 0x1FE = starting right mask (byte1) and extra ones */
res_mask
= (0xFFu
<< sh_res
) >> 8;
ledge
= (0xFFu
<< lsize
) & res_mask
;
redge
= ((0x1FEu
<< rsize
) >> 8) & res_mask
;
/* 0xFF = starting left mask (byte0) */
/* 0x7F80 = starting right mask (byte0) and extra ones */
res_mask
= (0xFF00u
>> sh_res
) & 0xFF;
ledge
= ((0xFFu
>> lsize
) & res_mask
) >> rjust
;
redge
= ((0x7F80u
>> rsize
) & res_mask
) >> rjust
;
/* Don't forget to calculate the CC from op1-op2 */
sim_edge_cc(simcpu_t
*sp
, uint64_t op1
, uint64_t op2
)
v
= (s1
& ~s2
& ~d
) | (~s1
& s2
& d
);
c
= (~s1
& s2
) | (d
& (~s1
| s2
));
sp
->v9_ccr
= V9_xcc_v((v
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_v((v
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_c((c
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_c((c
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_n((d
>> 63) & 1);
sp
->v9_ccr
|= V9_icc_n((d
>> 31) & 1);
sp
->v9_ccr
|= V9_xcc_z(d
? 0 : 1);
sp
->v9_ccr
|= V9_icc_z((d
& MASK64(31,0)) ? 0 : 1);
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
sim_edge_cc(sp
, Rsrc1
, Rsrc2
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
sim_edge_cc(sp
, Rsrc1
, Rsrc2
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
sim_edge_cc(sp
, Rsrc1
, Rsrc2
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
sim_edge_cc(sp
, Rsrc1
, Rsrc2
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
sim_edge_cc(sp
, Rsrc1
, Rsrc2
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
sim_edge_cc(sp
, Rsrc1
, Rsrc2
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
sparcv9_cpu_t
* v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (!Zero_Reg(Rdest_num
))
Rdest
= sim_edge(xcip
->rawi
, Rsrc1
, Rsrc2
,
#define FCMPx16(_cond_) \
uint64_t fs1, fs2, res; \
for (i = 0; i < 4; i++) { \
if (Zero_Reg(Rdest_num)) \
#define FCMPx32(_cond_) \
uint64_t fs1, fs2, res; \
for (i = 0; i < 2; i++) { \
if (Zero_Reg(Rdest_num)) \
for (i
= 0; i
< 4; i
++) {
res
|= (uint64_t)d
<< 48;
for (i
= 0; i
< 4; i
++) {
res
|= (uint64_t)d
<< 48;
s2
= (int16_t) (fs2
>> 16);
for (i
= 0; i
< 4; i
++) {
res
|= (uint64_t)d
<< 48;
for (i
= 0; i
< 4; i
++) {
res
|= (uint64_t)d
<< 48;
for (i
= 0; i
< 4; i
++) {
res
|= (uint64_t)d
<< 48;
for (i
= 0; i
< 2; i
++) {
res
|= (uint64_t)d
<< 32;
for (i
= 0; i
< 2; i
++) {
res
|= (uint64_t)d
<< 32;
/*************************************************************/
/*
 * Floating point status register(s) ...
 *
 * sp->v9_fsr_ctrl holds all the FP control bits (only);
 * the tem, cexc and aexc fields are zeroed.
 * This value is used for setup before execution .. and holds
 * the current control bits and the current condition codes.
 * The error and trap enable bits should always be zeroed.
 *
 * sp->v9_fsr_exc holds the exception bits (current and accumulated)
 * in the same bit positions as they occur in the fsr.
 *
 * sp->v9_fsr_tem holds the trap enable bits. NOTE: these bits are shifted
 * down to start at bit 0 in this register - this is to make
 * masking with fsr_exc one instruction faster in the
 * fpop execution common case.
 */
/*
 * Store only the lower 32 bits of the FSR: the low word comes from
 * val, the upper word is retained from the current control state,
 * and the merged value goes through the full v9_set_fsr() path so
 * the split ctrl/tem/exc fields stay consistent.
 */
void v9_set_fsr_lower(simcpu_t * sp, uint64_t val)
{
	uint64_t merged;

	merged = (val & MASK64(31, 0)) | (sp->v9_fsr_ctrl & ~MASK64(31, 0));
	v9_set_fsr(sp, merged);
}
/*
 * Store a full 64-bit value into the simulated FSR, splitting it into
 * the three fields the simulator keeps separately:
 *   - v9_fsr_ctrl: control bits and condition codes (tem/aexc/cexc cleared,
 *     ftt preserved from the previous contents — ftt is not writable here);
 *   - v9_fsr_tem:  trap-enable bits, shifted down to start at bit 0
 *     (see the scheme described above);
 *   - v9_fsr_exc:  current + accumulated exception bits, in-place.
 */
void v9_set_fsr(simcpu_t * sp, uint64_t val)
{
	DBGFSR( uint64_t oldval = v9_get_fsr(sp); );

	/* Discard writes to undefined FSR bits (keep the NS bit too). */
	val &= V9_FSR_REG_MASK | V9_FSR_NS_MASK;
	/* ftt survives a direct FSR write: carry it over from the old ctrl. */
	val |= sp->v9_fsr_ctrl & V9_FSR_FTT_MASK;

	sp->v9_fsr_ctrl = val &
	    ~(V9_FSR_TEM_MASK | V9_FSR_AEXC_MASK | V9_FSR_CEXC_MASK);
	sp->v9_fsr_tem = (val & V9_FSR_TEM_MASK) >> V9_FSR_TEM_BIT;
	sp->v9_fsr_exc = val & (V9_FSR_AEXC_MASK | V9_FSR_CEXC_MASK);

	DBGFSR( lprintf(sp->gid,
	    "v9_set_fsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n",
	    sp->pc, v9_get_fsr(sp), oldval); );
}
/*
 * Reassemble the architectural FSR value from the simulator's three
 * split fields: control/condition bits, the trap-enable bits (stored
 * shifted down to bit 0, so shift them back up), and the exception bits.
 */
uint64_t v9_get_fsr(simcpu_t * sp)
{
	uint64_t fsr;

	fsr  = sp->v9_fsr_ctrl;
	fsr |= sp->v9_fsr_tem << V9_FSR_TEM_BIT;
	fsr |= sp->v9_fsr_exc;
	return fsr;
}
/*
 * Update the FSR fields an executed FP op is allowed to change:
 * the ftt field inside v9_fsr_ctrl, and the current/accumulated
 * exception bits in v9_fsr_exc. All other control bits (and the
 * trap-enable field) are left untouched.
 */
void v9_set_fsr_fp_op (simcpu_t * sp, uint64_t val)
{
	DBGFSR( uint64_t oldval = v9_get_fsr(sp); );

	/* Replace ftt with the value supplied by the FP op. */
	sp->v9_fsr_ctrl &= ~(V9_FSR_FTT_MASK);
	sp->v9_fsr_ctrl |= (val & V9_FSR_FTT_MASK);
	/* Exception bits are overwritten wholesale (aexc + cexc). */
	sp->v9_fsr_exc = val & (V9_FSR_AEXC_MASK | V9_FSR_CEXC_MASK);

	DBGFSR( lprintf(sp->gid,
	    "v9_set_fsr_fp_op: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n",
	    sp->pc, v9_get_fsr(sp), oldval); );
}
/*-------------------------- OLD CODE ------------------------*/
-- /* We still get crappy code from these from the Sun*/
-- /* compiler - eventually well re-write these entirely*/
-- /* as host native routines in assembler ...*/
-- /* BTW: cico = condition codes in+out ... not just*/
-- /* carry in+carry out ... note addcc sets *all* cond codes*/
-- Rdest
= Rsrc1
+ Simm16
+ (Rccr
& 1LL);
-- Rdest
= il_add_co( Rsrc1
, Simm16
, &Rccr
);
-- Rdest
= il_add_cico( Rsrc1
, Simm16
, &Rccr
);
-- IMPL( add_co_imm_rd0
)
-- (void)il_add_co( Rsrc1
, Simm16
, &Rccr
);
-- IMPL( add_cico_imm_rd0
)
-- (void)il_add_cico( Rsrc1
, Simm16
, &Rccr
);
-- Rdest
= Rsrc1
+ Rsrc2
+ (Rccr
& 1LL);
-- Rdest
= il_add_co( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= il_add_cico( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( add_co_rrr_rd0
)
-- (void)il_add_co( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( add_cico_rrr_rd0
)
-- (void)il_add_cico( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= Rsrc1
- Simm16
- (Rccr
& 1LL);
-- Rdest
= il_sub_co( Rsrc1
, Simm16
, &Rccr
);
-- IMPL( sub_co_imm_rd0
)
-- (void)il_sub_co( Rsrc1
, Simm16
, &Rccr
);
-- Rdest
= il_sub_cico( Rsrc1
, Simm16
, &Rccr
);
-- IMPL( sub_cico_imm_rd0
)
-- (void)il_sub_cico( Rsrc1
, Simm16
, &Rccr
);
-- Rdest
= Rsrc1
- Rsrc2
- (Rccr
& 1LL);
-- Rdest
= il_sub_co( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( sub_co_rrr_rd0
)
-- (void)il_sub_co( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= il_sub_cico( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( sub_cico_rrr_rd0
)
-- (void)il_sub_cico( Rsrc1
, Rsrc2
, &Rccr
);
-- /* Logic instructions*/
-- Rdest
= Rsrc1
& Simm16
;
-- Rdest
= il_and_cc( Rsrc1
, Simm16
, &Rccr
);
-- IMPL( and_cc_imm_rd0
)
-- (void)il_and_cc( Rsrc1
, Simm16
, &Rccr
);
-- Rdest
= Rsrc1
& Rsrc2
;
-- Rdest
= il_and_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( and_cc_rrr_rd0
)
-- (void)il_and_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= Rsrc1
& ~Rsrc2
;
-- Rdest
= il_andn_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( andn_cc_rrr_rd0
)
-- (void)il_andn_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= Rsrc1
| Simm16
;
-- Rdest
= il_or_cc( Rsrc1
, Simm16
, &Rccr
);
-- (void)il_or_cc( Rsrc1
, Simm16
, &Rccr
);
-- Rdest
= Rsrc1
| Rsrc2
;
-- Rdest
= il_or_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- (void)il_or_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= Rsrc1
| ~Rsrc2
;
-- Rdest
= il_orn_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( orn_cc_rrr_rd0
)
-- (void)il_orn_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= Rsrc1
^ Simm16
;
-- Rdest
= il_xor_cc( Rsrc1
, Simm16
, &Rccr
);
-- IMPL( xor_cc_imm_rd0
)
-- (void)il_xor_cc( Rsrc1
, Simm16
, &Rccr
);
-- Rdest
= Rsrc1
^ Rsrc2
;
-- Rdest
= il_xor_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( xor_cc_rrr_rd0
)
-- (void)il_xor_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- Rdest
= ~(Rsrc1
^ Rsrc2
);
-- Rdest
= il_xnor_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- IMPL( xnor_cc_rrr_rd0
)
-- (void)il_xnor_cc( Rsrc1
, Rsrc2
, &Rccr
);
-- /* branch with no annulled delay slot */
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(Rccr
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- /* branch with annulled delay slot */
-- IMPL( brcond_ds_annul
)
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(Rccr
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- /* branch with no annulled delay slot */
-- func
= ((func
>>1) ^ (func
>>3))|(func
>>2); /* get Z|(N^V) in bits 0,4 */
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(func
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- /* branch with annulled delay slot */
-- IMPL( br_g_le_ds_annul
)
-- func
= ((func
>>1) ^ (func
>>3))|(func
>>2); /* get Z|(N^V) in bits 0,4 */
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(func
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- /* branch with no annulled delay slot */
-- func
= (func
>>1) ^ (func
>>3); /* get (N^V) in bits 0,4 */
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(func
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- /* branch with annulled delay slot */
-- IMPL( br_ge_l_ds_annul
)
-- func
= (func
>>1) ^ (func
>>3); /* get (N^V) in bits 0,4 */
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(func
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- /* branch with no annulled delay slot */
-- func
= (func
>>2) | func
; /* get (Z|C) in bits 0,4 */
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(func
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- /* branch with annulled delay slot */
-- IMPL( br_gu_leu_ds_annul
)
-- func
= (func
>>2) | func
; /* get (Z|C) in bits 0,4 */
-- cpup
->cti_executed
= TRUE
;
-- if (COMPUTE_TAKEN(func
)) {
-- xpc
= Rpc
+ SBRoffset32
;
-- cpup
->cti_executed
= TRUE
;
-- xpc
= Rpc
+ SBRoffset32
;
-- /* FIXME: prob not worth burning an instn on its own
-- * combine with bralways_ds above, and carry annul
-- * bit in the decoded form
-- IMPL( bralways_ds_annul
)
-- cpup
->cti_executed
= TRUE
;
-- xpc
= Rpc
+ SBRoffset32
;
-- IMPL( brnever_ds_annul
)
-- cpup
->cti_executed
= TRUE
;