* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: sparcv9core.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
#pragma ident "@(#)sparcv9core.c 1.50 07/10/12 SMI"
* This module contains the core execution routines for a SPARC v9
* These augment the generic instructions implemented in the
* Moreover this module implements all the generic SPARC v9 operations
* from register window manipulations to ASI operations, as well
* as managing the processor traps, trap tables and execution state.
#include "tsparcv9internal.h"
#include "sparcv9decode.h"
#define ss_get_fsr(_sp) (_sp->v9_fsr_ctrl | (_sp->v9_fsr_tem<<V9_FSR_TEM_BIT) \
* Initialisation support functions
sparcv9_cpu_alloc(domain_t
*domainp
, config_proc_t
*config_procp
,
uint_t nwins
, uint_t nglobals
, uint_t maxtl
, uint64_t ver
,
bool_t has_fpu
, void *magicptr
)
v9p
= Xcalloc(1, sparcv9_cpu_t
);
* nwins_mask needs to be a mask of the number of
* bits needed to store nwins. We use this mask
* when we write a new value of nwins to ensure
* that only th bits implemented by nwins in the
v9p
->nwins_mask
= (i
- 1);
v9p
->nglobals
= nglobals
;
v9p
->globalsp
= Xcalloc(8 * nglobals
, uint64_t);
v9p
->winsp
= Xcalloc(16 * nwins
, uint64_t);
ASSERT(maxtl
< SPARCv9_TLSPACE
);
v9p
->fpu_on
= false; /* make all the FPU info be consistent */
v9p
->pstate
.fpu_enabled
= v9p
->has_fpu
;
v9p
->had_RED_trap
= false;
sp
= sim_cpu_alloc(config_procp
, (void *)v9p
);
* Initialize stuff the simcpu_t
/* setup the call backs for simcpu_t */
/* CPU specific fields - so force an error if not fixed */
SANITY( sp
->xic_miss
= NULL
; );
SANITY( sp
->xicachep
= NULL
; );
SANITY( sp
->xdc
.miss
= NULL
; );
sp
->decodemep
= sparcv9_decode_me
;
sp
->v9_ccr
= 0; /* sparc v9 condition codes stored in simcpu_t */
v9p
->state
= V9_UnInitialised
; /* Need a trap to get rid of this */
* Performance measurement functions
/*
 * Dump per-cpu instruction-count and xicache/xdcache hit/miss statistics
 * accumulated since the previous call, then snapshot the counters so the
 * next call reports deltas. Entire body compiles away unless
 * PERFORMANCE_CHECK is set.
 *
 * NOTE(review): reconstructed from a garbled listing — the icount source
 * and the trailing printf arguments were lost; the variable choices below
 * follow the surrounding visible code. Confirm against the original.
 */
void
sparcv9_perf_dump(void *ptr)
{
#if PERFORMANCE_CHECK /* { */
	sparcv9_cpu_t *v9p = ptr;
	simcpu_t *sp = v9p->simp;
	uint_t cid = v9p->simp->gid;
	uint64_t icount = ICOUNT(sp); /* NOTE(review): reconstructed */
	double scale;
	uint64_t rtotal, utotal, ptotal, htotal;
	uint64_t xic_hits, xic_misses, xdc_hits, xdc_misses;
	uint64_t xic_flushes, xdc_flushes;

	scale = 100.0 / (double)icount;

	PERF_ACCUMULATE_ICOUNT(v9p);

	/* per-execution-state instruction totals */
	rtotal = v9p->perf.icount[V9_RED];
	utotal = v9p->perf.icount[V9_User];
	ptotal = v9p->perf.icount[V9_Priv];
	htotal = v9p->perf.icount[V9_HyperPriv];

	/* deltas since the previous dump */
	xdc_hits = sp->xdc_hits - sp->prev_xdc_hits;
	xdc_misses = sp->xdc_misses - sp->prev_xdc_misses;
	xdc_flushes = sp->xdc_flushes - sp->prev_xdc_flushes;

	/* xicache hits are derived: every executed instn not a miss was a hit */
	sp->xic_hits = icount - sp->xic_misses;
	xic_hits = sp->xic_hits - sp->prev_xic_hits;
	xic_misses = sp->xic_misses - sp->prev_xic_misses;
	xic_flushes = sp->xic_flushes - sp->prev_xic_flushes;

	log_printf(cid, "Instn cnts: R=%llu (%.2llf%%), H=%llu (%.2llf%%), "
	    "P=%llu (%.2llf%%), U=%llu (%.2llf%%), Total=%llu\n",
	    rtotal, scale * (double)rtotal,
	    htotal, scale * (double)htotal,
	    ptotal, scale * (double)ptotal,
	    utotal, scale * (double)utotal,
	    rtotal + utotal + ptotal + htotal);

	ASSERT((rtotal + utotal + ptotal + htotal) == ICOUNT(sp));

	log_printf(cid, "xdcache: hits=%llu (%.2llf%%), "
	    "misses=%llu (%.2llf%%), "
	    "avg_hits=(%.2llf%%) flushes=%llu\n",
	    xdc_hits, 100.0 * xdc_hits / (double)(xdc_hits + xdc_misses),
	    xdc_misses, 100.0 * xdc_misses / (double)(xdc_hits + xdc_misses),
	    100.0 * sp->xdc_hits / (double)(sp->xdc_hits + sp->xdc_misses),
	    xdc_flushes);

	log_printf(cid, "xicache: hits=%llu (%.2llf%%), "
	    "misses=%llu (%.2llf%%), "
	    "avg hits=(%.2llf%%) flushes=%llu\n",
	    xic_hits, 100.0 * xic_hits / (double)(xic_hits + xic_misses),
	    xic_misses, 100.0 * xic_misses / (double)(xic_hits + xic_misses),
	    100.0 * sp->xic_hits / (double)(sp->xic_hits + sp->xic_misses),
	    xic_flushes);

	/* snapshot the counters for the next delta computation */
	sp->prev_xdc_hits = sp->xdc_hits;
	sp->prev_xdc_misses = sp->xdc_misses;
	sp->prev_xdc_flushes = sp->xdc_flushes;
	sp->prev_xic_hits = sp->xic_hits;
	sp->prev_xic_misses = sp->xic_misses;
	sp->prev_xic_flushes = sp->xic_flushes;

	log_printf(cid, "Instn count delta : %llu pc : 0x%llx\n",
	    (uint64_t)icount - sp->prev_icount, sp->pc);
	sp->prev_icount = (uint64_t)icount;
#endif /* PERFORMANCE_CHECK } */
}
* Execution support functions (and instruction impls)
* Debugger interface support functions
sparcv9_regread(sparcv9_cpu_t
*v9p
, uint_t regnum
, uint64_t *valp
)
sparcv9_reg_t regn
= regnum
;
* Assume that everything we care about has
* been written back to the architectural
assert(v9p
->active_window
== -1);
assert(v9p
->active_global
== -1);
if (regn
>= Reg_sparcv9_g0
&& regn
<= Reg_sparcv9_g7
) {
assert(v9p
->gl
< v9p
->nglobals
);
idx
= 8*v9p
->gl
+ (regn
-Reg_sparcv9_g0
);
*valp
= v9p
->globalsp
[idx
];
if (regn
>= Reg_sparcv9_r8
&& regn
<= Reg_sparcv9_r23
) {
assert(v9p
->cwp
< v9p
->nwins
);
idx
= (v9p
->nwins
- 1 - v9p
->cwp
) * 2 * V9_REG_GROUP
+
if (regn
>= Reg_sparcv9_r24
&& regn
<= Reg_sparcv9_r31
) {
assert(v9p
->cwp
< v9p
->nwins
);
idx
= (v9p
->cwp
== 0) ? 0 :
(v9p
->nwins
- 1 - v9p
->cwp
) * 2 * V9_REG_GROUP
;
idx
+= (regn
-Reg_sparcv9_r24
);
case Reg_sparcv9_pc
: val
= v9p
->simp
->pc
; break;
case Reg_sparcv9_npc
: val
= v9p
->simp
->npc
; break;
case Reg_sparcv9_ccr
: val
= sp
->v9_ccr
; break;
case Reg_sparcv9_y
: val
= sp
->v9_y
; break;
case Reg_sparcv9_asi
: val
= sp
->v9_asi
; break;
case Reg_sparcv9_pil
: val
= v9p
->pil
; break;
val
= v9p
->pstate
.priv
? (1 << V9_PSTATE_PRIV_BIT
) : 0;
val
|= v9p
->pstate
.mm
<< V9_PSTATE_MM_BITS
;
val
|= v9p
->pstate
.int_enabled
?
(1 << V9_PSTATE_IE_BIT
) : 0;
val
|= v9p
->pstate
.fpu_enabled
?
(1 << V9_PSTATE_PEF_BIT
) : 0;
val
|= v9p
->pstate
.tle
? (1 << V9_PSTATE_TLE_BIT
) : 0;
val
|= v9p
->pstate
.cle
? (1 << V9_PSTATE_CLE_BIT
) : 0;
val
|= v9p
->pstate
.tct
? (1 << V9_PSTATE_TCT_BIT
) : 0;
val
|= v9p
->pstate
.addr_mask
?
(1<< V9_PSTATE_AM_BIT
) : 0;
val
= N_TSTATE(v9p
, v9p
->tl
);
case Reg_sparcv9_tba
: val
= v9p
->tba
; break;
case Reg_sparcv9_tl
: val
= v9p
->tl
; break;
val
= N_TT(v9p
, v9p
->tl
);
val
= N_TPC(v9p
, v9p
->tl
);
val
= N_TNPC(v9p
, v9p
->tl
);
val
= v9p
->wstate_normal
<< V9_WSTATE_NORMAL_BITS
;
val
|= v9p
->wstate_other
<< V9_WSTATE_OTHER_BITS
;
case Reg_sparcv9_cwp
: val
= v9p
->cwp
; break;
case Reg_sparcv9_cansave
: val
= v9p
->cansave
; break;
case Reg_sparcv9_canrestore
: val
= v9p
->canrestore
; break;
case Reg_sparcv9_cleanwin
: val
= v9p
->cleanwin
; break;
case Reg_sparcv9_otherwin
: val
= v9p
->otherwin
; break;
return (false); /* unknown / unsupported regnum */
val
= 0x0badcafedeadbeef;
/* returns false on failure */
sparcv9_regwrite(sparcv9_cpu_t
*v9p
, uint_t regnum
, uint64_t val
)
sparcv9_reg_t regn
= regnum
;
if (regn
>= Reg_sparcv9_g0
&& regn
<= Reg_sparcv9_g7
) {
assert(v9p
->gl
< v9p
->nglobals
);
idx
= 8*v9p
->gl
+ (regn
-Reg_sparcv9_g0
);
v9p
->globalsp
[idx
] = val
;
if (regn
>= Reg_sparcv9_r8
&& regn
<= Reg_sparcv9_r23
) {
assert(v9p
->cwp
< v9p
->nwins
);
idx
= (v9p
->nwins
- 1 - v9p
->cwp
) * 2 * V9_REG_GROUP
+
if (regn
>= Reg_sparcv9_r24
&& regn
<= Reg_sparcv9_r31
) {
assert(v9p
->cwp
< v9p
->nwins
);
0 : (v9p
->nwins
- 1 - v9p
->cwp
) * 2 * V9_REG_GROUP
;
idx
+= (regn
-Reg_sparcv9_r24
);
v9p
->cwp
= val
>= v9p
->nwins
? v9p
->nwins
- 1 : val
;
return (false); /* unknown / unsupported regnum */
* FIXME: Kludge - for the moment - only one set of breakpoints !
bp_info_t
*globalbpinfop
;
/*
 * Install a breakpoint at bpaddr (lazily creating the shared global
 * breakpoint set) and attach the set to this cpu if not already attached.
 * NOTE(review): the `sp` declaration was lost from the garbled listing and
 * has been reconstructed as v9p->simp — confirm.
 */
void
sparcv9_set_break(sparcv9_cpu_t *v9p, tvaddr_t bpaddr)
{
	simcpu_t *sp = v9p->simp;

	if (globalbpinfop == NULL)
		globalbpinfop = breakpoint_init();

	breakpoint_insert(globalbpinfop, bpaddr,
	    DEFAULT_BP_CONTEXT /* FIXME */);

	if (sp->bp_infop == NULL)
		sp->bp_infop = globalbpinfop;
}
/*
 * Arrange to break on the next instruction (single-step style), lazily
 * creating and attaching the shared breakpoint set as needed.
 * NOTE(review): `sp` reconstructed as v9p->simp — confirm.
 */
void
sparcv9_set_break_next(sparcv9_cpu_t *v9p)
{
	simcpu_t *sp = v9p->simp;

	if (globalbpinfop == NULL)
		globalbpinfop = breakpoint_init();

	breakpoint_insert_next(globalbpinfop);

	if (sp->bp_infop == NULL)
		sp->bp_infop = globalbpinfop;
}
/*
 * Remove the break-on-next-instruction request.
 * NOTE(review): `sp` reconstructed as v9p->simp — confirm.
 */
void
sparcv9_clear_break_next(sparcv9_cpu_t *v9p)
{
	simcpu_t *sp = v9p->simp;

	if (globalbpinfop == NULL)
		globalbpinfop = breakpoint_init();

	breakpoint_clear_next(globalbpinfop);

	if (sp->bp_infop == NULL)
		sp->bp_infop = globalbpinfop;
}
sparcv9_hit_break(sparcv9_cpu_t
*v9p
, tvaddr_t bpaddr
)
if (sp
->bp_infop
== (bp_info_t
*)0)
return breakpoint_find_by_addr(sp
->bp_infop
, bpaddr
,
DEFAULT_BP_CONTEXT
) != NULL
;
/*
 * Delete the breakpoint at bpaddr; when the set becomes empty, detach it
 * from this cpu so the fast path stops checking breakpoints.
 * NOTE(review): `sp` reconstructed as v9p->simp — confirm.
 */
void
sparcv9_clear_break(sparcv9_cpu_t *v9p, tvaddr_t bpaddr)
{
	simcpu_t *sp = v9p->simp;

	if (sp->bp_infop == (bp_info_t *)0)
		return;

	breakpoint_delete_by_addr(sp->bp_infop, bpaddr,
	    DEFAULT_BP_CONTEXT /* FIXME */);

	if (sp->bp_infop->active_count == 0)
		sp->bp_infop = (bp_info_t *)0;
}
/*
 * Print the currently installed breakpoints, lazily creating and
 * attaching the shared breakpoint set as needed.
 * NOTE(review): `sp` reconstructed as v9p->simp — confirm.
 */
void
sparcv9_print_break(sparcv9_cpu_t *v9p)
{
	simcpu_t *sp = v9p->simp;

	if (globalbpinfop == NULL)
		globalbpinfop = breakpoint_init();

	breakpoint_print(globalbpinfop);

	if (sp->bp_infop == NULL)
		sp->bp_infop = globalbpinfop;
}
* cache breakpoints in a text file
sparcv9_dump_break(sparcv9_cpu_t
*v9p
, FILE *fp
)
if (globalbpinfop
== NULL
) {
globalbpinfop
= breakpoint_init();
breakpoint_dump(globalbpinfop
, fp
);
if (sp
->bp_infop
== NULL
) {
sp
->bp_infop
= globalbpinfop
;
* restore breakpoints cached in a text file
sparcv9_restore_break(FILE *fp
)
if (globalbpinfop
== NULL
) {
globalbpinfop
= breakpoint_init();
breakpoint_restore(globalbpinfop
, fp
);
* SPARC register windows could have been implemented better.
* So here's how they are implemented in Legion;
* For each window shuffle - we copy the old window frame out
* and copy the new one in to the working register file from
* the architectural (sparc) one. We could optimise the
* common case of incrementing or decrementing the current
* window pointer (cwp), but this version deals with all cases.
* The v9p->active_window tells us which frame is supposed to
* be in the simcpu_t register file, and new_window the
* If new_window == -1 we just want to write back into the
* architectural file. If v9p->active_window == -1 we need
* to retrieve from the architectural file.
* If cwp is incremented, the old outs become the new ins
* (yes I know it's backwards )
* Anyway so that the rotation works correctly ( and without
* decoding %g0 as sim register 31), we work our way down the
* architectural register array as we increment cwp.
* So what was register offset X becomes register offset X+16
* The only special case is (by our choice) window 0, which
* sits at (nwins-1)*16 as a base, but the ins (regs 24-31)
* are placed at the bottom of the architectural array
/*
 * Swap the cached register-window frame: write the currently active
 * window (if any) back to the architectural array, then load
 * `new_window` into the simcpu_t working register file.
 *
 * Layout: window w's outs+locals sit at (nwins-1-w)*2*V9_REG_GROUP, so
 * adjacent windows' ins/outs overlap naturally; window 0's ins wrap to
 * the very bottom of the array.
 *
 * new_window == -1 means "write back only; load nothing".
 *
 * NOTE(review): reconstructed from a garbled listing — the -1 early
 * return and the if/else skeletons were inferred from the visible copy
 * loops and the explanatory comment above this function. Confirm.
 */
void
sparcv9_active_window(simcpu_t *sp, uint_t new_window)
{
	sparcv9_cpu_t *v9p;
	uint_t i;
	uint64_t *sim_regp, *arch_regp;

	v9p = (sparcv9_cpu_t *)(sp->specificp);

	if (v9p->active_window == -1)
		goto load_window;

	/* OK stash back the old window's contents */
	arch_regp = &(v9p->winsp[(v9p->nwins - 1 - v9p->active_window) *
	    2 * V9_REG_GROUP]);
	sim_regp = &sp->intreg[V9_OUT_OFFSET];

	if (v9p->active_window == 0) {
		/* window 0: outs+locals in place, ins wrap to array base */
		for (i = V9_REG_GROUP * 2; i > 0; i--) {
			*arch_regp++ = *sim_regp++;
		}
		arch_regp = &v9p->winsp[0];
		sim_regp = &sp->intreg[V9_IN_OFFSET];
		for (i = V9_REG_GROUP; i > 0; i--) {
			*arch_regp++ = *sim_regp++;
		}
	} else {
		/* outs, locals and ins are contiguous */
		for (i = V9_REG_GROUP * 3; i > 0; i--) {
			*arch_regp++ = *sim_regp++;
		}
	}

	v9p->active_window = -1; /* tag our cached window state as invalid */

load_window:
	if (new_window == (uint_t)-1)
		return; /* write-back only */

	arch_regp = &(v9p->winsp[(v9p->nwins - 1 - new_window) *
	    2 * V9_REG_GROUP]);
	sim_regp = &sp->intreg[V9_OUT_OFFSET];

	if (new_window == 0) {
		for (i = V9_REG_GROUP * 2; i > 0; i--) {
			*sim_regp++ = *arch_regp++;
		}
		arch_regp = &v9p->winsp[0];
		sim_regp = &sp->intreg[V9_IN_OFFSET];
		for (i = V9_REG_GROUP; i > 0; i--) {
			*sim_regp++ = *arch_regp++;
		}
	} else {
		for (i = V9_REG_GROUP * 3; i > 0; i--) {
			*sim_regp++ = *arch_regp++;
		}
	}

	v9p->active_window = new_window;
}
* Basically same story - except that the globals are easier
* to handle as there is no overlap.
sparcv9_active_globals(simcpu_t
*sp
, uint_t new_global
)
uint64_t *sim_regp
, *arch_regp
;
v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
if (v9p
->active_global
== -1) goto load_global
;
/* OK stash back the old globals' contents */
arch_regp
= &(v9p
->globalsp
[(v9p
->active_global
)*V9_GLOBAL_GROUP
]);
sim_regp
= &sp
->intreg
[V9_GLOBAL_OFFSET
];
for (i
= V9_GLOBAL_GROUP
; i
> 0; i
--) {
*arch_regp
++ = *sim_regp
++;
v9p
->active_global
= -1; /* tag our cached global state as invalid */
arch_regp
= &(v9p
->globalsp
[new_global
*V9_GLOBAL_GROUP
]);
sim_regp
= &sp
->intreg
[V9_GLOBAL_OFFSET
];
for (i
= V9_GLOBAL_GROUP
; i
> 0; i
--) {
*sim_regp
++ = *arch_regp
++;
v9p
->active_global
= new_global
;
/*
 * SAVE instruction: take a spill or clean_window trap if required,
 * otherwise rotate to the next window and complete the pending add by
 * writing `newval` into destination register `rdest_num`.
 *
 * NOTE(review): reconstructed from a garbled listing — the
 * `cansave == 0` spill guard was lost from this view and has been
 * inferred from the "/ * Possible spill trap ? * /" comment and the
 * SPARC v9 window model; confirm against the original.
 */
void
sparcv9_save_instr(simcpu_t *sp, uint_t rdest_num, tvaddr_t newval)
{
	sparcv9_cpu_t *v9p = (sparcv9_cpu_t *)(sp->specificp);
	uint_t tt;

#if HYPERPRIVILEGED_USE_WARN /* { */
	if (V9_RED == v9p->state || V9_HyperPriv == v9p->state)
		EXEC_WARNING(("save instruction in hyperprivileged mode "
		    "(%%pc=0x%llx)", sp->pc));
#endif /* } HYPERPRIVILEGED_USE_WARN */

	/* Possible spill trap ? */
	if (v9p->cansave == 0) {
		if (v9p->otherwin != 0) {
			tt = Sparcv9_trap_spill_0_other |
			    (v9p->wstate_other << 2);
		} else {
			tt = Sparcv9_trap_spill_0_normal |
			    (v9p->wstate_normal << 2);
		}
		v9p->post_precise_trap(sp, tt);
		return;
	}

	/* the window we are moving into is not clean ? */
	if ((v9p->cleanwin - v9p->canrestore) == 0) {
		v9p->post_precise_trap(sp, Sparcv9_trap_clean_window);
		return;
	}

	v9p->cwp = INC_MOD(v9p->cwp, v9p->nwins);
	v9p->cansave = DEC_MOD(v9p->cansave, v9p->nwins);
	v9p->canrestore = INC_MOD(v9p->canrestore, v9p->nwins);
	sparcv9_active_window(sp, v9p->cwp);

	/* destination register is in the NEW window */
	if (!Zero_Reg(rdest_num))
		sp->intreg[rdest_num] = newval;
}
/*
 * RESTORE instruction: take a fill trap if no window can be restored,
 * otherwise rotate back one window and write `newval` into destination
 * register `rdest_num` (in the restored window).
 *
 * NOTE(review): reconstructed from a garbled listing — control-flow
 * skeleton inferred from the visible fragments; confirm against the
 * original.
 */
void
sparcv9_restore_instr(simcpu_t *sp, uint_t rdest_num, tvaddr_t newval)
{
	sparcv9_cpu_t *v9p = (sparcv9_cpu_t *)(sp->specificp);
	uint_t tt;

#if HYPERPRIVILEGED_USE_WARN /* { */
	if (V9_RED == v9p->state || V9_HyperPriv == v9p->state)
		EXEC_WARNING(("restore instruction in hyperprivileged mode "
		    "(%%pc=0x%llx)", sp->pc));
#endif /* } HYPERPRIVILEGED_USE_WARN */

	/* Possible fill trap ? */
	if (v9p->canrestore == 0) {
		if (v9p->otherwin != 0) {
			tt = Sparcv9_trap_fill_0_other |
			    (v9p->wstate_other << 2);
		} else {
			tt = Sparcv9_trap_fill_0_normal |
			    (v9p->wstate_normal << 2);
		}
		v9p->post_precise_trap(sp, tt);
		return;
	}

	v9p->cwp = DEC_MOD(v9p->cwp, v9p->nwins);
	v9p->cansave = INC_MOD(v9p->cansave, v9p->nwins);
	v9p->canrestore = DEC_MOD(v9p->canrestore, v9p->nwins);
	sparcv9_active_window(sp, v9p->cwp);

	/* destination register is in the restored window */
	if (!Zero_Reg(rdest_num))
		sp->intreg[rdest_num] = newval;
}
/*
 * RETURN instruction: like RESTORE, but transfers control to `targetpc`
 * (with a delay slot) instead of writing a destination register. Checks
 * target alignment first, then the fill-trap condition.
 *
 * NOTE(review): reconstructed from a garbled listing — control-flow
 * skeleton inferred from the visible fragments; confirm against the
 * original.
 */
void
sparcv9_return_instr(simcpu_t *sp, tvaddr_t targetpc)
{
	sparcv9_cpu_t *v9p = (sparcv9_cpu_t *)(sp->specificp);
	uint_t tt;

#if HYPERPRIVILEGED_USE_WARN /* { */
	if (V9_RED == v9p->state || V9_HyperPriv == v9p->state)
		EXEC_WARNING(("return instruction in hyperprivileged mode "
		    "(%%pc=0x%llx)", sp->pc));
#endif /* } HYPERPRIVILEGED_USE_WARN */

	/* instruction addresses must be 4-byte aligned */
	if ((targetpc & 3) != 0) {
		v9p->post_precise_trap(sp,
		    Sparcv9_trap_mem_address_not_aligned);
		return;
	}

	/* Possible fill trap ? */
	if (v9p->canrestore == 0) {
		if (v9p->otherwin != 0) {
			tt = Sparcv9_trap_fill_0_other |
			    (v9p->wstate_other << 2);
		} else {
			tt = Sparcv9_trap_fill_0_normal |
			    (v9p->wstate_normal << 2);
		}
		v9p->post_precise_trap(sp, tt);
		return;
	}

	v9p->cwp = DEC_MOD(v9p->cwp, v9p->nwins);
	v9p->cansave = INC_MOD(v9p->cansave, v9p->nwins);
	v9p->canrestore = DEC_MOD(v9p->canrestore, v9p->nwins);
	sparcv9_active_window(sp, v9p->cwp);

	SET_PC_WITH_DS(sp, targetpc);
}
* Other misc instruction implementations
/*
 * 64-bit unsigned divide: a / b into register rdest_num; posts a
 * division_by_zero trap (and writes nothing) when b == 0.
 * NOTE(review): the zero-divisor guard line was lost from the garbled
 * listing; its presence is implied by the trap constant — confirm.
 */
void
sparcv9_udiv64(simcpu_t *sp, uint_t rdest_num, uint64_t a, uint64_t b)
{
	sparcv9_cpu_t *v9p = (sparcv9_cpu_t *)(sp->specificp);

	if (b == 0) {
		v9p->post_precise_trap(sp, Sparcv9_trap_division_by_zero);
		return;
	}

	if (!Zero_Reg(rdest_num))
		sp->intreg[rdest_num] = a / b;
}
/*
 * Tcc (trap on condition code) implementation.
 *
 * NOTE(review): this listing is garbled — the function skeleton, the
 * V9_User-state branch body and the condition-test structure were lost.
 * Code below is reproduced verbatim; only comments added. What is
 * visible: the condition is evaluated via the sparcv9_cc_magic table
 * indexed by `cond` and the low nibble of ccr; magic traps are
 * intercepted via SS_MAGIC_TRAP_CC/SS_MAGIC_TRAP; otherwise a precise
 * trap of (tnum + Sparcv9_trap_trap_instruction) is posted.
 */
sparcv9_trapcc(simcpu_t
*sp
, uint64_t tnum
, uint_t cc
, cond_type_t cond
)
sparcv9_cpu_t
*v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
/* NOTE(review): body of the user-state branch lost — likely masks tnum */
if (V9_User
== v9p
->state
)
/* condition true per the magic table and the ccr nibble ? */
if ((sparcv9_cc_magic
[cond
] >> (ccr
& 0xf)) & 1) {
sparcv9_cpu_t
*v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
/* magic (simulator backdoor) traps are consumed here */
if (SS_MAGIC_TRAP_CC(cc
) && SS_MAGIC_TRAP(sp
, tnum
)) {
v9p
->post_precise_trap(sp
,
tnum
+ Sparcv9_trap_trap_instruction
);
/*
 * In the event an instruction implementation needs to generate
 * a floating point exception, this function is called.
 *
 * NOTE(review): this listing is garbled — declarations and the
 * exception-prioritization branches (the OF case preceding the visible
 * `else if (m & V9_FSR_UF_BIT)`) were lost. Code reproduced verbatim;
 * only comments added. Visible behavior: set FTT to
 * IEEE_754_exception in v9_fsr_ctrl, mask CEXC against TEM for the
 * OF/UF/NX bits, prioritize, log under DBGFSR, then post a precise
 * fp_exception_ieee_754 trap.
 */
sparcv9_deliver_ieee_exception(simcpu_t
*sp
)
sparcv9_cpu_t
*v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
/* replace any previous trap type with IEEE_754_exception */
sp
->v9_fsr_ctrl
&= ~V9_FSR_FTT_MASK
;
sp
->v9_fsr_ctrl
|= SPARCv9_FTT_IEEE_754_exception
<< V9_FSR_FTT_SHIFT
;
/* CEXC bits are modified by TEM bits when a trap is taken */
m
= sp
->v9_fsr_exc
& sp
->v9_fsr_tem
&
(V9_FSR_OF_BIT
|V9_FSR_UF_BIT
|V9_FSR_NX_BIT
);
/* prioritize exception */
/* NOTE(review): preceding OF-priority branch lost from this view */
else if (m
& V9_FSR_UF_BIT
)
sp
->v9_fsr_exc
&= ~(V9_FSR_OF_BIT
|V9_FSR_UF_BIT
|V9_FSR_NX_BIT
);
DBGFSR( lprintf(sp
->gid
, "sparcv9_deliver_ieee_exception: pc=0x%llx, "
"fsr=0x%llx\n", sp
->pc
, ss_get_fsr(sp
)); );
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_exception_ieee_754
);
#ifndef FP_DECODE_DISABLED
* In the event an instruction implementation needs to generate
* a floating point disabled exception, this function is called
sparcv9_deliver_fp_disabled_exception(simcpu_t
*sp
)
sparcv9_cpu_t
*v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
v9p
->post_precise_trap(sp
, Sparcv9_trap_fp_disabled
);
#endif /* FP_DECODE_DISABLED */
/*
 * Byte-swap *regp in place for an access of `count` bytes
 * (1, 2, 4 or 8); any other count is fatal.
 * NOTE(review): switch skeleton reconstructed from a garbled listing;
 * the four BSWAP cases and the fatal() default were visible.
 */
void
sparcv9_invert_endianess(uint64_t *regp, uint32_t count)
{
	uint64_t new_reg;

	switch (count) {
	case 1:
		new_reg = (uint64_t)BSWAP_8(*regp);
		break;
	case 2:
		new_reg = (uint64_t)BSWAP_16(*regp);
		break;
	case 4:
		new_reg = (uint64_t)BSWAP_32(*regp);
		break;
	case 8:
		new_reg = (uint64_t)BSWAP_64(*regp);
		break;
	default:
		fatal("sparcv9_invert_endianess() count of %d - not supported",
		    count);
	}

	*regp = new_reg;
}
/*
 * Functions and variables to assist with debugging
 * Legion's SPARC v9 support.
 */
/*
 * Printable names for the V9_* execution states, indexed by state value.
 * NOTE(review): the initializer entries and closing brace were lost from
 * this garbled listing; restore them from the original source.
 */
char *sparcv9_state_name
[] = {
/* 0 is not a legit state - tells us allocated but not inited */
/*
 * FLUSH instruction support, by physical address.
 * The current xicache implementation is completely coherent
 * with memory. Therefore, there is no need to flush the
 * xicache upon a flush instruction — this is deliberately a no-op.
 */
void
ss_iflush_by_pa(simcpu_t *sp, uint64_t pa, uint_t gran)
{
}
#if !defined(NDEBUG) /* { */
* Assumes log_lock() has been called.
/*
 * Dump the 8 global/out/local/in integer registers of the current
 * window, one row per register index.
 * Assumes log_lock() has been called (see comment above).
 * NOTE(review): this listing is garbled — the function skeleton and the
 * log_printf value arguments (the register reads) were lost. Code
 * reproduced verbatim; only comments added.
 */
sparcv9_dump_intregs(simcpu_t
*sp
)
for (i
= 0; i
< 8; i
++) {
log_printf(sp
->gid
, "g%d=0x%016llx o%d=0x%016llx "
"l%d=0x%016llx i%d=0x%016llx\n",
/*
 * Dump the full v9 CPU state for debugging:
 * - global, out, local and in registers
 * - global level registers
 * - trap stack, window-management registers, per-level globals
 * Assumes log_lock() has been called.
 * NOTE(review): this listing is garbled — declarations (id, i, gp), the
 * function skeleton, and parts of the first format string were lost.
 * Code reproduced verbatim; only comments added.
 */
sparcv9_dump_state(simcpu_t
*sp
)
sparcv9_cpu_t
*v9p
= (sparcv9_cpu_t
*)(sp
->specificp
);
/* dump current v9 CPU state */
log_printf(id
, "cpu %d : cycle=0x%llx state=%s : PC=0x%llx "
id
, sp
->cycle
, sparcv9_state_name
[v9p
->state
], sp
->pc
,
/* one line per occupied trap level, 1..maxtl */
for (i
= 1; i
<= v9p
->maxtl
; i
++)
log_printf(id
, "tstack: [%d]\tTSTATE=0x%llx\tTT=0x%llx\t"
"TPC=0x%llx\tTNPC=0x%llx\tHTSTATE=0x%llx\n",
i
, N_TSTATE(v9p
, i
), N_TT(v9p
, i
), N_TPC(v9p
, i
),
N_TNPC(v9p
, i
), N_HTSTATE(v9p
, i
));
sparcv9_dump_intregs(sp
);
log_printf(id
, "%%asi: 0x%llx : cwp=0x%x\n", sp
->v9_asi
, v9p
->cwp
);
/* window management state */
log_printf(id
, "cansave=0x%x : canrestore=0x%x : otherwin=0x%x : "
"cleanwin=0x%x : wstate other=0x%x, normal=0x%x\n",
v9p
->cansave
, v9p
->canrestore
, v9p
->otherwin
, v9p
->cleanwin
,
v9p
->wstate_other
, v9p
->wstate_normal
);
/* globals of the level below the current gl */
log_printf(id
, "globals[%d (gl-1)]:\n", v9p
->gl
- 1);
gp
= &v9p
->globalsp
[(v9p
->gl
- 1) * V9_GLOBAL_GROUP
];
for (i
= 1; i
< V9_GLOBAL_GROUP
; i
++)
log_printf(id
, " %%g%d = 0x%016llx\n", i
, gp
[i
]);
/*
 * Compact debug dump: cycle/state banner, integer registers, the
 * occupied trap stack (1..tl), and all global levels below gl.
 * NOTE(review): this listing is garbled — declarations (i, gp, id), the
 * function skeleton, loop-closing braces and some printf arguments were
 * lost. Code reproduced verbatim; only comments added.
 */
sparcv9_dump_stack(simcpu_t
*sp
)
v9p
= (sparcv9_cpu_t
*)sp
->specificp
;
/* banner: cycle count and execution-state name */
log_printf(sp
->gid
, "[0x%llx:%s]\n",
sp
->cycle
, sparcv9_state_name
[v9p
->state
]);
/* integer registers, one row per index (value args lost from view) */
for (i
= 0; i
< 8; i
++) {
log_printf(sp
->gid
, "g%d=0x%016llx o%d=0x%016llx "
"l%d=0x%016llx i%d=0x%016llx\n",
/* trap stack entries 1..tl */
for (i
= 1; i
<= v9p
->tl
; i
++) {
log_printf(id
, "tstack[%d]:\thtstate=0x%llx\t"
"tstate=0x%llx\ttt=0x%llx\ttpc=0x%llx\t"
i
, N_HTSTATE(v9p
, i
), N_TSTATE(v9p
, i
),
N_TT(v9p
, i
), N_TPC(v9p
, i
), N_TNPC(v9p
, i
));
/* every global level below the current gl */
for (i
= 0; i
< v9p
->gl
; i
++) {
gp
= &v9p
->globalsp
[i
* V9_GLOBAL_GROUP
];
log_printf(id
, "global[%d]:\t%%g1=0x%llx %%g2=0x%llx "
"%%g3=0x%llx %%g4=0x%llx %%g5=0x%llx %%g6=0x%llx "
i
, gp
[1], gp
[2], gp
[3], gp
[4], gp
[5], gp
[6], gp
[7]);
* - current instruction (passed in)
* - global, out, local and in registers
* - global level registers
* This function holds the log lock to ensure that all the output happens
sparcv9_trace_output(simcpu_t
*sp
, uint32_t instn
)
v9p
= (sparcv9_cpu_t
*)sp
->specificp
;
DBGEL( sparcv9_dump_stack(sp
); );
sparcv9_idis(ibuf
, 160, instn
, sp
->pc
);
log_printf(sp
->gid
, "[0x%llx:%.6s] pc=0x%llx npc=0x%llx tl=%d gl=%d "
"asi=0x%x instn=%08x: %s\n",
sp
->cycle
, sparcv9_state_name
[v9p
->state
],
sp
->pc
, sp
->npc
, v9p
->tl
, v9p
->gl
, sp
->v9_asi
, instn
, ibuf
);
#if (INTERNAL_BUILD == 0) /* { */
* Dummy function to dis-assemble an instruction
void sparcv9_idis(char * bufp
, uint_t size
, uint32_t instn
, tvaddr_t address
)
snprintf(bufp
, size
, ".word\t0x%08x", instn
);