/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: niagara.c
* Copyright (c) 2006 Sun Microsystems, Inc.  All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/

/*
* Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
* Use is subject to license terms.
*/

#pragma ident	"@(#)niagara.c	1.62	07/02/28 SMI"
#include <string.h> /* memcpy/memset */
static void niagara_init_trap_list();
static bool_t niagara_init_proc_type(proc_type_t *proc_typep);
static op_funcp niagara_decode_me(simcpu_t *sp, xicache_instn_t *xcip, uint32_t instn);
static void niagara_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp);
static void niagara_send_xirq(simcpu_t *sp, uint64_t val);
static void niagara_set_sfsr(simcpu_t *sp, ss_mmu_t *mmup, tvaddr_t addr,
    uint_t ft, ss_ctx_t ct, uint_t asi, uint_t w, uint_t e);
static void niagara_domain_check(domain_t *domainp);
static void niagara_init_trap_list()
{
	static ss_trap_list_t setup_list[] = {

/*	Priorities 0 = highest, XX = Lowest	*/
/*	Number	Name					Priority	User	Priv	HPriv	*/

/* 0x00 */ { T( legion_save_state ),			Pri( 0, 0),	H,	H,	H },
/* 0x01 */ { T( power_on_reset ),			Pri( 0, 0),	H,	H,	H },
/* 0x02 */ { T( watchdog_reset ),			Pri( 1, 0),	H,	H,	H },
/* 0x03 */ { T( externally_initiated_reset ),		Pri( 1, 0),	H,	H,	H },
/* 0x04 */ { T( software_initiated_reset ),		Pri( 1, 0),	H,	H,	H },
/* 0x05 */ { T( RED_state_exception ),			Pri( 1, 0),	H,	H,	H },

/* 0x08 */ { T( instruction_access_exception ),		Pri( 5, 0),	H,	H,	X },
/* 0x09 */ { T( instruction_access_MMU_miss ),		Pri( 2,16),	SW,	SW,	SW },
/* 0x0a */ { T( instruction_access_error ),		Pri( 3, 0),	H,	H,	H },

/* 0x10 */ { T( illegal_instruction ),			Pri( 7, 0),	H,	H,	H },
/* 0x11 */ { T( privileged_opcode ),			Pri( 6, 0),	P,	X,	X },

	/* LDD and STD are in fact implemented by niagara */
/* 0x12 */ { T( unimplemented_LDD ),			Pri( 6, 0),	X,	X,	X }, /* error if received by hypervisor. */
/* 0x13 */ { T( unimplemented_STD ),			Pri( 6, 0),	X,	X,	X }, /* error if received by hypervisor. */

/* 0x20 */ { T( fp_disabled ),				Pri( 8, 0),	P,	P,	UH }, /* error if received by hypervisor. */
/* 0x21 */ { T( fp_exception_ieee_754 ),		Pri(11, 0),	P,	P,	UH }, /* error if received by hypervisor. */
/* 0x22 */ { T( fp_exception_other ),			Pri(11, 0),	P,	P,	UH }, /* error if received by hypervisor. */
/* 0x23 */ { T( tag_overflow ),				Pri(14, 0),	P,	P,	UH }, /* error if received by hypervisor. */
/* 0x24 */ { T( clean_window ),				Pri(10, 0),	P,	P,	UH }, /* error if received by hypervisor - windows not used. */

/* 0x28 */ { T( division_by_zero ),			Pri(15, 0),	P,	P,	UH }, /* error if received by hypervisor. */
/* 0x29 */ { T( internal_processor_error ),		Pri( 4, 0),	H,	H,	H }, /* generated by register parity errors */

/* 0x30 */ { T( data_access_exception ),		Pri(12, 0),	H,	H,	UH }, /* error if received by hypervisor - MMU not used. */
/* 0x31 */ { T( data_access_MMU_miss ),			Pri(12, 0),	SW,	SW,	SW }, /* Should not be generated by hardware */
/* 0x32 */ { T( data_access_error ),			Pri(12, 0),	H,	H,	H }, /* handle error and generate report to appropriate supervisor. */
/* 0x33 */ { T( data_access_protection ),		Pri(12, 0),	H,	H,	H }, /* error if received by hypervisor - MMU not used. */
/* 0x34 */ { T( mem_address_not_aligned ),		Pri(10, 0),	H,	H,	UH }, /* error if received by hypervisor. */
/* 0x35 */ { T( LDDF_mem_address_not_aligned ),		Pri(10, 0),	H,	H,	UH }, /* error if received by hypervisor. */
/* 0x36 */ { T( STDF_mem_address_not_aligned ),		Pri(10, 0),	H,	H,	UH }, /* error if received by hypervisor. */
/* 0x37 */ { T( privileged_action ),			Pri(11, 0),	H,	X,	X }, /* error if received from hypervisor. */
/* 0x38 */ { T( LDQF_mem_address_not_aligned ),		Pri(10, 0),	H,	H,	UH }, /* error if received by hypervisor. */
/* 0x39 */ { T( STQF_mem_address_not_aligned ),		Pri(10, 0),	H,	H,	UH }, /* error if received by hypervisor. */

/* 0x3e */ { T( instruction_real_translation_miss ),	Pri( 2, 0),	H,	H,	H }, /* real to pa entry not found in ITLB */
/* 0x3f */ { T( data_real_translation_miss ),		Pri(12, 0),	H,	H,	H }, /* real to pa entry not found in DTLB */

	/* this one ever generated ? */
/* 0x40 */ { T( async_data_error ),			Pri( 2, 0),	H,	H,	H }, /* remap to sun4v error report */

/* 0x41 */ { T( interrupt_level_1 ),			Pri(31, 0),	P,	P,	X },
/* 0x42 */ { T( interrupt_level_2 ),			Pri(30, 0),	P,	P,	X },
/* 0x43 */ { T( interrupt_level_3 ),			Pri(29, 0),	P,	P,	X },
/* 0x44 */ { T( interrupt_level_4 ),			Pri(28, 0),	P,	P,	X },
/* 0x45 */ { T( interrupt_level_5 ),			Pri(27, 0),	P,	P,	X },
/* 0x46 */ { T( interrupt_level_6 ),			Pri(26, 0),	P,	P,	X },
/* 0x47 */ { T( interrupt_level_7 ),			Pri(25, 0),	P,	P,	X },
/* 0x48 */ { T( interrupt_level_8 ),			Pri(24, 0),	P,	P,	X },
/* 0x49 */ { T( interrupt_level_9 ),			Pri(23, 0),	P,	P,	X },
/* 0x4a */ { T( interrupt_level_a ),			Pri(22, 0),	P,	P,	X },
/* 0x4b */ { T( interrupt_level_b ),			Pri(21, 0),	P,	P,	X },
/* 0x4c */ { T( interrupt_level_c ),			Pri(20, 0),	P,	P,	X },
/* 0x4d */ { T( interrupt_level_d ),			Pri(19, 0),	P,	P,	X },
/* 0x4e */ { T( interrupt_level_e ),			Pri(18, 0),	P,	P,	X },
/* 0x4f */ { T( interrupt_level_f ),			Pri(17, 0),	P,	P,	X },

/* 0x5e */ { T( hstick_match ),				Pri( 2, 0),	H,	H,	H },
/* 0x5f */ { T( trap_level_zero ),			Pri( 2, 8),	H,	H,	X }, /* This trap requires TL==0, priv==1 and hpriv==0 */

/* 0x60 */ { T( interrupt_vector_trap ),		Pri(16, 0),	H,	H,	H }, /* handle & remap to sun4v as appropriate mondo queue */
/* 0x61 */ { T( RA_watchpoint ),			Pri(12, 0),	SW,	SW,	SW }, /* not used by hypervisor, so error if received from hypervisor. */
/* 0x62 */ { T( VA_watchpoint ),			Pri(11, 0),	P,	P,	X }, /* error - VA watchpoints should be pended if hpriv=1 */
/* 0x63 */ { T( ECC_error ),				Pri(33, 0),	H,	H,	H }, /* handle & create sun4v error report(s) */
/* 0x64 */ { T( fast_instruction_access_MMU_miss ),	Pri( 2,24),	H,	H,	H }, /* handle & proper TSB check. */
/* 0x68 */ { T( fast_data_access_MMU_miss ),		Pri(12, 0),	H,	H,	H }, /* handle & proper TSB check. */
/* 0x6c */ { T( fast_data_access_protection ),		Pri(12, 0),	H,	H,	H }, /* handle & proper TSB check. */

/* 0x74 */ { TN1( modular_arithmetic ),			Pri(16, 1),	H,	H,	H },
/* 0x76 */ { T( instruction_breakpoint ),		Pri( 7, 1),	H,	H,	H },
/* 0x78 */ { TN1( data_error ),				Pri(13, 0),	H,	H,	H },

/* 0x7c */ { T( cpu_mondo_trap ),			Pri(16, 2),	P,	P,	X },
/* 0x7d */ { T( dev_mondo_trap ),			Pri(16, 3),	P,	P,	X },
/* 0x7e */ { T( resumable_error ),			Pri(33, 0),	P,	P,	X },
	/* faked by the hypervisor */
/* 0x7f */ { T( nonresumable_error ),			Pri( 4, 0),	SW,	SW,	SW },

/* 0x80 */ { T( spill_0_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0x84 */ { T( spill_1_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0x88 */ { T( spill_2_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0x8c */ { T( spill_3_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0x90 */ { T( spill_4_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0x94 */ { T( spill_5_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0x98 */ { T( spill_6_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0x9c */ { T( spill_7_normal ),			Pri( 9, 0),	P,	P,	UH },

/* 0xa0 */ { T( spill_0_other ),			Pri( 9, 0),	P,	P,	UH },
/* 0xa4 */ { T( spill_1_other ),			Pri( 9, 0),	P,	P,	UH },
/* 0xa8 */ { T( spill_2_other ),			Pri( 9, 0),	P,	P,	UH },
/* 0xac */ { T( spill_3_other ),			Pri( 9, 0),	P,	P,	UH },
/* 0xb0 */ { T( spill_4_other ),			Pri( 9, 0),	P,	P,	UH },
/* 0xb4 */ { T( spill_5_other ),			Pri( 9, 0),	P,	P,	UH },
/* 0xb8 */ { T( spill_6_other ),			Pri( 9, 0),	P,	P,	UH },
/* 0xbc */ { T( spill_7_other ),			Pri( 9, 0),	P,	P,	UH },

/* 0xc0 */ { T( fill_0_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0xc4 */ { T( fill_1_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0xc8 */ { T( fill_2_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0xcc */ { T( fill_3_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0xd0 */ { T( fill_4_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0xd4 */ { T( fill_5_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0xd8 */ { T( fill_6_normal ),			Pri( 9, 0),	P,	P,	UH },
/* 0xdc */ { T( fill_7_normal ),			Pri( 9, 0),	P,	P,	UH },

/* 0xe0 */ { T( fill_0_other ),				Pri( 9, 0),	P,	P,	UH },
/* 0xe4 */ { T( fill_1_other ),				Pri( 9, 0),	P,	P,	UH },
/* 0xe8 */ { T( fill_2_other ),				Pri( 9, 0),	P,	P,	UH },
/* 0xec */ { T( fill_3_other ),				Pri( 9, 0),	P,	P,	UH },
/* 0xf0 */ { T( fill_4_other ),				Pri( 9, 0),	P,	P,	UH },
/* 0xf4 */ { T( fill_5_other ),				Pri( 9, 0),	P,	P,	UH },
/* 0xf8 */ { T( fill_6_other ),				Pri( 9, 0),	P,	P,	UH },
/* 0xfc */ { T( fill_7_other ),				Pri( 9, 0),	P,	P,	UH },

/*0x100-0x17f*/{ T( trap_instruction ),			Pri(16,32),	P,	P,	H }, /* hv1: handles hypervisor traps only. Error if received from hypervisor. */
/*0x180-0x1ff*/{ T( htrap_instruction ),		Pri(16,32),	X,	H,	UH }, /* used to implement the supervisor to hypervisor API call. */
	};
	uint_t i;
	for (i = 0; setup_list[i].trap_type != -1; i++) {
		ASSERT( setup_list[i].trap_type >= SS_trap_legion_save_state &&
		    setup_list[i].trap_type < SS_trap_illegal_value );
		ss_trap_list[ setup_list[i].trap_type ] = setup_list[i];
	}
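	/*
	 * Note on the single-letter columns in the table above: judging by
	 * the adjacent entry comments, H marks traps taken by the hypervisor
	 * handler, P traps delivered to the privileged (supervisor) handler,
	 * SW traps that the hardware never generates (software-created only),
	 * X traps that are illegal/unexpected at that privilege level, and
	 * UH traps that are an error if taken while already in hypervisor
	 * mode. Pri(a,b) apparently orders trap delivery: a is the
	 * architectural priority (0 = highest) and b a sub-priority for ties.
	 */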
	/* Now clone the trap instruction entries */
	for (i = 0x101; i < 0x180; i++) {
		ss_trap_list[i] = ss_trap_list[0x100];
		ss_trap_list[i].trap_type = i;
	}
	for (i = 0x181; i < 0x200; i++) {
		ss_trap_list[i] = ss_trap_list[0x180];
		ss_trap_list[i].trap_type = i;
	}
}
extern struct fpsim_functions fpsim_funclist;

proc_type_t proc_type_niagara = {
	false,		/* module initialised */

	/* execution support functions */

#if ERROR_TRAP_GEN /* { */
#endif /* } */

	/* pointer to fpsim instructions */

	/* performance measuring funcs */

	/* dump tlb, instruction counts etc */
	ss_dump_instruction_counts,

	/* external interface methods */

	/* debugger interface methods */
	NULL,		/* debug_hook_dumpp */
/*
 * Niagara uses registers located at magic addresses in its physical address
 * space to control functional units placed outside the direct processor core.
 * We emulate these with pseudo devices that are created implicitly when a
 * Niagara processor is created.
 * To support this we have a number of device and function definitions below.
 */
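/*
 * For reference, the magic address ranges claimed below in
 * ss_setup_pseudo_devs() are:
 *
 *	0x8000000000	JBI registers
 *	0x9600000000	clock unit
 *	0x9700000000	DRAM controllers (4KB per memory bank)
 *	0x9800000000	IOB
 *	0x9f00000000	JBus registers
 *	0xA000000000	L2 cache control registers
 *	0xff00000000	SSI
 */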
static void ss_clock_init(config_dev_t *);
static void ss_dram_ctl_init(config_dev_t *);
static void ss_iob_init(config_dev_t *);
static void ss_jbi_init(config_dev_t *);
static void ss_jbus_init(config_dev_t *);
static void ss_l2_ctl_init(config_dev_t *);
static void ss_ssi_init(config_dev_t *);
static bool_t ss_clock_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t ss_dram_ctl_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t *regp);
static bool_t ss_iob_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t ss_jbi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t ss_jbus_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t ss_l2_ctl_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t *regp);
static bool_t ss_ssi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static dev_type_t dev_type_ss_clock = {
	generic_device_non_cacheable,
};

static dev_type_t dev_type_ss_dram_ctl = {
	generic_device_non_cacheable,
};

static dev_type_t dev_type_ss_l2_ctl = {
	generic_device_non_cacheable,
};

static dev_type_t dev_type_ss_iob = {
	generic_device_non_cacheable,
};

static dev_type_t dev_type_ss_jbi = {
	generic_device_non_cacheable,
};

static dev_type_t dev_type_ss_jbus = {
	generic_device_non_cacheable,
};

static dev_type_t dev_type_ss_ssi = {
	generic_device_non_cacheable,
};
/*
 * Perform any processor specific parsing for "proc" elements in
 * Legion config file. Returns true if token was handled by this
 * function, false otherwise.
 */
static bool_t
ss_parse_proc_entry(ss_proc_t *procp, domain_t *domainp)
{
	if (streq(lex.strp, "rust_jbi_stores")) {
		procp->rust_jbi_stores = true;
	} else {
		return false;	/* Didn't match any Niagara specific element */
	}

	return true;	/* Handled some Niagara specific element */
}
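/*
 * Illustrative (hypothetical) Legion config file fragment that would reach
 * this parser - "rust_jbi_stores" is the only Niagara-specific token
 * handled here:
 *
 *	proc {
 *		...
 *		rust_jbi_stores;
 *	}
 */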
/*
 * Set up the pseudo physical devices specific for N1.
 */
void ss_setup_pseudo_devs(domain_t *domainp, ss_proc_t *procp)
{
	config_dev_t *pd, *overlapp;

	procp->clockp = Xcalloc(1, ss_clock_t);
	pd = Xcalloc(1, config_dev_t);
	pd->dev_typep = &dev_type_ss_clock;
	insert_domain_address(domainp, pd, 0x9600000000LL,
	    0x9600000000LL + 0x100000000LL);
	overlapp = insert_domain_device(domainp, pd);
	if (overlapp != NULL) {
		lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
		    overlapp->dev_typep->dev_type_namep,
		    overlapp->addrp->baseaddr,
		    pd->dev_typep->dev_type_namep,
		    pd->addrp->baseaddr);
	}

	/*
	 * FIXME: for the moment this is fixed at 4 - need to adjust with a variable
	 * FIXME: Should the allocation of mbankp be in ss_init instead ?
	 */
	procp->mbankp = Xcalloc(procp->num_mbanks, ss_dram_bank_t);
	pd = Xcalloc(1, config_dev_t);
	pd->dev_typep = &dev_type_ss_dram_ctl;
	procp->dram_ctl_devp = pd;
	insert_domain_address(domainp, pd, 0x9700000000LL,
	    0x9700000000LL + 4096LL * (uint64_t)procp->num_mbanks);
	overlapp = insert_domain_device(domainp, pd);
	if (overlapp != NULL) {
		lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
		    overlapp->dev_typep->dev_type_namep,
		    overlapp->addrp->baseaddr,
		    pd->dev_typep->dev_type_namep,
		    pd->addrp->baseaddr);
	}

	procp->iobp = Xcalloc(1, ss_iob_t);
	pd = Xcalloc(1, config_dev_t);
	pd->dev_typep = &dev_type_ss_iob;
	insert_domain_address(domainp, pd, 0x9800000000LL,
	    0x9800000000LL + 0x100000000LL);
	overlapp = insert_domain_device(domainp, pd);
	if (overlapp != NULL) {
		lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
		    overlapp->dev_typep->dev_type_namep,
		    overlapp->addrp->baseaddr,
		    pd->dev_typep->dev_type_namep,
		    pd->addrp->baseaddr);
	}

	procp->jbip = Xcalloc(1, ss_jbi_t);
	pd = Xcalloc(1, config_dev_t);
	pd->dev_typep = &dev_type_ss_jbi;
	insert_domain_address(domainp, pd, 0x8000000000LL,
	    0x8000000000LL + 0x100000000LL);
	overlapp = insert_domain_device(domainp, pd);
	if (overlapp != NULL) {
		lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
		    overlapp->dev_typep->dev_type_namep,
		    overlapp->addrp->baseaddr,
		    pd->dev_typep->dev_type_namep,
		    pd->addrp->baseaddr);
	}

	procp->jbusp = Xcalloc(1, ss_jbus_t);
	pd = Xcalloc(1, config_dev_t);
	pd->dev_typep = &dev_type_ss_jbus;
	insert_domain_address(domainp, pd, 0x9f00000000LL,
	    0x9f00000000LL + 0x100000000LL);
	overlapp = insert_domain_device(domainp, pd);
	if (overlapp != NULL) {
		lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
		    overlapp->dev_typep->dev_type_namep,
		    overlapp->addrp->baseaddr,
		    pd->dev_typep->dev_type_namep,
		    pd->addrp->baseaddr);
	}

	procp->num_l2banks = L2_BANKS;
	procp->l2p = Xcalloc(1, ss_l2_cache_t);
	pd = Xcalloc(1, config_dev_t);
	pd->dev_typep = &dev_type_ss_l2_ctl;
	insert_domain_address(domainp, pd, 0xA000000000LL,
	    0xA000000000LL + 0x1F00000000LL);
	overlapp = insert_domain_device(domainp, pd);
	if (overlapp != NULL) {
		lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
		    overlapp->dev_typep->dev_type_namep,
		    overlapp->addrp->baseaddr,
		    pd->dev_typep->dev_type_namep,
		    pd->addrp->baseaddr);
	}

	procp->ssip = Xcalloc(1, ss_ssi_t);
	pd = Xcalloc(1, config_dev_t);
	pd->dev_typep = &dev_type_ss_ssi;
	insert_domain_address(domainp, pd, 0xff00000000LL,
	    0xff00000000LL + 0x10000000LL);
}
/*
 * Returns false if error initialising module, true if init was OK
 */
static bool_t
niagara_init_proc_type(proc_type_t *proctp)
{
	if (proctp->flag_initialised) {
		warning("Initialisation of module %s more than once - bailing",
		    proctp->proc_type_namep);
		return true;
	}

	/* stuff here we only need to do once if we want to use this module */
	niagara_init_trap_list();

	proctp->flag_initialised = true;

	return true;
}
/*
 * We arrive here because:
 *	1) a malformed (unaligned PC)
 */
void ss_xic_miss(simcpu_t *sp, xicache_line_t *xc_linep, tvaddr_t pc)
{
	v9p = (sparcv9_cpu_t *)(sp->specificp);
	nsp = v9p->impl_specificp;
	npp = sp->config_procp->procp;

	/* FIXME: need a current context variable, not a test here */
	context = (v9p->tl > 0) ? SS_NUCLEUS_CONTEXT : nsp->pri_context;

	/* Quick check then for v9 bus error */
	/* The PC always has bits 0 & 1 zero */

	/* align the pc to the start of the XC line */
	tag = va & XICACHE_TAG_PURE_MASK;
	/*
	 * Perform a virtual to physical translation
	 * so we can determine if we are dealing with
	 * a TLB miss or simply an x-cache miss.
	 */

	/* Find the pa corresponding to the line we need */
	/* We assume that for SunSPARC, the TLB is off in Hyper priv mode */
	/* FIXME: we should probably do this by swizzling a function pointer */
	/* for this when we change mode, rather than having an if here ... fix later */

	if (v9p->pstate.addr_mask) {
		va &= MASK64(31, 0);
	}
	/* NOTE: we dont mask tag ... we allow that to match the 64bit address */

	ss_trap_type_t miss_trap_type;

	/* If MMU disabled, but we're in priv/user mode use real addresses */
	if (!nsp->immu.enabled) {
		context = SS_TLB_REAL_CONTEXT;
	}

	/*
	 * check for an out of range address (i.e. it lies within the "VA hole")
	 */
	if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) {
		niagara_set_sfsr(sp, &nsp->immu, va, MMU_SFSR_FT_VARANGE,
		    (v9p->tl > 0) ? ss_ctx_nucleus : ss_ctx_primary,
		    0/*fixme*/, 0, 0);
		v9p->post_precise_trap(sp,
		    (sparcv9_trap_type_t)SS_trap_instruction_access_exception);
		return;
	}
	RW_rdlock(&tlbp->rwlock);

	/* FIXME: Need a better hash than this ! */
	idx = va >> SS_MAX_PAGE_SIZE_BITS;

	/*
	 * So we search for a matching page using the info we have in the
	 * hash - while another thread might possibly be removing or
	 * inserting an entry into the same table.
	 */
	for (tep = tlbp->hash[idx].ptr; tep != (tlb_entry_t *)0; tep = tep->nextp) {
		/* try and match the entry as appropriate */
		if (((tep->tag_pfn ^ va) >> tep->match_shift) == 0 &&
		    tep->match_context == context && tep->partid == partid)
			goto tlb_match;
	}
	RW_unlock(&tlbp->rwlock);

	DBGMISS( lprintf(sp->gid, "itlb miss: pc=%lx va=%lx ctx=%x\n", pc, va, context); );
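	/*
	 * Matching note: the hash bucket index is just the VA with the
	 * largest page offset stripped (SS_MAX_PAGE_SIZE_BITS). Within a
	 * bucket, (tag_pfn ^ va) clears every bit where the tag and the VA
	 * agree, and shifting right by match_shift (the entry's page-offset
	 * width) discards the in-page bits, so a zero result means the VA
	 * falls inside the entry's page regardless of page size. E.g. for an
	 * 8KB page (match_shift presumably 13), va=0x12345678 and
	 * tag_pfn=0x12344000 differ only in bits 12:0, so the shifted XOR
	 * is 0 and the entry matches.
	 */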
	/*
	 * If the MMU is "disabled" in privileged mode ... this is a real miss, not a
	 * virtual translation miss, so the fault context and trap type are different
	 */
	if (context != SS_TLB_REAL_CONTEXT) {
		miss_trap_type = SS_trap_fast_instruction_access_MMU_miss;
	} else {
		miss_context = 0;	/* null for ra->pa miss undefined ? */
		miss_trap_type = SS_trap_instruction_real_translation_miss;
	}

	SET_ITLB_FAULT( nsp, va );
	nsp->immu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; /* FIXME: - do properly later */
	DBGMMU( lprintf(sp->gid, "IMMU tag access = 0x%llx\n", nsp->immu.tag_access_reg); );
	v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
	return;

tlb_match:;
	pa_tag += tep->pa_offset;
	RW_unlock(&tlbp->rwlock);
	/*
	 * Errors on itlb hit: stash table_entry pointer and if
	 * subsequent itlb hit on same entry post error again.
	 */
	if (sp->error_check && (ep = find_errconf(sp, IFETCH, IMDU))) {
		DBGERR( lprintf(sp->gid, "ss_xic_miss(): "
		    " errorp->itep=%x, tep=%x\n", errorp->itep, tep); );
		if ((tlb_entry_t *)errorp->itep == tep) {
			ss_error_condition(sp, ep);
		} else {
			errorp->itep = tep;
			ss_error_condition(sp, ep);
		}
	}

	if ((flags & SS_TLB_FLAG_PRIV) && v9p->state == V9_User) {
		SET_ITLB_FAULT( nsp, va );
		nsp->immu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | context; /* FIXME: - do properly later */
		DBGMMU( lprintf(sp->gid,
		    "priv mapping, state==user: IMMU tag access = 0x%llx\n",
		    nsp->immu.tag_access_reg); );
		niagara_set_sfsr(sp, &nsp->immu, va, MMU_SFSR_FT_PRIV,
		    (v9p->tl > 0) ? ss_ctx_nucleus : ss_ctx_primary,
		    0/*fixme*/, 0, 0);
		v9p->post_precise_trap(sp,
		    (sparcv9_trap_type_t)SS_trap_instruction_access_exception);
		return;
	}
	/* Niagara has no EXEC permission check for I fetches */

	/* Niagara only implements 40 bits of PA, the tlb code
	   masks PA so here we need to mask bypass PAs */

	/*
	 * Now that we have the internal PA, map it to the real
	 * external PA before looking it up in the domain.
	 * This does not modify memory addresses, only JBus addresses.
	 */
	if (pa >= 0x800e000000ull && pa < 0x8010000000ull) {
	} else if (pa >= 0x8010000000ull && pa < 0x8100000000ull) {
	} else if (pa >= 0xc000000000ull && pa < 0xff00000000ull) {
	}

	/*
	 * OK - now go get the instructions to fill in the xc-line
	 * ... start by finding the device that has the
	 * backing store we need.
	 * optimise: by guessing at the last device found.
	 */

	/* now find the device - looking in the cache first */
	cap = sp->xic_miss_addrp;
	if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) {
		config_proc_t *config_procp;

		config_procp = sp->config_procp;
		domainp = config_procp->domainp;

		cap = find_domain_address(domainp, pa);
		if (cap == NULL) {
			/* OK it's a bus error there was no backing store */
			fatal("bus error - instruction fetch from pc=0x%llx "
			    "(cacheline va=0x%llx -> physical 0x%llx)",
			    pc, va, pa); /* FIXME */
		}
	}

	sp->xic_miss_addrp = cap;	/* cache for next time */

	/* try and get the buffer pointer */
	extent = cap->config_devp->dev_typep->dev_cacheable(cap, DA_Instn,
	    pa_tag - cap->baseaddr, &bufp);

	if (extent < XICACHE_LINE_SIZE) {
		/* bus error again ? or fill from multiple devices ? */
		fatal("fix bus error 2");
	}
	/*
	 * Errors on ifetch to icache or L2 cache
	 * Make sure the L2 cache is enabled
	 */
	if (sp->error_check == true && errorp->check_xicache) {
		DBGERR( lprintf(sp->gid, "ss_xic_miss(): ifetch cache hit\n"); );

		ep = find_errconf(sp, IFETCH, ITC|IDC|LDAC|LDAU|DAC|DAU);

		ss_error_condition(sp, ep);
		for (bank = 0; bank < npp->num_l2banks; bank++) {
			if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
		}
		ss_error_condition(sp, ep);
		for (bank = 0; bank < npp->num_l2banks; bank++) {
			if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
		}
		ss_error_condition(sp, ep);
		for (bank = 0; bank < npp->num_l2banks; bank++) {
			if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
		}
		ss_error_condition(sp, ep);

l2_disabled:	DBGERR( lprintf(sp->gid, "ss_xic_miss: No LDAC/LDAU Error"); );
	}
	xc_linep->tag = tag | sp->tagstate;
	xc_linep->memoryoffset = ((uint64_t)bufp) - tag;

	/*
	 * FIXME: If breakpoints are in use make sure we really clear the decoded line
	 * to ensure that we dont get instruction aliasing. XI-cache prob. needs a re-design
	 * from this standpoint - but this will wait until we complete the JIT version.
	 * Until then this is a reminder and a place holder.
	 */
	if (sp->bp_infop) xicache_clobber_line_decodes(sp, tag);

	xicache_line_fill_risc4(sp, xc_linep, tag, bufp);
}
/*
 * This is not the world's most efficient routine, but then we assume that ASIs are
 * not frequently occurring memory access types - we may have to fast path the
 * ASI_AS_IF_USER_PRIMARY etc. somehow if used frequently by kernel b-copy.
 */
void
ss_asi_access(simcpu_t *sp, maccess_t op, uint_t regnum, uint_t asi,
    uint64_t reg1, uint64_t reg2, asi_flag_t asi_flag)
{
	ss_tsb_info_t *tsbinfop, *tsbinfop1;
	uint_t context_type, idx;

	v9p = (sparcv9_cpu_t *)(sp->specificp);
	nsp = v9p->impl_specificp;
	npp = (ss_proc_t *)(sp->config_procp->procp);

	ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);

	if (asi == V9_ASI_IMPLICIT)
		goto no_asi_valid_checks;
	/*
	 * First check if this is a legitimate ASI based
	 * on current privilege level.
	 */

	ASSERT( !v9p->pstate.priv && !v9p->hpstate.hpriv );
	addr = ((op & MA_Op_Mask) == MA_CAS) ? reg1 : (reg1 + reg2);
	niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI,
	    ss_ctx_nucleus/*checkme*/, asi, 0/*fixme*/, 0);
	v9p->post_precise_trap(sp, Sparcv9_trap_privileged_action);
	return;

	ASSERT( v9p->pstate.priv && !v9p->hpstate.hpriv );
	if (asi >= 0x30 && asi < 0x80) {
		/* ASIs reserved for hpriv mode appear to priv mode as data access exceptions */
		addr = ((op & MA_Op_Mask) == MA_CAS) ? reg1 : (reg1 + reg2);
		niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI,
		    ss_ctx_nucleus/*checkme*/, asi, 0/*fixme*/, 0);
		v9p->post_precise_trap(sp, Sparcv9_trap_data_access_exception);
		return;
	}

	ASSERT( v9p->hpstate.hpriv );
	ASSERT( v9p->hpstate.red );
	/*
	 * Next pull out all the memory access ASIs ...
	 */
	mflags = (V9_User != v9p->state) ? MF_Has_Priv : 0;
	context_type = ss_ctx_reserved;
	mask = (1 << (op & MA_Size_Mask)) - 1;

	asi = (v9p->pstate.cle) ? SS_ASI_NUCLEUS_LITTLE : SS_ASI_NUCLEUS;

	asi = (v9p->pstate.cle) ? SS_ASI_PRIMARY_LITTLE : SS_ASI_PRIMARY;
	case SS_ASI_NUCLEUS_LITTLE:
		context_type = ss_ctx_nucleus;
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
		break;
	case SS_ASI_PRIMARY_NO_FAULT_LITTLE:
	case SS_ASI_PRIMARY_NO_FAULT:
		if (IS_V9_MA_STORE(op & MA_Op_Mask))
			goto data_access_exception;
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		break;

	case SS_ASI_AS_IF_USER_PRIMARY_LITTLE:
	case SS_ASI_AS_IF_USER_PRIMARY:
	case SS_ASI_PRIMARY_LITTLE:	/* (88) RW Implicit Primary Address space (LE) */
	case SS_ASI_PRIMARY:		/* (80) RW Implicit Primary Address space */
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
		context_type = ss_ctx_primary;
		break;

	case SS_ASI_SECONDARY_NO_FAULT_LITTLE:
	case SS_ASI_SECONDARY_NO_FAULT:
		if (IS_V9_MA_STORE(op & MA_Op_Mask))
			goto data_access_exception;
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		break;

	case SS_ASI_AS_IF_USER_SECONDARY_LITTLE:
	case SS_ASI_AS_IF_USER_SECONDARY:
	case SS_ASI_SECONDARY_LITTLE:
		if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
		context_type = ss_ctx_secondary;
		break;
	case SS_ASI_REAL_IO_LITTLE:	/* (1D) RW Same as ASI_PHYS_USE_EC_LITTLE for memory
					   addresses. For IO addresses, physical address,
					   non-cacheable, with side-effect (LE) */
	case SS_ASI_REAL_IO:		/* (15) RW Same as ASI_PHYS_USE_EC for memory addresses.
					   For IO addresses, physical address, non-cacheable,
					   with side-effect */
		mflags |= MF_TLB_Real_Ctx;
		context_type = ss_ctx_nucleus;
		break;

	case SS_ASI_REAL_MEM_LITTLE:	/* (1C) RW physical address, non-allocating in L1 cache */
	case SS_ASI_REAL_MEM:		/* (14) RW physical address, non-allocating in L1 cache */
		mflags |= MF_TLB_Real_Ctx;
		if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
		context_type = ss_ctx_nucleus;
		break;
	case SS_ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE:	/* RW 64B block load/store, primary address space, user privilege (LE) */
	case SS_ASI_BLOCK_AS_IF_USER_PRIMARY:		/* RW 64B block load/store, primary address space, user privilege */
	case SS_ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE:	/* RW 64B block load/store, secondary address space, user privilege (LE) */
	case SS_ASI_BLOCK_AS_IF_USER_SECONDARY:		/* RW 64B block load/store, secondary address space, user privilege */
	case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address, user priv (LE) */
	case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P:	/* Block initializing store/128b atomic LDDA, primary address, user privilege */
	case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store, secondary address, user privilege (LE) */
	case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S:	/* Block initializing store/128b atomic LDDA, secondary address, user privilege */

	case SS_ASI_QUAD_LDD_LITTLE:	/* 128b atomic LDDA (LE) */
	case SS_ASI_QUAD_LDD:		/* 128b atomic LDDA */
		/* This ASI must be used with an LDDA instruction */
		v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);

		/* Adjust size to 128 bytes so alignment is correct */
		mask = (1 << (op & MA_Size_Mask)) - 1;
		mflags |= MF_Atomic_Access;
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		context_type = ss_ctx_nucleus;
		break;

	case SS_ASI_QUAD_LDD_REAL_LITTLE:	/* 128b atomic LDDA, real address (LE) */
	case SS_ASI_QUAD_LDD_REAL:		/* 128b atomic LDDA, real address */
		/* This ASI must be used with an LDDA instruction */
		v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);

		/* Adjust size to 128 bytes so alignment is correct */
		mask = (1 << (op & MA_Size_Mask)) - 1;
		mflags |= MF_Atomic_Access;
		mflags |= MF_TLB_Real_Ctx;
		context_type = ss_ctx_nucleus;
		break;

	case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD_LITTLE: /* Block initializing store/128b atomic LDDA (LE) */
	case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD:	/* Block initializing store/128b atomic LDDA */
		mask = (1 << (op & MA_Size_Mask)) - 1;
		mflags |= MF_Atomic_Access;
		if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
			addr = ((op & MA_Op_Mask) == MA_CAS) ? reg1 : (reg1 + reg2);
			v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
		}
		break;
	case SS_ASI_QUAD_LDD_PHYS_LITTLE:	/* 128b atomic LDDA, physical address (LE) */
	case SS_ASI_QUAD_LDD_PHYS:	/* N1 PRM rev 1.4: any type of access causes data_access_exception */
		goto data_access_exception;

	case SS_ASI_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address (LE) */
	case SS_ASI_BLK_INIT_ST_QUAD_LDD_P:	/* Block initializing store/128b atomic LDDA, primary address */
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		mask = (1 << (op & MA_Size_Mask)) - 1;
		mflags |= MF_Atomic_Access;
		if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
			addr = ((op & MA_Op_Mask) == MA_CAS) ? reg1 : (reg1 + reg2);
			v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
		}
		break;

	case SS_ASI_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store/128b atomic LDDA, secondary address (LE) */
	case SS_ASI_BLK_INIT_ST_QUAD_LDD_S:	/* Block initializing store/128b atomic LDDA, secondary address */
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		mask = (1 << (op & MA_Size_Mask)) - 1;
		mflags |= MF_Atomic_Access;
		if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
			addr = ((op & MA_Op_Mask) == MA_CAS) ? reg1 : (reg1 + reg2);
			v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
		}
		break;
	case SS_ASI_BLK_PL:	/* 64B block load/store, primary address (LE) */
	case SS_ASI_BLK_P:	/* 64B block load/store, primary address */
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with an LDDFA/STDFA instruction */
		if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
			v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
			return;
		}
		op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) :
		    (MA_Size512 | MA_StFloat);
		mask = (1 << (op & MA_Size_Mask)) - 1;
		mflags |= MF_Atomic_Access;
		break;

	case SS_ASI_BLK_SL:	/* 64B block load/store, secondary address (LE) */
	case SS_ASI_BLK_S:	/* 64B block load/store, secondary address */
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with an LDDFA/STDFA instruction */
		if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
			v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
			return;
		}
		op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) :
		    (MA_Size512 | MA_StFloat);
		mask = (1 << (op & MA_Size_Mask)) - 1;
		mflags |= MF_Atomic_Access;
		break;
		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with STDFA instruction */
		if (!(MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with STDFA instruction */
		if (!(MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with STDFA instruction */
		if (!(MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with STDFA instruction */
		if (!(MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with STDFA instruction */
		if (!(MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with STDFA instruction */
		if (!(MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with an LDDFA/STDFA instruction */
		if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with an LDDFA/STDFA instruction */
		if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with an LDDFA/STDFA instruction */
		if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
			goto data_access_exception;
		}
		goto partial_asi_unsupported;

		if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
		/* This ASI must be used with an LDDFA/STDFA instruction */
		if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
			goto data_access_exception;
		}
partial_asi_unsupported:;
		if (addr & 0x3) {	/* check 32bit alignment */
			v9p->post_precise_trap(sp, Sparcv9_trap_mem_address_not_aligned);
			return;
		}
		if (addr & 0x7) {	/* check 64bit alignment */
			if (IS_V9_MA_LOAD(op & MA_Op_Mask))
				v9p->post_precise_trap(sp, Sparcv9_trap_LDDF_mem_address_not_aligned);
			else
				v9p->post_precise_trap(sp, Sparcv9_trap_STDF_mem_address_not_aligned);
			return;
		}
		goto data_access_exception;
	case SS_ASI_BLK_COMMIT_P:
	case SS_ASI_BLK_COMMIT_S:
		/* TODO: PRM states alignment checks done. */
		goto data_access_exception;
	}

	if ((MA_LdFloat == (op & MA_Op_Mask)) || (MA_StFloat == (op & MA_Op_Mask))) {
		ss_memory_asi_access(sp, op, (uint64_t *)&(sp->fpreg.s32[regnum]),
		    mflags, asi, context_type, mask, reg1, reg2);
	} else {
		ss_memory_asi_access(sp, op, &(sp->intreg[regnum]),
		    mflags, asi, context_type, mask, reg1, reg2);
		ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);
	}
	return;
	/* OK, derive access address etc. */
	size = op & MA_Size_Mask;
	is_load = IS_V9_MA_LOAD(op);
	/* No MA_CAS case required for cpu state registers. */

	/*
	 * Finally all the cpu state registers ...
	 * Currently only 64bit accesses supported ..
	 * need to ascertain exactly what niagara does here ! FIXME
	 * FIXME: Of course all the alt address space accesses are different here !
	 */
	if (size != MA_Size64 || (addr & 0x7) != 0 || IS_V9_MA_ATOMIC(op))
		goto data_access_exception;

	ASSERT(MA_LdSigned != op);	/* not signed for any stxas or for ldxas */

	IMPL_WARNING(("Unimplemented niagara asi %s (0x%x) accessed with address 0x%llx @ pc=%lx\n", #s, asi, addr, sp->pc)); \
	if (is_load) { val = 0; goto load_complete; }\
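	/*
	 * The two continuation lines above are the tail of the warning macro
	 * used for ASIs the simulator does not model: a load of such an ASI
	 * completes with val = 0 (via load_complete), while a store merely
	 * emits the IMPL_WARNING and is dropped.
	 */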
	/* If we're storing, fetch the value to stuff */
	if (MA_St == op) {
		val = sp->intreg[regnum];
	} else { /* MA_StFloat */
		if (size == MA_Size32)
			val = sp->fpreg.s32[regnum];
		else
			val = sp->fpreg.s64[regnum >> 1];
	}
	/*	Hex	Access	VA	Repli-	DESCRIPTION		*/
	/*		(priv)		cated				*/

	/* MANDATORY SPARC V9 ASIs */

	/* All in the memory section above */

	/* SunSPARC EXTENDED (non-V9) ASIs */

	/*
	 * 0x20	RW	0-18	Y	Scratchpad Registers
	 * 0x20	-	20-28	N	any type of access causes data_access_exception
	 * 0x20	RW	30-38	Y	Scratchpad Registers
	 */
		if (INVALID_SCRATCHPAD(addr)) {
			goto data_access_exception;
		}
		valp = &(nsp->strand_reg[SSR_ScratchPad0 + (addr >> 3)]);
		DBGSCRATCH( if (*valp != val)
			lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: "
			    "0x%llx -> 0x%llx pc=0x%llx\n",
			    asi, addr, *valp, val, sp->pc); );
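		/*
		 * Each scratchpad register is 8 bytes wide, so (addr >> 3)
		 * turns the byte offset into a register index relative to
		 * SSR_ScratchPad0. Per the ASI map above, offsets 0x00-0x18
		 * and 0x30-0x38 are real registers while 0x20-0x28 must
		 * fault; INVALID_SCRATCHPAD presumably encodes exactly that
		 * hole.
		 */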
	/*
	 * 0x21	RW	8	Y	I/DMMU Primary Context Register
	 * 0x21	RW	10	Y	DMMU Secondary Context Register
	 * 0x21	RW	120	Y	I/DMMU Synchronous Fault Pointer
	 * 0x21	RW	108	Y	I/DMMU Primary Context Register 1
	 * 0x21	RW	110	Y	DMMU Secondary Context Register 1
	 */
		val = (uint64_t)(nsp->pri_context);

		val = (uint64_t)(nsp->sec_context);

		goto data_access_exception;

		/*
		 * Since we're changing a context register we should
		 * flush the xi and xd trans caches. However, this only matters
		 * for the primary context - iff we are in priv mode with
		 * TL=0. For all other cases (TL>0) or hpriv=1, either the
		 * MMU is not in use, or we're executing the nucleus context so
		 * we can rely on a done/retry instn / mode change to do the flush for us
		 * when we change mode later.
		 */
		DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
		if (nsp->pri_context != val) {
			sp->xicache_trans_flush_pending = true;
			sp->xdcache_trans_flush_pending = true;
		}

		nsp->sec_context = val & MASK64(12,0);

		goto data_access_exception;
	case SS_ASI_QUEUE:	/* 0x25 RW 3C0 Y CPU Mondo Queue Head Pointer */
				/* 0x25 RW 3C8 Y CPU Mondo Queue Tail Pointer */
				/* 0x25 RW 3D0 Y Device Mondo Queue Head Pointer */
				/* 0x25 RW 3D8 Y Device Mondo Queue Tail Pointer */
				/* 0x25 RW 3E0 Y Resumable Error Queue Head Pointer */
				/* 0x25 RW 3E8 Y Resumable Error Queue Tail Pointer */
				/* 0x25 RW 3F0 Y Nonresumable Error Queue Head Pointer */
				/* 0x25 RW 3F8 Y Nonresumable Error Queue Tail Pointer */
		/*
		 * According to the PRM (1.8 Table 9-3), Niagara will
		 * 'nop' loads or stores to addresses 0-0x3b8.
		 */
		val = (uint16_t)(nsp->nqueue[(addr >> 4) - 0x3c].head);

		val = (uint16_t)(nsp->nqueue[(addr >> 4) - 0x3c].tail);

		goto data_access_exception;

		DBGMONDO( lprintf(sp->gid, "ASI_QUEUE load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );

		DBGMONDO( lprintf(sp->gid, "ASI_QUEUE store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
		RSVD_MASK(sp, MASK64(13, 6), val, asi, addr);

		nsp->nqueue[(addr >> 4) - 0x3c].head = (uint16_t)val;
		nsp->flag_queue_irq[(addr >> 4) - 0x3c] =
		    nsp->nqueue[(addr >> 4) - 0x3c].head !=
		    nsp->nqueue[(addr >> 4) - 0x3c].tail;

		if (v9p->state != V9_HyperPriv)
			goto data_access_exception;	/* DAX if store to tail in privileged mode */
		nsp->nqueue[(addr >> 4) - 0x3c].tail = (uint16_t)val;
		nsp->flag_queue_irq[(addr >> 4) - 0x3c] =
		    nsp->nqueue[(addr >> 4) - 0x3c].head !=
		    nsp->nqueue[(addr >> 4) - 0x3c].tail;

		goto data_access_exception;
	case SS_ASI_DIRECT_MAP_ECACHE: /* 0x30 - - - N1 PRM rev 1.4: any type of access causes data_access_exception */
		goto data_access_exception;

	case SS_ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0: /* 0x31 RW 0 Y DMMU Context Zero TSB Base PS0 */
		tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps0);

	case SS_ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1: /* 0x32 RW 0 Y DMMU Context Zero TSB Base PS1 */
		tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps1);

	case SS_ASI_DMMU_CTXT_ZERO_CONFIG: /* 0x33 RW 0 Y DMMU Context Zero Config Register */
		tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps0);
		tsbinfop1 = &(nsp->dmmu_ctxt_zero_tsb_ps1);

	case SS_ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0: /* 0x35 RW 0 Y IMMU Context Zero TSB Base PS0 */
		tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps0);

		val = tsbinfop->reg_tsb_base;
		DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );

		DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
		tsbinfop->reg_tsb_base = val;
		tsb_size = val & MASK64( 3, 0 );
		is_split = ((val >> 12) & 1) ? true : false;
		/* niagara catches attempts to create TSB spans larger than */
		if (tsb_size >= 11 && tsbinfop->page_size == 5) goto data_access_exception;
		tsbinfop->is_split = is_split;
		tsbinfop->tsb_size = tsb_size;
		tsbinfop->base_addr = val &
		    (is_split ? MASK64(63, 14 + tsb_size) : MASK64(63, 13 + tsb_size));
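		/*
		 * Sizing note: a TSB holds 512 * 2^tsb_size entries of 16
		 * bytes, i.e. 2^(13 + tsb_size) bytes, which is why the base
		 * address keeps only bits above (13 + tsb_size) - or
		 * (14 + tsb_size) when the TSB is split into two halves.
		 * The tsb_size >= 11 / page_size == 5 check above appears to
		 * reject combinations whose mapped span would exceed the
		 * supported virtual address range.
		 */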
	case SS_ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1: /* 0x36 RW 0 Y IMMU Context Zero TSB Base PS1 */
		tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps1);

	case SS_ASI_IMMU_CTXT_ZERO_CONFIG: /* 0x37 RW 0 Y IMMU Context Zero Config Register */
		tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps0);
		tsbinfop1 = &(nsp->immu_ctxt_zero_tsb_ps1);

		/* FIXME: what about non VA=0x0 accesses ? what about if new page-size + tsb size > span faults ? */
		val = ((uint64_t)tsbinfop1->page_size << 8) |
		    ((uint64_t)tsbinfop->page_size);
		DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );

		{
			static uint8_t supported[8] = { 1, 1, 0, 1, 0, 1, 0, 0 };

			DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
			tsbinfop1->page_size = (val >> 8) & 0x7;
			if (!supported[tsbinfop1->page_size])
				tsbinfop1->page_size = 5;
			tsbinfop->page_size = val & 0x7;
			if (!supported[tsbinfop->page_size])
				tsbinfop->page_size = 5;
		}
: /* 0x39 RW 0 Y DMMU Context Nonzero TSB Base PS0 */
tsbinfop
= &(nsp
->dmmu_ctxt_nonzero_tsb_ps0
);
case SS_ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
: /* 0x3A RW 0 Y DMMU Context Nonzero TSB Base PS1 */
tsbinfop
= &(nsp
->dmmu_ctxt_nonzero_tsb_ps1
);
case SS_ASI_DMMU_CTXT_NONZERO_CONFIG
: /* 0x3B RW 0 Y DMMU Context Zero Config Register */
tsbinfop
= &(nsp
->dmmu_ctxt_nonzero_tsb_ps0
);
tsbinfop1
= &(nsp
->dmmu_ctxt_nonzero_tsb_ps1
);
case SS_ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
: /* 0x3D RW 0 Y IMMU Context Nonzero TSB Base PS0 */
tsbinfop
= &(nsp
->immu_ctxt_nonzero_tsb_ps0
);
case SS_ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
: /* 0x3E RW 0 Y IMMU Context Nonzero TSB Base PS1 */
tsbinfop
= &(nsp
->immu_ctxt_nonzero_tsb_ps1
);
case SS_ASI_IMMU_CTXT_NONZERO_CONFIG
: /* 0x3F RW 0 Y DMMU Context Zero Config Register */
tsbinfop
= &(nsp
->immu_ctxt_nonzero_tsb_ps0
);
tsbinfop1
= &(nsp
->immu_ctxt_nonzero_tsb_ps1
);
#if INTERNAL_BUILD /* { */
	case SS_ASI_STREAM_MA:	/* 0x40 RW 0 N Asynchronous Streaming Control Register */
				/* 0x40 RW 8 N SRC Register: Asynchronous Strm state */
				/* 0x40 RW 10 N DEST Register: Asynchronous Strm state */
				/* 0x40 RW 18 N DATA Register: Asynchronous Strm state */
				/* 0x40 RW 20 N Chaining initialisation vector for DES/3DES */
				/* 0x40 RW 28 N DES Key 1 */
				/* 0x40 RW 30 N DES Key 2 */
				/* 0x40 RW 38 N DES Key 3 */
				/* 0x40 RW 40 N HASH STATE REG 1 */
				/* 0x40 RW 48 N HASH STATE REG 2 */
				/* 0x40 RW 50 N HASH STATE REG 3 */
				/* 0x40 RW 68 N Wait for async stream operation to complete */
				/* 0x40 RW 80 N Modular Arithmetic Control Register */
				/* 0x40 RW 88 N Modular Arithmetic Physical Address Register (MPA) */
				/* 0x40 RW 90 N Modular Arith. Memory Address Register (MA_ADDR) */
				/* 0x40 RW 98 N Modular Arithmetic NP Register */
				/* 0x40 RW A0 N Wait for async MA operation to complete */
		rv = modarith_cpu_access(sp, addr, op, &val);
		switch (rv) {
		case MOD_ARITH_LD_COMPLETE:
			break;
		case MOD_ARITH_DATA_ACCESS_EX_TRAP:
			EXEC_WARNING(("DAX in ASI_STREAM_MA\n"));
			goto data_access_exception;
		case MOD_ARITH_ILLEGAL_INST_TRAP:
			/* No version of Niagara does this... */
			v9p->post_precise_trap(sp,
			    Sparcv9_trap_illegal_instruction);
			return;
			IMPL_WARNING(("fatal error during mod_arith processing"));
			fatal("fatal error during mod_arith processing");
			return;	/* never actually executed */
			IMPL_WARNING(("unknown rv (0x%x) during mod_arith "
			    "processing", rv));
			fatal("fatal error during mod_arith processing");
			return;	/* never actually executed */
		}
#endif /* } INTERNAL_BUILD */
	case SS_ASI_LSU_DIAG_REG: /* 0x42 RW 0 N Sparc BIST control register */	/* SPARC_BIST_CONTROL */
				/* 0x42 RW 8 N Sparc Instruction Mask Register */ /* INST_MASK_REG */
				/* 0x42 RW 10 N Load/Store Unit Diagnostic Register */ /* LSU_DIAG_REG */
		val = nsp->icachep->bist_ctl;

		val = nsp->icachep->inst_mask;

		val = (nsp->dcachep->assocdis ? 2 : 0) |
		    (nsp->icachep->assocdis ? 1 : 0);

		nsp->icachep->bist_ctl = val & 0x7f;
		if (val & 1) nsp->icachep->bist_ctl |= 0x400;

		nsp->icachep->inst_mask = val;

		if (val & 2) nsp->dcachep->assocdis = true;
		if (val & 1) nsp->icachep->assocdis = true;

		goto data_access_exception;
	case SS_ASI_ERROR_INJECT_REG: /* 0x43 RW 0 N Sparc Error Injection Register */
		ITODO(SS_ASI_ERROR_INJECT_REG);
		break;

	case SS_ASI_STM_CTL_REG: /* 0x44 RW 0 N Self-timed Margin Control Register */
		ITODO(SS_ASI_STM_CTL_REG);
		break;
	case SS_ASI_LSU_CONTROL_REG: /* 0x45 RW 0 Y Load/Store Unit Control Register */
		val = (nsp->lsu_control_raw & ~(LSU_CTRL_DMMU_EN | LSU_CTRL_IMMU_EN)) |
		    (nsp->dmmu.enabled ? LSU_CTRL_DMMU_EN : 0LL) |
		    (nsp->immu.enabled ? LSU_CTRL_IMMU_EN : 0LL);

		/*
		 * can only issue this in hpriv mode, so even though we turn the mmu
		 * on and off, we dont need to flush the x and d translation caches
		 * because in hpriv mode we're only fetching physical addresses.
		 */
		ASSERT( V9_RED == v9p->state || V9_HyperPriv == v9p->state );
		val &= LSU_CTRL_REG_MASK;
		if ((val & (LSU_CTRL_WATCH_VR | LSU_CTRL_WATCH_VW)) != 0) {
			IMPL_WARNING(("ASI_LSU_CONTROL_REG watchpoint enable unimplemented @ pc=%lx\n", sp->pc));
		}
		nsp->lsu_control_raw = val;
		nsp->dmmu.enabled = (val & LSU_CTRL_DMMU_EN) != 0;
		nsp->immu.enabled = (val & LSU_CTRL_IMMU_EN) != 0;
		sp->xicache_trans_flush_pending = true;
		sp->xdcache_trans_flush_pending = true;
		break;

		goto data_access_exception;
	case SS_ASI_DCACHE_DATA: /* 0x46 RW - N Dcache data array diagnostics access */
	    {
		uint64_t idx, lineword, tag;

		/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		lineword = addr & SS_DCACHE_DATA_BITS;
		tag = (addr & SS_DCACHE_DATA_TAG_BITS) >> 10;

		RW_rdlock(&nsp->dcachep->rwlock);
		/*
		 * must match tag to load data
		 * iterate over 4 ways at bits [12:11]
		 */
		for (idx = lineword + 0x1800; idx >= lineword; idx = idx - 0x800) {
			if (nsp->dcachep->tagp[idx] == tag) {
				val = nsp->dcachep->datap[idx];
				break;
			}
		}
		EXEC_WARNING( ("ASI_DCACHE_DATA load tag 0x%xll has no match",
		    addr & SS_DCACHE_DATA_TAG_BITS) );
		RW_unlock(&nsp->dcachep->rwlock);

		/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		idx = (addr & SS_DCACHE_DATA_BITS) >> 3;
		RW_wrlock(&nsp->dcachep->rwlock);
		nsp->dcachep->datap[idx] = val;
		RW_unlock(&nsp->dcachep->rwlock);
	    }
	case SS_ASI_DCACHE_TAG: /* 0x47 RW - N Dcache tag and valid bit diagnostics access */
		/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		idx = (addr & SS_DCACHE_TAG_WAYLINE_BITS) >> 4;
		RW_rdlock(&nsp->dcachep->rwlock);
		val = nsp->dcachep->tagp[idx];
		RW_unlock(&nsp->dcachep->rwlock);

		/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		idx = (addr & SS_DCACHE_TAG_WAYLINE_BITS) >> 4;
		RW_wrlock(&nsp->dcachep->rwlock);
		nsp->dcachep->tagp[idx] = val;
		RW_unlock(&nsp->dcachep->rwlock);
	case SS_ASI_INTR_DISPATCH_STATUS: /* 0x48 - - - any type of access causes data_access_exception */
	case SS_ASI_INTR_RECEIVE:	/* 0x49 - - - any type of access causes data_access_exception */
	case SS_ASI_UPA_CONFIG_REGISTER: /* 0x4A - - - any type of access causes data_access_exception */
		goto data_access_exception;

	case SS_ASI_SPARC_ERROR_EN_REG:	/* 0x4B RW 0 N Sparc error enable reg (synchronous ecc/parity errors) */
		if (0LL != addr) goto data_access_exception;

		val = nsp->error.enabled;

		nsp->error.enabled = (val & (NA_CEEN | NA_NCEEN));
		break;

	case SS_ASI_SPARC_ERROR_STATUS_REG: /* 0x4C RW 0 Y Sparc error status reg */
		if (0LL != addr) goto data_access_exception;

		nsp->error.status &= ~val;
		break;

	case SS_ASI_SPARC_ERROR_ADDRESS_REG: /* 0x4D RW 0 Y Sparc error address reg */
		if (0LL != addr || !is_load) goto data_access_exception;
		break;

	case SS_ASI_ECACHE_TAG_DATA: /* 0x4E - - - any type of access causes data_access_exception */
		goto data_access_exception;
	case SS_ASI_HYP_SCRATCHPAD:
		/*
		 * 0x4F	RW	0-38	Y	Hypervisor Scratchpad
		 * 0x4F	RW	0-18	Y	Hypervisor Scratchpad
		 */
		if (INVALID_HYP_SCRATCHPAD(addr)) {
			goto data_access_exception;
		}
		valp = &(nsp->strand_reg[SSR_HSCRATCHPAD_INDEX + (addr >> 3)]);
		DBGSCRATCH( if (*valp != val)
			lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: "
			    "0x%llx -> 0x%llx pc=0x%llx\n",
			    asi, addr, *valp, val, sp->pc); );
	case SS_ASI_IMMU:	/* 0x50 R 0 Y IMMU Tag Target register */
				/* 0x50 RW 18 Y IMMU Synchronous Fault Status Register */
				/* 0x50 RW 30 Y IMMU TLB Tag Access Register */
		VA48_ASSERT(mmup->tag_access_reg);
		val = (mmup->tag_access_reg >> 22) |
		    ((mmup->tag_access_reg & MASK64(12,0)) << 48);
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

		val = mmup->tag_access_reg;
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

		DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
		mmup->sfsr = val & MMU_SFSR_MASK;
		sp->xicache_trans_flush_pending = true;

		mmup->tag_access_reg = VA48(val);
		DBGMMU( lprintf(sp->gid, "ASI 0x%x : %cMMU tag access = 0x%llx\n", asi, mmup->is_immu ? 'I' : 'D', mmup->tag_access_reg); );

		goto data_access_exception;
	case SS_ASI_IMMU_TSB_PS0_PTR_REG: /* 0x51 R 0 Y IMMU TSB PS0 pointer register */
		if (!is_load) goto data_access_exception;
		if (mmup == &nsp->dmmu) {
			if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
				tsbinfop = &nsp->dmmu_ctxt_zero_tsb_ps0;
			else
				tsbinfop = &nsp->dmmu_ctxt_nonzero_tsb_ps0;
		} else {
			if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
				tsbinfop = &nsp->immu_ctxt_zero_tsb_ps0;
			else
				tsbinfop = &nsp->immu_ctxt_nonzero_tsb_ps0;
		}
		goto common_make_tsb_ptr;
	case SS_ASI_IMMU_TSB_PS1_PTR_REG: /* 0x52 R 0 Y IMMU TSB PS1 pointer register */
		if (!is_load) goto data_access_exception;
		if (mmup == &nsp->dmmu) {
			if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
				tsbinfop = &nsp->dmmu_ctxt_zero_tsb_ps1;
			else
				tsbinfop = &nsp->dmmu_ctxt_nonzero_tsb_ps1;
		} else {
			if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
				tsbinfop = &nsp->immu_ctxt_zero_tsb_ps1;
			else
				tsbinfop = &nsp->immu_ctxt_nonzero_tsb_ps1;
		}

common_make_tsb_ptr:
		val = 1ull << (13 + tsbinfop->tsb_size);
		/*
		 * base_addr was masked when the TSB base was written,
		 * so no need to mask again here.
		 */
		val |= tsbinfop->base_addr;
		val |= (mmup->tag_access_reg >> (13 - 4 + tsbinfop->page_size * 3)) &
		    MASK64((9 + tsbinfop->tsb_size + 4 - 1), 4);
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
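		/*
		 * Pointer construction: TSB entries are 16 bytes, so the
		 * index must land at bit 4 of the pointer. The tag access VA
		 * is shifted right by (13 + 3*page_size) to strip the page
		 * offset, less 4 to leave room for the entry size, and the
		 * mask keeps (9 + tsb_size) index bits - matching the
		 * 512 * 2^tsb_size entries of the TSB. For example, with
		 * tsb_size=0 and page_size=0 (8KB pages), VA bits 21:13
		 * select one of the 512 entries.
		 */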
	case SS_ASI_ITLB_DATA_IN_REG: /* 0x54 W 0 N IMMU data in register */
tlb_data_in_valid_check:;
		/*
		 * Check for attempts to load this ASI -or- invalid PA
		 * (only bits 10-9 should be set)
		 */
		if (is_load || (addr & ~MASK64(10,9)) != 0)
			goto data_access_exception;

		DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
		is_real = SS_TLB_IS_REAL(addr);
		if ((addr >> 10) & 1) val = niagara_shuffle_sun4v_format(val);
		if (!ss_tlb_insert(sp, mmup, tlbp, nsp->partid, is_real, val, idx))
			goto data_access_exception;
		break;
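		/*
		 * Address bits drive the insert: judging by the mask check
		 * above, bit 10 selects the sun4v TTE format (shuffled into
		 * the internal sun4u-style layout first) and bit 9 marks a
		 * real-address translation (SS_TLB_IS_REAL presumably keys
		 * off it).
		 */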
	case SS_ASI_ITLB_DATA_ACCESS_REG: /* 0x55 RW 0-1F8 N IMMU TLB Data Access Register */
		/* Check for valid tlb index */
		idx = (addr >> 3) & 0x3f;
		if (idx >= tlbp->nentries) goto data_access_exception;

		DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
		/*
		 * Check for invalid PA (only bits 10-3 should be set)
		 */
		if ((addr & ~MASK64(10,3)) != 0)
			goto data_access_exception;

		if (sp->error_check == true &&
		    (ep = find_errconf(sp, ASI_LD, IMDU|DMDU))) {
			if (ep->type == IMDU && mmup->is_immu) {
				sp->errorp->tlb_idx[IMDU_IDX] = idx;
				ss_error_condition(sp, ep);
			}
			if (ep->type == DMDU && !mmup->is_immu) {
				sp->errorp->tlb_idx[DMDU_IDX] = idx;
				ss_error_condition(sp, ep);
			}
		}

		RW_rdlock(&tlbp->rwlock);
		tep = &tlbp->tlb_entryp[idx];
		RW_unlock(&tlbp->rwlock);
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
	case SS_ASI_ITLB_TAG_READ_REG: /* 0x56 R 0-1F8 N IMMU TLB Tag Read Register */
		if (idx >= tlbp->nentries) goto data_access_exception;

		if (sp->error_check == true &&
		    (ep = find_errconf(sp, ASI_LD, IMTU|DMTU))) {
			if (ep->type == IMTU && mmup->is_immu) {
				sp->errorp->tlb_idx[IMTU_IDX] = idx;
				ss_error_condition(sp, ep);
			}
			if (ep->type == DMTU && !mmup->is_immu) {
				sp->errorp->tlb_idx[DMTU_IDX] = idx;
				ss_error_condition(sp, ep);
			}
		}

		RW_rdlock(&tlbp->rwlock);
		tep = &tlbp->tlb_entryp[idx];
		val = ((uint64_t)tep->partid << 61) |
		    ((uint64_t)(tep->is_real ? 1 : 0) << 60);
		val |= (tep->tag_pfn & MASK64(55, 13)) | (uint64_t)tep->tag_context;
		RW_unlock(&tlbp->rwlock);
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

		goto data_access_exception;
	case SS_ASI_IMMU_DEMAP: /* 0x57 W 0 Y IMMU TLB Demap */
		if (is_load) goto data_access_exception;
		DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
		op = (ss_demap_t)((addr >> 6) & 0x3);
		switch ((addr >> 4) & 0x3) {
		case 0x0: context = nsp->pri_context; break;	/* primary context */
		case 0x1: context = nsp->sec_context; break;	/* secondary context */
		case 0x2: context = SS_NUCLEUS_CONTEXT; break;	/* nucleus context */
		case 0x3:
			/*
			 * use of reserved value is valid but causes
			 * demap to be ignored for the following two ops
			 */
			if (op == NA_demap_page || op == NA_demap_context)
				return;	/* demap ignored */
			break;
		}
		if (op == NA_demap_page) {
			if ((addr & BIT(47)) == 0) {
				if ((addr & MASK64(63, 48)) != 0) {
					EXEC_WARNING(("(@pc=0x%llx) demap "
					    "va is not properly sign extended",
					    sp->pc));
				}
			} else {
				if ((addr & MASK64(63, 48)) != MASK64(63, 48)) {
					EXEC_WARNING(("(@pc=0x%llx) demap "
					    "va is not properly sign extended",
					    sp->pc));
				}
			}
		}
		is_real = SS_TLB_IS_REAL(addr);
		if (!ss_demap(sp, op, mmup, tlbp, nsp->partid, is_real, context, addr))
			goto data_access_exception;
		break;
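		/*
		 * Demap encoding recap: addr bits 7:6 select the demap op,
		 * bits 5:4 select which context register supplies the context
		 * (primary / secondary / nucleus, with 0x3 reserved), and for
		 * a page demap the remaining high bits carry the
		 * sign-extended VA being demapped.
		 */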
	case SS_ASI_DMMU:	/* 0x58 R 0 Y D-MMU Tag Target Register */
				/* 0x58 RW 18 Y DMMU Synchronous Fault Status Register */
				/* 0x58 R 20 Y DMMU Synchronous Fault Address Register */
				/* 0x58 RW 30 Y DMMU TLB Tag Access Register */
				/* 0x58 RW 38 Y DMMU VA Data Watchpoint Register */
				/* 0x58 RW 40 Y Niagara 2: Tablewalk Config Reg */
				/* 0x58 RW 80 Y I/DMMU Partition ID */
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

		val = mmup->va_watchpoint;
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

		val = (uint64_t)(nsp->partid);
		DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

		DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
		mmup->va_watchpoint = VA48(val);

		/*
		 * can only do in hypervisor mode - switching mode causes the xi/xd
		 * translation caches to be flushed
		 */
		nsp->partid = val & 0x7;	/* three bits of part id only */
		sp->xicache_trans_flush_pending = true;
		sp->xdcache_trans_flush_pending = true;

		goto data_access_exception;
	case SS_ASI_DMMU_TSB_PS0_PTR_REG: /* 0x59 R 0 Y DMMU TSB PS0 pointer register */
	case SS_ASI_DMMU_TSB_PS1_PTR_REG: /* 0x5A R 0 Y DMMU TSB PS1 pointer register */
	case SS_ASI_DMMU_TSB_DIRECT_PTR_REG: /* 0x5B R 0 Y DMMU TSB Direct pointer register */
		if (!is_load) goto data_access_exception;
		if (mmup->tsb_direct_ps1) {
		}
		break;

	case SS_ASI_DTLB_DATA_IN_REG: /* 0x5C W 0 N DMMU data in register */
		goto tlb_data_in_valid_check;
	case SS_ASI_DTLB_DATA_ACCESS_REG: /* 0x5D RW 0-1F8 N DMMU TLB Data Access Register */
	case SS_ASI_DTLB_TAG_READ_REG:	/* 0x5E R 0-1F8 N DMMU TLB Tag Read Register */
	case SS_ASI_DMMU_DEMAP:		/* 0x5F W 0 Y DMMU TLB Demap */

	case SS_ASI_TLB_INVALIDATE_ALL:	/* 0x60 W 0 N IMMU TLB Invalidate Register */
					/* 0x60 W 8 N DMMU TLB Invalidate Register */
		if (is_load || !(addr == 0x0 || addr == 0x8))
			goto data_access_exception;
		DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
		if (!ss_demap(sp, NA_demap_init, mmup, tlbp,
		    SS_TLB_INVALID_PARTID, false, SS_TLB_INVALID_CONTEXT, 0))
			goto data_access_exception;
		break;
	case SS_ASI_ICACHE_INSTR: /* 0x66 RW - N Icache data array diagnostics access */
		/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		idx = ((addr & SS_ICACHE_DATA_LINEWORD_BITS) |
		    ((addr & SS_ICACHE_DATA_WAY_BITS) >> 3)) >> 3;
		RW_rdlock(&nsp->icachep->rwlock);
		val = nsp->icachep->datap[idx];
		RW_unlock(&nsp->icachep->rwlock);

		/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		idx = ((addr & SS_ICACHE_DATA_LINEWORD_BITS) |
		    ((addr & SS_ICACHE_DATA_WAY_BITS) >> 3)) >> 3;
		RW_wrlock(&nsp->icachep->rwlock);
		nsp->icachep->datap[idx] = val;
		RW_unlock(&nsp->icachep->rwlock);
	case SS_ASI_ICACHE_TAG: /* 0x67 RW - N Icache tag and valid bit diagnostics access */
		/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		idx = (((addr & SS_ICACHE_TAG_LINE_BITS) >> 3) |
		    ((addr & SS_ICACHE_TAG_WAY_BITS) >> 6)) >> 3;
		RW_rdlock(&nsp->icachep->rwlock);
		val = nsp->icachep->tagp[idx];
		RW_unlock(&nsp->icachep->rwlock);

		/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
		idx = (((addr & SS_ICACHE_TAG_LINE_BITS) >> 3) |
		    ((addr & SS_ICACHE_TAG_WAY_BITS) >> 6)) >> 3;
		RW_wrlock(&nsp->icachep->rwlock);
		nsp->icachep->tagp[idx] = val;
		RW_unlock(&nsp->icachep->rwlock);
	case SS_ASI_SWVR_INTR_RECEIVE: /* 0x72 RW 0 Y Interrupt Receive Register */
		if (0LL != addr) goto data_access_exception;

		pthread_mutex_lock(&nsp->irq_lock);
		pthread_mutex_unlock(&nsp->irq_lock);
		DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_INTR_RECEIVE load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );

		pthread_mutex_lock(&nsp->irq_lock);
		DBGMONDO( oldval = nsp->irq_vector; );
		DBGMONDO( newval = nsp->irq_vector; );
		pthread_mutex_unlock(&nsp->irq_lock);
		DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_INTR_RECEIVE store 0x%x/0x%llx : 0x%llx irq_vector: 0x%llx -> 0x%llx (pc=0x%llx)\n", asi, addr, val, oldval, newval, sp->pc); );
	case SS_ASI_SWVR_UDB_INTR_W:	/* 0x73 W 0 Y Interrupt Vector Dispatch Register */
		if (0LL != addr || is_load) goto data_access_exception;
		DBGMONDO( lprintf(sp->gid, "ASI_SWVR_UDB_INTR_W store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
		    asi, addr, val, sp->pc); );
		niagara_send_xirq(sp, val);
		break;

	case SS_ASI_SWVR_UDB_INTR_R:	/* 0x74 R 0 Y Incoming Vector Register */
		if (0LL != addr || !is_load) goto data_access_exception;

		pthread_mutex_lock(&nsp->irq_lock);
		if (vec & 0xffffffff00000000ull) {
			nsp->irq_vector &= ~((uint64_t)1 << bit);
		}
		pthread_mutex_unlock(&nsp->irq_lock);
		DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_UDB_INTR_R load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
		    asi, addr, val, sp->pc); );
		addr = ((op & MA_Op_Mask) == MA_CAS) ?

data_access_exception:
		niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI, ss_ctx_primary, asi, 1, 0);
		tt = (sparcv9_trap_type_t)Sparcv9_trap_data_access_exception;
		ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);
		v9p->post_precise_trap(sp, tt);

		if (regnum != Reg_sparcv9_g0) sp->intreg[regnum] = val;
	} else { /* op == MA_LdFloat */
		ASSERT(MA_LdFloat == op);
		sp->fpreg.s32[regnum] = val;
		sp->fpreg.s64[regnum >> 1] = val;
	}

	IMPL_WARNING(("ASI access (0x%02x) (@pc=0x%llx) to address 0x%llx currently unimplemented",
	    asi, sp->pc, addr));
	v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
/*
 * Slow generic memory access ..
 * .. becomes the path for all the accesses we can't handle via the load/store hash
 */

void ss_memory_asi_access(simcpu_t * sp, maccess_t memop, uint64_t * regp,
    mem_flags_t mflags, uint_t asi, uint_t context_type,
    uint_t align_mask, tvaddr_t va, tvaddr_t reg2)
{
	tvaddr_t tag, perm_cache;

	v9p = (sparcv9_cpu_t *)(sp->specificp);
	nsp = v9p->impl_specificp;
	npp = (ss_proc_t *)(sp->config_procp->procp);

	mflags ^= (asi & SS_ASI_LE_MASK) ? MF_Little_Endian : 0;

	/* OK, derive access address etc. */
	size = memop & MA_Size_Mask;

	DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid,
	    "SunSPARC::: LE load/store pc=0x%llx instr=0x%x count=%d asi=0x%x\n",
	    sp->pc, op, (1 << size), asi); );
	/*
	 * OK - Step 1 : to do or not do a TLB translation.
	 * The assumption here is that privilege checks have already happened.
	 */

	/* quick check of alignment */
	if ((va & (tvaddr_t)align_mask) != 0) {
		if (v9p->pstate.addr_mask)
			va &= MASK64(31,0);	/* SV9_ID125 FIXME */

		DBGALIGN( lprintf(sp->gid, "Miss data access pc=0x%llx va=0x%llx align_mask=0x%llx\n",
		    sp->pc, va, (tvaddr_t)align_mask); );

		/* alignment error force a trap */
		SET_DTLB_FAULT( nsp, VA48(va) );
		niagara_set_sfsr(sp, &nsp->dmmu, va, 0/*fixme*/, context_type, asi, 0, 0);

		if ((MA_ldfp64 == memop || MA_stfp64 == memop) &&
		    ((va & 0x7) == 0x4))
			tt = ((memop == MA_ldfp64) ?
			    Sparcv9_trap_LDDF_mem_address_not_aligned :
			    Sparcv9_trap_STDF_mem_address_not_aligned);
		else
			tt = Sparcv9_trap_mem_address_not_aligned;

		v9p->post_precise_trap(sp, tt);
		return;
	}
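	/*
	 * Illustrative note (an assumption about the callers, not stated in
	 * this file): align_mask is expected to be (access size in bytes - 1),
	 * so any set low-order bit flags a misaligned va.  A minimal sketch of
	 * the same test in isolation:
	 *
	 *	// misaligned iff va is not a multiple of the access size
	 *	// e.g. bytes == 8 -> mask 0x7
	 *	(va & (tvaddr_t)(bytes - 1)) != 0
	 */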
	/* Find the pa corresponding to the line we need */
	tag = va & XDCACHE_TAG_MASK;

	/*
	 * We have to get the PA from the EA ... this depends on the mode
	 * and the type of access.
	 */
	if (v9p->pstate.addr_mask) {
		/* NOTE: we dont mask tag ... we allow that to match the 64bit address */
	}

	flags = SS_TLB_FLAG_READ | SS_TLB_FLAG_WRITE;	/* default access flags */

	/*
	 * OK perform the TLB access based on the context
	 * and partition id selected
	 */

	/* default read and write permission for MMU bypass */
	perm_cache = XDCACHE_READ_PERM | XDCACHE_WRITE_PERM;

	if (!(mflags & MF_MMU_Bypass)) {
		ss_trap_type_t miss_trap_type;

		DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid,
		    "SunSPARC::: performing TLB access \n"); );

		/* If not priv mode and mmu is off, translate real addresses */
		context = SS_TLB_REAL_CONTEXT;

		/* figure out the context value */
		switch (context_type) {
		case ss_ctx_primary:
			context = nsp->pri_context;
			break;
		case ss_ctx_secondary:
			context = nsp->sec_context;
			break;
		case ss_ctx_nucleus:
			if (mflags & MF_TLB_Real_Ctx)
				context = SS_TLB_REAL_CONTEXT;
			else
				context = SS_NUCLEUS_CONTEXT;
			break;
		default:
			fatal("ss_memory_asi_access: Internal Error. Not expecting "
			    "context type 0x%x\n", context_type);
		}
		/*
		 * check out of range address (i.e. lies within the "VA hole")
		 */
		if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) {
			niagara_set_sfsr(sp, &nsp->dmmu, va,
			    MMU_SFSR_FT_VARANGE, context_type,
			    asi, 0, 0);
			v9p->post_precise_trap(sp,
			    (sparcv9_trap_type_t)SS_trap_data_access_exception);
			return;
		}
		/* FIXME: Need a better hash than this ! */
		idx = va >> SS_MAX_PAGE_SIZE_BITS;

		RW_rdlock(&tlbp->rwlock);

		/*
		 * So we search for a matching page using the info we have in the
		 * hash - while another thread might possibly be removing or
		 * inserting an entry into the same table.
		 */
		for (tep = tlbp->hash[idx].ptr; tep != (tlb_entry_t *)0; tep = tep->nextp) {
			/* try and match the entry as appropriate */
			if (((tep->tag_pfn ^ va) >> tep->match_shift) == 0 &&
			    tep->match_context == context && tep->partid == partid)
				goto tlb_match;
		}
		RW_unlock(&tlbp->rwlock);

		DBGMISS( lprintf(sp->gid, "dtlb miss: pc=%lx asi=%x va=%lx ctx=%x\n",
		    sp->pc, asi, va, context); );
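		/*
		 * Illustrative sketch (assumed numbers, not from the PRM): with
		 * match_shift == log2(page size), an entry hits exactly when va
		 * falls inside the entry's page.  For an 8KB page (shift 13)
		 * with tag_pfn 0x42000:
		 *
		 *	(0x42000 ^ 0x43fff) >> 13 == 0	-> hit  (same page)
		 *	(0x42000 ^ 0x44000) >> 13 != 0	-> miss (next page)
		 */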
		/* Based on the ASI access type setup accordingly */
		switch (asi) {
		case SS_ASI_REAL_MEM_LITTLE:
		case SS_ASI_REAL_IO_LITTLE:
		case SS_ASI_QUAD_LDD_REAL:
		case SS_ASI_QUAD_LDD_REAL_LITTLE:
			SET_DTLB_FAULT( nsp, VA48(va) );
			nsp->dmmu.tag_access_reg = VA48(va) & ~MASK64(12,0);	/* Do this properly later - FIXME */
			DBGMMU( lprintf(sp->gid, "DMMU tag access = 0x%llx\n", nsp->dmmu.tag_access_reg); );
			v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_data_real_translation_miss);
			return;
		}

		/*
		 * If the MMU is "disabled" in privileged mode ... this is a real miss, not a
		 * virtual translation miss, so the fault context and trap type is different
		 */
		miss_trap_type = SS_trap_fast_data_access_MMU_miss;
		miss_context = 0;	/* null for ra->pa miss undefined ? */
		miss_trap_type = SS_trap_data_real_translation_miss;

		nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | miss_context;	/* Do this properly later - FIXME */
		SET_DTLB_FAULT( nsp, VA48(va) );
		DBGMMU( lprintf(sp->gid, "DMMU tag access = 0x%llx\n", nsp->dmmu.tag_access_reg); );
		v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
		return;
tlb_match:
		/* we have a matching entry ... now all we have to worry about are the permissions */
		pa_tag += tep->pa_offset;
		RW_unlock(&tlbp->rwlock);

		/*
		 * Errors on dtlb hit: stash table_entry pointer and if
		 * subsequent itlb hit on same entry post error again.
		 */
		if (sp->error_check == true && errorp->check_dtlb) {
			bool_t is_load, is_store;

			is_load = IS_V9_MA_LOAD(op);
			is_store = IS_V9_MA_STORE(op);

			if (is_load) ep = find_errconf(sp, LD, DMDU);
			if (is_store) ep = find_errconf(sp, ST, DMSU);

			DBGERR( lprintf(sp->gid, "ss_memory_asi_access: "
			    "errorp->dtep=%x, tep=%x\n", errorp->dtep, tep); );

			if ((tlb_entry_t *)errorp->dtep == tep) {
				ss_error_condition(sp, ep);
			}
			ss_error_condition(sp, ep);
		}
		/* privilege test apparently takes priority ... p.51 US-I PRM table 6-4 */
		if ((flags & SS_TLB_FLAG_PRIV) && !(mflags & MF_Has_Priv)) {
			nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) |
			    ((SS_TLB_REAL_CONTEXT == context) ? 0 : context);	/* Do this properly later - FIXME */
			niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_PRIV, context_type, asi, 0/*fixme*/, 1);
			miss_trap_type = SS_trap_data_access_exception;
		}

		/*
		 * validate bits NFO, E and CP
		 */
		if ((flags & SS_TLB_FLAG_E) && (mflags & MF_No_Fault)) {
			nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) |
			    ((SS_TLB_REAL_CONTEXT == context) ? 0 : context);	/* Do this properly later - FIXME */
			miss_trap_type = SS_trap_data_access_exception;
			niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_SO, context_type, asi, 0/*fixme*/, 1);
		}

		if ((flags & SS_TLB_FLAG_NFO) && (!(mflags & MF_No_Fault))) {
			nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) |
			    ((SS_TLB_REAL_CONTEXT == context) ? 0 : context);	/* Do this properly later - FIXME */
			miss_trap_type = SS_trap_data_access_exception;
			niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_NFO, context_type, asi, 0/*fixme*/, 1);
		}

		if (!(flags & SS_TLB_FLAG_CP) && (mflags & MF_Atomic_Access)) {
			nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) |
			    ((SS_TLB_REAL_CONTEXT == context) ? 0 : context);	/* Do this properly later - FIXME */
			miss_trap_type = SS_trap_data_access_exception;
			niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_ATOMICIO, context_type, asi, 0/*fixme*/, 1);
		}

		if (IS_V9_MA_STORE(op) && !(flags & SS_TLB_FLAG_WRITE)) {
			nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) |
			    ((SS_TLB_REAL_CONTEXT == context) ? 0 : context);	/* Do this properly later - FIXME */

			ps1 = (context == 0) ? nsp->dmmu_ctxt_zero_tsb_ps1.page_size :
			    nsp->dmmu_ctxt_nonzero_tsb_ps1.page_size;
			tte_ps1 = ((tep->data >> (48-2)) & 0x4) | ((tep->data >> 61) & 0x3);

			/* Is this the actual logic for direct TSB ptr select - FIXME */
			/* State bit updated for data_access_protection - see PRM v1.0 p258 13.11.11 */
			nsp->dmmu.tsb_direct_ps1 = (tte_ps1 == ps1);

			miss_trap_type = SS_trap_fast_data_access_protection;
			niagara_set_sfsr(sp, &nsp->dmmu, va, 0/*fixme*/, context_type, asi, 1, 0);
		}

		mflags ^= (flags & SS_TLB_FLAG_IE) ? MF_Little_Endian : 0;

		perm_cache = (flags & SS_TLB_FLAG_WRITE) ? XDCACHE_WRITE_PERM : 0;
		perm_cache |= (flags & SS_TLB_FLAG_READ) ? XDCACHE_READ_PERM : 0;
	/*
	 * Niagara only implements 40 bits of PA; the tlb code
	 * masks PA, so here we need to mask bypass PAs.
	 */

	/*
	 * Now that we have the internal PA, map it to the real
	 * external PA before looking it up in the domain.
	 * This does not modify memory addresses, only JBus addresses.
	 */
	if (pa >= 0x800e000000ull && pa < 0x8010000000ull) {
	} else if (pa >= 0x8010000000ull && pa < 0x8100000000ull) {
	} else if (pa >= 0xc000000000ull && pa < 0xff00000000ull) {
	}

	/*
	 * OK - now go get the pointer to the line data
	 * ... start by finding the device that has the
	 * memory we need.
	 * optimise: by guessing at the last device found.
	 */

	/* now find the device - looking in the cache first */
	cap = sp->xdc.miss_addrp;
	if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) {
		config_proc_t * config_procp;

		config_procp = sp->config_procp;
		domainp = config_procp->domainp;

		cap = find_domain_address(domainp, pa);
		/* OK it's a bus error there was no backing store */
		EXEC_WARNING(("bus error - (@pc=0x%llx, icount=%llu) access to va=0x%llx "
		    "(pid=0x%x,ctx_type=0x%x,cacheline va=0x%llx -> physical 0x%llx)",
		    sp->pc, ICOUNT(sp), va, nsp->partid, context_type, tag, pa_tag));
	}
	/* try and get the buffer pointer */
	DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: calling dev_cacheable\n"); );

	extent = cap->config_devp->dev_typep->dev_cacheable(cap, da,
	    pa_tag - cap->baseaddr, &bufp);

	if (extent < XDCACHE_LINE_SIZE) {
		uint64_t tempreg, *aregp;

		pio_op = memop & MA_Op_Mask;

		if ((MF_Little_Endian & mflags) && (pio_op == MA_St)) {
			tempreg = sparcv9_invert_endianess(regp, (1 << size));
		} else if ((&(sp->intreg[Reg_sparcv9_g0]) == regp) &&
		    ((pio_op == MA_Ld) || (pio_op == MA_LdSigned))) {
		}

		status = cap->config_devp->dev_typep->dev_cpu_access(sp, cap,
		    pa - cap->baseaddr, memop, aregp);

		if ((MF_Little_Endian & mflags) && status &&
		    (pio_op == MA_Ld || pio_op == MA_LdSigned)) {
			*regp = sparcv9_invert_endianess(regp, (1 << size));
			if (pio_op == MA_LdSigned) {
				shift = 64 - (8 << size);
				*regp = ((sint64_t)(*regp << shift)) >> shift;
			}
		}

		ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);
EXEC_WARNING(("bus error - (@pc=0x%llx) access to va=0x%llx "
"(pid=0x%x,ctx_type=0x%x,physical 0x%llx)",
sp
->pc
, va
, nsp
->partid
, context_type
, pa
));
#if !defined(NDEBUG) /* { */
config_proc_t
* config_procp
;
config_procp
= sp
->config_procp
;
ss_dump_tlbs(config_procp
, true);
/* abort(); */ /* FIXME - no longer need this ? */
DBGLE( if (MF_Little_Endian
& mflags
) lprintf(sp
->gid
, "SunSPARC::: post_precise_trap \n"); );
v9p
->post_precise_trap(sp
, Sparcv9_trap_data_access_error
); /* FIXME: right trap ? */
#if ERROR_INJECTION /* { */
	/* processor-wide checks for unhandled L2 and DRAM errors */
	bool_t is_load, is_store, is_atomic;

	is_load = IS_V9_MA_LOAD(op);
	is_store = IS_V9_MA_STORE(op);
	is_atomic = IS_V9_MA_ATOMIC(op);

	/* check if direct-map mode displacement flushing the error cacheline */
	if (l2p->control[bank] & L2_DMMODE) {
		if ((pa & L2_DM_MASK) == (npp->errorp->ldac_addr & L2_DM_MASK)) {
			npp->errorp->ldac_addr = NULL;
		}
		if ((pa & L2_DM_MASK) == (npp->errorp->ldau_addr & L2_DM_MASK)) {
			npp->errorp->ldau_addr = NULL;
		}
	}

	/*
	 * when accessing cacheline with error: load or partial store
	 * causes LDAC or LDAU, store to line with correctible error clears it,
	 * store to uncorrectible causes a writeback error
	 */
	if (pa == npp->errorp->ldac_addr) {
		if (is_load ||
		    (is_store && (size == MA_Size8 || size == MA_Size16))) {
			ep = new_errconf((is_load ? LD : ST), LDAC);
			npp->errorp->ldac_addr = NULL;
		}
	} else if (pa == npp->errorp->ldau_addr) {
		if (is_load ||
		    (is_store && (size == MA_Size8 || size == MA_Size16))) {
			ep = new_errconf((is_load ? LD : ST), LDAU);
			npp->errorp->ldau_addr = NULL;
		}
	}
	/* now check for errors to be generated from this thread's error list */
	if (sp->error_check && errorp->check_xdcache) {
		bool_t is_load, is_store, is_atomic;

		is_load = IS_V9_MA_LOAD(op);
		is_store = IS_V9_MA_STORE(op);
		is_atomic = IS_V9_MA_ATOMIC(op);

		if (is_load) ep = find_errconf(sp, LD,
		    (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU));
		if (is_store) ep = find_errconf(sp, ST,
		    (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU));

		xidx = (xpc >> 2) & XICACHE_NUM_INSTR_MASK;
		xip = &xicp->instn[xidx];
		errorp->reg = X_RS1(xip->rawi);
		ss_error_condition(sp, ep);
		ss_error_condition(sp, ep);
		for (bank = 0; bank < npp->num_l2banks; bank++) {
			if (l2p->control[bank] & L2_DIS) goto l2_disabled;
		}
		if (is_atomic) errorp->l2_write = L2_RW_bit;
		ss_error_condition(sp, ep);

		if (is_store && (size == MA_Size8 || size == MA_Size16)) {
			errorp->l2_write = L2_RW_bit;
			errorp->partial_st = true;
		}
		ss_error_condition(sp, ep);

		for (bank = 0; bank < npp->num_l2banks; bank++) {
			if (l2p->control[bank] & L2_DIS) goto l2_disabled;
		}
		ss_error_condition(sp, ep);

		for (bank = 0; bank < npp->num_l2banks; bank++) {
			if (l2p->control[bank] & L2_DIS) goto l2_disabled;
		}
		if (ep->op == LD && is_load) {
			if (is_atomic) errorp->l2_write = L2_RW_bit;
			ss_error_condition(sp, ep);
		}
		if (ep->op == ST && is_store) {
			if (size == MA_Size8 || size == MA_Size16)
				errorp->partial_st = true;
			errorp->l2_write = L2_RW_bit;
			ss_error_condition(sp, ep);
		}

		for (bank = 0; bank < npp->num_l2banks; bank++) {
			if (l2p->control[bank] & L2_DIS) goto l2_disabled;
		}
		if (ep->op == LD && is_load) {
			if (is_atomic) errorp->l2_write = L2_RW_bit;
			ss_error_condition(sp, ep);
		}
		if (ep->op == ST && is_store) {
			if (size == MA_Size8 || size == MA_Size16)
				errorp->partial_st = true;
			errorp->l2_write = L2_RW_bit;
			ss_error_condition(sp, ep);
		}

l2_disabled:	DBGERR( lprintf(sp->gid, "ss_memory_asi_access: "
		    "No LDAC/LDWC/LDAU/LDWU/DAC Error - L2 disabled\n"); );
	DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: handling cacheable device memory\n"); );

	/*
	 * Now handle cacheable device memory.
	 * Because we implicitly assume that the xdc uses the current context
	 * we only add missed entries to the xdc iff it was a normal memory op
	 */
	if ((mflags & (MF_Normal|MF_Little_Endian)) == MF_Normal) {
		sp->xdc.miss_addrp = cap;	/* cache for next time */

		ridx = (va >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK;
		xclp = (xdcache_line_t *)(((uint8_t *)&(sp->xdc.line[0])) + ridx);

		/* only cache if memory is cacheable */
		/* WARNING: This tag may be a full 64bit value even if pstate.am=1 */
		/* do not use ea_offset with anything else other than tag */
		xclp->tag = tag | perm_cache | sp->tagstate;
		xclp->offset = ((uint64_t)bufp) - tag;
	}
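	/*
	 * Illustrative note (an assumption about the xdc fast path, which
	 * lives elsewhere): storing offset = bufp - tag lets a later hit turn
	 * a guest va into a host pointer with a single add, since va and tag
	 * share the same cache-line base:
	 *
	 *	host_ptr = (uint8_t *)(va + xclp->offset);
	 */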
	/*
	 * Sigh now complete the load/store on behalf of the original instruction
	 */

#if HOST_CPU_LITTLE_ENDIAN
	mflags ^= MF_Little_Endian;
#endif

	DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: completing load/store on behalf of original instr.\n"); );

	ptr = (uint8_t *)(bufp + (pa & XDCACHE_LINE_OFFSET_MASK));
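	/*
	 * Illustrative note (my reading of the flag dance above, not a PRM
	 * statement): on a little-endian host MF_Little_Endian is toggled so
	 * that from here on it means "a byte swap is required", letting every
	 * per-op case below test one flag regardless of host byte order.
	 */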
	switch (op) {
	case MA_Ld:
		if (MF_Little_Endian & mflags) {
			DBGLE( lprintf(sp->gid, "SunSPARC::: MA_Ld with LE - val=0x%llx count=0x%x\n",
			    val, (1 << size)); );
			val = sparcv9_invert_endianess(&val, (1 << size));
		}
		break;

	case MA_LdSigned:
		if (MF_Little_Endian & mflags) {
			DBGLE(lprintf(sp->gid, "SunSPARC::: MA_LdSigned with LE - val=0x%llx count=0x%x\n",
			    val, (1 << size)); );
			val = sparcv9_invert_endianess(&val, (1 << size));
		}
		shift = 64 - (8 << size);
		val = ((sint64_t)(val << shift)) >> shift;
		break;

	case MA_St:
		if (MF_Little_Endian & mflags) {
			DBGLE( lprintf(sp->gid, "SunSPARC::: MA_St with LE - val=0x%llx\n", *regp); );
			val = sparcv9_invert_endianess(regp, (1 << size));
		}
		if (mflags & MF_Blk_Init) {
			/* If line in L2 cache, leave data alone, otherwise zero it */
			/* XXX How to simulate? */
		}
		break;
	case MA_LdFloat:
		DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdFloat with LE - \n"); );
		ASSERT(&(sp->intreg[Reg_sparcv9_g0]) != regp);
		switch (size) {
		case MA_Size32:
			if (MF_Little_Endian & mflags) {
				val = *(ieee_fp32_t *)ptr;
				val = sparcv9_invert_endianess(&val, (1 << size));
				*(ieee_fp32_t *)regp = val;
			} else
				*(ieee_fp32_t *)regp = *(ieee_fp32_t *)ptr;
			break;
		case MA_Size64:
			if (MF_Little_Endian & mflags)
				*regp = sparcv9_invert_endianess((uint64_t *)ptr, (1 << size));
			else
				*(ieee_fp64_t *)regp = *(ieee_fp64_t *)ptr;
			break;
		case MA_Size512:
			if ((MF_Little_Endian & mflags) == 0) {
				for (i = 0; i < 8; i++) {
					*(ieee_fp64_t *)(regp + i) =
					    *(ieee_fp64_t *)(ptr + i*8);
				}
			} else {
				for (i = 0; i < 8; i++) {
					*(regp + i) = sparcv9_invert_endianess(
					    (uint64_t *)(ptr + i*8), 8);
				}
			}
			break;
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
		case MA_Size128:
			ASSERT((MF_Little_Endian & mflags) == 0);
			*(ieee_fp128_t *)regp = *(ieee_fp128_t *)ptr;
			break;
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */
		}
		break;
	case MA_StFloat:
		DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_StFloat with LE - \n"); );
		switch (size) {
		case MA_Size32:
			if (MF_Little_Endian & mflags) {
				val = *(ieee_fp32_t *)regp;
				val = sparcv9_invert_endianess(&val, (1 << size));
				*(ieee_fp32_t *)ptr = val;
			} else
				*(ieee_fp32_t *)ptr = *(ieee_fp32_t *)regp;
			break;
		case MA_Size64:
			if (MF_Little_Endian & mflags)
				*(uint64_t *)ptr = sparcv9_invert_endianess(regp, (1 << size));
			else
				*(ieee_fp64_t *)ptr = *(ieee_fp64_t *)regp;
			break;
		case MA_Size512:
			if ((MF_Little_Endian & mflags) == 0) {
				for (i = 0; i < 8; i++) {
					*(ieee_fp64_t *)(ptr + i*8) =
					    *(ieee_fp64_t *)(regp + i);
				}
			} else {
				for (i = 0; i < 8; i++) {
					*(uint64_t *)(ptr + i*8) =
					    sparcv9_invert_endianess(regp + i, 8);
				}
			}
			break;
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
		case MA_Size128:
			ASSERT((MF_Little_Endian & mflags) == 0);
			*(ieee_fp128_t *)ptr = *(ieee_fp128_t *)regp;
			break;
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */
		}
		break;
	case MA_LdSt:
		DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdSt with LE - \n"); );
		val = host_ldstub(ptr, reg2, *regp);
		break;

	case MA_Swap:
		DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_Swap with LE - \n"); );
		if (MF_Little_Endian & mflags) {
			val = sparcv9_invert_endianess(regp, (1 << size));
		} else
			val = *regp;
		val = host_swap((uint32_t *)ptr, val);
		if (MF_Little_Endian & mflags) {
			val = sparcv9_invert_endianess(&val, (1 << size));
		}
		break;

	case MA_CAS:
		DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_CAS with LE - \n"); );
		if (MF_Little_Endian & mflags) {
			val = sparcv9_invert_endianess(regp, (1 << size));
			cval = sparcv9_invert_endianess(&reg2, (1 << size));
		} else {
			val = *regp;
			cval = reg2;
		}
		if (MA_Size32 == size)
			val = host_cas32((uint32_t *)ptr, cval, val);
		else
			val = host_cas64((uint64_t *)ptr, cval, val);
		if (MF_Little_Endian & mflags) {
			val = sparcv9_invert_endianess(&val, (1 << size));
		}

		if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
			*regp = val;
		break;
	case MA_LdDouble:
		switch (size) {
		case MA_Size64:	/* standard sparc LDD instruction */
			regp[0] = (uint32_t)(val >> 32);
			regp[1] = (uint32_t)val;
			if (MF_Little_Endian & mflags) {
				DBGLE( lprintf(sp->gid, "SunSPARC::: MA_LdDouble with LE - val=0x%llx count=0x%x\n",
				    val, (1 << size)); );
				regp[0] = sparcv9_invert_endianess(&regp[0], (1 << size) >> 1);
				regp[1] = sparcv9_invert_endianess(&regp[1], (1 << size) >> 1);
			}
			sp->intreg[Reg_sparcv9_g0] = 0;	/* regp might be %g0 */
			break;

		case MA_Size128:
			host_atomic_get128be((uint64_t *)ptr, regp, &regp[1]);
			if (MF_Little_Endian & mflags) {
				DBGLE(lprintf(sp->gid, "SunSPARC::: MA_ldDouble with LE - val=0x%llx,0x%llx count=0x%x\n",
				    regp[0], regp[1], (1 << size)); );
				regp[0] = sparcv9_invert_endianess(&regp[0], (1 << size) >> 1);
				regp[1] = sparcv9_invert_endianess(&regp[1], (1 << size) >> 1);
			}
			sp->intreg[Reg_sparcv9_g0] = 0;	/* regp might be %g0 */
			break;

		default:
			fatal("ss_memory_asi_access: internal error - "
			    "illegal size for MA_LdDouble");
		}
		break;
	case MA_StDouble:
		ASSERT(size == MA_Size64);
		if (MF_Little_Endian & mflags) {
			DBGLE(lprintf(sp->gid, "SunSPARC::: MA_StDouble with LE - reven=0x%x rodd=0x%x count=0x%x\n",
			    (uint32_t)regp[0], (uint32_t)regp[1], (1 << size)); );
			reven = (uint32_t)sparcv9_invert_endianess(&regp[0], (1 << size) >> 1);
			rodd = (uint32_t)sparcv9_invert_endianess(&regp[1], (1 << size) >> 1);
		} else {
			reven = (uint32_t)regp[0];
			rodd = (uint32_t)regp[1];
		}
		val = ((uint64_t)reven << 32) | ((uint32_t)rodd);
		break;
	case MA_LdFSR:
		ASSERT( MA_Size32 == size );
		if (MF_Little_Endian & mflags)
			val = sparcv9_invert_endianess(&val, (1 << size));
		v9_set_fsr_lower(sp, val);
		break;

	case MA_LdXFSR:
		ASSERT( MA_Size64 == size );
		if (MF_Little_Endian & mflags)
			val = sparcv9_invert_endianess(&val, (1 << size));
		break;

	case MA_StFSR:
		ASSERT( MA_Size32 == size );
		if (MF_Little_Endian & mflags)
			val = sparcv9_invert_endianess(&val, (1 << size));
		*(uint32_t *)ptr = val & MASK64(31,0);
		/* FTT is cleared on read of FSR */
		sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
		DBGFSR( lprintf(sp->gid, "stfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n",
		    sp->pc, v9_get_fsr(sp), val); );
		break;

	case MA_StXFSR:
		ASSERT( MA_Size64 == size );
		if (MF_Little_Endian & mflags)
			val = sparcv9_invert_endianess(&val, (1 << size));
		/* FTT is cleared on read of FSR */
		sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
		DBGFSR( lprintf(sp->gid, "stxfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n",
		    sp->pc, v9_get_fsr(sp), val); );
		break;
	}
	/*
	 * Finally go get the next instruction
	 */
	DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: getting the next instr.\n"); );
}
/*
 * Insert the entry using the mmup->tag_access_reg and the supplied data field
 * into the TLB. Being careful of course to first invalidate any entries which
 * conflict with the page we're trying to insert.
 * Returns false on failure, true on success ... failure implies
 * a data access exception for the caller - which it must generate.
 */
bool_t ss_tlb_insert(simcpu_t * sp, ss_mmu_t * mmup, ss_tlb_t * tlbp, uint_t partid,
    bool_t is_real, uint64_t data, uint_t idx)
{
	matchcontext_t match_context;
	bool_t need_flush = false;

	/* FIXME: what does niagara do if you try to load an invalid TTE ? */
	if (idx == SS_TLB_LRU && ((data >> SUN4U_TTED_V_BIT) & 1) == 0) {
		EXEC_WARNING(("tlb_insert 0x%llx (@pc=0x%llx, icount=%llu) TTE invalid",
		    data, sp->pc, ICOUNT(sp)));
	}

	size = ((data >> (48-2)) & 0x4) | ((data >> 61) & 0x3);

	/* figure out the useful info about our page to insert */
	shift = SUN4V_PAGE_SIZE_SHIFT(size);
	if (shift == 0) return false;
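	/*
	 * Illustrative note (the standard sun4v page-size encodings, assumed
	 * here for SUN4V_PAGE_SIZE_SHIFT): size 0 -> shift 13 (8KB),
	 * 1 -> 16 (64KB), 2 -> 19 (512KB), 3 -> 22 (4MB), 4 -> 25 (32MB),
	 * 5 -> 28 (256MB); any other encoding yields 0 and is rejected above.
	 */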
	/*
	 * This is VERY important:
	 * The tag access register need NOT contain a correctly aligned tag entry
	 * for the given page size. So it is REALLY IMPORTANT when forming the TLB
	 * entry tag field that we correctly mask off the lower bits corresponding to
	 * the selected page size. This is especially important because we use this
	 * value to compute a va-pa offset.
	 * Note: we do a similar mask operation later when using the PA to compute the
	 * offset value we create.
	 */
	tag = mmup->tag_access_reg & MASK64(63,shift);
	tag_context = mmup->tag_access_reg & MASK64(12,0);
	match_context = is_real ? SS_TLB_REAL_CONTEXT : tag_context;

	RW_wrlock(&tlbp->rwlock);

	/*
	 * First lets look for potentially matching pages we may have to
	 * de-map first. We demap the old entry if it incorporates our new
	 * page, or vice-versa.
	 */
	tep = &(tlbp->tlb_entryp[0]);
	for (i = tlbp->nentries; i > 0; i--, tep++) {
		xor = tep->tag_pfn ^ tag;
		if ( ( (xor >> tep->match_shift) == 0 || (xor >> shift) == 0 ) &&
		    tep->match_context == match_context && tep->partid == partid ) {
			/* matching entry - put back on the free list */
			ss_tlb_unhash(tlbp, tep);
			ss_free_tlb_entry( tlbp, tep );
		}
	}

	DBGERR( lprintf(sp->gid, "ss_tlb_insert(): errorp->itep=%x"
	    " errorp->dtep=%x tep=%x\n",
	    sp->errorp->itep, sp->errorp->dtep, tep); );
	tlb_entry_error_match(sp, mmup, tep);
	/*
	 * Now we need to pick an entry for the one we wish to insert
	 */
	tep = &tlbp->tlb_entryp[idx];
	if (tep->hashidx != -1) {
		ss_tlb_unhash(tlbp, tep);
	}
	ss_tlb_unfree(tlbp, tep);

	if (tep == (tlb_entry_t *)0) {
		/* OK replacement required - clobber a valid entry */
		/* FIXME: What is Niagara's replacement policy ? */
#if SS_TLB_REPLACE_RANDOM /* { */
		do {
			i = random() % tlbp->nentries;
			tep = &(tlbp->tlb_entryp[i]);
		} while (tep->flags & SS_TLB_FLAG_LOCKED);
#elif SS_TLB_REPLACE_RROBIN /* } { */
		do {
			if (i >= tlbp->nentries) i = 0;	/* wrap */
			tep = &(tlbp->tlb_entryp[i]);
			if (i == tlbp->last_replaced) {
				/*
				 * if all entries are locked, replace the final TLB entry
				 */
				EXEC_WARNING(("all TLB entries are locked, the final TLB entry %d is replaced", i));
				tep = &(tlbp->tlb_entryp[i]);
				break;
			}
		} while (tep->flags & SS_TLB_FLAG_LOCKED);
#else /* } { */
#error Need to define TLB replacement alg
#endif /* } */

		/* put back on the free list */
		ss_tlb_unhash(tlbp, tep);
		ss_free_tlb_entry( tlbp, tep );
	}

	/* free entry must be invalid ! */
	ASSERT(!(tep->data & (1ull << SUN4U_TTED_V_BIT)));
	tlbp->freep = tep->nextp;
	/* create the new entry */
	tep->match_context = match_context;
	tep->match_shift = shift;
	tep->tag_context = tag_context;

	/* Note: variable size mask again based on page size */
	tep->pa_offset = (data & MASK64(39,shift)) - tag;

	DBGMMU( lprintf(sp->gid, "tlb_insert: %c-TLB: tte=%llx [ sz=0x%x l=%d cp=%d cv=%d e=%d p=%d w=%d ]\n",
	    mmup->is_immu ? 'I' : 'D', data, size,
	    (uint_t)((data >> 6) & 1LL), (uint_t)((data >> 5) & 1LL),
	    (uint_t)((data >> 4) & 1LL), (uint_t)((data >> 3) & 1LL),
	    (uint_t)((data >> 2) & 1LL), (uint_t)((data >> 1) & 1LL));
	lprintf(sp->gid, "\tpart=0x%x tag=%p ctx=%x/%x offset=%llx\n",
	    partid, tag, tag_context, match_context, tep->pa_offset); );
	/* niagara doesn't have read and exec bits */
	if (mmup->is_immu)
		tep->flags = SS_TLB_FLAG_EXEC;
	else
		tep->flags = SS_TLB_FLAG_READ;
	if ( ((data >> 1) & 1) ) tep->flags |= SS_TLB_FLAG_WRITE;
	if ( ((data >> 2) & 1) ) tep->flags |= SS_TLB_FLAG_PRIV;
	if ( ((data >> 6) & 1) ) tep->flags |= SS_TLB_FLAG_LOCKED;
	if ( (data & BIT(39)) == 0 ) tep->flags |= SS_TLB_FLAG_CP;
	if ( ((data >> 3) & 1) ) tep->flags |= SS_TLB_FLAG_E;
	if ( ((data >> 60) & 1) ) tep->flags |= SS_TLB_FLAG_NFO;
	if ( ((data >> 59) & 1) ) tep->flags |= SS_TLB_FLAG_IE;
	/* Finally insert the new entry into the hash table for the TLB */
	/* Hash uses match_context so it skews real->phys entries away from context 0 */
	i = tag >> SS_MAX_PAGE_SIZE_BITS;
	i += match_context + partid;

	if (((data >> SUN4U_TTED_V_BIT) & 1) != 0) {
		tep->hashidx = i;	/* to help with unhooking later */
		tep->nextp = tlbp->hash[i].ptr;
		tlbp->hash[i].ptr = tep;
	} else {
		tep->nextp = tlbp->freep;
		tlbp->freep = tep;
	}

	RW_unlock(&tlbp->rwlock);

	sp->xicache_trans_flush_pending = true;
	sp->xdcache_trans_flush_pending = true;
	ss_tlb_flush_shares(sp, tlbp, mmup->is_immu);

	return true;
}
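/*
 * Illustrative sketch (not part of the simulator): once an entry is
 * inserted, a lookup that hits it translates with a single add, because
 * pa_offset was computed as (page-aligned PA - page-aligned VA tag):
 *
 *	pa = va + tep->pa_offset;	// valid for any va inside the page
 */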
/*
 * Dumb function to shuffle the sun4v TTE format into the sun4u
 * one used internally by Niagara.
 */
#define SHIFT_FIELD(_data, _hi, _lo, _new) (((((uint64_t)(_data))&MASK64(_hi,_lo))>>(_lo))<<(_new))

uint64_t niagara_shuffle_sun4v_format(uint64_t data)
{
	uint64_t val;

	val = data & MASK64(63,63);		/* valid bit */
	val |= SHIFT_FIELD(data, 62,62, 60);	/* NFO */
	val |= SHIFT_FIELD(data, 61,61, 6);	/* locked */
	val |= SHIFT_FIELD(data, 39,13, 13);	/* pa */
	val |= SHIFT_FIELD(data, 12,12, 59);	/* invert endianness */
	val |= SHIFT_FIELD(data, 11,11, 3);	/* side effect */
	val |= SHIFT_FIELD(data, 10, 9, 4);	/* cacheable bits */
	val |= SHIFT_FIELD(data,  8, 8, 2);	/* privileged */
	val |= SHIFT_FIELD(data,  6, 6, 1);	/* writeable */
	val |= SHIFT_FIELD(data,  2, 2, 48);	/* size[2] */
	val |= SHIFT_FIELD(data,  1, 0, 61);	/* size[1:0] */

	return val;
}
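/*
 * Illustrative self-check (never called by the simulator; added only to
 * spell out two of the field moves above): a sun4v TTE with only the V bit
 * set maps to sun4u bit 63 unchanged, and the sun4v size[1:0] field
 * (bits 1:0) lands in sun4u bits 62:61.
 */
static void niagara_shuffle_sun4v_format_selftest(void)
{
	/* V bit passes through unchanged */
	ASSERT(niagara_shuffle_sun4v_format(1ULL << 63) == (1ULL << 63));
	/* size[1:0] == 3 (sun4v bits 1:0) -> sun4u bits 62:61 */
	ASSERT(niagara_shuffle_sun4v_format(0x3ULL) == (0x3ULL << 61));
}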
char * ss_ssi_reg_name(uint_t reg)
{
	char * s;

	switch (reg) {
	case NI_SSI_TIMEOUT: s = "ssi_timeout"; break;
	case NI_SSI_LOG: s = "ssi_log"; break;
	default: s = "Illegal ssi register"; break;
	}
	return s;
}

char * ss_jbi_reg_name(uint_t reg)
{
	char * s;

	switch (reg) {
	case NI_JBI_CONFIG1: s = "jbi_config1"; break;
	case NI_JBI_CONFIG2: s = "jbi_config2"; break;
	case NI_JBI_INT_MRGN: s = "jbi_int_mrgn"; break;
	case NI_JBI_DEBUG: s = "jbi_debug"; break;
	case NI_JBI_DEBUG_ARB: s = "jbi_debug_arb"; break;
	case NI_JBI_PERF_CTL: s = "jbi_perf_ctl"; break;
	case NI_JBI_PERF_CNT: s = "jbi_perf_cnt"; break;
	case NI_JBI_ERR_INJECT: s = "jbi_err_inject"; break;
	case NI_JBI_ERR_CONFIG: s = "jbi_err_config"; break;
	case NI_JBI_ERROR_LOG: s = "jbi_error_log"; break;
	case NI_JBI_ERROR_OVF: s = "jbi_error_ovf"; break;
	case NI_JBI_LOG_ENB: s = "jbi_log_enb"; break;
	case NI_JBI_SIG_ENB: s = "jbi_sig_enb"; break;
	case NI_JBI_LOG_ADDR: s = "jbi_log_addr"; break;
	case NI_JBI_LOG_CTRL: s = "jbi_log_ctrl"; break;
	case NI_JBI_LOG_DATA0: s = "jbi_log_data0"; break;
	case NI_JBI_LOG_DATA1: s = "jbi_log_data1"; break;
	case NI_JBI_LOG_PAR: s = "jbi_log_par"; break;
	case NI_JBI_LOG_NACK: s = "jbi_log_nack"; break;
	case NI_JBI_LOG_ARB: s = "jbi_log_arb"; break;
	case NI_JBI_L2_TIMEOUT: s = "jbi_l2_timeout"; break;
	case NI_JBI_ARB_TIMEOUT: s = "jbi_arb_timeout"; break;
	case NI_JBI_TRANS_TIMEOUT: s = "jbi_trans_timeout"; break;
	case NI_JBI_INTR_TIMEOUT: s = "jbi_intr_timeout"; break;
	case NI_JBI_MEMSIZE: s = "jbi_memsize"; break;
	default: s = "Illegal jbi register"; break;
	}
	return s;
}
char * ss_jbus_reg_name(uint_t reg)
{
	char * s;

	switch (reg) {
	case NI_J_INT_DATA0: s = "j_int_data0"; break;
	case NI_J_INT_DATA1: s = "j_int_data1"; break;
	case NI_J_INT_ADATA0: s = "j_int_adata0"; break;
	case NI_J_INT_ADATA1: s = "j_int_adata1"; break;
	case NI_J_INT_BUSY: s = "j_int_busy"; break;
	case NI_J_INT_ABUSY: s = "j_int_abusy"; break;
	default: s = "Illegal jbus register"; break;
	}
	return s;
}

char * ss_iob_reg_name(uint_t reg)
{
	char * s;

	switch (reg) {
	case NI_INT_MAN0: s = "int_man0"; break;
	case NI_INT_MAN1: s = "int_man1"; break;
	case NI_INT_MAN2: s = "int_man2"; break;
	case NI_INT_MAN3: s = "int_man3"; break;
	case NI_INT_CTL0: s = "int_ctl0"; break;
	case NI_INT_CTL1: s = "int_ctl1"; break;
	case NI_INT_CTL2: s = "int_ctl2"; break;
	case NI_INT_CTL3: s = "int_ctl3"; break;
	case NI_INT_VEC_DIS: s = "int_vec_dis"; break;
	case NI_J_INT_VEC: s = "j_int_vec"; break;
	case NI_RSET_STAT: s = "rset_stat"; break;
	case NI_TM_STAT_CTL: s = "tm_stat_ctl"; break;
	case NI_PROC_SER_NUM: s = "proc_ser_num"; break;
	case NI_CORE_AVAIL: s = "core_avail"; break;
	case NI_IOB_FUSE: s = "iob_fuse"; break;
	case NI_INT_MRGN_REG: s = "int_mrgn_reg"; break;
	case NI_L2_VIS_CONTROL: s = "l2_vis_control"; break;
	case NI_L2_VIS_MASK_A: s = "l2_vis_mask_a"; break;
	case NI_L2_VIS_MASK_B: s = "l2_vis_mask_b"; break;
	case NI_L2_VIS_COMPARE_A: s = "l2_vis_compare_a"; break;
	case NI_L2_VIS_COMPARE_B: s = "l2_vis_compare_b"; break;
	case NI_L2_TRIG_DELAY: s = "l2_trig_delay"; break;
	case NI_IOB_VIS_SELECT: s = "iob_vis_select"; break;
	case NI_DB_ENET_CONTROL: s = "db_enet_control"; break;
	case NI_DB_ENET_IDLEVAL: s = "db_enet_idleval"; break;
	case NI_DB_JBUS_CONTROL: s = "db_jbus_control"; break;
	case NI_DB_JBUS_MASK0: s = "db_jbus_mask0"; break;
	case NI_DB_JBUS_MASK1: s = "db_jbus_mask1"; break;
	case NI_DB_JBUS_MASK2: s = "db_jbus_mask2"; break;
	case NI_DB_JBUS_MASK3: s = "db_jbus_mask3"; break;
	case NI_DB_JBUS_COMPARE0: s = "db_jbus_compare0"; break;
	case NI_DB_JBUS_COMPARE1: s = "db_jbus_compare1"; break;
	case NI_DB_JBUS_COMPARE2: s = "db_jbus_compare2"; break;
	case NI_DB_JBUS_COMPARE3: s = "db_jbus_compare3"; break;
	case NI_DB_JBUS_COUNT: s = "db_jbus_count"; break;
	default: s = "Illegal IOB register"; break;
	}
	return s;
}
char * ss_clock_reg_name(uint_t reg)
{
	char * s;

	switch (reg) {
	case SS_CLOCK_DIVIDER: s = "divider"; break;
	case SS_CLOCK_CONTROL: s = "control"; break;
	case SS_CLOCK_DLL_CONTROL: s = "dll_control"; break;
	case SS_CLOCK_JBUS_SYNC: s = "jbus_sync"; break;
	case SS_CLOCK_DLL_BYPASS: s = "dll_bypass"; break;
	case SS_CLOCK_DRAM_SYNC: s = "dram_sync"; break;
	case SS_CLOCK_VERSION: s = "version"; break;
	default: s = "Illegal clock register"; break;
	}
	return s;
}

char * ss_l2_ctrl_reg_name(uint_t reg)
{
	char * s;

	switch (reg) {
	case SS_L2_DIAG_DATA: s = "diag_data"; break;
	case SS_L2_DIAG_TAG: s = "diag_tag"; break;
	case SS_L2_DIAG_VUAD: s = "diag_vuad"; break;
	case SS_L2_CONTROL: s = "control"; break;
	case SS_L2_ERROR_ENABLE: s = "error_enable"; break;
	case SS_L2_ERROR_STATUS: s = "error_status"; break;
	case SS_L2_ERROR_ADDRESS: s = "error_address"; break;
	case SS_L2_ERROR_INJECT: s = "error_inject"; break;
	default: s = "Illegal L2 control register"; break;
	}
	return s;
}
char * ss_dram_ctrl_reg_name(uint_t reg)
{
	char * s;

	switch (reg) {
	case SS_DRAM_CAS_ADDR_WIDTH: s = "cas_addr_width"; break;
	case SS_DRAM_CAS_LAT: s = "cas_lat"; break;
	case SS_DRAM_CHANNEL_DISABLED: s = "channel_disabled"; break;
	case SS_DRAM_DBG_TRG_EN: s = "dbg_trg_en"; break;
	case SS_DRAM_DIMM_INIT: s = "dimm_init"; break;
	case SS_DRAM_DIMM_PRESENT: s = "dimm_present"; break;
	case SS_DRAM_DIMM_STACK: s = "dimm_stack"; break;
	case SS_DRAM_DRAM_TRCD: s = "dram_trcd"; break;
	case SS_DRAM_ERROR_ADDRESS: s = "error_address"; break;
	case SS_DRAM_ERROR_COUNTER: s = "error_counter"; break;
	case SS_DRAM_ERROR_INJECT: s = "error_inject"; break;
	case SS_DRAM_ERROR_LOCATION: s = "error_location"; break;
	case SS_DRAM_ERROR_STATUS: s = "error_status"; break;
	case SS_DRAM_EXT_WR_MODE1: s = "ext_wr_mode1"; break;
	case SS_DRAM_EXT_WR_MODE2: s = "ext_wr_mode2"; break;
	case SS_DRAM_EXT_WR_MODE3: s = "ext_wr_mode3"; break;
	case SS_DRAM_FAILOVER_MASK: s = "failover_mask"; break;
	case SS_DRAM_FAILOVER_STATUS: s = "failover_status"; break;
	case SS_DRAM_HW_DMUX_CLK_INV: s = "hw_dmux_clk_inv"; break;
	case SS_DRAM_INIT_STATUS: s = "init_status"; break;
	case SS_DRAM_MODE_WRITE_STATUS: s = "mode_write_status"; break;
	case SS_DRAM_OPEN_BANK_MAX: s = "open_bank_max"; break;
	case SS_DRAM_PAD_EN_CLK_INV: s = "pad_en_clk_inv"; break;
	case SS_DRAM_PERF_COUNT: s = "perf_count"; break;
	case SS_DRAM_PERF_CTL: s = "perf_ctl"; break;
	case SS_DRAM_PRECHARGE_WAIT: s = "precharge_wait"; break;
	case SS_DRAM_PROG_TIME_CNTR: s = "prog_time_cntr"; break;
	case SS_DRAM_RANK1_PRESENT: s = "rank1_present"; break;
	case SS_DRAM_RAS_ADDR_WIDTH: s = "ras_addr_width"; break;
	case SS_DRAM_REFRESH_COUNTER: s = "refresh_counter"; break;
	case SS_DRAM_REFRESH_FREQ: s = "refresh_freq"; break;
	case SS_DRAM_SCRUB_ENABLE: s = "scrub_enable"; break;
	case SS_DRAM_SCRUB_FREQ: s = "scrub_freq"; break;
	case SS_DRAM_SEL_LO_ADDR_BITS: s = "sel_lo_addr_bits"; break;
	case SS_DRAM_SW_DV_COUNT: s = "sw_dv_count"; break;
	case SS_DRAM_TIWTR: s = "tiwtr"; break;
	case SS_DRAM_TMRD: s = "tmrd"; break;
	case SS_DRAM_TRAS: s = "tras"; break;
	case SS_DRAM_TRC: s = "trc"; break;
	case SS_DRAM_TRFC: s = "trfc"; break;
	case SS_DRAM_TRP: s = "trp"; break;
	case SS_DRAM_TRRD: s = "trrd"; break;
	case SS_DRAM_TRTP: s = "trtp"; break;
	case SS_DRAM_TRTW: s = "trtw"; break;
	case SS_DRAM_TWR: s = "twr"; break;
	case SS_DRAM_TWTR: s = "twtr"; break;
	case SS_DRAM_WAIR_CONTROL: s = "wair_control"; break;
	default: s = "Illegal DRAM control register"; break;
	}
	return s;
}
static void ss_ssi_init(config_dev_t * config_devp)
{
	npp = (ss_proc_t *)config_devp->devp;
}

static void ss_jbi_init(config_dev_t * config_devp)
{
	npp = (ss_proc_t *)config_devp->devp;

	jbip->config1 = JBI_PORT_LOCN(0x7f) | JBI_PORT_PRES(0x3) | JBI_MID(0x3e);
	jbip->config2 = JBI_IQ_HIGH(0x7);
	jbip->trans_timeout = 0x0;
	jbip->intr_timeout = 0x0;
}

static void ss_jbus_init(config_dev_t * config_devp)
{
	npp = (ss_proc_t *)config_devp->devp;

	for (i = 0; i < IOB_JBUS_TARGETS; i++) {
		jbusp->j_int_data0[i] = 0x00;
		jbusp->j_int_data1[i] = 0x00;
		jbusp->j_int_busy[i] = 0x00;
	}
	pthread_mutex_init(&jbusp->lock, NULL);
}
static void ss_iob_init(config_dev_t * config_devp)
{
	uint64_t avail, cores, device;

	npp = (ss_proc_t *)config_devp->devp;

	pthread_mutex_init(&iobp->iob_lock, NULL);

	/* IOB Interrupt Registers section 7.3 of PRM 1.2 */
	for (device = 0; device < IOB_DEV_MAX; device++) {
		iobp->int_man[device] = 0x0000;
		iobp->int_ctl[device] = IOB_INT_CTL_MASK;
	}
	iobp->int_vec_dis = 0x0000;
	iobp->j_int_vec = 0x0000;
	pthread_mutex_init(&iobp->int_vec_lock, NULL);	/* FIXME: to go away ! */

	/* Reset Status Register section 11.2 of PRM 1.2 */
	iobp->rset_stat = 0x0004;	/* POR bit */

	/* CPU throttle control section 16.1 of PRM 1.2 */
	iobp->tm_stat_ctl = 0x0000;

	/* EFUSE Registers section 18.8 of PRM 1.2 */
	iobp->proc_ser_num = 0x0000;

	/* Internal Margin Register section 19.1.3 of PRM 1.2 */
	iobp->int_mrgn_reg = 0x0000;

	/* IOB Visibility Port Support section 19.2 of PRM 1.2 */
	iobp->l2_vis_control = 0x0000;
	iobp->l2_vis_mask_a = 0x0000;
	iobp->l2_vis_mask_b = 0x0000;
	iobp->l2_vis_compare_a = 0x0000;
	iobp->l2_vis_compare_b = 0x0000;
	iobp->l2_trig_delay = 0x0000;
	iobp->iob_vis_select = 0x0000;
	iobp->db_enet_control = 0x0000;
	iobp->db_enet_idleval = 0x0000;
	iobp->db_jbus_control = 0x0000;
	iobp->db_jbus_mask0 = 0x0000;
	iobp->db_jbus_mask1 = 0x0000;
	iobp->db_jbus_mask2 = 0x0000;
	iobp->db_jbus_mask3 = 0x0000;
	iobp->db_jbus_compare0 = 0x0000;
	iobp->db_jbus_compare1 = 0x0000;
	iobp->db_jbus_compare2 = 0x0000;
	iobp->db_jbus_compare3 = 0x0000;
	iobp->db_jbus_count = 0x0000;
}
static void ss_clock_init(config_dev_t * config_devp)
{
	npp = (ss_proc_t *)config_devp->devp;

	/* Clock Unit section 11.1 of PRM 1.2 */
	clockp->divider = 0x80200200101004;
	clockp->control = 0x0000;
	clockp->dll_control = 0x0000;
	clockp->dll_bypass = 0x0000;
	clockp->jbus_sync = 0x0000;
	clockp->dram_sync = 0x0000;
	clockp->version = 0x0000;
}
static void ss_l2_ctl_init(config_dev_t * config_devp)
{
	npp = (ss_proc_t *)config_devp->devp;

	for (bank = 0; bank < npp->num_l2banks; bank++) {
		l2p->control[bank] = L2_DIS;
		l2p->bist_ctl[bank] = 0x0;
		l2p->error_enable[bank] = 0x0;
		l2p->error_status[bank] = 0x0;
		l2p->error_address[bank] = 0x0;
		l2p->error_inject[bank] = 0x0;
	}

	l2p->diag_datap = Xmalloc(L2_DATA_SIZE);
	l2p->diag_tagp = Xmalloc(L2_TAG_SIZE);
	l2p->diag_vuadp = Xmalloc(L2_VUAD_SIZE);

	for (idx = 0; idx < L2_DATA_SIZE/8; idx++) {
		l2p->diag_datap[idx] = 0xdeadbeef;
	}
	for (idx = 0; idx < L2_TAG_SIZE/8; idx++) {
		l2p->diag_tagp[idx] = 0xdeadbeef;
	}
	for (idx = 0; idx < L2_VUAD_SIZE/8; idx++) {
		l2p->diag_vuadp[idx] = 0xdeadbeef;
	}
}
static void ss_dram_ctl_init(config_dev_t * config_devp)
{
	npp = (ss_proc_t *)config_devp->devp;

	for (bidx = 0; bidx < npp->num_mbanks; bidx++) {
		/* DRAM controller section 15.5 of PRM 1.2 */
		dbp = &(npp->mbankp[bidx]);

		dbp->cas_addr_width = 0xb;
		dbp->ras_addr_width = 0xf;
		dbp->scrub_freq = 0xfff;
		dbp->refresh_freq = 0x514;
		dbp->refresh_counter = 0x0;
		dbp->scrub_enable = 0x0;
		dbp->precharge_wait = 0x55;
		dbp->ext_wr_mode2 = 0x0;
		dbp->ext_wr_mode1 = 0x400;
		dbp->ext_wr_mode3 = 0x0;
		dbp->wair_control = 0x1;
		dbp->rank1_present = 0x0;
		dbp->channel_disabled = 0x0;
		dbp->sel_lo_addr_bits = 0x0;
		dbp->hw_dmux_clk_inv = 0x0;
		dbp->pad_en_clk_inv = 0x3 << 2;
		dbp->mode_write_status = 0x0;
		dbp->dimm_present = 0x3;
		dbp->failover_status = 0x0;
		dbp->failover_mask = 0x0;

		/* Performance counter section 10.3 of PRM 1.1 */

		/* Error handling section 12.9 of PRM 1.1 */
		dbp->error_status = 0x0;	/* FIXME: only bits 56-16 reset on POR .. everything else to be preserved */
		dbp->error_address = 0x0;	/* FIXME: bits 39-4 to be preserved across POR */
		dbp->error_inject = 0x0;
		dbp->error_counter = 0x0;	/* FIXME: bits 17-0 preserved across reset */
		dbp->error_location = 0x0;	/* FIXME: bits 35-0 preserved across reset */

		/* Power management section 16.2 of PRM 1.1 */
		dbp->open_bank_max = 0x1ffff;
		dbp->prog_time_cntr = 0xffff;

		/* Hardware debug section 19.1 of PRM 1.1 */
		dbp->dbg_trg_en = (0x1 << 7) | (0x1);
	}
}
static bool_t
ss_ssi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off,
    maccess_t op, uint64_t * regp)
{
	if (MA_ldu64 != op && MA_st64 != op) return false;

	npp = (ss_proc_t *)config_addrp->config_devp->devp;

	if (0LL != (val & ~(MASK64(24,0)))) goto write_reserved;
	ssip->timeout &= ~(MASK64(24,0));
	if (0LL != (val & ~(MASK64(1,0)))) goto write_reserved;
	/* illegal reg - an error */

write_reserved:
	EXEC_WARNING( ("Attempted write to reserved field in ssi:"
	    "Write 0x%llx to register %s (offset 0x%x)",
	    val, ss_ssi_reg_name(reg), reg) );

	val = ssip->timeout & MASK64(24,0);
	val = ssip->log & MASK64(1,0);
	/* illegal reg - an error */

	if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
		*regp = val;

	return true;
}
static bool_t
ss_jbi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off,
    maccess_t op, uint64_t * regp)
{
	if (MA_ldu64 != op && MA_st64 != op) return false;

	npp = (ss_proc_t *)config_addrp->config_devp->devp;

	if (npp->rust_jbi_stores &&
	    op == MA_st64 && (off & 0x7000000) == 0x7000000)
		return true;	/* FIXME!! ignore write to reserved bits for BRINGUP ONLY */

#define ASSIGN_JBI(_n, _m) do { \
		if (0LL != (val & ~(_m))) goto write_reserved; \
		jbip->_n |= (val & (_m)); \
	} while (0)

#define ASSIGN_W1C_JBI(_n, _m) do { \
		jbip->_n &= (~val | ~(_m)); \
	} while (0)
	/* JBUS Interface section 14.1 of PRM 1.4 */
	ASSIGN_JBI( config1, MASK64(50,44)|MASK64(39,38)|
	    MASK64(31,22)|MASK64(1,0) );
	ASSIGN_JBI( config2, MASK64(30,28)|MASK64(26,24)|
	    MASK64(21,20)|MASK64(17,8)|MASK64(3,0) );
	ASSIGN_JBI( int_mrgn, MASK64(12,8)|MASK64(4,0) );
	ASSIGN_JBI( debug, MASK64(0,0) );
	ASSIGN_JBI( debug_arb, MASK64(24,24)|MASK64(22,18) );
	ASSIGN_JBI( perf_ctl, MASK64(7,0) );
	ASSIGN_JBI( perf_cnt, MASK64(63,0) );
	ASSIGN_JBI( err_inject, MASK64(30,0) );
	ASSIGN_JBI( err_config, MASK64(4,2) );
	ASSIGN_W1C_JBI( error_log, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	ASSIGN_W1C_JBI( error_ovf, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	ASSIGN_JBI( log_enb, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	ASSIGN_JBI( sig_enb, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	ASSIGN_W1C_JBI( log_nack, MASK64(31,0) );
	ASSIGN_JBI( l2_timeout, MASK64(31,0) );
	ASSIGN_JBI( arb_timeout, MASK64(31,0) );
	case NI_JBI_TRANS_TIMEOUT:
		ASSIGN_JBI( trans_timeout, MASK64(31,0) );
	case NI_JBI_INTR_TIMEOUT:
		ASSIGN_JBI( intr_timeout, MASK64(31,0) );
	ASSIGN_JBI( memsize, MASK64(37,30) );

	/* illegal reg - an error */

write_reserved:
	EXEC_WARNING( ("Attempted write to reserved field in jbi:"
	    "Write 0x%llx to register %s (offset 0x%x)",
	    val, ss_jbi_reg_name(reg), reg) );

#define RETRIEVE_JBI(_n, _m) do { val = ((jbip->_n) & (_m)); } while (0)

	/* JBUS Interface section 14.1 of PRM 1.4 */
	RETRIEVE_JBI( config1, MASK64(63,0) );
	RETRIEVE_JBI( config2, MASK64(63,0) );
	RETRIEVE_JBI( int_mrgn, MASK64(12,8)|MASK64(4,0) );
	RETRIEVE_JBI( debug, MASK64(63,0) );
	RETRIEVE_JBI( debug_arb, MASK64(63,0) );
	RETRIEVE_JBI( perf_ctl, MASK64(7,0) );
	RETRIEVE_JBI( perf_cnt, MASK64(63,0) );
	RETRIEVE_JBI( err_inject, MASK64(30,0) );

	/* JBI Error Registers section 12.12.2 of PRM 1.4 */
	RETRIEVE_JBI( err_config, MASK64(4,2) );
	RETRIEVE_JBI( error_log, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	RETRIEVE_JBI( error_ovf, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	RETRIEVE_JBI( log_enb, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	RETRIEVE_JBI( sig_enb, MASK64(28,24)|MASK64(17,8)|
	    MASK64(5,4)|MASK64(2,0) );
	RETRIEVE_JBI( log_addr, MASK64(63,0) );
	RETRIEVE_JBI( log_ctrl, MASK64(63,0) );
	RETRIEVE_JBI( log_data0, MASK64(63,0) );
	RETRIEVE_JBI( log_data1, MASK64(63,0) );
	RETRIEVE_JBI( log_par, MASK64(32,32)|MASK64(25,20)|
	    MASK64(13,8)|MASK64(6,0) );
	RETRIEVE_JBI( log_nack, MASK64(31,0) );
	RETRIEVE_JBI( log_arb, MASK64(34,32)|MASK64(26,24)|
	    MASK64(22,16)|MASK64(14,8)|MASK64(6,0) );
	RETRIEVE_JBI( l2_timeout, MASK64(31,0) );
	RETRIEVE_JBI( arb_timeout, MASK64(31,0) );
	case NI_JBI_TRANS_TIMEOUT:
		RETRIEVE_JBI( trans_timeout, MASK64(31,0) );
	case NI_JBI_INTR_TIMEOUT:
		RETRIEVE_JBI( intr_timeout, MASK64(31,0) );
	RETRIEVE_JBI( memsize, MASK64(37,30) );

	/* illegal reg - an error */

	if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
		*regp = val;

	return true;
}
static bool_t
ss_jbus_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off,
    maccess_t op, uint64_t * regp)
{
	if (MA_ldu64 != op && MA_st64 != op) return false;

	npp = (ss_proc_t *)config_addrp->config_devp->devp;
	nsp = v9p->impl_specificp;

#define ASSIGN_JBUS(_n, _m) do { \
		if (0LL != (val & ~(_m))) goto write_reserved; \
	} while (0)

	if (reg >= NI_J_INT_BUSY) {
		reg = reg & 0xf00;	/* for debug output */
		if (reg < NI_J_INT_ABUSY) {
			target = (off >> 3) & (IOB_JBUS_TARGETS - 1);
			ASSIGN_JBUS( j_int_busy[target], MASK64(5,5) );
		}
		/* aliased to target thread's register */
		ASSIGN_JBUS( j_int_busy[target], MASK64(5,5) );
	}
	return false;	/* illegal reg - an error */

write_reserved:
	EXEC_WARNING( ("Attempted write to reserved field in JBUS:"
	    "Write 0x%llx to register %s (offset 0x%x)",
	    val, ss_jbus_reg_name(reg), reg) );

	reg = reg & 0xf00;	/* for debug output */

	target = (off >> 3) & (IOB_JBUS_TARGETS - 1);
	val = jbusp->j_int_data0[target];
	target = (off >> 3) & (IOB_JBUS_TARGETS - 1);
	val = jbusp->j_int_data1[target];
	val = jbusp->j_int_data0[target];
	val = jbusp->j_int_data1[target];
	target = (off >> 3) & (IOB_JBUS_TARGETS - 1);
	val = jbusp->j_int_busy[target];
	val = jbusp->j_int_busy[target];
	return false;	/* illegal reg - an error */

	if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
		*regp = val;

	return true;
}
static bool_t
ss_iob_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off,
    maccess_t op, uint64_t * regp)
{
	/*
	 * FIXME: For the moment we only support 64bit accesses to registers.
	 * We need to do better than this, but confirm partial access behaviour.
	 */
	if (MA_ldu64 != op && MA_st64 != op) return false;
	if (off & 7) return false;	/* FIXME: 64bit access support only for the moment */

	npp = (ss_proc_t *)config_addrp->config_devp->devp;

	pthread_mutex_lock( &iobp->iob_lock );

#define ASSIGN_IOB(_n, _m) do { \
		if (0LL != (val & ~(_m))) goto write_reserved; \
		iobp->_n = (val & (_m)); \
	} while (0)

	DBGSSI( lprintf(sp->gid, "store to iob reg: 0x%x (%s) value 0x%LLx\n",
	    reg, ss_iob_reg_name(reg), val); );

	/* IOB Interrupt Registers section 7.3 of PRM 1.2 */
	case NI_INT_MAN0:	/* internal */
	case NI_INT_MAN1:	/* errors */
	case NI_INT_MAN2:	/* SSI */
	case NI_INT_MAN3:	/* reserved */
		ASSIGN_IOB( int_man[device], MASK64(12,8)|MASK64(5,0) );
		break;

	case NI_INT_CTL0:	/* internal */
	case NI_INT_CTL1:	/* errors */
	case NI_INT_CTL2:	/* SSI */
	case NI_INT_CTL3:	/* reserved */
		device = (off >> 3) & (IOB_DEV_MAX - 1);
		if (0LL != (val & ~(MASK64(2,1)))) goto write_reserved;
		int_ctl = &iobp->int_ctl[device];
		*int_ctl = (val & IOB_INT_CTL_MASK) | (*int_ctl & ~IOB_INT_CTL_MASK);
		if (val & IOB_INT_CTL_CLEAR) {
			*int_ctl &= ~IOB_INT_CTL_PEND;
		}

		/*
		 * OK PRM 1.4 S 7.2.4 indicates that if mask is cleared, and pending
		 * is still set then an interrupt is delivered ... i.e. int_vec is set (again).
		 */
		if (((*int_ctl) & IOB_INT_CTL_PEND) && !((*int_ctl) & IOB_INT_CTL_MASK)) {
			*int_ctl &= ~IOB_INT_CTL_PEND;
			if (device == IOB_DEV_SSI) {
				pthread_mutex_unlock( &iobp->iob_lock );
				npp->config_procp->proc_typep->ext_signal(npp->config_procp, ES_SSI, NULL);
			}
		}
		break;

		if (IOB_INT_VEC_RESUME(val)) {
			if (0LL != (val & ~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
				goto write_reserved;
			pthread_mutex_lock(&iobp->int_vec_lock);
			npp->config_procp->proc_typep->ext_signal(
			    npp->config_procp, ES_RESUME, NULL);
			pthread_mutex_unlock(&iobp->int_vec_lock);
		} else if (IOB_INT_VEC_IDLE(val)) {
			if (0LL != (val & ~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
				goto write_reserved;
			pthread_mutex_lock(&iobp->int_vec_lock);
			npp->config_procp->proc_typep->ext_signal(
			    npp->config_procp, ES_IDLE, NULL);
			pthread_mutex_unlock(&iobp->int_vec_lock);
		}
		if (IOB_INT_VEC_RESET(val)) {
			if (0LL != (val & ~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
				goto write_reserved;
			pthread_mutex_lock(&iobp->int_vec_lock);
			npp->config_procp->proc_typep->ext_signal(
			    npp->config_procp, ES_RESET, NULL);
			pthread_mutex_unlock(&iobp->int_vec_lock);
		}
		if (IOB_INT_VEC_INTR(val)) {
			if (0LL != (val & ~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
				goto write_reserved;
			niagara_send_xirq(sp, val);
		}
		break;

	ASSIGN_IOB( j_int_vec, MASK64(5,0) );
	ASSIGN_IOB( rset_stat, MASK64(3,1) );
	ASSIGN_IOB( tm_stat_ctl, MASK64(63,63)|MASK64(31,0) );

	EXEC_WARNING( ("Attempted write to read only register in IOB:"
	    "Write 0x%llx to register %s (offset 0x%x)",
	    val, ss_iob_reg_name(reg), reg) );
	goto access_failed;	/* RO regs */

	ASSIGN_IOB( int_mrgn_reg, MASK64(4,0) );
	ASSIGN_IOB( l2_vis_control, MASK64(3,2) );
	ASSIGN_IOB( l2_vis_mask_a, MASK64(51,48)|MASK64(44,40)|
	    MASK64(33,8)|MASK64(5,2) );
	ASSIGN_IOB( l2_vis_mask_b, MASK64(51,48)|MASK64(44,40)|
	    MASK64(33,8)|MASK64(5,2) );
	case NI_L2_VIS_COMPARE_A:
		ASSIGN_IOB( l2_vis_compare_a, MASK64(51,48)|MASK64(44,40)|
		    MASK64(33,8)|MASK64(5,2) );
	case NI_L2_VIS_COMPARE_B:
		ASSIGN_IOB( l2_vis_compare_b, MASK64(51,48)|MASK64(44,40)|
		    MASK64(33,8)|MASK64(5,2) );
	ASSIGN_IOB( l2_trig_delay, MASK64(31,0) );
	ASSIGN_IOB( iob_vis_select, MASK64(3,0) );
	ASSIGN_IOB( db_enet_control, MASK64(8,8)|MASK64(6,5) );
	ASSIGN_IOB( db_enet_idleval, MASK64(39,0) );
	ASSIGN_IOB( db_jbus_control, MASK64(16,16)|MASK64(6,4) );
	ASSIGN_IOB( db_jbus_mask0, MASK64(45,0) );
	ASSIGN_IOB( db_jbus_mask1, MASK64(45,0) );
	ASSIGN_IOB( db_jbus_mask2, MASK64(45,0) );
	ASSIGN_IOB( db_jbus_mask3, MASK64(45,0) );
	case NI_DB_JBUS_COMPARE0:
		ASSIGN_IOB( db_jbus_compare0, MASK64(43,0) );
	case NI_DB_JBUS_COMPARE1:
		ASSIGN_IOB( db_jbus_compare1, MASK64(43,0) );
	case NI_DB_JBUS_COMPARE2:
		ASSIGN_IOB( db_jbus_compare2, MASK64(43,0) );
	case NI_DB_JBUS_COMPARE3:
		ASSIGN_IOB( db_jbus_compare3, MASK64(43,0) );
	ASSIGN_IOB( db_jbus_count, MASK64(8,0) );

	EXEC_WARNING( ("Attempted write to illegal register in IOB:"
	    "Write 0x%llx to register offset 0x%x", val, reg) );
	goto access_failed;	/* illegal reg - an error */

write_reserved:
	EXEC_WARNING( ("Attempted write to reserved field in IOB:"
	    "Write 0x%llx to register %s (offset 0x%x)",
	    val, ss_iob_reg_name(reg), reg) );

	pthread_mutex_unlock( &iobp->iob_lock );
#define RETRIEVE_IOB(_n, _m) do { val = ((iobp->_n) & (_m)); } while (0)

	case NI_INT_MAN0:	/* internal */
	case NI_INT_MAN1:	/* errors */
	case NI_INT_MAN2:	/* SSI */
	case NI_INT_MAN3:	/* reserved */
		val = iobp->int_man[device];
		ASSERT( 0LL == (val & ~(MASK64(12,8)|MASK64(5,0))) );
		break;

	case NI_INT_CTL0:	/* internal */
	case NI_INT_CTL1:	/* errors */
	case NI_INT_CTL2:	/* SSI */
	case NI_INT_CTL3:	/* reserved */
		val = iobp->int_ctl[device];
		ASSERT( 0LL == (val & ~0x5) );
		break;

	RETRIEVE_IOB( j_int_vec, MASK64(5,0) );

	EXEC_WARNING( ("Attempted read to WO register in IOB: %s",
	    ss_iob_reg_name(reg)) );

	RETRIEVE_IOB( rset_stat, MASK64(11,9)|MASK64(3,1) );
	RETRIEVE_IOB( tm_stat_ctl, MASK64(63,63)|MASK64(31,0) );
	RETRIEVE_IOB( proc_ser_num, MASK64(63,0) );
	RETRIEVE_IOB( iob_fuse, MASK64(31,0) );
	RETRIEVE_IOB( int_mrgn_reg, MASK64(4,0) );
	RETRIEVE_IOB( l2_vis_control, MASK64(3,0) );
	RETRIEVE_IOB( l2_vis_mask_a, MASK64(51,48)|MASK64(44,40)|
	    MASK64(33,8)|MASK64(5,2) );
	RETRIEVE_IOB( l2_vis_mask_b, MASK64(51,48)|MASK64(44,40)|
	    MASK64(33,8)|MASK64(5,2) );
	case NI_L2_VIS_COMPARE_A:
		RETRIEVE_IOB( l2_vis_compare_a, MASK64(51,48)|MASK64(44,40)|
		    MASK64(33,8)|MASK64(5,2) );
	case NI_L2_VIS_COMPARE_B:
		RETRIEVE_IOB( l2_vis_compare_b, MASK64(51,48)|MASK64(44,40)|
		    MASK64(33,8)|MASK64(5,2) );
	RETRIEVE_IOB( l2_trig_delay, MASK64(31,0) );
	RETRIEVE_IOB( iob_vis_select, MASK64(3,0) );
	RETRIEVE_IOB( db_enet_control, MASK64(8,8)|MASK64(6,5) );
	RETRIEVE_IOB( db_enet_idleval, MASK64(39,0) );
	RETRIEVE_IOB( db_jbus_control, MASK64(16,16)|MASK64(6,4) );
	RETRIEVE_IOB( db_jbus_mask0, MASK64(45,0) );
	RETRIEVE_IOB( db_jbus_mask1, MASK64(45,0) );
	RETRIEVE_IOB( db_jbus_mask2, MASK64(45,0) );
	RETRIEVE_IOB( db_jbus_mask3, MASK64(45,0) );
	case NI_DB_JBUS_COMPARE0:
		RETRIEVE_IOB( db_jbus_compare0, MASK64(43,0) );
	case NI_DB_JBUS_COMPARE1:
		RETRIEVE_IOB( db_jbus_compare1, MASK64(43,0) );
	case NI_DB_JBUS_COMPARE2:
		RETRIEVE_IOB( db_jbus_compare2, MASK64(43,0) );
	case NI_DB_JBUS_COMPARE3:
		RETRIEVE_IOB( db_jbus_compare3, MASK64(43,0) );
	RETRIEVE_IOB( db_jbus_count, MASK64(8,0) );
	goto access_failed;	/* illegal reg - an error */

	DBGSSI( lprintf(sp->gid, "read from iob reg: 0x%x (%s) value 0x%LLx\n",
	    reg, ss_iob_reg_name(reg), val); );

	if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
		*regp = val;

	pthread_mutex_unlock( &iobp->iob_lock );
	return true;

access_failed:
	pthread_mutex_unlock( &iobp->iob_lock );
	return false;
}
static bool_t
ss_clock_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off,
    maccess_t op, uint64_t * regp)
{
	/*
	 * FIXME: For the moment we only support 64bit accesses to registers.
	 * We need to do better than this, but confirm partial access behaviour.
	 */
	if (MA_ldu64 != op && MA_st64 != op) return false;

	npp = (ss_proc_t *)config_addrp->config_devp->devp;

#define ASSIGN_CLK(_n, _m) do { \
		if (0LL != (val & ~(_m))) goto write_reserved; \
	} while (0)

	/* Clock Unit section 11.1 of PRM 1.2 */
	ASSIGN_CLK( divider, MASK64(61,28)|MASK64(26,26)|
	    MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
	ASSIGN_CLK( control, MASK64(63,61)|MASK64(54,48)|
	    MASK64(34,29)|MASK64(27,27)|MASK64(23,0) );
	case SS_CLOCK_DLL_CONTROL:
		ASSIGN_CLK( dll_control, MASK64(44,40)|MASK64(38,38)|
		    MASK64(36,32)|MASK64(19,0) );
	ASSIGN_CLK( jbus_sync, MASK64(39,38)|MASK64(36,30)|
	    MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
	case SS_CLOCK_DLL_BYPASS:
		ASSIGN_CLK( dll_bypass, MASK64(61,56)|MASK64(52,48)|
		    MASK64(45,40)|MASK64(36,32)|MASK64(29,24)|
		    MASK64(20,16)|MASK64(13,8)|MASK64(4,0) );
	ASSIGN_CLK( dram_sync, MASK64(39,38)|MASK64(36,30)|
	    MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
	ASSIGN_CLK( version, 0LL );

	/* illegal reg - an error */

write_reserved:
	EXEC_WARNING( ("Attempted write to reserved field in clock unit:"
	    "Write 0x%llx to register %s (offset 0x%x)",
	    val, ss_clock_reg_name(reg), reg) );

#define RETRIEVE_CLK(_n, _m) do { val = ((clockp->_n) & (_m)); } while (0)

	/* Clock Unit section 11.1 of PRM 1.2 */
	RETRIEVE_CLK( divider, MASK64(61,28)|MASK64(26,26)|
	    MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
	RETRIEVE_CLK( control, MASK64(63,61)|MASK64(54,48)|
	    MASK64(34,29)|MASK64(27,27)|MASK64(23,0) );
	case SS_CLOCK_DLL_CONTROL:
		RETRIEVE_CLK( dll_control, MASK64(44,40)|MASK64(38,38)|
		    MASK64(36,32)|MASK64(19,0) );
	RETRIEVE_CLK( jbus_sync, MASK64(39,38)|MASK64(36,30)|
	    MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
	case SS_CLOCK_DLL_BYPASS:
		RETRIEVE_CLK( dll_bypass, MASK64(61,56)|MASK64(52,48)|
		    MASK64(45,40)|MASK64(36,32)|MASK64(29,24)|
		    MASK64(20,16)|MASK64(13,8)|MASK64(4,0) );
	RETRIEVE_CLK( dram_sync, MASK64(39,38)|MASK64(36,30)|
	    MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
	RETRIEVE_CLK( version, 0LL );

	/* illegal reg - an error */

	if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
		*regp = val;

	return true;
}
static bool_t
ss_l2_ctl_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off,
    maccess_t op, uint64_t * regp)
{
	/*
	 * FIXME: For the moment we only support 64bit accesses to registers.
	 * We need to do better than this, but confirm partial access behaviour.
	 */
	if (MA_ldu64 != op && MA_st64 != op) return false;

	npp = (ss_proc_t *)config_addrp->config_devp->devp;

#define ASSIGN_L2(_n, _m) do { \
		if (0LL != (val & ~(_m))) goto write_reserved; \
	} while (0)

	/* L2 BIST Control Reg section 18.7.2 of PRM 1.4 */
	ASSIGN_L2( bist_ctl, MASK64(6,0) );
	if (val & 1) l2p->bist_ctl[bank] |= 0x400;

	/* L2 Control Register section 18.5.1 of PRM 1.2 */
	ASSIGN_L2( control, MASK64(21,0) );

	/* Error handling section 12.6 of PRM 1.1 */
	ASSIGN_L2( error_enable, MASK64(2,0) );

	l2p->error_status[bank] &= ~val;
	l2p->error_status[bank] &= MASK64(63,62)|MASK64(53,35);
	l2p->error_status[bank] |= val &
	    (MASK64(61,61)|MASK64(59,54)|MASK64(31,0));

	case SS_L2_ERROR_ADDRESS:
		ASSIGN_L2( error_address, MASK64(39,4) );
	ASSIGN_L2( error_inject, MASK64(1,0) );

	/* illegal reg - an error */

	/* L2 Cache Diagnostic Access section 18.6 of PRM 1.2 */
	/* index stores to a 32bit word and its ECC+rsvd bits */
	idx = (off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD)) >> 2;
	/* put oddeven select bit low so data is in addr order */
	idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
	l2p->diag_datap[idx] = val;

	/* index stores to a tag and its ECC+rsvd bits */
	idx = (off & (L2_WAY | L2_LINE | L2_BANK)) >> 6;
	l2p->diag_tagp[idx] = val;

	/* index valid/dirty or alloc/used bits and parity */
	idx = (off & (L2_LINE | L2_BANK)) >> 6;
	idx |= ((off & L2_VDSEL) >> 10);
	l2p->diag_vuadp[idx] = val;

write_reserved:
	EXEC_WARNING( ("Attempted write to reserved field in l2 cache controller:"
	    "Write 0x%llx to bank %d, register %s (offset 0x%x)",
	    val, bank, ss_l2_ctrl_reg_name(reg), reg) );

#define RETRIEVE_L2(_n, _m) do { val = ((l2p->_n[bank]) & (_m)); } while (0)

	/* L2 BIST Control Reg section 18.7.2 of PRM 1.4 */
	RETRIEVE_L2( bist_ctl, MASK64(10,0) );

	/* L2 Control Register section 18.5.1 of PRM 1.2 */
	RETRIEVE_L2( control, MASK64(63,57)|MASK64(15,0) );

	/* Error handling section 12.6 of PRM 1.1 */
	RETRIEVE_L2( error_enable, MASK64(2,0) );
	RETRIEVE_L2( error_status,
	    MASK64(63,61)|MASK64(59,35)|MASK64(31,0) );
	case SS_L2_ERROR_ADDRESS:
		RETRIEVE_L2( error_address, MASK64(39,4) );
	RETRIEVE_L2( error_inject, MASK64(1,0) );

	/* illegal reg - an error */

	/* L2 Cache Diagnostic Access section 18.6 of PRM 1.2 */
	/* index retrieves a 32bit word and its ECC+rsvd bits */
	idx = (off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD)) >> 2;
	/* put oddeven select bit low so data is in addr order */
	idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
	val = l2p->diag_datap[idx];

	/* index retrieves a tag and its ECC+rsvd bits */
	idx = (off & (L2_WAY | L2_LINE | L2_BANK)) >> 6;
	val = l2p->diag_tagp[idx];

	/* index valid/dirty or alloc/used bits and parity */
	idx = (off & (L2_LINE | L2_BANK)) >> 6;
	idx |= ((off & L2_VDSEL) >> 10);
	val = l2p->diag_vuadp[idx];

	if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
		*regp = val;

	return true;
}
static bool_t
ss_dram_ctl_access(simcpu_t *sp, config_addr_t *config_addrp, tpaddr_t off,
    maccess_t op, uint64_t *regp)
{
	/*
	 * FIXME: For the moment we only support 64bit accesses to registers.
	 * We need to do better than this, but confirm partial access
	 * behaviour first.
	 */
	npp = (ss_proc_t *)config_addrp->config_devp->devp;

	if (MA_ldu64 != op && MA_st64 != op)
		return false;

	ASSERT (bank < npp->num_mbanks); /* this should be enforced by the config_dev range */

	dbp = &(npp->mbankp[bank]);

	DBGMC( lprintf(sp->gid, "Memory controller bank %d : register %s\n",
	    bank, ss_dram_ctrl_reg_name(reg)); );
#define	ASSIGN_DB(_n, _m)	do { \
	dbp->_n |= (val & (_m)); \
	} while (0)
	/* DRAM controller section 15.5 of PRM 1.1 */
	case SS_DRAM_CAS_ADDR_WIDTH: ASSIGN_DB( cas_addr_width, MASK64(3, 0) ); break;
	case SS_DRAM_RAS_ADDR_WIDTH: ASSIGN_DB( ras_addr_width, MASK64(3, 0) ); break;
	case SS_DRAM_CAS_LAT: ASSIGN_DB( cas_lat, MASK64(2, 0) ); break;
	case SS_DRAM_SCRUB_FREQ: ASSIGN_DB( scrub_freq, MASK64(11, 0) ); break;
	case SS_DRAM_REFRESH_FREQ: ASSIGN_DB( refresh_freq, MASK64(12, 0) ); break;
	case SS_DRAM_REFRESH_COUNTER: ASSIGN_DB( refresh_counter, MASK64(12, 0) ); break;
	case SS_DRAM_SCRUB_ENABLE: ASSIGN_DB( scrub_enable, MASK64(0, 0) ); break;
	case SS_DRAM_TRRD: ASSIGN_DB( trrd, MASK64(3, 0) ); break;
	case SS_DRAM_TRC: ASSIGN_DB( trc, MASK64(4, 0) ); break;
	case SS_DRAM_DRAM_TRCD: ASSIGN_DB( dram_trcd, MASK64(3, 0) ); break;
	case SS_DRAM_TWTR: ASSIGN_DB( twtr, MASK64(3, 0) ); break;
	case SS_DRAM_TRTW: ASSIGN_DB( trtw, MASK64(3, 0) ); break;
	case SS_DRAM_TRTP: ASSIGN_DB( trtp, MASK64(2, 0) ); break;
	case SS_DRAM_TRAS: ASSIGN_DB( tras, MASK64(3, 0) ); break;
	case SS_DRAM_TRP: ASSIGN_DB( trp, MASK64(3, 0) ); break;
	case SS_DRAM_TWR: ASSIGN_DB( twr, MASK64(3, 0) ); break;
	case SS_DRAM_TRFC: ASSIGN_DB( trfc, MASK64(6, 0) ); break;
	case SS_DRAM_TMRD: ASSIGN_DB( tmrd, MASK64(1, 0) ); break;
	case SS_DRAM_TIWTR: ASSIGN_DB( tiwtr, MASK64(1, 0) ); break;
	case SS_DRAM_PRECHARGE_WAIT: ASSIGN_DB( precharge_wait, MASK64(7, 0) ); break;
	case SS_DRAM_DIMM_STACK: ASSIGN_DB( dimm_stack, MASK64(0, 0) ); break;
	case SS_DRAM_EXT_WR_MODE2: ASSIGN_DB( ext_wr_mode2, MASK64(14, 0) ); break;
	case SS_DRAM_EXT_WR_MODE1: ASSIGN_DB( ext_wr_mode1, MASK64(14, 0) ); break;
	case SS_DRAM_EXT_WR_MODE3: ASSIGN_DB( ext_wr_mode3, MASK64(14, 0) ); break;
	case SS_DRAM_WAIR_CONTROL: ASSIGN_DB( wair_control, MASK64(0, 0) ); break;
	case SS_DRAM_RANK1_PRESENT: ASSIGN_DB( rank1_present, MASK64(0, 0) ); break;
	case SS_DRAM_CHANNEL_DISABLED: ASSIGN_DB( channel_disabled, MASK64(0, 0) ); break;
	case SS_DRAM_SEL_LO_ADDR_BITS: ASSIGN_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
	case SS_DRAM_DIMM_INIT:
		if (0LL != (val & ~(7))) goto write_reserved;
		dbp->dimm_init = val & MASK64(2, 0);
		/* DRAM Init sequence done is instantaneous */
		break;
	case SS_DRAM_SW_DV_COUNT: ASSIGN_DB( sw_dv_count, MASK64(2, 0) ); break;
	case SS_DRAM_HW_DMUX_CLK_INV: ASSIGN_DB( hw_dmux_clk_inv, MASK64(0, 0) ); break;
	case SS_DRAM_PAD_EN_CLK_INV: ASSIGN_DB( pad_en_clk_inv, MASK64(4, 0) ); break;
	case SS_DRAM_MODE_WRITE_STATUS: ASSIGN_DB( mode_write_status, MASK64(0, 0) ); break;
	case SS_DRAM_INIT_STATUS: ASSIGN_DB( init_status, MASK64(0, 0) ); break;
	case SS_DRAM_DIMM_PRESENT: ASSIGN_DB( dimm_present, MASK64(3, 0) ); break;
	case SS_DRAM_FAILOVER_STATUS: ASSIGN_DB( failover_status, MASK64(0, 0) ); break;
	case SS_DRAM_FAILOVER_MASK: ASSIGN_DB( failover_mask, MASK64(34, 0) ); break;

	/* Performance counter section 10.3 of PRM 1.1 */
	case SS_DRAM_PERF_CTL: ASSIGN_DB( perf_ctl, MASK64(7, 0) ); break;
	case SS_DRAM_PERF_COUNT: ASSIGN_DB( perf_count, MASK64(63, 0) ); break;

	/* Error handling section 12.9 of PRM 1.1 */
	case SS_DRAM_ERROR_STATUS:
		dbp->error_status &= ~val;
		dbp->error_status &= MASK64(63,57);
		dbp->error_status |= val & MASK64(15,0);
		break;
	case SS_DRAM_ERROR_ADDRESS: ASSIGN_DB( error_address, MASK64(39,4) ); break;
	case SS_DRAM_ERROR_INJECT: ASSIGN_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
	case SS_DRAM_ERROR_COUNTER: ASSIGN_DB( error_counter, MASK64(17,0) ); break;
	case SS_DRAM_ERROR_LOCATION: ASSIGN_DB( error_location, MASK64(35,0) ); break;

	/* Power management section 16.2 of PRM 1.1 */
	case SS_DRAM_OPEN_BANK_MAX: ASSIGN_DB( open_bank_max, MASK64(16, 0) ); break;
	case SS_DRAM_PROG_TIME_CNTR: ASSIGN_DB( prog_time_cntr, MASK64(15, 0) ); break;

	/* Hardware debug section 19.1 of PRM 1.1 */
	case SS_DRAM_DBG_TRG_EN: ASSIGN_DB( dbg_trg_en, MASK64(7, 0) ); break;

		/* illegal reg - an error */

write_reserved:
	EXEC_WARNING( ("Attempted write to reserved field in dram controller: "
	    "Write 0x%llx to bank %d, register %s (offset 0x%x)",
	    val, bank, ss_dram_ctrl_reg_name(reg), reg) );
#define RETRIEVE_DB(_n, _m) do { val = ((dbp->_n) & (_m)); } while (0)
	/* DRAM controller section 15.5 of PRM 1.1 */
	case SS_DRAM_CAS_ADDR_WIDTH: RETRIEVE_DB( cas_addr_width, MASK64(3, 0) ); break;
	case SS_DRAM_RAS_ADDR_WIDTH: RETRIEVE_DB( ras_addr_width, MASK64(3, 0) ); break;
	case SS_DRAM_CAS_LAT: RETRIEVE_DB( cas_lat, MASK64(2, 0) ); break;
	case SS_DRAM_SCRUB_FREQ: RETRIEVE_DB( scrub_freq, MASK64(11, 0) ); break;
	case SS_DRAM_REFRESH_FREQ: RETRIEVE_DB( refresh_freq, MASK64(12, 0) ); break;
	case SS_DRAM_REFRESH_COUNTER: RETRIEVE_DB( refresh_counter, MASK64(12, 0) ); break;
	case SS_DRAM_SCRUB_ENABLE: RETRIEVE_DB( scrub_enable, MASK64(0, 0) ); break;
	case SS_DRAM_TRRD: RETRIEVE_DB( trrd, MASK64(3, 0) ); break;
	case SS_DRAM_TRC: RETRIEVE_DB( trc, MASK64(4, 0) ); break;
	case SS_DRAM_DRAM_TRCD: RETRIEVE_DB( dram_trcd, MASK64(3, 0) ); break;
	case SS_DRAM_TWTR: RETRIEVE_DB( twtr, MASK64(3, 0) ); break;
	case SS_DRAM_TRTW: RETRIEVE_DB( trtw, MASK64(3, 0) ); break;
	case SS_DRAM_TRTP: RETRIEVE_DB( trtp, MASK64(2, 0) ); break;
	case SS_DRAM_TRAS: RETRIEVE_DB( tras, MASK64(3, 0) ); break;
	case SS_DRAM_TRP: RETRIEVE_DB( trp, MASK64(3, 0) ); break;
	case SS_DRAM_TWR: RETRIEVE_DB( twr, MASK64(3, 0) ); break;
	case SS_DRAM_TRFC: RETRIEVE_DB( trfc, MASK64(6, 0) ); break;
	case SS_DRAM_TMRD: RETRIEVE_DB( tmrd, MASK64(1, 0) ); break;
	case SS_DRAM_TIWTR: RETRIEVE_DB( tiwtr, MASK64(1, 0) ); break;
	case SS_DRAM_PRECHARGE_WAIT: RETRIEVE_DB( precharge_wait, MASK64(7, 0) ); break;
	case SS_DRAM_DIMM_STACK: RETRIEVE_DB( dimm_stack, MASK64(0, 0) ); break;
	case SS_DRAM_EXT_WR_MODE2: RETRIEVE_DB( ext_wr_mode2, MASK64(14, 0) ); break;
	case SS_DRAM_EXT_WR_MODE1: RETRIEVE_DB( ext_wr_mode1, MASK64(14, 0) ); break;
	case SS_DRAM_EXT_WR_MODE3: RETRIEVE_DB( ext_wr_mode3, MASK64(14, 0) ); break;
	case SS_DRAM_WAIR_CONTROL: RETRIEVE_DB( wair_control, MASK64(0, 0) ); break;
	case SS_DRAM_RANK1_PRESENT: RETRIEVE_DB( rank1_present, MASK64(0, 0) ); break;
	case SS_DRAM_CHANNEL_DISABLED: RETRIEVE_DB( channel_disabled, MASK64(0, 0) ); break;
	case SS_DRAM_SEL_LO_ADDR_BITS: RETRIEVE_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
	case SS_DRAM_DIMM_INIT: RETRIEVE_DB( dimm_init, MASK64(2, 0) ); break;
	case SS_DRAM_SW_DV_COUNT: RETRIEVE_DB( sw_dv_count, MASK64(2, 0) ); break;
	case SS_DRAM_HW_DMUX_CLK_INV: RETRIEVE_DB( hw_dmux_clk_inv, MASK64(0, 0) ); break;
	case SS_DRAM_PAD_EN_CLK_INV: RETRIEVE_DB( pad_en_clk_inv, MASK64(4, 0) ); break;
	case SS_DRAM_MODE_WRITE_STATUS: RETRIEVE_DB( mode_write_status, MASK64(0, 0) ); break;
	case SS_DRAM_INIT_STATUS: RETRIEVE_DB( init_status, MASK64(0, 0) ); break;
	case SS_DRAM_DIMM_PRESENT: RETRIEVE_DB( dimm_present, MASK64(3, 0) ); break;
	case SS_DRAM_FAILOVER_STATUS: RETRIEVE_DB( failover_status, MASK64(0, 0) ); break;
	case SS_DRAM_FAILOVER_MASK: RETRIEVE_DB( failover_mask, MASK64(34, 0) ); break;

	/* Performance counter section 10.3 of PRM 1.1 */
	case SS_DRAM_PERF_CTL: RETRIEVE_DB( perf_ctl, MASK64(7, 0) ); break;
	case SS_DRAM_PERF_COUNT: RETRIEVE_DB( perf_count, MASK64(63, 0) ); break;

	/* Error handling section 12.9 of PRM 1.1 */
	case SS_DRAM_ERROR_STATUS: RETRIEVE_DB( error_status, MASK64(63,57)|MASK64(15,0) ); break;
	case SS_DRAM_ERROR_ADDRESS: RETRIEVE_DB( error_address, MASK64(39,4) ); break;
	case SS_DRAM_ERROR_INJECT: RETRIEVE_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
	case SS_DRAM_ERROR_COUNTER: RETRIEVE_DB( error_counter, MASK64(17,0) ); break;
	case SS_DRAM_ERROR_LOCATION: RETRIEVE_DB( error_location, MASK64(35,0) ); break;

	/* Power management section 16.2 of PRM 1.1 */
	case SS_DRAM_OPEN_BANK_MAX: RETRIEVE_DB( open_bank_max, MASK64(16, 0) ); break;
	case SS_DRAM_PROG_TIME_CNTR: RETRIEVE_DB( prog_time_cntr, MASK64(15, 0) ); break;

	/* Hardware debug section 19.1 of PRM 1.1 */
	case SS_DRAM_DBG_TRG_EN: RETRIEVE_DB( dbg_trg_en, MASK64(7, 0) ); break;
		/* illegal reg - an error */

	/* a load completes into its destination register unless that is %g0 */
	if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
		*regp = val;

	return true;
}
/****************************************************************
* SunSPARC CPU interrupt bridge code
****************************************************************/
/* write to SS_ASI_SWVR_UDB_INTR_W */
/* FIXMENOW .... this function to go away ... use ss_ext_signal .... */
static void niagara_send_xirq(simcpu_t *sp, uint64_t val)
	npp = (ss_proc_t *)(sp->config_procp->procp);

	type = (val >> 16) & MASK64(1,0);
	/* strand captures reserved field too .. but should be zero ... */
	if (type != 0)
		EXEC_WARNING(("Write to SS_ASI_SWVR_UDB_INTR_W with non-zero "
		    "type field (@pc=0x%llx)", sp->pc));

	/* check actual value against number of strands later ... */
	strand = (val >> 8) & MASK64(4,0);
	vec_bit = val & MASK64(5,0);
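#if 0	/* Illustration only: how a value stored to SS_ASI_SWVR_UDB_INTR_W
	 * breaks down, per the field extraction above.  The helper is a
	 * documentation sketch, not a simulator interface.
	 */
static void
swvr_udb_intr_w_fields(uint64_t val, uint_t *typep, uint_t *strandp, uint_t *vecp)
{
	*typep = (val >> 16) & 0x3;	/* bits 17:16, reserved - must be zero */
	*strandp = (val >> 8) & 0x1f;	/* bits 12:8, target strand id */
	*vecp = val & 0x3f;		/* bits 5:0, interrupt vector bit number */
}
#endif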
	/* normalize strand to internal strand */
	strand = STRANDID2IDX(npp, strand);
	if (!VALIDIDX(npp, strand)) {
		EXEC_WARNING(("Write to SS_ASI_SWVR_UDB_INTR_W with illegal "
		    "strand value 0x%llx (@pc=0x%llx)", strand, sp->pc));
		return;
	}

	tstrandp = &(npp->ss_strandp[strand]);
	pthread_mutex_lock(&tstrandp->irq_lock);
	pay_attention = (0LL == tstrandp->irq_vector);
	tstrandp->irq_vector |= (1LL << vec_bit);
	pthread_mutex_unlock(&tstrandp->irq_lock);

	DBGE( lprintf(sp->gid, "irq_send: tstrand=%u irq_vector=%llx "
	    "(pc=0x%llx)\n", npp->strand[strand]->simp->gid,
	    tstrandp->irq_vector, sp->pc); );
	/*
	 * The complicated part here is that the execution thread
	 * determines when the interrupt is actually delivered, if at
	 * all.  All we need to do here is to ensure that that thread
	 * pays attention to the fact that the interrupt vector status
	 * has changed .. we only care if it goes non-zero ...
	 */
	tv9p = npp->strand[strand];
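	/*
	 * The vector-posting idiom above, in outline: take the strand's
	 * irq_lock, remember whether irq_vector was empty, OR in the new
	 * vector bit, drop the lock, and only on an empty-to-non-empty
	 * transition poke the target simcpu so its execution loop looks at
	 * interrupt state again.  Posting into an already non-empty vector
	 * needs no wakeup, because the earlier transition triggered one.
	 */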
/*
 * Non-execution threads use this method for posting
 * interrupts and other actions to a simcpu.
 */
uint64_t ss_ext_signal(config_proc_t *config_procp, ext_sig_t sigtype, void *vp)
{
	npp = (ss_proc_t *)(config_procp->procp);

	/* what if thread not running? */
	strand = IOB_INT_VEC_THREAD(npp->iobp->int_vec_dis);
	tidx = STRANDID2IDX(npp, strand);
	/* DBGE( lprintf(sp->gid, "IDLE: strand=%d idx=%d\n", strand, tidx)); */
	/* skip strands that do not exist */
	if (!VALIDIDX(npp, tidx))
		break;

	nsp = &(npp->ss_strandp[tidx]);
	pthread_mutex_lock(&npp->thread_sts_lock);
	SET_THREAD_STS_SFSM(npp, nsp, THREAD_STS_TSTATE_IDLE);
	simcore_cpu_state_park(sp);
	pthread_mutex_unlock(&npp->thread_sts_lock);
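	/*
	 * Idling a strand parks its execution thread.  The park (and the
	 * matching unpark below) is done with thread_sts_lock held so the
	 * thread-status state machine and the parked/running state of the
	 * simcpu can never be observed out of step with each other.
	 */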
	/* what if thread not idle? */
	strand = IOB_INT_VEC_THREAD(npp->iobp->int_vec_dis);
	tidx = STRANDID2IDX(npp, strand);
	/* skip strands that do not exist */
	if (!VALIDIDX(npp, tidx))
		break;

	nsp = &(npp->ss_strandp[tidx]);
	pthread_mutex_lock(&npp->thread_sts_lock);
	SET_THREAD_STS_SFSM(npp, nsp, THREAD_STS_TSTATE_RUN);
	simcore_cpu_state_unpark(sp);
	pthread_mutex_unlock(&npp->thread_sts_lock);
	mondop = (jbus_mondo_t *)vp;
	tidx = mondop->adr.target;

	pthread_mutex_lock(&jbusp->lock);
	if (jbusp->j_int_busy[tidx] & IOB_JBUS_BUSY) {
		pthread_mutex_unlock(&jbusp->lock);
		return (IOB_JBUS_NACK);	/* target still busy with a mondo */
	}
	jbusp->j_int_data0[tidx] = mondop->data0;
	jbusp->j_int_data1[tidx] = mondop->data1;
	jbusp->j_int_busy[tidx] = mondop->adr.source | IOB_JBUS_BUSY;
	pthread_mutex_unlock(&jbusp->lock);
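	/*
	 * JBus mondo handshake, as modelled here: a target whose busy latch
	 * is still set NACKs the new mondo; otherwise data0/data1 and the
	 * source agent id are latched and the busy bit raised, to be cleared
	 * when software later acknowledges the mondo.  Only then is the
	 * target strand's interrupt vector poked (below).
	 */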
	strand = STRANDID2IDX(npp, tidx);
	if (!VALIDIDX(npp, strand))
		return (IOB_JBUS_NACK); /* XXX */

	nsp = &(npp->ss_strandp[strand]);
	pthread_mutex_lock(&nsp->irq_lock);
	pay_attention = (0LL == nsp->irq_vector);
	nsp->irq_vector |= (uint64_t)1 << npp->iobp->j_int_vec;
	pthread_mutex_unlock(&nsp->irq_lock);

	v9p = npp->strand[strand];
	/* This used to deliver a SSI interrupt event */
	/* really needs to be handled in a different way */
	pthread_mutex_lock(&iobp->iob_lock);
	/*
	 * If the interrupt is masked in the IOB, simply set again the
	 * pending bit ... if not masked, then deliver an
	 * interrupt using the irq_vector.
	 */
	if (iobp->int_ctl[IOB_DEV_SSI] & IOB_INT_CTL_MASK) {
		iobp->int_ctl[IOB_DEV_SSI] |= IOB_INT_CTL_PEND;
	} else {
		iobp->int_ctl[IOB_DEV_SSI] |= IOB_INT_CTL_MASK;

		/* now go async deliver the interrupt */
		strand = IOB_INT_MAN_CPUID(npp->iobp->int_man[IOB_DEV_SSI]);
		nsp = &(npp->ss_strandp[strand]);
		v9p = npp->strand[strand];

		pthread_mutex_lock(&nsp->irq_lock);
		pay_attention = (0LL == nsp->irq_vector);
		nsp->irq_vector |=
		    (uint64_t)1 << (iobp->int_man[IOB_DEV_SSI] & INTR_VEC_MASK);
		pthread_mutex_unlock(&nsp->irq_lock);

		DBGSSI( lprintf(sp->gid, "SSI ext_signal: nsp=%p irq_vector=%llx\n",
		    nsp, nsp->irq_vector); );
		DBGSSI( lprintf(sp->gid, "SSI ext_signal: attention set\n"); );
	}
	pthread_mutex_unlock(&iobp->iob_lock);
	for (i = (npp->nstrands)-1; i >= 0; i--) {
		v9p = npp->strand[i];
		nsp = (ss_strand_t *)(v9p->impl_specificp);
		nsp->pending_async_tt = SS_trap_power_on_reset;
		sp = v9p->simp;
		sp->exception_pending = true;
	}

	/*
	 * OK every strand on this CPU gets a reset signal
	 * FIXME: wake up sleeping strands or error state strands
	 */
	for (i = (npp->nstrands)-1; i >= 0; i--) {
		v9p = npp->strand[i];
		nsp = (ss_strand_t *)(v9p->impl_specificp);
		nsp->pending_async_tt = SS_trap_externally_initiated_reset;
		sp = v9p->simp;
		DBGE( lprintf(sp->gid, "ES_XIR set_attention\n"); );
		sp->exception_pending = true;
	}

	EXEC_WARNING(("processor%d: ext_signal %d ignored",
	    config_procp->proc_id, sigtype));

	return (0);
}
/*
 * CPU specific instruction decode routine.  This routine is called from the main
 * instruction decoder routine only when that routine comes up empty handed (i.e.
 * before declaring it an illegal or unknown instruction.)  For now, we don't have
 * any CPU specific instructions implemented for Niagara, and so the performance
 * impact of making this function call is negligible since it doesn't happen in
 * the common case.
 * This routine returns a pointer to the exec function which is to be run as a
 * result of encountering the instruction op code in question.
 */
static op_funcp
niagara_decode_me(simcpu_t *sp, xicache_instn_t *xcip, uint32_t instn)
{
	switch ((ty_code_t)X_OP(instn)) {
	case Ty_2: /* Arithmetic and Misc instructions */
		op2c = (T2o3_code_t)X_OP3(instn);

		/* register x immediate -> register forms */
		SET_OPv9(save_imm); /* rd == 0 determined in instn implementation */

		if (rd == 0 && rs1 == 15) {
			if (!CHECK_RESERVED_ZERO(instn, 12, 7)) {
				SET_OP_ILL_REASON(misc_reserved_field_non_zero);
				goto n1_illegal_instruction;
			}
			simm = X_MEMBAR_MASKS(instn);
			SET_OP_SIMM16(simm); /* masks in immediates */
		}

		SET_OPv9( read_state_reg );
		if (!X_FMT4_CC2(instn)) {
#ifdef FP_DECODE_DISABLED
			if (!((sparcv9_cpu_t *)(sp->specificp))->fpu_on)
				goto n1_fp_disabled;
#endif /* FP_DECODE_DISABLED */
			if (rd == 0) goto n1_do_noop;
			/* We attempt to fast path movfcc_a ... */
			if (X_FMT4_COND(instn) == cond_n) goto n1_do_noop;
			if (X_FMT4_COND(instn) == cond_a) {
				SET_OP_MOVCC_CC(X_FMT4_CC(instn));
			}
			SET_OP_MOVCC_COND(X_FMT4_COND(instn));
		}

		switch ((cc4bit_t)X_FMT4_CC(instn)) {
		case CC4bit_icc: SET_OP_MOVCC_CC(0); break;
		case CC4bit_xcc: SET_OP_MOVCC_CC(1); break;
		default:
			SET_OP_ILL_REASON(movcc_illegal_cc_field);
			goto n1_illegal_instruction;
		}

		if (rd == 0) goto n1_do_noop;
		/*
		 * truncate simm - as only an 11 bit
		 * immediate in movcc instructions, not the
		 * 13 bit field we extracted above
		 */
		if (X_FMT4_COND(instn) == cond_n) goto n1_do_noop;
		if (X_FMT4_COND(instn) == cond_a) goto n1_do_move_simm;
		SET_OP_MOVCC_COND(X_FMT4_COND(instn));

		int fcn = X_FMT2_FCN(instn);
		if (!CHECK_RESERVED_ZERO(instn, 18, 0)) {
			SET_OP_ILL_REASON(saved_reserved_field_non_zero);
			goto n1_illegal_instruction;
		}
		SET_OP_ILL_REASON(saved_fcn_invalid);
		goto n1_illegal_instruction;

		switch (X_FMT3_FCN(instn)) {
			SET_OP_MISC_BITS((uint_t)true);
			SET_OP_MISC_BITS((uint_t)false);
			SET_OP_ILL_REASON(done_retry_illegal_fcn_field);
			goto n1_illegal_instruction;
		}

		/* register x register -> register forms */
		SET_OPv9(save_rrr); /* rd == 0 determined in instn implementation */
		/* Rd == 0 handled by instruction */
		goto n1_illegal_instruction;

		goto n1_done_retry_instn;

#ifdef FP_DECODE_DISABLED
		SET_OPv9(fp_unimplemented_instruction);
#endif /* FP_DECODE_DISABLED */
	}

n1_illegal_instruction:
	SET_OPv9(illegal_instruction);
}
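#if 0	/* Illustration only: the shape of the decode contract described in
	 * the comment above.  Every identifier here other than the
	 * op_funcp/simcpu_t/xicache_instn_t types is invented for the
	 * sketch; the real decoder fills in the xicache entry via the
	 * SET_OP* macros and falls back to the illegal instruction exec
	 * function.
	 */
static op_funcp
example_decode_hook(simcpu_t *sp, xicache_instn_t *xcip, uint32_t instn)
{
	if (!is_cpu_specific_opcode(instn))
		return (NULL);		/* caller treats this as illegal/unknown */
	return (exec_cpu_specific);	/* exec function to run for this opcode */
}
#endif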
void niagara_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp)
{
	/* This Niagara specific function is not implemented yet. */
}

void niagara_domain_check(domain_t *domainp)
{
	/* This Niagara specific function is not implemented yet. */
}

void niagara_set_sfsr(simcpu_t *sp, ss_mmu_t *mmup, tvaddr_t addr,
    uint_t ft, ss_ctx_t ct, uint_t asi, uint_t w, uint_t e)
{
	new_sfsr = MMU_SFSR_FV;		/* every update latches a valid fault */
	if ((mmup->sfsr & MMU_SFSR_FV) != 0)
		new_sfsr |= MMU_SFSR_OW; /* overwrite: a valid fault was already
					  * latched (assumes the MMU_SFSR_OW
					  * definition used elsewhere) */

	new_sfsr |= (ft << MMU_SFSR_FT_SHIFT);
	new_sfsr |= (ct << MMU_SFSR_CT_SHIFT);
	new_sfsr |= (asi << MMU_SFSR_ASI_SHIFT);

	DBGMMU( lprintf(sp->gid, "%cMMU SFSR update 0x%llx -> 0x%llx SFAR=0x%llx\n",
	    mmup->is_immu ? 'I' : 'D', mmup->sfsr, new_sfsr, mmup->sfar); );
	DBGMMU( lprintf(sp->gid, "%cMMU SFSR update 0x%llx -> 0x%llx\n",
	    mmup->is_immu ? 'I' : 'D', mmup->sfsr, new_sfsr); );

	mmup->sfsr = new_sfsr;
}
/*
 * Below are CPU specific error injection routines.  They are called when an
 * error condition is detected, and clear the error flags if there are no
 * more errors to post.  The error condition may not be cleared if further
 * handling is required, e.g. demap of a tlb entry with bad parity or flush
 * of a cacheline with bad ecc.
 */

void extract_error_type(error_conf_t *errorconfp)
{
	errorconfp->type_namep = strdup(lex.strp);
	if (streq(lex.strp,"IRC"))
		errorconfp->type = IRC;
	else if (streq(lex.strp,"IRU"))
		errorconfp->type = IRU;
	else if (streq(lex.strp,"FRC"))
		errorconfp->type = FRC;
	else if (streq(lex.strp,"FRU"))
		errorconfp->type = FRU;
	else if (streq(lex.strp,"IMTU"))
		errorconfp->type = IMTU;
	else if (streq(lex.strp,"IMDU"))
		errorconfp->type = IMDU;
	else if (streq(lex.strp,"DMTU"))
		errorconfp->type = DMTU;
	else if (streq(lex.strp,"DMDU"))
		errorconfp->type = DMDU;
	else if (streq(lex.strp,"DMSU"))
		errorconfp->type = DMSU;
	else if (streq(lex.strp,"ITC"))
		errorconfp->type = ITC;
	else if (streq(lex.strp,"IDC"))
		errorconfp->type = IDC;
	else if (streq(lex.strp,"DTC"))
		errorconfp->type = DTC;
	else if (streq(lex.strp,"DDC"))
		errorconfp->type = DDC;
	else if (streq(lex.strp,"MAU"))
		errorconfp->type = MAU;
	else if (streq(lex.strp,"LDRC"))
		errorconfp->type = LDRC;
	else if (streq(lex.strp,"LDSC"))
		errorconfp->type = LDSC;
	else if (streq(lex.strp,"LTC"))
		errorconfp->type = LTC;
	else if (streq(lex.strp,"LDAC"))
		errorconfp->type = LDAC;
	else if (streq(lex.strp,"LDWC"))
		errorconfp->type = LDWC;
	else if (streq(lex.strp,"LDAU"))
		errorconfp->type = LDAU;
	else if (streq(lex.strp,"LDWU"))
		errorconfp->type = LDWU;
	else if (streq(lex.strp,"DAC"))
		errorconfp->type = DAC;
	else if (streq(lex.strp,"DRC"))
		errorconfp->type = DRC;
	else if (streq(lex.strp,"DSC"))
		errorconfp->type = DSC;
	else if (streq(lex.strp,"DAU"))
		errorconfp->type = DAU;
	else if (streq(lex.strp,"DSU"))
		errorconfp->type = DSU;
	else
		lex_fatal("unknown error type parsing error config");
}
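/*
 * The mnemonics parsed above follow the PRM error taxonomy: integer and
 * floating-point register-file errors (IRC/IRU, FRC/FRU), I- and D-MMU tag
 * and data parity errors (IMTU/IMDU, DMTU/DMDU/DMSU), L1 instruction- and
 * data-cache errors (ITC/IDC, DTC/DDC), modular arithmetic unit errors
 * (MAU), L2 cache errors (LDRC/LDSC/LTC/LDAC/LDWC/LDAU/LDWU) and DRAM
 * errors (DAC/DRC/DSC/DAU/DSU), with a trailing C or U distinguishing
 * correctable from uncorrectable.
 */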
void update_errflags(simcpu_t *sp)
{
	sp->errorp->check_xdcache = find_errconf(sp, (LD|ST),
	    (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU)) ? true : false;
	sp->errorp->check_xicache = (find_errconf(sp, IFETCH,
	    (ITC|IDC|LDAC|LDAU|DAC|DAU))) ? true : false;
	sp->errorp->check_dtlb = (find_errconf(sp, (LD|ST),
	    (DMDU|DMSU))) ? true : false;
}
/*
 * If demap of a tlb entry with a parity error is detected, then remove the
 * error config.
 */
void tlb_entry_error_match(simcpu_t *sp, ss_mmu_t *mmup, tlb_entry_t *tep)
{
	if (sp->errorp->itep == tep && mmup->is_immu) {
		if ((ep = find_errconf(sp, IFETCH, IMDU)) == NULL)
			goto tlb_warning;
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
	}
	if (sp->errorp->dtep == tep && !mmup->is_immu) {
		if ((ep = find_errconf(sp, (LD|ST), DMDU)) == NULL)
			goto tlb_warning;
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
	}
	return;

tlb_warning:
	EXEC_WARNING(("tlb_entry_error_match(): tracking tlb"
	    " entry in error for non-existent error config"));
}
void ss_error_condition(simcpu_t *sp, error_conf_t *ep)
{
	v9p = (sparcv9_cpu_t *)(sp->specificp);
	nsp = v9p->impl_specificp;
	npp = sp->config_procp->procp;

	DBGERR( lprintf(sp->gid, "ss_error_condition() etype = %s\n",
	    ep->type_namep); );

	switch (ep->type) {
	case IRC:
		nsp->error.status = NA_IRC_bit;
		nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp)
		    | I_SYND(IREG_FAKE_SYND_SINGLE));
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

	case IRU:
		nsp->error.status = NA_IRU_bit;
		nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp)
		    | I_SYND(IREG_FAKE_SYND_DOUBLE));
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_internal_processor_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

	case FRC:
		nsp->error.status = NA_FRC_bit;
		nsp->error.addr = (F_REG_NUM(errorp->reg) |
		    EVEN_SYND(FREG_FAKE_SYND_SINGLE) | ODD_SYND(NULL));
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

	case FRU:
		nsp->error.status = NA_FRU_bit;
		nsp->error.addr = (F_REG_NUM(errorp->reg) |
		    EVEN_SYND(FREG_FAKE_SYND_DOUBLE) | ODD_SYND(NULL));
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_internal_processor_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case IMTU:
		nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit);
		nsp->error.addr = TLB_INDEX(errorp->tlb_idx[IMTU_IDX]);
		errorp->tlb_idx[IMTU_IDX] = NULL;
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

	case IMDU:
		/* injected into the TLB: status names the entry by index */
		nsp->error.status = (NA_PRIV_bit|NA_IMDU_bit);
		nsp->error.addr = TLB_INDEX(errorp->tlb_idx[IMDU_IDX]);
		errorp->tlb_idx[IMDU_IDX] = NULL;
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);

		/* hit at ifetch time: status carries privilege and the PC */
		nsp->error.status = NA_IMDU_bit;
		nsp->error.status |= (ep->priv == V9_HyperPriv ||
		    ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = MMU_PC(sp->pc);
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_instruction_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		break;
	case DMTU:
		nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit);
		nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMTU_IDX]);
		errorp->tlb_idx[DMTU_IDX] = NULL;
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

	case DMDU:
		/* injected into the TLB: status names the entry by index */
		nsp->error.status = (NA_PRIV_bit|NA_DMDU_bit);
		nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMDU_IDX]);
		errorp->tlb_idx[DMDU_IDX] = NULL;
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);

		/* hit on a data access: status carries privilege and the VA */
		nsp->error.status = NA_DMDU_bit;
		nsp->error.status |= (ep->priv == V9_HyperPriv ||
		    ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = MMU_VA(errorp->addr);
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		break;
	case DMSU:
		nsp->error.status = NA_DMSU_bit;
		nsp->error.status |= (ep->priv == V9_HyperPriv ||
		    ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = MMU_VA(errorp->addr);
		if (nsp->error.enabled & NA_NCEEN) {
			tt = Sparcv9_trap_data_access_error;
			v9p->post_precise_trap(sp, tt);
		}
		break;
	case ITC:
		nsp->error.status = NA_ITC_bit;
		goto icache_error;
	case IDC:
		nsp->error.status = NA_IDC_bit;
icache_error:
		nsp->error.status |= (ep->priv == V9_HyperPriv ||
		    ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = L1_PA(errorp->addr);
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case DTC:
		nsp->error.status = NA_DTC_bit;
		goto dcache_error;
	case DDC:
		nsp->error.status = NA_DDC_bit;
dcache_error:
		nsp->error.status |= (ep->priv == V9_HyperPriv ||
		    ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
		nsp->error.addr = L1_PA(errorp->addr);
		if (nsp->error.enabled & NA_CEEN) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
	case LDAC:
		bank = (errorp->addr >> 6) & 0x3;
		l2p->error_status[bank] = L2_LDAC_bit | L2_TID(tid) | L2_VEC_bit |
		    L2_FAKE_SYND_SINGLE | errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if ((nsp->error.enabled & NA_CEEN) &&
		    (l2p->error_enable[bank] & L2_CEEN)) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(sp, tt);
		}
		/* l2 corrected on partial store or atomic hit */
		npp->errorp->ldac_addr = NULL;
		/* l2 uncorrected on load/ifetch hit so make error proc-wide */
		npp->errorp->ldac_addr = errorp->addr;
		/*
		 * NB: proper behavior is to flush all cpu xdcache's,
		 * but there is no lock on the xdc so I didn't try it
		 */
		sp->xdcache_trans_flush_pending = true;
		/* bit of a hack - some errorconf's aren't owned by sp's so free them */
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case LDWC:
		bank = (errorp->addr >> 6) & 0x3;
		l2p->error_status[bank] = L2_LDWC_bit | L2_TID(tid) | L2_VEC_bit |
		    L2_FAKE_SYND_SINGLE | L2_RW_bit;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		/* writeback errors are steered to the errorsteer thread */
		tid = (l2p->control[bank] & L2_ERRORSTEER);
		v9p = npp->strand[STRANDID2IDX(npp, tid)];
		nsp = v9p->impl_specificp;
		if ((nsp->error.enabled & NA_CEEN) &&
		    (l2p->error_enable[bank] & L2_CEEN)) {
			tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
			v9p->post_precise_trap(esp, tt);
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
	case LDAU:
		bank = (errorp->addr >> 6) & 0x3;
		l2p->error_status[bank] = L2_LDAU_bit | L2_TID(tid) | L2_VEU_bit |
		    L2_FAKE_SYND_DOUBLE | errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if (l2p->error_enable[bank] & L2_NCEEN) {
			nsp->error.status = NA_LDAU_bit;
			nsp->error.status |= (ep->priv == V9_HyperPriv ||
			    ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
			nsp->error.addr = L1_PA(errorp->addr);
			if (nsp->error.enabled & NA_NCEEN) {
				tt = (ep->type == IFETCH)
				    ? Sparcv9_trap_instruction_access_error
				    : Sparcv9_trap_data_access_error;
				v9p->post_precise_trap(sp, tt);
			}
		}
		/*
		 * store error info to cacheline for error handler diag access
		 * and to support direct-mapped mode displacement flushing
		 */
		/* index stores to a 32bit word and its ECC+rsvd bits */
		idx = (errorp->addr & (L2_WAY | L2_LINE | L2_BANK | L2_WORD)) >> 2;
		/* put oddeven select bit low so data is in addr order */
		idx |= ((errorp->addr >> L2_ODDEVEN_SHIFT) & 1);
		l2p->diag_datap[idx] = ((0xabbadada << 7) | L2_FAKE_SYND_DOUBLE);
		/* index stores to a tag and its ECC+rsvd bits */
		idx = (errorp->addr & (L2_WAY | L2_LINE | L2_BANK)) >> 6;
		l2p->diag_tagp[idx] = (errorp->addr & L2_TAG) >> 12;
		/* index valid/dirty or alloc/used bits and parity */
		idx = (errorp->addr & (L2_LINE | L2_BANK)) >> 6;
		idx |= ((errorp->addr & L2_VDSEL) >> 10);
		l2p->diag_vuadp[idx] = 0xfff << 12; /* all lines valid/clean */

		/* uncorrectable error in l2 so make it proc-wide */
		npp->errorp->ldau_addr = errorp->addr;
		sp->xdcache_trans_flush_pending = true;
		/* bit of a hack - some errorconf's aren't owned by sp's so free them */
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case LDWU:
		bank = (errorp->addr >> 6) & 0x3;
		l2p->error_status[bank] = L2_LDWU_bit | L2_TID(tid) | L2_VEU_bit |
		    L2_FAKE_SYND_DOUBLE | L2_RW_bit;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if ((nsp->error.enabled & NA_NCEEN) &&
		    (l2p->error_enable[bank] & L2_NCEEN)) {
			tid = (l2p->control[bank] & L2_ERRORSTEER);
			v9p = npp->strand[STRANDID2IDX(npp, tid)];
			tt = (sparcv9_trap_type_t)N1_trap_data_error;
			v9p->post_precise_trap(esp, tt);
		}
		npp->errorp->ldau_addr = errorp->addr;
		/* bit of a hack - some errorconf's aren't owned by sp's so free them */
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
	case DAC:
		bank = (errorp->addr >> 6) & 0x3;
		dbp = &(npp->mbankp[bank]);
		dbp->error_status = DRAM_DAC_bit | DRAM_FAKE_SYND_SINGLE;

		/* if store miss and L2 disabled then only set DRAM error status */
		if (ep->op == ST && !errorp->partial_st) {
			for (bank = 0; bank < npp->num_l2banks; bank++) {
				if (l2p->control[bank] & L2_DIS)
					break;	/* leave only DRAM status set */
			}
		}

		bank = (errorp->addr >> 6) & 0x3;
		l2p->error_status[bank] = L2_DAC_bit | L2_TID(tid) | L2_VEC_bit |
		    errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if ((nsp->error.enabled & NA_CEEN) &&
		    (l2p->error_enable[bank] & L2_CEEN)) {
			/*
			 * partial stores and odd-numbered cache lines
			 * redirected to errorsteer thread
			 */
			if (errorp->partial_st || (errorp->addr & 0x40)) {
				tid = (l2p->control[bank] & L2_ERRORSTEER);
				v9p = npp->strand[STRANDID2IDX(npp, tid)];
				l2p->error_status[bank] &= ~(errorp->l2_write);
				tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
				v9p->post_precise_trap(esp, tt);
			} else {
				tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
				v9p->post_precise_trap(sp, tt);
			}
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;
	case DAU:
		bank = (errorp->addr >> 6) & 0x3;
		dbp = &(npp->mbankp[bank]);
		dbp->error_status = DRAM_DAU_bit | DRAM_FAKE_SYND_DOUBLE;

		l2p->error_status[bank] = L2_DAU_bit | L2_TID(tid) | L2_VEU_bit |
		    errorp->l2_write;
		l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
		if (l2p->error_enable[bank] & L2_NCEEN) {
			nsp->error.status = NA_LDAU_bit; /* as per Table 12-4 of PRM */
			nsp->error.status |= (ep->priv == V9_HyperPriv ||
			    ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
			/*
			 * partial stores and odd-numbered cache lines
			 * redirected to errorsteer thread
			 */
			if (errorp->partial_st || (errorp->addr & 0x40)) {
				tid = (l2p->control[bank] & L2_ERRORSTEER);
				v9p = npp->strand[STRANDID2IDX(npp, tid)];
				l2p->error_status[bank] &= ~(errorp->l2_write);
				/*
				 * set address to non-requested 16B block
				 * within the same 64B cache line
				 */
				errorp->addr = (errorp->addr & ~0x30) |
				    (((errorp->addr & 0x30) + 0x10) % 0x40);
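				/*
				 * Worked example of the remapping above:
				 * address bits [5:4] step 0x00->0x10,
				 * 0x10->0x20, 0x20->0x30, 0x30->0x00, so the
				 * reported 16B block is always in the same
				 * 64B line but never the one the access
				 * actually requested.
				 */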
				nsp->error.addr = L1_PA(errorp->addr);
				tt = (sparcv9_trap_type_t)N1_trap_data_error;
				v9p->post_precise_trap(esp, tt);
			} else {
				nsp->error.addr = L1_PA(errorp->addr);
				if (nsp->error.enabled & NA_NCEEN) {
					tt = (ep->type == IFETCH)
					    ? Sparcv9_trap_instruction_access_error
					    : Sparcv9_trap_data_access_error;
					v9p->post_precise_trap(sp, tt);
				}
			}
		}
		if (remove_errconf(sp, ep) == NULL)
			clear_errflags(sp); else update_errflags(sp);
		break;

		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));

	default:
		if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
		EXEC_WARNING(("Unspecified Error Type: %s\n", ep->type_namep));
		break;
	}
}