/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: niagara.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "@(#)niagara.c 1.62 07/02/28 SMI"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h> /* memcpy/memset */
#include <strings.h>
#include <thread.h>
#include "ss_common.h"
#include "jbus_mondo.h"
#include "niagara.h"
#include "fpsim.h"
#if INTERNAL_BUILD
#include "modarith.h"
#endif
static void niagara_init_trap_list();
static bool_t niagara_init_proc_type(proc_type_t * proc_typep);
static op_funcp niagara_decode_me(simcpu_t *sp, xicache_instn_t * xcip, uint32_t instn);
static void niagara_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp);
static void niagara_send_xirq(simcpu_t * sp, uint64_t val);
static void niagara_set_sfsr(simcpu_t *sp, ss_mmu_t *mmup, tvaddr_t addr,
uint_t ft, ss_ctx_t ct, uint_t asi, uint_t w, uint_t e);
static void niagara_domain_check(domain_t *domainp);
static void niagara_init_trap_list()
{
static ss_trap_list_t setup_list[] = {
/* Priorities: 0 = highest, XX = lowest */
/* Number Name Priority User Priv HPriv */
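/*
 * Each entry gives the trap number, its name, a Pri(major, minor)
 * priority and three columns selecting the behaviour when the trap is
 * raised from User, Privileged and Hyperprivileged state respectively.
 * Going by the adjoining comments, H and P appear to steer the trap to
 * the hyperprivileged or privileged handler, SW marks traps only ever
 * synthesised by software, and UH/X mark combinations that are
 * unexpected or impossible; the letter macros are #undef'd at the end
 * of the table.
 */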
/* 0x00 */ { T( legion_save_state ), Pri( 0, 0), H, H, H },
/* 0x01 */ { T( power_on_reset ), Pri( 0, 0), H, H, H },
/* 0x02 */ { T( watchdog_reset ), Pri( 1, 0), H, H, H },
/* 0x03 */ { T( externally_initiated_reset ), Pri( 1, 0), H, H, H },
/* 0x04 */ { T( software_initiated_reset ), Pri( 1, 0), H, H, H },
/* 0x05 */ { T( RED_state_exception ), Pri( 1, 0), H, H, H },
/* 0x08 */ { T( instruction_access_exception ), Pri( 5, 0), H, H, X },
/* 0x09 */ { T( instruction_access_MMU_miss ), Pri( 2,16), SW, SW, SW },
/* 0x0a */ { T( instruction_access_error ), Pri( 3, 0), H, H, H },
/* 0x10 */ { T( illegal_instruction ), Pri( 7, 0), H, H, H },
/* 0x11 */ { T( privileged_opcode ), Pri( 6, 0), P, X, X },
/* LDD and STD are in fact implemented by niagara */
/* 0x12 */ { T( unimplemented_LDD ), Pri( 6, 0), X, X, X }, /* error if received by hypervisor. */
/* 0x13 */ { T( unimplemented_STD ), Pri( 6, 0), X, X, X }, /* error if received by hypervisor. */
/* 0x20 */ { T( fp_disabled ), Pri( 8, 0), P, P, UH }, /* error if received by hypervisor. */
/* 0x21 */ { T( fp_exception_ieee_754 ), Pri(11, 0), P, P, UH }, /* error if received by hypervisor. */
/* 0x22 */ { T( fp_exception_other ), Pri(11, 0), P, P, UH }, /* error if received by hypervisor. */
/* 0x23 */ { T( tag_overflow ), Pri(14, 0), P, P, UH }, /* error if received by hypervisor. */
/* 0x24 */ { T( clean_window ), Pri(10, 0), P, P, UH }, /* error if received by hypervisor - windows not used. */
/* 0x28 */ { T( division_by_zero ), Pri(15, 0), P, P, UH }, /* error if received by hypervisor. */
/* 0x29 */ { T( internal_processor_error ), Pri( 4, 0), H, H, H }, /* generated by register parity errors */
/* 0x30 */ { T( data_access_exception ), Pri(12, 0), H, H, UH }, /* error if received by hypervisor - MMU not used. */
/* 0x31 */ { T( data_access_MMU_miss ), Pri(12, 0), SW, SW, SW }, /* Should not be generated by hardware */
/* 0x32 */ { T( data_access_error ), Pri(12, 0), H, H, H }, /* handle error and generate report to appropriate supervisor. */
/* 0x33 */ { T( data_access_protection ), Pri(12, 0), H, H, H }, /* error if received by hypervisor - MMU not used. */
/* 0x34 */ { T( mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */
/* 0x35 */ { T( LDDF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */
/* 0x36 */ { T( STDF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */
/* 0x37 */ { T( privileged_action ), Pri(11, 0), H, X, X }, /* error if received from hypervisor. */
/* 0x38 */ { T( LDQF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */
/* 0x39 */ { T( STQF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */
/* 0x3e */ { T( instruction_real_translation_miss ), Pri(2, 0), H, H, H }, /* real to pa entry not found in ITLB */
/* 0x3f */ { T( data_real_translation_miss ), Pri(12, 0), H, H, H }, /* real to pa entry not found in DTLB */
/* is this one ever generated ? */
/* 0x40 */ { T( async_data_error ), Pri( 2, 0), H, H, H }, /* remap to sun4v error report */
/* 0x41 */ { T( interrupt_level_1 ), Pri(31, 0), P, P, X },
/* 0x42 */ { T( interrupt_level_2 ), Pri(30, 0), P, P, X },
/* 0x43 */ { T( interrupt_level_3 ), Pri(29, 0), P, P, X },
/* 0x44 */ { T( interrupt_level_4 ), Pri(28, 0), P, P, X },
/* 0x45 */ { T( interrupt_level_5 ), Pri(27, 0), P, P, X },
/* 0x46 */ { T( interrupt_level_6 ), Pri(26, 0), P, P, X },
/* 0x47 */ { T( interrupt_level_7 ), Pri(25, 0), P, P, X },
/* 0x48 */ { T( interrupt_level_8 ), Pri(24, 0), P, P, X },
/* 0x49 */ { T( interrupt_level_9 ), Pri(23, 0), P, P, X },
/* 0x4a */ { T( interrupt_level_a ), Pri(22, 0), P, P, X },
/* 0x4b */ { T( interrupt_level_b ), Pri(21, 0), P, P, X },
/* 0x4c */ { T( interrupt_level_c ), Pri(20, 0), P, P, X },
/* 0x4d */ { T( interrupt_level_d ), Pri(19, 0), P, P, X },
/* 0x4e */ { T( interrupt_level_e ), Pri(18, 0), P, P, X },
/* 0x4f */ { T( interrupt_level_f ), Pri(17, 0), P, P, X },
/* 0x5e */ { T( hstick_match ), Pri( 2, 0), H, H, H },
/* 0x5f */ { T( trap_level_zero ), Pri( 2, 8), H, H, X }, /* This trap requires TL==0, priv==1 and hpriv==0 */
/* 0x60 */ { T( interrupt_vector_trap ), Pri(16, 0), H, H, H }, /* handle & remap to sun4v as appropriate mondo queue */
/* 0x61 */ { T( RA_watchpoint ), Pri(12, 0), SW, SW, SW }, /* not used by hypervisor, so error if received from hypervisor. */
/* 0x62 */ { T( VA_watchpoint ), Pri(11, 0), P, P, X }, /* error - VA watchpoints should be pended if hpriv=1 */
/* 0x63 */ { T( ECC_error ), Pri(33, 0), H, H, H }, /* handle & create sun4v error report(s) */
/* 0x64 */ { T( fast_instruction_access_MMU_miss ), Pri( 2,24), H, H, H }, /* handle & proper TSB check. */
/* 0x68 */ { T( fast_data_access_MMU_miss ), Pri(12, 0), H, H, H }, /* handle & proper TSB check. */
/* 0x6c */ { T( fast_data_access_protection ), Pri(12, 0), H, H, H }, /* handle & proper TSB check. */
/* 0x74 */ { TN1( modular_arithmetic ), Pri(16, 1), H, H, H },
/* 0x76 */ { T( instruction_breakpoint ), Pri(7, 1), H, H, H },
/* 0x78 */ { TN1( data_error ), Pri(13, 0), H, H, H },
/* 0x7c */ { T( cpu_mondo_trap ), Pri(16, 2), P, P, X },
/* 0x7d */ { T( dev_mondo_trap ), Pri(16, 3), P, P, X },
/* 0x7e */ { T( resumable_error ), Pri(33, 0), P, P, X },
/* faked by the hypervisor */
/* 0x7f */ { T( nonresumable_error ), Pri( 4, 0), SW, SW, SW },
/* 0x80 */ { T( spill_0_normal ), Pri( 9, 0), P, P, UH },
/* 0x84 */ { T( spill_1_normal ), Pri( 9, 0), P, P, UH },
/* 0x88 */ { T( spill_2_normal ), Pri( 9, 0), P, P, UH },
/* 0x8c */ { T( spill_3_normal ), Pri( 9, 0), P, P, UH },
/* 0x90 */ { T( spill_4_normal ), Pri( 9, 0), P, P, UH },
/* 0x94 */ { T( spill_5_normal ), Pri( 9, 0), P, P, UH },
/* 0x98 */ { T( spill_6_normal ), Pri( 9, 0), P, P, UH },
/* 0x9c */ { T( spill_7_normal ), Pri( 9, 0), P, P, UH },
/* 0xa0 */ { T( spill_0_other ), Pri( 9, 0), P, P, UH },
/* 0xa4 */ { T( spill_1_other ), Pri( 9, 0), P, P, UH },
/* 0xa8 */ { T( spill_2_other ), Pri( 9, 0), P, P, UH },
/* 0xac */ { T( spill_3_other ), Pri( 9, 0), P, P, UH },
/* 0xb0 */ { T( spill_4_other ), Pri( 9, 0), P, P, UH },
/* 0xb4 */ { T( spill_5_other ), Pri( 9, 0), P, P, UH },
/* 0xb8 */ { T( spill_6_other ), Pri( 9, 0), P, P, UH },
/* 0xbc */ { T( spill_7_other ), Pri( 9, 0), P, P, UH },
/* 0xc0 */ { T( fill_0_normal ), Pri( 9, 0), P, P, UH },
/* 0xc4 */ { T( fill_1_normal ), Pri( 9, 0), P, P, UH },
/* 0xc8 */ { T( fill_2_normal ), Pri( 9, 0), P, P, UH },
/* 0xcc */ { T( fill_3_normal ), Pri( 9, 0), P, P, UH },
/* 0xd0 */ { T( fill_4_normal ), Pri( 9, 0), P, P, UH },
/* 0xd4 */ { T( fill_5_normal ), Pri( 9, 0), P, P, UH },
/* 0xd8 */ { T( fill_6_normal ), Pri( 9, 0), P, P, UH },
/* 0xdc */ { T( fill_7_normal ), Pri( 9, 0), P, P, UH },
/* 0xe0 */ { T( fill_0_other ), Pri( 9, 0), P, P, UH },
/* 0xe4 */ { T( fill_1_other ), Pri( 9, 0), P, P, UH },
/* 0xe8 */ { T( fill_2_other ), Pri( 9, 0), P, P, UH },
/* 0xec */ { T( fill_3_other ), Pri( 9, 0), P, P, UH },
/* 0xf0 */ { T( fill_4_other ), Pri( 9, 0), P, P, UH },
/* 0xf4 */ { T( fill_5_other ), Pri( 9, 0), P, P, UH },
/* 0xf8 */ { T( fill_6_other ), Pri( 9, 0), P, P, UH },
/* 0xfc */ { T( fill_7_other ), Pri( 9, 0), P, P, UH },
/*0x100-0x17f*/{T( trap_instruction ), Pri(16,32), P, P, H }, /* hv1: handles hypervisor traps only. Error if received from hypervisor. */
/*0x180-0x1ff*/{T( htrap_instruction ), Pri(16,32), X, H, UH }, /* used to implement the supervisor to hypervisor API call. */
#undef T
#undef TN1
#undef TN2
#undef TRK
#undef X
#undef SW
#undef P
#undef H
#undef UH
#undef Pri
{ -1, (char*)0 },
};
uint_t i;
for (i=0; setup_list[i].trap_type != -1; i++) {
ASSERT( setup_list[i].trap_type>=SS_trap_legion_save_state && setup_list[i].trap_type<SS_trap_illegal_value );
ss_trap_list[ setup_list[i].trap_type ] = setup_list[i];
}
/* Now clone the trap instruction entries */
for (i=0x101; i<0x180; i++) {
ss_trap_list[ i ] = ss_trap_list[ 0x100 ];
ss_trap_list[ i ].trap_type = i;
}
for (i=0x181; i<0x200; i++) {
ss_trap_list[ i ] = ss_trap_list[ 0x180 ];
ss_trap_list[ i ].trap_type = i;
}
}
extern struct fpsim_functions fpsim_funclist;
proc_type_t proc_type_niagara={
"niagara",
false, /* module initialised */
niagara_init_proc_type,
/* config support */
ss_parse,
ss_init,
ss_dump,
/* execution support functions */
ss_dbgr_regread,
ss_dbgr_regwrite,
ss_exec_setup,
ss_exec_cleanup,
ss_save_state,
ss_check_async_event,
ss_take_exception,
#if ERROR_INJECTION
ss_error_condition,
#endif
#if ERROR_TRAP_GEN /* { */
trigger_error_trap,
ss_error_reload_file,
ss_error_dump_active,
ss_error_dump_supported,
#endif /* } */
n1_sp_interrupt,
niagara_decode_me,
/* pointer to fpsim instructions */
&fpsim_funclist,
/* performance measuring funcs */
sparcv9_perf_dump,
/* dump tlb, instruction counts etc */
ss_dump_tlbs,
ss_dump_instruction_counts,
/* external interface methods */
ss_ext_signal,
ss_get_cpuid,
niagara_get_pseudo_dev,
ss_dev_mem_access,
/* debugger interface methods */
ss_dbgr_attach,
ss_dbgr_detach,
ss_dbgr_mem_read,
ss_dbgr_mem_write,
ss_dbgr_mem_clear,
ss_dbgr_set_break,
ss_dbgr_clear_break,
niagara_domain_check,
sparcv9_reg_map,
NULL, /* debug_hookp */
NULL, /* debug_hook_dumpp */
CPU_MAGIC
};
/*
* Niagara uses registers located at magic addresses in its physical address
* space to control functional units placed outside the direct processor core.
* We emulate these with pseudo devices that are created implicitly when a Niagara
* is declared.
* To support this we have a number of device and function definitions below.
*/
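/*
 * The implied device mappings set up in ss_setup_pseudo_devs() below are:
 *   clock unit      0x96_0000_0000 (+4GB)
 *   DRAM control    0x97_0000_0000 (+4KB per bank)
 *   IOB             0x98_0000_0000 (+4GB)
 *   JBI             0x80_0000_0000 (+4GB)
 *   JBUS            0x9F_0000_0000 (+4GB)
 *   L2 control      0xA0_0000_0000 (+0x1F_0000_0000)
 *   SSI             0xFF_0000_0000 (+256MB)
 */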
static void ss_clock_init(config_dev_t *);
static void ss_dram_ctl_init(config_dev_t *);
static void ss_iob_init(config_dev_t *);
static void ss_jbi_init(config_dev_t *);
static void ss_jbus_init(config_dev_t *);
static void ss_l2_ctl_init(config_dev_t *);
static void ss_ssi_init(config_dev_t *);
static bool_t ss_clock_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t ss_dram_ctl_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp);
static bool_t ss_iob_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t ss_jbi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t ss_jbus_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t ss_l2_ctl_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp);
static bool_t ss_ssi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static dev_type_t dev_type_ss_clock = {
"ss_clock",
NULL, /* parse */
ss_clock_init,
NULL, /* dump */
generic_device_non_cacheable,
ss_clock_access,
DEV_MAGIC
};
static dev_type_t dev_type_ss_dram_ctl = {
"ss_memory_ctl",
NULL, /* parse */
ss_dram_ctl_init,
NULL, /* dump */
generic_device_non_cacheable,
ss_dram_ctl_access,
DEV_MAGIC
};
static dev_type_t dev_type_ss_l2_ctl = {
"ss_l2_ctl",
NULL, /* parse */
ss_l2_ctl_init,
NULL, /* dump */
generic_device_non_cacheable,
ss_l2_ctl_access,
DEV_MAGIC
};
static dev_type_t dev_type_ss_iob = {
"ss_iob",
NULL, /* parse */
ss_iob_init,
NULL, /* dump */
generic_device_non_cacheable,
ss_iob_access,
DEV_MAGIC
};
static dev_type_t dev_type_ss_jbi = {
"ss_jbi",
NULL, /* parse */
ss_jbi_init,
NULL, /* dump */
generic_device_non_cacheable,
ss_jbi_access,
DEV_MAGIC
};
static dev_type_t dev_type_ss_jbus = {
"ss_jbus",
NULL, /* parse */
ss_jbus_init,
NULL, /* dump */
generic_device_non_cacheable,
ss_jbus_access,
DEV_MAGIC
};
static dev_type_t dev_type_ss_ssi = {
"ss_ssi",
NULL, /* parse */
ss_ssi_init,
NULL, /* dump */
generic_device_non_cacheable,
ss_ssi_access,
DEV_MAGIC
};
/*
* Perform any processor specific parsing for "proc" elements in
* Legion config file. Returns true if token was handled by this
* function, false otherwise.
*/
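/*
 * For example, a (hypothetical) proc entry in a config file might
 * contain the token handled here as:
 *
 *     rust_jbi_stores ;
 *
 * (the exact surrounding syntax is owned by the generic proc parser).
 */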
bool_t
ss_parse_proc_entry(ss_proc_t *procp, domain_t *domainp)
{
if (streq(lex.strp,"rust_jbi_stores")) {
procp->rust_jbi_stores = true;
lex_get(T_S_Colon);
} else {
/* Didn't match any Niagara specific element */
return false;
}
/* Handled some Niagara specific element */
return true;
}
/*
* Set up the pseudo physical devices specific for N1.
*/
void ss_setup_pseudo_devs(domain_t * domainp, ss_proc_t *procp)
{
config_dev_t *pd, *overlapp;
/*
* Clock Unit
*/
procp->clockp = Xcalloc(1, ss_clock_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ss_clock;
pd->devp = (void*)procp;
procp->clock_devp = pd;
insert_domain_address(domainp, pd, 0x9600000000LL,
0x9600000000LL+0x100000000LL);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* Memory banks
* FIXME: for the moment this is fixed at 4 - need to make it a variable
* FIXME: Should the allocation of mbankp be in ss_init instead ?
*/
procp->num_mbanks = 4;
procp->mbankp = Xcalloc(procp->num_mbanks, ss_dram_bank_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ss_dram_ctl;
pd->devp = (void*)procp;
procp->dram_ctl_devp = pd;
insert_domain_address(domainp, pd, 0x9700000000LL,
0x9700000000LL+4096LL*(uint64_t)procp->num_mbanks);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* IOB
*/
procp->iobp = Xcalloc(1, ss_iob_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ss_iob;
pd->devp = (void*)procp;
procp->iob_devp = pd;
insert_domain_address(domainp, pd, 0x9800000000LL,
0x9800000000LL+0x100000000LL);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* JBI
*/
procp->jbip = Xcalloc(1, ss_jbi_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ss_jbi;
pd->devp = (void*)procp;
procp->jbi_devp = pd;
insert_domain_address(domainp, pd, 0x8000000000LL,
0x8000000000LL+0x100000000LL);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* JBUS
*/
procp->jbusp = Xcalloc(1, ss_jbus_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ss_jbus;
pd->devp = (void*)procp;
procp->jbus_devp = pd;
insert_domain_address(domainp, pd, 0x9f00000000LL,
0x9f00000000LL+0x100000000LL);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* L2 Cache banks
*/
procp->num_l2banks = L2_BANKS;
procp->l2p = Xcalloc(1, ss_l2_cache_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ss_l2_ctl;
pd->devp = (void*)procp;
procp->l2_ctl_devp = pd;
insert_domain_address(domainp, pd, 0xA000000000LL,
0xA000000000LL+0x1F00000000LL);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* SSI
*/
procp->ssip = Xcalloc(1, ss_ssi_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ss_ssi;
pd->devp = (void*)procp;
procp->ssi_devp = pd;
insert_domain_address(domainp, pd, 0xff00000000LL,
0xff00000000LL+0x10000000LL);
}
/*
* Basic module init
*
* Returns false if error initialising module, true if init was OK
*/
bool_t niagara_init_proc_type(proc_type_t * proctp)
{
if (proctp->flag_initialised) {
warning("Initialisation of module %s more than once - bailing", proctp->proc_type_namep);
return true;
}
/* stuff here we only need to do once if we want to use this module */
niagara_init_trap_list();
proctp->flag_initialised = true;
return true;
}
/*
* We arrive here because:
* 1) a malformed (unaligned) PC
* 2) a TLB / icache miss
* 3) an x-cache miss
*/
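/*
 * Rough flow: translate the fetch VA to a PA via the ITLB (unless the
 * MMU is bypassed), remap internal JBus-space PAs to their external
 * form, locate the backing device/memory for that PA, then fill in the
 * xicache line tag and memory offset.
 */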
void ss_xic_miss(simcpu_t * sp, xicache_line_t * xc_linep, tvaddr_t pc)
{
tvaddr_t va, tag;
tpaddr_t pa, pa_tag;
config_addr_t * cap;
tpaddr_t extent;
uint8_t * bufp;
sparcv9_cpu_t * v9p;
ss_strand_t * nsp;
ss_proc_t * npp;
uint_t context, bank;
error_conf_t * ep;
error_t * errorp;
v9p = (sparcv9_cpu_t *)(sp->specificp);
nsp = v9p->impl_specificp;
npp = sp->config_procp->procp;
#if ERROR_INJECTION
errorp = sp->errorp;
#endif
/* FIXME: need a current context variable, not a test here */
context = (v9p->tl>0) ? SS_NUCLEUS_CONTEXT : nsp->pri_context;
/* Quick check then for v9 bus error */
/* The PC always has bits 0 & 1 zero */
ASSERT((pc & 0x3) == 0);
/* align the pc to the start of the XC line */
va = pc;
tag = va & XICACHE_TAG_PURE_MASK;
/*
* Perform a virtual to physical translation
* so we can determine if we are dealing with
* a TLB miss or simply an x-cache miss.
*/
/* Find the pa corresponding to the line we need */
/* We assume that for SunSPARC, the TLB is off in Hyper priv mode */
/* FIXME: we should probably do this by swizzling a function pointer */
* for this when we change mode, rather than having an if here ... fix later */
pa_tag = tag;
if (v9p->pstate.addr_mask) {
pc &= MASK64(31,0);
pa_tag &= MASK64(31,0);
va &= MASK64(31,0);
/* NOTE: we don't mask the tag ... we allow that to match the 64-bit address */
}
pa = va;
if (!nsp->mmu_bypass) {
uint_t idx, partid;
ss_tlb_t * tlbp;
tlb_entry_t * tep;
uint_t flags;
ss_trap_type_t miss_trap_type;
uint_t miss_context;
/* If the MMU is disabled but we're in priv/user mode, use real addresses */
if (!nsp->immu.enabled) {
context = SS_TLB_REAL_CONTEXT;
}
/*
* check for an out-of-range address (i.e. one lying within the "VA hole")
*/
if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) {
niagara_set_sfsr(sp, &nsp->immu, va,
MMU_SFSR_FT_VARANGE, (v9p->tl>0) ?
ss_ctx_nucleus : ss_ctx_primary, 0/*fixme*/, 0, 0);
v9p->post_precise_trap(sp, (sparcv9_trap_type_t)
SS_trap_instruction_access_exception);
return;
}
tlbp = nsp->itlbp;
RW_rdlock(&tlbp->rwlock);
partid = nsp->partid;
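/*
 * Hash bucket index: the VPN at the maximum supported page size,
 * offset by the context and partition id and folded with
 * SS_TLB_HASH_MASK; the chain is then walked for a matching tag,
 * context and partition.
 */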
/* FIXME: Need a better hash than this ! */
idx = va >> SS_MAX_PAGE_SIZE_BITS;
idx += context + partid;
idx &= SS_TLB_HASH_MASK;
/*
* So we search for a matching page using the info we have in the
* hash - while another thread might possibly be removing or
* inserting an entry into the same table.
*/
for ( tep = tlbp->hash[idx].ptr; tep!=(tlb_entry_t*)0; tep = tep->nextp ) {
/* try and match the entry as appropriate */
if (((tep->tag_pfn ^ va)>>tep->match_shift)==0 && tep->match_context==context && tep->partid == partid) goto tlb_match;
}
RW_unlock(&tlbp->rwlock);
DBGMISS( lprintf(sp->gid, "itlb miss: pc=%lx va=%lx ctx=%x\n", pc, va, context); );
/*
* If the MMU is "disabled" in privileged mode ... this is a real miss, not a
* virtual translation miss, so the fault context and trap type are different
*/
if (nsp->immu.enabled) {
miss_context = context;
miss_trap_type = SS_trap_fast_instruction_access_MMU_miss;
} else {
miss_context = 0; /* null for ra->pa miss undefined ? */
miss_trap_type = SS_trap_instruction_real_translation_miss;
}
VA48_WARNING(sp, va);
SET_ITLB_FAULT( nsp, va );
nsp->immu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; /* FIXME: - do properly later */
DBGMMU( lprintf(sp->gid, "IMMU tag access = 0x%llx\n", nsp->immu.tag_access_reg); );
MEMORY_ACCESS_TRAP();
v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
return;
tlb_match:;
flags = tep->flags;
pa += tep->pa_offset;
pa_tag += tep->pa_offset;
RW_unlock(&tlbp->rwlock);
/*
* Errors on itlb hit: stash table_entry pointer and if
* subsequent itlb hit on same entry post error again.
*/
#if ERROR_INJECTION
if (sp->error_check && (ep = find_errconf(sp, IFETCH, IMDU))) {
if (errorp->itep) {
DBGERR( lprintf(sp->gid, "ss_xic_miss(): "
" errorp->itep=%x, tep=%x\n", errorp->itep, tep); );
if ((tlb_entry_t *)errorp->itep == tep) {
ss_error_condition(sp, ep);
return;
}
} else {
errorp->itep = tep;
ss_error_condition(sp, ep);
return;
}
}
#endif
/*
* privilege test
*/
if ( (flags & SS_TLB_FLAG_PRIV) && v9p->state == V9_User) {
VA48_WARNING(sp, va);
SET_ITLB_FAULT( nsp, va );
nsp->immu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | context; /* FIXME: - do properly later */
DBGMMU( lprintf(sp->gid, "priv mapping, state==user: IMMU tag access = 0x%llx\n", nsp->immu.tag_access_reg); );
MEMORY_ACCESS_TRAP();
niagara_set_sfsr(sp, &nsp->immu, va, MMU_SFSR_FT_PRIV, (v9p->tl>0) ? ss_ctx_nucleus : ss_ctx_primary, 0/*fixme*/, 0, 0);
v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_instruction_access_exception);
return;
}
/* Niagara has no EXEC permission check for I fetches */
} else {
/* Niagara only implements 40 bits of PA; the TLB code
masks the PA, so here we need to mask bypass PAs */
pa &= MASK64(39,0);
}
/*
* Now that we have the internal PA, map it to the real
* external PA before looking it up in the domain.
* This does not modify memory addresses, only JBus addresses.
*/
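/*
 * Concretely, from the ranges below: 0x80_0E00_0000-0x80_0FFF_FFFF and
 * 0x80_1000_0000-0x80_FFFF_FFFF keep their low 32 bits and move to
 * 0x400_xxxx_xxxx and 0x600_xxxx_xxxx respectively, while
 * 0xC0_0000_0000-0xFE_FFFF_FFFF simply has 0x700_0000_0000 OR'd in.
 */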
if (pa >= 0x800e000000ull && pa < 0x8010000000ull) {
pa &= 0xffffffffull;
pa |= 0x40000000000ull;
} else if (pa >= 0x8010000000ull && pa < 0x8100000000ull) {
pa &= 0xffffffffull;
pa |= 0x60000000000ull;
} else if (pa >= 0xc000000000ull && pa < 0xff00000000ull) {
pa |= 0x70000000000ull;
}
/*
* OK - now go get the instructions to fill in the xc-line
* ... start by finding the device that has the
* memory we need.
* optimise: first try the device we found last time.
*
*/
/* now find the device - looking in the cache first */
cap = sp->xic_miss_addrp;
if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) {
domain_t * domainp;
config_proc_t * config_procp;
config_procp = sp->config_procp;
domainp = config_procp->domainp;
cap = find_domain_address(domainp, pa);
if (cap == NULL) {
/* OK, it's a bus error - there was no backing store */
fatal("bus error - instruction fetch from pc=0x%llx "
"(cacheline va=0x%llx -> physical 0x%llx)", pc, va, pa); /* FIXME */
}
sp->xic_miss_addrp = cap; /* cache for next time */
}
/* try and get the buffer pointer */
extent = cap->config_devp->dev_typep->dev_cacheable(cap, DA_Instn, pa_tag-cap->baseaddr, &bufp);
if (extent < XICACHE_LINE_SIZE) {
/* bus error again ? or fill from multiple devices ? */
fatal("fix bus error 2");
/* FIXME */
}
/*
* Errors on ifetch to icache or L2 cache
* Make sure the L2 cache is enabled
*/
#if ERROR_INJECTION
if (sp->error_check == true && errorp->check_xicache) {
DBGERR( lprintf(sp->gid, "ss_xic_miss(): ifetch cache hit\n"); );
ep = find_errconf(sp, IFETCH, ITC|IDC|LDAC|LDAU|DAC|DAU);
if (ep)
switch(ep->type) {
case ITC:
case IDC:
errorp->addr = pa;
ss_error_condition(sp, ep);
break;
case LDAC:
case LDAU:
for (bank=0; bank<npp->num_l2banks; bank++) {
if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
}
errorp->addr = pa;
ss_error_condition(sp, ep);
break;
case DAC:
for (bank=0; bank<npp->num_l2banks; bank++) {
if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
}
errorp->addr = pa;
ss_error_condition(sp, ep);
break;
case DAU:
for (bank=0; bank<npp->num_l2banks; bank++) {
if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled;
}
errorp->addr = pa;
ss_error_condition(sp, ep);
break;
l2_disabled: DBGERR( lprintf(sp->gid, "ss_xic_miss: No LDAC/LDAU Error"
" - L2 disabled\n"); );
break;
default:
break;
}
}
#endif
xc_linep->tag = tag | sp->tagstate;
xc_linep->memoryoffset = ((uint64_t)bufp)-tag;
/*
* FIXME: If breakpoints are in use, make sure we really clear the decoded line
* to ensure that we don't get instruction aliasing. The XI-cache probably needs a re-design
* from this standpoint - but this will wait until we complete the JIT version.
* Until then this is a reminder and a placeholder.
*/
if (sp->bp_infop) xicache_clobber_line_decodes(sp, tag);
#if 0 /* { */
xicache_line_fill_risc4(sp, xc_linep, tag, bufp);
#endif /* } */
}
/*
* This is not the world's most efficient routine, but then we assume that ASIs are
* not frequently occurring memory access types - we may have to fast-path
* ASI_AS_IF_USER_PRIMARY etc. somehow if they are used frequently by kernel b-copy.
*/
void
ss_asi_access(simcpu_t * sp, maccess_t op, uint_t regnum, uint_t asi,
uint64_t reg1, uint64_t reg2, asi_flag_t asi_flag)
{
sparcv9_cpu_t * v9p;
ss_strand_t * nsp;
ss_proc_t *npp;
uint64_t val;
ss_tsb_info_t * tsbinfop, * tsbinfop1;
ss_mmu_t * mmup;
ss_tlb_t * tlbp;
bool_t is_load;
uint_t size, mask;
uint_t context_type, idx;
tvaddr_t addr;
mem_flags_t mflags;
bool_t is_real;
sparcv9_trap_type_t tt;
error_conf_t * ep;
v9p = (sparcv9_cpu_t *)(sp->specificp);
nsp = v9p->impl_specificp;
npp = (ss_proc_t *)(sp->config_procp->procp);
ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]);
if (asi == V9_ASI_IMPLICIT)
goto no_asi_valid_checks;
/*
* First check if this is a legitimate ASI based
* on current privilege level.
*/
switch( v9p->state ) {
case V9_User:
ASSERT( !v9p->pstate.priv && !v9p->hpstate.hpriv );
if (asi<0x80) {
addr = ((op & MA_Op_Mask) == MA_CAS) ?
reg1 : (reg1 + reg2);
niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI, ss_ctx_nucleus/*checkme*/, asi, 0/*fixme*/, 0);
v9p->post_precise_trap(sp, Sparcv9_trap_privileged_action);
return;
}
break;
case V9_Priv:
ASSERT( v9p->pstate.priv && !v9p->hpstate.hpriv );
if (asi>=0x30 && asi<0x80) {
/* ASIs reserved for hpriv mode appear to priv mode as data access exceptions */
MEMORY_ACCESS_TRAP();
addr = ((op & MA_Op_Mask) == MA_CAS) ?
reg1 : (reg1 + reg2);
niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI, ss_ctx_nucleus/*checkme*/, asi, 0/*fixme*/, 0);
v9p->post_precise_trap(sp, Sparcv9_trap_data_access_exception);
return;
}
break;
case V9_HyperPriv:
ASSERT( v9p->hpstate.hpriv );
break;
case V9_RED:
ASSERT( v9p->hpstate.red );
break;
default:
abort();
}
no_asi_valid_checks:;
/*
* Next pull out all the memory access ASIs ...
*/
mflags = (V9_User != v9p->state) ? MF_Has_Priv : 0;
context_type = ss_ctx_reserved;
mask = (1<<(op & MA_Size_Mask))-1;
switch(asi) {
case V9_ASI_IMPLICIT:
if (v9p->tl > 0) {
asi = (v9p->pstate.cle) ? SS_ASI_NUCLEUS_LITTLE : SS_ASI_NUCLEUS;
goto ss_asi_nucleus;
}
asi = (v9p->pstate.cle) ? SS_ASI_PRIMARY_LITTLE : SS_ASI_PRIMARY;
goto ss_asi_primary;
case SS_ASI_NUCLEUS_LITTLE:
case SS_ASI_NUCLEUS:
ss_asi_nucleus:;
asi_nuc:;
context_type = ss_ctx_nucleus;
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
goto memory_access;
case SS_ASI_PRIMARY_NO_FAULT_LITTLE:
case SS_ASI_PRIMARY_NO_FAULT:
if (IS_V9_MA_STORE(op & MA_Op_Mask))
goto data_access_exception;
mflags |= MF_No_Fault;
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
goto asi_prim;
case SS_ASI_AS_IF_USER_PRIMARY_LITTLE:
case SS_ASI_AS_IF_USER_PRIMARY:
mflags &= ~MF_Has_Priv;
goto asi_prim;
case SS_ASI_PRIMARY_LITTLE: /* (88) RW Implicit Primary Address space (LE) */
case SS_ASI_PRIMARY: /* (80) RW Implicit Primary Address space */
ss_asi_primary:;
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
asi_prim:;
if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
context_type = ss_ctx_primary;
goto memory_access;
case SS_ASI_SECONDARY_NO_FAULT_LITTLE:
case SS_ASI_SECONDARY_NO_FAULT:
if (IS_V9_MA_STORE(op & MA_Op_Mask))
goto data_access_exception;
mflags |= MF_No_Fault;
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
goto asi_sec;
case SS_ASI_AS_IF_USER_SECONDARY_LITTLE:
case SS_ASI_AS_IF_USER_SECONDARY:
mflags &= ~MF_Has_Priv;
goto asi_sec;
case SS_ASI_SECONDARY_LITTLE:
case SS_ASI_SECONDARY:
asi_sec:;
if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
context_type = ss_ctx_secondary;
goto memory_access;
case SS_ASI_REAL_IO_LITTLE: /* (1D) RW Same as ASI_PHYS_USE_EC_LITTLE for memory
addresses. For IO addresses, physical address,
non-cacheable, with side-effect (LE) */
case SS_ASI_REAL_IO: /* (15) RW Same as ASI_PHYS_USE_EC for memory addresses.
For IO addresses, physical address, non-cacheable,
with side-effect */
mflags |= MF_IO_Access;
mflags |= MF_TLB_Real_Ctx;
context_type = ss_ctx_nucleus;
goto memory_access;
case SS_ASI_REAL_MEM_LITTLE: /* (1C) RW physical address, non-allocating in L1 cache */
case SS_ASI_REAL_MEM: /* (14) RW physical address, non-allocating in L1 cache */
mflags |= MF_TLB_Real_Ctx;
if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
context_type = ss_ctx_nucleus;
goto memory_access;
case SS_ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE: /* RW 64B block load/store, primary address space, user privilege (LE) */
case SS_ASI_BLOCK_AS_IF_USER_PRIMARY: /* RW 64B block load/store, primary address space, user privilege */
mflags &= ~MF_Has_Priv;
goto asi_blk_prim;
case SS_ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE: /* RW 64B block load/store, secondary address space, user privilege (LE) */
case SS_ASI_BLOCK_AS_IF_USER_SECONDARY: /* RW 64B block load/store, secondary address space, user privilege */
mflags &= ~MF_Has_Priv;
goto asi_blk_sec;
case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address, user priv (LE) */
case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P: /* Block initializing store/128b atomic LDDA, primary address, user privilege */
mflags &= ~MF_Has_Priv;
goto blk_init_prim;
case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store, secondary address, user privilege (LE) */
case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S: /* Block initializing store/128b atomic LDDA, secondary address, user privilege */
mflags &= ~MF_Has_Priv;
goto blk_init_sec;
case SS_ASI_QUAD_LDD_LITTLE: /* 128b atomic LDDA (LE) */
case SS_ASI_QUAD_LDD: /* 128b atomic LDDA */
/* This ASI must be used with an LDDA instruction */
if (MA_lddu64 != op) {
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
/* Adjust size to 128 bits (16 bytes) so the alignment check is correct */
op = MA_lddu128;
mask = (1<<(op & MA_Size_Mask))-1;
mflags |= MF_Atomic_Access;
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
context_type = ss_ctx_nucleus;
goto memory_access;
case SS_ASI_QUAD_LDD_REAL_LITTLE: /* 128b atomic LDDA, real address (LE) */
case SS_ASI_QUAD_LDD_REAL: /* 128b atomic LDDA, real address */
/* This ASI must be used with an LDDA instruction */
if (MA_lddu64 != op) {
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
/* Adjust size to 128 bits (16 bytes) so the alignment check is correct */
op = MA_lddu128;
mask = (1<<(op & MA_Size_Mask))-1;
mflags |= MF_Atomic_Access;
mflags |= MF_TLB_Real_Ctx;
context_type = ss_ctx_nucleus;
goto memory_access;
case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD_LITTLE: /* Block initializing store/128b atomic LDDA (LE) */
case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD: /* Block initializing store/128b atomic LDDA */
if (MA_lddu64 == op) {
op = MA_lddu128;
mask = (1<<(op & MA_Size_Mask))-1;
mflags |= MF_Atomic_Access;
goto asi_nuc;
} else
if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
/* block init effect */
addr = ((op & MA_Op_Mask) == MA_CAS) ?
reg1 : (reg1 + reg2);
if ((addr & 0x3f) == 0)
mflags |= MF_Blk_Init;
goto asi_nuc;
}
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
case SS_ASI_QUAD_LDD_PHYS_LITTLE: /* 128b atomic LDDA, physical address (LE) */
case SS_ASI_QUAD_LDD_PHYS: /* N1 PRM rev 1.4: any type of access causes data_access_exception */
goto data_access_exception;
case SS_ASI_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address (LE) */
case SS_ASI_BLK_INIT_ST_QUAD_LDD_P: /* Block initializing store/128b atomic LDDA, primary address */
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
blk_init_prim:;
if (MA_lddu64 == op) {
op = MA_lddu128;
mask = (1<<(op & MA_Size_Mask))-1;
mflags |= MF_Atomic_Access;
goto asi_prim;
} else
if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
/* block init effect */
addr = ((op & MA_Op_Mask) == MA_CAS) ?
reg1 : (reg1 + reg2);
if ((addr & 0x3f) == 0)
mflags |= MF_Blk_Init;
goto asi_prim;
}
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
case SS_ASI_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store/128b atomic LDDA, secondary address (LE) */
case SS_ASI_BLK_INIT_ST_QUAD_LDD_S: /* Block initializing store/128b atomic LDDA, secondary address */
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
blk_init_sec:;
if (MA_lddu64 == op) {
op = MA_lddu128;
mask = (1<<(op & MA_Size_Mask))-1;
mflags |= MF_Atomic_Access;
goto asi_sec;
} else
if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
/* block init effect */
addr = ((op & MA_Op_Mask) == MA_CAS) ?
reg1 : (reg1 + reg2);
if ((addr & 0x3f) == 0)
mflags |= MF_Blk_Init;
goto asi_sec;
}
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
case SS_ASI_BLK_PL: /* 64B block load/store, primary address (LE) */
case SS_ASI_BLK_P: /* 64B block load/store, primary address */
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
asi_blk_prim:;
/* This ASI must be used with an LDDFA/STDFA instruction */
if (!(MA_ldfp64 == op || MA_stfp64 == op) ||
((regnum & 0xf) != 0)) {
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) :
(MA_Size512 | MA_StFloat);
mask = (1<<(op & MA_Size_Mask))-1;
mflags |= MF_Atomic_Access;
goto asi_prim;
case SS_ASI_BLK_SL: /* 64B block load/store, secondary address (LE) */
case SS_ASI_BLK_S: /* 64B block load/store, secondary address */
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
asi_blk_sec:;
/* This ASI must be used with an LDDFA/STDFA instruction */
if (!(MA_ldfp64 == op || MA_stfp64 == op) ||
((regnum & 0xf) != 0)) {
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) :
(MA_Size512 | MA_StFloat);
mask = (1<<(op & MA_Size_Mask))-1;
mflags |= MF_Atomic_Access;
goto asi_sec;
case SS_ASI_PST8_PL:
case SS_ASI_PST8_P:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with STDFA instruction */
if (!(MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_PST8_SL:
case SS_ASI_PST8_S:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with STDFA instruction */
if (!(MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_PST16_PL:
case SS_ASI_PST16_P:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with STDFA instruction */
if (!(MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_PST16_SL:
case SS_ASI_PST16_S:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with STDFA instruction */
if (!(MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_PST32_PL:
case SS_ASI_PST32_P:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with STDFA instruction */
if (!(MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_PST32_SL:
case SS_ASI_PST32_S:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with STDFA instruction */
if (!(MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_FL8_PL:
case SS_ASI_FL8_P:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with an LDDFA/STDFA instruction */
if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_FL8_SL:
case SS_ASI_FL8_S:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with an LDDFA/STDFA instruction */
if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_FL16_PL:
case SS_ASI_FL16_P:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with an LDDFA/STDFA instruction */
if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
goto data_access_exception;
}
goto partial_asi_unsupported;
case SS_ASI_FL16_SL:
case SS_ASI_FL16_S:
if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
/* This ASI must be used with an LDDFA/STDFA instruction */
if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
goto data_access_exception;
}
partial_asi_unsupported:;
addr = reg1 + reg2;
if (addr & 0x3) { /* check 32bit alignment */
v9p->post_precise_trap(sp, Sparcv9_trap_mem_address_not_aligned);
return;
}
if (addr & 0x7) { /* check 64bit alignment */
if (IS_V9_MA_LOAD(op & MA_Op_Mask))
v9p->post_precise_trap(sp, Sparcv9_trap_LDDF_mem_address_not_aligned);
else
v9p->post_precise_trap(sp, Sparcv9_trap_STDF_mem_address_not_aligned);
return;
}
goto data_access_exception;
case SS_ASI_BLK_COMMIT_P:
case SS_ASI_BLK_COMMIT_S:
/* TODO: the PRM states that alignment checks are performed. */
goto data_access_exception;
memory_access:;
if ((MA_LdFloat == (op & MA_Op_Mask)) || (MA_StFloat == (op & MA_Op_Mask)) ) {
ss_memory_asi_access(sp, op, (uint64_t *)&(sp->fpreg.s32[regnum]), mflags, asi, context_type, mask, reg1, reg2);
} else {
ss_memory_asi_access(sp, op, &(sp->intreg[regnum]), mflags, asi, context_type, mask, reg1, reg2);
ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]);
}
return;
default:
break;
}
/* OK, derive access address etc. */
size = op & MA_Size_Mask;
op &= MA_Op_Mask;
is_load = IS_V9_MA_LOAD(op);
/* No MA_CAS case required for cpu state registers. */
addr = reg1 + reg2;
/*
* Finally all the cpu state registers ...
* Currently only 64bit accesses supported ..
* need to ascertain exactly what niagara does here ! FIXME
* FIXME: Of course all the alt address space accesses are different here !
*/
if (size != MA_Size64 || (addr&0x7)!=0 || IS_V9_MA_ATOMIC(op))
goto data_access_exception;
ASSERT(MA_LdSigned != op); /* not signed for any stxas or for ldxas */
#define ITODO(s) do { \
IMPL_WARNING(("Unimplemented niagara asi %s (0x%x) accessed with address 0x%llx @ pc=%lx\n", #s, asi, addr, sp->pc)); \
if (is_load) { val = 0; goto load_complete; }\
} while (0)
/* If we're storing fetch the value to stuff */
if (!is_load) {
if (op == MA_St) {
val = sp->intreg[regnum];
} else { /* MA_StFloat */
switch(size) {
case MA_Size32:
val = sp->fpreg.s32[regnum];
break;
case MA_Size64:
val = sp->fpreg.s64[regnum >> 1];
break;
default:
goto unimplemented;
}
}
};
/* Hex Access VA Repli- DESCRIPTION */
/* (hex) cated */
switch(asi) {
/* MANDATORY SPARC V9 ASIs */
/* All in the memory section above */
/* SunSPARC EXTENDED (non-V9) ASIs */
case SS_ASI_SCRATCHPAD:
/*
* 0x20 RW 0-18 Y Scratchpad Registers
* 0x20 - 20-28 N any type of access causes data_access_exception
* 0x20 RW 30-38 Y Scratchpad Registers
*/
if (INVALID_SCRATCHPAD(addr)) {
goto data_access_exception;
} else {
uint64_t * valp =
&(nsp->strand_reg[SSR_ScratchPad0 + (addr>>3)]);
if (is_load) {
val = *valp;
goto load_complete;
}
DBGSCRATCH( if (*valp != val)
lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: "
"0x%llx -> 0x%llx pc=0x%llx\n",
asi, addr, *valp, val, sp->pc); );
*valp = val;
}
break;
case SS_ASI_MMU:
/* Niagara 1:
* 0x21 RW 8 Y I/DMMU Primary Context Register
* 0x21 RW 10 Y DMMU Secondary Context Register
* 0x21 RW 120 Y I/DMMU Synchronous Fault Pointer
* Niagara 2:
* 0x21 RW 108 Y I/DMMU Primary Context Register 1
* 0x21 RW 110 Y DMMU Secondary Context Register 1
*/
if (is_load) {
switch(addr) {
case 0x08:
val = (uint64_t)(nsp->pri_context);
goto load_complete;
case 0x10:
val = (uint64_t)(nsp->sec_context);
goto load_complete;
default:
break;
}
goto data_access_exception;
} else {
/*
* Since we're changing a context register we should
* flush the xi and xd trans caches. However, this only matters
* for the primary context - iff we are in priv mode with
* TL=0. For all other cases (TL>0) or hpriv=1, either the
* MMU is not in use, or we're executing the nucleus context so
* we can rely on a done/retry instn / mode change to do the flush for us
* when we change mode later.
*/
DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
switch(addr) {
case 0x08:
val &= MASK64(12,0);
if (nsp->pri_context!=val) {
sp->xicache_trans_flush_pending = true;
sp->xdcache_trans_flush_pending = true;
xcache_set_tagstate(sp);
}
nsp->pri_context = val;
break;
case 0x10:
nsp->sec_context = val & MASK64(12,0);
break;
default:
goto data_access_exception;
}
}
break;
case SS_ASI_QUEUE: /* 0x25 RW 3C0 Y CPU Mondo Queue Head Pointer */
/* 0x25 RW 3C8 Y CPU Mondo Queue Tail Pointer */
/* 0x25 RW 3D0 Y Device Mondo Queue Head Pointer */
/* 0x25 RW 3D8 Y Device Mondo Queue Tail Pointer */
/* 0x25 RW 3E0 Y Resumable Error Queue Head Pointer */
/* 0x25 RW 3E8 Y Resumable Error Queue Tail Pointer */
/* 0x25 RW 3F0 Y Nonresumable Error Queue Head Pointer */
/* 0x25 RW 3F8 Y Nonresumable Error Queue Tail Pointer */
/*
* According to the PRM (1.8 Table 9-3), Niagara will
* 'nop' loads or stores to addresses 0-0x3b8.
*/
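/*
 * Offsets 0x3c0/0x3c8, 0x3d0/0x3d8, 0x3e0/0x3e8 and 0x3f0/0x3f8 are the
 * head/tail pairs for the cpu mondo, dev mondo, resumable error and
 * nonresumable error queues; (addr>>4) - 0x3c below selects the
 * corresponding nqueue[] entry.
 */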
if (is_load) {
switch(addr) {
case 0x3c0:
case 0x3d0:
case 0x3e0:
case 0x3f0:
val = (uint16_t)(nsp->nqueue[ (addr>>4) - 0x3c].head);
break;
case 0x3c8:
case 0x3d8:
case 0x3e8:
case 0x3f8:
val = (uint16_t)(nsp->nqueue[(addr>>4) - 0x3c].tail);
break;
default:
goto data_access_exception;
}
DBGMONDO( lprintf(sp->gid, "ASI_QUEUE load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
goto load_complete;
} else {
DBGMONDO( lprintf(sp->gid, "ASI_QUEUE store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
RSVD_MASK(sp, MASK64(13, 6), val, asi, addr);
switch(addr) {
case 0x3c0:
case 0x3d0:
case 0x3e0:
case 0x3f0:
nsp->nqueue[(addr>>4) - 0x3c].head = (uint16_t)val;
nsp->flag_queue_irq[(addr>>4)- 0x3c] = nsp->nqueue[(addr>>4) - 0x3c].head != nsp->nqueue[(addr>>4) - 0x3c].tail;
break;
case 0x3c8:
case 0x3d8:
case 0x3e8:
case 0x3f8:
if (v9p->state != V9_HyperPriv &&
v9p->state != V9_RED)
goto data_access_exception; /* DAX if store to tail in privileged mode */
nsp->nqueue[(addr>>4) - 0x3c].tail = (uint16_t)val;
nsp->flag_queue_irq[(addr>>4)- 0x3c] = nsp->nqueue[(addr>>4) - 0x3c].head != nsp->nqueue[(addr>>4) - 0x3c].tail;
break;
default:
goto data_access_exception;
}
ss_check_interrupts(sp);
}
break;
case SS_ASI_DIRECT_MAP_ECACHE: /* 0x30 - - - N1 PRM rev 1.4: any type of access causes data_access_exception */
goto data_access_exception;
case SS_ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0: /* 0x31 RW 0 Y DMMU Context Zero TSB Base PS0 */
tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps0);
goto mmu_tsb_base;
case SS_ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1: /* 0x32 RW 0 Y DMMU Context Zero TSB Base PS1 */
tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps1);
goto mmu_tsb_base;
case SS_ASI_DMMU_CTXT_ZERO_CONFIG: /* 0x33 RW 0 Y DMMU Context Zero Config Register */
tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps0);
tsbinfop1 = &(nsp->dmmu_ctxt_zero_tsb_ps1);
goto mmu_tsb_config;
case SS_ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0: /* 0x35 RW 0 Y IMMU Context Zero TSB Base PS0 */
tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps0);
mmu_tsb_base:;
if (is_load) {
val = tsbinfop->reg_tsb_base;
DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
goto load_complete;
} else {
uint_t tsb_size;
bool_t is_split;
DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
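/*
 * TSB base register layout (as decoded below): bits [3:0] hold the
 * TSB size field, bit 12 the split bit, and the remaining upper bits
 * the TSB base address, aligned according to the (split) TSB size.
 */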
tsbinfop->reg_tsb_base = val;
tsb_size = val & MASK64( 3, 0 );
is_split = ((val >> 12)&1) ? true : false;
/* niagara catches attempts to create TSB spans larger than the */
/* legal VA span */
if (tsb_size >= 11 && tsbinfop->page_size == 5) goto data_access_exception;
tsbinfop->is_split = is_split;
tsbinfop->tsb_size = tsb_size;
tsbinfop->base_addr = val & ( is_split ? MASK64(63,14+tsb_size) : MASK64(63,13+tsb_size) );
}
break;
case SS_ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1: /* 0x36 RW 0 Y IMMU Context Zero TSB Base PS1 */
tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps1);
goto mmu_tsb_base;
case SS_ASI_IMMU_CTXT_ZERO_CONFIG: /* 0x37 RW 0 Y DMMU Context Zero Config Register */
tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps0);
tsbinfop1 = &(nsp->immu_ctxt_zero_tsb_ps1);
mmu_tsb_config:;
/* FIXME: what about non VA=0x0 accesses ? what about if new page-size + tsb size > span faults ? */
if (is_load) {
val = ((uint64_t)tsbinfop1->page_size << 8) | ((uint64_t)tsbinfop->page_size);
DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
goto load_complete;
} else {
static uint8_t supported[8]={ 1, 1, 0, 1, 0, 1, 0, 0 };
DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
tsbinfop1->page_size = (val >> 8) & 0x7;
if (!supported[tsbinfop1->page_size])
tsbinfop1->page_size = 5;
tsbinfop->page_size = val & 0x7;
if (!supported[tsbinfop->page_size])
tsbinfop->page_size = 5;
}
break;
case SS_ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0: /* 0x39 RW 0 Y DMMU Context Nonzero TSB Base PS0 */
tsbinfop = &(nsp->dmmu_ctxt_nonzero_tsb_ps0);
goto mmu_tsb_base;
case SS_ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1: /* 0x3A RW 0 Y DMMU Context Nonzero TSB Base PS1 */
tsbinfop = &(nsp->dmmu_ctxt_nonzero_tsb_ps1);
goto mmu_tsb_base;
case SS_ASI_DMMU_CTXT_NONZERO_CONFIG: /* 0x3B RW 0 Y DMMU Context Zero Config Register */
tsbinfop = &(nsp->dmmu_ctxt_nonzero_tsb_ps0);
tsbinfop1 = &(nsp->dmmu_ctxt_nonzero_tsb_ps1);
goto mmu_tsb_config;
case SS_ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0: /* 0x3D RW 0 Y IMMU Context Nonzero TSB Base PS0 */
tsbinfop = &(nsp->immu_ctxt_nonzero_tsb_ps0);
goto mmu_tsb_base;
case SS_ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1: /* 0x3E RW 0 Y IMMU Context Nonzero TSB Base PS1 */
tsbinfop = &(nsp->immu_ctxt_nonzero_tsb_ps1);
goto mmu_tsb_base;
case SS_ASI_IMMU_CTXT_NONZERO_CONFIG: /* 0x3F RW 0 Y DMMU Context Zero Config Register */
tsbinfop = &(nsp->immu_ctxt_nonzero_tsb_ps0);
tsbinfop1 = &(nsp->immu_ctxt_nonzero_tsb_ps1);
goto mmu_tsb_config;
#if INTERNAL_BUILD /* { */
case SS_ASI_STREAM_MA: /* 0x40 RW 0 N Asynchronous Streaming Control Register */
/* 0x40 RW 8 N SRC Register: Asynchronous Strm state */
/* 0x40 RW 10 N DEST Register: Asynchronous Strm state */
/* 0x40 RW 18 N DATA Register: Asynchronous Strm state */
/* 0x40 RW 20 N Chaining initialisation vector for DES /3DES */
/* 0x40 RW 28 N DES Key 1 */
/* 0x40 RW 30 N DES Key 2 */
/* 0x40 RW 38 N DES Key 3 */
/* 0x40 RW 40 N HASH STATE REG 1 */
/* 0x40 RW 48 N HASH STATE REG 2 */
/* 0x40 RW 50 N HASH STATE REG 3 */
/* 0x40 RW 68 N Wait for async stream operation to complete */
/* 0x40 RW 80 N Modular Arithmetic Control Register */
/* 0x40 RW 88 N Modular Arithmetic Physical Address Register(MPA) */
/* 0x40 RW 90 N Modular Arith. Memory Address Register(MA_ADDR) */
/* 0x40 RW 98 N Modular Arithmetic NP Register */
/* 0x40 RW A0 N Wait for async MA operation to complete */
{
uint_t rv;
rv = modarith_cpu_access(sp, addr, op, &val);
switch (rv) {
case MOD_ARITH_DONE:
break;
case MOD_ARITH_LD_COMPLETE:
goto load_complete;
case MOD_ARITH_DATA_ACCESS_EX_TRAP:
EXEC_WARNING(("DAX in ASI_STREAM_MA\n"));
goto data_access_exception;
case MOD_ARITH_ILLEGAL_INST_TRAP:
/* No version of Niagara does this... */
v9p->post_precise_trap(sp,
Sparcv9_trap_illegal_instruction);
return;
case MOD_ARITH_FATAL:
IMPL_WARNING(("fatal error during mod_arith processing"));
fatal("fatal error during mod_arith processing");
return; /* never actually executed */
default:
IMPL_WARNING(("unknown rv (0x%x) during mod_arith "
"processing", rv));
fatal("fatal error during mod_arith processing");
return; /* never actually executed */
}
}
break;
#endif /* } INTERNAL_BUILD */
case SS_ASI_LSU_DIAG_REG: /* 0x42 RW 0 N Sparc BIST control register */ /* SPARC_BIST_CONTROL */
/* 0x42 RW 8 N Sparc Instruction Mask Register */ /* INST_MASK_REG */
/* 0x42 RW 10 N Load/Store Unit Diagnostic Register */ /* LSU_DIAG_REG */
if (is_load) {
switch(addr) {
case 0x0:
val = nsp->icachep->bist_ctl;
goto load_complete;
case 0x8:
val = nsp->icachep->inst_mask;
goto load_complete;
case 0x10:
val = (nsp->dcachep->assocdis ? 2 : 0) |
(nsp->icachep->assocdis ? 1 : 0);
goto load_complete;
default:
break;
}
} else {
switch(addr) {
case 0x0:
nsp->icachep->bist_ctl = val & 0x7f;
if (val & 1) nsp->icachep->bist_ctl |= 0x400;
goto complete;
case 0x8:
nsp->icachep->inst_mask = val;
goto complete;
case 0x10:
if (val & 2) nsp->dcachep->assocdis = true;
if (val & 1) nsp->icachep->assocdis = true;
goto complete;
default:
break;
}
}
goto data_access_exception;
case SS_ASI_ERROR_INJECT_REG: /* 0x43 RW 0 N Sparc Error Injection Register */
ITODO(SS_ASI_ERROR_INJECT_REG);
break;
case SS_ASI_STM_CTL_REG: /* 0x44 RW 0 N Self-timed Margin Control Register */
ITODO(SS_ASI_STM_CTL_REG);
break;
case SS_ASI_LSU_CONTROL_REG: /* 0x45 RW 0 Y Load/Store Unit Control Register */
switch(addr) {
case 0x0:
if (is_load) {
val = (nsp->lsu_control_raw & ~(LSU_CTRL_DMMU_EN | LSU_CTRL_IMMU_EN)) |
(nsp->dmmu.enabled ? LSU_CTRL_DMMU_EN : 0LL) |
(nsp->immu.enabled ? LSU_CTRL_IMMU_EN : 0LL);
goto load_complete;
} else {
/*
* can only issue this in hpriv mode, so even though we turn the mmu
* on and off, we don't need to flush the x and d translation caches
* because in hpriv mode we're only fetching physical addresses.
*/
ASSERT( V9_RED == v9p->state || V9_HyperPriv == v9p->state );
val &= LSU_CTRL_REG_MASK;
if ((val & (LSU_CTRL_WATCH_VR|LSU_CTRL_WATCH_VW)) != 0) {
IMPL_WARNING(("ASI_LSU_CONTROL_REG watchpoint enable unimplemented @ pc=%lx\n", sp->pc));
}
nsp->lsu_control_raw = val;
nsp->dmmu.enabled = (val & LSU_CTRL_DMMU_EN) != 0;
nsp->immu.enabled = (val & LSU_CTRL_IMMU_EN) != 0;
sp->xicache_trans_flush_pending = true;
sp->xdcache_trans_flush_pending = true;
}
break;
default:
goto data_access_exception;
}
break;
case SS_ASI_DCACHE_DATA: /* 0x46 RW - N Dcache data array diagnostics access */
if (is_load) {
uint64_t idx, lineword, tag;
/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
lineword = addr&SS_DCACHE_DATA_BITS;
tag = (addr&SS_DCACHE_DATA_TAG_BITS)>>10;
RW_rdlock(&nsp->dcachep->rwlock);
/*
* must match tag to load data
* iterate over 4 ways at bits [12:11]
*/
/* walk the 4 ways (0x800 apart); stop at lineword to avoid unsigned wrap */
for (idx=lineword+0x1800; ; idx=idx-0x800) {
if (nsp->dcachep->tagp[idx] == tag) {
val = nsp->dcachep->datap[idx];
break;
}
if (idx == lineword) {
EXEC_WARNING( ("ASI_DCACHE_DATA load tag 0x%llx has no match",
addr&SS_DCACHE_DATA_TAG_BITS) );
break;
}
}
RW_unlock(&nsp->dcachep->rwlock);
goto load_complete;
} else {
uint64_t idx;
/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
idx = (addr&SS_DCACHE_DATA_BITS)>>3;
RW_wrlock(&nsp->dcachep->rwlock);
nsp->dcachep->datap[idx] = val;
RW_unlock(&nsp->dcachep->rwlock);
goto complete;
}
case SS_ASI_DCACHE_TAG: /* 0x47 RW - N Dcache tag and valid bit diagnostics access */
if (is_load) {
uint64_t idx;
/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
idx = (addr&SS_DCACHE_TAG_WAYLINE_BITS)>>4;
RW_rdlock(&nsp->dcachep->rwlock);
val = nsp->dcachep->tagp[idx];
RW_unlock(&nsp->dcachep->rwlock);
goto load_complete;
} else {
uint64_t idx;
/* L1 D-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
idx = (addr&SS_DCACHE_TAG_WAYLINE_BITS)>>4;
RW_wrlock(&nsp->dcachep->rwlock);
nsp->dcachep->tagp[idx] = val;
RW_unlock(&nsp->dcachep->rwlock);
goto complete;
}
case SS_ASI_INTR_DISPATCH_STATUS: /* 0x48 - - - any type of access causes data_access_exception */
case SS_ASI_INTR_RECEIVE: /* 0x49 - - - any type of access causes data_access_exception */
case SS_ASI_UPA_CONFIG_REGISTER: /* 0x4A - - - any type of access causes data_access_exception */
goto data_access_exception;
case SS_ASI_SPARC_ERROR_EN_REG: /* 0x4B RW 0 N Sparc error enable reg(synchronous ecc/parity errors) */
if (0LL != addr) goto data_access_exception;
if (is_load) {
val = nsp->error.enabled;
goto load_complete;
} else {
nsp->error.enabled = (val & (NA_CEEN | NA_NCEEN));
}
break;
case SS_ASI_SPARC_ERROR_STATUS_REG: /* 0x4C RW 0 Y Sparc error status reg */
if (0LL != addr) goto data_access_exception;
if (is_load) {
val = nsp->error.status;
goto load_complete;
} else {
nsp->error.status &= ~val;
}
break;
case SS_ASI_SPARC_ERROR_ADDRESS_REG: /* 0x4D RW 0 Y Sparc error address reg */
if (0LL != addr || !is_load) goto data_access_exception;
val = nsp->error.addr;
goto load_complete;
case SS_ASI_ECACHE_TAG_DATA: /* 0x4E - - - any type of access causes data_access_exception */
goto data_access_exception;
case SS_ASI_HYP_SCRATCHPAD:
/*
* Niagara1/N2 :
* 0x4F RW 0-38 Y Hypervisor Scratchpad
* Rock :
* 0x4F RW 0-18 Y Hypervisor Scratchpad
*/
if (INVALID_HYP_SCRATCHPAD(addr)) {
goto data_access_exception;
} else {
uint64_t *valp =
&(nsp->strand_reg[SSR_HSCRATCHPAD_INDEX + (addr>>3)]);
if (is_load) {
val = *valp;
goto load_complete;
}
DBGSCRATCH( if (*valp != val)
lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: "
"0x%llx -> 0x%llx pc=0x%llx\n",
asi, addr, *valp, val, sp->pc); );
*valp = val;
}
break;
case SS_ASI_IMMU: /* 0x50 R 0 Y IMMU Tag Target register */
/* 0x50 RW 18 Y IMMU Synchronous Fault Status Register */
/* 0x50 RW 30 Y IMMU TLB Tag Access Register */
mmup = &(nsp->immu);
if (is_load) {
switch(addr) {
case 0x0:
tag_target_read:;
VA48_ASSERT(mmup->tag_access_reg);
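/*
 * Tag Target format: VA[63:22] of the latest tag access in bits [41:0]
 * and the 13-bit context in bits [60:48], both unpacked from the packed
 * tag access register below.
 */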
val = (mmup->tag_access_reg >> 22) | ((mmup->tag_access_reg&MASK64(12,0))<<48);
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
goto load_complete;
case 0x18:
read_sfsr:;
val = mmup->sfsr;
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
goto load_complete;
case 0x30:
tag_access_read:;
val = mmup->tag_access_reg;
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
VA48_ASSERT(val);
goto load_complete;
default:
break;
}
} else {
DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
switch(addr) {
case 0x18:
write_sfsr:;
mmup->sfsr = val & MMU_SFSR_MASK;
sp->xicache_trans_flush_pending = true;
goto complete;
case 0x30:
tag_access_write:;
VA48_WARNING(sp, val);
mmup->tag_access_reg = VA48(val);
DBGMMU( lprintf(sp->gid, "ASI 0x%x : %cMMU tag access = 0x%llx\n", asi, mmup->is_immu ? 'I' : 'D', mmup->tag_access_reg); );
goto complete;
default:
break;
}
}
goto data_access_exception;
case SS_ASI_IMMU_TSB_PS0_PTR_REG: /* 0x51 R 0 Y IMMU TSB PS0 pointer register */
mmup = &(nsp->immu);
read_tsb_ps0_ptr:;
if (!is_load) goto data_access_exception;
if (mmup == &nsp->dmmu) {
if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
tsbinfop = &nsp->dmmu_ctxt_zero_tsb_ps0;
else
tsbinfop = &nsp->dmmu_ctxt_nonzero_tsb_ps0;
} else {
if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
tsbinfop = &nsp->immu_ctxt_zero_tsb_ps0;
else
tsbinfop = &nsp->immu_ctxt_nonzero_tsb_ps0;
}
val = 0;
goto common_make_tsb_ptr;
case SS_ASI_IMMU_TSB_PS1_PTR_REG: /* 0x52 R 0 Y IMMU TSB PS1 pointer register */
mmup = &(nsp->immu);
read_tsb_ps1_ptr:;
if (!is_load) goto data_access_exception;
if (mmup == &nsp->dmmu) {
if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
tsbinfop = &nsp->dmmu_ctxt_zero_tsb_ps1;
else
tsbinfop = &nsp->dmmu_ctxt_nonzero_tsb_ps1;
} else {
if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
tsbinfop = &nsp->immu_ctxt_zero_tsb_ps1;
else
tsbinfop = &nsp->immu_ctxt_nonzero_tsb_ps1;
}
if (tsbinfop->is_split)
val = 1ull << (13+tsbinfop->tsb_size);
else
val = 0;
common_make_tsb_ptr:;
/*
* base_addr was masked when the TSB base was written,
* so no need to mask again here.
*/
val |= tsbinfop->base_addr;
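/*
* Index portion of the TSB pointer: the tag access VA is shifted
* right by the page-size shift (13 + 3*page_size), scaled to
* 16-byte TTE entries (<<4), and wrapped to the TSB size.
*/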
val |= (mmup->tag_access_reg >> (13 - 4 + tsbinfop->page_size * 3)) & MASK64((9+tsbinfop->tsb_size+4-1), 4);
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
goto load_complete;
case SS_ASI_ITLB_DATA_IN_REG: /* 0x54 W 0 N IMMU data in register */
tlbp = nsp->itlbp;
mmup = &(nsp->immu);
tlb_data_in_valid_check:;
/*
* Check for attempts to load this ASI -or- an invalid VA
* (only bits 10-9 should be set)
*/
if (is_load || (addr & ~MASK64(10,9))!=0) goto data_access_exception;
DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
idx = SS_TLB_LRU;
tlb_data_in:;
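/*
* Bit 10 of the address marks the data as a sun4v-format TTE, which
* is converted to the internal sun4u layout below; SS_TLB_IS_REAL()
* examines the address to decide between a real and a virtual
* translation.
*/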
is_real = SS_TLB_IS_REAL(addr);
if ((addr>>10)&1) val = niagara_shuffle_sun4v_format(val);
if (!ss_tlb_insert(sp, mmup, tlbp, nsp->partid, is_real, val, idx))
goto data_access_exception;
goto complete;
case SS_ASI_ITLB_DATA_ACCESS_REG: /* 0x55 RW 0-1F8 N IMMU TLB Data Access Register */
tlbp = nsp->itlbp;
mmup = &(nsp->immu);
tlb_data_access:;
/* Check for valid tlb index */
idx = (addr >> 3) & 0x3f;
if (idx >= tlbp->nentries) goto data_access_exception;
if (!is_load) {
DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
/*
* Store
*
* Check for an invalid VA (only bits 10-3 should be set)
*/
if ((addr & ~MASK64(10,3))!=0)
goto data_access_exception;
goto tlb_data_in;
} else {
/*
* Load
*/
tlb_entry_t * tep;
#if ERROR_INJECTION
if (sp->error_check == true &&
(ep = find_errconf(sp, ASI_LD, IMDU|DMDU))) {
if (ep->type == IMDU && mmup->is_immu) {
sp->errorp->tlb_idx[IMDU_IDX] = idx;
ss_error_condition(sp, ep);
return;
} else
if (ep->type == DMDU && !mmup->is_immu) {
sp->errorp->tlb_idx[DMDU_IDX] = idx;
ss_error_condition(sp, ep);
return;
}
}
#endif
RW_rdlock(&tlbp->rwlock);
tep = &tlbp->tlb_entryp[idx];
val = tep->data;
RW_unlock(&tlbp->rwlock);
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
goto load_complete;
}
case SS_ASI_ITLB_TAG_READ_REG: /* 0x56 R 0-1F8 N IMMU TLB Tag Read Register */
tlbp = nsp->itlbp;
mmup = &(nsp->immu);
tlb_tag_read:;
if (is_load) {
tlb_entry_t * tep;
idx = addr >> 3;
if (idx >= tlbp->nentries) goto data_access_exception;
#if ERROR_INJECTION
if (sp->error_check == true &&
(ep = find_errconf(sp, ASI_LD, IMTU|DMTU))) {
if (ep->type == IMTU && mmup->is_immu) {
sp->errorp->tlb_idx[IMTU_IDX] = idx;
ss_error_condition(sp, ep);
return;
} else
if (ep->type == DMTU && !mmup->is_immu) {
sp->errorp->tlb_idx[DMTU_IDX] = idx;
ss_error_condition(sp, ep);
return;
}
}
#endif
RW_rdlock(&tlbp->rwlock);
tep = &tlbp->tlb_entryp[idx];
val = ((uint64_t)tep->partid << 61) |
((uint64_t)(tep->is_real?1:0) << 60);
val |= (tep->tag_pfn & MASK64(55, 13)) | (uint64_t)tep->tag_context;
RW_unlock(&tlbp->rwlock);
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
goto load_complete;
}
goto data_access_exception;
case SS_ASI_IMMU_DEMAP: /* 0x57 W 0 Y IMMU TLB Demap */
mmup = &(nsp->immu);
tlbp = nsp->itlbp;
tlb_demap:;
if (is_load) goto data_access_exception;
DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
{
ss_demap_t op;
uint_t context;
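/* Demap address encoding: bits 7:6 select the demap operation, bits 5:4 select which context register to use. */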
op = (ss_demap_t) ((addr>>6)&0x3);
switch ((addr >> 4)&0x3) {
case 0x0: context = nsp->pri_context; break; /* primary context */
case 0x1: context = nsp->sec_context; break; /* secondary context */
case 0x2: context = SS_NUCLEUS_CONTEXT; break; /* nucleus context */
case 0x3:
/*
* use of reserved value is valid but causes
* demap to be ignored for the following two ops
*/
if (op==NA_demap_page || op==NA_demap_context)
goto complete;
}
if (op == NA_demap_page) {
if ((addr & BIT(47)) == 0) {
if ((addr & MASK64(63, 48)) != 0) {
EXEC_WARNING(("(@pc=0x%llx) demap "
"address range "
"asi=0x%x va=0x%llx",
sp->pc, asi, addr));
}
addr &= MASK64(47, 0);
} else {
if ((addr & MASK64(63, 48)) != MASK64(63, 48)) {
EXEC_WARNING(("(@pc=0x%llx) demap "
"address range "
"asi=0x%x va=0x%llx",
sp->pc, asi, addr));
}
addr |= MASK64(63, 48);
}
}
is_real = SS_TLB_IS_REAL(addr);
if (!ss_demap(sp, op, mmup, tlbp, nsp->partid, is_real, context, addr)) goto data_access_exception;
}
goto complete;
case SS_ASI_DMMU: /* 0x58 R 0 Y D-MMU Tag Target Register */
/* 0x58 RW 18 Y DMMU Synchronous Fault Status Register */
/* 0x58 R 20 Y DMMU Synchronous Fault Address Register */
/* 0x58 RW 30 Y DMMU TLB Tag Access Register */
/* 0x58 RW 38 Y DMMU VA Data Watchpoint Register */
/* 0x58 RW 40 Y Niagara 2: Tablewalk Config Reg */
/* 0x58 RW 80 Y I/DMMU Partition ID */
mmup = &(nsp->dmmu);
if (is_load) {
switch(addr) {
case 0x0:
goto tag_target_read;
case 0x18:
goto read_sfsr;
case 0x20:
val = mmup->sfar;
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
VA48_ASSERT(val);
goto load_complete;
case 0x30:
goto tag_access_read;
case 0x38:
val = mmup->va_watchpoint;
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
goto load_complete;
case 0x80:
val = (uint64_t)(nsp->partid);
DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
goto load_complete;
default:
break;
}
} else {
DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
switch(addr) {
case 0x18:
goto write_sfsr;
case 0x30:
goto tag_access_write;
case 0x38:
mmup->va_watchpoint = VA48(val);
goto complete;
case 0x80:
/* this can only be done in hypervisor mode - switching mode causes the xi/xd
* cache flush anyway
*/
nsp->partid = val & 0x7; /* three bits of part id only */
sp->xicache_trans_flush_pending = true;
sp->xdcache_trans_flush_pending = true;
goto complete;
default:
break;
}
}
goto data_access_exception;
case SS_ASI_DMMU_TSB_PS0_PTR_REG: /* 0x59 R 0 Y DMMU TSB PS0 pointer register */
mmup = &(nsp->dmmu);
goto read_tsb_ps0_ptr;
case SS_ASI_DMMU_TSB_PS1_PTR_REG: /* 0x5A R 0 Y DMMU TSB PS1 pointer register */
mmup = &(nsp->dmmu);
goto read_tsb_ps1_ptr;
case SS_ASI_DMMU_TSB_DIRECT_PTR_REG: /* 0x5B R 0 Y DMMU TSB Direct pointer register */
if (!is_load) goto data_access_exception;
mmup = &(nsp->dmmu);
if (mmup->tsb_direct_ps1)
goto read_tsb_ps1_ptr;
goto read_tsb_ps0_ptr;
case SS_ASI_DTLB_DATA_IN_REG: /* 0x5C W 0 N DMMU data in register */
tlbp = nsp->dtlbp;
mmup = &(nsp->dmmu);
goto tlb_data_in_valid_check;
case SS_ASI_DTLB_DATA_ACCESS_REG: /* 0x5D RW 0-1F8 N DMMU TLB Data Access Register */
tlbp = nsp->dtlbp;
mmup = &(nsp->dmmu);
goto tlb_data_access;
case SS_ASI_DTLB_TAG_READ_REG: /* 0x5E R 0-1F8 N DMMU TLB Tag Read Register */
tlbp = nsp->dtlbp;
mmup = &(nsp->dmmu);
goto tlb_tag_read;
case SS_ASI_DMMU_DEMAP: /* 0x5F W 0 Y DMMU TLB Demap */
mmup = &(nsp->dmmu);
tlbp = nsp->dtlbp;
goto tlb_demap;
case SS_ASI_TLB_INVALIDATE_ALL: /* 0x60 W 0 N IMMU TLB Invalidate Register */
/* 0x60 W 8 N DMMU TLB Invalidate Register */
if (is_load || !(addr==0x0 || addr==0x8)) goto data_access_exception;
if (addr == 0) {
mmup = &nsp->immu;
tlbp = nsp->itlbp;
} else {
mmup = &nsp->dmmu;
tlbp = nsp->dtlbp;
}
DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
if (!ss_demap(sp, NA_demap_init, mmup, tlbp, SS_TLB_INVALID_PARTID, false, SS_TLB_INVALID_CONTEXT, 0))
goto data_access_exception;
goto complete;
case SS_ASI_ICACHE_INSTR: /* 0x66 RW - N Icache data array diagnostics access */
if (is_load) {
uint64_t idx;
/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
idx = ((addr&SS_ICACHE_DATA_LINEWORD_BITS)|((addr&SS_ICACHE_DATA_WAY_BITS)>>3))>>3;
RW_rdlock(&nsp->icachep->rwlock);
val = nsp->icachep->datap[idx];
RW_unlock(&nsp->icachep->rwlock);
goto load_complete;
} else {
uint64_t idx;
/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
idx = ((addr&SS_ICACHE_DATA_LINEWORD_BITS)|((addr&SS_ICACHE_DATA_WAY_BITS)>>3))>>3;
RW_wrlock(&nsp->icachep->rwlock);
nsp->icachep->datap[idx] = val;
RW_unlock(&nsp->icachep->rwlock);
goto complete;
}
case SS_ASI_ICACHE_TAG: /* 0x67 RW - N Icache tag and valid bit diagnostics access */
if (is_load) {
uint64_t idx;
/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
idx = (((addr&SS_ICACHE_TAG_LINE_BITS)>>3)|((addr&SS_ICACHE_TAG_WAY_BITS)>>6))>>3;
RW_rdlock(&nsp->icachep->rwlock);
val = nsp->icachep->tagp[idx];
RW_unlock(&nsp->icachep->rwlock);
goto load_complete;
} else {
uint64_t idx;
/* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
idx = (((addr&SS_ICACHE_TAG_LINE_BITS)>>3)|((addr&SS_ICACHE_TAG_WAY_BITS)>>6))>>3;
RW_wrlock(&nsp->icachep->rwlock);
nsp->icachep->tagp[idx] = val;
RW_unlock(&nsp->icachep->rwlock);
goto complete;
}
case SS_ASI_SWVR_INTR_RECEIVE: /* 0x72 RW 0 Y Interrupt Receive Register */
if (0LL != addr) goto data_access_exception;
if (is_load) {
pthread_mutex_lock(&nsp->irq_lock);
val = nsp->irq_vector;
pthread_mutex_unlock(&nsp->irq_lock);
DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_INTR_RECEIVE load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
goto load_complete;
} else {
uint64_t oldval, newval;
pthread_mutex_lock(&nsp->irq_lock);
DBGMONDO( oldval = nsp->irq_vector; );
nsp->irq_vector &= val;
DBGMONDO( newval = nsp->irq_vector; );
pthread_mutex_unlock(&nsp->irq_lock);
DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_INTR_RECEIVE store 0x%x/0x%llx : 0x%llx irq_vector: 0x%llx -> 0x%llx (pc=0x%llx)\n", asi, addr, val, oldval, newval, sp->pc); );
ss_check_interrupts(sp);
}
break;
case SS_ASI_SWVR_UDB_INTR_W: /* 0x73 W 0 Y Interrupt Vector Dispatch Register */
if (0LL != addr || is_load) goto data_access_exception;
DBGMONDO( lprintf(sp->gid, "ASI_SWVR_UDB_INTR_W store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
niagara_send_xirq(sp, val);
break;
case SS_ASI_SWVR_UDB_INTR_R: /* 0x74 R 0 Y Incoming Vector Register */
if (0LL != addr || !is_load) goto data_access_exception;
pthread_mutex_lock(&nsp->irq_lock);
{
uint64_t vec;
uint8_t bit = 0;
vec = nsp->irq_vector;
if (vec == 0) {
val = 0;
goto udb_intr_r_done;
}
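/*
* Find the most-significant set bit in the pending vector with a
* stepwise binary search; that bit number is returned as the vector
* and its bit is cleared from irq_vector.
*/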
if (vec & 0xffffffff00000000ull) {
bit += 32; vec >>= 32;
}
if (vec & 0xffff0000) {
bit += 16; vec >>= 16;
}
if (vec & 0xff00) {
bit += 8; vec >>= 8;
}
if (vec & 0xf0) {
bit += 4; vec >>= 4;
}
if (vec & 0xc) {
bit += 2; vec >>= 2;
}
if (vec & 0x2) {
bit += 1;
}
nsp->irq_vector &= ~((uint64_t)1<<bit);
val = bit;
}
udb_intr_r_done:;
pthread_mutex_unlock(&nsp->irq_lock);
DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_UDB_INTR_R load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); );
goto load_complete;
default:
data_access_exception:
addr = ((op & MA_Op_Mask) == MA_CAS) ?
reg1 : (reg1 + reg2);
niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI, ss_ctx_primary, asi, 1, 0);
tt = (sparcv9_trap_type_t)Sparcv9_trap_data_access_exception;
ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]);
MEMORY_ACCESS_TRAP();
v9p->post_precise_trap(sp, tt);
return;
}
complete:;
NEXT_INSTN(sp);
return;
load_complete:
if (op == MA_Ld ) {
if (regnum != Reg_sparcv9_g0) sp->intreg[regnum] = val;
} else { /* op == MA_LdFloat */
ASSERT(MA_LdFloat == op);
switch(size) {
case MA_Size32:
sp->fpreg.s32[regnum] = val;
break;
case MA_Size64:
sp->fpreg.s64[regnum >> 1] = val;
break;
default:
goto unimplemented;
}
}
goto complete;
unimplemented:
IMPL_WARNING(("ASI access (0x%02x) (@pc=0x%llx) to address 0x%llx currently unimplemented", asi, sp->pc, addr));
v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
return;
}
/*
* Slow generic memory access ..
* .. becomes the path for all the accesses we can't handle via the load/store hash
*/
void
ss_memory_asi_access(simcpu_t * sp, maccess_t memop, uint64_t * regp,
mem_flags_t mflags, uint_t asi, uint_t context_type,
uint_t align_mask, tvaddr_t va, tvaddr_t reg2)
{
sparcv9_cpu_t * v9p;
ss_strand_t * nsp;
ss_proc_t * npp;
ss_l2_cache_t * l2p;
error_conf_t * ep;
error_t * errorp;
tpaddr_t pa;
tpaddr_t pa_tag;
tvaddr_t tag, perm_cache;
uint8_t * bufp;
uint8_t * ptr;
config_addr_t * cap;
tpaddr_t extent;
uint_t flags;
uint_t size;
uint_t op;
dev_access_t da;
v9p = (sparcv9_cpu_t *)(sp->specificp);
nsp = v9p->impl_specificp;
npp = (ss_proc_t *)(sp->config_procp->procp);
mflags ^= (asi & SS_ASI_LE_MASK) ? MF_Little_Endian : 0;
/* OK, derive access address etc. */
size = memop & MA_Size_Mask;
op = memop & MA_Op_Mask;
if (MA_CAS != op) {
va += reg2;
}
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: LE load/store pc=0x%llx instr=0x%x count=%d asi=0x%x\n", sp->pc, op, (1 << size), asi); );
/*
* OK - Step 1 : to do or not do a TLB translation.
* The assumption here is that privilege checks have already happened.
*/
#if ERROR_INJECTION
errorp = sp->errorp;
#endif
/* quick check of alignment */
if ((va & (tvaddr_t)align_mask) != 0) {
sparcv9_trap_type_t tt;
if (v9p->pstate.addr_mask)
va &= MASK64(31,0); /* SV9_ID125 FIXME */
DBGALIGN( lprintf(sp->gid,"Miss data access pc=0x%llx va=0x%llx align_mask=0x%llx\n", sp->pc, va, (tvaddr_t)align_mask); );
/* alignment error - force a trap */
VA48_WARNING(sp, va);
SET_DTLB_FAULT( nsp, VA48(va) );
MEMORY_ACCESS_TRAP();
niagara_set_sfsr(sp, &nsp->dmmu, va, 0/*fixme*/, context_type, asi, 0, 0);
if ((MA_ldfp64 == memop || MA_stfp64 == memop) &&
((va & 0x7) == 0x4))
tt = ((memop == MA_ldfp64) ?
Sparcv9_trap_LDDF_mem_address_not_aligned :
Sparcv9_trap_STDF_mem_address_not_aligned);
else
tt = Sparcv9_trap_mem_address_not_aligned;
v9p->post_precise_trap(sp, tt);
return;
}
/* Find the pa corresponding to the line we need */
tag = va & XDCACHE_TAG_MASK;
/*
* We have to get the PA from the EA ... this depends on the mode
* and the type of access.
*/
pa_tag = tag;
if (v9p->pstate.addr_mask) {
pa_tag &= MASK64(31,0);
va &= MASK64(31,0);
/* NOTE: we dont mask tag ... we allow that to match the 64bit address */
}
pa = va;
flags = SS_TLB_FLAG_READ | SS_TLB_FLAG_WRITE; /* default access flags */
/*
* OK perform the TLB access based on the context
* and partition id selected
*/
/* default read and write permission for MMU bypass */
perm_cache = XDCACHE_READ_PERM | XDCACHE_WRITE_PERM;
if (!(mflags & MF_MMU_Bypass)) {
ss_tlb_t * tlbp;
tlb_entry_t * tep;
tlb_entry_t te_copy;
uint_t idx, partid;
ss_trap_type_t miss_trap_type;
uint_t context;
uint_t miss_context;
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: performing TLB access \n"); );
/* If not priv mode and mmu is off, translate real addresses */
if (!nsp->dmmu.enabled)
context = SS_TLB_REAL_CONTEXT;
else {
/* figure out the context value */
switch (context_type) {
case ss_ctx_primary:
context = nsp->pri_context;
break;
case ss_ctx_secondary:
context = nsp->sec_context;
break;
case ss_ctx_nucleus:
if (mflags & MF_TLB_Real_Ctx)
context = SS_TLB_REAL_CONTEXT;
else
context = SS_NUCLEUS_CONTEXT;
break;
default:
fatal("ss_memory_asi_access: Internal Error. Not expecting "
"context type 0x%x\n", context_type);
}
}
/*
* check for an out-of-range address (one that lies within the
* "VA hole" or "RA hole")
*/
if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) {
niagara_set_sfsr(sp, &nsp->dmmu, va,
MMU_SFSR_FT_VARANGE, context_type,
asi, 0/*fixme*/, 1);
v9p->post_precise_trap(sp,
(sparcv9_trap_type_t)SS_trap_data_access_exception);
return;
}
partid = nsp->partid;
/* FIXME: Need a better hash than this ! */
idx = va >> SS_MAX_PAGE_SIZE_BITS;
idx += context + partid;
idx &= SS_TLB_HASH_MASK;
tlbp = nsp->dtlbp;
RW_rdlock(&tlbp->rwlock);
/*
* So we search for a matching page using the info we have in the
* hash - while another thread might possibly be removing or
* inserting an entry into the same table.
*/
for ( tep = tlbp->hash[idx].ptr; tep!=(tlb_entry_t*)0; tep = tep->nextp ) {
/* try and match the entry as appropriate */
if (((tep->tag_pfn ^ va)>>tep->match_shift)==0 && tep->match_context==context && tep->partid == partid) {
goto tlb_match;
}
}
RW_unlock(&tlbp->rwlock);
DBGMISS( lprintf(sp->gid, "dtlb miss: pc=%lx asi=%x va=%lx ctx=%x\n", sp->pc, asi, va, context); );
/* Set up the miss handling based on the ASI access type */
switch (asi) {
case SS_ASI_REAL_MEM:
case SS_ASI_REAL_IO:
case SS_ASI_REAL_MEM_LITTLE:
case SS_ASI_REAL_IO_LITTLE:
case SS_ASI_QUAD_LDD_REAL:
case SS_ASI_QUAD_LDD_REAL_LITTLE:
VA48_WARNING(sp, va);
SET_DTLB_FAULT( nsp, VA48(va) );
nsp->dmmu.tag_access_reg = VA48(va) & ~MASK64(12,0); /* Do this properly later - FIXME */
DBGMMU( lprintf(sp->gid, "DMMU tag access = 0x%llx\n", nsp->dmmu.tag_access_reg); );
MEMORY_ACCESS_TRAP();
v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_data_real_translation_miss);
break;
default:
/*
* If the MMU is "disabled" in privileged mode ... this is a real miss, not a
* virtual translation miss, so the fault context and trap type are different
*/
if (nsp->dmmu.enabled) {
miss_context = context;
miss_trap_type = SS_trap_fast_data_access_MMU_miss;
} else {
miss_context = 0; /* null for ra->pa miss undefined ? */
miss_trap_type = SS_trap_data_real_translation_miss;
}
nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | miss_context; /* Do this properly later - FIXME */
tlb_trap:;
VA48_WARNING(sp, va);
SET_DTLB_FAULT( nsp, VA48(va) );
DBGMMU( lprintf(sp->gid, "DMMU tag access = 0x%llx\n", nsp->dmmu.tag_access_reg); );
MEMORY_ACCESS_TRAP();
v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
break;
}
return;
tlb_match:;
/* we have a matching entry ... now all we have to worry about are the permissions */
flags = tep->flags;
pa += tep->pa_offset;
pa_tag += tep->pa_offset;
RW_unlock(&tlbp->rwlock);
/*
* Errors on dtlb hit: stash the tlb_entry pointer and, if a
* subsequent dtlb hit lands on the same entry, post the error again.
*/
#if ERROR_INJECTION
if (sp->error_check == true && errorp->check_dtlb) {
bool_t is_load, is_store;
is_load = IS_V9_MA_LOAD(op);
is_store = IS_V9_MA_STORE(op);
if (is_load) ep = find_errconf(sp, LD, DMDU);
else
if (is_store) ep = find_errconf(sp, ST, DMSU);
if (ep) {
if (errorp->dtep) {
DBGERR( lprintf(sp->gid, "ss_memory_asi_access: "
"errorp->dtep=%x, tep=%x\n",
errorp->dtep,tep); );
if ((tlb_entry_t *)errorp->dtep == tep) {
ss_error_condition(sp, ep);
return;
}
} else {
errorp->dtep = tep;
errorp->addr = va;
ss_error_condition(sp, ep);
return;
}
}
}
#endif
/* privilege test apparently takes priority ... p.51 US-I PRM table 6-4 */
if ((flags & SS_TLB_FLAG_PRIV) && !(mflags & MF_Has_Priv)) {
nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */
niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_PRIV, context_type, asi, 0/*fixme*/, 1);
miss_trap_type = SS_trap_data_access_exception;
goto tlb_trap;
}
/*
* validate bits NFO, E and CP
*/
if ((flags & SS_TLB_FLAG_E) && (mflags & MF_No_Fault)) {
nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */
miss_trap_type = SS_trap_data_access_exception;
niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_SO, context_type, asi, 0/*fixme*/, 1);
goto tlb_trap;
}
if ((flags & SS_TLB_FLAG_NFO) && (!(mflags & MF_No_Fault))) {
nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */
miss_trap_type = SS_trap_data_access_exception;
niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_NFO, context_type, asi, 0/*fixme*/, 1);
goto tlb_trap;
}
if (!(flags & SS_TLB_FLAG_CP) && (mflags & MF_Atomic_Access)) {
nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */
miss_trap_type = SS_trap_data_access_exception;
niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_ATOMICIO, context_type, asi, 0/*fixme*/, 1);
goto tlb_trap;
}
if (IS_V9_MA_STORE(op) && !(flags & SS_TLB_FLAG_WRITE)) {
uint64_t ps1, tte_ps1;
nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */
ps1 = (context == 0) ? nsp->dmmu_ctxt_zero_tsb_ps1.page_size : nsp->dmmu_ctxt_nonzero_tsb_ps1.page_size;
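/*
* The internal sun4u-format TTE splits the page size field:
* size[2] sits at bit 48 and size[1:0] at bits 62:61, so the pieces
* are recombined here before comparing against the PS1 page size.
*/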
tte_ps1 = ((tep->data>>(48-2))&0x4) | ((tep->data>>61)&0x3);
/* Is this the actual logic for direct TSB ptr select - FIXME */
/* State bit updated for data_access_protection - see PRM v1.0 p258 13.11.11 */
nsp->dmmu.tsb_direct_ps1 = (tte_ps1 == ps1);
miss_trap_type = SS_trap_fast_data_access_protection;
niagara_set_sfsr(sp, &nsp->dmmu, va, 0/*fixme*/, context_type, asi, 1, 0);
goto tlb_trap;
}
mflags ^= (flags & SS_TLB_FLAG_IE) ? MF_Little_Endian : 0;
perm_cache = (flags & SS_TLB_FLAG_WRITE) ? XDCACHE_WRITE_PERM : 0;
perm_cache |= (flags & SS_TLB_FLAG_READ) ? XDCACHE_READ_PERM : 0;
} else {
/* Niagara only implements 40 bits of PA; the tlb code
masks the PA, so here we need to mask bypass PAs as well */
pa &= MASK64(39,0);
}
/*
* Now that we have the internal PA, map it to the real
* external PA before looking it up in the domain.
* This does not modify memory addresses, only JBus addresses.
*/
if (pa >= 0x800e000000ull && pa < 0x8010000000ull) {
pa &= 0xffffffffull;
pa |= 0x40000000000ull;
} else if (pa >= 0x8010000000ull && pa < 0x8100000000ull) {
pa &= 0x0ffffffffull;
pa |= 0x60000000000ull;
} else if (pa >= 0xc000000000ull && pa < 0xff00000000ull) {
pa |= 0x70000000000ull;
}
/*
* OK - now go get the pointer to the line data
* ... start by finding the device that has the
* memory we need.
* optimise: by guessing at the last device found.
*/
/* now find the device - looking in the cache first */
cap = sp->xdc.miss_addrp;
if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) {
domain_t * domainp;
config_proc_t * config_procp;
config_procp = sp->config_procp;
domainp = config_procp->domainp;
cap = find_domain_address(domainp, pa);
if (cap == NULL) {
/* OK, it's a bus error - there was no backing store */
EXEC_WARNING(("bus error - (@pc=0x%llx, icount=%llu) access to va=0x%llx (pid=0x%x,ctx_type=0x%x,cacheline va=0x%llx -> physical 0x%llx)", sp->pc, ICOUNT(sp), va, nsp->partid, context_type, tag, pa_tag));
goto data_access_error;
}
}
/* try and get the buffer pointer */
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: calling dev_cacheable\n"); );
da = 0;
if (IS_V9_MA_LOAD(op))
da |= DA_Load;
if (IS_V9_MA_STORE(op))
da |= DA_Store;
extent = cap->config_devp->dev_typep->dev_cacheable(cap, da,
pa_tag-cap->baseaddr, &bufp);
if (extent < XDCACHE_LINE_SIZE) {
bool_t status;
uint_t pio_op;
uint64_t tempreg, *aregp;
pio_op = memop & MA_Op_Mask;
if ((MF_Little_Endian & mflags) && (pio_op == MA_St)) {
tempreg = sparcv9_invert_endianess(regp, (1 << size));
aregp = &tempreg;
} else if ((&(sp->intreg[Reg_sparcv9_g0]) == regp) &&
((pio_op == MA_Ld) || (pio_op == MA_LdSigned))) {
aregp = &tempreg;
} else {
aregp = regp;
}
status = cap->config_devp->dev_typep->dev_cpu_access(sp, cap, pa-cap->baseaddr, memop, aregp);
if ((MF_Little_Endian & mflags) && status && (pio_op == MA_Ld || pio_op == MA_LdSigned)) {
*regp = sparcv9_invert_endianess(regp, (1 << size));
if (pio_op == MA_LdSigned) {
uint32_t shift;
shift = 64 - (8 << size);
*regp = ((sint64_t)(*regp << shift)) >> shift;
}
}
ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);
if (status)
goto done;
EXEC_WARNING(("bus error - (@pc=0x%llx) access to va=0x%llx "
"(pid=0x%x,ctx_type=0x%x,physical 0x%llx)",
sp->pc, va, nsp->partid, context_type, pa));
data_access_error:;
#if !defined(NDEBUG) /* { */
do {
config_proc_t * config_procp;
config_procp = sp->config_procp;
ss_dump_tlbs(config_procp, true);
/* abort(); */ /* FIXME - no longer need this ? */
} while (0);
#endif /* } */
MEMORY_ACCESS_TRAP();
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: post_precise_trap \n"); );
v9p->post_precise_trap(sp, Sparcv9_trap_data_access_error); /* FIXME: right trap ? */
return;
}
#if ERROR_INJECTION /* { */
/* processor-wide checks for unhandled L2 and DRAM errors */
if (npp->error_check) {
bool_t is_load, is_store, is_atomic;
uint8_t bank;
is_load = IS_V9_MA_LOAD(op);
is_store = IS_V9_MA_STORE(op);
is_atomic = IS_V9_MA_ATOMIC(op);
/* check whether, in direct-map mode, this access displacement-flushes the cacheline holding the error */
l2p = npp->l2p;
bank = (pa >> 6) & 0x3;
if (l2p->control[bank] & L2_DMMODE) {
if ((pa & L2_DM_MASK) == (npp->errorp->ldac_addr & L2_DM_MASK)) {
npp->errorp->ldac_addr = NULL;
ss_set_errcheck(npp);
goto npp_err_done;
}
if ((pa & L2_DM_MASK) == (npp->errorp->ldau_addr & L2_DM_MASK)) {
npp->errorp->ldau_addr = NULL;
ss_set_errcheck(npp);
goto npp_err_done;
}
}
/*
* when accessing a cacheline with an error: a load or partial store
* causes LDAC or LDAU, a store to a line with a correctable error clears it,
* and a store to one with an uncorrectable error causes a writeback error
*/
if (pa == npp->errorp->ldac_addr) {
if (is_load ||
(is_store && (size == MA_Size8 || size == MA_Size16))) {
ep = new_errconf((is_load ? LD : ST), LDAC);
ep->npp = true;
goto lda_err;
} else if (is_store) {
npp->errorp->ldac_addr = NULL;
ss_set_errcheck(npp);
}
} else if (pa == npp->errorp->ldau_addr) {
if (is_load ||
(is_store && (size == MA_Size8 || size == MA_Size16))) {
ep = new_errconf((is_load ? LD : ST), LDAU);
ep->npp = true;
goto lda_err;
} else if (is_store) {
npp->errorp->ldau_addr = NULL;
ss_set_errcheck(npp);
}
}
}
npp_err_done:
/* now check for errors to be generated from this thread's error list */
if (sp->error_check && errorp->check_xdcache) {
bool_t is_load, is_store, is_atomic;
uint8_t bank;
xicache_t * xicp;
xicache_instn_t * xip;
uint64_t xidx;
tvaddr_t xpc;
is_load = IS_V9_MA_LOAD(op);
is_store = IS_V9_MA_STORE(op);
is_atomic = IS_V9_MA_ATOMIC(op);
if (is_load) ep = find_errconf(sp, LD,
(DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU));
else
if (is_store) ep = find_errconf(sp, ST,
(DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU));
if (ep)
switch(ep->type) {
case IRC:
case IRU:
case FRC:
case FRU:
xicp = sp->xicachep;
xpc = sp->pc;
xidx = (xpc>>2) & XICACHE_NUM_INSTR_MASK;
xip = &xicp->instn[xidx];
errorp->reg = X_RS1(xip->rawi);
ss_error_condition(sp, ep);
return;
case DTC:
case DDC:
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
lda_err: case LDAU:
case LDAC:
l2p = npp->l2p;
for (bank=0; bank<npp->num_l2banks; bank++) {
if (l2p->control[bank] & L2_DIS) goto l2_disabled;
}
if (is_load) {
if (is_atomic) errorp->l2_write = L2_RW_bit;
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
} else
if (is_store && (size == MA_Size8 || size == MA_Size16)) {
errorp->l2_write = L2_RW_bit;
errorp->partial_st = true;
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
}
break;
ldw_err: case LDWU:
case LDWC:
l2p = npp->l2p;
for (bank=0; bank<npp->num_l2banks; bank++) {
if (l2p->control[bank] & L2_DIS) goto l2_disabled;
}
if (is_store) {
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
}
break;
case DAC:
l2p = npp->l2p;
for (bank=0; bank<npp->num_l2banks; bank++) {
if (l2p->control[bank] & L2_DIS) goto l2_disabled;
}
if (ep->op == LD && is_load) {
if (is_atomic) errorp->l2_write = L2_RW_bit;
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
} else
if (ep->op == ST && is_store) {
if (size == MA_Size8 || size == MA_Size16)
errorp->partial_st = true;
errorp->l2_write = L2_RW_bit;
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
}
break;
case DAU:
l2p = npp->l2p;
for (bank=0; bank<npp->num_l2banks; bank++) {
if (l2p->control[bank] & L2_DIS) goto l2_disabled;
}
if (ep->op == LD && is_load) {
if (is_atomic) errorp->l2_write = L2_RW_bit;
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
} else
if (ep->op == ST && is_store) {
if (size == MA_Size8 || size == MA_Size16)
errorp->partial_st = true;
errorp->l2_write = L2_RW_bit;
errorp->addr = pa;
ss_error_condition(sp, ep);
return;
}
break;
l2_disabled: DBGERR( lprintf(sp->gid, "ss_memory_asi_access: "
"No LDAC/LDWC/LDAU/LDWU/DAC Error - L2 disabled\n"); );
break;
}
}
#endif /* } */
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: handling cacheable device memory\n"); );
/*
* Now handle cacheable device memory
*
* Because we implicitly assume that the xdc uses the current context
* we only add missed entries to the xdc iff it was a normal memory op
*/
if ((mflags & (MF_Normal|MF_Little_Endian)) == MF_Normal) {
long ridx;
xdcache_line_t * xclp;
sp->xdc.miss_addrp = cap; /* cache for next time */
ridx = (va >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK;
xclp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + ridx);
/* only cache if memory is cacheable */
/* fill in the line */
/* WARNING: This tag may be a full 64bit value even if pstate.am=1 */
/* do not use ea_offset with anything other than tag */
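/* offset is stored so that the host buffer pointer can later be recovered as tag + offset */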
xclp->tag = tag | perm_cache | sp->tagstate;
xclp->offset = ((uint64_t)bufp) - tag;
}
/*
* Sigh now complete the load/store on behalf of the original
* load instruction
*/
#if HOST_CPU_LITTLE_ENDIAN
/* temporary hack */
mflags ^= MF_Little_Endian;
#endif
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: completing load/store on behalf of original instr.\n"); );
ptr = (uint8_t*)(bufp + (pa & XDCACHE_LINE_OFFSET_MASK) );
switch (op) {
uint64_t val, cval;
case MA_Ld:
switch (size) {
case MA_Size8:
val = *(uint8_t*)ptr;
break;
case MA_Size16:
val = *(uint16_t*)ptr;
break;
case MA_Size32:
val = *(uint32_t*)ptr;
break;
case MA_Size64:
val = *(uint64_t*)ptr;
break;
default:
abort();
}
if (MF_Little_Endian & mflags) {
DBGLE( lprintf(sp->gid, "SunSPARC::: MA_Ld with LE - val=0x%llx count=0x%x\n",
val, (1 << size)); );
val = sparcv9_invert_endianess(&val, (1 << size));
}
goto complete_load;
case MA_LdSigned:
switch (size) {
case MA_Size8:
val = *(sint8_t*)ptr;
break;
case MA_Size16:
val = *(sint16_t*)ptr;
break;
case MA_Size32:
val = *(sint32_t*)ptr;
break;
default:
abort();
}
if (MF_Little_Endian & mflags) {
uint32_t shift;
DBGLE(lprintf(sp->gid, "SunSPARC::: MA_LdSigned with LE - val=0x%llx count=0x%x\n",
val, (1 << size)); );
val = sparcv9_invert_endianess(&val, (1 << size));
shift = 64 - (8 << size);
val = ((sint64_t)(val << shift)) >> shift;
}
goto complete_load;
case MA_St:
if (MF_Little_Endian & mflags) {
DBGLE( lprintf(sp->gid, "SunSPARC::: MA_St with LE - val=0x%llx\n", *regp); );
val = sparcv9_invert_endianess(regp, (1 << size));
} else {
val = *regp;
}
if (mflags & MF_Blk_Init) {
/* If line in L2 cache, leave data alone, otherwise zero it */
/* XXX How to simulate? */
((uint64_t*)ptr)[0] = 0;
((uint64_t*)ptr)[1] = 0;
((uint64_t*)ptr)[2] = 0;
((uint64_t*)ptr)[3] = 0;
((uint64_t*)ptr)[4] = 0;
((uint64_t*)ptr)[5] = 0;
((uint64_t*)ptr)[6] = 0;
((uint64_t*)ptr)[7] = 0;
}
switch (size) {
case MA_Size8:
*(uint8_t*)ptr = val;
break;
case MA_Size16:
*(uint16_t*)ptr = val;
break;
case MA_Size32:
*(uint32_t*)ptr = val;
break;
case MA_Size64:
*(uint64_t*)ptr = val;
break;
default:
abort();
}
break;
case MA_LdFloat:
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdFloat with LE - \n"); );
ASSERT(&(sp->intreg[Reg_sparcv9_g0]) != regp);
switch (size) {
case MA_Size32:
if (MF_Little_Endian & mflags) {
val = *(ieee_fp32_t*)ptr;
*(ieee_fp32_t*)regp =
sparcv9_invert_endianess(&val,
sizeof (ieee_fp32_t));
} else
*(ieee_fp32_t*)regp = *(ieee_fp32_t*)ptr;
break;
case MA_Size64:
if (MF_Little_Endian & mflags)
*(ieee_fp64_t*)regp =
sparcv9_invert_endianess(
(uint64_t *)ptr,
sizeof (ieee_fp64_t));
else
*(ieee_fp64_t*)regp = *(ieee_fp64_t*)ptr;
break;
case MA_Size512:
if ((MF_Little_Endian & mflags) == 0) {
uint_t i;
for (i = 0; i < 8; i++) {
*(ieee_fp64_t*)(regp + i) =
*(ieee_fp64_t*)(ptr + i*8);
}
} else {
uint_t i;
for (i = 0; i < 8; i++) {
*(ieee_fp64_t*)(regp + i) =
sparcv9_invert_endianess(
(uint64_t *)(ptr + i*8),
sizeof (ieee_fp64_t));
}
}
break;
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
case MA_Size128:
ASSERT((MF_Little_Endian & mflags) == 0);
*(ieee_fp128_t*)regp = *(ieee_fp128_t*)ptr;
break;
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */
default:
abort();
}
goto done;
case MA_StFloat:
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_StFloat with LE - \n"); );
switch (size) {
case MA_Size32:
if (MF_Little_Endian & mflags) {
val = *(ieee_fp32_t*)regp;
*(ieee_fp32_t*)ptr =
sparcv9_invert_endianess(&val,
sizeof (ieee_fp32_t));
} else
*(ieee_fp32_t*)ptr = *(ieee_fp32_t*)regp;
break;
case MA_Size64:
if (MF_Little_Endian & mflags)
*(ieee_fp64_t*)ptr =
sparcv9_invert_endianess(regp,
sizeof (ieee_fp64_t));
else
*(ieee_fp64_t*)ptr = *(ieee_fp64_t*)regp;
break;
case MA_Size512:
if ((MF_Little_Endian & mflags) == 0) {
uint_t i;
for (i = 0; i < 8; i++) {
*(ieee_fp64_t*)(ptr + i*8) =
*(ieee_fp64_t*)(regp + i);
}
} else {
uint_t i;
for (i = 0; i < 8; i++) {
*(ieee_fp64_t*)(ptr + i*8) =
sparcv9_invert_endianess(
(regp + i),
sizeof (ieee_fp64_t));
}
}
break;
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
case MA_Size128:
ASSERT((MF_Little_Endian & mflags) == 0);
*(ieee_fp128_t*)ptr = *(ieee_fp128_t*)regp;
break;
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */
default:
abort();
}
goto done;
case MA_LdSt:
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdSt with LE - \n"); );
switch (size) {
case MA_Size8:
val = host_ldstub(ptr, reg2, *regp);
break;
default:
abort();
}
goto complete_load;
case MA_Swap:
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_Swap with LE - \n"); );
if (MF_Little_Endian & mflags) {
val = sparcv9_invert_endianess(regp, (1 << size));
} else {
val = *regp;
}
switch (size) {
case MA_Size32:
val = host_swap((uint32_t *)ptr, val);
break;
default:
abort();
}
if (MF_Little_Endian & mflags) {
val = sparcv9_invert_endianess(&val, (1 << size));
}
goto complete_load;
case MA_CAS:
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_CAS with LE - \n"); );
if (MF_Little_Endian & mflags) {
val = sparcv9_invert_endianess(regp, (1 << size));
cval = sparcv9_invert_endianess(&reg2, (1 << size));
} else {
val = *regp;
cval = reg2;
}
switch (size) {
case MA_Size32:
val = host_cas32((uint32_t *)ptr, cval, val);
break;
case MA_Size64:
val = host_cas64((uint64_t *)ptr, cval, val);
break;
default:
abort();
}
if (MF_Little_Endian & mflags) {
val = sparcv9_invert_endianess(&val, (1 << size));
}
goto complete_load;
complete_load:
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
case MA_LdDouble:
switch (size) {
case MA_Size64: /* standard sparc LDD instruction */
val = *(uint64_t *)ptr;
regp[0] = (uint32_t)(val >> 32);
regp[1] = (uint32_t)val;
if (MF_Little_Endian & mflags) {
DBGLE( lprintf(sp->gid, "SunSPARC::: MA_LdDouble with LE - val=0x%llx count=0x%x\n",
val, (1 << size)); );
regp[0] = sparcv9_invert_endianess(&regp[0], (1 << size)>>1);
regp[1] = sparcv9_invert_endianess(&regp[1], (1 << size)>>1);
}
sp->intreg[Reg_sparcv9_g0] = 0; /* regp might be %g0 */
break;
case MA_Size128:
host_atomic_get128be((uint64_t *)ptr, regp, &regp[1]);
if (MF_Little_Endian & mflags) {
DBGLE(lprintf(sp->gid, "SunSPARC::: MA_ldDouble with LE - val=0x%llx,0x%llx count=0x%x\n",
regp[0], regp[1], (1 << size)); );
regp[0] = sparcv9_invert_endianess(&regp[0], (1 << size)>>1);
regp[1] = sparcv9_invert_endianess(&regp[1], (1 << size)>>1);
}
sp->intreg[Reg_sparcv9_g0] = 0; /* regp might be %g0 */
break;
default:
fatal("ss_memory_asi_access: internal error - "
"illegal size for MA_LdDouble");
}
break;
case MA_StDouble:
{
uint32_t reven;
uint32_t rodd;
ASSERT(size == MA_Size64);
if (MF_Little_Endian & mflags) {
DBGLE(lprintf(sp->gid, "SunSPARC::: MA_StDouble with LE - reven=0x%x rodd=0x%x count=0x%x\n",
(uint32_t)regp[0], (uint32_t)regp[1], (1 << size)); );
reven = (uint32_t)sparcv9_invert_endianess(&regp[0], (1 << size)>>1);
rodd = (uint32_t)sparcv9_invert_endianess(&regp[1], (1 << size)>>1);
} else {
reven = (uint32_t)regp[0];
rodd = (uint32_t)regp[1];
}
val = ((uint64_t)reven << 32) | ((uint32_t)rodd);
*(uint64_t *)ptr = val;
}
break;
case MA_V9_LdFSR:
ASSERT( MA_Size32 == size );
val = *(uint32_t*)ptr;
if (MF_Little_Endian & mflags)
val = sparcv9_invert_endianess(&val, (1 << size));
v9_set_fsr_lower(sp, val);
break;
case MA_V9_LdXFSR:
ASSERT( MA_Size64 == size );
val = *(uint64_t*)ptr;
if (MF_Little_Endian & mflags)
val = sparcv9_invert_endianess(&val, (1 << size));
v9_set_fsr(sp, val);
break;
case MA_V9_StFSR:
ASSERT( MA_Size32 == size );
val = v9_get_fsr(sp);
if (MF_Little_Endian & mflags)
val = sparcv9_invert_endianess(&val, (1 << size));
*(uint32_t*)ptr = val & MASK64(31,0);
/* FTT is cleared on read of FSR */
sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
DBGFSR( lprintf(sp->gid, "stfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), val); );
break;
case MA_V9_StXFSR:
ASSERT( MA_Size64 == size );
val = v9_get_fsr(sp);
if (MF_Little_Endian & mflags)
val = sparcv9_invert_endianess(&val, (1 << size));
*(uint64_t*)ptr = val;
/* FTT is cleared on read of FSR */
sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
DBGFSR( lprintf(sp->gid, "stxfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), val); );
break;
default:
abort();
}
done:;
/*
* Finally go get the next instruction
*/
DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: getting the next instr.\n"); );
NEXT_INSTN(sp);
}
/*
* Insert the entry using the mmup->tag_access_reg and the supplied data field
* into the TLB, being careful to first invalidate any entries that
* conflict with the page we're trying to insert.
*
* Returns false on failure, true on success ... failure implies
* a data access exception for the caller - which it must generate.
*/
bool_t
ss_tlb_insert(simcpu_t * sp, ss_mmu_t * mmup, ss_tlb_t * tlbp, uint_t partid,
bool_t is_real, uint64_t data, uint_t idx)
{
tlb_entry_t * tep;
tlb_entry_t te_copy;
uint_t shift, size;
tvaddr_t tag;
uint16_t tag_context;
matchcontext_t match_context;
uint_t i;
bool_t need_flush = false;
/* FIXME: what does niagara do if you try to load an invalid TTE ? */
if (idx == SS_TLB_LRU && ((data >> SUN4U_TTED_V_BIT)&1) == 0) {
EXEC_WARNING(("tlb_insert 0x%llx (@pc=0x%llx, icount=%llu) TTE invalid", data, sp->pc, ICOUNT(sp)));
}
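/*
* Decode the page size code from the TTE (bit 48 = size[2],
* bits 62:61 = size[1:0]); unsupported encodings yield a zero shift
* and the insert is rejected, which the caller reports as a
* data_access_exception.
*/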
size = ((data>>(48-2))&0x4) | ((data>>61)&0x3);
/* figure out the useful info about our page to insert */
shift = SUN4V_PAGE_SIZE_SHIFT(size);
if (shift == 0) return false;
/*
* This is VERY important:
* The tag access register need NOT contain a correctly aligned tag entry
* for the given page size. So it is REALLY IMPORTANT when forming the TLB
* entry tag field that we correctly mask off the lower bits corresponding to
* the selected page size. This is especially important because we use this value to
* compute a va-pa offset.
* Note: we do a similar mask operation later when using the PA to compute the
* offset value we create.
*/
tag = mmup->tag_access_reg & MASK64(63,shift);
tag_context = mmup->tag_access_reg & MASK64(12,0);
match_context = is_real ? SS_TLB_REAL_CONTEXT : tag_context;
RW_wrlock(&tlbp->rwlock);
/*
* First lets look for potentially matching pages we may have to
* de-map first. We demap the old entry if it incorporates our new
* page, or vice-versa.
*/
tep = &(tlbp->tlb_entryp[0]);
for (i=tlbp->nentries; i>0; i--, tep++) {
tvaddr_t xor;
if (tep->hashidx == -1)
continue;
xor = tep->tag_pfn ^ tag;
if ( ( (xor>>tep->match_shift)==0 || (xor >> shift)==0 ) &&
tep->match_context == match_context && tep->partid == partid) {
need_flush = true;
/* matching entry - put back on the free list */
ss_tlb_unhash(tlbp, tep);
ss_free_tlb_entry( tlbp, tep );
#if ERROR_INJECTION
DBGERR( lprintf(sp->gid, "ss_tlb_insert(): errorp->itep=%x"
" errorp->dtep=%x tep=%x\n",
sp->errorp->itep, sp->errorp->dtep, tep); );
tlb_entry_error_match(sp, mmup, tep);
#endif
}
}
/*
* Now we need to pick an entry for the one we wish
* to insert
*/
if (idx != SS_TLB_LRU) {
tep = &tlbp->tlb_entryp[idx];
if (tep->hashidx != -1) {
need_flush = true;
ss_tlb_unhash(tlbp, tep);
} else
ss_tlb_unfree(tlbp, tep);
} else {
tep = tlbp->freep;
if (tep == (tlb_entry_t*)0) {
/* OK replacement required - clobber a valid entry */
/* FIXME: What is Niagara's replacement policy ? */
#if SS_TLB_REPLACE_RANDOM /* { */
do {
i = random() % tlbp->nentries;
tep = &(tlbp->tlb_entryp[i]);
} while (tep->flags & SS_TLB_FLAG_LOCKED);
#elif SS_TLB_REPLACE_RROBIN /* } { */
i = tlbp->last_replaced;
do {
i = i+1;
if (i>=tlbp->nentries) i=0; /* wrap */
tep = &(tlbp->tlb_entryp[i]);
if (i==tlbp->last_replaced) {
/*
* if all entries are locked, replace the final TLB entry
*/
i = tlbp->nentries - 1;
EXEC_WARNING(("all TLB entries are locked, the final TLB entry %d is replaced",i));
tep = &(tlbp->tlb_entryp[i]);
break;
}
} while (tep->flags & SS_TLB_FLAG_LOCKED);
tlbp->last_replaced = i;
#else
#error Need to define TLB replacement alg
#endif /* } */
need_flush = true;
/* put back on the free list */
ss_tlb_unhash(tlbp, tep);
ss_free_tlb_entry( tlbp, tep );
tep = tlbp->freep;
}
/* free entry must be invalid ! */
ASSERT(!(tep->data&(1ull<<SUN4U_TTED_V_BIT)));
tlbp->freep = tep->nextp;
}
/* create the new entry */
tep->is_real = is_real;
tep->match_context = match_context;
tep->partid = partid;
tep->match_shift = shift;
tep->tag_pfn = tag;
tep->tag_context = tag_context;
tep->data = data;
/* Note: variable size mask again based on page size */
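/* pa_offset is defined so the hit path can later form the physical address as simply va + pa_offset. */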
tep->pa_offset = (data & MASK64(39,shift)) - tag;
DBGMMU( lprintf(sp->gid, "tlb_insert: %c-TLB: tte=%llx [ sz=0x%x l=%d cp=%d cv=%d e=%d p=%d w=%d ]\n",
mmup->is_immu ? 'I' : 'D', data,
size,
(uint_t)((data>>6)&1LL), (uint_t)((data>>5)&1LL),
(uint_t)((data>>4)&1LL), (uint_t)((data>>3)&1LL),
(uint_t)((data>>2)&1LL), (uint_t)((data>>1)&1LL)
);
lprintf(sp->gid, "\tpart=0x%x tag=%p ctx=%x/%x offset=%llx\n",
partid, tag, tag_context, match_context, tep->pa_offset); );
/* niagara doesn't have read and exec bits */
if (mmup->is_immu)
tep->flags = SS_TLB_FLAG_EXEC;
else
tep->flags = SS_TLB_FLAG_READ;
if ( ((data>>1)&1) ) tep->flags |= SS_TLB_FLAG_WRITE;
if ( ((data>>2)&1) ) tep->flags |= SS_TLB_FLAG_PRIV;
if ( ((data>>6)&1) ) tep->flags |= SS_TLB_FLAG_LOCKED;
if ( (data&BIT(39)) == 0 ) tep->flags |= SS_TLB_FLAG_CP;
if ( ((data>>3)&1) ) tep->flags |= SS_TLB_FLAG_E;
if ( ((data>>60)&1) ) tep->flags |= SS_TLB_FLAG_NFO;
if ( ((data>>59)&1) ) tep->flags |= SS_TLB_FLAG_IE;
/* Finally insert the new entry into the hash table for the TLB */
/* Hash uses match_context so it skews real->phys entries away from context 0 */
i = tag >> SS_MAX_PAGE_SIZE_BITS;
i += match_context + partid;
i &= SS_TLB_HASH_MASK;
if (((data >> SUN4U_TTED_V_BIT)&1) != 0) {
tep->hashidx = i; /* to help with unhooking later */
tep->nextp = tlbp->hash[i].ptr;
tlbp->hash[i].ptr = tep;
} else {
tep->nextp = tlbp->freep;
tlbp->freep = tep;
}
RW_unlock(&tlbp->rwlock);
if (need_flush) {
if (mmup->is_immu)
sp->xicache_trans_flush_pending = true;
else
sp->xdcache_trans_flush_pending = true;
if (tlbp->shares > 1) {
ss_tlb_flush_shares(sp, tlbp, mmup->is_immu);
}
}
return true;
}
/*
* Dumb function to shuffle the sun4v TTE format into the sun4u
* one used internally by Niagara.
*/
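/* SHIFT_FIELD(d, hi, lo, new) extracts bits [hi:lo] of d and deposits them at bit position new. */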
#define SHIFT_FIELD(_data, _hi,_lo,_new) (((((uint64_t)(_data))&MASK64(_hi,_lo))>>(_lo))<<(_new))
uint64_t niagara_shuffle_sun4v_format(uint64_t data)
{
uint64_t val;
val = data & MASK64(63,63); /* valid bit */
val |= SHIFT_FIELD(data, 62,62, 60); /* NFO */
val |= SHIFT_FIELD(data, 61,61, 6); /* locked */
val |= SHIFT_FIELD(data, 39,13, 13); /* pa */
val |= SHIFT_FIELD(data, 12,12, 59); /* invert endianness */
val |= SHIFT_FIELD(data, 11,11, 3); /* side effect */
val |= SHIFT_FIELD(data, 10, 9, 4); /* cacheable bits */
val |= SHIFT_FIELD(data, 8, 8, 2); /* privileged */
val |= SHIFT_FIELD(data, 6, 6, 1); /* writeable */
val |= SHIFT_FIELD(data, 2, 2, 48); /* size[2] */
val |= SHIFT_FIELD(data, 1, 0, 61); /* size[1:0] */
return val;
}
/*
* Pseudo devices
*/
#ifndef NDEBUG /* { */
char * ss_ssi_reg_name(uint_t reg)
{
char * s;
switch (reg) {
case NI_SSI_TIMEOUT: s="ssi_timeout"; break;
case NI_SSI_LOG: s="ssi_log"; break;
default: s="Illegal ssi register"; break;
}
return s;
}
char * ss_jbi_reg_name(uint_t reg)
{
char * s;
switch (reg) {
case NI_JBI_CONFIG1: s="jbi_config1"; break;
case NI_JBI_CONFIG2: s="jbi_config2"; break;
case NI_JBI_INT_MRGN: s="jbi_int_mrgn"; break;
case NI_JBI_DEBUG: s="jbi_debug"; break;
case NI_JBI_DEBUG_ARB: s="jbi_debug_arb"; break;
case NI_JBI_PERF_CTL: s="jbi_perf_ctl"; break;
case NI_JBI_PERF_CNT: s="jbi_perf_cnt"; break;
case NI_JBI_ERR_INJECT: s="jbi_err_inject"; break;
case NI_JBI_ERR_CONFIG: s="jbi_err_config"; break;
case NI_JBI_ERROR_LOG: s="jbi_error_log"; break;
case NI_JBI_ERROR_OVF: s="jbi_error_ovf"; break;
case NI_JBI_LOG_ENB: s="jbi_log_enb"; break;
case NI_JBI_SIG_ENB: s="jbi_sig_enb"; break;
case NI_JBI_LOG_ADDR: s="jbi_log_addr"; break;
case NI_JBI_LOG_CTRL: s="jbi_log_ctrl"; break;
case NI_JBI_LOG_DATA0: s="jbi_log_data0"; break;
case NI_JBI_LOG_DATA1: s="jbi_log_data1"; break;
case NI_JBI_LOG_PAR: s="jbi_log_par"; break;
case NI_JBI_LOG_NACK: s="jbi_log_nack"; break;
case NI_JBI_LOG_ARB: s="jbi_log_arb"; break;
case NI_JBI_L2_TIMEOUT: s="jbi_l2_timeout"; break;
case NI_JBI_ARB_TIMEOUT: s="jbi_arb_timeout"; break;
case NI_JBI_TRANS_TIMEOUT: s="jbi_trans_timeout"; break;
case NI_JBI_INTR_TIMEOUT: s="jbi_intr_timeout"; break;
case NI_JBI_MEMSIZE: s="jbi_memsize"; break;
default: s="Illegal jbi register"; break;
}
return s;
}
char * ss_jbus_reg_name(uint_t reg)
{
char * s;
switch (reg) {
case NI_J_INT_DATA0: s="j_int_data0"; break;
case NI_J_INT_DATA1: s="j_int_data1"; break;
case NI_J_INT_ADATA0: s="j_int_adata0"; break;
case NI_J_INT_ADATA1: s="j_int_adata1"; break;
case NI_J_INT_BUSY: s="j_int_busy"; break;
case NI_J_INT_ABUSY: s="j_int_abusy"; break;
default: s="Illegal jbus register"; break;
}
return s;
}
char * ss_iob_reg_name(uint_t reg)
{
char * s;
switch (reg) {
case NI_INT_MAN0: s="int_man0"; break;
case NI_INT_MAN1: s="int_man1"; break;
case NI_INT_MAN2: s="int_man2"; break;
case NI_INT_MAN3: s="int_man3"; break;
case NI_INT_CTL0: s="int_ctl0"; break;
case NI_INT_CTL1: s="int_ctl1"; break;
case NI_INT_CTL2: s="int_ctl2"; break;
case NI_INT_CTL3: s="int_ctl3"; break;
case NI_INT_VEC_DIS: s="int_vec_dis"; break;
case NI_J_INT_VEC: s="j_int_vec"; break;
case NI_RSET_STAT: s="rset_stat"; break;
case NI_TM_STAT_CTL: s="tm_stat_ctl"; break;
case NI_PROC_SER_NUM: s="proc_ser_num"; break;
case NI_CORE_AVAIL: s="core_avail"; break;
case NI_IOB_FUSE: s="iob_fuse"; break;
case NI_INT_MRGN_REG: s="int_mrgn_reg"; break;
case NI_L2_VIS_CONTROL: s="l2_vis_control"; break;
case NI_L2_VIS_MASK_A: s="l2_vis_mask_a"; break;
case NI_L2_VIS_MASK_B: s="l2_vis_mask_b"; break;
case NI_L2_VIS_COMPARE_A: s="l2_vis_compare_a"; break;
case NI_L2_VIS_COMPARE_B: s="l2_vis_compare_b"; break;
case NI_L2_TRIG_DELAY: s="l2_trig_delay"; break;
case NI_IOB_VIS_SELECT: s="iob_vis_select"; break;
case NI_DB_ENET_CONTROL: s="db_enet_control"; break;
case NI_DB_ENET_IDLEVAL: s="db_enet_idleval"; break;
case NI_DB_JBUS_CONTROL: s="db_jbus_control"; break;
case NI_DB_JBUS_MASK0: s="db_jbus_mask0"; break;
case NI_DB_JBUS_MASK1: s="db_jbus_mask1"; break;
case NI_DB_JBUS_MASK2: s="db_jbus_mask2"; break;
case NI_DB_JBUS_MASK3: s="db_jbus_mask3"; break;
case NI_DB_JBUS_COMPARE0: s="db_jbus_compare0"; break;
case NI_DB_JBUS_COMPARE1: s="db_jbus_compare1"; break;
case NI_DB_JBUS_COMPARE2: s="db_jbus_compare2"; break;
case NI_DB_JBUS_COMPARE3: s="db_jbus_compare3"; break;
case NI_DB_JBUS_COUNT: s="db_jbus_count"; break;
default: s="Illegal clock register"; break;
}
return s;
}
char * ss_clock_reg_name(uint_t reg)
{
char * s;
switch (reg) {
case SS_CLOCK_DIVIDER: s="divider"; break;
case SS_CLOCK_CONTROL: s="control"; break;
case SS_CLOCK_DLL_CONTROL: s="dll_control"; break;
case SS_CLOCK_JBUS_SYNC: s="jbus_sync"; break;
case SS_CLOCK_DLL_BYPASS: s="dll_bypass"; break;
case SS_CLOCK_DRAM_SYNC: s="dram_sync"; break;
case SS_CLOCK_VERSION: s="version"; break;
default: s="Illegal clock register"; break;
}
return s;
}
char * ss_l2_ctrl_reg_name(uint_t reg)
{
char * s;
switch (reg) {
case SS_L2_DIAG_DATA: s="diag_data"; break;
case SS_L2_DIAG_TAG: s="diag_tag"; break;
case SS_L2_DIAG_VUAD: s="diag_vuad"; break;
case SS_L2_CONTROL: s="control"; break;
case SS_L2_ERROR_ENABLE: s="error_enable"; break;
case SS_L2_ERROR_STATUS: s="error_status"; break;
case SS_L2_ERROR_ADDRESS: s="error_address"; break;
case SS_L2_ERROR_INJECT: s="error_inject"; break;
default: s="Illegal L2 control register"; break;
}
return s;
}
char * ss_dram_ctrl_reg_name(uint_t reg)
{
char * s;
switch (reg) {
case SS_DRAM_CAS_ADDR_WIDTH: s="cas_addr_width"; break;
case SS_DRAM_CAS_LAT: s="cas_lat"; break;
case SS_DRAM_CHANNEL_DISABLED: s="channel_disabled"; break;
case SS_DRAM_DBG_TRG_EN: s="dbg_trg_en"; break;
case SS_DRAM_DIMM_INIT: s="dimm_init"; break;
case SS_DRAM_DIMM_PRESENT: s="dimm_present"; break;
case SS_DRAM_DIMM_STACK: s="dimm_stack"; break;
case SS_DRAM_DRAM_TRCD: s="dram_trcd"; break;
case SS_DRAM_ERROR_ADDRESS: s="error_address"; break;
case SS_DRAM_ERROR_COUNTER: s="error_counter"; break;
case SS_DRAM_ERROR_INJECT: s="error_inject"; break;
case SS_DRAM_ERROR_LOCATION: s="error_location"; break;
case SS_DRAM_ERROR_STATUS: s="error_status"; break;
case SS_DRAM_EXT_WR_MODE1: s="ext_wr_mode1"; break;
case SS_DRAM_EXT_WR_MODE2: s="ext_wr_mode2"; break;
case SS_DRAM_EXT_WR_MODE3: s="ext_wr_mode3"; break;
case SS_DRAM_FAILOVER_MASK: s="failover_mask"; break;
case SS_DRAM_FAILOVER_STATUS: s="failover_status"; break;
case SS_DRAM_HW_DMUX_CLK_INV: s="hw_dmux_clk_inv"; break;
case SS_DRAM_INIT_STATUS: s="init_status"; break;
case SS_DRAM_MODE_WRITE_STATUS: s="mode_write_status"; break;
case SS_DRAM_OPEN_BANK_MAX: s="open_bank_max"; break;
case SS_DRAM_PAD_EN_CLK_INV: s="pad_en_clk_inv"; break;
case SS_DRAM_PERF_COUNT: s="perf_count"; break;
case SS_DRAM_PERF_CTL: s="perf_ctl"; break;
case SS_DRAM_PRECHARGE_WAIT: s="precharge_wait"; break;
case SS_DRAM_PROG_TIME_CNTR: s="prog_time_cntr"; break;
case SS_DRAM_RANK1_PRESENT: s="rank1_present"; break;
case SS_DRAM_RAS_ADDR_WIDTH: s="ras_addr_width"; break;
case SS_DRAM_REFRESH_COUNTER: s="refresh_counter"; break;
case SS_DRAM_REFRESH_FREQ: s="refresh_freq"; break;
case SS_DRAM_SCRUB_ENABLE: s="scrub_enable"; break;
case SS_DRAM_SCRUB_FREQ: s="scrub_freq"; break;
case SS_DRAM_SEL_LO_ADDR_BITS: s="sel_lo_addr_bits"; break;
case SS_DRAM_SW_DV_COUNT: s="sw_dv_count"; break;
case SS_DRAM_TIWTR: s="tiwtr"; break;
case SS_DRAM_TMRD: s="tmrd"; break;
case SS_DRAM_TRAS: s="tras"; break;
case SS_DRAM_TRC: s="trc"; break;
case SS_DRAM_TRFC: s="trfc"; break;
case SS_DRAM_TRP: s="trp"; break;
case SS_DRAM_TRRD: s="trrd"; break;
case SS_DRAM_TRTP: s="trtp"; break;
case SS_DRAM_TRTW: s="trtw"; break;
case SS_DRAM_TWR: s="twr"; break;
case SS_DRAM_TWTR: s="twtr"; break;
case SS_DRAM_WAIR_CONTROL: s="wair_control"; break;
default: s="Illegal DRAM control register"; break;
}
return s;
}
#endif /* } */
static void ss_ssi_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
npp = (ss_proc_t *)config_devp->devp;
npp->ssip->timeout = 0;
npp->ssip->log = 0;
}
static void ss_jbi_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
ss_jbi_t * jbip;
npp = (ss_proc_t *)config_devp->devp;
jbip = npp->jbip;
jbip->config1 = JBI_PORT_LOCN(0x7f) | JBI_PORT_PRES(0x3) | JBI_MID(0x3e);
jbip->config2 = JBI_IQ_HIGH(0x7);
jbip->int_mrgn = 0x1515;
jbip->debug = 0x0;
jbip->debug_arb = 0x0;
jbip->perf_ctl = 0x0;
jbip->perf_cnt = 0x0;
jbip->err_inject = 0x0;
jbip->err_config = 0x0;
jbip->error_log = 0x0;
jbip->error_ovf = 0x0;
jbip->log_enb = 0x0;
jbip->sig_enb = 0x0;
jbip->log_addr = 0x0;
jbip->log_ctrl = 0x0;
jbip->log_data0 = 0x0;
jbip->log_data1 = 0x0;
jbip->log_par = 0x0;
jbip->log_nack = 0x0;
jbip->log_arb = 0x0;
jbip->l2_timeout = 0x0;
jbip->arb_timeout = 0x0;
jbip->trans_timeout = 0x0;
jbip->intr_timeout = 0x0;
jbip->memsize = 0x0;
}
static void ss_jbus_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
ss_jbus_t * jbusp;
uint_t i;
npp = (ss_proc_t *)config_devp->devp;
jbusp = npp->jbusp;
for (i = 0; i < IOB_JBUS_TARGETS; i++) {
jbusp->j_int_data0[i] = 0x00;
jbusp->j_int_data1[i] = 0x00;
jbusp->j_int_busy[i] = 0x00;
}
pthread_mutex_init(&jbusp->lock, NULL);
}
static void ss_iob_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
ss_iob_t * iobp;
uint64_t avail, cores, device;
npp = (ss_proc_t *)config_devp->devp;
iobp = npp->iobp;
pthread_mutex_init(&iobp->iob_lock, NULL);
/* IOB Interrupt Registers section 7.3 of PRM 1.2 */
for (device=0; device<IOB_DEV_MAX; device++) {
iobp->int_man[device] = 0x0000;
iobp->int_ctl[device] = IOB_INT_CTL_MASK;
}
iobp->int_vec_dis = 0x0000;
iobp->j_int_vec = 0x0000;
pthread_mutex_init(&iobp->int_vec_lock, NULL); /* FIXME: to go away ! */
/* Reset Status Register section 11.2 of PRM 1.2 */
iobp->rset_stat = 0x0004; /* POR bit */
/* CPU throttle control section 16.1 of PRM 1.2 */
iobp->tm_stat_ctl = 0x0000;
/* EFUSE Registers section 18.8 of PRM 1.2 */
iobp->proc_ser_num = 0x0000;
iobp->iob_fuse = 0x0000;
/* Internal Margin Register section 19.1.3 of PRM 1.2 */
iobp->int_mrgn_reg = 0x0000;
/* IOB Visibility Port Support section 19.2 of PRM 1.2 */
iobp->l2_vis_control = 0x0000;
iobp->l2_vis_mask_a = 0x0000;
iobp->l2_vis_mask_b = 0x0000;
iobp->l2_vis_compare_a = 0x0000;
iobp->l2_vis_compare_b = 0x0000;
iobp->l2_trig_delay = 0x0000;
iobp->iob_vis_select = 0x0000;
iobp->db_enet_control = 0x0000;
iobp->db_enet_idleval = 0x0000;
iobp->db_jbus_control = 0x0000;
iobp->db_jbus_mask0 = 0x0000;
iobp->db_jbus_mask1 = 0x0000;
iobp->db_jbus_mask2 = 0x0000;
iobp->db_jbus_mask3 = 0x0000;
iobp->db_jbus_compare0 = 0x0000;
iobp->db_jbus_compare1 = 0x0000;
iobp->db_jbus_compare2 = 0x0000;
iobp->db_jbus_compare3 = 0x0000;
iobp->db_jbus_count = 0x0000;
}
static void ss_clock_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
ss_clock_t * clockp;
npp = (ss_proc_t *)config_devp->devp;
clockp = npp->clockp;
/* Clock Unit section 11.1 of PRM 1.2 */
clockp->divider = 0x80200200101004;
clockp->control = 0x0000;
clockp->dll_control = 0x0000;
clockp->dll_bypass = 0x0000;
clockp->jbus_sync = 0x0000;
clockp->dram_sync = 0x0000;
clockp->version = 0x0000;
}
static void ss_l2_ctl_init(config_dev_t * config_devp)
{
uint_t bank, idx;
ss_proc_t * npp;
ss_l2_cache_t * l2p;
npp = (ss_proc_t *)config_devp->devp;
l2p = npp->l2p;
for (bank=0; bank<npp->num_l2banks; bank++) {
l2p->control[bank] = L2_DIS;
l2p->bist_ctl[bank] = 0x0;
l2p->error_enable[bank] = 0x0;
l2p->error_status[bank] = 0x0;
l2p->error_address[bank] = 0x0;
l2p->error_inject[bank] = 0x0;
}
l2p->diag_datap = Xmalloc(L2_DATA_SIZE);
l2p->diag_tagp = Xmalloc(L2_TAG_SIZE);
l2p->diag_vuadp = Xmalloc(L2_VUAD_SIZE);
for (idx=0; idx<L2_DATA_SIZE/8; idx++) {
l2p->diag_datap[idx] = 0xdeadbeef;
}
for (idx=0; idx<L2_TAG_SIZE/8; idx++) {
l2p->diag_tagp[idx] = 0xdeadbeef;
}
for (idx=0; idx<L2_VUAD_SIZE/8; idx++) {
l2p->diag_vuadp[idx] = 0xdeadbeef;
}
}
static void ss_dram_ctl_init(config_dev_t * config_devp)
{
uint_t bidx;
ss_proc_t * npp;
ss_dram_bank_t * dbp;
npp = (ss_proc_t *)config_devp->devp;
for (bidx=0; bidx<npp->num_mbanks; bidx++) {
/* DRAM controller section 15.5 of PRM 1.2 */
dbp = &(npp->mbankp[bidx]);
dbp->cas_addr_width = 0xb ;
dbp->ras_addr_width = 0xf ;
dbp->cas_lat = 0x3 ;
dbp->scrub_freq = 0xfff ;
dbp->refresh_freq = 0x514 ;
dbp->refresh_counter = 0x0 ;
dbp->scrub_enable = 0x0 ;
dbp->trrd = 0x2 ;
dbp->trc = 0xc ;
dbp->dram_trcd = 0x3 ;
dbp->twtr = 0x0 ;
dbp->trtw = 0x0 ;
dbp->trtp = 0x2 ;
dbp->tras = 0x9 ;
dbp->trp = 0x3 ;
dbp->twr = 0x3 ;
dbp->trfc = 0x27 ;
dbp->tmrd = 0x2 ;
dbp->tiwtr = 0x2 ;
dbp->precharge_wait = 0x55 ;
dbp->dimm_stack = 0x0 ;
dbp->ext_wr_mode2 = 0x0 ;
dbp->ext_wr_mode1 = 0x400 ;
dbp->ext_wr_mode3 = 0x0 ;
dbp->wair_control = 0x1 ;
dbp->rank1_present = 0x0 ;
dbp->channel_disabled = 0x0 ;
dbp->sel_lo_addr_bits = 0x0 ;
dbp->dimm_init = 0x0 ;
dbp->sw_dv_count = 0x1 ;
dbp->hw_dmux_clk_inv = 0x0 ;
dbp->pad_en_clk_inv = 0x3<<2 ;
dbp->mode_write_status = 0x0 ;
dbp->init_status = 0x0 ;
dbp->dimm_present = 0x3 ;
dbp->failover_status = 0x0 ;
dbp->failover_mask = 0x0 ;
/* Performance counter section 10.3 of PRM 1.1 */
dbp->perf_ctl = 0x0 ;
dbp->perf_count = 0x0 ;
/* Error handling section 12.9 of PRM 1.1 */
dbp->error_status = 0x0 ; /* FIXME: only bits 56-16 reset on POR .. everything else to be preserved */
dbp->error_address = 0x0 ; /* FIXME: bits 39-4 to be preserved accross POR */
dbp->error_inject = 0x0 ;
dbp->error_counter = 0x0 ; /* FIXME: bits 17-0 preserved accross reset */
dbp->error_location = 0x0 ; /* FIXME: bits 35-0 preserved accross reset */
/* Power management section 16.2 of PRM 1.1 */
dbp->open_bank_max = 0x1ffff ;
dbp->prog_time_cntr = 0xffff ;
dbp->dbg_trg_en = (0x1<<7) | (0x1) ; /* Hardware debug section 19.1 of PRM 1.1 */
}
}
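/*
 * Note on the register access handlers that follow: they all share the
 * same shape. Only 64bit loads and stores are accepted (see the FIXME
 * notes), the register is selected from the low bits of the offset,
 * stores are checked against a MASK64(hi,lo) of architected bits and
 * warn when reserved fields are written, and loads only update *regp
 * when the destination register is not %g0. A minimal sketch of the
 * convention, using a hypothetical SOME_REG with bits 5:0 defined:
 *
 *	case SOME_REG:
 *		if (0LL != (val & ~MASK64(5,0))) goto write_reserved;
 *		unitp->some_reg = val;
 *		break;
 */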
static bool_t ss_ssi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
uint_t reg;
uint64_t val;
ss_proc_t *npp;
ss_ssi_t * ssip;
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
ssip = npp->ssip;
reg = off & 0x1ffff;
switch (op) {
case MA_st64:
val = *regp;
switch (reg) {
case NI_SSI_TIMEOUT:
if (0LL != (val & ~(MASK64(24,0)))) goto write_reserved;
ssip->timeout &= ~(MASK64(24,0));
ssip->timeout |= val;
break;
case NI_SSI_LOG:
if (0LL != (val & ~(MASK64(1,0)))) goto write_reserved;
ssip->log &= ~val;
break;
default:
/* illegal reg - an error */
return false;
}
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in ssi:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ss_ssi_reg_name(reg), reg ) );
return false;
case MA_ldu64:
switch (reg) {
case NI_SSI_TIMEOUT:
val = ssip->timeout & MASK64(24,0);
break;
case NI_SSI_LOG:
val = ssip->log & MASK64(1,0);
break;
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t ss_jbi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
uint_t reg;
uint64_t val;
ss_proc_t *npp;
ss_jbi_t * jbip;
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
jbip = npp->jbip;
if (npp->rust_jbi_stores &&
op == MA_st64 && (off & 0x7000000) == 0x7000000)
return true;
reg = off & 0xfffff;
switch (op) {
case MA_st64:
val = *regp;
/* FIXME!! ignore write to reserved bits for BRINGUP ONLY */
#define ASSIGN_JBI(_n, _m) do { \
jbip->_n &= ~(_m); \
jbip->_n |= (val & (_m)); \
} while (0)
#define ASSIGN_W1C_JBI(_n, _m) do { \
jbip->_n &= (~val | ~(_m)); \
} while (0)
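/*
 * Note on the two store macros above: ASSIGN_JBI() is a read-modify-write
 * under the architected mask (writes to reserved bits are ignored for
 * bringup, per the FIXME above), while ASSIGN_W1C_JBI() gives
 * write-one-to-clear behaviour, i.e. jbip->_n &= ~(val & _m). For example,
 * if error_log has bit 4 set, storing a value with bit 4 set clears that
 * bit and leaves the other logged bits untouched.
 */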
switch (reg) {
/* JBUS Interface section 14.1 of PRM 1.4 */
case NI_JBI_CONFIG1:
ASSIGN_JBI( config1, MASK64(50,44)|MASK64(39,38)|
MASK64(31,22)|MASK64(1,0) );
break;
case NI_JBI_CONFIG2:
ASSIGN_JBI( config2, MASK64(30,28)|MASK64(26,24)|
MASK64(21,20)|MASK64(17,8)|MASK64(3,0) );
break;
case NI_JBI_INT_MRGN:
ASSIGN_JBI( int_mrgn, MASK64(12,8)|MASK64(4,0) );
break;
case NI_JBI_DEBUG:
ASSIGN_JBI( debug, MASK64(0,0) );
break;
case NI_JBI_DEBUG_ARB:
ASSIGN_JBI( debug_arb, MASK64(24,24)|MASK64(22,18)|
MASK64(16,0) );
break;
case NI_JBI_PERF_CTL:
ASSIGN_JBI( perf_ctl, MASK64(7,0) );
break;
case NI_JBI_PERF_CNT:
ASSIGN_JBI( perf_cnt, MASK64(63,0) );
break;
case NI_JBI_ERR_INJECT:
ASSIGN_JBI( err_inject, MASK64(30,0) );
break;
case NI_JBI_ERR_CONFIG:
ASSIGN_JBI( err_config, MASK64(4,2) );
break;
case NI_JBI_ERROR_LOG:
ASSIGN_W1C_JBI( error_log, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_ERROR_OVF:
ASSIGN_W1C_JBI( error_ovf, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_LOG_ENB:
ASSIGN_JBI( log_enb, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_SIG_ENB:
ASSIGN_JBI( sig_enb, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_LOG_ADDR:
case NI_JBI_LOG_CTRL:
case NI_JBI_LOG_DATA0:
case NI_JBI_LOG_DATA1:
case NI_JBI_LOG_PAR:
case NI_JBI_LOG_ARB:
goto write_reserved;
case NI_JBI_LOG_NACK:
ASSIGN_W1C_JBI( log_nack, MASK64(31,0) );
break;
case NI_JBI_L2_TIMEOUT:
ASSIGN_JBI( l2_timeout, MASK64(31,0) );
break;
case NI_JBI_ARB_TIMEOUT:
ASSIGN_JBI( arb_timeout, MASK64(31,0) );
break;
case NI_JBI_TRANS_TIMEOUT:
ASSIGN_JBI( trans_timeout, MASK64(31,0) );
break;
case NI_JBI_INTR_TIMEOUT:
ASSIGN_JBI( intr_timeout, MASK64(31,0) );
break;
case NI_JBI_MEMSIZE:
ASSIGN_JBI( memsize, MASK64(37,30) );
break;
default:
/* illegal reg - an error */
return false;
}
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in jbi:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ss_jbi_reg_name(reg), reg ) );
return false;
case MA_ldu64:
#define RETRIEVE_JBI(_n, _m) do { val = ((jbip->_n) & (_m)); } while (0)
switch (reg) {
/* JBUS Interface section 14.1 of PRM 1.4 */
case NI_JBI_CONFIG1:
RETRIEVE_JBI( config1, MASK64(63,0) );
break;
case NI_JBI_CONFIG2:
RETRIEVE_JBI( config2, MASK64(63,0) );
break;
case NI_JBI_INT_MRGN:
RETRIEVE_JBI( int_mrgn, MASK64(12,8)|MASK64(4,0) );
break;
case NI_JBI_DEBUG:
RETRIEVE_JBI( debug, MASK64(63,0) );
break;
case NI_JBI_DEBUG_ARB:
RETRIEVE_JBI( debug_arb, MASK64(63,0) );
break;
case NI_JBI_PERF_CTL:
RETRIEVE_JBI( perf_ctl, MASK64(7,0) );
break;
case NI_JBI_PERF_CNT:
RETRIEVE_JBI( perf_cnt, MASK64(63,0) );
break;
case NI_JBI_ERR_INJECT:
RETRIEVE_JBI( err_inject, MASK64(30,0) );
break;
/* JBI Error Registers section 12.12.2 of PRM 1.4 */
case NI_JBI_ERR_CONFIG:
RETRIEVE_JBI( err_config, MASK64(4,2) );
break;
case NI_JBI_ERROR_LOG:
RETRIEVE_JBI( error_log, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_ERROR_OVF:
RETRIEVE_JBI( error_ovf, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_LOG_ENB:
RETRIEVE_JBI( log_enb, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_SIG_ENB:
RETRIEVE_JBI( sig_enb, MASK64(28,24)|MASK64(17,8)|
MASK64(5,4)|MASK64(2,0) );
break;
case NI_JBI_LOG_ADDR:
RETRIEVE_JBI( log_addr, MASK64(63,0) );
break;
case NI_JBI_LOG_CTRL:
RETRIEVE_JBI( log_ctrl, MASK64(63,0) );
break;
case NI_JBI_LOG_DATA0:
RETRIEVE_JBI( log_data0, MASK64(63,0) );
break;
case NI_JBI_LOG_DATA1:
RETRIEVE_JBI( log_data1, MASK64(63,0) );
break;
case NI_JBI_LOG_PAR:
RETRIEVE_JBI( log_par, MASK64(32,32)|MASK64(25,20)|
MASK64(13,8)|MASK64(6,0) );
break;
case NI_JBI_LOG_NACK:
RETRIEVE_JBI( log_nack, MASK64(31,0) );
break;
case NI_JBI_LOG_ARB:
RETRIEVE_JBI( log_arb, MASK64(34,32)|MASK64(26,24)|
MASK64(22,16)|MASK64(14,8)|MASK64(6,0) );
break;
case NI_JBI_L2_TIMEOUT:
RETRIEVE_JBI( l2_timeout, MASK64(31,0) );
break;
case NI_JBI_ARB_TIMEOUT:
RETRIEVE_JBI( arb_timeout, MASK64(31,0) );
break;
case NI_JBI_TRANS_TIMEOUT:
RETRIEVE_JBI( trans_timeout, MASK64(31,0) );
break;
case NI_JBI_INTR_TIMEOUT:
RETRIEVE_JBI( intr_timeout, MASK64(31,0) );
break;
case NI_JBI_MEMSIZE:
RETRIEVE_JBI( memsize, MASK64(37,30) );
break;
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t ss_jbus_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
uint_t reg, target;
uint64_t val;
ss_proc_t *npp;
ss_strand_t *nsp;
ss_jbus_t *jbusp;
sparcv9_cpu_t *v9p;
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
v9p = sp->specificp;
nsp = v9p->impl_specificp;
jbusp = npp->jbusp;
reg = off & 0xffff;
switch (op) {
case MA_st64:
val = *regp;
#define ASSIGN_JBUS(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
jbusp->_n = val; \
} while (0)
if (reg >= NI_J_INT_BUSY) {
reg = reg & 0xf00; /* for debug output */
if (reg < NI_J_INT_ABUSY) {
ASSERT(reg == 0x900);
target = (off >> 3) & (IOB_JBUS_TARGETS-1);
ASSIGN_JBUS( j_int_busy[target], MASK64(5,5) );
} else {
/* aliased to target thread's register */
ASSERT(reg == 0xb00);
target = nsp->vcore_id;
ASSIGN_JBUS( j_int_busy[target], MASK64(5,5) );
}
return true;
}
switch (reg) {
case NI_J_INT_DATA0:
case NI_J_INT_DATA1:
case NI_J_INT_ADATA0:
case NI_J_INT_ADATA1:
goto write_reserved;
default:
return false; /* illegal reg - an error */
}
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in JBUS:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ss_jbus_reg_name(reg), reg ) );
return false;
case MA_ldu64:
reg = reg & 0xf00; /* for debug output */
switch (reg) {
case NI_J_INT_DATA0:
target = (off >> 3) & (IOB_JBUS_TARGETS-1);
val = jbusp->j_int_data0[target];
break;
case NI_J_INT_DATA1:
target = (off >> 3) & (IOB_JBUS_TARGETS-1);
val = jbusp->j_int_data1[target];
break;
case NI_J_INT_ADATA0:
target = nsp->vcore_id;
val = jbusp->j_int_data0[target];
break;
case NI_J_INT_ADATA1:
target = nsp->vcore_id;
val = jbusp->j_int_data1[target];
break;
case NI_J_INT_BUSY:
target = (off >> 3) & (IOB_JBUS_TARGETS-1);
val = jbusp->j_int_busy[target];
break;
case NI_J_INT_ABUSY:
target = nsp->vcore_id;
val = jbusp->j_int_busy[target];
break;
default:
return false; /* illegal reg - an error */
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t ss_iob_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
uint_t reg;
uint64_t val;
ss_proc_t *npp;
ss_iob_t * iobp;
uint_t device;
/*
* FIXME: For the moment we only support 64bit accesses to registers.
* we need to do better than this, but confirm partial access behaviour
* with the Niagara team.
*/
if (MA_ldu64!=op && MA_st64!=op) return false;
if (off & 7) return false; /* FIXME: 64bit access support only for the moment */
npp = (ss_proc_t *)config_addrp->config_devp->devp;
iobp = npp->iobp;
device = off >> 3;
reg = off & 0xffff;
pthread_mutex_lock( &iobp->iob_lock );
switch (op) {
case MA_st64:
val = *regp;
#define ASSIGN_IOB(_n, _m) do { \
iobp->_n = (val & (_m)); \
if (0LL != (val & ~(_m))) goto write_reserved; \
} while (0)
DBGSSI( lprintf(sp->gid, "store to iob reg: 0x%x (%s) value 0x%LLx\n", reg, ss_iob_reg_name(reg), val););
/* IOB Interrupt Registers section 7.3 of PRM 1.2 */
switch (reg) {
case NI_INT_MAN0: /* internal */
case NI_INT_MAN1: /* errors */
case NI_INT_MAN2: /* SSI */
case NI_INT_MAN3: /* reserved */
ASSIGN_IOB( int_man[device], MASK64(12,8)|MASK64(5,0) );
break;
case NI_INT_CTL0: /* internal */
case NI_INT_CTL1: /* errors */
case NI_INT_CTL2: /* SSI */
case NI_INT_CTL3: /* reserved */
device = (off >> 3) & (IOB_DEV_MAX-1);
if (0LL != (val & ~(MASK64(2,1)))) goto write_reserved;
do {
uint8_t *int_ctl;
int_ctl = &iobp->int_ctl[device];
*int_ctl = (val & IOB_INT_CTL_MASK) | (*int_ctl & ~IOB_INT_CTL_MASK);
if (val & IOB_INT_CTL_CLEAR) {
*int_ctl &= ~IOB_INT_CTL_PEND;
}
/*
* OK PRM 1.4 S 7.2.4 indicates that if mask is cleared, and pending
* is still set then an interrupt is delivered ... i.e. int_vec is set (again).
*/
if (((*int_ctl) & IOB_INT_CTL_PEND) && !((*int_ctl) & IOB_INT_CTL_MASK)) {
*int_ctl &= ~IOB_INT_CTL_PEND;
if (device == IOB_DEV_SSI) {
pthread_mutex_unlock( &iobp->iob_lock );
npp->config_procp->proc_typep->ext_signal(npp->config_procp, ES_SSI, NULL);
return true;
}
}
} while (0);
break;
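/*
 * Summary of the INT_CTL write handling above: the CLEAR bit clears the
 * pending flag, the MASK bit is taken from the written value, and if
 * pending remains set while the mask has just been cleared the interrupt
 * is delivered again (for the SSI device this is done by dropping
 * iob_lock and re-raising ES_SSI through ext_signal).
 */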
case NI_INT_VEC_DIS:
if (IOB_INT_VEC_RESUME(val)) {
if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
goto write_reserved;
pthread_mutex_lock(&iobp->int_vec_lock);
iobp->int_vec_dis = val;
npp->config_procp->proc_typep->ext_signal(
npp->config_procp, ES_RESUME, NULL);
pthread_mutex_unlock(&iobp->int_vec_lock);
} else if (IOB_INT_VEC_IDLE(val)) {
simcpu_t *sp;
if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
goto write_reserved;
pthread_mutex_lock(&iobp->int_vec_lock);
iobp->int_vec_dis = val;
npp->config_procp->proc_typep->ext_signal(
npp->config_procp, ES_IDLE, NULL);
pthread_mutex_unlock(&iobp->int_vec_lock);
} else
if (IOB_INT_VEC_RESET(val)) {
uint_t tidx;
if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
goto write_reserved;
pthread_mutex_lock(&iobp->int_vec_lock);
iobp->int_vec_dis = val;
npp->config_procp->proc_typep->ext_signal(
npp->config_procp, ES_RESET, NULL);
pthread_mutex_unlock(&iobp->int_vec_lock);
} else
if (IOB_INT_VEC_INTR(val)) {
uint_t tidx;
if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0))))
goto write_reserved;
niagara_send_xirq(sp, val);
}
break;
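/*
 * The INT_VEC_DIS writes above all use the same field layout, as implied
 * by the masks and by niagara_send_xirq()/ss_ext_signal(): bits 17:16
 * select the action (resume, idle, reset or interrupt dispatch),
 * bits 12:8 name the target virtual core and bits 5:0 give the interrupt
 * vector to raise.
 */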
case NI_J_INT_VEC:
ASSIGN_IOB( j_int_vec, MASK64(5,0) );
break;
case NI_RSET_STAT:
ASSIGN_IOB( rset_stat, MASK64(3,1) );
break;
case NI_TM_STAT_CTL:
ASSIGN_IOB( tm_stat_ctl,MASK64(63,63)|MASK64(31,0) );
break;
case NI_PROC_SER_NUM:
case NI_CORE_AVAIL:
case NI_IOB_FUSE:
EXEC_WARNING( ("Attempted write to read only register in IOB:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ss_iob_reg_name(reg), reg ) );
goto access_failed; /* RO regs */
case NI_INT_MRGN_REG:
ASSIGN_IOB( int_mrgn_reg, MASK64(4,0) );
break;
case NI_L2_VIS_CONTROL:
ASSIGN_IOB( l2_vis_control, MASK64(3,2) );
break;
case NI_L2_VIS_MASK_A:
ASSIGN_IOB( l2_vis_mask_a, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_VIS_MASK_B:
ASSIGN_IOB( l2_vis_mask_b, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_VIS_COMPARE_A:
ASSIGN_IOB( l2_vis_compare_a, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_VIS_COMPARE_B:
ASSIGN_IOB( l2_vis_compare_b, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_TRIG_DELAY:
ASSIGN_IOB( l2_trig_delay, MASK64(31,0) );
break;
case NI_IOB_VIS_SELECT:
ASSIGN_IOB( iob_vis_select, MASK64(3,0) );
break;
case NI_DB_ENET_CONTROL:
ASSIGN_IOB( db_enet_control, MASK64(8,8)|MASK64(6,5)|
MASK64(3,0) );
break;
case NI_DB_ENET_IDLEVAL:
ASSIGN_IOB( db_enet_idleval, MASK64(39,0) );
break;
case NI_DB_JBUS_CONTROL:
ASSIGN_IOB( db_jbus_control, MASK64(16,16)|MASK64(6,4)|
MASK64(2,0) );
break;
case NI_DB_JBUS_MASK0:
ASSIGN_IOB( db_jbus_mask0, MASK64(45,0) );
break;
case NI_DB_JBUS_MASK1:
ASSIGN_IOB( db_jbus_mask1, MASK64(45,0) );
break;
case NI_DB_JBUS_MASK2:
ASSIGN_IOB( db_jbus_mask2, MASK64(45,0) );
break;
case NI_DB_JBUS_MASK3:
ASSIGN_IOB( db_jbus_mask3, MASK64(45,0) );
break;
case NI_DB_JBUS_COMPARE0:
ASSIGN_IOB( db_jbus_compare0, MASK64(43,0) );
break;
case NI_DB_JBUS_COMPARE1:
ASSIGN_IOB( db_jbus_compare1, MASK64(43,0) );
break;
case NI_DB_JBUS_COMPARE2:
ASSIGN_IOB( db_jbus_compare2, MASK64(43,0) );
break;
case NI_DB_JBUS_COMPARE3:
ASSIGN_IOB( db_jbus_compare3, MASK64(43,0) );
break;
case NI_DB_JBUS_COUNT:
ASSIGN_IOB( db_jbus_count, MASK64(8,0) );
break;
default:
EXEC_WARNING( ("Attempted write to illegal register in IOB:"
"Write 0x%llx to register offset 0x%x",
val, reg ) );
goto access_failed; /* illegal reg - an error */
}
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in IOB:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ss_iob_reg_name(reg), reg ) );
pthread_mutex_unlock( &iobp->iob_lock );
return true;
case MA_ldu64:
#define RETRIEVE_IOB(_n, _m) do { val = ((iobp->_n) & (_m)); } while (0)
switch (reg) {
case NI_INT_MAN0: /* internal */
case NI_INT_MAN1: /* errors */
case NI_INT_MAN2: /* SSI */
case NI_INT_MAN3: /* reserved */
val = iobp->int_man[device];
ASSERT( 0LL == (val & ~(MASK64(12,8)|MASK64(5,0))) );
break;
case NI_INT_CTL0: /* internal */
case NI_INT_CTL1: /* errors */
case NI_INT_CTL2: /* SSI */
case NI_INT_CTL3: /* reserved */
val = iobp->int_ctl[device];
ASSERT( 0LL == (val & ~0x5));
break;
case NI_J_INT_VEC:
RETRIEVE_IOB( j_int_vec, MASK64(5,0) );
break;
case NI_INT_VEC_DIS:
EXEC_WARNING( ("Attempted read to WO register in IOB: %s",
ss_iob_reg_name(reg)) );
goto access_failed;
case NI_RSET_STAT:
RETRIEVE_IOB( rset_stat, MASK64(11,9)|MASK64(3,1) );
break;
case NI_TM_STAT_CTL:
RETRIEVE_IOB( tm_stat_ctl, MASK64(63,63)|MASK64(31,0) );
break;
case NI_PROC_SER_NUM:
RETRIEVE_IOB( proc_ser_num, MASK64(63,0) );
break;
case NI_CORE_AVAIL:
val = npp->core_avail;
break;
case NI_IOB_FUSE:
RETRIEVE_IOB( iob_fuse, MASK64(31,0) );
break;
case NI_INT_MRGN_REG:
RETRIEVE_IOB( int_mrgn_reg, MASK64(4,0) );
break;
case NI_L2_VIS_CONTROL:
RETRIEVE_IOB( l2_vis_control, MASK64(3,0) );
break;
case NI_L2_VIS_MASK_A:
RETRIEVE_IOB( l2_vis_mask_a, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_VIS_MASK_B:
RETRIEVE_IOB( l2_vis_mask_b, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_VIS_COMPARE_A:
RETRIEVE_IOB( l2_vis_compare_a, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_VIS_COMPARE_B:
RETRIEVE_IOB( l2_vis_compare_b, MASK64(51,48)|MASK64(44,40)|
MASK64(33,8)|MASK64(5,2) );
break;
case NI_L2_TRIG_DELAY:
RETRIEVE_IOB( l2_trig_delay, MASK64(31,0) );
break;
case NI_IOB_VIS_SELECT:
RETRIEVE_IOB( iob_vis_select, MASK64(3,0) );
break;
case NI_DB_ENET_CONTROL:
RETRIEVE_IOB( db_enet_control, MASK64(8,8)|MASK64(6,5)|
MASK64(3,0) );
break;
case NI_DB_ENET_IDLEVAL:
RETRIEVE_IOB( db_enet_idleval, MASK64(39,0) );
break;
case NI_DB_JBUS_CONTROL:
RETRIEVE_IOB( db_jbus_control, MASK64(16,16)|MASK64(6,4)|
MASK64(2,0) );
break;
case NI_DB_JBUS_MASK0:
RETRIEVE_IOB( db_jbus_mask0, MASK64(45,0) );
break;
case NI_DB_JBUS_MASK1:
RETRIEVE_IOB( db_jbus_mask1, MASK64(45,0) );
break;
case NI_DB_JBUS_MASK2:
RETRIEVE_IOB( db_jbus_mask2, MASK64(45,0) );
break;
case NI_DB_JBUS_MASK3:
RETRIEVE_IOB( db_jbus_mask3, MASK64(45,0) );
break;
case NI_DB_JBUS_COMPARE0:
RETRIEVE_IOB( db_jbus_compare0, MASK64(43,0) );
break;
case NI_DB_JBUS_COMPARE1:
RETRIEVE_IOB( db_jbus_compare1, MASK64(43,0) );
break;
case NI_DB_JBUS_COMPARE2:
RETRIEVE_IOB( db_jbus_compare2, MASK64(43,0) );
break;
case NI_DB_JBUS_COMPARE3:
RETRIEVE_IOB( db_jbus_compare3, MASK64(43,0) );
break;
case NI_DB_JBUS_COUNT:
RETRIEVE_IOB( db_jbus_count, MASK64(8,0) );
break;
default:
goto access_failed; /* illegal reg - an error */
}
DBGSSI( lprintf(sp->gid, "read from iob reg: 0x%x (%s) value 0x%LLx\n", reg, ss_iob_reg_name(reg), val););
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
pthread_mutex_unlock( &iobp->iob_lock );
return true;
access_failed:;
pthread_mutex_unlock( &iobp->iob_lock );
return false;
}
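/*
 * Lock discipline for ss_iob_access(): iob_lock is taken on entry and
 * released on every exit path. The one subtlety is the SSI re-delivery
 * case in the INT_CTL store handling, which drops the lock before calling
 * ext_signal() because the ES_SSI handler takes iob_lock itself.
 */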
static bool_t ss_clock_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
uint_t reg;
uint64_t val;
ss_proc_t *npp;
ss_clock_t * clockp;
/*
* FIXME: For the moment we only support 64bit accesses to registers.
* we need to do better than this, but confirm partial access behaviour
* with the Niagara team.
*/
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
clockp = npp->clockp;
reg = off & 0xff;
switch (op) {
case MA_st64:
val = *regp;
#define ASSIGN_CLK(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
clockp->_n = val; \
} while (0)
switch (reg) {
/* Clock Unit section 11.1 of PRM 1.2 */
case SS_CLOCK_DIVIDER:
ASSIGN_CLK( divider, MASK64(61,28)|MASK64(26,26)|
MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
break;
case SS_CLOCK_CONTROL:
ASSIGN_CLK( control, MASK64(63,61)|MASK64(54,48)|
MASK64(34,29)|MASK64(27,27)|MASK64(23,0) );
break;
case SS_CLOCK_DLL_CONTROL:
ASSIGN_CLK( dll_control, MASK64(44,40)|MASK64(38,38)|
MASK64(36,32)|MASK64(19,0) );
break;
case SS_CLOCK_JBUS_SYNC:
ASSIGN_CLK( jbus_sync, MASK64(39,38)|MASK64(36,30)|
MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
break;
case SS_CLOCK_DLL_BYPASS:
ASSIGN_CLK( dll_bypass, MASK64(61,56)|MASK64(52,48)|
MASK64(45,40)|MASK64(36,32)|MASK64(29,24)|
MASK64(20,16)|MASK64(13,8)|MASK64(4,0) );
break;
case SS_CLOCK_DRAM_SYNC:
ASSIGN_CLK( dram_sync, MASK64(39,38)|MASK64(36,30)|
MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
break;
case SS_CLOCK_VERSION:
ASSIGN_CLK( version, 0LL );
break;
default:
/* illegal reg - an error */
return false;
}
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in clock unit:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ss_clock_reg_name(reg), reg ) );
return false;
case MA_ldu64:
#define RETRIEVE_CLK(_n, _m) do { val = ((clockp->_n) & (_m)); } while (0)
switch (reg) {
/* Clock Unit section 11.1 of PRM 1.2 */
case SS_CLOCK_DIVIDER:
RETRIEVE_CLK( divider, MASK64(61,28)|MASK64(26,26)|
MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
break;
case SS_CLOCK_CONTROL:
RETRIEVE_CLK( control, MASK64(63,61)|MASK64(54,48)|
MASK64(34,29)|MASK64(27,27)|MASK64(23,0) );
break;
case SS_CLOCK_DLL_CONTROL:
RETRIEVE_CLK( dll_control, MASK64(44,40)|MASK64(38,38)|
MASK64(36,32)|MASK64(19,0) );
break;
case SS_CLOCK_JBUS_SYNC:
RETRIEVE_CLK( jbus_sync, MASK64(39,38)|MASK64(36,30)|
MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
break;
case SS_CLOCK_DLL_BYPASS:
RETRIEVE_CLK( dll_bypass, MASK64(61,56)|MASK64(52,48)|
MASK64(45,40)|MASK64(36,32)|MASK64(29,24)|
MASK64(20,16)|MASK64(13,8)|MASK64(4,0) );
break;
case SS_CLOCK_DRAM_SYNC:
RETRIEVE_CLK( dram_sync, MASK64(39,38)|MASK64(36,30)|
MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) );
break;
case SS_CLOCK_VERSION:
RETRIEVE_CLK( version, 0LL );
break;
case SS_DBG_INIT:
val = 0;
break;
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t ss_l2_ctl_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t * npp;
uint_t reg, bank;
uint64_t val;
ss_l2_cache_t * l2p;
/*
* FIXME: For the moment we only support 64bit accesses to registers.
* we need to do better than this, but confirm partial access behaviour
* with the Niagara team.
*/
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
l2p = npp->l2p;
bank = (off >> 6) & 0x3;
reg = (off >> 32) & 0xf;
switch (op) {
case MA_st64:
val = *regp;
if (reg >= 0x8) {
#define ASSIGN_L2(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
l2p->_n[bank] = val; \
} while (0)
switch (reg) {
/* L2 BIST Control Reg section 18.7.2 of PRM 1.4 */
case SS_L2_TAG_BIST:
ASSIGN_L2( bist_ctl, MASK64(6,0) );
if (val & 1) l2p->bist_ctl[bank] |= 0x400;
break;
/* L2 Control Register section 18.5.1 of PRM 1.2 */
case SS_L2_CONTROL:
ASSIGN_L2( control, MASK64(21,0) );
break;
/* Error handling section 12.6 of PRM 1.1 */
case SS_L2_ERROR_ENABLE:
ASSIGN_L2( error_enable, MASK64(2,0) );
break;
case SS_L2_ERROR_STATUS:
l2p->error_status[bank] &= ~val;
l2p->error_status[bank] &=
MASK64(63,62)|MASK64(53,35);
l2p->error_status[bank] |= val &
(MASK64(61,61)|MASK64(59,54)|MASK64(31,0));
break;
case SS_L2_ERROR_ADDRESS:
ASSIGN_L2( error_address, MASK64(39,4) );
break;
case SS_L2_ERROR_INJECT:
ASSIGN_L2( error_inject, MASK64(1,0) );
break;
default:
/* illegal reg - an error */
return false;
}
} else
/* L2 Cache Diagnostic Access section 18.6 of PRM 1.2 */
if (reg < 0x4) {
uint64_t idx;
/* index stores to a 32bit word and its ECC+rsvd bits */
idx = off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD) >> 2;
/* put oddeven select bit low so data is in addr order */
idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
l2p->diag_datap[idx] = val;
} else
if (reg < 0x6) {
uint64_t idx;
/*index stores to a tag and its ECC+rsvd bits */
idx = off & (L2_WAY | L2_LINE | L2_BANK) >> 6;
l2p->diag_tagp[idx] = val;
} else {
uint64_t idx;
/* index valid/dirty or alloc/used bits and parity */
idx = off & (L2_LINE | L2_BANK) >> 6;
idx |= ((off & L2_VDSEL) >> 10);
l2p->diag_vuadp[idx] = val;
}
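/*
 * Diagnostic array indexing used above (and mirrored in the load path
 * below): the data array is indexed by the way/line/bank/word address
 * bits with the odd/even select bit folded into bit 0 so data sits in
 * address order, the tag array is indexed by way/line/bank, and the
 * VUAD array is indexed by line/bank with the valid-dirty versus
 * alloc-used select bit folded in.
 */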
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in l2 cache controller:"
"Write 0x%llx to bank %d, register %s (offset 0x%x)",
val, bank, ss_l2_ctrl_reg_name(reg), reg ) );
return false;
case MA_ldu64:
if (reg >= 0x8) {
#define RETRIEVE_L2(_n, _m) do { val = ((l2p->_n[bank]) & (_m)); } while (0)
switch (reg) {
/* L2 BIST Control Reg section 18.7.2 of PRM 1.4 */
case SS_L2_TAG_BIST:
RETRIEVE_L2( bist_ctl, MASK64(10,0) );
break;
/* L2 Control Register section 18.5.1 of PRM 1.2 */
case SS_L2_CONTROL:
RETRIEVE_L2( control, MASK64(63,57)|MASK64(15,0) );
break;
/* Error handling section 12.6 of PRM 1.1 */
case SS_L2_ERROR_ENABLE:
RETRIEVE_L2( error_enable, MASK64(2,0) );
break;
case SS_L2_ERROR_STATUS:
RETRIEVE_L2( error_status,
MASK64(63,61)|MASK64(59,35)|MASK64(31,0) );
break;
case SS_L2_ERROR_ADDRESS:
RETRIEVE_L2( error_address, MASK64(39,4) );
break;
case SS_L2_ERROR_INJECT:
RETRIEVE_L2( error_inject, MASK64(1,0) );
break;
default:
/* illegal reg - an error */
return false;
}
} else
/* L2 Cache Diagnostic Access section 18.6 of PRM 1.2 */
if (reg < 0x4) {
uint64_t idx;
/* index retrieves a 32bit word and its ECC+rsvd bits */
idx = off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD) >> 2;
/* put oddeven select bit low so data is in addr order */
idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
val = l2p->diag_datap[idx];
} else
if (reg < 0x6) {
uint64_t idx;
/* index retrieves a tag and its ECC+rsvd bits */
idx = off & (L2_WAY | L2_LINE | L2_BANK) >> 6;
val = l2p->diag_tagp[idx];
} else {
uint64_t idx;
/* index valid/dirty or alloc/used bits and parity */
idx = off & (L2_LINE | L2_BANK) >> 6;
idx |= ((off & L2_VDSEL) >> 10);
val = l2p->diag_vuadp[idx];
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t ss_dram_ctl_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t * npp;
uint_t reg, bank;
uint64_t val;
ss_dram_bank_t * dbp;
/*
* FIXME: For the moment we only support 64bit accesses to registers.
* we need to do better than this, but confirm partial access behaviour
* with the Niagara team.
*/
npp = (ss_proc_t *)config_addrp->config_devp->devp;
if (MA_ldu64!=op && MA_st64!=op) return false;
bank = off >> 12;
ASSERT (bank < npp->num_mbanks); /* this should be enforced by the config_dev range */
dbp = &(npp->mbankp[bank]);
reg = off & ((1<<12)-1);
DBGMC( lprintf(sp->gid, "Memory controller bank %d : register %s\n", bank, ss_dram_ctrl_reg_name(reg)); );
switch (op) {
case MA_st64:
val = *regp;
#define ASSIGN_DB(_n, _m) do { \
dbp->_n &= ~(_m); \
dbp->_n |= (val & (_m)); \
} while (0)
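/*
 * Unlike the clock unit handler, DRAM register stores are a plain
 * read-modify-write under the architected mask, so reserved bits are
 * silently dropped rather than warned about; only SS_DRAM_DIMM_INIT
 * below checks explicitly for writes to reserved bits.
 */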
switch (reg) {
/* DRAM controller section 15.5 of RPM 1.1 */
case SS_DRAM_CAS_ADDR_WIDTH: ASSIGN_DB( cas_addr_width, MASK64(3, 0) ); break;
case SS_DRAM_RAS_ADDR_WIDTH: ASSIGN_DB( ras_addr_width, MASK64(3, 0) ); break;
case SS_DRAM_CAS_LAT: ASSIGN_DB( cas_lat, MASK64(2, 0) ); break;
case SS_DRAM_SCRUB_FREQ: ASSIGN_DB( scrub_freq, MASK64(11, 0) ); break;
case SS_DRAM_REFRESH_FREQ: ASSIGN_DB( refresh_freq, MASK64(12, 0) ); break;
case SS_DRAM_REFRESH_COUNTER: ASSIGN_DB( refresh_counter, MASK64(12, 0) ); break;
case SS_DRAM_SCRUB_ENABLE: ASSIGN_DB( scrub_enable, MASK64(0, 0) ); break;
case SS_DRAM_TRRD: ASSIGN_DB( trrd, MASK64(3, 0) ); break;
case SS_DRAM_TRC: ASSIGN_DB( trc, MASK64(4, 0) ); break;
case SS_DRAM_DRAM_TRCD: ASSIGN_DB( dram_trcd, MASK64(3, 0) ); break;
case SS_DRAM_TWTR: ASSIGN_DB( twtr, MASK64(3, 0) ); break;
case SS_DRAM_TRTW: ASSIGN_DB( trtw, MASK64(3, 0) ); break;
case SS_DRAM_TRTP: ASSIGN_DB( trtp, MASK64(2, 0) ); break;
case SS_DRAM_TRAS: ASSIGN_DB( tras, MASK64(3, 0) ); break;
case SS_DRAM_TRP: ASSIGN_DB( trp, MASK64(3, 0) ); break;
case SS_DRAM_TWR: ASSIGN_DB( twr, MASK64(3, 0) ); break;
case SS_DRAM_TRFC: ASSIGN_DB( trfc, MASK64(6, 0) ); break;
case SS_DRAM_TMRD: ASSIGN_DB( tmrd, MASK64(1, 0) ); break;
case SS_DRAM_TIWTR: ASSIGN_DB( tiwtr, MASK64(1, 0) ); break;
case SS_DRAM_PRECHARGE_WAIT: ASSIGN_DB( precharge_wait, MASK64(7, 0) ); break;
case SS_DRAM_DIMM_STACK: ASSIGN_DB( dimm_stack, MASK64(0, 0) ); break;
case SS_DRAM_EXT_WR_MODE2: ASSIGN_DB( ext_wr_mode2, MASK64(14, 0) ); break;
case SS_DRAM_EXT_WR_MODE1: ASSIGN_DB( ext_wr_mode1, MASK64(14, 0) ); break;
case SS_DRAM_EXT_WR_MODE3: ASSIGN_DB( ext_wr_mode3, MASK64(14, 0) ); break;
case SS_DRAM_WAIR_CONTROL: ASSIGN_DB( wair_control, MASK64(0, 0) ); break;
case SS_DRAM_RANK1_PRESENT: ASSIGN_DB( rank1_present, MASK64(0, 0) ); break;
case SS_DRAM_CHANNEL_DISABLED: ASSIGN_DB( channel_disabled, MASK64(0, 0) ); break;
case SS_DRAM_SEL_LO_ADDR_BITS: ASSIGN_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
case SS_DRAM_DIMM_INIT:
if (0LL != (val & ~(7))) goto write_reserved;
dbp->dimm_init = val;
/* DRAM Init sequence done is instantaneous */
dbp->init_status = 1;
break;
case SS_DRAM_SW_DV_COUNT: ASSIGN_DB( sw_dv_count, MASK64(2, 0) ); break;
case SS_DRAM_HW_DMUX_CLK_INV: ASSIGN_DB( hw_dmux_clk_inv, MASK64(0, 0) ); break;
case SS_DRAM_PAD_EN_CLK_INV: ASSIGN_DB( pad_en_clk_inv, MASK64(4, 0) ); break;
case SS_DRAM_MODE_WRITE_STATUS: ASSIGN_DB( mode_write_status, MASK64(0, 0) ); break;
case SS_DRAM_INIT_STATUS: ASSIGN_DB( init_status, MASK64(0, 0) ); break;
case SS_DRAM_DIMM_PRESENT: ASSIGN_DB( dimm_present, MASK64(3, 0) ); break;
case SS_DRAM_FAILOVER_STATUS: ASSIGN_DB( failover_status, MASK64(0, 0) ); break;
case SS_DRAM_FAILOVER_MASK: ASSIGN_DB( failover_mask, MASK64(34, 0) ); break;
/* Performance counter section 10.3 of PRM 1.1 */
case SS_DRAM_PERF_CTL: ASSIGN_DB( perf_ctl, MASK64(7, 0) ); break;
case SS_DRAM_PERF_COUNT: ASSIGN_DB( perf_count, MASK64(63, 0) ); break;
/* Error handling section 12.9 of PRM 1.1 */
case SS_DRAM_ERROR_STATUS:
dbp->error_status &= ~val;
dbp->error_status &= MASK64(63,57);
dbp->error_status |= val & MASK64(15,0);
break;
case SS_DRAM_ERROR_ADDRESS: ASSIGN_DB( error_address, MASK64(39,4) ); break;
case SS_DRAM_ERROR_INJECT: ASSIGN_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
case SS_DRAM_ERROR_COUNTER: ASSIGN_DB( error_counter, MASK64(17,0) ); break;
case SS_DRAM_ERROR_LOCATION: ASSIGN_DB( error_location, MASK64(35,0) ); break;
/* Power management section 16.2 of PRM 1.1 */
case SS_DRAM_OPEN_BANK_MAX: ASSIGN_DB( open_bank_max, MASK64(16, 0) ); break;
case SS_DRAM_PROG_TIME_CNTR: ASSIGN_DB( prog_time_cntr, MASK64(15, 0) ); break;
/* Hardware debug section 19.1 of PRM 1.1 */
case SS_DRAM_DBG_TRG_EN: ASSIGN_DB( dbg_trg_en, MASK64(7, 0) ); break;
default:
/* illegal reg - an error */
return false;
}
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in dram controller: Write 0x%llx to bank %d, register %s (offset 0x%x)",
val, bank, ss_dram_ctrl_reg_name(reg), reg ) );
return false;
case MA_ldu64:
#define RETRIEVE_DB(_n, _m) do { val = ((dbp->_n) & (_m)); } while (0)
switch (reg) {
/* DRAM controller section 15.5 of RPM 1.1 */
case SS_DRAM_CAS_ADDR_WIDTH: RETRIEVE_DB( cas_addr_width, MASK64(3, 0) ); break;
case SS_DRAM_RAS_ADDR_WIDTH: RETRIEVE_DB( ras_addr_width, MASK64(3, 0) ); break;
case SS_DRAM_CAS_LAT: RETRIEVE_DB( cas_lat, MASK64(2, 0) ); break;
case SS_DRAM_SCRUB_FREQ: RETRIEVE_DB( scrub_freq, MASK64(11, 0) ); break;
case SS_DRAM_REFRESH_FREQ: RETRIEVE_DB( refresh_freq, MASK64(12, 0) ); break;
case SS_DRAM_REFRESH_COUNTER: RETRIEVE_DB( refresh_counter, MASK64(12, 0) ); break;
case SS_DRAM_SCRUB_ENABLE: RETRIEVE_DB( scrub_enable, MASK64(0, 0) ); break;
case SS_DRAM_TRRD: RETRIEVE_DB( trrd, MASK64(3, 0) ); break;
case SS_DRAM_TRC: RETRIEVE_DB( trc, MASK64(4, 0) ); break;
case SS_DRAM_DRAM_TRCD: RETRIEVE_DB( dram_trcd, MASK64(3, 0) ); break;
case SS_DRAM_TWTR: RETRIEVE_DB( twtr, MASK64(3, 0) ); break;
case SS_DRAM_TRTW: RETRIEVE_DB( trtw, MASK64(3, 0) ); break;
case SS_DRAM_TRTP: RETRIEVE_DB( trtp, MASK64(2, 0) ); break;
case SS_DRAM_TRAS: RETRIEVE_DB( tras, MASK64(3, 0) ); break;
case SS_DRAM_TRP: RETRIEVE_DB( trp, MASK64(3, 0) ); break;
case SS_DRAM_TWR: RETRIEVE_DB( twr, MASK64(3, 0) ); break;
case SS_DRAM_TRFC: RETRIEVE_DB( trfc, MASK64(6, 0) ); break;
case SS_DRAM_TMRD: RETRIEVE_DB( tmrd, MASK64(1, 0) ); break;
case SS_DRAM_TIWTR: RETRIEVE_DB( tiwtr, MASK64(1, 0) ); break;
case SS_DRAM_PRECHARGE_WAIT: RETRIEVE_DB( precharge_wait, MASK64(7, 0) ); break;
case SS_DRAM_DIMM_STACK: RETRIEVE_DB( dimm_stack, MASK64(0, 0) ); break;
case SS_DRAM_EXT_WR_MODE2: RETRIEVE_DB( ext_wr_mode2, MASK64(14, 0) ); break;
case SS_DRAM_EXT_WR_MODE1: RETRIEVE_DB( ext_wr_mode1, MASK64(14, 0) ); break;
case SS_DRAM_EXT_WR_MODE3: RETRIEVE_DB( ext_wr_mode3, MASK64(14, 0) ); break;
case SS_DRAM_WAIR_CONTROL: RETRIEVE_DB( wair_control, MASK64(0, 0) ); break;
case SS_DRAM_RANK1_PRESENT: RETRIEVE_DB( rank1_present, MASK64(0, 0) ); break;
case SS_DRAM_CHANNEL_DISABLED: RETRIEVE_DB( channel_disabled, MASK64(0, 0) ); break;
case SS_DRAM_SEL_LO_ADDR_BITS: RETRIEVE_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
case SS_DRAM_DIMM_INIT: RETRIEVE_DB( dimm_init, MASK64(2, 0) ); break;
case SS_DRAM_SW_DV_COUNT: RETRIEVE_DB( sw_dv_count, MASK64(2, 0) ); break;
case SS_DRAM_HW_DMUX_CLK_INV: RETRIEVE_DB( hw_dmux_clk_inv, MASK64(0, 0) ); break;
case SS_DRAM_PAD_EN_CLK_INV: RETRIEVE_DB( pad_en_clk_inv, MASK64(4, 0) ); break;
case SS_DRAM_MODE_WRITE_STATUS: RETRIEVE_DB( mode_write_status, MASK64(0, 0) ); break;
case SS_DRAM_INIT_STATUS: RETRIEVE_DB( init_status, MASK64(0, 0) ); break;
case SS_DRAM_DIMM_PRESENT: RETRIEVE_DB( dimm_present, MASK64(3, 0) ); break;
case SS_DRAM_FAILOVER_STATUS: RETRIEVE_DB( failover_status, MASK64(0, 0) ); break;
case SS_DRAM_FAILOVER_MASK: RETRIEVE_DB( failover_mask, MASK64(34, 0) ); break;
/* Performance counter section 10.3 of PRM 1.1 */
case SS_DRAM_PERF_CTL: RETRIEVE_DB( perf_ctl, MASK64(7, 0) ); break;
case SS_DRAM_PERF_COUNT: RETRIEVE_DB( perf_count, MASK64(63, 0) ); break;
/* Error handling section 12.9 of PRM 1.1 */
case SS_DRAM_ERROR_STATUS: RETRIEVE_DB( error_status, MASK64(63,57)|MASK64(15,0) ); break;
case SS_DRAM_ERROR_ADDRESS: RETRIEVE_DB( error_address, MASK64(39,4) ); break;
case SS_DRAM_ERROR_INJECT: RETRIEVE_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
case SS_DRAM_ERROR_COUNTER: RETRIEVE_DB( error_counter, MASK64(17,0) ); break;
case SS_DRAM_ERROR_LOCATION: RETRIEVE_DB( error_location, MASK64(35,0) ); break;
/* Power management section 16.2 of PRM 1.1 */
case SS_DRAM_OPEN_BANK_MAX: RETRIEVE_DB( open_bank_max, MASK64(16, 0) ); break;
case SS_DRAM_PROG_TIME_CNTR: RETRIEVE_DB( prog_time_cntr, MASK64(15, 0) ); break;
/* Hardware debug section 19.1 of PRM 1.1 */
case SS_DRAM_DBG_TRG_EN: RETRIEVE_DB( dbg_trg_en, MASK64(7, 0) ); break;
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
/****************************************************************
*
* SunSPARC CPU interrupt bridge code
*
****************************************************************/
/* write to SS_ASI_SWVR_UDB_INTR_W */
/* FIXMENOW .... this function to go away ... use ss_ext_signal .... */
static void niagara_send_xirq(simcpu_t * sp, uint64_t val)
{
uint_t strand, core;
uint_t vec_bit;
uint_t type;
ss_strand_t * tstrandp;
ss_proc_t * npp;
bool_t pay_attention;
npp = (ss_proc_t *)(sp->config_procp->procp);
type = (val >> 16) & MASK64(1,0);
/* strand captures reserved field too .. but should be zero ... */
if (type != 0) EXEC_WARNING(("Write to SS_ASI_SWVR_UDB_INTR_W with non-zero type field (@pc=0x%llx)", sp->pc));
/* check actual value against number of strands later ... */
strand = (val >> 8) & MASK64(4,0);
vec_bit = val & MASK64(5,0);
/* normalize strand to internal strand */
strand = STRANDID2IDX(npp, strand);
if (!VALIDIDX(npp, strand)) {
EXEC_WARNING(("Write to SS_ASI_SWVR_UDB_INTR_W with illegal strand value 0x%llx (@pc=0x%llx)", strand, sp->pc));
return;
}
tstrandp = &(npp->ss_strandp[strand]);
pthread_mutex_lock(&tstrandp->irq_lock);
pay_attention = (0LL == tstrandp->irq_vector);
tstrandp->irq_vector |= (1LL<<vec_bit);
pthread_mutex_unlock(&tstrandp->irq_lock);
DBGE( lprintf(sp->gid, "irq_send: tstrand=%u irq_vector=%llx "
"(pc=0x%llx)\n", npp->strand[strand]->simp->gid,
tstrandp->irq_vector, sp->pc); );
/*
* The complicated part here is that the execution thread
* determines when the interrupt is actually delivered, if at
* all. All we need to do here is to ensure that that thread
* pays attention to the fact that the interrupt vector status
* has changed .. we only care if it goes non-zero ...
*/
if (pay_attention) {
sparcv9_cpu_t * tv9p;
simcpu_t * tsp;
tv9p = npp->strand[strand];
tsp = tv9p->simp;
tsp->async_event = true;
}
}
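/*
 * Illustrative example of the value decoded above (a write to
 * SS_ASI_SWVR_UDB_INTR_W): bits 17:16 carry the type field (expected to
 * be zero), bits 12:8 the target strand id and bits 5:0 the vector. A
 * hypothetical write of ((3 << 8) | 0x10) would therefore set bit 16 of
 * strand 3's irq_vector and, if the vector was previously empty, flag
 * async_event so that strand's execution thread notices the interrupt.
 */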
/*
* non-execution threads to use this method for posting
* interrupts and other actions to simcpu.
*/
uint64_t ss_ext_signal(config_proc_t * config_procp, ext_sig_t sigtype, void *vp)
{
ss_proc_t *npp;
simcpu_t *sp;
sparcv9_cpu_t *v9p;
ss_strand_t *nsp;
ss_jbus_t *jbusp;
jbus_mondo_t *mondop;
bool_t pay_attention;
uint_t strand, tidx;
int i;
uint64_t intr, ret;
ss_iob_t *iobp;
npp = (ss_proc_t*)(config_procp->procp);
switch (sigtype) {
case ES_IDLE:
/* what if thread not running? */
strand = IOB_INT_VEC_THREAD(npp->iobp->int_vec_dis);
tidx = STRANDID2IDX(npp, strand);
#if 0
lprintf(sp->gid, "IDLE: strand=%d idx=%d\n", strand, tidx)); */
#endif
/* skip strands that do not exist */
if (!VALIDIDX(npp, tidx))
return (0);
v9p = npp->strand[tidx];
sp = v9p->simp;
nsp = &(npp->ss_strandp[tidx]);
pthread_mutex_lock(&npp->thread_sts_lock);
SET_THREAD_STS_SFSM(npp, nsp, THREAD_STS_TSTATE_IDLE);
if (!PARKED(sp))
simcore_cpu_state_park(sp);
pthread_mutex_unlock(&npp->thread_sts_lock);
return (0);
case ES_RESUME:
case ES_RESET:
/* what if thread not idle? */
strand = IOB_INT_VEC_THREAD(npp->iobp->int_vec_dis);
tidx = STRANDID2IDX(npp, strand);
/* skip strands that do not exist */
if (!VALIDIDX(npp, tidx))
return (0);
v9p = npp->strand[tidx];
sp = v9p->simp;
nsp = &(npp->ss_strandp[tidx]);
pthread_mutex_lock(&npp->thread_sts_lock);
SET_THREAD_STS_SFSM(npp, nsp, THREAD_STS_TSTATE_RUN);
if (PARKED(sp))
simcore_cpu_state_unpark(sp);
pthread_mutex_unlock(&npp->thread_sts_lock);
return (0);
case ES_JBUS:
jbusp = npp->jbusp;
mondop = (jbus_mondo_t *)vp;
tidx = mondop->adr.target;
pthread_mutex_lock(&jbusp->lock);
if (jbusp->j_int_busy[tidx] & IOB_JBUS_BUSY) {
pthread_mutex_unlock(&jbusp->lock);
return IOB_JBUS_NACK;
} else {
jbusp->j_int_data0[tidx] = mondop->data0;
jbusp->j_int_data1[tidx] = mondop->data1;
jbusp->j_int_busy[tidx] = mondop->adr.source | IOB_JBUS_BUSY;
}
pthread_mutex_unlock(&jbusp->lock);
strand = STRANDID2IDX(npp, tidx);
if (!VALIDIDX(npp, strand))
return (IOB_JBUS_NACK); /* XXX */
nsp = &(npp->ss_strandp[strand]);
pthread_mutex_lock(&nsp->irq_lock);
pay_attention = (0LL == nsp->irq_vector);
nsp->irq_vector |= (uint64_t)1 << npp->iobp->j_int_vec;
pthread_mutex_unlock(&nsp->irq_lock);
if (pay_attention) {
v9p = npp->strand[strand];
sp = v9p->simp;
sp->async_event = true;
}
return IOB_JBUS_ACK;
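/*
 * JBUS mondo delivery, as implemented above: if the target's j_int_busy
 * register still has the BUSY bit set the mondo is refused with
 * IOB_JBUS_NACK; otherwise data0/data1 are latched, the source id is
 * recorded along with BUSY, and the j_int_vec interrupt bit is raised in
 * the target strand's irq_vector before returning IOB_JBUS_ACK.
 */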
/* This used to deliver a SSI interrupt event */
/* really needs to be handled in a different way */
/* FIXME ! */
case ES_SSI:
iobp = npp->iobp;
pthread_mutex_lock(&iobp->iob_lock);
/* If interrupt is masked in IOB simply set again the
* pending bit ... if not masked, then deliver an
* interrupt using the irq_vector
*/
if (iobp->int_ctl[IOB_DEV_SSI]&IOB_INT_CTL_MASK) {
iobp->int_ctl[IOB_DEV_SSI] |= IOB_INT_CTL_PEND;
} else {
/* set MASK bit */
iobp->int_ctl[IOB_DEV_SSI] |= IOB_INT_CTL_MASK;
/* now go async deliver the interrupt */
strand = IOB_INT_MAN_CPUID(npp->iobp->int_man[IOB_DEV_SSI]);
nsp = &(npp->ss_strandp[strand]);
v9p = npp->strand[strand];
sp = v9p->simp;
pthread_mutex_lock(&nsp->irq_lock);
pay_attention = (0LL == nsp->irq_vector);
nsp->irq_vector |=
(uint64_t)1<<(iobp->int_man[IOB_DEV_SSI]&INTR_VEC_MASK);
pthread_mutex_unlock(&nsp->irq_lock);
DBGSSI( lprintf(sp->gid, "SSI ext_signal: nsp=%p irq_vector=%llx\n", nsp, nsp->irq_vector); );
if (pay_attention) {
sp->async_event = true;
DBGSSI( lprintf(sp->gid, "SSI ext_signal: attention set\n"); );
}
}
pthread_mutex_unlock(&iobp->iob_lock);
return (0);
case ES_SPOR:
for (i=(npp->nstrands)-1; i>=0; i--) {
v9p = npp->strand[i];
nsp = (ss_strand_t *)(v9p->impl_specificp);
nsp->pending_async_tt = SS_trap_power_on_reset;
sp = v9p->simp;
sp->exception_pending = true;
}
return (0);
case ES_XIR:
/*
* OK every strand on this CPU gets a reset signal
* FIXME: wake up sleeping strands or error state strands
*/
for (i=(npp->nstrands)-1; i>=0; i--) {
v9p = npp->strand[i];
nsp = (ss_strand_t *)(v9p->impl_specificp);
nsp->pending_async_tt = SS_trap_externally_initiated_reset;
sp = v9p->simp;
DBGE( lprintf(sp->gid, "ES_XIR set_attention\n"); );
sp->exception_pending = true;
}
return (0);
default:
EXEC_WARNING(("processor%d: ext_signal %d ignored",
config_procp->proc_id, sigtype));
return (0);
}
}
/*
* CPU specific instruction decode routine. This routine is called from the main
* instruction decoder routine only when that routine comes up empty handed (i.e.
* before declaring it an illegal or unknown instruction.) For now, we don't have
* any CPU specific instructions implemented for Niagara, and so the performance
* impact of making this function call is negligible since it doesn't happen in
* the common case.
*
* This routine returns a pointer to the exec function which is to be run as a
* result of encountering the instruction op code in question.
*/
static op_funcp niagara_decode_me(simcpu_t *sp, xicache_instn_t * xcip, uint32_t instn)
{
uint_t rs1, rd, rs2;
sint32_t simm;
T2o3_code_t op2c;
op_funcp exec_funcp;
switch ((ty_code_t)X_OP(instn)) {
case Ty_2: /* Arithmetic and Misc instructions */
rs1 = X_RS1(instn);
rd = X_RD(instn);
op2c = (T2o3_code_t)X_OP3(instn);
if (X_I(instn)) {
simm = X_SIMM13(instn);
/* register x immediate -> register forms */
switch ( op2c ) {
case T2o3_mulscc :
SET_OPv9(mulscc_imm);
goto n1_do_imm;
case T2o3_save :
SET_OPv9(save_imm); /* rd == 0 determined in instn implementation */
goto n1_do_imm;
case T2o3_restore :
SET_OPv9(restore_imm);
goto n1_do_imm;
case T2o3_rdasr :
/* Here I = 1 */
if (rd == 0 && rs1==15) {
if (!CHECK_RESERVED_ZERO(instn, 12, 7)) {
SET_OP_ILL_REASON(misc_reserved_field_non_zero);
goto n1_illegal_instruction;
}
simm = X_MEMBAR_MASKS(instn);
SET_OP_SIMM16(simm); /* masks in immediates */
SET_OPv9( membar );
goto n1_all_done;
}
/* XXX if I = 1??? */
SET_OPv9( read_state_reg );
simm = 0; /* unused */
goto n1_do_imm;
case T2o3_return :
SET_OPv9( return_imm );
goto n1_do_imm;
case T2o3_flush :
SET_OPv9(iflush_imm);
goto n1_do_imm;
case T2o3_movcc :
if (!X_FMT4_CC2(instn)) {
#ifdef FP_DECODE_DISABLED
if (!((sparcv9_cpu_t*)(sp->specificp))->fpu_on) goto n1_fp_disabled;
#endif /* FP_DECODE_DISABLED */
if (rd == 0) goto n1_do_noop;
/* We attempt to fast path movfcc_a ... */
if (X_FMT4_COND(instn) == cond_n) goto n1_do_noop;
simm = X_SIMM11(instn);
if (X_FMT4_COND(instn) == cond_a) {
goto n1_do_move_simm;
}
SET_OP_MOVCC_CC(X_FMT4_CC(instn));
SET_OP_SIMM16(simm);
SET_OP_RD(rd);
SET_OP_MOVCC_COND(X_FMT4_COND(instn));
SET_OPv9(movfcc_imm);
goto n1_all_done;
}
switch( (cc4bit_t)X_FMT4_CC(instn) ) {
case CC4bit_icc: SET_OP_MOVCC_CC(0); break;
case CC4bit_xcc: SET_OP_MOVCC_CC(1); break;
default:
SET_OP_ILL_REASON(movcc_illegal_cc_field);
goto n1_illegal_instruction;
}
if (rd == 0) goto n1_do_noop;
/* truncate simm - as only an 11 bit
* immediate in movcc instructions, not the
* 13 bit field we extracted above
*/
simm = X_SIMM11(instn);
if (X_FMT4_COND(instn) == cond_n) goto n1_do_noop;
if (X_FMT4_COND(instn) == cond_a) goto n1_do_move_simm;
SET_OP_SIMM16(simm);
SET_OP_RD(rd);
SET_OP_MOVCC_COND(X_FMT4_COND(instn));
SET_OPv9(movcc_imm);
goto n1_all_done;
case T2o3_saved:
n1_saved_instn:;
{
int fcn = X_FMT2_FCN(instn);
if (!CHECK_RESERVED_ZERO(instn, 18, 0)) {
SET_OP_ILL_REASON(saved_reserved_field_non_zero);
goto n1_illegal_instruction;
}
switch (fcn) {
case 0: /* saved */
SET_OPv9(saved);
break;
case 1:
SET_OPv9(restored);
break;
default:
SET_OP_ILL_REASON(saved_fcn_invalid);
goto n1_illegal_instruction;
}
goto n1_all_done;
}
case T2o3_retry :
n1_done_retry_instn:;
switch(X_FMT3_FCN(instn)) {
case 0:
SET_OP_MISC_BITS((uint_t)true);
break;
case 1:
SET_OP_MISC_BITS((uint_t)false);
break;
default:
SET_OP_ILL_REASON(done_retry_illegal_fcn_field);
goto n1_illegal_instruction;
}
SET_OPv9(done_retry);
goto n1_all_done;
default:
break;
}
} else {
rs2 = X_RS2(instn);
/* register x register -> register forms */
switch ( op2c ) {
case T2o3_mulscc :
SET_OPv9(mulscc_rrr);
goto n1_do_rrr;
case T2o3_save :
SET_OPv9(save_rrr); /* rd == 0 determined in instn implementation */
goto n1_do_rrr;
case T2o3_restore :
/* Rd == 0 handled by instruction */
SET_OPv9(restore_rrr);
goto n1_do_rrr;
case T2o3_return :
SET_OPv9( return_rrr );
goto n1_do_rrr;
case T2o3_flush :
if (rd != 0)
goto n1_illegal_instruction;
SET_OPv9(iflush_rr);
goto n1_do_rrr;
case T2o3_saved:
goto n1_saved_instn;
case T2o3_retry :
goto n1_done_retry_instn;
default:
break;
}
}
default:
break;
}
n1_unknown_decode:
return (NULL);
#ifdef FP_DECODE_DISABLED
n1_fp_disabled:;
SET_OPv9(fp_unimplemented_instruction);
goto n1_all_done;
#endif /* FP_DECODE_DISABLED */
n1_do_imm:
SET_OP_RD(rd);
SET_OP_RS1(rs1);
SET_OP_SIMM16(simm);
goto n1_all_done;
n1_do_move_simm:
SET_OP( move_simm );
SET_OP_RD(rd);
SET_OP_SIMM32(simm);
goto n1_all_done;
n1_do_rrr:
SET_OP_RD(rd);
SET_OP_RS1(rs1);
SET_OP_RS2(rs2);
goto n1_all_done;
n1_do_noop:
SET_OP( noop );
goto n1_all_done;
n1_illegal_instruction:
SET_OPv9(illegal_instruction);
n1_all_done:;
return (exec_funcp);
}
void niagara_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp)
{
/*
* This Niagara specific function is not implemented yet.
*/
}
void niagara_domain_check(domain_t * domainp)
{
/*
* This Niagara specific function is not implemented yet.
*/
}
void niagara_set_sfsr(simcpu_t *sp, ss_mmu_t *mmup, tvaddr_t addr,
uint_t ft, ss_ctx_t ct, uint_t asi, uint_t w, uint_t e)
{
uint64_t new_sfsr;
new_sfsr = MMU_SFSR_FV;
if ((mmup->sfsr & MMU_SFSR_FV) != 0)
new_sfsr |= MMU_SFSR_OW;
new_sfsr |= (ft << MMU_SFSR_FT_SHIFT);
new_sfsr |= (ct << MMU_SFSR_CT_SHIFT);
if (e)
new_sfsr |= MMU_SFSR_E;
if (w)
new_sfsr |= MMU_SFSR_W;
new_sfsr |= (asi << MMU_SFSR_ASI_SHIFT);
if (!mmup->is_immu) {
mmup->sfar = VA48(addr);
DBGMMU( lprintf(sp->gid, "%cMMU SFSR update 0x%llx -> 0x%llx SFAR=0x%llx\n", mmup->is_immu ? 'I' : 'D', mmup->sfsr, new_sfsr, mmup->sfar); );
} else {
DBGMMU( lprintf(sp->gid, "%cMMU SFSR update 0x%llx -> 0x%llx\n", mmup->is_immu ? 'I' : 'D', mmup->sfsr, new_sfsr); );
}
mmup->sfsr = new_sfsr;
}
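/*
 * The SFSR value assembled above is built from: FV always set for the
 * new fault, OW set if a previous fault was still marked valid, the
 * fault type, context and ASI inserted at their shift positions, plus
 * the E and W qualifier bits. For DMMU faults the SFAR additionally
 * latches the 48bit virtual address of the faulting access.
 */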
/*
 * Below are CPU specific error injection routines. They are called when an
 * error condition is detected. The error flags are cleared once there are
 * no more errors left to post; the error condition itself may not be
 * cleared if further handling is required, e.g. demapping a tlb entry with
 * bad parity or flushing a cacheline with bad ecc.
 */
#if ERROR_INJECTION
void extract_error_type(error_conf_t * errorconfp)
{
errorconfp->type_namep = strdup(lex.strp);
if (streq(lex.strp,"IRC"))
errorconfp->type = IRC;
else if (streq(lex.strp,"IRU"))
errorconfp->type = IRU;
else if (streq(lex.strp,"FRC"))
errorconfp->type = FRC;
else if (streq(lex.strp,"FRU"))
errorconfp->type = FRU;
else if (streq(lex.strp,"IMTU"))
errorconfp->type = IMTU;
else if (streq(lex.strp,"IMDU"))
errorconfp->type = IMDU;
else if (streq(lex.strp,"DMTU"))
errorconfp->type = DMTU;
else if (streq(lex.strp,"DMDU"))
errorconfp->type = DMDU;
else if (streq(lex.strp,"DMSU"))
errorconfp->type = DMSU;
else if (streq(lex.strp,"ITC"))
errorconfp->type = ITC;
else if (streq(lex.strp,"IDC"))
errorconfp->type = IDC;
else if (streq(lex.strp,"DTC"))
errorconfp->type = DTC;
else if (streq(lex.strp,"DDC"))
errorconfp->type = DDC;
else if (streq(lex.strp,"MAU"))
errorconfp->type = MAU;
else if (streq(lex.strp,"LDRC"))
errorconfp->type = LDRC;
else if (streq(lex.strp,"LDSC"))
errorconfp->type = LDSC;
else if (streq(lex.strp,"LTC"))
errorconfp->type = LTC;
else if (streq(lex.strp,"LDAC"))
errorconfp->type = LDAC;
else if (streq(lex.strp,"LDWC"))
errorconfp->type = LDWC;
else if (streq(lex.strp,"LDAU"))
errorconfp->type = LDAU;
else if (streq(lex.strp,"LDWU"))
errorconfp->type = LDWU;
else if (streq(lex.strp,"DAC"))
errorconfp->type = DAC;
else if (streq(lex.strp,"DRC"))
errorconfp->type = DRC;
else if (streq(lex.strp,"DSC"))
errorconfp->type = DSC;
else if (streq(lex.strp,"DAU"))
errorconfp->type = DAU;
else if (streq(lex.strp,"DSU"))
errorconfp->type = DSU;
else
lex_fatal("unknown error type parsing error config");
}
void update_errflags(simcpu_t * sp)
{
sp->errorp->check_xdcache = find_errconf(sp, (LD|ST),
(DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU)) ? true : false;
sp->errorp->check_xicache = (find_errconf(sp, IFETCH,
(ITC|IDC|LDAC|LDAU|DAC|DAU))) ? true : false;
sp->errorp->check_dtlb = (find_errconf(sp, (LD|ST),
(DMDU|DMSU))) ? true : false;
}
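/*
 * update_errflags() re-derives the per-access check flags from whatever
 * error configurations remain armed, so the load/store, ifetch and
 * D-TLB paths only look for injected errors while a matching config
 * still exists.
 */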
/*
* If demap of tlb entry with parity error detected then remove error config
*/
void tlb_entry_error_match(simcpu_t * sp, ss_mmu_t * mmup, tlb_entry_t * tep)
{
error_conf_t * ep;
if (sp->error_enabled) {
if (sp->errorp->itep == tep && mmup->is_immu) {
if ((ep = find_errconf(sp, IFETCH, IMDU)) == NULL)
goto tlb_warning;
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
sp->errorp->itep = NULL;
return;
} else
if (sp->errorp->dtep == tep && !mmup->is_immu) {
if ((ep = find_errconf(sp, (LD|ST), DMDU)) == NULL)
goto tlb_warning;
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
sp->errorp->dtep = NULL;
return;
}
return;
tlb_warning: EXEC_WARNING(("tlb_entry_error_match(): tracking tlb"
" entry in error for non-existent error config"));
}
}
void ss_error_condition(simcpu_t * sp, error_conf_t * ep)
{
ss_strand_t * nsp;
ss_proc_t * npp;
ss_l2_cache_t * l2p;
ss_dram_bank_t * dbp;
simcpu_t * esp;
sparcv9_cpu_t * v9p;
sparcv9_trap_type_t tt;
error_t * errorp;
uint8_t bank,tid;
uint_t idx;
v9p = sp->specificp;
nsp = v9p->impl_specificp;
npp = sp->config_procp->procp;
errorp = sp->errorp;
DBGERR( lprintf(sp->gid, "ss_error_condition() etype = %s\n", ep->type_namep); );
switch (ep->type) {
case IRC:
nsp->error.status = NA_IRC_bit;
nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp)
| I_SYND(IREG_FAKE_SYND_SINGLE));
if (nsp->error.enabled & NA_CEEN) {
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case IRU:
nsp->error.status = NA_IRU_bit;
nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp)
| I_SYND(IREG_FAKE_SYND_DOUBLE));
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_internal_processor_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case FRC:
nsp->error.status = NA_FRC_bit;
nsp->error.addr = (F_REG_NUM(errorp->reg) |
EVEN_SYND(FREG_FAKE_SYND_SINGLE) | ODD_SYND(NULL));
if (nsp->error.enabled & NA_CEEN) {
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case FRU:
nsp->error.status = NA_FRU_bit;
nsp->error.addr = (F_REG_NUM(errorp->reg) |
EVEN_SYND(FREG_FAKE_SYND_DOUBLE) | ODD_SYND(NULL));
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_internal_processor_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case IMTU:
nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit);
nsp->error.addr = TLB_INDEX(errorp->tlb_idx[IMTU_IDX]);
errorp->tlb_idx[IMTU_IDX] = NULL;
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case IMDU:
if (ep->op == ASI_LD) {
nsp->error.status = (NA_PRIV_bit|NA_IMDU_bit);
nsp->error.addr = TLB_INDEX(errorp->tlb_idx[IMDU_IDX]);
errorp->tlb_idx[IMDU_IDX] = NULL;
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
} else {
nsp->error.status = NA_IMDU_bit;
nsp->error.status |= (ep->priv == V9_HyperPriv ||
ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
nsp->error.addr = MMU_PC(sp->pc);
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_instruction_access_error;
v9p->post_precise_trap(sp, tt);
}
}
break;
case DMTU:
nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit);
nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMTU_IDX]);
errorp->tlb_idx[DMTU_IDX] = NULL;
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case DMDU:
if (ep->op == ASI_LD) {
nsp->error.status = (NA_PRIV_bit|NA_DMDU_bit);
nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMDU_IDX]);
errorp->tlb_idx[DMDU_IDX] = NULL;
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
} else {
nsp->error.status = NA_DMDU_bit;
nsp->error.status |= (ep->priv == V9_HyperPriv ||
ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
nsp->error.addr = MMU_VA(errorp->addr);
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
}
break;
case DMSU:
nsp->error.status = NA_DMSU_bit;
nsp->error.status |= (ep->priv == V9_HyperPriv ||
ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
nsp->error.addr = MMU_VA(errorp->addr);
if (nsp->error.enabled & NA_NCEEN) {
tt = Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
break;
case ITC:
nsp->error.status = NA_ITC_bit;
goto icache_error;
case IDC:
nsp->error.status = NA_IDC_bit;
icache_error: nsp->error.status |= (ep->priv == V9_HyperPriv ||
ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
nsp->error.addr = L1_PA(errorp->addr);
if (nsp->error.enabled & NA_CEEN) {
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case DTC:
nsp->error.status = NA_DTC_bit;
goto dcache_error;
case DDC:
nsp->error.status = NA_DDC_bit;
dcache_error: nsp->error.status |= (ep->priv == V9_HyperPriv ||
ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
nsp->error.addr = L1_PA(errorp->addr);
if (nsp->error.enabled & NA_CEEN) {
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(sp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case MAU:
if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
break;
case LDAC:
bank = (errorp->addr >> 6) & 0x3;
l2p = npp->l2p;
tid = nsp->vcore_id;
l2p->error_status[bank] = L2_LDAC_bit | L2_TID(tid) | L2_VEC_bit |
L2_FAKE_SYND_SINGLE | errorp->l2_write;
l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
if ((nsp->error.enabled & NA_CEEN) &&
(l2p->error_enable[bank] & L2_CEEN)) {
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(sp, tt);
}
/* l2 corrected on partial store or atomic hit */
if (errorp->l2_write) {
npp->errorp->ldac_addr = NULL;
ss_set_errcheck(npp);
} else {
uint_t idx;
/* l2 uncorrected on load/ifetch hit so make error proc-wide */
npp->error_check = true;
npp->errorp->ldac_addr = errorp->addr;
/*
* NB: proper behavior is to flush all cpu xdcache's
* but there is no lock on the xdc so I didn't try it
*/
sp->xdcache_trans_flush_pending = true;
}
/* bit of a hack - some errorconf's aren't owned by sp's so free them */
if (ep->npp) free(ep);
else {
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
}
break;
case LDWC:
bank = (errorp->addr >> 6) & 0x3;
l2p = npp->l2p;
tid = nsp->vcore_id;
l2p->error_status[bank] = L2_LDWC_bit | L2_TID(tid) | L2_VEC_bit |
L2_FAKE_SYND_SINGLE | L2_RW_bit;
l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
tid = (l2p->control[bank] & L2_ERRORSTEER);
v9p = npp->strand[STRANDID2IDX(npp, tid)];
nsp = v9p->impl_specificp;
esp = v9p->simp;
if ((nsp->error.enabled & NA_CEEN) &&
(l2p->error_enable[bank] & L2_CEEN)) {
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(esp, tt);
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case LDRC:
case LDSC:
if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
break;
case LDAU:
bank = (errorp->addr >> 6) & 0x3;
l2p = npp->l2p;
tid = nsp->vcore_id;
l2p->error_status[bank] = L2_LDAU_bit | L2_TID(tid) | L2_VEU_bit |
L2_FAKE_SYND_DOUBLE | errorp->l2_write;
l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
if (l2p->error_enable[bank] & L2_NCEEN) {
nsp->error.status = NA_LDAU_bit;
nsp->error.status |= (ep->priv == V9_HyperPriv ||
ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
nsp->error.addr = L1_PA(errorp->addr);
if (nsp->error.enabled & NA_NCEEN) {
tt = (ep->type == IFETCH)
? Sparcv9_trap_instruction_access_error
: Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
}
/*
* store error info to cacheline for error handler diag access
* and to support direct-mapped mode displacement flushing
*/
/* index stores to a 32bit word and its ECC+rsvd bits */
idx = errorp->addr & (L2_WAY | L2_LINE | L2_BANK | L2_WORD) >> 2;
/* put oddeven select bit low so data is in addr order */
idx |= ((errorp->addr >> L2_ODDEVEN_SHIFT) & 1);
l2p->diag_datap[idx] = ((0xabbadada << 7) | L2_FAKE_SYND_DOUBLE);
/* index stores to a tag and its ECC+rsvd bits */
idx = errorp->addr & (L2_WAY | L2_LINE | L2_BANK) >> 6;
l2p->diag_tagp[idx] = (errorp->addr & L2_TAG) >> 12;
/* index valid/dirty or alloc/used bits and parity */
idx = errorp->addr & (L2_LINE | L2_BANK) >> 6;
idx |= ((errorp->addr & L2_VDSEL) >> 10);
l2p->diag_vuadp[idx] = 0xfff << 12; /* all lines valid/clean */
/* uncorrectable error in l2 so make it proc-wide */
npp->error_check = true;
npp->errorp->ldau_addr = errorp->addr;
sp->xdcache_trans_flush_pending = true;
/* bit of a hack - some errorconfs aren't owned by an sp, so free them here */
if (ep->npp) free(ep);
else {
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
}
break;
case LDWU:
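/*
 * LDWU: uncorrectable L2 data ECC error on a writeback. no strand
 * owns the access, so when both NCEEN enables are set a disrupting
 * data_error trap is posted to the ERRORSTEER thread.
 */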
bank = (errorp->addr >> 6) & 0x3;
l2p = npp->l2p;
tid = nsp->vcore_id;
l2p->error_status[bank] = L2_LDWU_bit | L2_TID(tid) | L2_VEU_bit |
L2_FAKE_SYND_DOUBLE | L2_RW_bit;
l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
if ((nsp->error.enabled & NA_NCEEN) &&
(l2p->error_enable[bank] & L2_NCEEN)) {
tid = (l2p->control[bank] & L2_ERRORSTEER);
v9p = npp->strand[STRANDID2IDX(npp, tid)];
esp = v9p->simp;
tt = (sparcv9_trap_type_t)N1_trap_data_error;
v9p->post_precise_trap(esp, tt);
}
npp->error_check = true;
npp->errorp->ldau_addr = errorp->addr;
/* bit of a hack - some errorconfs aren't owned by an sp, so free them here */
if (ep->npp) free(ep);
else {
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
}
break;
case LDRU:
case LDSU:
case LTC:
case LVU:
case LRU:
if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
break;
case DAC:
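/*
 * DAC: correctable DRAM ECC error. the DRAM channel is selected with
 * the same PA bits as the L2 bank; set the DRAM bank status first,
 * then the L2 bank's error status/address, and post a disrupting
 * ECC_error trap - to the requester, or to the ERRORSTEER thread for
 * partial stores and odd-numbered cache lines.
 */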
l2p = npp->l2p;
bank = (errorp->addr >> 6) & 0x3;
dbp = &(npp->mbankp[bank]);
dbp->error_status = DRAM_DAC_bit | DRAM_FAKE_SYND_SINGLE;
/* if store miss and L2 disabled then only set DRAM error status */
if (ep->op == ST && !errorp->partial_st) {
for (bank = 0; bank < npp->num_l2banks; bank++) {
if (l2p->control[bank] & L2_DIS)
break;
}
if (bank < npp->num_l2banks) {
/* found a disabled L2 bank - skip the L2 status update and trap */
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
}
}
bank = (errorp->addr >> 6) & 0x3;
tid = nsp->vcore_id;
l2p->error_status[bank] = L2_DAC_bit | L2_TID(tid) | L2_VEC_bit |
errorp->l2_write;
l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
if ((nsp->error.enabled & NA_CEEN) &&
(l2p->error_enable[bank] & L2_CEEN)) {
/*
* partial stores and odd-numbered cache lines
* redirected to errorsteer thread
*/
if (errorp->partial_st || (errorp->addr & 0x40)) {
tid = (l2p->control[bank] & L2_ERRORSTEER);
v9p = npp->strand[STRANDID2IDX(npp, tid)];
esp = v9p->simp;
l2p->error_status[bank] &= ~(errorp->l2_write);
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(esp, tt);
} else {
tt = (sparcv9_trap_type_t)SS_trap_ECC_error;
v9p->post_precise_trap(sp, tt);
}
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case DSC:
case DAU:
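/*
 * DAU: uncorrectable DRAM ECC error. as for DAC, both the DRAM bank
 * and the L2 bank log the error; the per-strand status is reported
 * as LDAU (per Table 12-4 of the PRM). partial stores and
 * odd-numbered lines get a disrupting data_error on the ERRORSTEER
 * thread, otherwise the requester takes a precise access error trap
 * if NCEEN allows it.
 */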
l2p = npp->l2p;
bank = (errorp->addr >> 6) & 0x3;
dbp = &(npp->mbankp[bank]);
dbp->error_status = DRAM_DAU_bit | DRAM_FAKE_SYND_DOUBLE;
tid = nsp->vcore_id;
l2p->error_status[bank] = L2_DAU_bit | L2_TID(tid) | L2_VEU_bit |
errorp->l2_write;
l2p->error_address[bank] = L2_PA_LINE(errorp->addr);
if (l2p->error_enable[bank] & L2_NCEEN) {
nsp->error.status = NA_LDAU_bit; /* as per Table 12-4 of PRM */
nsp->error.status |= (ep->priv == V9_HyperPriv ||
ep->priv == V9_Priv) ? NA_PRIV_bit : 0;
/*
* partial stores and odd-numbered cache lines
* redirected to errorsteer thread
*/
if (errorp->partial_st || (errorp->addr & 0x40)) {
tid = (l2p->control[bank] & L2_ERRORSTEER);
v9p = npp->strand[STRANDID2IDX(npp, tid)];
esp = v9p->simp;
l2p->error_status[bank] &= ~(errorp->l2_write);
/*
* set address to non-requested 16B block
* within the same 64B cache line
*/
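/* e.g. PA<5:4> 0b00->0b01, 0b01->0b10, 0b10->0b11, 0b11->0b00 */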
if (!errorp->partial_st)
errorp->addr = (errorp->addr & ~0x30) |
(((errorp->addr & 0x30) + 0x10) % 0x40);
nsp->error.addr = L1_PA(errorp->addr);
tt = (sparcv9_trap_type_t)N1_trap_data_error;
v9p->post_precise_trap(esp, tt);
break;
}
nsp->error.addr = L1_PA(errorp->addr);
if (nsp->error.enabled & NA_NCEEN) {
tt = (ep->type == IFETCH)
? Sparcv9_trap_instruction_access_error
: Sparcv9_trap_data_access_error;
v9p->post_precise_trap(sp, tt);
}
}
if (remove_errconf(sp, ep) == NULL)
clear_errflags(sp); else update_errflags(sp);
break;
case DSU:
case DBU9:
case DRAM:
if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep));
break;
default:
if (remove_errconf(sp, ep) == NULL) clear_errflags(sp);
EXEC_WARNING(("Unspecified Error Type: %s\n", ep->type_namep));
break;
}
}
#endif