/*
 * ========== Copyright Header Begin ==========================================
 * OpenSPARC T2 Processor File: niagara2_device.c
 * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
 * The above named program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 * The above named program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this work; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 * ========== Copyright Header End ============================================
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident "@(#)niagara2_device.c 1.35 07/10/12 SMI"
#include "niagara2_device.h"
/*
 * This file contains Niagara 2 specific pseudo device models.
 */
/* Per-unit init hooks, run once each pseudo device is configured. */
static void ncu_init(config_dev_t *);
static void ccu_init(config_dev_t *);
static void mcu_init(config_dev_t *);
static void l2c_init(config_dev_t *);
static void ssi_init(config_dev_t *);
static void hwdbg_init(config_dev_t *);
static void rcu_init(config_dev_t *);
static void jtag_init(config_dev_t *);

/* Per-unit CSR load/store handlers. */
static bool_t ncu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t ccu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t mcu_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t *regp);
static bool_t l2c_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t *regp);
static bool_t ssi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t hwdbg_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t rcu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t jtag_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);

/* Additional units used by the multi-node (VF) configuration. */
static void ncx_init(config_dev_t *);
static void cou_init(config_dev_t *);
static void lfu_init(config_dev_t *);
static bool_t ncx_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t cou_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t lfu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);

void niagara2_send_xirq(simcpu_t *sp, ss_proc_t *npp, uint64_t val);
static dev_type_t dev_type_ncu
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_ccu
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_mcu
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_dbg
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_l2c
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_ssi
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_rcu
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_jtag
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_ncx
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_cou
= {
generic_device_non_cacheable
,
static dev_type_t dev_type_lfu
= {
generic_device_non_cacheable
,
/* Raw entropy source used by the CCU RNG model (defined elsewhere). */
uint64_t gen_raw_entropy(double *phase, double *frequency, double *noise,
    double dutyfactor);
/*
 * Set up the pseudo physical devices that Niagara 2 has for its control
 * registers, for things like the clock unit and memory controllers etc.
 */
void ss_setup_pseudo_devs(domain_t
* domainp
, ss_proc_t
*procp
)
config_dev_t
*pd
, *overlapp
;
int node_id
= procp
->config_procp
->proc_id
;
static bool_t setup_once
= false;
* NCU, mapped at MSB[39:32] = 0x80
procp
->ncup
= Xcalloc(1, ncu_t
);
procp
->ncup
->node_id
= node_id
;
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ncu
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_NCU
, PHYS_ADDR_NCU
+ NCU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* Clock Unit, mapped at MSB[39:32] = 0x83
procp
->clockp
= Xcalloc(1, ccu_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ccu
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_CCU
, PHYS_ADDR_CCU
+ CCU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* Memory Controller Unit, mapped at MSB[39:32] = 0x84
* N2 supports 4 DRAM branches, each controlled by a separate MCU,
procp
->mbankp
= Xcalloc(procp
->num_mbanks
, mcu_bank_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_mcu
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_MCU
,
PHYS_ADDR_MCU
+4096LL*(uint64_t)procp
->num_mbanks
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* L2 Cache registers, mapped at MSB[39:32] = 0xA0
procp
->num_l2banks
= L2_BANKS
;
procp
->l2p
= Xcalloc(1, l2c_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_l2c
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_L2C
, PHYS_ADDR_L2C
+ L2C_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* HW Debug Unit, mapped at MSB[39:32] = 0x86
procp
->hwdbgp
= Xcalloc(1, hwdbg_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_dbg
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_HWDBG
, PHYS_ADDR_HWDBG
+0x100000000LL
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* Reset Unit, mapped at MSB[39:32] = 0x89
procp
->rcup
= Xcalloc(1, rcu_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_rcu
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_RCU
, PHYS_ADDR_RCU
+ RCU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* JTAG, mapped at MSB[39:32] = 0x90
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_jtag
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_JTAG
, PHYS_ADDR_JTAG
+ JTAG_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* NCX, mapped at MSB[39:32] = 0x81
procp
->ncxp
= Xcalloc(1, ncx_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ncx
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_NCX
, PHYS_ADDR_NCX
+ NCX_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* COU, mapped at MSB[39:32] = 0x811
procp
->coup
= Xcalloc(1, cou_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_cou
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_COU
, PHYS_ADDR_COU
+ COU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* LFU, mapped at MSB[39:32] = 0x812
procp
->lfup
= Xcalloc(1, lfu_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_lfu
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_LFU
, PHYS_ADDR_LFU
+ LFU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* SSI, mapped at MSB[39:32] = 0xff
procp
->ssip
= Xcalloc(1, ssi_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ssi
;
insert_domain_address(domainp
, pd
, PHYS_ADDR_SSI
, PHYS_ADDR_SSI
+SSI_RANGE
);
insert_domain_address(domainp
, pd
, MAGIC_SSI
, MAGIC_SSI
+ 8);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
DBGMULNODE(lprintf(-1, "Setting up pseudo devices for node %d\n",
* Instead of getting a fatal while trying to allocate addr
* space for pseudodevices of duplicate nodes, better to
* catch this problem here.
for (i
= 0; i
< (domainp
->procs
.count
- 1); i
++) {
if (node_id
== LIST_ENTRY(domainp
->procs
, i
)->proc_id
)
lex_fatal("More than one node %d present",
* NCU, mapped at MSB[39:32] = 0x80
/* Need to allocate space only once for each node */
procp
->ncup
= Xcalloc(1, ncu_t
);
procp
->ncup
->node_id
= node_id
;
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ncu
;
phys_addr
= PHYS_ADDR_NCU_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
, phys_addr
+ NCU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* Clock Unit, mapped at MSB[39:32] = 0x83
/* Need to allocate space only once for each node */
procp
->clockp
= Xcalloc(1, ccu_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ccu
;
phys_addr
= PHYS_ADDR_CCU_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
, phys_addr
+ CCU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* Memory Controller Unit, mapped at MSB[39:32] = 0x84
* VF supports 2 DRAM branches, each controlled by a separate MCU,
/* Need to allocate space only once for each node */
procp
->mbankp
= Xcalloc(procp
->num_mbanks
, mcu_bank_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_mcu
;
phys_addr
= PHYS_ADDR_MCU_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
,
phys_addr
+ 4096LL*(uint64_t)procp
->num_mbanks
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* L2 Cache registers, mapped at MSB[39:28] = 0xA00-0xBFF
/* L2CSR is local access only. See comments in l2c_access() */
procp
->num_l2banks
= L2_BANKS
;
procp
->l2p
= Xcalloc(1, l2c_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_l2c
;
addrp
= Xmalloc(sizeof(config_addr_t
));
addrp
->baseaddr
= PHYS_ADDR_L2C
;
addrp
->topaddr
= PHYS_ADDR_L2C
+ L2C_RANGE
;
addrp
->range
= L2C_RANGE
;
* SSI, mapped at MSB[39:28] = 0xff0 - 0xfff
/* Need to allocate space only once and for each node */
procp
->ssip
= Xcalloc(1, ssi_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ssi
;
addrp
= Xmalloc(sizeof(config_addr_t
));
addrp
->baseaddr
= PHYS_ADDR_SSI
;
addrp
->topaddr
= PHYS_ADDR_SSI
+ SSI_RANGE
;
addrp
->range
= SSI_RANGE
;
/* and now create an addrp struc for the MAGIC_SSI address */
addrp
= Xmalloc(sizeof(config_addr_t
));
addrp
->baseaddr
= MAGIC_SSI
;
addrp
->topaddr
= MAGIC_SSI
+ 8;
* JTAG, mapped at MSB[39:32] = 0x90
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_jtag
;
phys_addr
= PHYS_ADDR_JTAG_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
, phys_addr
+ JTAG_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* NCX, mapped at MSB[39:32] = 0x81
procp
->ncxp
= Xcalloc(1, ncx_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_ncx
;
phys_addr
= PHYS_ADDR_NCX_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
, phys_addr
+ NCX_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* COU, mapped at MSB[39:32] = 0x811
procp
->coup
= Xcalloc(1, cou_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_cou
;
phys_addr
= PHYS_ADDR_COU_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
, phys_addr
+ COU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* LFU, mapped at MSB[39:32] = 0x812
procp
->lfup
= Xcalloc(1, lfu_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_lfu
;
phys_addr
= PHYS_ADDR_LFU_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
, phys_addr
+ LFU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
* RCU, mapped at MSB[39:32] = 0x89
procp
->rcup
= Xcalloc(1, rcu_t
);
pd
= Xcalloc(1, config_dev_t
);
pd
->dev_typep
= &dev_type_rcu
;
phys_addr
= PHYS_ADDR_RCU_REMOTE(node_id
);
insert_domain_address(domainp
, pd
, phys_addr
, phys_addr
+ RCU_RANGE
);
overlapp
= insert_domain_device(domainp
, pd
);
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp
->dev_typep
->dev_type_namep
,
overlapp
->addrp
->baseaddr
,
pd
->dev_typep
->dev_type_namep
,
char * ssi_reg_name(int reg
)
case SSI_TIMEOUT
: s
="ssi_timeout"; break;
case SSI_LOG
: s
="ssi_log"; break;
default: s
="Illegal ssi register"; break;
static char *ncu_reg_name(int reg
)
case INT_MAN
: s
="int_man"; break;
case MONDO_INT_VEC
: s
="mondo_int_vec"; break;
case SER_NUM
: s
="ser_num"; break;
case EFU_STAT
: s
="efu_stat"; break;
case CORE_AVAIL
: s
="core_avail"; break;
case BANK_AVAIL
: s
="bank_avail"; break;
case BANK_ENABLE
: s
="bank_enable"; break;
case BANK_ENABLE_STATUS
: s
="bank_enable_status"; break;
case L2_IDX_HASH_EN
: s
="l2_idx_hash_en"; break;
case L2_IDX_HASH_EN_STATUS
: s
="l2_idx_hash_en_status"; break;
case PCIE_A_MEM32_OFFSET_BASE
: s
="pcie_a_mem32_offset_base"; break;
case PCIE_A_MEM32_OFFSET_MASK
: s
="pcie_a_mem32_offset_mask"; break;
case PCIE_A_MEM64_OFFSET_BASE
: s
="pcie_a_mem64_offset_base"; break;
case PCIE_A_MEM64_OFFSET_MASK
: s
="pcie_a_mem64_offset_mask"; break;
case PCIE_A_IOCON_OFFSET_BASE
: s
="pcie_a_iocon_offset_base"; break;
case PCIE_A_IOCON_OFFSET_MASK
: s
="pcie_a_iocon_offset_mask"; break;
case PCIE_A_FSH
: s
="pcie_a_fsh"; break;
case SOC_ESR
: s
="soc_error_status"; break;
case SOC_LOG_ENABLE
: s
="soc_error_log_enable"; break;
case SOC_INTERRUPT_ENABLE
: s
="soc_error_interrupt_enable"; break;
case SOC_FATAL_ERROR_ENABLE
: s
="soc_fatal_error_enable"; break;
case SOC_PENDING_ERROR_STATUS
: s
="soc_pending_error_status"; break;
case SOC_ERROR_INJECTION
: s
="soc_error_injection"; break;
case SOC_SII_ERROR_SYNDROME
: s
="soc_sii_error_syndrome"; break;
case SOC_NCU_ERROR_SYNDROME
: s
="soc_sii_error_syndrome"; break;
case MONDO_INT_DATA0
: s
="mondo_int_data0"; break;
case MONDO_INT_DATA1
: s
="mondo_int_data1"; break;
case MONDO_INT_ADATA0
: s
="mondo_int_adata0"; break;
case MONDO_INT_ADATA1
: s
="mondo_int_adata1"; break;
case MONDO_INT_BUSY
: s
="mondo_int_busy"; break;
case MONDO_INT_ABUSY
: s
="mondo_int_abusy"; break;
default: s
="Illegal NCU register"; break;
char * ccu_reg_name(int reg
)
case CLOCK_CONTROL
: s
="clock_control"; break;
case RAND_GEN
: s
="rand_gen"; break;
case RAND_CTL
: s
="rand_ctl"; break;
default: s
="Illegal clock register"; break;
char * l2c_reg_name(int reg
)
case L2_DIAG_DATA
: s
="diag_data"; break;
case L2_DIAG_TAG
: s
="diag_tag"; break;
case L2_DIAG_VUAD
: s
="diag_vuad"; break;
case L2_CONTROL
: s
="control"; break;
case L2_ERROR_ENABLE
: s
="error_enable"; break;
case L2_ERROR_STATUS
: s
="error_status"; break;
case L2_ERROR_STATUS_II
: s
="error_status_ii"; break;
case L2_ERROR_ADDRESS
: s
="error_address"; break;
case L2_ERROR_INJECT
: s
="error_inject"; break;
case L2_ERROR_NOTDATA
: s
="error_notdata"; break;
default: s
="Illegal L2 control register"; break;
char * hwdbg_reg_name(int reg
)
case DEBUG_PORT_CONFIG
: s
="debug_port_config"; break;
case IO_QUIESCE_CONTROL
: s
="io_quiesce_control"; break;
default: s
="Illegal Debug control register"; break;
char * mcu_reg_name(int reg
)
case DRAM_CAS_ADDR_WIDTH
: s
="cas_addr_width"; break;
case DRAM_RAS_ADDR_WIDTH
: s
="ras_addr_width"; break;
case DRAM_CAS_LAT
: s
="cas_lat"; break;
case DRAM_SCRUB_FREQ
: s
="scrub_frequency"; break;
case DRAM_REFRESH_FREQ
: s
="refresh_frequency"; break;
case DRAM_OPEN_BANK_MAX
: s
="open_bank_max"; break;
case DRAM_REFRESH_COUNTER
: s
="refresh_counter"; break;
case DRAM_SCRUB_ENABLE
: s
="scrub_enable"; break;
case DRAM_PROG_TIME_CNTR
: s
="program_time_cntr"; break;
case DRAM_TRRD
: s
="trrd"; break;
case DRAM_TRC
: s
="trc"; break;
case DRAM_TRCD
: s
="trcd"; break;
case DRAM_TWTR
: s
="twtr"; break;
case DRAM_TRTW
: s
="trtw"; break;
case DRAM_TRTP
: s
="trtp"; break;
case DRAM_TRAS
: s
="tras"; break;
case DRAM_TRP
: s
="trp"; break;
case DRAM_TWR
: s
="twr"; break;
case DRAM_TRFC
: s
="trfc"; break;
case DRAM_TMRD
: s
="tmrd"; break;
case DRAM_FAWIN
: s
="fawin"; break;
case DRAM_TIWTR
: s
="tiwtr"; break;
case DRAM_DIMM_STACK
: s
="dimm_stack"; break;
case DRAM_EXT_WR_MODE1
: s
="ext_wr_mode1"; break;
case DRAM_EXT_WR_MODE2
: s
="ext_wr_mode2"; break;
case DRAM_EXT_WR_MODE3
: s
="ext_wr_mode3"; break;
case DRAM_8_BANK_MODE
: s
="8_bank_mode"; break;
case DRAM_BRANCH_DISABLED
: s
="branch_disabled"; break;
case DRAM_SEL_LO_ADDR_BITS
: s
="sel_lo_addr_bits"; break;
case DRAM_SINGLE_CHNL_MODE
: s
="single_chnl_mode"; break;
case DRAM_MIRROR_MODE
: s
="mirror_mode"; break;
case DRAM_DIMM_INIT
: s
="dimm_init"; break;
case DRAM_INIT_STATUS
: s
="init_status"; break;
case DRAM_DIMM_PRESENT
: s
="dimm_present"; break;
case DRAM_FAILOVER_STATUS
: s
="failover_status"; break;
case DRAM_FAILOVER_MASK
: s
="failover_mask"; break;
case DRAM_DBG_TRG_EN
: s
="dbg_trg_en"; break;
case DRAM_POWER_DOWN_MODE
: s
="power_down_mode"; break;
case DRAM_ERROR_STATUS
: s
="error_status"; break;
case DRAM_ERROR_ADDRESS
: s
="error_address"; break;
case DRAM_ERROR_INJECT
: s
="error_inject"; break;
case DRAM_ERROR_COUNTER
: s
="error_counter"; break;
case DRAM_ERROR_LOCATION
: s
="error_location"; break;
case DRAM_ERROR_RETRY
: s
="error_retry"; break;
case DRAM_FBD_ERROR_SYND
: s
="fbd_error_synd"; break;
case DRAM_FBD_INJ_ERROR_SRC
: s
="fbd_inj_error_src"; break;
case DRAM_FBR_COUNT
: s
="fbr_count"; break;
case DRAM_PERF_CTL
: s
="perf_ctl"; break;
case DRAM_PERF_COUNT
: s
="perf_count"; break;
case FBD_CHNL_STATE
: s
="fbd_channle_state"; break;
case FBD_FAST_RESET_FLAG
: s
="fbd_fast_reset_flag"; break;
case FBD_CHNL_RESET
: s
="fbd_channle_reset"; break;
case TS1_SB_NB_MAPPING
: s
="ts1_sb_nb_mapping"; break;
case TS1_TEST_PARAMETER
: s
="ts1_test_parameter"; break;
case TS3_FAILOVER_CONFIG
: s
="ts3_failover_config"; break;
case ELECTRICAL_IDLE_DETECTED
: s
="electrical_idle_detected"; break;
case DISABLE_STATE_PERIOD
: s
="disable_state_period"; break;
case DISABLE_STATE_PERIOD_DONE
: s
="disable_state_period_done"; break;
case CALIBRATE_STATE_PERIOD
: s
="calibrate_state_period"; break;
case CALIBRATE_STATE_PERIOD_DONE
: s
="calibrate_state_period_done"; break;
case TRAINING_STATE_MIN_TIME
: s
="training_state_min_time"; break;
case TRAINING_STATE_DONE
: s
="training_state_done"; break;
case TRAINING_STATE_TIMEOUT
: s
="training_state_timeout"; break;
case TESTING_STATE_DONE
: s
="testing_state_done"; break;
case TESTING_STATE_TIMEOUT
: s
="testing_state_timeout"; break;
case POLLING_STATE_DONE
: s
="polling_state_done"; break;
case POLLING_STATE_TIMEOUT
: s
="polling_state_timeout"; break;
case CONFIG_STATE_DONE
: s
="config_state_done"; break;
case CONFIG_STATE_TIMEOUT
: s
="config_state_timeout"; break;
case DRAM_PER_RANK_CKE
: s
="dram_per_rank_cke"; break;
case L0S_DURATION
: s
="l0s_duration"; break;
case CHNL_SYNC_FRAME_FREQ
: s
="channle_sync_frame_fre"; break;
case CHNL_READ_LAT
: s
="channle_read_lat"; break;
case CHNL_CAPABILITY
: s
="channle_capability"; break;
case LOOPBACK_MODE_CNTL
: s
="loopback_mode_cntl"; break;
case SERDES_CONFIG_BUS
: s
="serdes_config_bus"; break;
case SERDES_INVPAIR
: s
="serdes_invpair"; break;
case SERDES_TEST_CONFIG_BUS
: s
="serdes_test_config_bus"; break;
case CONFIG_REG_ACCESS_ADDR
: s
="config_reg_access_addr"; break;
case CONFIG_REG_ACCESS_DATA
: s
="config_reg_access_data"; break;
case IBIST_NBFIB_CTL
: s
="ibist_nbfib_ctl"; break;
case IBIST_SBFIB_CTL
: s
="ibist_sbfib_ctl"; break;
default: s
="Illegal DRAM control register"; break;
char * jtag_reg_name(int reg
)
case INT_VECTOR_DISPATCH
: s
="int_vector_dispatch";break;
case ASI_CORE_AVAILABLE
: s
="asi_core_available";break;
case ASI_CORE_ENABLE_STATUS
: s
="asi_core_enable_status";break;
case ASI_CORE_ENABLE
: s
="asi_core_enable";break;
case ASI_CORE_RUNNING_RW
: s
="asi_core_running_rw";break;
case ASI_CORE_RUNNING_STATUS
: s
="asi_core_running_status";break;
case ASI_CORE_RUNNING_W1S
: s
="asi_core_running_w1s";break;
case ASI_CORE_RUNNING_W1C
: s
="asi_core_running_w1c";break;
case SOC_ERROR_STEERING
: s
="soc_error_steering";break;
default: s
="Illegal JTAG region register"; break;
char * ncx_reg_name(int reg
)
case CF_SYS_MODE_REG
: s
="system_mode_reg";break;
case NCX_TIC_EN_SLOW
: s
="tick_en_slow";break;
case CF_SLOW_PULSE_WAIT
: s
="slow_pulse_wait";break;
case NCX_TWR
: s
="twr";break;
case NCX_TPESR
: s
="tpesr";break;
case NCX_TPELSE
: s
="tpelse";break;
case NCX_TPEAR
: s
="tpear";break;
default: s
="Illegal NCX region register"; break;
char * cou_reg_name(int reg
)
case COU_ERR_ENABLE_REG
: s
="cou_err_enable";break;
case COU_ESR
: s
="cou_esr";break;
case COU_EAR
: s
="cou_ear";break;
default: s
="Illegal COU region register"; break;
char * lfu_reg_name(int reg
)
case CL_INIT_STATE
: s
="cl_init_state";break;
case CL_CFG_REG
: s
="cl_cfg_reg";break;
case CL_SERDES_CFG
: s
="cl_serdes_cfg";break;
case CL_SER_INVPAIR
: s
="cl_ser_invpair";break;
case CL_TEST_CFG
: s
="cl_test_cfg";break;
case CL_ERROR_STAT
: s
="cl_error_stat";break;
default: s
="Illegal LFU region register"; break;
static void ssi_init(config_dev_t
* config_devp
)
config_proc_t
* config_procp
;
bool_t zambezi_present
= false;
* Note that for VF, each node has its own SSI region and all of them
* are addressed by the same physical address(0xFF.0000.0000 to
* 0xFF.0FFF.FFFF).(Note that this does not refer to the ROM part of the
* SSI, which *really* is a single entity shared by the different nodes.
* Address range for the ROM is FF.F000.0000 to FF.FFFF.FFFF and its
* config and setup is taken care of by an entry in the config file.)
* Nodes can only access their local SSI regions and not those of other
* nodes. So the domain structure's addressmap only contains that
* physical address(0xFF.0000.0000 to 0xFF.0FFF.FFFF) and it is up to the
* init and access routines to correctly map that PA to the correct
* This is unlike NCU, CCU, MCU etc where there is a local CSR access
* address which is common for all nodes and basically is translated by
* hw to talk to originating node's address space AND remote CSR access
* address which allows any node to access any other node's address
* space. So in this case the domain address map will contain both the
* local CSR address space as well as the remote CSR address space.
domainp
= config_devp
->domainp
;
devp
= domainp
->device
.listp
;
for (i
= 0; i
< domainp
->device
.count
; i
++) {
if (streq(devp
->dev_typep
->dev_type_namep
, "zambezi")) {
for (i
= 0; i
< domainp
->procs
.count
; i
++) {
config_procp
= LIST_ENTRY(domainp
->procs
, i
);
npp
= (ss_proc_t
*)config_procp
->procp
;
/* and init the magic ssi location with node_id and way info */
node_id
= config_procp
->proc_id
;
extern_hub
= zambezi_present
? 1:0;
mode_shift
= 11 - domainp
->procs
.count
;
val
= 1<<mode_shift
| extern_hub
<<6 | node_id
<<4 | 0xf;
npp
->ssip
->magic_ssi
= val
;
npp
= (ss_proc_t
*)config_devp
->devp
;
/*
 * Initialise the NCU pseudo device: create its lock and set the NCU
 * registers to their power-on defaults (NCU spec, v0.99).
 * NOTE(review): the extraction dropped the local declarations, the
 * ncup assignment and loop braces; they are reconstructed here. Verify
 * against the original that no further register defaults (e.g.
 * core_avail) were dropped between the visible assignments.
 */
static void
ncu_init(config_dev_t *config_devp)
{
    ss_proc_t *npp;
    ncu_t *ncup;
    int device;
    int i;

    npp = (ss_proc_t *)config_devp->devp;
    ncup = npp->ncup;

    pthread_mutex_init(&ncup->ncu_lock, NULL);

    /* setup init value (NCU spec, v0.99) */
    for (device = 0; device < NCU_DEV_MAX; device++) {
        ncup->regs.int_man[device] = 0x0;
    }
    ncup->regs.mondo_int_vec = 0x0;
    ncup->regs.ser_num = 0xdeadbeef;
    ncup->regs.efu_stat = MASK64(63,0);
    ncup->regs.bank_enb = 0xff;
    ncup->regs.bank_enb_stat = 0x3cf;
    ncup->regs.l2_idx_hash_en_stat = false;
    ncup->regs.pcie_a_mem32_offset_base = 0x0;
    ncup->regs.pcie_a_mem32_offset_mask = MASK64(39,36);
    ncup->regs.pcie_a_mem64_offset_base = 0x0;
    ncup->regs.pcie_a_mem64_offset_mask = MASK64(39,36);
    ncup->regs.pcie_a_iocon_offset_base = 0x0;
    ncup->regs.pcie_a_iocon_offset_mask = MASK64(39,36);
    ncup->regs.pcie_a_fsh = 0x0;
    ncup->regs.soc_esr = 0x0;
    ncup->regs.soc_log_enb = 0x1fffffff;
    ncup->regs.soc_intr_enb = 0x0;
    ncup->regs.soc_err_inject = 0x0;
    ncup->regs.soc_fatal_enb = 0x0;
    ncup->regs.soc_sii_err_syndrome = 0x0;
    ncup->regs.soc_ncu_err_syndrome = 0x0;

    for (i = 0; i < NCU_TARGETS; i++) {
        ncup->regs.mondo_int_data0[i] = 0x0;
        ncup->regs.mondo_int_data1[i] = 0x0;
        ncup->regs.mondo_int_busy[i] = NCU_MONDO_INT_BUSY;
    }
}
/*
 * Reset the HW debug unit registers to their power-on defaults.
 * (Local declaration and braces reconstructed — dropped by extraction.)
 */
static void
hwdbg_init(config_dev_t *config_devp)
{
    ss_proc_t *npp;

    npp = (ss_proc_t *)config_devp->devp;
    npp->hwdbgp->debug_port_config = 0;
    npp->hwdbgp->io_quiesce_control = 0;
}
/*
 * Reset the reset-control unit (RCU) registers to their power-on
 * defaults.
 * (Local declaration and braces reconstructed — dropped by extraction.)
 */
static void
rcu_init(config_dev_t *config_devp)
{
    ss_proc_t *npp;

    npp = (ss_proc_t *)config_devp->devp;
    npp->rcup->reset_gen = 0;
    npp->rcup->reset_status = 0x4;
    npp->rcup->reset_source = 0x10;
    npp->rcup->comt_divs = 0;
    npp->rcup->comt_cfg = 0x23;
    npp->rcup->clk_steer = 0;
    npp->rcup->comt_lock_time = 0x0;
}
/*
 * JTAG pseudo device init hook.
 * NOTE(review): no body statements are visible in the extracted source;
 * modelled as a no-op — confirm against the original that nothing was
 * dropped here.
 */
static void
jtag_init(config_dev_t *config_devp)
{
}
/*
 * Reset the NCX registers to their power-on defaults.
 * (Local declaration and braces reconstructed — dropped by extraction.)
 */
static void
ncx_init(config_dev_t *config_devp)
{
    ss_proc_t *npp;

    npp = (ss_proc_t *)config_devp->devp;
    npp->ncxp->sys_mode = 0x0;
    npp->ncxp->tick_en_slow = 0x0;
    npp->ncxp->slow_pulse_wait = 0x0;
    npp->ncxp->twr = 0xfffffc00; /* Table 12-9, VF PRM 0.1 */
}
/*
 * Reset the COU per-link registers to their power-on defaults.
 * (Local declarations and braces reconstructed — dropped by extraction.)
 */
static void
cou_init(config_dev_t *config_devp)
{
    ss_proc_t *npp;
    int link;

    npp = (ss_proc_t *)config_devp->devp;
    for (link = 0; link < COU_LINK_MAX; link++) {
        npp->coup->cou_err_enable[link] = 0x0;
        npp->coup->cou_esr[link] = 0x0;
        npp->coup->cou_ear[link] = 0x0;
    }
}
/*
 * Reset the LFU per-link registers to their power-on defaults.
 * (Local declarations and braces reconstructed — dropped by extraction.)
 */
static void
lfu_init(config_dev_t *config_devp)
{
    ss_proc_t *npp;
    int link;

    npp = (ss_proc_t *)config_devp->devp;
    for (link = 0; link < LFU_MAX_LINKS; link++) {
        npp->lfup->cl_init_state[link] = 0x0;
        npp->lfup->cl_cfg_reg[link] = 0x28;
        npp->lfup->cl_serdes_cfg[link] = 0x1c1000000;
        npp->lfup->cl_ser_invpair[link] = 0x0;
        npp->lfup->cl_test_cfg[link] = 0x3;
        npp->lfup->cl_error_stat[link] = 0x0;
    }
}
/*
 * Reset the clock control unit (CCU) registers, including the RNG
 * control state, to their power-on defaults.
 * NOTE(review): the extraction dropped the local declarations and the
 * clockp assignment; reconstructed here as npp->clockp — confirm
 * against the original.
 */
static void
ccu_init(config_dev_t *config_devp)
{
    ss_proc_t *npp;
    ccu_t *clockp;

    npp = (ss_proc_t *)config_devp->devp;
    clockp = npp->clockp;

    clockp->control = 0x1002011c1; /* table 11.1, section 11.1 of N2 PRM rev 1.0 */

    clockp->rand_state.ctl = (1 << RC_MODE_SHIFT) |
        (7 << RC_NOISE_CELL_SEL_SHIFT);
    /*
     * 0x3e is from N2 CCU MAS v1.61 11/01/05, Table 8.2. This is
     * decimal 63, which is way too small. But that's what the HW
     * does, so we model it here.
     */
    clockp->rand_state.ctl |= 0x3e << RC_DELAY_SHIFT;
}
static void l2c_init(config_dev_t
* config_devp
)
config_proc_t
* config_procp
;
* Note that for VF, each node has its own L2CSR region and all of them
* are addressed by the same physical address(0xA0.0000.0000 to
* 0xBF.FFFF.FFFF). Nodes can only access their local L2CSR regions and
* not those of other nodes. So the domain structure's addressmap only
* contains that physical address(0xA0.0000.0000 to 0xBF.FFFF.FFFF) and
* it is up to the init and access routines to correctly map that PA to
* the correct node's L2CSR region. This is unlike NCU, CCU, MCU etc
* where there is a local CSR access address which is common for all
* nodes and basically is translated by hw to talk to originating node's
* address space AND remote CSR access address which allows any node to
* access any other node's address space.
domainp
= config_devp
->domainp
;
for (i
= 0; i
< domainp
->procs
.count
; i
++) {
config_procp
= LIST_ENTRY(domainp
->procs
, i
);
npp
= (ss_proc_t
*)config_procp
->procp
;
for (bank
=0; bank
<npp
->num_l2banks
; bank
++) {
l2p
->control
[bank
] = L2_DIS
;
l2p
->bist_ctl
[bank
] = 0x0;
l2p
->error_enable
[bank
] = 0xfffffc00; /* Table 12-3, VF PRM 0.1 */
l2p
->error_status
[bank
] = 0x0;
l2p
->error_status_ii
[bank
]= 0x0;
l2p
->error_address
[bank
]= 0x0;
l2p
->error_inject
[bank
] = 0x0;
l2p
->diag_datap
= Xmalloc(L2_DATA_SIZE
);
l2p
->diag_tagp
= Xmalloc(L2_TAG_SIZE
);
l2p
->diag_vuadp
= Xmalloc(L2_VUAD_SIZE
);
for (idx
=0; idx
<L2_DATA_SIZE
/8; idx
++) {
l2p
->diag_datap
[idx
] = 0xdeadbeef;
for (idx
=0; idx
<L2_TAG_SIZE
/8; idx
++) {
l2p
->diag_tagp
[idx
] = 0xdeadbeef;
for (idx
=0; idx
<L2_VUAD_SIZE
/8; idx
++) {
l2p
->diag_vuadp
[idx
] = 0xdeadbeef;
npp
= (ss_proc_t
*)config_devp
->devp
;
for (bank
=0; bank
<npp
->num_l2banks
; bank
++) {
l2p
->control
[bank
] = L2_DIS
;
l2p
->bist_ctl
[bank
] = 0x0;
l2p
->error_enable
[bank
] = 0x0;
l2p
->error_status
[bank
] = 0x0;
l2p
->error_address
[bank
]= 0x0;
l2p
->error_inject
[bank
] = 0x0;
l2p
->diag_datap
= Xmalloc(L2_DATA_SIZE
);
l2p
->diag_tagp
= Xmalloc(L2_TAG_SIZE
);
l2p
->diag_vuadp
= Xmalloc(L2_VUAD_SIZE
);
for (idx
=0; idx
<L2_DATA_SIZE
/8; idx
++) {
l2p
->diag_datap
[idx
] = 0xdeadbeef;
for (idx
=0; idx
<L2_TAG_SIZE
/8; idx
++) {
l2p
->diag_tagp
[idx
] = 0xdeadbeef;
for (idx
=0; idx
<L2_VUAD_SIZE
/8; idx
++) {
l2p
->diag_vuadp
[idx
] = 0xdeadbeef;
static void mcu_init(config_dev_t
* config_devp
)
npp
= (ss_proc_t
*)config_devp
->devp
;
for (bidx
=0; bidx
<npp
->num_mbanks
; bidx
++) {
dbp
= &(npp
->mbankp
[bidx
]);
dbp
->cas_addr_width
= 0xb;
dbp
->ras_addr_width
= 0xf;
dbp
->refresh_freq
= 0x514;
dbp
->refresh_counter
= 0x0;
dbp
->ext_wr_mode1
= 0x18;
dbp
->eight_bank_mode
= 0x1;
dbp
->sel_lo_addr_bits
= 0x0;
dbp
->single_chnl_mode
= 0x0;
dbp
->failover_status
= 0x0;
dbp
->failover_mask
= 0x0;
dbp
->power_down_mode
= 0x0;
dbp
->fbd_chnl_state
.val
= 0x0;
for (i
=0; i
<MAX_AMBS
; i
++){
dbp
->fbd_chnl_state
.ambstate
[i
] = 0x0;
dbp
->amb
[i
].vid_did
= 0x00E01033; /* use e0 for now until we find valid nec did */
dbp
->amb
[i
].fbds
= 0x0; /* set all error stat bits to 0 */
dbp
->amb
[i
].emask
= 0x36;
dbp
->amb
[i
].psbyte3_0
= 0x0;
dbp
->amb
[i
].psbyte7_4
= 0x0;
dbp
->amb
[i
].psbyte11_8
= 0x0;
dbp
->amb
[i
].psbyte13_12
= 0x0;
dbp
->amb
[i
].c2dincrcur_cmd2datanxt
= 0x0;
dbp
->amb
[i
].dareftc
= ((0x4e << 16) | 0x0c30);
dbp
->amb
[i
].mtr_dsreftc
= ((1 << 16) |
dbp
->amb
[i
].drc
= ((0x1 << 18) | /* set default value */
dbp
->amb
[i
].dcalcsr
= 0x0;
dbp
->amb
[i
].dcaladdr
= 0x0;
dbp
->amb
[i
].ddr2odtc
= 0x0;
dbp
->fbd_fast_reset_flag
= 0x0;
dbp
->fbd_chnl_reset
= 0x0;
dbp
->ts1_sb_nb_mapping
= 0x0;
dbp
->ts1_test_parameter
= 0x0;
dbp
->ts3_failover_config
= 0x0;
dbp
->electrical_idle_detected
= 0x0;
dbp
->disable_state_period
= 0x3f;
dbp
->disable_state_period_done
= 0x0;
dbp
->calibrate_state_period
= 0x0;
dbp
->calibrate_state_period_done
= 0x0;
dbp
->training_state_min_time
= 0xff;
dbp
->training_state_done
= 0x0;
dbp
->training_state_timeout
= 0xff;
dbp
->testing_state_done
= 0x0;
dbp
->testing_state_timeout
= 0xff;
dbp
->polling_state_done
= 0x0;
dbp
->polling_state_timeout
= 0xff;
dbp
->config_state_done
= 0x0;
dbp
->config_state_timeout
= 0xff;
dbp
->dram_per_rank_cke
= 0xffff;
dbp
->l0s_duration
= 0x2a;
dbp
->chnl_sync_frame_freq
= 0x2a;
dbp
->chnl_read_lat
= 0xffff;
dbp
->chnl_capability
= 0x0;
dbp
->loopback_mode_cntl
= 0x0;
dbp
->serdes_config_bus
= 0x0;
dbp
->serdes_invpair
= 0x0;
dbp
->config_reg_access_addr
= 0x0;
dbp
->config_reg_access_data
= 0x0;
dbp
->ibist_nbfib_ctl
= 0x03c01e478LL
;
dbp
->ibist_sbfib_ctl
= 0x23c01e478LL
;
* Performance counter section 10.3 of PRM 0.9.1
* Error handling section 25.12 of PRM 1.2
dbp
->error_address
= 0x0;
dbp
->error_counter
= 0x0;
dbp
->error_location
= 0x0;
* Power management section 26.3 of PRM 0.9.1
dbp
->open_bank_max
= 0x1ffff;
dbp
->prog_time_cntr
= 0xffff;
* Hardware debug section 29.1 of PRM 0.9.1
* Access SSI registers (mapped at offset = 0xff00000000)
/*
 * ssi_access() - handle 64-bit loads/stores to the per-node SSI register
 * region (PA 0xFF.0000.0000-0xFF.0FFF.FFFF) and the "magic" SSI location
 * used by reset to set up the sys_mode register.
 *
 * NOTE(review): this chunk is a lossy extraction of the original file —
 * braces, local declarations (npp, reg, val, ...) and some statements are
 * missing, so only comments were added here; all code text is untouched.
 */
static bool_t
ssi_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
* Note that for VF, each node has its own SSI region and all of them
* are addressed by the same physical address(0xFF.0000.0000 to
* 0xFF.0FFF.FFFF).(Note that this does not refer to the ROM part of the
* SSI, which *really* is a single entity shared by the different nodes.
* Address range for the ROM is FF.F000.0000 to FF.FFFF.FFFF and its
* config and setup is taken care of by an entry in the config file.)
* Nodes can only access their local SSI regions and not those of other
* nodes. So the domain structure's addressmap only contains that
* physical address(0xFF.0000.0000 to 0xFF.0FFF.FFFF) and it is up to
* the init and access routines to correctly map that PA to the correct
* This is unlike NCU, CCU, MCU etc where there is a local CSR access
* address which is common for all nodes and basically is translated by
* hw to talk to originating node's address space AND remote CSR access
* address which allows any node to access any other node's address
* space. So in this case the domain address map will contain both the
* local CSR address space as well as the remote CSR address space.
/* Redirect the common SSI PA to the correct node*/
npp
= (ss_proc_t
*)sp
->config_procp
->procp
;
/* for accessing magic SSI loc by reset so as to set up sys_mode reg */
if (config_addrp
->baseaddr
==MAGIC_SSI
){
EXEC_WARNING( ("Attempted write to reserved magic field in ssi"));
else if (op
== MA_ldu64
) {
/* skip the write-back when the destination is %g0 (always zero) */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
*regp
= npp
->ssip
->magic_ssi
;
/* or else this is a normal SSI register access */
config_addrp
= npp
->ssi_devp
->addrp
;
/* only 64-bit loads/stores are supported for SSI registers */
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
/* mask off bits that are reserved in the register being written */
RSVD_MASK(sp
, (MASK64(24, 0)), val
, 0, reg
);
RSVD_MASK(sp
, (MASK64(1, 0)), val
, 0, reg
);
/* illegal reg - an error */
/* illegal reg - an error */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/*
 * hwdbg_access() - handle 64-bit loads/stores to the hardware-debug
 * registers (DEBUG_PORT_CONFIG, IO_QUIESCE_CONTROL).
 *
 * NOTE(review): lossy extraction — the switch skeleton and local
 * declarations (npp, hwdbgp, val, reg) are missing from this view;
 * only comments were added, code text untouched.
 */
static bool_t
hwdbg_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
/* only 64-bit loads/stores are supported */
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
/* DEBUG_PORT_CONFIG write: bits 63:62 and 9:0 are writable */
RSVD_MASK(sp
, (MASK64(63,62)|MASK64(9,0)), val
, 0, reg
);
lprintf(sp
->gid
, "DEBUG_PORT_CONFIG addr=0x%llx being written with val=0x%llx\n", reg
, val
);
hwdbgp
->debug_port_config
= val
;
/* IO_QUIESCE_CONTROL write: only bits 3:0 are writable */
RSVD_MASK(sp
, MASK64(3,0), val
, 0, reg
);
lprintf(sp
->gid
, "IO_QUIESCE_CONTROL addr=0x%llx being written with val=0x%llx\n", reg
, val
);
hwdbgp
->io_quiesce_control
= val
;
/* illegal reg - an error */
val
= hwdbgp
->debug_port_config
;
val
= hwdbgp
->io_quiesce_control
;
/* illegal reg - an error */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/*
 * rcu_access() - handle 64-bit loads/stores to the Reset Control Unit
 * CSRs. A local-CSR PA (PHYS_ADDR_RCU) is first rewritten to this
 * node's remote alias; a remote-CSR PA is matched back to its node_id.
 *
 * NOTE(review): lossy extraction — switch/case labels, braces and local
 * declarations (node_id, domainp, pa, idx, npp, rcup, val, reg) are
 * missing from this view; only comments were added, code untouched.
 */
static bool_t
rcu_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
if (config_addrp
->baseaddr
== PHYS_ADDR_RCU
) {
* if local RCU CSR access, need to convert to Node X(this node)
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
pa
= PHYS_ADDR_RCU_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
* If remote RCU CSR access, use config_addrp to get at the node_id.
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "RCU");
domainp
= config_addrp
->config_devp
->domainp
;
/* scan the domain's processors for the node whose remote alias matches */
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (config_addrp
->baseaddr
== PHYS_ADDR_RCU_REMOTE(node_id
))
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
* PRM states that software may only write a 1
* to one of the 3 reset gen bit fields at a time.
* .-------------------------------------------------------,
* | .. | pb_as_dbgr| dbr_gen | rsvd | xir_gen | wmr_gen |
* `-------------------------------------------------------'
RSVD_MASK(sp
, (MASK64(4,3) | MASK64(1,0)), val
, 0, reg
);
* PRM states that software may only write a 1
* to one of the 3 bit fields at a time.
* .--------------------------------------------,
* | .. | dbr_gen | rsvd | xir_gen | wmr_gen |
* `--------------------------------------------'
RSVD_MASK(sp
, (MASK64(3,3) | MASK64(1,0)), val
, 0, reg
);
/* enforce "at most one reset-gen bit set per write" */
switch ((val
& (MASK64(3,0)))) {
case 0x0: /* no gen bits set */
case 0x1: /* wmr_gen set */
case 0x2: /* xir_gen set */
case 0x8: /* dbr_gen set */
fatal("[0x%llx] (pc=0x%llx)\tAttempted write to more than "\
"one reset generation bit in RESET_GEN val=0x%llx",
* update the RESET_SOURCE register if dbr_gen,
* xir_gen or wmr_gen are set in RESET_GEN.
* The lower 4 bits of both registers are the
rcup
->reset_source
|= (val
& (MASK64(3,3) | MASK64(1,1) | MASK64(0,0)));
RSVD_MASK(sp
, (MASK64(3,1)), val
, 0, reg
);
rcup
->reset_status
= val
;
RSVD_MASK(sp
, (MASK64(1,0) | MASK64(18,3)), val
, 0, reg
);
RSVD_MASK(sp
, (MASK64(1,0) | MASK64(15,3)), val
, 0, reg
);
* All the non-reserved bits are W1C so we update
* the reset_source accordingly
rcup
->reset_source
&= ~(val
);
RSVD_MASK(sp
, (MASK64(23,0)), val
, 0, reg
);
RSVD_MASK(sp
, (MASK64(21,0)), val
, 0, reg
);
RSVD_MASK(sp
, (MASK64(3,0)), val
, 0, reg
);
RSVD_MASK(sp
, (MASK64(31,0)), val
, 0, reg
);
rcup
->comt_lock_time
= val
;
/* illegal reg - an error */
val
= rcup
->reset_status
;
val
= rcup
->reset_source
;
val
= rcup
->comt_lock_time
;
/* illegal reg - an error */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
* Access registers in NCU Unit (mapped at offset = 0x8000000000)
/*
 * ncu_access() - handle 64-bit loads/stores to the Non-Cacheable Unit
 * CSRs (mapped at 0x8000000000). Local PA is rewritten to this node's
 * remote alias; reads/writes are serialized on ncup->ncu_lock.
 *
 * NOTE(review): lossy extraction — the write/read switch skeletons,
 * many case labels and the declarations of node_id, domainp, pa, idx,
 * self, v9p, nsp, ncup, reg, val, target are missing from this view;
 * only comments were added, all code text untouched.
 */
static bool_t
ncu_access(simcpu_t
*sp
, config_addr_t
* config_addrp
,
tpaddr_t off
, maccess_t op
, uint64_t * regp
)
if (config_addrp
->baseaddr
== PHYS_ADDR_NCU
) {
* If local NCU CSR access, need to convert to Node X(this node) NCU CSR
* address. Use the simcpu to get the correct node_id and then get the
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
pa
= PHYS_ADDR_NCU_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
* If remote NCU CSR access, use config_addrp to get at the node_id.
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "NCU");
domainp
= config_addrp
->config_devp
->domainp
;
/* match the remote alias base address back to its owning node */
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (config_addrp
->baseaddr
== PHYS_ADDR_NCU_REMOTE(node_id
))
self
= (node_id
== sp
->config_procp
->proc_id
) ? true : false;
* FIXME: For the moment we only support 64bit accesses to registers.
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
/* reject misaligned (non 8-byte) offsets */
if (off
& 7) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
nsp
= v9p
->impl_specificp
;
reg
= off
& NCU_REG_MASK
;
/* collapse per-target mondo register aliases onto their base offset */
else if (UINT64_RANGE_CHECK(MONDO_INT_DATA0
, reg
, MONDO_INT_ADATA0
))
reg
&= ~NCU_INT_TGTOFFSET_MASK
;
else if (UINT64_RANGE_CHECK(MONDO_INT_BUSY
, reg
, MONDO_INT_ABUSY
))
reg
&= ~NCU_INT_TGTOFFSET_MASK
;
* Fast-path the serial number read - it is used in the N2
* hypervisor CPU yield API.
if (op
== MA_ldu64
&& reg
== SER_NUM
) {
val
= ncup
->regs
.ser_num
;
DBGDEV(lprintf(sp
->gid
, "Read NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, ncu_reg_name(reg
), off
, val
););
/* skip write-back when the destination register is %g0 */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/* all remaining accesses are done under the NCU lock */
pthread_mutex_lock( &ncup
->ncu_lock
);
/* store val into ncup->regs._n after rejecting reserved-bit writes */
#define ASSIGN_NCU(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
idx
= (off
>> 3) & (NCU_DEV_MAX
-1);
ASSIGN_NCU( int_man
[idx
], MASK64(13,8)|MASK64(5,0) );
ASSIGN_NCU( mondo_int_vec
, MASK64(5,0) );
case L2_IDX_HASH_EN_STATUS
:
EXEC_WARNING( ("Attempted write to RO register in NCU:"
"Write 0x%llx to register %s (offset 0x%x)",
val
, ncu_reg_name(reg
), reg
) );
pthread_mutex_unlock( &ncup
->ncu_lock
);
case PCIE_A_MEM32_OFFSET_BASE
:
ASSIGN_NCU( pcie_a_mem32_offset_base
, MASK64(63,63)|MASK64(35,24) );
/* re-derive the PIU address mapping after any base/mask update */
niagara2_pcie_mapping(sp
, ncup
, PIU_REGION_MEM32
);
case PCIE_A_MEM32_OFFSET_MASK
:
ASSIGN_NCU( pcie_a_mem32_offset_mask
, MASK64(39,24) );
niagara2_pcie_mapping(sp
, ncup
, PIU_REGION_MEM32
);
case PCIE_A_MEM64_OFFSET_BASE
:
ASSIGN_NCU( pcie_a_mem64_offset_base
, MASK64(63,63)|MASK64(35,24) );
niagara2_pcie_mapping(sp
, ncup
, PIU_REGION_MEM64
);
case PCIE_A_MEM64_OFFSET_MASK
:
ASSIGN_NCU( pcie_a_mem64_offset_mask
, MASK64(39,24) );
niagara2_pcie_mapping(sp
, ncup
, PIU_REGION_MEM64
);
case PCIE_A_IOCON_OFFSET_BASE
:
ASSIGN_NCU( pcie_a_iocon_offset_base
, MASK64(63,63)|MASK64(35,24) );
niagara2_pcie_mapping(sp
, ncup
, PIU_REGION_CFGIO
);
case PCIE_A_IOCON_OFFSET_MASK
:
ASSIGN_NCU( pcie_a_iocon_offset_mask
, MASK64(39,24) );
niagara2_pcie_mapping(sp
, ncup
, PIU_REGION_CFGIO
);
FIXME_WARNING(("NCU register %s (offset 0x%x) not implemented\n",
ncu_reg_name(reg
), reg
) );
ncup
->regs
.pcie_a_fsh
= val
;
ASSIGN_NCU( soc_esr
, MASK64(63,63)|NCU_SOC_MASK
);
ASSIGN_NCU( soc_log_enb
, MASK64(42,0) );
case SOC_INTERRUPT_ENABLE
:
ASSIGN_NCU( soc_intr_enb
, MASK64(42,0) );
case SOC_ERROR_INJECTION
:
ASSIGN_NCU( soc_err_inject
, MASK64(42,0) );
case SOC_FATAL_ERROR_ENABLE
:
ASSIGN_NCU( soc_fatal_enb
, MASK64(42,0) );
case SOC_PENDING_ERROR_STATUS
:
ASSIGN_NCU( soc_esr
, MASK64(63,63)|NCU_SOC_MASK
);
case SOC_SII_ERROR_SYNDROME
:
ASSIGN_NCU( soc_sii_err_syndrome
, MASK64(63,63)|MASK64(58,0) );
case SOC_NCU_ERROR_SYNDROME
:
ASSIGN_NCU( soc_ncu_err_syndrome
, MASK64(63,58)|MASK64(55,0) );
/* per-target mondo register: index derived from offset bits 5:3 */
target
= (off
>> 3) & (NCU_TARGETS
-1);
ASSIGN_NCU( mondo_int_busy
[target
], MASK64(6,6) );
/* Note from VF PRM author : N2's cpu-id based alias
* registers work on VF only when access comes from
* local node. A thread on remote-node should not access
* these registers. Since interrupts can be delivered to
* local-node only, assumption is that interrupted
* thread(or some other thread on same node) will read
* these regs and message-pass to remote node, if needed.
fatal("[0x%llx] (pc=0x%llx)\tIllegal write to "
"NCU alias register of remote node: "
"Write 0x%llx to register offset 0x%x of node %d.\n",
sp
->gid
, sp
->pc
, val
, reg
, node_id
);
ASSIGN_NCU( mondo_int_busy
[target
], MASK64(6,6) );
EXEC_WARNING( ("Attempted write to illegal register in NCU:"
"Write 0x%llx to register offset 0x%x on node %d\n",
goto access_failed
; /* illegal reg - an error */
DBGDEV(lprintf(sp
->gid
, "Write NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, ncu_reg_name(reg
), off
, val
););
EXEC_WARNING( ("Attempted write to reserved field in NCU:"
"Write 0x%llx to register %s (offset 0x%x) on node %d",
val
, ncu_reg_name(reg
), reg
, node_id
) );
pthread_mutex_unlock( &ncup
->ncu_lock
);
/* ---- read side: fetch val from the selected register ---- */
idx
= (off
>> 3) & (NCU_DEV_MAX
-1);
val
= ncup
->regs
.int_man
[idx
];
val
= ncup
->regs
.mondo_int_vec
;
val
= ncup
->regs
.ser_num
;
val
= npp
->cmp_regs
.core_enable_status
;
val
= ncup
->regs
.efu_stat
;
val
= ncup
->regs
.bank_enb
;
val
= ncup
->regs
.bank_enb_stat
;
case PCIE_A_MEM32_OFFSET_BASE
:
val
= ncup
->regs
.pcie_a_mem32_offset_base
;
case PCIE_A_MEM32_OFFSET_MASK
:
val
= ncup
->regs
.pcie_a_mem32_offset_mask
;
case PCIE_A_MEM64_OFFSET_BASE
:
val
= ncup
->regs
.pcie_a_mem64_offset_base
;
case PCIE_A_MEM64_OFFSET_MASK
:
val
= ncup
->regs
.pcie_a_mem64_offset_mask
;
case PCIE_A_IOCON_OFFSET_BASE
:
val
= ncup
->regs
.pcie_a_iocon_offset_base
;
case PCIE_A_IOCON_OFFSET_MASK
:
val
= ncup
->regs
.pcie_a_iocon_offset_mask
;
case L2_IDX_HASH_EN_STATUS
:
FIXME_WARNING(("NCU register %s (offset 0x%x) not implemented\n",
ncu_reg_name(reg
), reg
) );
val
= ncup
->regs
.pcie_a_fsh
;
val
= ncup
->regs
.soc_esr
;
val
= ncup
->regs
.soc_log_enb
;
case SOC_INTERRUPT_ENABLE
:
val
= ncup
->regs
.soc_intr_enb
;
case SOC_ERROR_INJECTION
:
val
= ncup
->regs
.soc_err_inject
;
case SOC_FATAL_ERROR_ENABLE
:
val
= ncup
->regs
.soc_fatal_enb
;
case SOC_PENDING_ERROR_STATUS
:
val
= ncup
->regs
.soc_esr
;
case SOC_SII_ERROR_SYNDROME
:
val
= ncup
->regs
.soc_sii_err_syndrome
;
case SOC_NCU_ERROR_SYNDROME
:
val
= ncup
->regs
.soc_ncu_err_syndrome
;
target
= (off
>> 3) & (NCU_TARGETS
-1);
val
= ncup
->regs
.mondo_int_data0
[target
];
target
= (off
>> 3) & (NCU_TARGETS
-1);
val
= ncup
->regs
.mondo_int_data1
[target
];
/* Note from VF PRM author : N2's cpu-id based alias
* registers work on VF only when access comes from
* local node. A thread on remote-node should not access
* these registers. Since interrupts can be delivered to
* local-node only, assumption is that interrupted
* thread(or some other thread on same node) will read
* these regs and message-pass to remote node, if needed.
fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
"NCU alias register of remote node: "
"Read register offset 0x%x on node %d.\n",
sp
->gid
, sp
->pc
, reg
, node_id
);
val
= ncup
->regs
.mondo_int_data0
[target
];
/* see comment block above */
fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
"NCU alias register of remote node: "
"Read register offset 0x%x on node %d.\n",
sp
->gid
, sp
->pc
, reg
, node_id
);
val
= ncup
->regs
.mondo_int_data1
[target
];
target
= (off
>> 3) & (NCU_TARGETS
-1);
val
= ncup
->regs
.mondo_int_busy
[target
];
/* see comment block above */
fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
"NCU alias register of remote node: "
"Read register offset 0x%x on node %d.\n",
sp
->gid
, sp
->pc
, reg
, node_id
);
val
= ncup
->regs
.mondo_int_busy
[target
];
goto access_failed
; /* illegal reg - an error */
DBGDEV(lprintf(sp
->gid
, "Read NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, ncu_reg_name(reg
), off
, val
););
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
pthread_mutex_unlock( &ncup
->ncu_lock
);
pthread_mutex_unlock( &ncup
->ncu_lock
);
* Access registers in Clock Unit (mapped at offset = 0x8300000000)
/*
 * ccu_access() - handle 64-bit loads/stores to the Clock Control Unit
 * CSRs (mapped at 0x8300000000), including the RNG control/data
 * registers (INTERNAL_BUILD only).
 *
 * NOTE(review): lossy extraction — switch skeletons, braces and the
 * declarations of node_id, domainp, pa, npp, clockp, reg, val are
 * missing from this view; only comments added, code text untouched.
 */
static bool_t
ccu_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
int osc
; /* oscillator number for RNG */
int freqidx
; /* frequency index of selected oscillator in RNG */
if (config_addrp
->baseaddr
== PHYS_ADDR_CCU
) {
* if local CCU CSR access, need to convert to Node X(this node) CCU CSR
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
pa
= PHYS_ADDR_CCU_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
/* check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "CCU");
* FIXME: For the moment we only support 64bit accesses to registers.
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
reg
= off
& ~0xfULL
; /* collapse to basic register groups */
/* store val into clockp->_n after rejecting reserved-bit writes */
#define ASSIGN_CLK(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
ASSIGN_CLK( control
, MASK64(33,0) );
FIXME_WARNING(("Clock register %s (offset 0x%x) not implemented\n",
ccu_reg_name(reg
), reg
) );
/* only the lower 25 bits are used */
ASSIGN_CLK(rand_state
.ctl
, RC_REG_MASK
);
/* decode which noise cell (oscillator) is selected in RNG control */
switch ((clockp
->rand_state
.ctl
>>
RC_NOISE_CELL_SEL_SHIFT
) & RC_NOISE_CELL_SEL_MASK
) {
osc
= 0; /* special: no osc. selected */
* If more than one is selected, we
* don't set anything. It is not clear
* that this is exactly in line with
clockp
->rand_state
.freqidx
[osc
-1] =
(clockp
->rand_state
.ctl
>>
RC_ANALOG_SEL_SHIFT
) & RC_ANALOG_SEL_MASK
;
/* illegal reg - an error */
EXEC_WARNING( ("Attempted write to reserved field in clock unit:"
"Write 0x%llx to register %s (offset 0x%x)",
val
, ccu_reg_name(reg
), reg
) );
#define RETRIEVE_CLK(_n, _m) do { val = ((clockp->_n) & (_m)); } while (0)
RETRIEVE_CLK( control
, MASK64(33,0) );
#if INTERNAL_BUILD /* { */
#define RNG_POLY 0x231dcee91262b8a3ULL
#define N2_RNG_OSC_DUTY_FACTOR 0.2
if (((clockp
->rand_state
.ctl
>> RC_MODE_SHIFT
) & 1) == 0) {
/* Keeping this code, because I think Axis works this way */
* Mode == 0: Shift out "raw" noise
* cells, one value every 64
* clocks. For now we assume the
* following noise cell frequencies,
* and that everything is phase
* See N2 CCU MAS v.1.61, Table 7.
rand_control
.fields
.rc_noise_cell_sel
) {
val
= 0xffffffff00000000ULL
;
val
= 0xf0f0f0f0f0f0f0f0ULL
;
val
= 0xaaaaaaaaaaaaaaaaULL
;
val
= 0xffffffff00000000ULL
^
#else /* !OLD_CONSTANT_WAY */
/* osc = clockp->rand_state.osc; */
for (osc
= 1; osc
<= 3; ++osc
) {
if (((clockp
->rand_state
.ctl
>>
RC_NOISE_CELL_SEL_SHIFT
) &
RC_NOISE_CELL_SEL_MASK
) &
rand_state
.freqidx
[osc
-1];
printf("osc=%d, freqidx=%d, "
frequency
[freqidx
][osc
-1],
frequency
[freqidx
][osc
-1],
#endif /* !OLD_CONSTANT_WAY */
* Mode is 1; The LFSR is in feedback mode.
if ((clockp
->rand_state
.ctl
>>
RC_NOISE_CELL_SEL_SHIFT
) &
RC_NOISE_CELL_SEL_MASK
) {
* For now, if any noise cells
* are turned on, return a
val
= ((uint64_t)lrand48() << 32) |
* Deterministic test. The
* RNG does 2 more cycles than
* each read, the register is
* reality, delay+2 is only
* the minimum delay, but we
lfsr64_adv(RNG_POLY
, ~0ULL,
((clockp
->rand_state
.ctl
>>
RC_DELAY_SHIFT
) & RC_DELAY_MASK
) +
RETRIEVE_CLK(rand_state
.ctl
, RC_REG_MASK
);
#endif /* INTERNAL_BUILD } */
/* illegal reg - an error */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
* Access registers in JTAG area (mapped at offset = 0x9000000000)
/*
 * jtag_access() - handle 64-bit loads/stores to the JTAG/TAP register
 * area (mapped at 0x9000000000): cross-call dispatch, core enable /
 * running status (RW, W1S, W1C forms) and SOC error steering.
 *
 * NOTE(review): lossy extraction — switch skeletons and local
 * declarations (node_id, domainp, pa, idx, npp, reg, val) are missing
 * from this view; only comments added, code text untouched.
 */
static bool_t
jtag_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
if (config_addrp
->baseaddr
== PHYS_ADDR_JTAG
) {
* if local JTAG CSR access, need to convert to Node X(this node) JTAG
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
pa
= PHYS_ADDR_JTAG_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
* If remote JTAG CSR access, use config_addrp to get at the node_id.
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "JTAG");
domainp
= config_addrp
->config_devp
->domainp
;
/* match the remote alias base address back to its owning node */
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (config_addrp
->baseaddr
== PHYS_ADDR_JTAG_REMOTE(node_id
))
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
; /* target proc */
case INT_VECTOR_DISPATCH
:
RSVD_MASK(sp
, (MASK64(5,0) | MASK64(13,8)), val
, 0, reg
);
/* note sp=interrupt originator, npp=interrupt target */
niagara2_send_xirq(sp
, npp
, val
);
case ASI_CORE_ENABLE_STATUS
:
case ASI_CORE_RUNNING_STATUS
:
EXEC_WARNING( ("Attempted write to RO register in JTAG/TAP:"
"Write 0x%llx attempted to register %s (offset 0x%x)",
val
, jtag_reg_name(reg
), reg
) );
IMPL_WARNING(("%s: not supported in JTAG/TAP.\n",jtag_reg_name(reg
)));
case ASI_CORE_RUNNING_RW
:
* WS: according to the CMP PRM, writing a '1' to a bit will be ignored
* if the corresponding bit in the core enable reg is 0 (i.e., the
* corresponding virtual core is not enabled)
pthread_mutex_lock(&npp
->cmp_lock
);
npp
->cmp_regs
.core_running_status
= val
& npp
->cmp_regs
.core_enable_status
;
/* park/unpark strands to match the new running-status mask */
ss_change_exec_state(npp
, npp
->cmp_regs
.core_running_status
);
pthread_mutex_unlock(&npp
->cmp_lock
);
case ASI_CORE_RUNNING_W1S
:
* W1S: new_value = old_value | new_value;
pthread_mutex_lock(&npp
->cmp_lock
);
npp
->cmp_regs
.core_running_status
|= val
;
* According to the CMP PRM, writing a '1' to a bit will be ignored
* if the corresponding bit in the core enable reg is 0 (i.e., the
* corresponding virtual core is not enabled)
npp
->cmp_regs
.core_running_status
&= npp
->cmp_regs
.core_enable_status
;
* FIXME: need to check if the virtual core is attempting to park
* all the virtual cores (this is prevented by the hardware)
ss_change_exec_state(npp
, npp
->cmp_regs
.core_running_status
);
pthread_mutex_unlock(&npp
->cmp_lock
);
case ASI_CORE_RUNNING_W1C
:
* W1C: new_value = old_value & ~new_value;
pthread_mutex_lock(&npp
->cmp_lock
);
npp
->cmp_regs
.core_running_status
&= ~val
;
ss_change_exec_state(npp
, npp
->cmp_regs
.core_running_status
);
pthread_mutex_unlock(&npp
->cmp_lock
);
RSVD_MASK(sp
, MASK64(5,0), val
, 0, reg
);
npp
->ncup
->regs
.soc_err_steering
= val
;
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Write JTAG register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, jtag_reg_name(reg
), off
, val
););
case INT_VECTOR_DISPATCH
:
EXEC_WARNING( ("Attempted read from WO register in JTAG/TAP:"
" Read attempted from register %s (offset 0x%x).\n",
jtag_reg_name(reg
), reg
) );
case ASI_CORE_ENABLE_STATUS
:
val
= npp
->cmp_regs
.core_enable_status
;
case ASI_CORE_RUNNING_RW
:
case ASI_CORE_RUNNING_STATUS
:
val
= npp
->cmp_regs
.core_running_status
;
case ASI_CORE_RUNNING_W1S
:
case ASI_CORE_RUNNING_W1C
:
EXEC_WARNING( ("Attempted read from WO register in JTAG/TAP:"
" Read attempted from register %s (offset 0x%x).\n",
jtag_reg_name(reg
), reg
) );
val
= npp
->ncup
->regs
.soc_err_steering
;
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Read JTAG register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, jtag_reg_name(reg
), off
, val
););
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/*
 * ncx_access() - handle 64-bit loads/stores to the NCX CSRs (VF),
 * including CF_SYS_MODE_REG validation (node_id / extern_hub / way
 * fields) and the tick_en_slow register that stops/starts all strand
 * tick counters.
 *
 * NOTE(review): lossy extraction — switch skeletons, braces and the
 * declarations of node_id, domainp, pa, idx, self, npp, ncxp, reg, val,
 * mode_shift, tnsp, tv9p, tsp, core_num are missing from this view;
 * only comments added, code text untouched.
 */
static bool_t
ncx_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
config_dev_t
*config_devp
= NULL
;
bool_t zambezi_present
= false;
if (config_addrp
->baseaddr
== PHYS_ADDR_NCX
) {
* if local NCX CSR access, need to convert to Node X(this node)
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
pa
= PHYS_ADDR_NCX_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
* If remote NCX CSR access, use config_addrp to get at the node_id.
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "NCX");
domainp
= config_addrp
->config_devp
->domainp
;
/* match the remote alias base address back to its owning node */
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (config_addrp
->baseaddr
== PHYS_ADDR_NCX_REMOTE(node_id
))
self
= (node_id
== sp
->config_procp
->proc_id
) ? true : false;
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
EXEC_WARNING( ("[0x%llx] (pc=0x%llx) Access "
"attempt to %s of remote "
"node %d. PRM recommends only local "
"access\n", sp
->gid
, sp
->pc
,
ncx_reg_name(off
), node_id
));
val
= ncxp
->tick_en_slow
;
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Read NCX register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, ncx_reg_name(reg
), off
, val
););
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/* store val into ncxp->_n; reserved-field writes only warn (masked) */
#define ASSIGN_NCX(_n, _m) do { \
if (0LL != (val & ~(_m))) \
EXEC_WARNING( ("Attempted write to reserved field in NCX being masked out : " \
"Attempted write was 0x%llx to register %s (offset 0x%x) on node %d", \
val, ncx_reg_name(off), off, node_id ) ); \
EXEC_WARNING( ("[0x%llx] (pc=0x%llx) Access "
"attempt to %s of remote node %d. PRM "
"recommends only local access.\n",
sp
->gid
, sp
->pc
, ncx_reg_name(off
),
ASSIGN_NCX(sys_mode
, SM_REG_MASK
);
/* check that correct node_id value being written */
if (SM_2_NODE(ncxp
->sys_mode
) != node_id
)
fatal("[0x%llx] (pc=0x%llx)\tAttempt to write %d"
" to node_id field of CF_SYS_MODE_REG "
"(offset 0x%x) on node %d.\n",
sp
->gid
, sp
->pc
, SM_2_NODE(ncxp
->sys_mode
),
/* check if zambezi present or glueless */
config_devp
= domainp
->device
.listp
;
for (idx
= 0; idx
< domainp
->device
.count
; idx
++) {
if (streq(config_devp
->dev_typep
->dev_type_namep
, "zambezi")) {
config_devp
= config_devp
->nextp
;
/* extern_hub bit must agree with presence of a zambezi hub device */
if (zambezi_present
^ ((ncxp
->sys_mode
>> SM_EXTERN_HUB_SEL_SHIFT
) & 1))
fatal("[0x%llx] (pc=0x%llx)\tAttempt to write %d"
" to extern_hub field of CF_SYS_MODE_REG "
"(offset 0x%x) on a %d-node system.\n",
((ncxp
->sys_mode
>> SM_EXTERN_HUB_SEL_SHIFT
)
& 1), off
, domainp
->procs
.count
);
/* check that way info is correct and only one bit asserted too */
if ((domainp
->procs
.count
== 1) &&
((SM_EWAY_BITS(ncxp
->sys_mode
)) != 0))
fatal("[0x%llx] (pc=0x%llx)\tMultiple ways mode"
" selected in CF_SYS_MODE_REG (offset 0x%x) "
"on node %d.\nAttempted write was 0x%llx on "
"a %d-way system.\n", sp
->gid
, sp
->pc
,
off
, node_id
, ncxp
->sys_mode
,
else if (domainp
->procs
.count
> 1) {
mode_shift
= 11 - domainp
->procs
.count
;
if ((SM_EWAY_BITS(MASK64(mode_shift
, mode_shift
)) != (SM_EWAY_BITS(ncxp
->sys_mode
))))
fatal("[0x%llx] (pc=0x%llx)\tAttempt to "
"write wrong way info in "
"CF_SYS_MODE_REG (offset 0x%x) on "
"node %d.\nAttempted write was "
"0x%llx on a %d-way system.\n",
sp
->gid
, sp
->pc
, off
, node_id
,
ncxp
->sys_mode
, domainp
->procs
.count
);
DBGMULNODE(lprintf(-1, "SYS_MODE_REG for node %d set "
"to 0x%llx\n", node_id
, ncxp
->sys_mode
););
RSVD_MASK(sp
, (MASK64(0,0)), val
, 0, reg
);
/* tick enable changes are serialized on tick_en_lock */
pthread_mutex_lock(&npp
->tick_en_lock
);
if (!val
&& !npp
->cmp_regs
.tick_enable
&& !npp
->tick_stop
) {
/* now stop all tick counters */
for (idx
= 0; idx
< npp
->nstrands
; idx
++) {
tnsp
= &(npp
->ss_strandp
[idx
]);
if (tnsp
->core
!= core_num
) {
tv9p
->tick
->offset
+= RAW_TICK(tv9p
);
ss_recomp_tick_target(tsp
);
if (val
&& npp
->tick_stop
) {
/* now start all tick counters */
for (idx
= 0; idx
< npp
->nstrands
; idx
++) {
tnsp
= &(npp
->ss_strandp
[idx
]);
if (tnsp
->core
!= core_num
) {
tv9p
->tick
->offset
-= RAW_TICK(tv9p
);
ss_recomp_tick_target(tsp
);
ncxp
->tick_en_slow
= val
;
pthread_mutex_unlock(&npp
->tick_en_lock
);
EXEC_WARNING( ("Attempted write to RO register in NCX:"
"Write 0x%llx to register %s (offset 0x%x)",
val
, ncx_reg_name(reg
), reg
) );
ASSIGN_NCX(twr
, MASK64(31,10));
RSVD_MASK(sp
, (MASK64(6,0)), val
, 0, reg
);
ASSIGN_NCX(tpelse
, MASK64(6,2));
RSVD_MASK(sp
, MASK64(60,0), val
, 0, reg
);
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Write NCX register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, ncx_reg_name(reg
), off
, val
););
/*
 * cou_access() - handle 64-bit loads/stores to the per-link Coherency
 * Ordering Unit CSRs (err_enable, ESR with W1C semantics, EAR).
 * Single-node configurations have no COU and always fail the access.
 *
 * NOTE(review): lossy extraction — switch skeletons and declarations of
 * node_id, domainp, pa, idx, npp, coup, reg, link, val are missing from
 * this view; only comments added, code text untouched.
 */
static bool_t
cou_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
if (config_addrp
->baseaddr
== PHYS_ADDR_COU
) {
* if local COU CSR access, need to convert to Node X(this node)
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
pa
= PHYS_ADDR_COU_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
* No access is allowed for single node
if (domainp
->procs
.count
== 1) return false;
* If remote COU CSR access, use config_addrp to get at the node_id.
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "COU");
domainp
= config_addrp
->config_devp
->domainp
;
/* match the remote alias base address back to its owning node */
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (config_addrp
->baseaddr
== PHYS_ADDR_COU_REMOTE(node_id
))
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
/* split offset into register group and coherency-link index */
reg
= off
& ~COU_LINK_MASK
;
link
= (off
& COU_LINK_MASK
) >> COU_LINK_SHIFT
;
RSVD_MASK(sp
, MASK64(2,0), val
, 0, reg
);
coup
->cou_err_enable
[link
] = val
;
RSVD_MASK(sp
, MASK64(4,0), val
, 0, reg
);
/* ESR bits are write-1-to-clear */
coup
->cou_esr
[link
] &= ~val
;
EXEC_WARNING( ("Attempted write to RO register in COU:"
"Write 0x%llx to register %s (offset 0x%x)",
val
, cou_reg_name(reg
), reg
) );
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Write COU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, cou_reg_name(reg
), off
, val
););
val
= coup
->cou_err_enable
[link
];
val
= coup
->cou_esr
[link
];
val
= coup
->cou_ear
[link
];
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Read COU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, cou_reg_name(reg
), off
, val
););
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/*
 * lfu_access() - handle 64-bit loads/stores to the per-link Link Framing
 * Unit CSRs. Writing cl_cfg_reg validates the master/slave link
 * topology (zambezi hub vs. glueless two-node) and gates the node's
 * global-addressing enable flag on a consistent LFU setup.
 *
 * NOTE(review): lossy extraction — switch skeletons, braces and the
 * declarations of node_id, domainp, pa, idx, npp, lfup, tlfup, tnpp,
 * reg, link, lnk, val, lfu_ok are missing from this view; only comments
 * added, code text untouched.
 */
static bool_t
lfu_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
config_dev_t
*devp
= NULL
;
bool_t zambezi_present
= false;
if (config_addrp
->baseaddr
== PHYS_ADDR_LFU
) {
* if local LFU CSR access, need to convert to Node X(this node)
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
/* single node configs can't access LFU CSRs */
if (domainp
->procs
.count
== 1) return false;
pa
= PHYS_ADDR_LFU_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
* If remote LFU CSR access, use config_addrp to get at the node_id.
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "LFU");
domainp
= config_addrp
->config_devp
->domainp
;
/* match the remote alias base address back to its owning node */
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (config_addrp
->baseaddr
== PHYS_ADDR_LFU_REMOTE(node_id
))
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
/* split offset into register group and link index */
reg
= off
& ~LFU_LINK_MASK
;
link
= (off
& LFU_LINK_MASK
) >> 12;
EXEC_WARNING( ("Attempted write to RO register in LFU:"
"Write 0x%llx to register %s (offset 0x%x)",
val
, lfu_reg_name(reg
), reg
) );
RSVD_MASK(sp
, (MASK64(5,0)), val
, 0, reg
);
lfup
->cl_cfg_reg
[link
] = val
;
/* writing the link config brings the link to the L0 (up) state */
lfup
->cl_init_state
[link
] = LFU_LINK_L0
;
devp
= domainp
->device
.listp
;
for (idx
= 0; idx
< domainp
->device
.count
; idx
++) {
if (streq(devp
->dev_typep
->dev_type_namep
, "zambezi")) {
/* check if all set to master */
for (lnk
=0; lnk
<LFU_MAX_LINKS
; lnk
++) {
if ((lfup
->cl_cfg_reg
[lnk
] & MASK64(1,0)) != LFU_LINK_MASTER_EN
)
* And the below overkill galore explains all
* legal and illegal cases for glueless mode.
* nb: node A is current node whose cl_cfg_reg
* is being written to and node B is the other
* node whose register value is just being
* Legal combinations(of cl_cfg_reg bits 1, 0)
* -------------------------------------------
* 11 - master en'ed 01 - slave en'ed
* 11 - master en'ed 00 - register not yet written to
* 01 - slave en'ed 11 - master en'ed
* 01 - slave en'ed 00 - register not yet written to
* Illegal combinations(of cl_cfg_reg bits 1, 0)
* ---------------------------------------------
* 11 - master en'ed 10 - also master but not en'ed
* 11 - master en'ed 11 - also master en'ed
* 01 - slave en'ed 10 - master but not en'ed
* 01 - slave en'ed 01 - also slave
* Note : Any other values written to Node A (00 or 10)
int link_mode
= lfup
->cl_cfg_reg
[0] & (MASK64(1,0));
if ((link_mode
!= LFU_LINK_MASTER_EN
) && (link_mode
!= LFU_LINK_SLAVE_EN
)) {
/* all links for that node must be the same mode */
for (lnk
= 1; lnk
< LFU_MAX_LINKS
; lnk
++) {
if (lfup
->cl_cfg_reg
[0] ^ lfup
->cl_cfg_reg
[lnk
]) {
* now get hold of other node to make sure that
* we don't have 2 slaves or 2 masters.
for (idx
= 0; idx
< 2; idx
++) {
tnpp
= LIST_ENTRY(domainp
->procs
, idx
)->procp
;
if (tnpp
->lfup
!= lfup
) {
if (link_mode
== LFU_LINK_MASTER_EN
) {
if ((tlfup
->cl_cfg_reg
[0] & (MASK64(1,1))))
fatal("[0x%llx] (pc=0x%llx)\t"
" Attempt to write master"
" bit in %s of both nodes"
" in glueless config.\n",
/* ie. node A is slave */
switch (tlfup
->cl_cfg_reg
[0] & MASK64(1, 0)) {
fatal("[0x%llx] (pc=0x%llx)\t"
" bit in %s of both nodes"
" in glueless config.\n",
fatal("[0x%llx] (pc=0x%llx)\t"
tnpp
->config_procp
->proc_id
);
* By right, should only allow global addressing when ALL nodes
* have their lfu's set up correctly. But because of the way
* Legion cycles through the different threads by allowing
* a quantum of instructions per cpu, it will cause false errors
* by doing that. So, just checking and updating each node at a time.
npp
->global_addressing_ok
.flags
.lfu
= lfu_ok
? GLOBAL_ADDRESSING_FLAG_EN
:GLOBAL_ADDRESSING_FLAG_DIS
;
RSVD_MASK(sp
, ((MASK64(32, 28)|MASK64(26, 20)|MASK64(14, 8)|MASK64(1, 0))),
lfup
->cl_serdes_cfg
[link
] = val
;
RSVD_MASK(sp
, (MASK64(27,0)), val
, 0, reg
);
lfup
->cl_ser_invpair
[link
] = val
;
RSVD_MASK(sp
, (MASK64(15, 10)|MASK64(7,0)), val
, 0, reg
);
lfup
->cl_test_cfg
[link
] = val
;
RSVD_MASK(sp
, (MASK64(24,0)), val
, 0, reg
);
/* error status bits are write-1-to-clear */
lfup
->cl_error_stat
[link
] &= ~val
;
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Write LFU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, lfu_reg_name(reg
), off
, val
););
val
= lfup
->cl_init_state
[link
];
val
= lfup
->cl_cfg_reg
[link
];
val
= lfup
->cl_serdes_cfg
[link
];
val
= lfup
->cl_ser_invpair
[link
];
val
= lfup
->cl_test_cfg
[link
];
val
= lfup
->cl_error_stat
[link
];
/* illegal reg - an error */
DBGMULNODE(lprintf(sp
->gid
, "Read LFU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id
, reg
, lfu_reg_name(reg
), off
, val
););
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/*
 * l2c_access() - simulate a load/store to an L2 cache CSR (PA base
 * 0xA0.0000.0000).  Only 64-bit accesses are honoured; the bank is
 * decoded from the offset and the named register is masked on write
 * (ASSIGN_L2) or on read (RETRIEVE_L2).
 *
 * NOTE(review): this region of the file appears to have been damaged in
 * transit -- comment delimiters, switch headers, case labels, break
 * statements and closing braces seem to be missing.  The surviving code
 * is preserved byte-for-byte below; only proper C comments were added.
 * Reconcile against a pristine niagara2_device.c before building.
 */
* Access L2 Cache registers, mapped at offset = 0xA000000000
static bool_t
l2c_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
* Note that for VF, each node has its own L2CSR region and all of them
* are addressed by the same physical address(0xA0.0000.0000 to
* 0xBF.FFFF.FFFF). Nodes can only access their local L2CSR regions and
* not those of other nodes. So the domain structure's addressmap only
* contains that physical address(0xA0.0000.0000 to 0xBF.FFFF.FFFF) and
* it is up to the init and access routines to correctly map that PA to
* the correct node's L2CSR region.
* This is unlike NCU, CCU, MCU etc where there is a local CSR access
* address which is common for all nodes and basically is translated
* by hw to talk to originating node's address space AND remote CSR
* access address which allows any node to access any other
* node's address space. So in this case the domain address map will
* contain both the local CSR address space as well as the remote CSR
/* Redirect the common L2CSR PA to the correct node*/
npp
= (ss_proc_t
*)sp
->config_procp
->procp
;
config_addrp
= npp
->l2c_devp
->addrp
;
* FIXME: For the moment we only support 64bit accesses to registers.
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
/* Each 64-byte stripe of the offset selects one of the L2 banks. */
bank
= (off
>> 6) & 0x7; /* N2 supports 8-banked L2 cache */
/*
 * Write path.
 * NOTE(review): ASSIGN_DB-style macro looks truncated here -- the
 * assignment into l2p->_n[bank] and the "} while (0)" close are
 * not visible in this copy; confirm against the original.
 */
#define ASSIGN_L2(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
* L2 BIST Control Reg section 28.18 of N2 PRM 0.9.1
ASSIGN_L2( bist_ctl
, MASK64(6,0) );
if (val
& 1) l2p
->bist_ctl
[bank
] |= 0x400;
* L2 Control Register section 28.1.2 of VF PRM 0.9
ASSIGN_L2( control
, (MASK64(37, 36)|MASK64(34, 0)) );
if (((l2p
->control
[bank
] & L2_NODEID
) >> L2_NODEID_SHIFT
) != sp
->config_procp
->proc_id
)
fatal("[0x%llx] (pc=0x%llx)\tIncorrect "
"node_id being set in L2 CONTROL REG"
"(offset 0x%x). Attempted write was "
"0x%llx on node 0x%x.", sp
->gid
, sp
->pc
,
off
, val
, sp
->config_procp
->proc_id
);
* Please see Victoria Falls Bug 124014 for details.
* Bug court approval 1/10/2006. The fix that was approved
* involves the addition of an L2 Control Register bit
* specifying the configuration, rather than bringing in
* ncx_2way into all 8 l2t's.
* There is an idle bit in the L2 CSR (bit 21), which was
* designated as DBG_EN in the PRM but no longer used by N2
* or VF. This bit will now need to be used to indicate a
* system with 3 or 4 VF nodes configured.
dbgen_bit
= (l2p
->control
[bank
] & L2_DBGEN
) >> L2_DBGEN_SHIFT
;
if ((sp
->config_procp
->domainp
->procs
.count
>2) ^ dbgen_bit
)
fatal("[0x%llx] (pc=0x%llx)\tIncorrect "
"value being set for L2 DBGEN bit"
" in the L2 CONTROL reg(offset 0x%x)."
" Attempted write was 0x%llx on node "
"0x%x.", sp
->gid
, sp
->pc
,
off
, val
, sp
->config_procp
->proc_id
);
* Table 12-3 of VF PRM 0.1
ASSIGN_L2( error_enable
, MASK64(31,0));
* Table 12-1 of VF PRM 0.1
* RW1C: bit [63:56], [53:33]
RSVD_MASK(sp
, (MASK64(63,56)|MASK64(53,33)|MASK64(27,0)), val
, 0, reg
);
/* W1C for the sticky error bits, plain RW for the low field. */
l2p
->error_status
[bank
] &= ~val
;
l2p
->error_status
[bank
] &= MASK64(63,56)|MASK64(53,33);
l2p
->error_status
[bank
] |= val
& MASK64(27,0);
ASSIGN_L2( error_status_ii
, MASK64(63,40));
* L2 Control Register section 28.15 of N2 PRM 0.9.1
ASSIGN_L2( control
, MASK64(21,0) );
* Error handling section 25.10 of N2 PRM 1.2
ASSIGN_L2( error_enable
, MASK64(2,0) );
* Table 25-21 of N2 PRM 1.2
* RW: bit [63,54], [27:0]
RSVD_MASK(sp
, (MASK64(63,34)|MASK64(27,0)), val
, 0, reg
);
l2p
->error_status
[bank
] &= ~val
;
l2p
->error_status
[bank
] &= MASK64(63,34);
l2p
->error_status
[bank
] |= val
& MASK64(27,0);
ASSIGN_L2( error_address
, MASK64(39,4) );
ASSIGN_L2( error_inject
, MASK64(1,0) );
ASSIGN_L2( error_notdata
, MASK64(51,48)|MASK64(45,4) );
/* illegal reg - an error */
* L2 Cache Diagnostic Access section 28.17 of N2 PRM 0.9.1
* index stores to a 32bit word and its ECC+rsvd bits
idx
= off
& (L2_WAY
| L2_LINE
| L2_BANK
| L2_WORD
) >> 2;
* put oddeven select bit low so data is in addr order
idx
|= ((off
>> L2_ODDEVEN_SHIFT
) & 1);
l2p
->diag_datap
[idx
] = val
;
* index stores to a tag and its ECC+rsvd bits
idx
= off
& (L2_WAY
| L2_LINE
| L2_BANK
) >> 6;
l2p
->diag_tagp
[idx
] = val
;
* index valid/dirty or alloc/used bits and parity
idx
= off
& (L2_LINE
| L2_BANK
) >> 6;
idx
|= ((off
& L2_VDSEL
) >> 10);
l2p
->diag_vuadp
[idx
] = val
;
EXEC_WARNING( ("Attempted write to reserved field in l2 cache controller:"
"Write 0x%llx to bank %d, register %s (offset 0x%x)",
val
, bank
, l2c_reg_name(reg
), reg
) );
/* Read path: each register is returned masked to its architected bits. */
#define RETRIEVE_L2(_n, _m) do { val = ((l2p->_n[bank]) & (_m)); } while (0)
* L2 BIST Control Reg section 28.18 of N2 PRM 0.9.1
RETRIEVE_L2( bist_ctl
, MASK64(10,0) );
* L2 Control Register section 28.1.2 of VF PRM 0.9
RETRIEVE_L2( control
, MASK64(33,0));
* Error handling section 12.25.1 of VF PRM 0.1
RETRIEVE_L2( error_enable
, MASK64(31,0) );
RETRIEVE_L2( error_status
,
MASK64(63,56)|MASK64(53,33)|MASK64(27,0));
RETRIEVE_L2( error_status_ii
, MASK64(63,40));
* L2 Control Register section 28.15 of N2 PRM 0.9.1
RETRIEVE_L2( control
, MASK64(21,0) );
* Error handling section 25.10 of N2 PRM 1.2
RETRIEVE_L2( error_enable
, MASK64(2,0) );
RETRIEVE_L2( error_status
, MASK64(63,34)|MASK64(27,0));
RETRIEVE_L2( error_address
, MASK64(39,4) );
RETRIEVE_L2( error_inject
, MASK64(1,0) );
RETRIEVE_L2( error_notdata
, MASK64(51,48)|MASK64(45,4) );
/* illegal reg - an error */
* L2 Cache Diagnostic Access section 28.17 of N2 PRM 0.9.1
* index retrieves a 32bit word and its ECC+rsvd bits
idx
= off
& (L2_WAY
| L2_LINE
| L2_BANK
| L2_WORD
) >> 2;
* put oddeven select bit low so data is in addr order
idx
|= ((off
>> L2_ODDEVEN_SHIFT
) & 1);
val
= l2p
->diag_datap
[idx
];
* index retrieves a tag and its ECC+rsvd bits
idx
= off
& (L2_WAY
| L2_LINE
| L2_BANK
) >> 6;
val
= l2p
->diag_tagp
[idx
];
* index valid/dirty or alloc/used bits and parity
idx
= off
& (L2_LINE
| L2_BANK
) >> 6;
idx
|= ((off
& L2_VDSEL
) >> 10);
val
= l2p
->diag_vuadp
[idx
];
/* Skip the writeback when the load's destination is %g0. */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
/*
 * mcu_access() - simulate a load/store to a DRAM/memory-controller CSR
 * (local PA base 0x84.0000.0000, or a per-node remote alias).  A local
 * access is first redirected to this node's remote alias; a remote
 * access is checked against the global-addressing configuration and its
 * node id recovered from the base address.  Only 64-bit accesses are
 * honoured; registers are masked on write (ASSIGN_DB) and read
 * (RETRIEVE_DB), and CONFIG_REG_ACCESS_DATA tunnels through to the
 * per-AMB (FB-DIMM advanced memory buffer) register file.
 *
 * NOTE(review): this region of the file appears to have been damaged in
 * transit -- comment delimiters, switch headers, case labels, break
 * statements and closing braces seem to be missing.  The surviving code
 * is preserved byte-for-byte below; only proper C comments were added.
 * Reconcile against a pristine niagara2_device.c before building.
 */
* Access DRAM Control and Status Registers (mapped at offset = 0x8400000000)
static bool_t
mcu_access(simcpu_t
*sp
, config_addr_t
* config_addrp
, tpaddr_t off
, maccess_t op
, uint64_t * regp
)
if (config_addrp
->baseaddr
== PHYS_ADDR_MCU
) {
* If local MCU CSR access, need to convert to Node X(this node) MCU CSR
* address. Use the simcpu to get the correct node_id and then get the
node_id
= sp
->config_procp
->proc_id
;
domainp
= sp
->config_procp
->domainp
;
pa
= PHYS_ADDR_MCU_REMOTE(node_id
) + off
;
config_addrp
= find_domain_address(domainp
, pa
);
* If remote MCU CSR access, use config_addrp to get at the node_id.
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp
, "MCU");
domainp
= config_addrp
->config_devp
->domainp
;
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (config_addrp
->baseaddr
== PHYS_ADDR_MCU_REMOTE(node_id
))
* FIXME: For the moment we only support 64bit accesses to registers.
npp
= (ss_proc_t
*)config_addrp
->config_devp
->devp
;
if (MA_ldu64
!=op
&& MA_st64
!=op
) return false;
ASSERT (bank
< npp
->num_mbanks
); /* this should be enforced by the config_dev range */
dbp
= &(npp
->mbankp
[bank
]);
/*
 * Write path.
 * NOTE(review): ASSIGN_DB looks truncated here -- the reserved-bit
 * check and the "} while (0)" close are not visible in this copy.
 */
#define ASSIGN_DB(_n, _m) do { \
dbp->_n |= (val & (_m)); \
DBGMC(lprintf(sp
->gid
, "Memory controller bank %d : Write register 0x%lx '%s' value= 0x%llx on node %d\n",
bank
, off
, mcu_reg_name(reg
), val
, node_id
); );
* DRAM controller section 25.10 of N2 RPM 0.9.1
case DRAM_CAS_ADDR_WIDTH
: ASSIGN_DB( cas_addr_width
, MASK64(3, 0) ); break;
case DRAM_RAS_ADDR_WIDTH
: ASSIGN_DB( ras_addr_width
, MASK64(3, 0) ); break;
case DRAM_CAS_LAT
: ASSIGN_DB( cas_lat
, MASK64(2, 0) ); break;
case DRAM_SCRUB_FREQ
: ASSIGN_DB( scrub_freq
, MASK64(11, 0) ); break;
case DRAM_REFRESH_FREQ
: ASSIGN_DB( refresh_freq
, MASK64(12, 0) ); break;
case DRAM_REFRESH_COUNTER
: ASSIGN_DB( refresh_counter
, MASK64(12, 0) ); break;
case DRAM_SCRUB_ENABLE
: ASSIGN_DB( scrub_enable
, MASK64(0, 0) ); break;
case DRAM_TRRD
: ASSIGN_DB( trrd
, MASK64(3, 0) ); break;
case DRAM_TRC
: ASSIGN_DB( trc
, MASK64(4, 0) ); break;
case DRAM_TRCD
: ASSIGN_DB( trcd
, MASK64(3, 0) ); break;
case DRAM_TWTR
: ASSIGN_DB( twtr
, MASK64(3, 0) ); break;
case DRAM_TRTW
: ASSIGN_DB( trtw
, MASK64(3, 0) ); break;
case DRAM_TRTP
: ASSIGN_DB( trtp
, MASK64(2, 0) ); break;
case DRAM_TRAS
: ASSIGN_DB( tras
, MASK64(3, 0) ); break;
case DRAM_TRP
: ASSIGN_DB( trp
, MASK64(3, 0) ); break;
case DRAM_TWR
: ASSIGN_DB( twr
, MASK64(3, 0) ); break;
case DRAM_TRFC
: ASSIGN_DB( trfc
, MASK64(6, 0) ); break;
case DRAM_TMRD
: ASSIGN_DB( tmrd
, MASK64(1, 0) ); break;
case DRAM_FAWIN
: ASSIGN_DB( fawin
, MASK64(4, 0) ); break;
case DRAM_TIWTR
: ASSIGN_DB( tiwtr
, MASK64(1, 0) ); break;
case DRAM_DIMM_STACK
: ASSIGN_DB( dimm_stack
, MASK64(0, 0) ); break;
case DRAM_EXT_WR_MODE2
: ASSIGN_DB( ext_wr_mode2
, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE1
: ASSIGN_DB( ext_wr_mode1
, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE3
: ASSIGN_DB( ext_wr_mode3
, MASK64(14, 0) ); break;
case DRAM_8_BANK_MODE
: ASSIGN_DB( eight_bank_mode
, MASK64(0, 0) ); break;
case DRAM_BRANCH_DISABLED
: ASSIGN_DB( branch_disabled
, MASK64(0, 0) ); break;
case DRAM_SEL_LO_ADDR_BITS
: ASSIGN_DB( sel_lo_addr_bits
, MASK64(0, 0) ); break;
case DRAM_SINGLE_CHNL_MODE
: ASSIGN_DB( single_chnl_mode
, MASK64(1, 0) ); break;
case DRAM_MIRROR_MODE
: ASSIGN_DB( mirror_mode
, MASK64(0, 0) ); break;
case DRAM_SINGLE_CHNL_MODE
: ASSIGN_DB( single_chnl_mode
, MASK64(0, 0) ); break;
if (0LL != (val
& ~(3))) goto write_reserved
;
/* DRAM Init sequence done is instantaneous */
case DRAM_INIT_STATUS
: ASSIGN_DB( init_status
, MASK64(0, 0) ); break;
case DRAM_DIMM_PRESENT
: ASSIGN_DB( dimm_present
, MASK64(3, 0) ); break;
case DRAM_FAILOVER_STATUS
: ASSIGN_DB( failover_status
, MASK64(0, 0) ); break;
case DRAM_FAILOVER_MASK
: ASSIGN_DB( failover_mask
, MASK64(34, 0) ); break;
case DRAM_POWER_DOWN_MODE
: ASSIGN_DB( power_down_mode
, MASK64(0, 0) ); break;
ASSIGN_DB( fbd_chnl_state
.val
, MASK64(7, 0) );
/* Update the appropriate _done register */
switch( (val
& MASK64(2, 0)) ) {
dbp
->disable_state_period_done
= 1; break;
dbp
->calibrate_state_period_done
= 1; break;
dbp
->training_state_done
= 1; break;
dbp
->testing_state_done
= 1; break;
dbp
->polling_state_done
= 1; break;
dbp
->config_state_done
= 1; break;
lprintf(sp
->gid
, "Unknown val (0x%llx) being stored to FBD_CHNL_STATE reg on node %d\n",
case FBD_FAST_RESET_FLAG
: ASSIGN_DB( fbd_fast_reset_flag
, MASK64(3, 0) ); break;
dbp
->fbd_chnl_reset
= val
& 0x3;
/* if FBDINIT is set channel initialization starts */
/* set the proper state value in amb link status */
for (i
=0; i
<MAX_AMBS
; i
++)
dbp
->fbd_chnl_state
.ambstate
[i
] = L0_STATE
;
/* hw clears the bit after init is done */
dbp
->fbd_chnl_reset
&= ~(1ULL);
case TS1_SB_NB_MAPPING
: ASSIGN_DB( ts1_sb_nb_mapping
, MASK64(2, 0) ); break;
case TS1_TEST_PARAMETER
: ASSIGN_DB( ts1_test_parameter
, MASK64(23, 0) ); break;
case TS3_FAILOVER_CONFIG
: ASSIGN_DB( ts3_failover_config
, MASK64(15, 0) ); break;
case DISABLE_STATE_PERIOD
: ASSIGN_DB( disable_state_period
,MASK64(5, 0) ); break;
case DISABLE_STATE_PERIOD_DONE
: ASSIGN_DB( disable_state_period_done
, MASK64(0, 0) ); break;
case CALIBRATE_STATE_PERIOD
: ASSIGN_DB( calibrate_state_period
, MASK64(19, 0) ); break;
case CALIBRATE_STATE_PERIOD_DONE
: ASSIGN_DB( calibrate_state_period_done
, MASK64(0, 0) ); break;
case TRAINING_STATE_MIN_TIME
: ASSIGN_DB( training_state_min_time
, MASK64(15, 0) ); break;
case TRAINING_STATE_DONE
: ASSIGN_DB( training_state_done
, MASK64(1, 0) ); break;
case TRAINING_STATE_TIMEOUT
: ASSIGN_DB( training_state_timeout
, MASK64(7, 0) ); break;
case TESTING_STATE_DONE
: ASSIGN_DB( testing_state_done
, MASK64(1, 0) ); break;
case TESTING_STATE_TIMEOUT
: ASSIGN_DB( testing_state_timeout
, MASK64(7, 0) ); break;
case POLLING_STATE_DONE
: ASSIGN_DB( polling_state_done
, MASK64(1, 0) ); break;
case POLLING_STATE_TIMEOUT
: ASSIGN_DB( polling_state_timeout
, MASK64(7, 0) ); break;
case CONFIG_STATE_DONE
: ASSIGN_DB( config_state_done
, MASK64(1, 0) ); break;
case CONFIG_STATE_TIMEOUT
: ASSIGN_DB( config_state_timeout
, MASK64(7, 0) ); break;
case DRAM_PER_RANK_CKE
: ASSIGN_DB( dram_per_rank_cke
, MASK64(15, 0) ); break;
case L0S_DURATION
: ASSIGN_DB( l0s_duration
, MASK64(6, 0) ); break;
case CHNL_SYNC_FRAME_FREQ
: ASSIGN_DB( chnl_sync_frame_freq
, MASK64(5, 0) ); break;
case CHNL_READ_LAT
: ASSIGN_DB( chnl_read_lat
, MASK64(15, 0) ); break;
case CHNL_CAPABILITY
: ASSIGN_DB( chnl_capability
, MASK64(9, 0) ); break;
case LOOPBACK_MODE_CNTL
: ASSIGN_DB( loopback_mode_cntl
, MASK64(1, 0) ); break;
case SERDES_CONFIG_BUS
: ASSIGN_DB( serdes_config_bus
, MASK64(24, 0) ); break;
case SERDES_INVPAIR
: ASSIGN_DB( serdes_invpair
, MASK64(47, 0) ); break;
case SERDES_TEST_CONFIG_BUS
: ASSIGN_DB( serdes_test_config_bus
, MASK64(31, 0) ); break;
case CONFIG_REG_ACCESS_ADDR
: ASSIGN_DB( config_reg_access_addr
, MASK64(15, 0) ); break;
case CONFIG_REG_ACCESS_DATA
:
/* Tunnel a write through to the addressed AMB register. */
ambid
= AMBID(dbp
->config_reg_access_addr
);
ASSIGN_DB(config_reg_access_data
, MASK64(31, 0) );
switch (AMBADDR(dbp
->config_reg_access_addr
)){
#define ASSIGN_AMB(_n, _m) do { \
dbp->amb[ambid]._n = (_m); \
ASSIGN_AMB(emask
, val32
& 0x3f);
ASSIGN_AMB(ferr
, val32
& 0x3f);
ASSIGN_AMB(nerr
, val32
& 0x3f);
ASSIGN_AMB(psbyte3_0
, val32
);
ASSIGN_AMB(psbyte7_4
, val32
);
ASSIGN_AMB(psbyte11_8
, val32
);
ASSIGN_AMB(psbyte13_12
, val32
& 0x0000ffff);
case C2DINCRCUR_CMD2DATANXT
:
ASSIGN_AMB(c2dincrcur_cmd2datanxt
, val32
& 0x00ff00ff);
/* clear START bit immediately */
ASSIGN_AMB(mbcsr
, val32
& 0x7fffffff);
ASSIGN_AMB(dareftc
, val32
& 0x00ffffff);
ASSIGN_AMB(mtr_dsreftc
, val32
& 0x7f01fff7);
ASSIGN_AMB(drt
, val32
& 0x7f77ffff);
ASSIGN_AMB(drc
, val32
& 0x2f87ffff);
ASSIGN_AMB(dcalcsr
, val32
& 0xf0607fff);
switch (dbp
->amb
[ambid
].dcalcsr
& 0xf) {
/* set completion status if start set */
if (dbp
->amb
[ambid
].dcalcsr
& 0x80000000) {
dbp
->amb
[ambid
].dcalcsr
&= 0x0fffffff;
EXEC_WARNING(("Invalid DCALCSR opcode: 0x%x",
dbp
->amb
[ambid
].dcalcsr
& 0xf));
ASSIGN_AMB(dcaladdr
, val32
);
ASSIGN_AMB(ddr2odtc
, val32
);
/* illegal reg - an error */
EXEC_WARNING( ("Unimplemented write amb address = 0x%x, ambid=0x%x",
AMBADDR(dbp
->config_reg_access_addr
), ambid
) );
* Performance counter section 10.3 of N2 PRM 1.1
case DRAM_PERF_CTL
: ASSIGN_DB( perf_ctl
, MASK64(7, 0) ); break;
case DRAM_PERF_COUNT
: ASSIGN_DB( perf_count
, MASK64(63, 0) ); break;
* Error handling section 25.12 of N2 PRM 1.2
dbp
->error_status
&= ~val
;
dbp
->error_status
&= MASK64(63,54);
dbp
->error_status
|= val
& MASK64(15,0);
case DRAM_ERROR_ADDRESS
: ASSIGN_DB( error_address
, MASK64(39, 4) ); break;
case DRAM_ERROR_INJECT
: ASSIGN_DB( error_inject
, MASK64(31,30)|MASK64(15,0) ); break;
case DRAM_ERROR_COUNTER
: ASSIGN_DB( error_counter
, MASK64(15, 0) ); break;
case DRAM_ERROR_LOCATION
: ASSIGN_DB( error_location
, MASK64(35, 0) ); break;
case DRAM_ERROR_RETRY
: ASSIGN_DB( error_retry
, MASK64(63, 63)|MASK64(49,32)|MASK64(17,0) ); break;
case DRAM_FBD_ERROR_SYND
: ASSIGN_DB( fbd_error_synd
, MASK64(63, 63)|MASK64(29,0) ); break;
case DRAM_FBD_INJ_ERROR_SRC
: ASSIGN_DB( fbd_inj_error_src
, MASK64(1, 0) ); break;
case DRAM_FBR_COUNT
: ASSIGN_DB( fbr_count
, MASK64(16, 0) ); break;
* Power management section 26.3 of N2 PRM 0.9.1
case DRAM_OPEN_BANK_MAX
: ASSIGN_DB( open_bank_max
, MASK64(16, 0) ); break;
case DRAM_PROG_TIME_CNTR
: ASSIGN_DB( prog_time_cntr
, MASK64(15, 0) ); break;
* Hardware debug section 29.2.2 of N2 PRM 0.9.1
case DRAM_DBG_TRG_EN
: ASSIGN_DB( dbg_trg_en
, MASK64(2, 2) ); break;
* Set done bit (34) immediately if start bit (32)
dbp
->ibist_nbfib_ctl
= (val
| (1LL << 34));
ambid
= (dbp
->fbd_chnl_state
.val
& (0xF << 3))
dbp
->fbd_chnl_state
.ambstate
[ambid
] = L0_STATE
;
dbp
->ibist_nbfib_ctl
= val
;
* Set done bit (34) immediately if start bit (32)
dbp
->ibist_sbfib_ctl
= (val
| (1LL << 34));
dbp
->ibist_sbfib_ctl
= val
;
/* illegal reg - an error */
EXEC_WARNING( ("Attempted write to reserved field in dram controller: Write 0x%llx to bank %d, register %s (offset 0x%x) on node %d",
val
, bank
, mcu_reg_name(reg
), reg
, node_id
) );
/* Read path: each register is returned masked to its architected bits. */
#define RETRIEVE_DB(_n, _m) do { val = ((dbp->_n) & (_m)); } while (0)
* DRAM controller section 25.10 of N2 RPM 0.9.1
case DRAM_CAS_ADDR_WIDTH
: RETRIEVE_DB( cas_addr_width
, MASK64(3, 0) ); break;
case DRAM_RAS_ADDR_WIDTH
: RETRIEVE_DB( ras_addr_width
, MASK64(3, 0) ); break;
case DRAM_CAS_LAT
: RETRIEVE_DB( cas_lat
, MASK64(2, 0) ); break;
case DRAM_SCRUB_FREQ
: RETRIEVE_DB( scrub_freq
, MASK64(11, 0) ); break;
case DRAM_REFRESH_FREQ
: RETRIEVE_DB( refresh_freq
, MASK64(12, 0) ); break;
case DRAM_REFRESH_COUNTER
: RETRIEVE_DB( refresh_counter
, MASK64(12, 0) ); break;
case DRAM_SCRUB_ENABLE
: RETRIEVE_DB( scrub_enable
, MASK64(0, 0) ); break;
case DRAM_TRRD
: RETRIEVE_DB( trrd
, MASK64(3, 0) ); break;
case DRAM_TRC
: RETRIEVE_DB( trc
, MASK64(4, 0) ); break;
case DRAM_TRCD
: RETRIEVE_DB( trcd
, MASK64(3, 0) ); break;
case DRAM_TWTR
: RETRIEVE_DB( twtr
, MASK64(3, 0) ); break;
case DRAM_TRTW
: RETRIEVE_DB( trtw
, MASK64(3, 0) ); break;
case DRAM_TRTP
: RETRIEVE_DB( trtp
, MASK64(2, 0) ); break;
case DRAM_TRAS
: RETRIEVE_DB( tras
, MASK64(3, 0) ); break;
case DRAM_TRP
: RETRIEVE_DB( trp
, MASK64(3, 0) ); break;
case DRAM_TWR
: RETRIEVE_DB( twr
, MASK64(3, 0) ); break;
case DRAM_TRFC
: RETRIEVE_DB( trfc
, MASK64(6, 0) ); break;
case DRAM_TMRD
: RETRIEVE_DB( tmrd
, MASK64(1, 0) ); break;
case DRAM_FAWIN
: RETRIEVE_DB( fawin
, MASK64(4, 0) ); break;
case DRAM_TIWTR
: RETRIEVE_DB( tiwtr
, MASK64(1, 0) ); break;
case DRAM_DIMM_STACK
: RETRIEVE_DB( dimm_stack
, MASK64(0, 0) ); break;
case DRAM_EXT_WR_MODE2
: RETRIEVE_DB( ext_wr_mode2
, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE1
: RETRIEVE_DB( ext_wr_mode1
, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE3
: RETRIEVE_DB( ext_wr_mode3
, MASK64(14, 0) ); break;
case DRAM_8_BANK_MODE
: RETRIEVE_DB( eight_bank_mode
, MASK64(0, 0) ); break;
case DRAM_BRANCH_DISABLED
: RETRIEVE_DB( branch_disabled
, MASK64(0, 0) ); break;
case DRAM_SEL_LO_ADDR_BITS
: RETRIEVE_DB( sel_lo_addr_bits
, MASK64(0, 0) ); break;
case DRAM_SINGLE_CHNL_MODE
: RETRIEVE_DB( single_chnl_mode
, MASK64(1, 0) ); break;
case DRAM_MIRROR_MODE
: RETRIEVE_DB( mirror_mode
, MASK64(0, 0) ); break;
case DRAM_SINGLE_CHNL_MODE
: RETRIEVE_DB( single_chnl_mode
, MASK64(0, 0) ); break;
case DRAM_DIMM_INIT
: RETRIEVE_DB( dimm_init
, MASK64(2, 0) ); break;
case DRAM_INIT_STATUS
: RETRIEVE_DB( init_status
, MASK64(0, 0) ); break;
case DRAM_DIMM_PRESENT
: RETRIEVE_DB( dimm_present
, MASK64(3, 0) ); break;
case DRAM_FAILOVER_STATUS
: RETRIEVE_DB( failover_status
, MASK64(0, 0) ); break;
case DRAM_FAILOVER_MASK
: RETRIEVE_DB( failover_mask
, MASK64(34, 0) ); break;
case DRAM_POWER_DOWN_MODE
: RETRIEVE_DB( power_down_mode
, MASK64(0, 0) ); break;
/* retrieve state bits for the ambid that has been set */
ambid
= (dbp
->fbd_chnl_state
.val
& (0xF << 3)) >> 3;
/* use it to index into state value */
val
= (((dbp
->fbd_chnl_state
.val
) & ~(0x7)) |
(dbp
->fbd_chnl_state
.ambstate
[ambid
]));
case FBD_FAST_RESET_FLAG
: RETRIEVE_DB( fbd_fast_reset_flag
, MASK64(3, 0) ); break;
case FBD_CHNL_RESET
: RETRIEVE_DB( fbd_chnl_reset
, MASK64(1, 0) ); break;
case TS1_SB_NB_MAPPING
: RETRIEVE_DB( ts1_sb_nb_mapping
, MASK64(2, 0) ); break;
case TS1_TEST_PARAMETER
: RETRIEVE_DB( ts1_test_parameter
, MASK64(23, 0) ); break;
case TS3_FAILOVER_CONFIG
: RETRIEVE_DB( ts3_failover_config
, MASK64(15, 0) ); break;
case ELECTRICAL_IDLE_DETECTED
: RETRIEVE_DB( electrical_idle_detected
, MASK64(27,0) ); break;
case DISABLE_STATE_PERIOD
: RETRIEVE_DB( disable_state_period
, MASK64(5, 0) ); break;
case DISABLE_STATE_PERIOD_DONE
: RETRIEVE_DB( disable_state_period_done
, MASK64(0, 0) ); break;
case CALIBRATE_STATE_PERIOD
: RETRIEVE_DB( calibrate_state_period
, MASK64(19, 0) ); break;
case CALIBRATE_STATE_PERIOD_DONE
: RETRIEVE_DB( calibrate_state_period_done
, MASK64(0, 0) ); break;
case TRAINING_STATE_MIN_TIME
: RETRIEVE_DB( training_state_min_time
, MASK64(15, 0) ); break;
case TRAINING_STATE_DONE
: RETRIEVE_DB( training_state_done
, MASK64(1, 0) ); break;
case TRAINING_STATE_TIMEOUT
: RETRIEVE_DB( training_state_timeout
, MASK64(7, 0) ); break;
case TESTING_STATE_DONE
: RETRIEVE_DB( testing_state_done
, MASK64(1, 0) ); break;
case TESTING_STATE_TIMEOUT
: RETRIEVE_DB( testing_state_timeout
, MASK64(7, 0) ); break;
case POLLING_STATE_DONE
: RETRIEVE_DB( polling_state_done
, MASK64(1, 0) ); break;
case POLLING_STATE_TIMEOUT
: RETRIEVE_DB( polling_state_timeout
, MASK64(7, 0) ); break;
case CONFIG_STATE_DONE
: RETRIEVE_DB( config_state_done
, MASK64(1, 0) ); break;
case CONFIG_STATE_TIMEOUT
: RETRIEVE_DB( config_state_timeout
, MASK64(7, 0) ); break;
case DRAM_PER_RANK_CKE
: RETRIEVE_DB( dram_per_rank_cke
, MASK64(15, 0) ); break;
case L0S_DURATION
: RETRIEVE_DB( l0s_duration
, MASK64(6, 0) ); break;
case CHNL_SYNC_FRAME_FREQ
: RETRIEVE_DB( chnl_sync_frame_freq
, MASK64(5, 0) ); break;
case CHNL_READ_LAT
: RETRIEVE_DB( chnl_read_lat
, MASK64(15, 0) ); break;
case CHNL_CAPABILITY
: RETRIEVE_DB( chnl_capability
, MASK64(9, 0) ); break;
case LOOPBACK_MODE_CNTL
: RETRIEVE_DB( loopback_mode_cntl
, MASK64(1, 0) ); break;
case SERDES_CONFIG_BUS
: RETRIEVE_DB( serdes_config_bus
, MASK64(24, 0) ); break;
case SERDES_INVPAIR
: RETRIEVE_DB( serdes_invpair
, MASK64(47, 0) ); break;
case SERDES_TEST_CONFIG_BUS
: RETRIEVE_DB( serdes_test_config_bus
, MASK64(31, 0) ); break;
case CONFIG_REG_ACCESS_ADDR
: RETRIEVE_DB( config_reg_access_addr
, MASK64(15, 0) ); break;
case CONFIG_REG_ACCESS_DATA
:
/* Tunnel a read through to the addressed AMB register. */
ambid
= AMBID(dbp
->config_reg_access_addr
);
switch (AMBADDR(dbp
->config_reg_access_addr
)){
case FBD_VID_DID
: val
=(uint64_t)dbp
->amb
[ambid
].vid_did
; break;
case FBDS
: val
=dbp
->amb
[ambid
].fbds
; break;
case EMASK
: val
=dbp
->amb
[ambid
].emask
; break;
case FERR
: val
=dbp
->amb
[ambid
].ferr
; break;
case NERR
: val
=dbp
->amb
[ambid
].nerr
; break;
case PSBYTE3_0
: val
=dbp
->amb
[ambid
].psbyte3_0
; break;
case PSBYTE7_4
: val
=dbp
->amb
[ambid
].psbyte7_4
; break;
case PSBYTE11_8
: val
=dbp
->amb
[ambid
].psbyte11_8
; break;
case PSBYTE13_12
: val
=dbp
->amb
[ambid
].psbyte13_12
; break;
case C2DINCRCUR_CMD2DATANXT
: val
=dbp
->amb
[ambid
].c2dincrcur_cmd2datanxt
;
case MBCSR
: val
=dbp
->amb
[ambid
].mbcsr
; break;
case DAREFTC
: val
=dbp
->amb
[ambid
].dareftc
; break;
case MTR_DSREFTC
: val
=dbp
->amb
[ambid
].mtr_dsreftc
; break;
case DRT
: val
=dbp
->amb
[ambid
].drt
; break;
case DRC
: val
=dbp
->amb
[ambid
].drc
; break;
case DCALCSR
: val
=dbp
->amb
[ambid
].dcalcsr
; break;
case DCALADDR
: val
=dbp
->amb
[ambid
].dcaladdr
; break;
case DDR2ODTC
: val
=dbp
->amb
[ambid
].ddr2odtc
; break;
/* illegal reg - an error */
EXEC_WARNING( ("Unimplemented read amb address = 0x%x, ambid=0x%x",
AMBADDR(dbp
->config_reg_access_addr
), ambid
) );
* Performance counter section 10.3 of N2 PRM 1.1
case DRAM_PERF_CTL
: RETRIEVE_DB( perf_ctl
, MASK64(7, 0) ); break;
case DRAM_PERF_COUNT
: RETRIEVE_DB( perf_count
, MASK64(63, 0) ); break;
* Error handling section 25.12 of N2 PRM 1.2
case DRAM_ERROR_STATUS
: RETRIEVE_DB( error_status
, MASK64(63, 0) ); break;
case DRAM_ERROR_ADDRESS
: RETRIEVE_DB( error_address
, MASK64(39, 4) ); break;
case DRAM_ERROR_INJECT
: RETRIEVE_DB( error_inject
, MASK64(31,30)|MASK64(15,0) ); break;
case DRAM_ERROR_COUNTER
: RETRIEVE_DB( error_counter
, MASK64(15, 0) ); break;
case DRAM_ERROR_LOCATION
: RETRIEVE_DB( error_location
, MASK64(35, 0) ); break;
case DRAM_ERROR_RETRY
: RETRIEVE_DB( error_retry
, MASK64(63, 63)|MASK64(49,32)|MASK64(17,0) ); break;
case DRAM_FBD_ERROR_SYND
: RETRIEVE_DB( fbd_error_synd
, MASK64(63, 63)|MASK64(29,0) ); break;
case DRAM_FBD_INJ_ERROR_SRC
: RETRIEVE_DB( fbd_inj_error_src
, MASK64(1, 0) ); break;
case DRAM_FBR_COUNT
: RETRIEVE_DB( fbr_count
, MASK64(16, 0) ); break;
* Power management section 26.3 of N2 PRM 0.9.1
case DRAM_OPEN_BANK_MAX
: RETRIEVE_DB( open_bank_max
, MASK64(16, 0) ); break;
case DRAM_PROG_TIME_CNTR
: RETRIEVE_DB( prog_time_cntr
, MASK64(15, 0) ); break;
* Hardware debug section 29.2.2 of N2 PRM 0.9.1
case DRAM_DBG_TRG_EN
: RETRIEVE_DB( dbg_trg_en
, MASK64(2, 2) ); break;
case IBIST_NBFIB_CTL
: RETRIEVE_DB( ibist_nbfib_ctl
, MASK64(53, 0) ); break;
case IBIST_SBFIB_CTL
: RETRIEVE_DB( ibist_sbfib_ctl
, MASK64(55, 0) ); break;
/* illegal reg - an error */
DBGMC(lprintf(sp
->gid
, "Memory controller bank %d : Read register 0x%lx '%s' value= 0x%llx on node %d\n",
bank
, off
, mcu_reg_name(reg
), val
, node_id
); );
/* Skip the writeback when the load's destination is %g0. */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
* Create address mapping to access PCIE Cfg/IO, MEM32 and MEM64 space
void niagara2_pcie_mapping(simcpu_t
*sp
, ncu_t
*ncup
, piu_region_t region
)
uint64_t base
, mask
, size
;
const char *name
[3] = {"Cfg/IO", "Mem32", "Mem64"};
base
= ncup
->regs
.pcie_a_iocon_offset_base
;
mask
= ncup
->regs
.pcie_a_iocon_offset_mask
;
base
= ncup
->regs
.pcie_a_mem32_offset_base
;
mask
= ncup
->regs
.pcie_a_mem32_offset_mask
;
base
= ncup
->regs
.pcie_a_mem64_offset_base
;
mask
= ncup
->regs
.pcie_a_mem64_offset_mask
;
enable
= GETMASK64(base
,63,63);
base
&= PIU_REGION_OFFSET_MASK
;
mask
&= PIU_REGION_OFFSET_MASK
;
size
= ~(MASK64(63,36)|mask
) + 1;
ncup
->map
[region
].base
= base
;
ncup
->map
[region
].mask
= mask
;
ncup
->map
[region
].size
= size
;
ncup
->map
[region
].enable
= enable
;
DBGDEV(lprintf(sp
->gid
, "PCIE %s is mapped at 0x%llx - 0x%llx of node %d\n",
name
[region
], base
, base
+size
-1, ncup
->node_id
); );