/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: niagara2_device.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "@(#)niagara2_device.c 1.35 07/10/12 SMI"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <strings.h>
#include "ss_common.h"
#include "niagara2.h"
#include "niagara2_device.h"
#if INTERNAL_BUILD
#include "lfsr64.h"
#endif
/*
* This file contains Niagara 2 specific pseudo device models
*/
static void ncu_init(config_dev_t *);
static void ccu_init(config_dev_t *);
static void mcu_init(config_dev_t *);
static void l2c_init(config_dev_t *);
static void ssi_init(config_dev_t *);
static void hwdbg_init(config_dev_t *);
static void rcu_init(config_dev_t *);
static void jtag_init(config_dev_t *);
static bool_t ncu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t ccu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t mcu_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp);
static bool_t l2c_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp);
static bool_t ssi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t hwdbg_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t rcu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t jtag_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
#ifdef VFALLS /* { */
static void ncx_init(config_dev_t *);
static void cou_init(config_dev_t *);
static void lfu_init(config_dev_t *);
static bool_t ncx_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t cou_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t lfu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
#endif /* } VFALLS */
void niagara2_send_xirq(simcpu_t * sp, ss_proc_t * npp, uint64_t val);
static dev_type_t dev_type_ncu = {
PSEUDO_DEV_NAME_NCU,
NULL, /* parse */
ncu_init,
NULL, /* dump */
generic_device_non_cacheable,
ncu_access,
DEV_MAGIC
};
static dev_type_t dev_type_ccu = {
PSEUDO_DEV_NAME_CCU,
NULL, /* parse */
ccu_init,
NULL, /* dump */
generic_device_non_cacheable,
ccu_access,
DEV_MAGIC
};
static dev_type_t dev_type_mcu = {
PSEUDO_DEV_NAME_MCU,
NULL, /* parse */
mcu_init,
NULL, /* dump */
generic_device_non_cacheable,
mcu_access,
DEV_MAGIC
};
static dev_type_t dev_type_dbg = {
PSEUDO_DEV_NAME_HWDBG,
NULL, /* parse */
hwdbg_init,
NULL, /* dump */
generic_device_non_cacheable,
hwdbg_access,
DEV_MAGIC
};
static dev_type_t dev_type_l2c = {
PSEUDO_DEV_NAME_L2C,
NULL, /* parse */
l2c_init,
NULL, /* dump */
generic_device_non_cacheable,
l2c_access,
DEV_MAGIC
};
static dev_type_t dev_type_ssi = {
PSEUDO_DEV_NAME_SSI,
NULL, /* parse */
ssi_init,
NULL, /* dump */
generic_device_non_cacheable,
ssi_access,
DEV_MAGIC
};
static dev_type_t dev_type_rcu = {
PSEUDO_DEV_NAME_RCU,
NULL, /* parse */
rcu_init,
NULL, /* dump */
generic_device_non_cacheable,
rcu_access,
DEV_MAGIC
};
static dev_type_t dev_type_jtag = {
PSEUDO_DEV_NAME_JTAG,
NULL, /* parse */
jtag_init, /* init */
NULL, /* dump */
generic_device_non_cacheable,
jtag_access,
DEV_MAGIC
};
#ifdef VFALLS /* { */
static dev_type_t dev_type_ncx = {
PSEUDO_DEV_NAME_NCX,
NULL, /* parse */
ncx_init, /* init */
NULL, /* dump */
generic_device_non_cacheable,
ncx_access,
DEV_MAGIC
};
static dev_type_t dev_type_cou = {
PSEUDO_DEV_NAME_COU,
NULL, /* parse */
cou_init, /* init */
NULL, /* dump */
generic_device_non_cacheable,
cou_access,
DEV_MAGIC
};
static dev_type_t dev_type_lfu = {
PSEUDO_DEV_NAME_LFU,
NULL, /* parse */
lfu_init, /* init */
NULL, /* dump */
generic_device_non_cacheable,
lfu_access,
DEV_MAGIC
};
#endif /* } VFALLS */
uint64_t gen_raw_entropy(double *phase, double *frequency, double *noise, double dutyfactor);
/*
* Set up the pseudo physical devices that Niagara 2 has for its control
* registers: things like the clock unit, memory controllers, etc.
*/
void ss_setup_pseudo_devs(domain_t * domainp, ss_proc_t *procp)
{
config_dev_t *pd, *overlapp;
int node_id = procp->config_procp->proc_id;
uint64_t phys_addr;
config_addr_t *addrp;
int i;
static bool_t setup_once = false;
if (!setup_once) {
setup_once = true;
/*
* NCU, mapped at MSB[39:32] = 0x80
*/
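/*
 * (Editor's note: MSB[39:32] = 0x80 means PHYS_ADDR_NCU is
 * 0x80ULL << 32 == 0x80.0000.0000, which matches the "mapped at
 * offset = 0x8000000000" note above ncu_access() below.)
 */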
procp->ncup = Xcalloc(1, ncu_t);
procp->ncup->node_id = node_id;
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ncu;
pd->devp = (void*)procp;
procp->ncu_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_NCU, PHYS_ADDR_NCU + NCU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* Clock Unit, mapped at MSB[39:32] = 0x83
*/
procp->clockp = Xcalloc(1, ccu_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ccu;
pd->devp = (void*)procp;
procp->clock_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_CCU, PHYS_ADDR_CCU + CCU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* Memory Controller Unit, mapped at MSB[39:32] = 0x84
*
* N2 supports 4 DRAM branches, each controlled by a separate MCU,
* configured as
*
* MCU0: addr[13:12]= 00b
* MCU1: addr[13:12]= 01b
* MCU2: addr[13:12]= 10b
* MCU3: addr[13:12]= 11b
*/
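/*
 * Illustrative decode (editor's sketch, not code from the model): the
 * MCU CSR space is spaced at 4KB per branch, as the
 * PHYS_ADDR_MCU + 4096*num_mbanks range below implies, so
 *
 *	bank = (off >> 12) & (num_mbanks - 1);
 *
 * e.g. an access at PHYS_ADDR_MCU + 0x2000 selects MCU2.
 */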
#ifdef VFALLS
procp->num_mbanks = 2;
#else
procp->num_mbanks = 4;
#endif
procp->mbankp = Xcalloc(procp->num_mbanks, mcu_bank_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_mcu;
pd->devp = (void*)procp;
procp->mcu_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_MCU,
PHYS_ADDR_MCU+4096LL*(uint64_t)procp->num_mbanks);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* L2 Cache registers, mapped at MSB[39:32] = 0xA0
*/
procp->num_l2banks = L2_BANKS;
procp->l2p = Xcalloc(1, l2c_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_l2c;
pd->devp = (void*)procp;
procp->l2c_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_L2C, PHYS_ADDR_L2C + L2C_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* HW Debug Unit, mapped at MSB[39:32] = 0x86
*/
procp->hwdbgp = Xcalloc(1, hwdbg_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_dbg;
pd->devp = (void*)procp;
procp->hwdbg_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_HWDBG, PHYS_ADDR_HWDBG+0x100000000LL);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* Reset Unit, mapped at MSB[39:32] = 0x89
*/
procp->rcup = Xcalloc(1, rcu_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_rcu;
pd->devp = (void*)procp;
procp->rcu_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_RCU, PHYS_ADDR_RCU + RCU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* JTAG, mapped at MSB[39:32] = 0x90
*/
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_jtag;
pd->devp = (void*)procp;
procp->jtag_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_JTAG, PHYS_ADDR_JTAG + JTAG_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
#ifdef VFALLS /* { */
/*
* NCX, mapped at MSB[39:32] = 0x81
*/
procp->ncxp = Xcalloc(1, ncx_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ncx;
pd->devp = (void*)procp;
procp->ncx_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_NCX, PHYS_ADDR_NCX + NCX_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* COU, mapped at MSB[39:32] = 0x811
*/
procp->coup = Xcalloc(1, cou_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_cou;
pd->devp = (void*)procp;
procp->cou_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_COU, PHYS_ADDR_COU + COU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* LFU, mapped at MSB[39:32] = 0x812
*/
procp->lfup = Xcalloc(1, lfu_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_lfu;
pd->devp = (void*)procp;
procp->lfu_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_LFU, PHYS_ADDR_LFU + LFU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
#endif /* } VFALLS */
/*
* SSI, mapped at MSB[39:32] = 0xff
*/
procp->ssip = Xcalloc(1, ssi_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ssi;
pd->devp = (void*)procp;
procp->ssi_devp = pd;
insert_domain_address(domainp, pd, PHYS_ADDR_SSI, PHYS_ADDR_SSI + SSI_RANGE);
#ifdef VFALLS /* { */
insert_domain_address(domainp, pd, MAGIC_SSI, MAGIC_SSI + 8);
#endif /* } VFALLS */
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
}
#ifdef VFALLS /* { */
DBGMULNODE(lprintf(-1, "Setting up pseudo devices for node %d\n",
node_id););
/*
* Rather than hitting a fatal error later while trying to allocate
* addr space for the pseudo devices of duplicate nodes, it is
* better to catch the problem here.
*/
for (i = 0; i < (domainp->procs.count - 1); i++) {
if (node_id == LIST_ENTRY(domainp->procs, i)->proc_id)
lex_fatal("More than one node %d present",
node_id);
}
/*
* NCU, mapped at MSB[39:32] = 0x80
*/
/* Need to allocate space only once for each node */
if (!(procp->ncup)) {
procp->ncup = Xcalloc(1, ncu_t);
procp->ncup->node_id = node_id;
}
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ncu;
pd->devp = (void*)procp;
procp->ncu_devp = pd;
phys_addr = PHYS_ADDR_NCU_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr , phys_addr + NCU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* Clock Unit, mapped at MSB[39:32] = 0x83
*/
/* Need to allocate space only once for each node */
if (!(procp->clockp))
procp->clockp = Xcalloc(1, ccu_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ccu;
pd->devp = (void*)procp;
procp->clock_devp = pd;
phys_addr = PHYS_ADDR_CCU_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr, phys_addr + CCU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* Memory Controller Unit, mapped at MSB[39:32] = 0x84
*
* VF supports 2 DRAM branches, each controlled by a separate MCU,
* configured as
*
* MCU0: addr[12]= 0b
* MCU1: addr[12]= 1b
*/
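/*
 * (Editor's sketch, as for the N2 case above: with two branches only
 * addr[12] matters, i.e. bank = (off >> 12) & 1, so an access at
 * PHYS_ADDR_MCU_REMOTE(n) + 0x1000 selects MCU1 on node n.)
 */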
procp->num_mbanks = 2;
/* Need to allocate space only once for each node */
if (!(procp->mbankp))
procp->mbankp = Xcalloc(procp->num_mbanks, mcu_bank_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_mcu;
pd->devp = (void*)procp;
procp->mcu_devp = pd;
phys_addr = PHYS_ADDR_MCU_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr,
phys_addr + 4096LL*(uint64_t)procp->num_mbanks);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* L2 Cache registers, mapped at MSB[39:28] = 0xA00-0xBFF
*/
/* L2CSR is local access only. See comments in l2c_access() */
procp->num_l2banks = L2_BANKS;
if (!(procp->l2p))
procp->l2p = Xcalloc(1, l2c_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_l2c;
pd->devp = (void*)procp;
pd->domainp = domainp;
procp->l2c_devp = pd;
addrp = Xmalloc(sizeof(config_addr_t));
addrp->config_devp = pd;
addrp->baseaddr = PHYS_ADDR_L2C;
addrp->topaddr = PHYS_ADDR_L2C + L2C_RANGE;
addrp->range = L2C_RANGE;
pd->addrp = addrp;
/*
* SSI, mapped at MSB[39:28] = 0xff0 - 0xfff
*/
/* Need to allocate space only once for each node */
if (!(procp->ssip))
procp->ssip = Xcalloc(1, ssi_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ssi;
pd->devp = (void*)procp;
pd->domainp = domainp;
procp->ssi_devp = pd;
addrp = Xmalloc(sizeof(config_addr_t));
addrp->config_devp = pd;
addrp->baseaddr = PHYS_ADDR_SSI;
addrp->topaddr = PHYS_ADDR_SSI + SSI_RANGE;
addrp->range = SSI_RANGE;
pd->addrp = addrp;
/* and now create an addrp struc for the MAGIC_SSI address */
addrp = Xmalloc(sizeof(config_addr_t));
addrp->config_devp = pd;
addrp->baseaddr = MAGIC_SSI;
addrp->topaddr = MAGIC_SSI + 8;
addrp->range = 8;
/*
* JTAG, mapped at MSB[39:32] = 0x90
*/
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_jtag;
pd->devp = (void*)procp;
procp->jtag_devp = pd;
phys_addr = PHYS_ADDR_JTAG_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr, phys_addr + JTAG_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* NCX, mapped at MSB[39:32] = 0x81
*/
if (!(procp->ncxp))
procp->ncxp = Xcalloc(1, ncx_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_ncx;
pd->devp = (void*)procp;
procp->ncx_devp = pd;
phys_addr = PHYS_ADDR_NCX_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr, phys_addr + NCX_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* COU, mapped at MSB[39:32] = 0x811
*/
if (!(procp->coup))
procp->coup = Xcalloc(1, cou_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_cou;
pd->devp = (void*)procp;
procp->cou_devp = pd;
phys_addr = PHYS_ADDR_COU_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr, phys_addr + COU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* LFU, mapped at MSB[39:32] = 0x812
*/
if (!(procp->lfup))
procp->lfup = Xcalloc(1, lfu_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_lfu;
pd->devp = (void*)procp;
procp->lfu_devp = pd;
phys_addr = PHYS_ADDR_LFU_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr, phys_addr + LFU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
/*
* RCU, mapped at MSB[39:32] = 0x89
*/
if (!(procp->rcup))
procp->rcup = Xcalloc(1, rcu_t);
pd = Xcalloc(1, config_dev_t);
pd->is_implied = true;
pd->dev_typep = &dev_type_rcu;
pd->devp = (void*)procp;
procp->rcu_devp = pd;
phys_addr = PHYS_ADDR_RCU_REMOTE(node_id);
insert_domain_address(domainp, pd, phys_addr, phys_addr + RCU_RANGE);
overlapp = insert_domain_device(domainp, pd);
if (overlapp != NULL) {
lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
overlapp->dev_typep->dev_type_namep,
overlapp->addrp->baseaddr,
pd->dev_typep->dev_type_namep,
pd->addrp->baseaddr);
}
#endif /* } VFALLS */
}
#ifndef NDEBUG /* { */
char * ssi_reg_name(int reg)
{
char * s;
switch (reg) {
case SSI_TIMEOUT: s="ssi_timeout"; break;
case SSI_LOG: s="ssi_log"; break;
default: s="Illegal ssi register"; break;
}
return s;
}
static char *ncu_reg_name(int reg)
{
char * s;
switch (reg) {
case INT_MAN: s="int_man"; break;
case MONDO_INT_VEC: s="mondo_int_vec"; break;
case SER_NUM: s="ser_num"; break;
case EFU_STAT: s="efu_stat"; break;
case CORE_AVAIL: s="core_avail"; break;
case BANK_AVAIL: s="bank_avail"; break;
case BANK_ENABLE: s="bank_enable"; break;
case BANK_ENABLE_STATUS: s="bank_enable_status"; break;
case L2_IDX_HASH_EN: s="l2_idx_hash_en"; break;
case L2_IDX_HASH_EN_STATUS: s="l2_idx_hash_en_status"; break;
case PCIE_A_MEM32_OFFSET_BASE: s="pcie_a_mem32_offset_base"; break;
case PCIE_A_MEM32_OFFSET_MASK: s="pcie_a_mem32_offset_mask"; break;
case PCIE_A_MEM64_OFFSET_BASE: s="pcie_a_mem64_offset_base"; break;
case PCIE_A_MEM64_OFFSET_MASK: s="pcie_a_mem64_offset_mask"; break;
case PCIE_A_IOCON_OFFSET_BASE: s="pcie_a_iocon_offset_base"; break;
case PCIE_A_IOCON_OFFSET_MASK: s="pcie_a_iocon_offset_mask"; break;
case PCIE_A_FSH: s="pcie_a_fsh"; break;
case SOC_ESR: s="soc_error_status"; break;
case SOC_LOG_ENABLE: s="soc_error_log_enable"; break;
case SOC_INTERRUPT_ENABLE: s="soc_error_interrupt_enable"; break;
case SOC_FATAL_ERROR_ENABLE: s="soc_fatal_error_enable"; break;
case SOC_PENDING_ERROR_STATUS: s="soc_pending_error_status"; break;
case SOC_ERROR_INJECTION: s="soc_error_injection"; break;
case SOC_SII_ERROR_SYNDROME: s="soc_sii_error_syndrome"; break;
case SOC_NCU_ERROR_SYNDROME: s="soc_ncu_error_syndrome"; break;
case MONDO_INT_DATA0: s="mondo_int_data0"; break;
case MONDO_INT_DATA1: s="mondo_int_data1"; break;
case MONDO_INT_ADATA0: s="mondo_int_adata0"; break;
case MONDO_INT_ADATA1: s="mondo_int_adata1"; break;
case MONDO_INT_BUSY: s="mondo_int_busy"; break;
case MONDO_INT_ABUSY: s="mondo_int_abusy"; break;
default: s="Illegal NCU register"; break;
}
return s;
}
char * ccu_reg_name(int reg)
{
char * s;
switch (reg) {
case CLOCK_CONTROL: s="clock_control"; break;
case RAND_GEN: s="rand_gen"; break;
case RAND_CTL: s="rand_ctl"; break;
default: s="Illegal clock register"; break;
}
return s;
}
char * l2c_reg_name(int reg)
{
char * s;
switch (reg) {
case L2_DIAG_DATA: s="diag_data"; break;
case L2_DIAG_TAG: s="diag_tag"; break;
case L2_DIAG_VUAD: s="diag_vuad"; break;
case L2_CONTROL: s="control"; break;
case L2_ERROR_ENABLE: s="error_enable"; break;
case L2_ERROR_STATUS: s="error_status"; break;
#ifdef VFALLS
case L2_ERROR_STATUS_II: s="error_status_ii"; break;
#endif
case L2_ERROR_ADDRESS: s="error_address"; break;
case L2_ERROR_INJECT: s="error_inject"; break;
case L2_ERROR_NOTDATA: s="error_notdata"; break;
default: s="Illegal L2 control register"; break;
}
return s;
}
char * hwdbg_reg_name(int reg)
{
char * s;
switch (reg) {
case DEBUG_PORT_CONFIG: s="debug_port_config"; break;
case IO_QUIESCE_CONTROL: s="io_quiesce_control"; break;
default: s="Illegal Debug control register"; break;
}
return s;
}
char * mcu_reg_name(int reg)
{
char * s;
switch (reg) {
case DRAM_CAS_ADDR_WIDTH: s="cas_addr_width"; break;
case DRAM_RAS_ADDR_WIDTH: s="ras_addr_width"; break;
case DRAM_CAS_LAT: s="cas_lat"; break;
case DRAM_SCRUB_FREQ: s="scrub_frequency"; break;
case DRAM_REFRESH_FREQ: s="refresh_frequency"; break;
case DRAM_OPEN_BANK_MAX: s="open_bank_max"; break;
case DRAM_REFRESH_COUNTER: s="refresh_counter"; break;
case DRAM_SCRUB_ENABLE: s="scrub_enable"; break;
case DRAM_PROG_TIME_CNTR: s="program_time_cntr"; break;
case DRAM_TRRD: s="trrd"; break;
case DRAM_TRC: s="trc"; break;
case DRAM_TRCD: s="trcd"; break;
case DRAM_TWTR: s="twtr"; break;
case DRAM_TRTW: s="trtw"; break;
case DRAM_TRTP: s="trtp"; break;
case DRAM_TRAS: s="tras"; break;
case DRAM_TRP: s="trp"; break;
case DRAM_TWR: s="twr"; break;
case DRAM_TRFC: s="trfc"; break;
case DRAM_TMRD: s="tmrd"; break;
case DRAM_FAWIN: s="fawin"; break;
case DRAM_TIWTR: s="tiwtr"; break;
case DRAM_DIMM_STACK: s="dimm_stack"; break;
case DRAM_EXT_WR_MODE1: s="ext_wr_mode1"; break;
case DRAM_EXT_WR_MODE2: s="ext_wr_mode2"; break;
case DRAM_EXT_WR_MODE3: s="ext_wr_mode3"; break;
case DRAM_8_BANK_MODE: s="8_bank_mode"; break;
case DRAM_BRANCH_DISABLED: s="branch_disabled"; break;
case DRAM_SEL_LO_ADDR_BITS: s="sel_lo_addr_bits"; break;
case DRAM_SINGLE_CHNL_MODE: s="single_chnl_mode"; break;
#ifdef VFALLS
case DRAM_MIRROR_MODE: s="mirror_mode"; break;
#endif
case DRAM_DIMM_INIT: s="dimm_init"; break;
case DRAM_INIT_STATUS: s="init_status"; break;
case DRAM_DIMM_PRESENT: s="dimm_present"; break;
case DRAM_FAILOVER_STATUS: s="failover_status"; break;
case DRAM_FAILOVER_MASK: s="failover_mask"; break;
case DRAM_DBG_TRG_EN: s="dbg_trg_en"; break;
case DRAM_POWER_DOWN_MODE: s="power_down_mode"; break;
case DRAM_ERROR_STATUS: s="error_status"; break;
case DRAM_ERROR_ADDRESS: s="error_address"; break;
case DRAM_ERROR_INJECT: s="error_inject"; break;
case DRAM_ERROR_COUNTER: s="error_counter"; break;
case DRAM_ERROR_LOCATION: s="error_location"; break;
case DRAM_ERROR_RETRY: s="error_retry"; break;
case DRAM_FBD_ERROR_SYND: s="fbd_error_synd"; break;
case DRAM_FBD_INJ_ERROR_SRC: s="fbd_inj_error_src"; break;
case DRAM_FBR_COUNT: s="fbr_count"; break;
case DRAM_PERF_CTL: s="perf_ctl"; break;
case DRAM_PERF_COUNT: s="perf_count"; break;
case FBD_CHNL_STATE: s="fbd_channel_state"; break;
case FBD_FAST_RESET_FLAG: s="fbd_fast_reset_flag"; break;
case FBD_CHNL_RESET: s="fbd_channel_reset"; break;
case TS1_SB_NB_MAPPING: s="ts1_sb_nb_mapping"; break;
case TS1_TEST_PARAMETER: s="ts1_test_parameter"; break;
case TS3_FAILOVER_CONFIG: s="ts3_failover_config"; break;
case ELECTRICAL_IDLE_DETECTED: s="electrical_idle_detected"; break;
case DISABLE_STATE_PERIOD: s="disable_state_period"; break;
case DISABLE_STATE_PERIOD_DONE: s="disable_state_period_done"; break;
case CALIBRATE_STATE_PERIOD: s="calibrate_state_period"; break;
case CALIBRATE_STATE_PERIOD_DONE: s="calibrate_state_period_done"; break;
case TRAINING_STATE_MIN_TIME: s="training_state_min_time"; break;
case TRAINING_STATE_DONE: s="training_state_done"; break;
case TRAINING_STATE_TIMEOUT: s="training_state_timeout"; break;
case TESTING_STATE_DONE: s="testing_state_done"; break;
case TESTING_STATE_TIMEOUT: s="testing_state_timeout"; break;
case POLLING_STATE_DONE: s="polling_state_done"; break;
case POLLING_STATE_TIMEOUT: s="polling_state_timeout"; break;
case CONFIG_STATE_DONE: s="config_state_done"; break;
case CONFIG_STATE_TIMEOUT: s="config_state_timeout"; break;
case DRAM_PER_RANK_CKE: s="dram_per_rank_cke"; break;
case L0S_DURATION: s="l0s_duration"; break;
case CHNL_SYNC_FRAME_FREQ: s="channel_sync_frame_freq"; break;
case CHNL_READ_LAT: s="channel_read_lat"; break;
case CHNL_CAPABILITY: s="channel_capability"; break;
case LOOPBACK_MODE_CNTL: s="loopback_mode_cntl"; break;
case SERDES_CONFIG_BUS: s="serdes_config_bus"; break;
case SERDES_INVPAIR: s="serdes_invpair"; break;
case SERDES_TEST_CONFIG_BUS: s="serdes_test_config_bus"; break;
case CONFIG_REG_ACCESS_ADDR: s="config_reg_access_addr"; break;
case CONFIG_REG_ACCESS_DATA: s="config_reg_access_data"; break;
case IBIST_NBFIB_CTL: s="ibist_nbfib_ctl"; break;
case IBIST_SBFIB_CTL: s="ibist_sbfib_ctl"; break;
default: s="Illegal DRAM control register"; break;
}
return s;
}
char * jtag_reg_name(int reg)
{
char * s;
switch (reg) {
case INT_VECTOR_DISPATCH: s="int_vector_dispatch";break;
case ASI_CORE_AVAILABLE: s="asi_core_available";break;
case ASI_CORE_ENABLE_STATUS: s="asi_core_enable_status";break;
case ASI_CORE_ENABLE: s="asi_core_enable";break;
case ASI_CORE_RUNNING_RW: s="asi_core_running_rw";break;
case ASI_CORE_RUNNING_STATUS: s="asi_core_running_status";break;
case ASI_CORE_RUNNING_W1S: s="asi_core_running_w1s";break;
case ASI_CORE_RUNNING_W1C: s="asi_core_running_w1c";break;
case SOC_ERROR_STEERING: s="soc_error_steering";break;
default: s="Illegal JTAG region register"; break;
}
return s;
}
#ifdef VFALLS /* { */
char * ncx_reg_name(int reg)
{
char * s;
switch (reg) {
case CF_SYS_MODE_REG: s="system_mode_reg";break;
case NCX_TIC_EN_SLOW: s="tick_en_slow";break;
case CF_SLOW_PULSE_WAIT: s="slow_pulse_wait";break;
case NCX_TWR: s="twr";break;
case NCX_TPESR: s="tpesr";break;
case NCX_TPELSE: s="tpelse";break;
case NCX_TPEAR: s="tpear";break;
default: s="Illegal NCX region register"; break;
}
return s;
}
char * cou_reg_name(int reg)
{
char * s;
switch (reg) {
case COU_ERR_ENABLE_REG: s="cou_err_enable";break;
case COU_ESR: s="cou_esr";break;
case COU_EAR: s="cou_ear";break;
default: s="Illegal COU region register"; break;
}
return s;
}
char * lfu_reg_name(int reg)
{
char * s;
switch (reg) {
case CL_INIT_STATE: s="cl_init_state";break;
case CL_CFG_REG: s="cl_cfg_reg";break;
case CL_SERDES_CFG: s="cl_serdes_cfg";break;
case CL_SER_INVPAIR: s="cl_ser_invpair";break;
case CL_TEST_CFG: s="cl_test_cfg";break;
case CL_ERROR_STAT: s="cl_error_stat";break;
default: s="Illegal LFU region register"; break;
}
return s;
}
#endif /* } VFALLS */
#endif /* } NDEBUG */
static void ssi_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
#ifdef VFALLS /* { */
domain_t * domainp;
config_proc_t * config_procp;
int i;
int mode_shift;
uint64_t val;
int extern_hub;
int node_id;
bool_t zambezi_present = false;
config_dev_t *devp;
/*
* Note that for VF, each node has its own SSI region, and all of them
* are addressed by the same physical address range (0xFF.0000.0000 to
* 0xFF.0FFF.FFFF). (This does not refer to the ROM part of the SSI,
* which *really* is a single entity shared by the different nodes;
* the ROM occupies 0xFF.F000.0000 to 0xFF.FFFF.FFFF and its config
* and setup are handled by an entry in the config file.) Nodes can
* only access their local SSI regions, not those of other nodes, so
* the domain structure's address map contains only that one physical
* range and it is up to the init and access routines to map that PA
* to the correct node's SSI region.
* This is unlike the NCU, CCU, MCU etc., where there is a local CSR
* access address, common to all nodes and translated by hw to the
* originating node's address space, AND a remote CSR access address
* that allows any node to access any other node's address space. In
* those cases the domain address map contains both the local and the
* remote CSR address spaces.
*/
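/*
 * (Editor's illustration of the local/remote contrast described
 * above: a thread reaches its own NCU through PHYS_ADDR_NCU, while
 * PHYS_ADDR_NCU_REMOTE(node_id) is the globally visible alias set up
 * in ss_setup_pseudo_devs(); the SSI deliberately has no such remote
 * alias, only the one shared local range.)
 */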
domainp = config_devp->domainp;
/* Zambezi present? */
devp = domainp->device.listp;
for (i = 0; i < domainp->device.count; i++) {
if (streq(devp->dev_typep->dev_type_namep, "zambezi")) {
zambezi_present = true;
break;
}
devp = devp->nextp;
}
for (i = 0; i < domainp->procs.count; i++) {
config_procp = LIST_ENTRY(domainp->procs, i);
npp = (ss_proc_t *)config_procp->procp;
npp->ssip->timeout = 0;
npp->ssip->log = 0;
/* and init the magic ssi location with node_id and way info */
node_id = config_procp->proc_id;
extern_hub = zambezi_present? 1:0;
mode_shift = 11 - domainp->procs.count;
val = 1<<mode_shift | extern_hub<<6 | node_id<<4 | 0xf;
val &= MASK64(9,0);
npp->ssip->magic_ssi = val;
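/*
 * Editor's worked example of the encoding above: in a 4-node domain
 * with a Zambezi hub, node 2 gets mode_shift = 11 - 4 = 7, so
 * val = (1<<7) | (1<<6) | (2<<4) | 0xf = 0xef; MASK64(9,0) then
 * keeps the low 10 bits (a no-op for this value).
 */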
}
#else
npp = (ss_proc_t *)config_devp->devp;
npp->ssip->timeout = 0;
npp->ssip->log = 0;
#endif /* } VFALLS */
}
static void ncu_init(config_dev_t * config_devp)
{
ss_proc_t *npp;
ncu_t *ncup;
uint64_t device;
int i;
npp = (ss_proc_t *)config_devp->devp;
ncup = npp->ncup;
pthread_mutex_init(&ncup->ncu_lock, NULL);
/*
* setup init value (NCU spec, v0.99)
*/
for (device=0; device < NCU_DEV_MAX; device++) {
ncup->regs.int_man[device] = 0x0;
}
ncup->regs.mondo_int_vec = 0x0;
ncup->regs.ser_num = 0xdeadbeef;
ncup->regs.efu_stat = MASK64(63,0);
ncup->regs.bank_enb = 0xff;
ncup->regs.bank_enb_stat = 0x3cf;
ncup->regs.l2_idx_hash_en_stat = false;
ncup->regs.pcie_a_mem32_offset_base = 0x0;
ncup->regs.pcie_a_mem32_offset_mask = MASK64(39,36);
ncup->regs.pcie_a_mem64_offset_base = 0x0;
ncup->regs.pcie_a_mem64_offset_mask = MASK64(39,36);
ncup->regs.pcie_a_iocon_offset_base = 0x0;
ncup->regs.pcie_a_iocon_offset_mask = MASK64(39,36);
ncup->regs.pcie_a_fsh = 0x0;
ncup->regs.soc_esr = 0x0;
ncup->regs.soc_log_enb = 0x1fffffff;
ncup->regs.soc_intr_enb = 0x0;
ncup->regs.soc_err_inject = 0x0;
ncup->regs.soc_fatal_enb = 0x0;
ncup->regs.soc_sii_err_syndrome = 0x0;
ncup->regs.soc_ncu_err_syndrome = 0x0;
for (i = 0; i < NCU_TARGETS; i++) {
ncup->regs.mondo_int_data0[i] = 0x0;
ncup->regs.mondo_int_data1[i] = 0x0;
ncup->regs.mondo_int_busy[i] = NCU_MONDO_INT_BUSY;
}
}
static void hwdbg_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
npp = (ss_proc_t *)config_devp->devp;
npp->hwdbgp->debug_port_config = 0;
npp->hwdbgp->io_quiesce_control = 0;
}
static void rcu_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
npp = (ss_proc_t *)config_devp->devp;
npp->rcup->reset_gen = 0;
npp->rcup->reset_status = 0x4;
npp->rcup->reset_source = 0x10;
#ifdef VFALLS /* { */
npp->rcup->comt_divs = 0;
npp->rcup->comt_cfg = 0x23;
npp->rcup->clk_steer = 0;
npp->rcup->comt_lock_time = 0x0;
#endif /* } */
}
static void jtag_init(config_dev_t * config_devp)
{
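/* no simulated JTAG state to initialize in this model */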
}
#ifdef VFALLS /* { */
static void ncx_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
npp = (ss_proc_t *)config_devp->devp;
npp->ncxp->sys_mode = 0x0;
npp->ncxp->tick_en_slow = 0x0;
npp->ncxp->slow_pulse_wait = 0x0;
npp->ncxp->twr = 0xfffffc00; /* Table 12-9, VF PRM 0.1 */
npp->ncxp->tpesr = 0x0;
npp->ncxp->tpelse = 0x0;
npp->ncxp->tpear = 0x0;
}
static void cou_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
int link;
npp = (ss_proc_t *)config_devp->devp;
for (link = 0; link < COU_LINK_MAX; link++) {
npp->coup->cou_err_enable[link] = 0x0;
npp->coup->cou_esr[link] = 0x0;
npp->coup->cou_ear[link] = 0x0;
}
}
static void lfu_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
int link;
npp = (ss_proc_t *)config_devp->devp;
for (link = 0; link < LFU_MAX_LINKS; link++) {
npp->lfup->cl_init_state[link] = 0x0;
npp->lfup->cl_cfg_reg[link] = 0x28;
npp->lfup->cl_serdes_cfg[link] = 0x1c1000000;
npp->lfup->cl_ser_invpair[link] = 0x0;
npp->lfup->cl_test_cfg[link] = 0x3;
npp->lfup->cl_error_stat[link] = 0x0;
}
}
#endif /* } VFALLS */
static void ccu_init(config_dev_t * config_devp)
{
ss_proc_t * npp;
ccu_t * clockp;
npp = (ss_proc_t *)config_devp->devp;
clockp = npp->clockp;
clockp->control = 0x1002011c1; /* table 11.1, section 11.1 of N2 PRM rev 1.0 */
clockp->rand_state.ctl = (1 << RC_MODE_SHIFT) |
(7 << RC_NOISE_CELL_SEL_SHIFT);
/*
* 0x3e is from N2 CCU MAS v1.61 11/01/05, Table 8.2. This is
* decimal 62, which is way too small. But that's what the HW
* does, so we model it here.
*/
clockp->rand_state.ctl |= 0x3e << RC_DELAY_SHIFT;
}
static void l2c_init(config_dev_t * config_devp)
{
int bank, idx;
ss_proc_t * npp;
l2c_t * l2p;
#ifdef VFALLS /* { */
domain_t * domainp;
config_proc_t * config_procp;
int i;
/*
* Note that for VF, each node has its own L2CSR region, and all of
* them are addressed by the same physical address range
* (0xA0.0000.0000 to 0xBF.FFFF.FFFF). Nodes can only access their
* local L2CSR regions, not those of other nodes, so the domain
* structure's address map contains only that one physical range and
* it is up to the init and access routines to map that PA to the
* correct node's L2CSR region. This is unlike the NCU, CCU, MCU etc.,
* where there is a local CSR access address, common to all nodes and
* translated by hw to the originating node's address space, AND a
* remote CSR access address that allows any node to access any other
* node's address space.
*/
domainp = config_devp->domainp;
for (i = 0; i < domainp->procs.count; i++) {
config_procp = LIST_ENTRY(domainp->procs, i);
npp = (ss_proc_t *)config_procp->procp;
l2p = npp->l2p;
for (bank=0; bank<npp->num_l2banks; bank++) {
l2p->control[bank] = L2_DIS;
l2p->bist_ctl[bank] = 0x0;
l2p->error_enable[bank] = 0xfffffc00; /* Table 12-3, VF PRM 0.1 */
l2p->error_status[bank] = 0x0;
l2p->error_status_ii[bank]= 0x0;
l2p->error_address[bank]= 0x0;
l2p->error_inject[bank] = 0x0;
}
l2p->diag_datap = Xmalloc(L2_DATA_SIZE);
l2p->diag_tagp = Xmalloc(L2_TAG_SIZE);
l2p->diag_vuadp = Xmalloc(L2_VUAD_SIZE);
for (idx=0; idx<L2_DATA_SIZE/8; idx++) {
l2p->diag_datap[idx] = 0xdeadbeef;
}
for (idx=0; idx<L2_TAG_SIZE/8; idx++) {
l2p->diag_tagp[idx] = 0xdeadbeef;
}
for (idx=0; idx<L2_VUAD_SIZE/8; idx++) {
l2p->diag_vuadp[idx] = 0xdeadbeef;
}
}
#else
npp = (ss_proc_t *)config_devp->devp;
l2p = npp->l2p;
for (bank=0; bank<npp->num_l2banks; bank++) {
l2p->control[bank] = L2_DIS;
l2p->bist_ctl[bank] = 0x0;
l2p->error_enable[bank] = 0x0;
l2p->error_status[bank] = 0x0;
l2p->error_address[bank]= 0x0;
l2p->error_inject[bank] = 0x0;
}
l2p->diag_datap = Xmalloc(L2_DATA_SIZE);
l2p->diag_tagp = Xmalloc(L2_TAG_SIZE);
l2p->diag_vuadp = Xmalloc(L2_VUAD_SIZE);
for (idx=0; idx<L2_DATA_SIZE/8; idx++) {
l2p->diag_datap[idx] = 0xdeadbeef;
}
for (idx=0; idx<L2_TAG_SIZE/8; idx++) {
l2p->diag_tagp[idx] = 0xdeadbeef;
}
for (idx=0; idx<L2_VUAD_SIZE/8; idx++) {
l2p->diag_vuadp[idx] = 0xdeadbeef;
}
#endif /* } VFALLS */
}
static void mcu_init(config_dev_t * config_devp)
{
int bidx;
ss_proc_t * npp;
mcu_bank_t * dbp;
uint64_t i;
npp = (ss_proc_t *)config_devp->devp;
for (bidx=0; bidx<npp->num_mbanks; bidx++) {
dbp = &(npp->mbankp[bidx]);
dbp->cas_addr_width = 0xb;
dbp->ras_addr_width = 0xf;
dbp->cas_lat = 0x3;
dbp->scrub_freq = 0xfff;
dbp->refresh_freq = 0x514;
dbp->refresh_counter = 0x0;
dbp->scrub_enable = 0x0;
dbp->trrd = 0x2;
dbp->trc = 0xc;
dbp->trcd = 0x3;
dbp->twtr = 0x0;
dbp->trtw = 0x0;
dbp->trtp = 0x2;
dbp->tras = 0x9;
dbp->trp = 0x3;
dbp->twr = 0x3;
dbp->trfc = 0x27;
dbp->tmrd = 0x2;
#ifdef VFALLS
dbp->fawin = 0xa;
#else
dbp->fawin = 0x2;
#endif
dbp->tiwtr = 0x2;
dbp->dimm_stack = 0x0;
dbp->ext_wr_mode2 = 0x0;
dbp->ext_wr_mode1 = 0x18;
dbp->ext_wr_mode3 = 0x0;
dbp->eight_bank_mode = 0x1;
dbp->sel_lo_addr_bits = 0x0;
dbp->single_chnl_mode = 0x0;
#ifdef VFALLS
dbp->mirror_mode = 0x0;
#endif
dbp->dimm_init = 0x1;
dbp->init_status = 0x0;
dbp->dimm_present = 0x3;
dbp->failover_status = 0x0;
dbp->failover_mask = 0x0;
dbp->power_down_mode = 0x0;
dbp->fbd_chnl_state.val = 0x0;
for (i=0; i<MAX_AMBS; i++){
dbp->fbd_chnl_state.ambstate[i] = 0x0;
dbp->amb[i].vid_did = 0x00E01033; /* use e0 for now until we find valid nec did */
dbp->amb[i].fbds = 0x0; /* set all error stat bits to 0 */
dbp->amb[i].emask = 0x36;
dbp->amb[i].ferr = 0x0;
dbp->amb[i].nerr = 0x0;
dbp->amb[i].psbyte3_0 = 0x0;
dbp->amb[i].psbyte7_4 = 0x0;
dbp->amb[i].psbyte11_8 = 0x0;
dbp->amb[i].psbyte13_12 = 0x0;
dbp->amb[i].c2dincrcur_cmd2datanxt = 0x0;
dbp->amb[i].mbcsr = 0x0;
dbp->amb[i].dareftc = ((0x4e << 16) | 0x0c30);
dbp->amb[i].mtr_dsreftc = ((1 << 16) |
(0x56 << 8) |
(0xf << 4) | 0x7);
dbp->amb[i].drt = 0x0;
dbp->amb[i].drc = ((0x1 << 18) | /* set default value */
(0x1 << 12) |
(0x2 << 4) |
(0x3 << 0));
dbp->amb[i].dcalcsr = 0x0;
dbp->amb[i].dcaladdr = 0x0;
dbp->amb[i].ddr2odtc = 0x0;
}
dbp->fbd_fast_reset_flag = 0x0;
dbp->fbd_chnl_reset = 0x0;
dbp->ts1_sb_nb_mapping = 0x0;
dbp->ts1_test_parameter = 0x0;
dbp->ts3_failover_config = 0x0;
dbp->electrical_idle_detected = 0x0;
dbp->disable_state_period = 0x3f;
dbp->disable_state_period_done = 0x0;
dbp->calibrate_state_period = 0x0;
dbp->calibrate_state_period_done = 0x0;
dbp->training_state_min_time = 0xff;
dbp->training_state_done = 0x0;
dbp->training_state_timeout = 0xff;
dbp->testing_state_done = 0x0;
dbp->testing_state_timeout = 0xff;
dbp->polling_state_done = 0x0;
dbp->polling_state_timeout = 0xff;
dbp->config_state_done = 0x0;
dbp->config_state_timeout = 0xff;
dbp->dram_per_rank_cke = 0xffff;
dbp->l0s_duration = 0x2a;
dbp->chnl_sync_frame_freq = 0x2a;
dbp->chnl_read_lat = 0xffff;
dbp->chnl_capability = 0x0;
dbp->loopback_mode_cntl = 0x0;
dbp->serdes_config_bus = 0x0;
dbp->serdes_invpair = 0x0;
dbp->config_reg_access_addr = 0x0;
dbp->config_reg_access_data = 0x0;
dbp->ibist_nbfib_ctl = 0x03c01e478LL;
dbp->ibist_sbfib_ctl = 0x23c01e478LL;
/*
* Performance counter section 10.3 of PRM 0.9.1
*/
dbp->perf_ctl = 0x0;
dbp->perf_count = 0x0;
/*
* Error handling section 25.12 of PRM 1.2
*/
dbp->error_status = 0x0;
dbp->error_address = 0x0;
dbp->error_inject = 0x0;
dbp->error_counter = 0x0;
dbp->error_location = 0x0;
dbp->error_retry = 0x0;
/*
* Power management section 26.3 of PRM 0.9.1
*/
dbp->open_bank_max = 0x1ffff;
dbp->prog_time_cntr = 0xffff;
/*
* Hardware debug section 29.1 of PRM 0.9.1
*/
dbp->dbg_trg_en = 0x0;
}
}
/*
* Access SSI registers (mapped at offset = 0xff00000000)
*/
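/*
 * (Editor's note on the decode conventions used by the access
 * routines below: MASK64(hi,lo) is the 64-bit mask with bits hi..lo
 * set -- e.g. MASK64(24,0) == 0x1ffffff and MASK64(63,0) == ~0ULL --
 * and RSVD_MASK() flags writes that touch bits outside the
 * implemented field mask. Both come from the common legion headers.)
 */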
static bool_t ssi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
int reg;
uint64_t val;
ss_proc_t *npp;
ssi_t * ssip;
#ifdef VFALLS /* { */
/*
* Note that for VF, each node has its own SSI region, and all of them
* are addressed by the same physical address range (0xFF.0000.0000 to
* 0xFF.0FFF.FFFF). (This does not refer to the ROM part of the SSI,
* which *really* is a single entity shared by the different nodes;
* the ROM occupies 0xFF.F000.0000 to 0xFF.FFFF.FFFF and its config
* and setup are handled by an entry in the config file.) Nodes can
* only access their local SSI regions, not those of other nodes, so
* the domain structure's address map contains only that one physical
* range and it is up to the init and access routines to map that PA
* to the correct node's SSI region.
* This is unlike the NCU, CCU, MCU etc., where there is a local CSR
* access address, common to all nodes and translated by hw to the
* originating node's address space, AND a remote CSR access address
* that allows any node to access any other node's address space. In
* those cases the domain address map contains both the local and the
* remote CSR address spaces.
*/
/* Redirect the common SSI PA to the correct node*/
npp = (ss_proc_t *)sp->config_procp->procp;
/* for accessing magic SSI loc by reset so as to set up sys_mode reg */
if (config_addrp->baseaddr==MAGIC_SSI){
if (op == MA_st64){
EXEC_WARNING( ("Attempted write to reserved magic field in ssi"));
return false;
}
else if (op == MA_ldu64) {
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = npp->ssip->magic_ssi;
return true;
}
else
return false;
}
/* or else this is a normal SSI register access */
else
config_addrp = npp->ssi_devp->addrp;
#endif /* } VFALLS */
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
ssip = npp->ssip;
reg = off & 0x1ffff;
switch (op) {
case MA_st64:
val = *regp;
switch (reg) {
case SSI_TIMEOUT:
RSVD_MASK(sp, (MASK64(24, 0)), val, 0, reg);
ssip->timeout = val;
break;
case SSI_LOG:
RSVD_MASK(sp, (MASK64(1, 0)), val, 0, reg);
ssip->log &= ~val;
break;
default:
/* illegal reg - an error */
return false;
}
break;
case MA_ldu64:
switch (reg) {
case SSI_TIMEOUT:
val = ssip->timeout;
break;
case SSI_LOG:
val = ssip->log;
break;
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t hwdbg_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t *npp;
hwdbg_t * hwdbgp;
uint64_t val;
int reg;
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
hwdbgp = npp->hwdbgp;
reg = off & ~0xfULL;
switch (op) {
case MA_st64:
val = *regp;
switch (reg) {
case DEBUG_PORT_CONFIG:
RSVD_MASK(sp, (MASK64(63,62)|MASK64(9,0)), val, 0, reg);
lprintf(sp->gid, "DEBUG_PORT_CONFIG addr=0x%llx being written with val=0x%llx\n", reg, val);
hwdbgp->debug_port_config = val;
break;
case IO_QUIESCE_CONTROL:
RSVD_MASK(sp, MASK64(3,0), val, 0, reg);
lprintf(sp->gid, "IO_QUIESCE_CONTROL addr=0x%llx being written with val=0x%llx\n", reg, val);
hwdbgp->io_quiesce_control = val;
break;
default:
/* illegal reg - an error */
return false;
}
break;
case MA_ldu64:
switch (reg) {
case DEBUG_PORT_CONFIG:
val = hwdbgp->debug_port_config;
break;
case IO_QUIESCE_CONTROL:
val = hwdbgp->io_quiesce_control;
break;
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t rcu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t *npp;
rcu_t * rcup;
uint64_t val;
int reg;
int node_id= 0;
#ifdef VFALLS /* { */
domain_t *domainp;
tpaddr_t pa;
int idx;
if (config_addrp->baseaddr == PHYS_ADDR_RCU) {
/*
* if local RCU CSR access, need to convert to Node X(this node)
* RCU CSR address
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
pa = PHYS_ADDR_RCU_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
} else {
/*
* If remote RCU CSR access, use config_addrp to get at the node_id.
*/
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "RCU");
domainp = config_addrp->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (config_addrp->baseaddr == PHYS_ADDR_RCU_REMOTE(node_id))
break;
}
}
#endif /* } VFALLS */
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
rcup = npp->rcup;
reg = off;
switch (op) {
case MA_st64:
val = *regp;
switch (reg) {
case RESET_GEN:
#ifdef VFALLS /* { */
/*
* PRM states that software may only write a 1
* to one of the 3 reset gen bit fields at a time.
* .-------------------------------------------------------,
* | .. | pb_as_dbgr| dbr_gen | rsvd | xir_gen | wmr_gen |
* `-------------------------------------------------------'
* 63:5 4 3 2 1 0
*/
RSVD_MASK(sp, (MASK64(4,3) | MASK64(1,0)), val, 0, reg);
#else /* } { */
/*
* PRM states that software may only write a 1
* to one of the 3 bit fields at a time.
* .--------------------------------------------,
* | .. | dbr_gen | rsvd | xir_gen | wmr_gen |
* `--------------------------------------------'
* 63:4 3 2 1 0
*/
RSVD_MASK(sp, (MASK64(3,3) | MASK64(1,0)), val, 0, reg);
#endif /* } */
switch ((val & (MASK64(3,0)))) {
case 0x0: /* no gen bits set */
case 0x1: /* wmr_gen set */
case 0x2: /* xir_gen set */
case 0x8: /* dbr_gen set */
break;
default:
fatal("[0x%llx] (pc=0x%llx)\tAttempted write to more than "\
"one reset generation bit in RESET_GEN val=0x%llx",
sp->gid, sp->pc, val);
}
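/*
 * (Editor's note: the cases above accept exactly the one-hot values
 * {0x0, 0x1, 0x2, 0x8}, i.e. at most one of wmr_gen, xir_gen and
 * dbr_gen. An equivalent test would be
 *
 *	v = val & 0xb;
 *	if (v & (v - 1)) fatal(...);
 */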
rcup->reset_gen = val;
/*
* update the RESET_SOURCE register if dbr_gen,
* xir_gen or wmr_gen are set in RESET_GEN.
* The lower 4 bits of both registers are the
* same.
*/
rcup->reset_source |= (val & (MASK64(3,3) | MASK64(1,1) | MASK64(0,0)));
break;
case RESET_STATUS:
RSVD_MASK(sp, (MASK64(3,1)), val, 0, reg);
rcup->reset_status = val;
break;
case RESET_SOURCE:
#ifdef VFALLS
RSVD_MASK(sp, (MASK64(1,0) | MASK64(18,3)), val, 0, reg);
#else
RSVD_MASK(sp, (MASK64(1,0) | MASK64(15,3)), val, 0, reg);
#endif
/*
* All the non-reserved bits are W1C so we update
* the reset_source accordingly
*/
rcup->reset_source &= ~(val);
break;
#ifdef VFALLS /* { */
case COMT_DIVS:
RSVD_MASK(sp, (MASK64(23,0)), val, 0, reg);
rcup->comt_divs = val;
break;
case COMT_CFG:
RSVD_MASK(sp, (MASK64(21,0)), val, 0, reg);
rcup->comt_cfg = val;
break;
case CLK_STEER:
RSVD_MASK(sp, (MASK64(3,0)), val, 0, reg);
rcup->clk_steer = val;
break;
case COMT_LOCK_TIME:
RSVD_MASK(sp, (MASK64(31,0)), val, 0, reg);
rcup->comt_lock_time = val;
break;
#endif /* } VFALLS */
default:
/* illegal reg - an error */
return false;
}
break;
case MA_ldu64:
switch (reg) {
case RESET_GEN:
val = rcup->reset_gen;
break;
case RESET_STATUS:
val = rcup->reset_status;
break;
case RESET_SOURCE:
val = rcup->reset_source;
break;
#ifdef VFALLS /* { */
case COMT_DIVS:
val = rcup->comt_divs;
break;
case COMT_CFG:
val = rcup->comt_cfg;
break;
case CLK_STEER:
val = rcup->clk_steer;
break;
case COMT_LOCK_TIME:
val = rcup->comt_lock_time;
break;
#endif /* } VFALLS */
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
/*
* Access registers in NCU Unit (mapped at offset = 0x8000000000)
*/
static bool_t ncu_access(simcpu_t *sp, config_addr_t * config_addrp,
tpaddr_t off, maccess_t op, uint64_t * regp)
{
int reg;
uint64_t val;
ss_proc_t *npp;
ss_strand_t *nsp;
ncu_t *ncup;
sparcv9_cpu_t *v9p;
int idx, target;
int node_id= 0;
bool_t self = true;
#ifdef VFALLS /* { */
domain_t *domainp;
tpaddr_t pa;
if (config_addrp->baseaddr == PHYS_ADDR_NCU) {
/*
* If local NCU CSR access, need to convert to Node X(this node) NCU CSR
* address. Use the simcpu to get the correct node_id and then get the
* correct config_addrp.
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
pa = PHYS_ADDR_NCU_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
self = true;
} else {
/*
* If remote NCU CSR access, use config_addrp to get at the node_id.
*/
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "NCU");
domainp = config_addrp->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (config_addrp->baseaddr == PHYS_ADDR_NCU_REMOTE(node_id))
break;
}
self = (node_id == sp->config_procp->proc_id) ? true : false;
}
#endif /* } VFALLS */
/*
* FIXME: For the moment we only support 64bit accesses to registers.
*/
if (MA_ldu64!=op && MA_st64!=op) return false;
if (off & 7) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
v9p = sp->specificp;
nsp = v9p->impl_specificp;
ncup = npp->ncup;
reg = off & NCU_REG_MASK;
if (reg < MONDO_INT_VEC)
reg = INT_MAN;
else if (UINT64_RANGE_CHECK(MONDO_INT_DATA0, reg, MONDO_INT_ADATA0))
reg &= ~NCU_INT_TGTOFFSET_MASK;
else if (UINT64_RANGE_CHECK(MONDO_INT_BUSY, reg, MONDO_INT_ABUSY))
reg &= ~NCU_INT_TGTOFFSET_MASK;
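/*
 * (Editor's note: the folding above maps any per-device/per-target
 * 8-byte slot onto its base register; the slot index is recovered in
 * the handlers below, e.g. idx = (off >> 3) & (NCU_DEV_MAX-1) for
 * INT_MAN and target = (off >> 3) & (NCU_TARGETS-1) for the mondo
 * registers.)
 */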
/*
* Fast-path the serial number read - it is used in the N2
* hypervisor CPU yield API.
*/
if (op == MA_ldu64 && reg == SER_NUM) {
val = ncup->regs.ser_num;
DBGDEV(lprintf(sp->gid, "Read NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, ncu_reg_name(reg), off, val););
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
return true;
}
pthread_mutex_lock( &ncup->ncu_lock );
switch (op) {
case MA_st64:
val = *regp;
#define ASSIGN_NCU(_n, _m) do { \
ncup->regs._n = val; \
if (0LL != (val & ~(_m))) goto write_reserved; \
} while (0)
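/*
 * (Editor's note: ASSIGN_NCU stores the new value *before* checking
 * the reserved bits, so a write with reserved bits set still lands in
 * the register; the write_reserved path below merely warns and
 * returns true. Contrast ASSIGN_CLK in ccu_access(), which checks
 * first.)
 */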
switch (reg) {
case INT_MAN:
idx = (off >> 3) & (NCU_DEV_MAX-1);
ASSIGN_NCU( int_man[idx], MASK64(13,8)|MASK64(5,0) );
break;
case MONDO_INT_VEC:
ASSIGN_NCU( mondo_int_vec, MASK64(5,0) );
break;
case SER_NUM:
case EFU_STAT:
case CORE_AVAIL:
case BANK_AVAIL:
case BANK_ENABLE_STATUS:
case L2_IDX_HASH_EN_STATUS:
case MONDO_INT_DATA0:
case MONDO_INT_DATA1:
case MONDO_INT_ADATA0:
case MONDO_INT_ADATA1:
EXEC_WARNING( ("Attempted write to RO register in NCU:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ncu_reg_name(reg), reg ) );
pthread_mutex_unlock( &ncup->ncu_lock );
return false;
case PCIE_A_MEM32_OFFSET_BASE:
ASSIGN_NCU( pcie_a_mem32_offset_base, MASK64(63,63)|MASK64(35,24) );
niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM32);
break;
case PCIE_A_MEM32_OFFSET_MASK:
ASSIGN_NCU( pcie_a_mem32_offset_mask, MASK64(39,24) );
niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM32);
break;
case PCIE_A_MEM64_OFFSET_BASE:
ASSIGN_NCU( pcie_a_mem64_offset_base, MASK64(63,63)|MASK64(35,24) );
niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM64);
break;
case PCIE_A_MEM64_OFFSET_MASK:
ASSIGN_NCU( pcie_a_mem64_offset_mask, MASK64(39,24) );
niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM64);
break;
case PCIE_A_IOCON_OFFSET_BASE:
ASSIGN_NCU( pcie_a_iocon_offset_base, MASK64(63,63)|MASK64(35,24) );
niagara2_pcie_mapping(sp, ncup, PIU_REGION_CFGIO);
break;
case PCIE_A_IOCON_OFFSET_MASK:
ASSIGN_NCU( pcie_a_iocon_offset_mask, MASK64(39,24) );
niagara2_pcie_mapping(sp, ncup, PIU_REGION_CFGIO);
break;
case BANK_ENABLE:
case L2_IDX_HASH_EN:
FIXME_WARNING(("NCU register %s (offset 0x%x) not implemented\n",
ncu_reg_name(reg), reg ) );
break;
case PCIE_A_FSH:
ncup->regs.pcie_a_fsh = val;
break;
case SOC_ESR:
ASSIGN_NCU( soc_esr, MASK64(63,63)|NCU_SOC_MASK );
break;
case SOC_LOG_ENABLE:
ASSIGN_NCU( soc_log_enb, MASK64(42,0) );
break;
case SOC_INTERRUPT_ENABLE:
ASSIGN_NCU( soc_intr_enb, MASK64(42,0) );
break;
case SOC_ERROR_INJECTION:
ASSIGN_NCU( soc_err_inject, MASK64(42,0) );
break;
case SOC_FATAL_ERROR_ENABLE:
ASSIGN_NCU( soc_fatal_enb, MASK64(42,0) );
break;
case SOC_PENDING_ERROR_STATUS:
/*
* same as SOC_ESR
*/
ASSIGN_NCU( soc_esr, MASK64(63,63)|NCU_SOC_MASK );
break;
case SOC_SII_ERROR_SYNDROME:
ASSIGN_NCU( soc_sii_err_syndrome, MASK64(63,63)|MASK64(58,0) );
break;
case SOC_NCU_ERROR_SYNDROME:
ASSIGN_NCU( soc_ncu_err_syndrome, MASK64(63,58)|MASK64(55,0) );
break;
case MONDO_INT_BUSY:
target = (off >> 3) & (NCU_TARGETS-1);
ASSIGN_NCU( mondo_int_busy[target], MASK64(6,6) );
break;
case MONDO_INT_ABUSY:
/* Note from VF PRM author : N2's cpu-id based alias
* registers work on VF only when access comes from
* local node. A thread on remote-node should not access
* these registers. Since interrupts can be delivered to
* local-node only, assumption is that interrupted
* thread(or some other thread on same node) will read
* these regs and message-pass to remote node, if needed.
*/
if (!self)
fatal("[0x%llx] (pc=0x%llx)\tIllegal write to "
"NCU alias register of remote node: "
"Write 0x%llx to register offset 0x%x of node %d.\n",
sp->gid, sp->pc, val, reg, node_id);
target = nsp->vcore_id;
ASSIGN_NCU( mondo_int_busy[target], MASK64(6,6) );
break;
default:
EXEC_WARNING( ("Attempted write to illegal register in NCU:"
"Write 0x%llx to register offset 0x%x on node %d\n",
val, reg, node_id ) );
goto access_failed; /* illegal reg - an error */
}
DBGDEV(lprintf(sp->gid, "Write NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, ncu_reg_name(reg), off, val););
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in NCU:"
"Write 0x%llx to register %s (offset 0x%x) on node %d",
val, ncu_reg_name(reg), reg, node_id ) );
pthread_mutex_unlock( &ncup->ncu_lock );
return true;
case MA_ldu64:
switch (reg) {
case INT_MAN:
idx = (off >> 3) & (NCU_DEV_MAX-1);
val = ncup->regs.int_man[idx];
break;
case MONDO_INT_VEC:
val = ncup->regs.mondo_int_vec;
break;
case SER_NUM:
val = ncup->regs.ser_num;
break;
case CORE_AVAIL:
val = npp->cmp_regs.core_enable_status;
break;
case EFU_STAT:
val = ncup->regs.efu_stat;
break;
case BANK_AVAIL:
case BANK_ENABLE:
val = ncup->regs.bank_enb;
break;
case BANK_ENABLE_STATUS:
val = ncup->regs.bank_enb_stat;
break;
case PCIE_A_MEM32_OFFSET_BASE:
val = ncup->regs.pcie_a_mem32_offset_base;
break;
case PCIE_A_MEM32_OFFSET_MASK:
val = ncup->regs.pcie_a_mem32_offset_mask;
break;
case PCIE_A_MEM64_OFFSET_BASE:
val = ncup->regs.pcie_a_mem64_offset_base;
break;
case PCIE_A_MEM64_OFFSET_MASK:
val = ncup->regs.pcie_a_mem64_offset_mask;
break;
case PCIE_A_IOCON_OFFSET_BASE:
val = ncup->regs.pcie_a_iocon_offset_base;
break;
case PCIE_A_IOCON_OFFSET_MASK:
val = ncup->regs.pcie_a_iocon_offset_mask;
break;
case L2_IDX_HASH_EN:
case L2_IDX_HASH_EN_STATUS:
val = 0; /* not modelled: avoid returning uninitialized data */
FIXME_WARNING(("NCU register %s (offset 0x%x) not implemented\n",
ncu_reg_name(reg), reg ) );
break;
case PCIE_A_FSH:
val = ncup->regs.pcie_a_fsh;
break;
case SOC_ESR:
val = ncup->regs.soc_esr;
break;
case SOC_LOG_ENABLE:
val = ncup->regs.soc_log_enb;
break;
case SOC_INTERRUPT_ENABLE:
val = ncup->regs.soc_intr_enb;
break;
case SOC_ERROR_INJECTION:
val = ncup->regs.soc_err_inject;
break;
case SOC_FATAL_ERROR_ENABLE:
val = ncup->regs.soc_fatal_enb;
break;
case SOC_PENDING_ERROR_STATUS:
/*
* same as SOC_ESR
*/
val = ncup->regs.soc_esr;
break;
case SOC_SII_ERROR_SYNDROME:
val = ncup->regs.soc_sii_err_syndrome;
break;
case SOC_NCU_ERROR_SYNDROME:
val = ncup->regs.soc_ncu_err_syndrome;
break;
case MONDO_INT_DATA0:
target = (off >> 3) & (NCU_TARGETS-1);
val = ncup->regs.mondo_int_data0[target];
break;
case MONDO_INT_DATA1:
target = (off >> 3) & (NCU_TARGETS-1);
val = ncup->regs.mondo_int_data1[target];
break;
case MONDO_INT_ADATA0:
/* Note from VF PRM author : N2's cpu-id based alias
* registers work on VF only when access comes from
* local node. A thread on remote-node should not access
* these registers. Since interrupts can be delivered to
* local-node only, assumption is that interrupted
* thread(or some other thread on same node) will read
* these regs and message-pass to remote node, if needed.
*/
if (!self)
fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
"NCU alias register of remote node: "
"Read register offset 0x%x on node %d.\n",
sp->gid, sp->pc, reg, node_id);
target = nsp->vcore_id;
val = ncup->regs.mondo_int_data0[target];
break;
case MONDO_INT_ADATA1:
if (!self)
/* see comment block above */
fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
"NCU alias register of remote node: "
"Read register offset 0x%x on node %d.\n",
sp->gid, sp->pc, reg, node_id);
target = nsp->vcore_id;
val = ncup->regs.mondo_int_data1[target];
break;
case MONDO_INT_BUSY:
target = (off >> 3) & (NCU_TARGETS-1);
val = ncup->regs.mondo_int_busy[target];
break;
case MONDO_INT_ABUSY:
if (!self)
/* see comment block above */
fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
"NCU alias register of remote node: "
"Read register offset 0x%x on node %d.\n",
sp->gid, sp->pc, reg, node_id);
target = nsp->vcore_id;
val = ncup->regs.mondo_int_busy[target];
break;
default:
goto access_failed; /* illegal reg - an error */
}
DBGDEV(lprintf(sp->gid, "Read NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, ncu_reg_name(reg), off, val););
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
pthread_mutex_unlock( &ncup->ncu_lock );
return true;
access_failed:;
pthread_mutex_unlock( &ncup->ncu_lock );
return false;
}
/*
* Access registers in Clock Unit (mapped at offset = 0x8300000000)
*/
static bool_t ccu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
int reg;
uint64_t val;
ss_proc_t *npp;
ccu_t * clockp;
int osc; /* oscillator number for RNG */
int freqidx; /* frequency index of selected oscillator in RNG */
#ifdef VFALLS /* { */
domain_t *domainp;
int node_id;
tpaddr_t pa;
if (config_addrp->baseaddr == PHYS_ADDR_CCU) {
/*
* if local CCU CSR access, need to convert to Node X (this node)
* CCU CSR address
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
pa = PHYS_ADDR_CCU_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
} else {
/* check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "CCU");
}
#endif /* } VFALLS */
/*
* FIXME: For the moment we only support 64bit accesses to registers.
*/
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
clockp = npp->clockp;
reg = off & ~0xfULL; /* collapse to basic register groups */
switch (op) {
case MA_st64:
val = *regp;
#define ASSIGN_CLK(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
clockp->_n = val; \
} while (0)
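/*
 * Worked example of the write-mask idiom above (assuming MASK64(hi,lo)
 * sets bits hi..lo inclusive, as it is used throughout this file):
 * CLOCK_CONTROL uses MASK64(33,0), so a store of 0x400000000 (bit 34)
 * intersects ~mask and is rejected via write_reserved, while a store
 * of 0x3ffffffff is accepted verbatim.
 */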
switch (reg) {
case CLOCK_CONTROL:
ASSIGN_CLK( control, MASK64(33,0) );
FIXME_WARNING(("Clock register %s (offset 0x%x) not implemented\n",
ccu_reg_name(reg), reg ) );
break;
case RAND_CTL:
/* only the lower 25 bits are used */
ASSIGN_CLK(rand_state.ctl, RC_REG_MASK);
/* unary to binary */
switch ((clockp->rand_state.ctl >>
RC_NOISE_CELL_SEL_SHIFT) & RC_NOISE_CELL_SEL_MASK) {
case 0:
osc = 0; /* special: no osc. selected */
break;
case 1:
osc = 1;
break;
case 2:
osc = 2;
break;
case 4:
osc = 3;
break;
default:
/*
* If more than one is selected, we
* don't set anything. It is not clear
* that this is exactly in line with
* the CCU document.
*/
osc = 0;
}
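/*
 * The decode above turns the one-hot noise cell select field into an
 * oscillator number: 0b001 -> osc 1, 0b010 -> osc 2, 0b100 -> osc 3.
 * A value of zero or any multi-bit pattern leaves osc at 0, so the
 * frequency index update below is skipped.
 */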
if (osc > 0) {
clockp->rand_state.freqidx[osc-1] =
(clockp->rand_state.ctl >>
RC_ANALOG_SEL_SHIFT) & RC_ANALOG_SEL_MASK;
}
break;
default:
/* illegal reg - an error */
return false;
}
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in clock unit:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ccu_reg_name(reg), reg ) );
return false;
case MA_ldu64:
#define RETRIEVE_CLK(_n, _m) do { val = ((clockp->_n) & (_m)); } while (0)
switch (reg) {
case CLOCK_CONTROL:
RETRIEVE_CLK( control, MASK64(33,0) );
break;
#if INTERNAL_BUILD /* { */
case RAND_GEN:
#define RNG_POLY 0x231dcee91262b8a3ULL
#define N2_RNG_OSC_DUTY_FACTOR 0.2
if (((clockp->rand_state.ctl >> RC_MODE_SHIFT) & 1) == 0) {
#ifdef OLD_CONSTANT_WAY
/* Keeping this code, because I think Axis works this way */
/*
* Mode == 0: Shift out "raw" noise
* cells, one value every 64
* clocks. For now we assume the
* following noise cell frequencies,
* and that everything is phase
* locked.
*
* Cell 1: 1/64.
* Cell 2: 1/8.
* Cell 3: 1/2.
*
* Selection is weird:
* See N2 CCU MAS v.1.61, Table 7.
*/
switch (clockp->
rand_control.fields.rc_noise_cell_sel) {
case 0:
val = 0;
break;
case 1:
val = 0xffffffff00000000ULL;
break;
case 2:
val = 0xf0f0f0f0f0f0f0f0ULL;
break;
case 4:
val = 0xaaaaaaaaaaaaaaaaULL;
break;
default:
val = 0xffffffff00000000ULL ^
0xf0f0f0f0f0f0f0f0ULL ^
0xaaaaaaaaaaaaaaaaULL;
}
#else /* !OLD_CONSTANT_WAY */
val = 0;
/* osc = clockp->rand_state.osc; */
for (osc = 1; osc <= 3; ++osc) {
if (((clockp->rand_state.ctl >>
RC_NOISE_CELL_SEL_SHIFT) &
RC_NOISE_CELL_SEL_MASK) &
(1 << (osc - 1))) {
freqidx = clockp->
rand_state.freqidx[osc-1];
#if 0
printf("osc=%d, freqidx=%d, "
"freq=%f, noise=%f\n",
osc, freqidx,
clockp->rand_state.
frequency[freqidx][osc-1],
clockp->rand_state.
noise[freqidx][osc-1]);
#endif
val ^= gen_raw_entropy(
&clockp->
rand_state.
phase[osc-1],
&clockp->rand_state.
frequency[freqidx][osc-1],
&clockp->rand_state.
noise[freqidx][osc-1],
N2_RNG_OSC_DUTY_FACTOR);
}
}
#endif /* !OLD_CONSTANT_WAY */
} else {
/*
* Mode is 1; The LFSR is in feedback mode.
*/
if ((clockp->rand_state.ctl >>
RC_NOISE_CELL_SEL_SHIFT) &
RC_NOISE_CELL_SEL_MASK) {
/*
* For now, if any noise cells
* are turned on, return a
* 64-bit random value.
*/
val = ((uint64_t)lrand48() << 32) |
(uint64_t)lrand48();
} else {
/*
* Deterministic test. The
* RNG does 2 more cycles than
* the delay value. After
* each read, the register is
* reset to ~0ULL. In
* reality, delay+2 is only
* the minimum delay, but we
* gloss over that issue.
*/
lfsr64_adv(RNG_POLY, ~0ULL,
((clockp->rand_state.ctl >>
RC_DELAY_SHIFT) & RC_DELAY_MASK) +
2,
&val);
}
}
break;
case RAND_CTL:
RETRIEVE_CLK(rand_state.ctl, RC_REG_MASK);
break;
#endif /* INTERNAL_BUILD } */
default:
/* illegal reg - an error */
return false;
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
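/*
 * A minimal sketch of what an lfsr64_adv()-style helper could look like
 * for the deterministic RNG test mode above: advance a right-shifting
 * Galois LFSR with polynomial 'poly' from 'seed' by 'steps' steps.
 * This is an assumption for illustration only; the actual routine used
 * by the INTERNAL_BUILD code lives in lfsr64.h and may well differ.
 */
#if 0 /* illustrative sketch only -- not compiled */
static void
example_lfsr64_adv(uint64_t poly, uint64_t seed, uint64_t steps, uint64_t *valp)
{
uint64_t state = seed;
while (steps-- != 0) {
/* feed the bit shifted out back in through the taps */
if (state & 1ULL)
state = (state >> 1) ^ poly;
else
state >>= 1;
}
*valp = state;
}
#endif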
/*
* Access registers in JTAG area (mapped at offset = 0x9000000000)
*/
static bool_t jtag_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t *npp;
int node_id;
uint64_t val;
int reg;
#ifdef VFALLS /* { */
domain_t *domainp;
tpaddr_t pa;
int idx;
if (config_addrp->baseaddr == PHYS_ADDR_JTAG) {
/*
* if local JTAG CSR access, need to convert to Node X (this node) JTAG
* CSR address.
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
pa = PHYS_ADDR_JTAG_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
} else {
/*
* If remote JTAG CSR access, use config_addrp to get at the node_id.
*/
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "JTAG");
domainp = config_addrp->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (config_addrp->baseaddr == PHYS_ADDR_JTAG_REMOTE(node_id))
break;
}
}
#endif /* } VFALLS */
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp; /* target proc */
reg = off;
switch (op) {
case MA_st64:
val = *regp;
switch (reg) {
case INT_VECTOR_DISPATCH:
RSVD_MASK(sp, (MASK64(5,0) | MASK64(13,8)), val, 0, reg);
/* note sp=interrupt originator, npp=interrupt target */
niagara2_send_xirq(sp, npp, val);
break;
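/*
 * Per the RSVD_MASK above, only bits [5:0] and [13:8] of the dispatch
 * value are meaningful (presumably the vector number and the target
 * strand); how the two fields are decoded is handled inside
 * niagara2_send_xirq().
 */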
case ASI_CORE_AVAILABLE:
case ASI_CORE_ENABLE_STATUS:
case ASI_CORE_RUNNING_STATUS:
EXEC_WARNING( ("Attempted write to RO register in JTAG/TAP:"
"Write 0x%llx attempted to register %s (offset 0x%x)",
val, jtag_reg_name(reg), reg ) );
return false;
case ASI_CORE_ENABLE:
IMPL_WARNING(("%s: not supported in JTAG/TAP.\n",jtag_reg_name(reg)));
break;
case ASI_CORE_RUNNING_RW:
/*
* WS: according to the CMP PRM, writing a '1' to a bit will be ignored
* if the corresponding bit in the core enable reg is 0 (i.e., the
* corresponding virtual core is not enabled)
*/
pthread_mutex_lock(&npp->cmp_lock);
npp->cmp_regs.core_running_status = val & npp->cmp_regs.core_enable_status;
ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
pthread_mutex_unlock(&npp->cmp_lock);
break;
case ASI_CORE_RUNNING_W1S:
/*
* W1S: new_value = old_value | new_value;
*/
pthread_mutex_lock(&npp->cmp_lock);
npp->cmp_regs.core_running_status |= val;
/*
* According to the CMP PRM, writing a '1' to a bit will be ignored
* if the corresponding bit in the core enable reg is 0 (i.e., the
* corresponding virtual core is not enabled)
*/
npp->cmp_regs.core_running_status &= npp->cmp_regs.core_enable_status;
/*
* FIXME: need to check if the virtual core is attempting to park
* all the virtual cores (this is prevented by the hardware)
*/
ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
pthread_mutex_unlock(&npp->cmp_lock);
break;
case ASI_CORE_RUNNING_W1C:
/*
* W1C: new_value = old_value & ~new_value;
*/
pthread_mutex_lock(&npp->cmp_lock);
npp->cmp_regs.core_running_status &= ~val;
ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
pthread_mutex_unlock(&npp->cmp_lock);
break;
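/*
 * Worked example of the W1S/W1C semantics above: with
 * core_running_status = 0b1010, a write of 0b0110 yields 0b1110
 * through the W1S register (written 1s are set) and 0b1000 through
 * the W1C register (written 1s are cleared); 0 bits in the write
 * data are ignored in both cases.
 */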
case SOC_ERROR_STEERING:
RSVD_MASK(sp, MASK64(5,0), val, 0, reg);
npp->ncup->regs.soc_err_steering = val;
break;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Write JTAG register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, jtag_reg_name(reg), off, val););
break;
case MA_ldu64:
switch (reg) {
case INT_VECTOR_DISPATCH:
EXEC_WARNING( ("Attempted read from WO register in JTAG/TAP:"
" Read attempted from register %s (offset 0x%x).\n",
jtag_reg_name(reg), reg ) );
return false;
case ASI_CORE_AVAILABLE:
case ASI_CORE_ENABLE_STATUS:
case ASI_CORE_ENABLE:
val = npp->cmp_regs.core_enable_status;
break;
case ASI_CORE_RUNNING_RW:
case ASI_CORE_RUNNING_STATUS:
val = npp->cmp_regs.core_running_status;
break;
case ASI_CORE_RUNNING_W1S:
case ASI_CORE_RUNNING_W1C:
EXEC_WARNING( ("Attempted read from WO register in JTAG/TAP:"
" Read attempted from register %s (offset 0x%x).\n",
jtag_reg_name(reg), reg ) );
return false;
case SOC_ERROR_STEERING:
val = npp->ncup->regs.soc_err_steering;
break;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Read JTAG register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, jtag_reg_name(reg), off, val););
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
#ifdef VFALLS /* { */
static bool_t ncx_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
uint64_t val;
ss_proc_t *npp;
bool_t self = true;
domain_t *domainp;
int node_id;
tpaddr_t pa;
int idx;
ncx_t *ncxp;
int mode_shift;
int reg;
uint core_num;
sparcv9_cpu_t * tv9p;
simcpu_t * tsp;
config_dev_t *config_devp = NULL;
bool_t zambezi_present = false;
if (config_addrp->baseaddr == PHYS_ADDR_NCX) {
/*
* if local NCX CSR access, need to convert to Node X (this node)
* NCX CSR address
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
pa = PHYS_ADDR_NCX_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
self = true;
} else {
/*
* If remote NCX CSR access, use config_addrp to get at the node_id.
*/
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "NCX");
domainp = config_addrp->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (config_addrp->baseaddr == PHYS_ADDR_NCX_REMOTE(node_id))
break;
}
self = (node_id == sp->config_procp->proc_id) ? true : false;
}
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
ncxp = npp->ncxp;
reg = off;
switch (op) {
case MA_ldu64:
switch (reg) {
case CF_SYS_MODE_REG:
if (!self){
EXEC_WARNING( ("[0x%llx] (pc=0x%llx) Access "
"attempt to %s of remote "
"node %d. PRM recommends only local "
"access\n", sp->gid, sp->pc,
ncx_reg_name(off), node_id));
}
val = ncxp->sys_mode;
break;
case NCX_TIC_EN_SLOW:
val = ncxp->tick_en_slow;
break;
case CF_SLOW_PULSE_WAIT:
val = 0x0;
break;
case NCX_TWR:
val = ncxp->twr;
break;
case NCX_TPESR:
val = ncxp->tpesr;
break;
case NCX_TPELSE:
val = ncxp->tpelse;
break;
case NCX_TPEAR:
val = ncxp->tpear;
break;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Read NCX register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, ncx_reg_name(reg), off, val););
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
case MA_st64:
val = *regp;
#define ASSIGN_NCX(_n, _m) do { \
ncxp->_n = val & (_m); \
if (0LL != (val & ~(_m))) \
EXEC_WARNING( ("Attempted write to reserved field in NCX being masked out : " \
"Attempted write was 0x%llx to register %s (offset 0x%x) on node %d", \
val, ncx_reg_name(off), off, node_id ) ); \
} while (0)
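/*
 * Note the contrast with ASSIGN_CLK in ccu_access(): ASSIGN_NCX does
 * not reject a write that touches a reserved field, it masks the
 * reserved bits out, completes the store, and only emits an
 * EXEC_WARNING. E.g. with a mask of MASK64(6,2), a write of 0xff
 * updates the register to 0x7c and warns about bits 7, 1 and 0.
 */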
switch (reg) {
case CF_SYS_MODE_REG:
if (!self){
EXEC_WARNING( ("[0x%llx] (pc=0x%llx) Access "
"attempt to %s of remote node %d. PRM "
"recommends only local access.\n",
sp->gid, sp->pc, ncx_reg_name(off),
node_id));
}
ASSIGN_NCX(sys_mode, SM_REG_MASK);
/* check that correct node_id value being written */
if (SM_2_NODE(ncxp->sys_mode) != node_id)
fatal("[0x%llx] (pc=0x%llx)\tAttempt to write %d"
" to node_id field of CF_SYS_MODE_REG "
"(offset 0x%x) on node %d.\n",
sp->gid, sp->pc, SM_2_NODE(ncxp->sys_mode),
off, node_id);
/* check if zambezi present or glueless */
config_devp = domainp->device.listp;
for (idx = 0; idx < domainp->device.count; idx++) {
if (streq(config_devp->dev_typep->dev_type_namep, "zambezi")) {
zambezi_present = true;
break;
}
config_devp = config_devp->nextp;
}
if (zambezi_present ^ ((ncxp->sys_mode >> SM_EXTERN_HUB_SEL_SHIFT) & 1))
fatal("[0x%llx] (pc=0x%llx)\tAttempt to write %d"
" to extern_hub field of CF_SYS_MODE_REG "
"(offset 0x%x) on a %d-node system.\n",
sp->gid, sp->pc,
((ncxp->sys_mode >> SM_EXTERN_HUB_SEL_SHIFT)
& 1), off, domainp->procs.count);
/* check that way info is correct and only one bit asserted too */
if ((domainp->procs.count == 1) &&
((SM_EWAY_BITS(ncxp->sys_mode)) != 0))
fatal("[0x%llx] (pc=0x%llx)\tMultiple ways mode"
" selected in CF_SYS_MODE_REG (offset 0x%x) "
"on node %d.\nAttempted write was 0x%llx on "
"a %d-way system.\n", sp->gid, sp->pc,
off, node_id, ncxp->sys_mode,
domainp->procs.count);
else if (domainp->procs.count > 1) {
mode_shift = 11 - domainp->procs.count;
if ((SM_EWAY_BITS(MASK64(mode_shift, mode_shift)) != (SM_EWAY_BITS(ncxp->sys_mode))))
fatal("[0x%llx] (pc=0x%llx)\tAttempt to "
"write wrong way info in "
"CF_SYS_MODE_REG (offset 0x%x) on "
"node %d.\nAttempted write was "
"0x%llx on a %d-way system.\n",
sp->gid, sp->pc, off, node_id,
ncxp->sys_mode, domainp->procs.count);
}
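/*
 * Worked example of the way check above: on a 2-way system,
 * mode_shift = 11 - 2 = 9, so the field extracted by SM_EWAY_BITS()
 * must equal exactly the single bit at position 9; no bits set,
 * multiple bits set, or the wrong bit set are all fatal.
 */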
DBGMULNODE(lprintf(-1, "SYS_MODE_REG for node %d set "
"to 0x%llx\n", node_id, ncxp->sys_mode););
break;
case NCX_TIC_EN_SLOW:
RSVD_MASK(sp, (MASK64(0,0)), val, 0, reg);
pthread_mutex_lock(&npp->tick_en_lock);
if (!val && !npp->cmp_regs.tick_enable && !npp->tick_stop) {
ss_strand_t * tnsp;
npp->tick_stop = true;
/* now stop all tick counters */
core_num = (uint_t) -1;
for (idx = 0; idx < npp->nstrands; idx++) {
tv9p = npp->strand[idx];
tnsp = &(npp->ss_strandp[idx]);
if (tnsp->core != core_num) {
tv9p->tick->offset += RAW_TICK(tv9p);
core_num = tnsp->core;
}
tsp = tv9p->simp;
ss_recomp_tick_target(tsp);
}
}
if (val && npp->tick_stop) {
ss_strand_t * tnsp;
npp->tick_stop = false;
/* now start all tick counters */
core_num = (uint_t) -1;
for (idx = 0; idx < npp->nstrands; idx++) {
tv9p = npp->strand[idx];
tnsp = &(npp->ss_strandp[idx]);
if (tnsp->core != core_num) {
tv9p->tick->offset -= RAW_TICK(tv9p);
core_num = tnsp->core;
}
tsp = tv9p->simp;
ss_recomp_tick_target(tsp);
}
}
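/*
 * The offset arithmetic above freezes and thaws the per-core tick
 * counters: folding RAW_TICK() into offset on stop captures the
 * current count so reads see a constant value, and subtracting the
 * new RAW_TICK() on restart makes the counter resume from that
 * captured value without a visible jump.
 */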
ncxp->tick_en_slow = val;
pthread_mutex_unlock(&npp->tick_en_lock);
break;
case CF_SLOW_PULSE_WAIT:
EXEC_WARNING( ("Attempted write to RO register in NCX:"
"Write 0x%llx to register %s (offset 0x%x)",
val, ncx_reg_name(reg), reg ) );
return false;
case NCX_TWR:
ASSIGN_NCX(twr, MASK64(31,10));
break;
case NCX_TPESR:
/*
* bit[6:0]: W1C
*/
RSVD_MASK(sp, (MASK64(6,0)), val, 0, reg);
ncxp->tpesr &= ~val;
break;
case NCX_TPELSE:
ASSIGN_NCX(tpelse, MASK64(6,2));
break;
case NCX_TPEAR:
/*
* bit[63:61]: W1C
*/
RSVD_MASK(sp, MASK64(60,0), val, 0, reg);
ncxp->tpear &= ~val;
break;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Write NCX register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, ncx_reg_name(reg), off, val););
break;
default:
ASSERT(0);
}
return true;
}
static bool_t cou_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t *npp;
cou_t * coup;
uint64_t val;
uint64_t reg;
int node_id;
tpaddr_t pa;
int idx;
domain_t *domainp;
int link;
if (config_addrp->baseaddr == PHYS_ADDR_COU) {
/*
* if local COU CSR access, need to convert to Node X (this node)
* COU CSR address
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
pa = PHYS_ADDR_COU_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
/*
* No access is allowed for single node
*/
if (domainp->procs.count == 1) return false;
} else {
/*
* If remote COU CSR access, use config_addrp to get at the node_id.
*/
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "COU");
domainp = config_addrp->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (config_addrp->baseaddr == PHYS_ADDR_COU_REMOTE(node_id))
break;
}
}
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
coup = npp->coup;
reg = off & ~COU_LINK_MASK;
link = (off & COU_LINK_MASK) >> COU_LINK_SHIFT;
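/*
 * Decode example (illustrative, assuming links are spaced at the
 * COU_LINK_SHIFT granularity): with 4KB spacing, an offset of 0x2008
 * would decode to link 2, register offset 0x8.
 */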
switch (op) {
case MA_st64:
val = *regp;
switch (reg) {
case COU_ERR_ENABLE_REG:
RSVD_MASK(sp, MASK64(2,0), val, 0, reg);
coup->cou_err_enable[link] = val;
break;
case COU_ESR:
/*
* bit[4:0]: W1C
*/
RSVD_MASK(sp, MASK64(4,0), val, 0, reg);
coup->cou_esr[link] &= ~val;
break;
case COU_EAR:
EXEC_WARNING( ("Attempted write to RO register in COU:"
"Write 0x%llx to register %s (offset 0x%x)",
val, cou_reg_name(reg), reg ) );
return false;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Write COU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, cou_reg_name(reg), off, val););
break;
case MA_ldu64:
switch (reg) {
case COU_ERR_ENABLE_REG:
val = coup->cou_err_enable[link];
break;
case COU_ESR:
val = coup->cou_esr[link];
break;
case COU_EAR:
val = coup->cou_ear[link];
break;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Read COU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, cou_reg_name(reg), off, val););
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
static bool_t lfu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t *npp, *tnpp;
lfu_t *lfup, *tlfup;
uint64_t val;
int reg;
int node_id;
tpaddr_t pa;
int idx, lnk;
domain_t *domainp;
int link;
config_dev_t *devp = NULL;
bool_t zambezi_present = false;
bool_t lfu_ok = true;
if (config_addrp->baseaddr == PHYS_ADDR_LFU) {
/*
* if local LFU CSR access, need to convert to Node X (this node)
* LFU CSR address
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
/* single node configs can't access LFU CSRs */
if (domainp->procs.count == 1) return false;
pa = PHYS_ADDR_LFU_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
} else {
/*
* If remote LFU CSR access, use config_addrp to get at the node_id.
*/
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "LFU");
domainp = config_addrp->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (config_addrp->baseaddr == PHYS_ADDR_LFU_REMOTE(node_id))
break;
}
}
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
lfup = npp->lfup;
reg = off & ~LFU_LINK_MASK;
link = (off & LFU_LINK_MASK) >> 12;
switch (op) {
case MA_st64:
val = *regp;
switch (reg) {
case CL_INIT_STATE:
EXEC_WARNING( ("Attempted write to RO register in LFU:"
"Write 0x%llx to register %s (offset 0x%x)",
val, lfu_reg_name(reg), reg ) );
return false;
case CL_CFG_REG:
RSVD_MASK(sp, (MASK64(5,0)), val, 0, reg);
lfup->cl_cfg_reg[link] = val;
if (val & MASK64(0,0))
lfup->cl_init_state[link] = LFU_LINK_L0;
/* Zambezi present? */
devp = domainp->device.listp;
for (idx = 0; idx < domainp->device.count; idx++) {
if (streq(devp->dev_typep->dev_type_namep, "zambezi")) {
zambezi_present = true;
break;
}
devp = devp->nextp;
}
if (zambezi_present) {
/* check if all set to master */
for (lnk=0; lnk<LFU_MAX_LINKS; lnk++) {
if ((lfup->cl_cfg_reg[lnk] & MASK64(1,0)) != LFU_LINK_MASTER_EN)
lfu_ok = false;
}
} else {
/*
* The exhaustive table below covers all legal and illegal
* cases for glueless mode.
* nb: node A is current node whose cl_cfg_reg
* is being written to and node B is the other
* node whose register value is just being
* checked.
*
* Legal combinations(of cl_cfg_reg bits 1, 0)
* -------------------------------------------
*
* node A node B
* ------ ------
* 11 - master en'ed 01 - slave en'ed
* 11 - master en'ed 00 - register not yet written to
* 01 - slave en'ed 11 - master en'ed
* 01 - slave en'ed 00 - register not yet written to
*
* Illegal combinations(of cl_cfg_reg bits 1, 0)
* ---------------------------------------------
*
* node A node B
* ------ ------
* 11 - master en'ed 10 - also master but not en'ed
* 11 - master en'ed 11 - also master en'ed
* 01 - slave en'ed 10 - master but not en'ed
* 01 - slave en'ed 01 - also slave
*
* Note : Any other values written to Node A (00 or 10)
* are illegal.
*/
int link_mode = lfup->cl_cfg_reg[0] & (MASK64(1,0));
if ((link_mode != LFU_LINK_MASTER_EN) && (link_mode != LFU_LINK_SLAVE_EN)) {
lfu_ok = false;
} else {
/* all links for that node must be the same mode */
for (lnk = 1; lnk < LFU_MAX_LINKS; lnk++) {
if (lfup->cl_cfg_reg[0] ^ lfup->cl_cfg_reg[lnk]) {
lfu_ok = false;
break;
}
}
}
if (lfu_ok) {
/*
* now get hold of other node to make sure that
* we don't have 2 slaves or 2 masters.
*/
for (idx = 0; idx < 2; idx++) {
tnpp = LIST_ENTRY(domainp->procs, idx)->procp;
if (tnpp->lfup != lfup) {
tlfup = tnpp->lfup;
break;
}
}
if (link_mode == LFU_LINK_MASTER_EN) {
if ((tlfup->cl_cfg_reg[0] & (MASK64(1,1))))
fatal("[0x%llx] (pc=0x%llx)\t"
" Attempt to write master"
" bit in %s of both nodes"
" in glueless config.\n",
sp->gid, sp->pc,
lfu_reg_name(reg));
} else {
/* ie. node A is slave */
switch (tlfup->cl_cfg_reg[0] & MASK64(1, 0)) {
case 0x1:
fatal("[0x%llx] (pc=0x%llx)\t"
"Attempt to write slave"
" bit in %s of both nodes"
" in glueless config.\n",
sp->gid, sp->pc,
lfu_reg_name(reg));
break;
case 0x2:
fatal("[0x%llx] (pc=0x%llx)\t"
"Enable bit not set in"
" %s of node %d.\n",
sp->gid, sp->pc,
lfu_reg_name(reg),
tnpp->config_procp->proc_id);
break;
default:
/* all other cases ok */
break;
}
}
}
}
/*
 * Ideally, global addressing would only be allowed once ALL nodes
 * have their lfu's set up correctly. But because of the way Legion
 * cycles through the different threads, allowing each cpu a quantum
 * of instructions, doing that would cause false errors. So we just
 * check and update one node at a time.
 */
npp->global_addressing_ok.flags.lfu = lfu_ok? GLOBAL_ADDRESSING_FLAG_EN:GLOBAL_ADDRESSING_FLAG_DIS;
break;
case CL_SERDES_CFG:
RSVD_MASK(sp, ((MASK64(32, 28)|MASK64(26, 20)|MASK64(14, 8)|MASK64(1, 0))),
val, 0, reg);
lfup->cl_serdes_cfg[link] = val;
break;
case CL_SER_INVPAIR:
RSVD_MASK(sp, (MASK64(27,0)), val, 0, reg);
lfup->cl_ser_invpair[link] = val;
break;
case CL_TEST_CFG:
RSVD_MASK(sp, (MASK64(15, 10)|MASK64(7,0)), val, 0, reg);
lfup->cl_test_cfg[link] = val;
break;
case CL_ERROR_STAT:
RSVD_MASK(sp, (MASK64(24,0)), val, 0, reg);
lfup->cl_error_stat[link] &= ~val;
break;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Write LFU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, lfu_reg_name(reg), off, val););
break;
case MA_ldu64:
switch (reg) {
case CL_INIT_STATE:
val = lfup->cl_init_state[link];
break;
case CL_CFG_REG:
val = lfup->cl_cfg_reg[link];
break;
case CL_SERDES_CFG:
val = lfup->cl_serdes_cfg[link];
break;
case CL_SER_INVPAIR:
val = lfup->cl_ser_invpair[link];
break;
case CL_TEST_CFG:
val = lfup->cl_test_cfg[link];
break;
case CL_ERROR_STAT:
val = lfup->cl_error_stat[link];
break;
default:
/* illegal reg - an error */
return false;
}
DBGMULNODE(lprintf(sp->gid, "Read LFU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
node_id, reg, lfu_reg_name(reg), off, val););
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
#endif /* } VFALLS */
/*
* Access L2 Cache registers, mapped at offset = 0xA000000000
*/
static bool_t l2c_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t * npp;
int reg, bank;
uint64_t val;
l2c_t * l2p;
uint dbgen_bit;
#ifdef VFALLS /* { */
/*
* Note that for VF, each node has its own L2CSR region, and all of
* them are addressed by the same physical address range
* (0xA0.0000.0000 to 0xBF.FFFF.FFFF). Nodes can only access their
* local L2CSR regions and not those of other nodes. So the domain
* structure's addressmap only contains that one physical address
* range, and it is up to the init and access routines to map that
* PA to the correct node's L2CSR region.
* This is unlike NCU, CCU, MCU etc., which have both a local CSR
* access address, common to all nodes and translated by hw to the
* originating node's address space, AND a remote CSR access
* address, which allows any node to access any other node's address
* space. So in those cases the domain address map contains both the
* local CSR address space and the remote CSR address space.
*/
/* Redirect the common L2CSR PA to the correct node */
npp = (ss_proc_t *)sp->config_procp->procp;
config_addrp = npp->l2c_devp->addrp;
#endif /* } VFALLS */
/*
* FIXME: For the moment we only support 64bit accesses to registers.
*/
if (MA_ldu64!=op && MA_st64!=op) return false;
npp = (ss_proc_t *)config_addrp->config_devp->devp;
l2p = npp->l2p;
bank = (off >> 6) & 0x7; /* N2 supports 8-banked L2 cache */
reg = (off >> 32) & 0xf;
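/*
 * Decode example: bits [8:6] of the offset select one of the 8 L2
 * banks and bits [35:32] select the register group, so
 * off = 0x800000040 decodes to reg 0x8, bank 1.
 */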
switch (op) {
case MA_st64:
val = *regp;
if (reg >= 0x8) {
#define ASSIGN_L2(_n, _m) do { \
if (0LL != (val & ~(_m))) goto write_reserved; \
l2p->_n[bank] = val; \
} while (0)
switch (reg) {
/*
* L2 BIST Control Reg section 28.18 of N2 PRM 0.9.1
*/
case L2_TAG_BIST:
ASSIGN_L2( bist_ctl, MASK64(6,0) );
if (val & 1) l2p->bist_ctl[bank] |= 0x400;
break;
#ifdef VFALLS /* { */
case L2_CONTROL:
/*
* L2 Control Register section 28.1.2 of VF PRM 0.9
*/
ASSIGN_L2( control, (MASK64(37, 36)|MASK64(34, 0)) );
if (((l2p->control[bank] & L2_NODEID) >> L2_NODEID_SHIFT) != sp->config_procp->proc_id)
fatal("[0x%llx] (pc=0x%llx)\tIncorrect "
"node_id being set in L2 CONTROL REG"
"(offset 0x%x). Attempted write was "
"0x%llx on node 0x%x.", sp->gid, sp->pc,
off, val, sp->config_procp->proc_id);
/*
* Please see Victoria Falls Bug 124014 for details.
* Bug court approval 1/10/2006. The fix that was approved
* involves the addition of an L2 Control Register bit
* specifying the configuration, rather than bringing in
* ncx_2way into all 8 l2t's.
* There is an idle bit in the L2 CSR (bit 21), which was
* designated as DBG_EN in the PRM but no longer used by N2
* or VF. This bit will now need to be used to indicate a
* system with 3 or 4 VF nodes configured.
*/
dbgen_bit = (l2p->control[bank] & L2_DBGEN) >> L2_DBGEN_SHIFT;
if ((sp->config_procp->domainp->procs.count>2) ^ dbgen_bit)
fatal("[0x%llx] (pc=0x%llx)\tIncorrect "
"value being set for L2 DBGEN bit"
" in the L2 CONTROL reg(offset 0x%x)."
" Attempted write was 0x%llx on node "
"0x%x.", sp->gid, sp->pc,
off, val, sp->config_procp->proc_id);
break;
case L2_ERROR_ENABLE:
/*
* Table 12-3 of VF PRM 0.1
*/
ASSIGN_L2( error_enable, MASK64(31,0));
break;
case L2_ERROR_STATUS:
/*
* Table 12-1 of VF PRM 0.1
*
* RW1C: bit [63:56], [53:33]
* RW: bit [27:0]
*/
RSVD_MASK(sp, (MASK64(63,56)|MASK64(53,33)|MASK64(27,0)), val, 0, reg);
l2p->error_status[bank] &= ~val;
l2p->error_status[bank] &= MASK64(63,56)|MASK64(53,33);
l2p->error_status[bank] |= val & MASK64(27,0);
break;
case L2_ERROR_STATUS_II:
ASSIGN_L2( error_status_ii, MASK64(63,40));
break;
#else
case L2_CONTROL:
/*
* L2 Control Register section 28.15 of N2 PRM 0.9.1
*/
ASSIGN_L2( control, MASK64(21,0) );
break;
case L2_ERROR_ENABLE:
/*
* Error handling section 25.10 of N2 PRM 1.2
*/
ASSIGN_L2( error_enable, MASK64(2,0) );
break;
case L2_ERROR_STATUS:
/*
* Table 25-21 of N2 PRM 1.2
*
* RW1C: bit [53:34]
* RW: bit [63:54], [27:0]
*/
RSVD_MASK(sp, (MASK64(63,34)|MASK64(27,0)), val, 0, reg);
l2p->error_status[bank] &= ~val;
l2p->error_status[bank] &= MASK64(63,34);
l2p->error_status[bank] |= val & MASK64(27,0);
break;
#endif /* } VFALLS */
case L2_ERROR_ADDRESS:
ASSIGN_L2( error_address, MASK64(39,4) );
break;
case L2_ERROR_INJECT:
ASSIGN_L2( error_inject, MASK64(1,0) );
break;
case L2_ERROR_NOTDATA:
ASSIGN_L2( error_notdata, MASK64(51,48)|MASK64(45,4) );
break;
default:
/* illegal reg - an error */
return false;
}
} else {
uint64_t idx;
/*
* L2 Cache Diagnostic Access section 28.17 of N2 PRM 0.9.1
*/
if (reg < 0x4) {
/*
* index stores to a 32bit word and its ECC+rsvd bits
*/
idx = (off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD)) >> 2;
/*
* put oddeven select bit low so data is in addr order
*/
idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
l2p->diag_datap[idx] = val;
} else {
if (reg < 0x6) {
/*
* index stores to a tag and its ECC+rsvd bits
*/
idx = (off & (L2_WAY | L2_LINE | L2_BANK)) >> 6;
l2p->diag_tagp[idx] = val;
} else {
/*
* index valid/dirty or alloc/used bits and parity
*/
idx = (off & (L2_LINE | L2_BANK)) >> 6;
idx |= ((off & L2_VDSEL) >> 10);
l2p->diag_vuadp[idx] = val;
}
}
}
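/*
 * In all three diagnostic cases above (and in the matching load path
 * below) the relevant address bits are masked out of 'off' and
 * shifted down to form a dense array index; for the data array the
 * odd/even select bit is then placed at bit 0 so consecutive indices
 * follow address order.
 */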
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in l2 cache controller:"
"Write 0x%llx to bank %d, register %s (offset 0x%x)",
val, bank, l2c_reg_name(reg), reg ) );
return false;
case MA_ldu64:
if (reg >= 0x8) {
#define RETRIEVE_L2(_n, _m) do { val = ((l2p->_n[bank]) & (_m)); } while (0)
switch (reg) {
/*
* L2 BIST Control Reg section 28.18 of N2 PRM 0.9.1
*/
case L2_TAG_BIST:
RETRIEVE_L2( bist_ctl, MASK64(10,0) );
break;
#ifdef VFALLS /* { */
case L2_CONTROL:
/*
* L2 Control Register section 28.1.2 of VF PRM 0.9
*/
RETRIEVE_L2( control, MASK64(33,0));
break;
case L2_ERROR_ENABLE:
/*
* Error handling section 12.25.1 of VF PRM 0.1
*/
RETRIEVE_L2( error_enable, MASK64(31,0) );
break;
case L2_ERROR_STATUS:
RETRIEVE_L2( error_status,
MASK64(63,56)|MASK64(53,33)|MASK64(27,0));
break;
case L2_ERROR_STATUS_II:
RETRIEVE_L2( error_status_ii, MASK64(63,40));
break;
#else
case L2_CONTROL:
/*
* L2 Control Register section 28.15 of N2 PRM 0.9.1
*/
RETRIEVE_L2( control, MASK64(21,0) );
break;
case L2_ERROR_ENABLE:
/*
* Error handling section 25.10 of N2 PRM 1.2
*/
RETRIEVE_L2( error_enable, MASK64(2,0) );
break;
case L2_ERROR_STATUS:
RETRIEVE_L2( error_status, MASK64(63,34)|MASK64(27,0));
break;
#endif /* } VFALLS */
case L2_ERROR_ADDRESS:
RETRIEVE_L2( error_address, MASK64(39,4) );
break;
case L2_ERROR_INJECT:
RETRIEVE_L2( error_inject, MASK64(1,0) );
break;
case L2_ERROR_NOTDATA:
RETRIEVE_L2( error_notdata, MASK64(51,48)|MASK64(45,4) );
break;
default:
/* illegal reg - an error */
return false;
}
} else {
uint64_t idx;
/*
* L2 Cache Diagnostic Access section 28.17 of N2 PRM 0.9.1
*/
if (reg < 0x4) {
/*
* index retrieves a 32bit word and its ECC+rsvd bits
*/
idx = (off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD)) >> 2;
/*
* put oddeven select bit low so data is in addr order
*/
idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
val = l2p->diag_datap[idx];
} else {
if (reg < 0x6) {
/*
* index retrieves a tag and its ECC+rsvd bits
*/
idx = (off & (L2_WAY | L2_LINE | L2_BANK)) >> 6;
val = l2p->diag_tagp[idx];
} else {
/*
* index valid/dirty or alloc/used bits and parity
*/
idx = (off & (L2_LINE | L2_BANK)) >> 6;
idx |= ((off & L2_VDSEL) >> 10);
val = l2p->diag_vuadp[idx];
}
}
}
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
/*
* Access DRAM Control and Status Registers (mapped at offset = 0x8400000000)
*/
static bool_t mcu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
ss_proc_t * npp;
int reg, bank;
uint64_t val;
mcu_bank_t * dbp;
uint64_t i, ambid;
uint32_t val32;
int node_id = 0;
#ifdef VFALLS /* { */
domain_t *domainp;
tpaddr_t pa;
int idx;
if (config_addrp->baseaddr == PHYS_ADDR_MCU) {
/*
* If local MCU CSR access, need to convert to Node X (this node) MCU CSR
* address. Use the simcpu to get the correct node_id and then get the
* correct config_addrp
*/
node_id = sp->config_procp->proc_id;
domainp = sp->config_procp->domainp;
pa = PHYS_ADDR_MCU_REMOTE(node_id) + off;
config_addrp = find_domain_address(domainp, pa);
} else {
/*
* If remote MCU CSR access, use config_addrp to get at the node_id.
*/
/* first check if global addressing is allowed for this config */
GLOBAL_ADDRESSING_CHECK(sp, "MCU");
domainp = config_addrp->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (config_addrp->baseaddr == PHYS_ADDR_MCU_REMOTE(node_id))
break;
}
}
#endif /* } VFALLS */
/*
* FIXME: For the moment we only support 64bit accesses to registers.
*/
npp = (ss_proc_t *)config_addrp->config_devp->devp;
if (MA_ldu64!=op && MA_st64!=op) return false;
bank = off >> 12;
ASSERT (bank < npp->num_mbanks); /* this should be enforced by the config_dev range */
dbp = &(npp->mbankp[bank]);
reg = off & ((1<<12)-1);
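/*
 * Decode example: each bank occupies a 4KB window, so off = 0x1008
 * decodes to bank 1, register offset 0x008.
 */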
switch (op) {
case MA_st64:
val = *regp;
#define ASSIGN_DB(_n, _m) do { \
dbp->_n &= ~(_m); \
dbp->_n |= (val & (_m)); \
} while (0)
DBGMC(lprintf(sp->gid, "Memory controller bank %d : Write register 0x%lx '%s' value= 0x%llx on node %d\n",
bank, off, mcu_reg_name(reg), val, node_id); );
switch (reg) {
/*
* DRAM controller section 25.10 of N2 PRM 0.9.1
*/
case DRAM_CAS_ADDR_WIDTH: ASSIGN_DB( cas_addr_width, MASK64(3, 0) ); break;
case DRAM_RAS_ADDR_WIDTH: ASSIGN_DB( ras_addr_width, MASK64(3, 0) ); break;
case DRAM_CAS_LAT: ASSIGN_DB( cas_lat, MASK64(2, 0) ); break;
case DRAM_SCRUB_FREQ: ASSIGN_DB( scrub_freq, MASK64(11, 0) ); break;
case DRAM_REFRESH_FREQ: ASSIGN_DB( refresh_freq, MASK64(12, 0) ); break;
case DRAM_REFRESH_COUNTER: ASSIGN_DB( refresh_counter, MASK64(12, 0) ); break;
case DRAM_SCRUB_ENABLE: ASSIGN_DB( scrub_enable, MASK64(0, 0) ); break;
case DRAM_TRRD: ASSIGN_DB( trrd, MASK64(3, 0) ); break;
case DRAM_TRC: ASSIGN_DB( trc, MASK64(4, 0) ); break;
case DRAM_TRCD: ASSIGN_DB( trcd, MASK64(3, 0) ); break;
case DRAM_TWTR: ASSIGN_DB( twtr, MASK64(3, 0) ); break;
case DRAM_TRTW: ASSIGN_DB( trtw, MASK64(3, 0) ); break;
case DRAM_TRTP: ASSIGN_DB( trtp, MASK64(2, 0) ); break;
case DRAM_TRAS: ASSIGN_DB( tras, MASK64(3, 0) ); break;
case DRAM_TRP: ASSIGN_DB( trp, MASK64(3, 0) ); break;
case DRAM_TWR: ASSIGN_DB( twr, MASK64(3, 0) ); break;
case DRAM_TRFC: ASSIGN_DB( trfc, MASK64(6, 0) ); break;
case DRAM_TMRD: ASSIGN_DB( tmrd, MASK64(1, 0) ); break;
case DRAM_FAWIN: ASSIGN_DB( fawin, MASK64(4, 0) ); break;
case DRAM_TIWTR: ASSIGN_DB( tiwtr, MASK64(1, 0) ); break;
case DRAM_DIMM_STACK: ASSIGN_DB( dimm_stack, MASK64(0, 0) ); break;
case DRAM_EXT_WR_MODE2: ASSIGN_DB( ext_wr_mode2, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE1: ASSIGN_DB( ext_wr_mode1, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE3: ASSIGN_DB( ext_wr_mode3, MASK64(14, 0) ); break;
case DRAM_8_BANK_MODE: ASSIGN_DB( eight_bank_mode, MASK64(0, 0) ); break;
case DRAM_BRANCH_DISABLED: ASSIGN_DB( branch_disabled, MASK64(0, 0) ); break;
case DRAM_SEL_LO_ADDR_BITS: ASSIGN_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
#ifdef VFALLS
case DRAM_SINGLE_CHNL_MODE: ASSIGN_DB( single_chnl_mode, MASK64(1, 0) ); break;
case DRAM_MIRROR_MODE: ASSIGN_DB( mirror_mode, MASK64(0, 0) ); break;
#else
case DRAM_SINGLE_CHNL_MODE: ASSIGN_DB( single_chnl_mode, MASK64(0, 0) ); break;
#endif
case DRAM_DIMM_INIT:
if (0LL != (val & ~(3))) goto write_reserved;
dbp->dimm_init = val;
/* DRAM Init sequence done is instantaneous */
dbp->init_status = 1;
break;
case DRAM_INIT_STATUS: ASSIGN_DB( init_status, MASK64(0, 0) ); break;
case DRAM_DIMM_PRESENT: ASSIGN_DB( dimm_present, MASK64(3, 0) ); break;
case DRAM_FAILOVER_STATUS: ASSIGN_DB( failover_status, MASK64(0, 0) ); break;
case DRAM_FAILOVER_MASK: ASSIGN_DB( failover_mask, MASK64(34, 0) ); break;
case DRAM_POWER_DOWN_MODE: ASSIGN_DB( power_down_mode, MASK64(0, 0) ); break;
case FBD_CHNL_STATE:
ASSIGN_DB( fbd_chnl_state.val, MASK64(7, 0) );
/* Update the appropriate _done register */
switch( (val & MASK64(2, 0)) ) {
case 0:
dbp->disable_state_period_done = 1; break;
case 1:
dbp->calibrate_state_period_done = 1; break;
case 2:
dbp->training_state_done = 1; break;
case 3:
dbp->testing_state_done = 1; break;
case 4:
dbp->polling_state_done = 1; break;
case 5:
dbp->config_state_done = 1; break;
default:
lprintf(sp->gid, "Unknown val (0x%llx) being stored to FBD_CHNL_STATE reg on node %d\n",
val, node_id);
}
break;
case FBD_FAST_RESET_FLAG: ASSIGN_DB( fbd_fast_reset_flag, MASK64(3, 0) ); break;
case FBD_CHNL_RESET:
dbp->fbd_chnl_reset = val & 0x3;
/* if FBDINIT is set channel initialization starts */
if ((val & 0x1) == 1){
/* set the proper state value in amb link status */
for (i=0; i<MAX_AMBS; i++)
dbp->fbd_chnl_state.ambstate[i] = L0_STATE;
/* hw clears the bit after init is done */
dbp->fbd_chnl_reset &= ~(1ULL);
}
break;
case TS1_SB_NB_MAPPING: ASSIGN_DB( ts1_sb_nb_mapping, MASK64(2, 0) ); break;
case TS1_TEST_PARAMETER: ASSIGN_DB( ts1_test_parameter, MASK64(23, 0) ); break;
case TS3_FAILOVER_CONFIG: ASSIGN_DB( ts3_failover_config, MASK64(15, 0) ); break;
case DISABLE_STATE_PERIOD: ASSIGN_DB( disable_state_period,MASK64(5, 0) ); break;
case DISABLE_STATE_PERIOD_DONE: ASSIGN_DB( disable_state_period_done, MASK64(0, 0) ); break;
case CALIBRATE_STATE_PERIOD: ASSIGN_DB( calibrate_state_period, MASK64(19, 0) ); break;
case CALIBRATE_STATE_PERIOD_DONE: ASSIGN_DB( calibrate_state_period_done, MASK64(0, 0) ); break;
case TRAINING_STATE_MIN_TIME: ASSIGN_DB( training_state_min_time, MASK64(15, 0) ); break;
case TRAINING_STATE_DONE: ASSIGN_DB( training_state_done, MASK64(1, 0) ); break;
case TRAINING_STATE_TIMEOUT: ASSIGN_DB( training_state_timeout, MASK64(7, 0) ); break;
case TESTING_STATE_DONE: ASSIGN_DB( testing_state_done, MASK64(1, 0) ); break;
case TESTING_STATE_TIMEOUT: ASSIGN_DB( testing_state_timeout, MASK64(7, 0) ); break;
case POLLING_STATE_DONE: ASSIGN_DB( polling_state_done, MASK64(1, 0) ); break;
case POLLING_STATE_TIMEOUT: ASSIGN_DB( polling_state_timeout, MASK64(7, 0) ); break;
case CONFIG_STATE_DONE: ASSIGN_DB( config_state_done, MASK64(1, 0) ); break;
case CONFIG_STATE_TIMEOUT: ASSIGN_DB( config_state_timeout, MASK64(7, 0) ); break;
case DRAM_PER_RANK_CKE: ASSIGN_DB( dram_per_rank_cke, MASK64(15, 0) ); break;
case L0S_DURATION: ASSIGN_DB( l0s_duration, MASK64(6, 0) ); break;
case CHNL_SYNC_FRAME_FREQ: ASSIGN_DB( chnl_sync_frame_freq, MASK64(5, 0) ); break;
case CHNL_READ_LAT: ASSIGN_DB( chnl_read_lat, MASK64(15, 0) ); break;
case CHNL_CAPABILITY: ASSIGN_DB( chnl_capability, MASK64(9, 0) ); break;
case LOOPBACK_MODE_CNTL: ASSIGN_DB( loopback_mode_cntl, MASK64(1, 0) ); break;
case SERDES_CONFIG_BUS: ASSIGN_DB( serdes_config_bus, MASK64(24, 0) ); break;
case SERDES_INVPAIR: ASSIGN_DB( serdes_invpair, MASK64(47, 0) ); break;
case SERDES_TEST_CONFIG_BUS: ASSIGN_DB( serdes_test_config_bus, MASK64(31, 0) ); break;
case CONFIG_REG_ACCESS_ADDR: ASSIGN_DB( config_reg_access_addr, MASK64(15, 0) ); break;
case CONFIG_REG_ACCESS_DATA:
val32 = (uint32_t) val;
ambid = AMBID(dbp->config_reg_access_addr);
ASSIGN_DB(config_reg_access_data, MASK64(31, 0) );
switch (AMBADDR(dbp->config_reg_access_addr)){
#define ASSIGN_AMB(_n, _m) do { \
dbp->amb[ambid]._n = (_m); \
} while (0)
case FBD_VID_DID:
case FBDS:
goto write_reserved;
case EMASK:
ASSIGN_AMB(emask, val32 & 0x3f);
break;
case FERR:
ASSIGN_AMB(ferr, val32 & 0x3f);
break;
case NERR:
ASSIGN_AMB(nerr, val32 & 0x3f);
break;
case PSBYTE3_0:
ASSIGN_AMB(psbyte3_0, val32);
break;
case PSBYTE7_4:
ASSIGN_AMB(psbyte7_4, val32);
break;
case PSBYTE11_8:
ASSIGN_AMB(psbyte11_8, val32);
break;
case PSBYTE13_12:
ASSIGN_AMB(psbyte13_12, val32 & 0x0000ffff);
break;
case C2DINCRCUR_CMD2DATANXT:
ASSIGN_AMB(c2dincrcur_cmd2datanxt, val32 & 0x00ff00ff);
break;
case MBCSR:
/* clear START bit immediately */
ASSIGN_AMB(mbcsr, val32 & 0x7fffffff);
break;
case DAREFTC:
ASSIGN_AMB(dareftc, val32 & 0x00ffffff);
break;
case MTR_DSREFTC:
ASSIGN_AMB(mtr_dsreftc, val32 & 0x7f01fff7);
break;
case DRT:
ASSIGN_AMB(drt, val32 & 0x7f77ffff);
break;
case DRC:
ASSIGN_AMB(drc, val32 & 0x2f87ffff);
break;
case DCALCSR:
ASSIGN_AMB(dcalcsr, val32 & 0xf0607fff);
switch (dbp->amb[ambid].dcalcsr & 0xf) {
case 0:
case 1:
case 2:
case 3:
case 5:
case 0xc:
case 0xd:
/* set completion status if start set */
if (dbp->amb[ambid].dcalcsr & 0x80000000) {
dbp->amb[ambid].dcalcsr &= 0x0fffffff;
}
break;
default:
EXEC_WARNING(("Invalid DCALCSR opcode: 0x%x",
dbp->amb[ambid].dcalcsr & 0xf));
return false;
}
break;
case DCALADDR:
ASSIGN_AMB(dcaladdr, val32);
break;
case DDR2ODTC:
ASSIGN_AMB(ddr2odtc, val32);
break;
default:
/* illegal reg - an error */
EXEC_WARNING( ("Unimplemented write amb address = 0x%x, ambid=0x%x",
AMBADDR(dbp->config_reg_access_addr), ambid) );
return false;
}
break;
/*
* Performance counter section 10.3 of N2 PRM 1.1
*/
case DRAM_PERF_CTL: ASSIGN_DB( perf_ctl, MASK64(7, 0) ); break;
case DRAM_PERF_COUNT: ASSIGN_DB( perf_count, MASK64(63, 0) ); break;
/*
* Error handling section 25.12 of N2 PRM 1.2
*/
case DRAM_ERROR_STATUS:
dbp->error_status &= ~val;
dbp->error_status &= MASK64(63,54);
dbp->error_status |= val & MASK64(15,0);
break;
case DRAM_ERROR_ADDRESS: ASSIGN_DB( error_address, MASK64(39, 4) ); break;
case DRAM_ERROR_INJECT: ASSIGN_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
case DRAM_ERROR_COUNTER: ASSIGN_DB( error_counter, MASK64(15, 0) ); break;
case DRAM_ERROR_LOCATION: ASSIGN_DB( error_location, MASK64(35, 0) ); break;
case DRAM_ERROR_RETRY: ASSIGN_DB( error_retry, MASK64(63, 63)|MASK64(49,32)|MASK64(17,0) ); break;
case DRAM_FBD_ERROR_SYND: ASSIGN_DB( fbd_error_synd, MASK64(63, 63)|MASK64(29,0) ); break;
case DRAM_FBD_INJ_ERROR_SRC: ASSIGN_DB( fbd_inj_error_src, MASK64(1, 0) ); break;
case DRAM_FBR_COUNT: ASSIGN_DB( fbr_count, MASK64(16, 0) ); break;
/*
* Power management section 26.3 of N2 PRM 0.9.1
*/
case DRAM_OPEN_BANK_MAX: ASSIGN_DB( open_bank_max, MASK64(16, 0) ); break;
case DRAM_PROG_TIME_CNTR: ASSIGN_DB( prog_time_cntr, MASK64(15, 0) ); break;
/*
* Hardware debug section 29.2.2 of N2 PRM 0.9.1
*/
case DRAM_DBG_TRG_EN: ASSIGN_DB( dbg_trg_en, MASK64(2, 2) ); break;
case IBIST_NBFIB_CTL:
/*
* Set done bit (34) immediately if start bit (32)
* is set.
*/
if (val & (1LL << 32)) {
dbp->ibist_nbfib_ctl = (val | (1LL << 34));
ambid = (dbp->fbd_chnl_state.val & (0xF << 3))
>> 3;
dbp->fbd_chnl_state.ambstate[ambid] = L0_STATE;
} else {
dbp->ibist_nbfib_ctl = val;
}
break;
case IBIST_SBFIB_CTL:
/*
* Set done bit (34) immediately if start bit (32)
* is set.
*/
if (val & (1LL << 32)) {
dbp->ibist_sbfib_ctl = (val | (1LL << 34));
} else {
dbp->ibist_sbfib_ctl = val;
}
break;
default:
/* illegal reg - an error */
return false;
}
break;
write_reserved:
EXEC_WARNING( ("Attempted write to reserved field in dram controller: Write 0x%llx to bank %d, register %s (offset 0x%x) on node %d",
val, bank, mcu_reg_name(reg), reg, node_id ) );
return false;
case MA_ldu64:
#define RETRIEVE_DB(_n, _m) do { val = ((dbp->_n) & (_m)); } while (0)
switch (reg) {
/*
* DRAM controller section 25.10 of N2 PRM 0.9.1
*/
case DRAM_CAS_ADDR_WIDTH: RETRIEVE_DB( cas_addr_width, MASK64(3, 0) ); break;
case DRAM_RAS_ADDR_WIDTH: RETRIEVE_DB( ras_addr_width, MASK64(3, 0) ); break;
case DRAM_CAS_LAT: RETRIEVE_DB( cas_lat, MASK64(2, 0) ); break;
case DRAM_SCRUB_FREQ: RETRIEVE_DB( scrub_freq, MASK64(11, 0) ); break;
case DRAM_REFRESH_FREQ: RETRIEVE_DB( refresh_freq, MASK64(12, 0) ); break;
case DRAM_REFRESH_COUNTER: RETRIEVE_DB( refresh_counter, MASK64(12, 0) ); break;
case DRAM_SCRUB_ENABLE: RETRIEVE_DB( scrub_enable, MASK64(0, 0) ); break;
case DRAM_TRRD: RETRIEVE_DB( trrd, MASK64(3, 0) ); break;
case DRAM_TRC: RETRIEVE_DB( trc, MASK64(4, 0) ); break;
case DRAM_TRCD: RETRIEVE_DB( trcd, MASK64(3, 0) ); break;
case DRAM_TWTR: RETRIEVE_DB( twtr, MASK64(3, 0) ); break;
case DRAM_TRTW: RETRIEVE_DB( trtw, MASK64(3, 0) ); break;
case DRAM_TRTP: RETRIEVE_DB( trtp, MASK64(2, 0) ); break;
case DRAM_TRAS: RETRIEVE_DB( tras, MASK64(3, 0) ); break;
case DRAM_TRP: RETRIEVE_DB( trp, MASK64(3, 0) ); break;
case DRAM_TWR: RETRIEVE_DB( twr, MASK64(3, 0) ); break;
case DRAM_TRFC: RETRIEVE_DB( trfc, MASK64(6, 0) ); break;
case DRAM_TMRD: RETRIEVE_DB( tmrd, MASK64(1, 0) ); break;
case DRAM_FAWIN: RETRIEVE_DB( fawin, MASK64(4, 0) ); break;
case DRAM_TIWTR: RETRIEVE_DB( tiwtr, MASK64(1, 0) ); break;
case DRAM_DIMM_STACK: RETRIEVE_DB( dimm_stack, MASK64(0, 0) ); break;
case DRAM_EXT_WR_MODE2: RETRIEVE_DB( ext_wr_mode2, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE1: RETRIEVE_DB( ext_wr_mode1, MASK64(14, 0) ); break;
case DRAM_EXT_WR_MODE3: RETRIEVE_DB( ext_wr_mode3, MASK64(14, 0) ); break;
case DRAM_8_BANK_MODE: RETRIEVE_DB( eight_bank_mode, MASK64(0, 0) ); break;
case DRAM_BRANCH_DISABLED: RETRIEVE_DB( branch_disabled, MASK64(0, 0) ); break;
case DRAM_SEL_LO_ADDR_BITS: RETRIEVE_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
#ifdef VFALLS
case DRAM_SINGLE_CHNL_MODE: RETRIEVE_DB( single_chnl_mode, MASK64(1, 0) ); break;
case DRAM_MIRROR_MODE: RETRIEVE_DB( mirror_mode, MASK64(0, 0) ); break;
#else
case DRAM_SINGLE_CHNL_MODE: RETRIEVE_DB( single_chnl_mode, MASK64(0, 0) ); break;
#endif
case DRAM_DIMM_INIT: RETRIEVE_DB( dimm_init, MASK64(2, 0) ); break;
case DRAM_INIT_STATUS: RETRIEVE_DB( init_status, MASK64(0, 0) ); break;
case DRAM_DIMM_PRESENT: RETRIEVE_DB( dimm_present, MASK64(3, 0) ); break;
case DRAM_FAILOVER_STATUS: RETRIEVE_DB( failover_status, MASK64(0, 0) ); break;
case DRAM_FAILOVER_MASK: RETRIEVE_DB( failover_mask, MASK64(34, 0) ); break;
case DRAM_POWER_DOWN_MODE: RETRIEVE_DB( power_down_mode, MASK64(0, 0) ); break;
case FBD_CHNL_STATE:
/* retrieve state bits for the ambid that has been set */
/* extract ambid */
ambid = (dbp->fbd_chnl_state.val & (0xF << 3)) >> 3;
/* use it to index into state value */
val = (((dbp->fbd_chnl_state.val) & ~(0x7)) |
(dbp->fbd_chnl_state.ambstate[ambid]));
break;
case FBD_FAST_RESET_FLAG: RETRIEVE_DB( fbd_fast_reset_flag, MASK64(3, 0) ); break;
case FBD_CHNL_RESET: RETRIEVE_DB( fbd_chnl_reset, MASK64(1, 0) ); break;
case TS1_SB_NB_MAPPING: RETRIEVE_DB( ts1_sb_nb_mapping, MASK64(2, 0) ); break;
case TS1_TEST_PARAMETER: RETRIEVE_DB( ts1_test_parameter, MASK64(23, 0) ); break;
case TS3_FAILOVER_CONFIG: RETRIEVE_DB( ts3_failover_config, MASK64(15, 0) ); break;
case ELECTRICAL_IDLE_DETECTED: RETRIEVE_DB( electrical_idle_detected, MASK64(27,0) ); break;
case DISABLE_STATE_PERIOD: RETRIEVE_DB( disable_state_period, MASK64(5, 0) ); break;
case DISABLE_STATE_PERIOD_DONE: RETRIEVE_DB( disable_state_period_done, MASK64(0, 0) ); break;
case CALIBRATE_STATE_PERIOD: RETRIEVE_DB( calibrate_state_period, MASK64(19, 0) ); break;
case CALIBRATE_STATE_PERIOD_DONE: RETRIEVE_DB( calibrate_state_period_done, MASK64(0, 0) ); break;
case TRAINING_STATE_MIN_TIME: RETRIEVE_DB( training_state_min_time, MASK64(15, 0) ); break;
case TRAINING_STATE_DONE: RETRIEVE_DB( training_state_done, MASK64(1, 0) ); break;
case TRAINING_STATE_TIMEOUT: RETRIEVE_DB( training_state_timeout, MASK64(7, 0) ); break;
case TESTING_STATE_DONE: RETRIEVE_DB( testing_state_done, MASK64(1, 0) ); break;
case TESTING_STATE_TIMEOUT: RETRIEVE_DB( testing_state_timeout, MASK64(7, 0) ); break;
case POLLING_STATE_DONE: RETRIEVE_DB( polling_state_done, MASK64(1, 0) ); break;
case POLLING_STATE_TIMEOUT: RETRIEVE_DB( polling_state_timeout, MASK64(7, 0) ); break;
case CONFIG_STATE_DONE: RETRIEVE_DB( config_state_done, MASK64(1, 0) ); break;
case CONFIG_STATE_TIMEOUT: RETRIEVE_DB( config_state_timeout, MASK64(7, 0) ); break;
case DRAM_PER_RANK_CKE: RETRIEVE_DB( dram_per_rank_cke, MASK64(15, 0) ); break;
case L0S_DURATION: RETRIEVE_DB( l0s_duration, MASK64(6, 0) ); break;
case CHNL_SYNC_FRAME_FREQ: RETRIEVE_DB( chnl_sync_frame_freq, MASK64(5, 0) ); break;
case CHNL_READ_LAT: RETRIEVE_DB( chnl_read_lat, MASK64(15, 0) ); break;
case CHNL_CAPABILITY: RETRIEVE_DB( chnl_capability, MASK64(9, 0) ); break;
case LOOPBACK_MODE_CNTL: RETRIEVE_DB( loopback_mode_cntl, MASK64(1, 0) ); break;
case SERDES_CONFIG_BUS: RETRIEVE_DB( serdes_config_bus, MASK64(24, 0) ); break;
case SERDES_INVPAIR: RETRIEVE_DB( serdes_invpair, MASK64(47, 0) ); break;
case SERDES_TEST_CONFIG_BUS: RETRIEVE_DB( serdes_test_config_bus, MASK64(31, 0) ); break;
case CONFIG_REG_ACCESS_ADDR: RETRIEVE_DB( config_reg_access_addr, MASK64(15, 0) ); break;
case CONFIG_REG_ACCESS_DATA:
ambid = AMBID(dbp->config_reg_access_addr);
switch (AMBADDR(dbp->config_reg_access_addr)){
case FBD_VID_DID: val=(uint64_t)dbp->amb[ambid].vid_did; break;
case FBDS: val=dbp->amb[ambid].fbds; break;
case EMASK: val=dbp->amb[ambid].emask; break;
case FERR: val=dbp->amb[ambid].ferr; break;
case NERR: val=dbp->amb[ambid].nerr; break;
case PSBYTE3_0: val=dbp->amb[ambid].psbyte3_0; break;
case PSBYTE7_4: val=dbp->amb[ambid].psbyte7_4; break;
case PSBYTE11_8: val=dbp->amb[ambid].psbyte11_8; break;
case PSBYTE13_12: val=dbp->amb[ambid].psbyte13_12; break;
case C2DINCRCUR_CMD2DATANXT: val=dbp->amb[ambid].c2dincrcur_cmd2datanxt;
break;
case MBCSR: val=dbp->amb[ambid].mbcsr; break;
case DAREFTC: val=dbp->amb[ambid].dareftc; break;
case MTR_DSREFTC: val=dbp->amb[ambid].mtr_dsreftc; break;
case DRT: val=dbp->amb[ambid].drt; break;
case DRC: val=dbp->amb[ambid].drc; break;
case DCALCSR: val=dbp->amb[ambid].dcalcsr; break;
case DCALADDR: val=dbp->amb[ambid].dcaladdr; break;
case DDR2ODTC: val=dbp->amb[ambid].ddr2odtc; break;
default:
/* illegal reg - an error */
EXEC_WARNING( ("Unimplemented read amb address = 0x%x, ambid=0x%x",
AMBADDR(dbp->config_reg_access_addr), ambid) );
return false;
}
break;
/*
* Performance counter section 10.3 of N2 PRM 1.1
*/
case DRAM_PERF_CTL: RETRIEVE_DB( perf_ctl, MASK64(7, 0) ); break;
case DRAM_PERF_COUNT: RETRIEVE_DB( perf_count, MASK64(63, 0) ); break;
/*
* Error handling section 25.12 of N2 PRM 1.2
*/
case DRAM_ERROR_STATUS: RETRIEVE_DB( error_status, MASK64(63, 0) ); break;
case DRAM_ERROR_ADDRESS: RETRIEVE_DB( error_address, MASK64(39, 4) ); break;
case DRAM_ERROR_INJECT: RETRIEVE_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
case DRAM_ERROR_COUNTER: RETRIEVE_DB( error_counter, MASK64(15, 0) ); break;
case DRAM_ERROR_LOCATION: RETRIEVE_DB( error_location, MASK64(35, 0) ); break;
case DRAM_ERROR_RETRY: RETRIEVE_DB( error_retry, MASK64(63, 63)|MASK64(49,32)|MASK64(17,0) ); break;
case DRAM_FBD_ERROR_SYND: RETRIEVE_DB( fbd_error_synd, MASK64(63, 63)|MASK64(29,0) ); break;
case DRAM_FBD_INJ_ERROR_SRC: RETRIEVE_DB( fbd_inj_error_src, MASK64(1, 0) ); break;
case DRAM_FBR_COUNT: RETRIEVE_DB( fbr_count, MASK64(16, 0) ); break;
/*
* Power management section 26.3 of N2 PRM 0.9.1
*/
case DRAM_OPEN_BANK_MAX: RETRIEVE_DB( open_bank_max, MASK64(16, 0) ); break;
case DRAM_PROG_TIME_CNTR: RETRIEVE_DB( prog_time_cntr, MASK64(15, 0) ); break;
/*
* Hardware debug section 29.2.2 of N2 PRM 0.9.1
*/
case DRAM_DBG_TRG_EN: RETRIEVE_DB( dbg_trg_en, MASK64(2, 2) ); break;
case IBIST_NBFIB_CTL: RETRIEVE_DB( ibist_nbfib_ctl, MASK64(53, 0) ); break;
case IBIST_SBFIB_CTL: RETRIEVE_DB( ibist_sbfib_ctl, MASK64(55, 0) ); break;
default:
/* illegal reg - an error */
return false;
}
DBGMC(lprintf(sp->gid, "Memory controller bank %d : Read register 0x%lx '%s' value= 0x%llx on node %d\n",
bank, off, mcu_reg_name(reg), val, node_id); );
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = val;
break;
default:
ASSERT(0);
}
return true;
}
/*
* Create address mapping to access PCIE Cfg/IO, MEM32 and MEM64 space
*/
void niagara2_pcie_mapping(simcpu_t *sp, ncu_t *ncup, piu_region_t region)
{
uint64_t base, mask, size;
bool_t enable;
const char *name[3] = {"Cfg/IO", "Mem32", "Mem64"};
switch (region) {
case PIU_REGION_CFGIO:
base = ncup->regs.pcie_a_iocon_offset_base;
mask = ncup->regs.pcie_a_iocon_offset_mask;
break;
case PIU_REGION_MEM32:
base = ncup->regs.pcie_a_mem32_offset_base;
mask = ncup->regs.pcie_a_mem32_offset_mask;
break;
case PIU_REGION_MEM64:
base = ncup->regs.pcie_a_mem64_offset_base;
mask = ncup->regs.pcie_a_mem64_offset_mask;
break;
default:
ASSERT(0);
}
enable = GETMASK64(base,63,63);
base &= PIU_REGION_OFFSET_MASK;
mask &= PIU_REGION_OFFSET_MASK;
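/*
 * The size computation below inverts the fixed high bits plus the
 * programmed mask and adds one. Illustrative example: a mask of
 * MASK64(35,24) gives size = ~(MASK64(63,36)|MASK64(35,24)) + 1
 * = MASK64(23,0) + 1 = 16MB.
 */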
if (enable) {
size = ~(MASK64(63,36)|mask) + 1;
ncup->map[region].base = base;
ncup->map[region].mask = mask;
ncup->map[region].size = size;
ncup->map[region].enable = enable;
DBGDEV(lprintf(sp->gid, "PCIE %s is mapped at 0x%llx - 0x%llx of node %d\n",
name[region], base, base+size-1, ncup->node_id); );
}
}