/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: piu.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "@(#)piu.c 1.15 07/09/19 SMI"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <errno.h>
#include <sys/dkio.h>
#include <sys/dklabel.h>
#include <sys/vtoc.h>
#include <strings.h>
#include "basics.h"
#include "allocate.h"
#include "lexer.h"
#include "simcore.h"
#include "config.h"
#include "dumpinfo.h"
#include "strutil.h"
#include "fatal.h"
#include "tsparcv9internal.h"
#include "sparcv9regs.h"
#include "device.h"
#include "pcie_device.h"
#include "piu.h"
static void piu_parse(config_dev_t *);
static void piu_init(config_dev_t *);
static void piu_dump(config_dev_t *);
void piu_init_csr(pcie_model_t *piup);
static bool_t piu_cpu_access(simcpu_t *, config_addr_t *,
tpaddr_t offset, maccess_t op, uint64_t * regp);
static void parse_pcie_device(char *devname, config_dev_t *config_devp);
extern dev_type_t *find_dev_type(char * devnamep);
LIST_DEF(dev_child_cache, dev_child_type_t);
#ifdef VFALLS /* { */
#define PIU_NAME "piu_vf"
#define PIU_TYPE dev_type_piu_vf
#else
#define PIU_NAME "piu"
#define PIU_TYPE dev_type_piu
#endif /* } VFALLS */
/*
* PIU interfaces exported to legion
*/
dev_type_t PIU_TYPE={
PIU_NAME,
piu_parse,
piu_init,
piu_dump,
generic_device_non_cacheable,
piu_cpu_access,
DEV_MAGIC
};
/*
* Complete the creation and parsing of the PCI Express Interface Unit (PIU).
*
* The 'piu' directive specifies the address mappings of two noncacheable
* regions, as follows:
*
* - an 8 MB region for PIO access to the PCIE CSRs within PIU
*
* - a 64 GB region that maps to sub-regions for
*
* * PCIE Cfg/IO
* * PCIE MEM32
* * PCIE MEM64
*
* It also supports a 'pcie_device' directive which is used to define each PCIE
* device connected to the PIU.
*
* For Niagara2, the format in the conf file is as follows:
*
* device "piu"
* 0x8800000000 + 8M,
* 0xC000000000 + 64G
* {
* pcie_device "dev_name" <pcie device properties>;
* }
*
*/
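/*
 * For Victoria Falls (piu_vf), the directive additionally accepts
 * 'node_id' and 'dmu_local' entries, as handled by the VFALLS parsing
 * below. An illustrative sketch only - the addresses are taken from the
 * VF address map documented in piu_cpu_access(), not from a verified
 * conf file:
 *
 *	device "piu_vf"
 *		0xD200000000 + 8M,
 *		0xC000000000 + 16G
 *	{
 *		node_id 0;
 *		dmu_local 0x8800000000 + 8M;
 *		pcie_device "dev_name" <pcie device properties>;
 *	}
 */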
void piu_parse(config_dev_t *config_devp)
{
pcie_model_t *piup;
lexer_tok_t tok;
char *proc_type_namep;
DBGDEV( lprintf(-1, "piu_parse: parsing device %d\n", config_devp->device_id); );
piup = (void *)Xcalloc(1, pcie_model_t);
proc_type_namep = LIST_ENTRY(config_devp->domainp->procs, 0)->proc_typep->proc_type_namep;
piup->proc_type_namep = strdup(proc_type_namep);
config_devp->devp = (void *)piup;
/*
* For the N2 case, the node id is zero and is not parsed from the
* config file.
*/
if (!(piup->config_procp))
piup->config_procp = LIST_ENTRY(config_devp->domainp->procs, 0);
/*
* Continue parsing the 'piu' directive
*/
tok = lex_get_token();
switch(tok) {
case T_S_Colon:
return; /* nothing more to parse */
case T_L_Brace:
break; /* must be a pcie_device, so continue */
default:
lex_fatal("expecting either ; or Left Brace when parsing PIU");
}
/*
* We've found a Left Brace so let's continue parsing - we should find a
* pcie_device
*/
do {
tok = lex_get_token();
if (tok == T_R_Brace) break; /* We're done */
if (tok != T_Token)
lex_fatal("expected pcie_device directive when parsing piu");
#ifdef VFALLS /* { */
if (streq(lex.strp, "node_id")) {
int idx;
lex_get(T_Number);
NODE2IDX(lex.val, config_devp->domainp, idx);
piup->config_procp = LIST_ENTRY(config_devp->domainp->procs, idx);
DBGDEV(lprintf(-1, "PIU addresses %llx to %llx assigned to nodeid %d\n",
config_devp->addrp->baseaddr, config_devp->addrp->topaddr, lex.val););
lex_get(T_S_Colon);
} else
if (streq(lex.strp, "dmu_local")) {
tpaddr_t baseaddr, topaddr;
bool_t is_size;
static bool_t ins_once = false;
lex_get(T_Number);
baseaddr = lex.val;
is_size = false;
tok = lex_get_token();
if (tok == T_Plus) is_size = true; else lex_unget();
lex_get(T_Number);
topaddr = lex.val;
if (is_size)
topaddr += baseaddr;
if (topaddr <= baseaddr)
lex_fatal("top address <= base address with device %s",
config_devp->dev_typep->dev_type_namep);
if (!ins_once) {
/* only need to do this once since same local address for all nodes */
insert_domain_address(config_devp->domainp, config_devp, baseaddr, topaddr);
ins_once = true;
}
lex_get(T_S_Colon);
} else
#endif /* } VFALLS */
/* Check if we've found a pcie_device */
if (streq(lex.strp, "pcie_device")) {
char *pcie_dev_name;
/*
* Now parse the name of the PCIE device
*/
tok = lex_get_token();
switch (tok) {
case T_String:
pcie_dev_name = Xstrdup(lex.strp);
break;
default:
lex_fatal("Expected a pcie_device name directive");
}
/*
* We've found a PCIE device and have its name, so
* let's locate the library for that device and let it
* continue parsing.
*/
DBGDEV( lprintf(-1, "piu_parse: found a [%s] device\n", pcie_dev_name); );
parse_pcie_device(pcie_dev_name, config_devp);
lex_get(T_S_Colon);
continue;
}
} while (1);
}
static struct pcie_upbound piu_up = {
piu_dma_access,
piu_assert_intx,
piu_deassert_intx
};
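/*
 * A PCIE end device model reaches back into PIU through this table: a
 * device-initiated DMA lands in piu_dma_access(), and INTx
 * assertion/deassertion in piu_assert_intx()/piu_deassert_intx().
 * Illustrative call only - the member name used here is an assumption;
 * the real names are declared with struct pcie_upbound in pcie_device.h:
 *
 *	devp->pcie_up->dma_access(devp->pcie_modp, va, buf, 8,
 *	    devp->req_id, DA_Load, PCIE_IS64);
 */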
/*
* We've found a pcie_device within the 'piu' directive, so we need to
* locate the library for that device. The PCIE device is implemented by
* the use of a new device type, 'pcie_dev_type_t', which is an extended
* format from the existing DEV_TYPE.
*/
static void
parse_pcie_device(char *devnamep, config_dev_t *config_devp)
{
pcie_dev_type_t *pcie_dev_typep = NULL;
pcie_dev_inst_t *pcie_dev_instp = NULL;
pcie_dwbound_t *pcie_dp = NULL;
dev_child_type_t *childp = NULL;
pcie_model_t *piup;
piup = (pcie_model_t *)config_devp->devp;
/*
* Load PCIE end point device model
*/
pcie_dev_typep = (pcie_dev_type_t*)find_dev_type(devnamep);
if (pcie_dev_typep == NULL) {
DBGDEV( lprintf(-1, "Error! Cannot find device for [%s]\n", devnamep); );
return;
}
pcie_dev_instp = xmalloc(sizeof(*pcie_dev_instp), __LINE__, __FILE__);
if (pcie_dev_instp == NULL) {
perror("couldn't allocate memory for pcie_dev_instp");
exit(-10);
}
pcie_dev_instp->pcie_modp = piup;
/*
* Export PIU internal functions through the pcie_access_t interface
* to support upbound transactions (from PCIE end device to PIU)
*/
pcie_dev_instp->pcie_up = &piu_up;
/*
* Init this PCIE device
*/
pcie_dev_typep->dev_init(pcie_dev_instp);
/*
* Call the device specific parse routine to continue parsing the conf file
*/
pcie_dev_typep->dev_parse(pcie_dev_instp->hdl);
/*
* Register this PCIE device with the piu model
*/
piu_register_pcie_device(piup, pcie_dev_instp);
/*
* Create a global list for dev_child_cache and each entry
* contains a pointer to the device_id of the PCIE end device
* connected to the PIU model.
*/
childp = (dev_child_type_t *)xmalloc(sizeof(dev_child_type_t), __LINE__, __FILE__);
if (childp == NULL) {
perror("couldn't allocation memory for dev_child_type_t");
exit(-10);
}
childp->child_devicep = (void *)pcie_dev_instp;
childp->parent_device_id = config_devp->device_id;
LIST_ADD_PTR(dev_child_cache, dev_child_type_t, childp);
}
/*
* Initialize the PIU model after parsing is complete
*/
void piu_init(config_dev_t *config_devp)
{
pcie_model_t *piup;
LIST_INIT(dev_child_cache, dev_child_type_t);
piup = (pcie_model_t *)config_devp->devp;
piup->config_devp = config_devp;
/*
* init PIU CSR with power on reset
*/
piu_init_csr(piup);
/*
* init error lookup table
*/
piu_init_error_list();
}
/*
* PIU configuration dump
*/
void piu_dump(config_dev_t * config_devp)
{
}
/*
* Initialize PIU CSR with power on reset value
*/
void piu_init_csr(pcie_model_t *piup)
{
piu_csr_t *csrs = &piup->csrs;
int i, nwords;
nwords = pcie_csrs[PIU_Event_Queue_State_Register].nwords;
for (i = 0; i < nwords; i++)
csrs->Event_Queue_State_Register[i] = EQ_IDLE;
}
/*
* Access PIU (downbound)
*/
bool_t piu_cpu_access(simcpu_t *sp, config_addr_t *cap, tpaddr_t offset, maccess_t memop, uint64_t *regp)
{
pcie_model_t *piup;
bool_t cfgio,status;
piu_region_t region;
uint32_t count;
uint64_t addr, pa = cap->baseaddr + offset;
int node_id = 0;
#ifdef VFALLS /* { */
domain_t *domainp;
int idx;
/*
* VF PIU ADDRESS MAP
* ------------------
*
* Single Node Config
* ------------------
*
* ACCESS DMU PCI-Express Space
* ---------------------------------------------------------------------
* Node 0 0x88.0000.0000 + 8M 0xC0.0000.0000 - 0xCF.FFFF.FFFF(64GB)
*
* Multinode config
* ---------------
*
* ACCESS DMU PCI-Express Space
* ---------------------------------------------------------------------
* Local 0x88.0000.0000 + 8M none
* Node 0 0xD2.0000.0000 + 8M 0xC0.0000.0000 - 0xC3.FFFF.FFFF(16GB)
* Node 1 0xD6.0000.0000 + 8M 0xC4.0000.0000 - 0xC7.FFFF.FFFF(16GB)
* Node 2 0xDA.0000.0000 + 8M 0xC8.0000.0000 - 0xCB.FFFF.FFFF(16GB)
* Node 3 0xDE.0000.0000 + 8M 0xCC.0000.0000 - 0xCF.FFFF.FFFF(16GB)
*/
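/*
 * Worked example (from the table above): a local DMU CSR access on
 * node 1 at pa 0x8800000040 is remapped below to
 * PHYS_ADDR_DMU_REMOTE(1) + 0x40 = 0xD600000040, and the cap for that
 * remote range is then looked up again.
 */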
domainp = sp->config_procp->domainp;
if (domainp->procs.count > 1) {
/*
* If local DMU CSR access, need to convert to Node X(this node) DMU CSR
* address.
* Use the simcpu to get the correct node_id and then get the correct cap
*/
if (cap->baseaddr == PHYS_ADDR_DMU) {
node_id = sp->config_procp->proc_id;
pa = PHYS_ADDR_DMU_REMOTE(node_id) + offset;
cap = find_domain_address(domainp, pa);
}
/*
* accessing 64GB region which may belong to a single node or divided
* into 16GB sections for each node
*/
else if ((PHYS_ADDR_PIU_LB <= cap->baseaddr) && \
(((cap->topaddr-1) & PHYS_ADDR_MASK) <= PHYS_ADDR_PIU_UB)) {
domainp = cap->config_devp->domainp;
/* each node gets 16GB - case where there are multiple nodes */
for (idx = 0; idx<domainp->procs.count; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (cap->baseaddr == PHYS_ADDR_PIU_REMOTE(node_id))
break;
}
/*
* to catch case where trying to access address space
* of a node that is not populated
*/
if (idx == domainp->procs.count)
fatal("PIU access to pa %llx failed. Parent "
"node for this address not present.\n",pa);
}
/*
* If remote DMU CSR access, use cap to get at the node_id.
*/
else {
domainp = cap->config_devp->domainp;
for (idx = 0; idx<domainp->procs.count ; idx++) {
node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
if (cap->baseaddr == PHYS_ADDR_DMU_REMOTE(node_id))
break;
}
/*
* to catch case where trying to access address space
* of a node that is not populated
*/
if (idx == domainp->procs.count)
fatal("PIU access to pa %llx failed. Parent "
"node for this address not present.\n", pa);
}
}
#endif /* } VFALLS */
piup = (pcie_model_t *) cap->config_devp->devp;
/*
* N2 PIU only supports 1-, 2-, 4- and 8-byte aligned PIO accesses to the
* 64GB region, and 8-byte accesses to the CSRs in the 8MB region
* (section 16.3.2.1, N2 PRM Rev. 1.0)
*/
switch(memop & MA_Size_Mask) {
case MA_Size8 :
count = 1;
break;
case MA_Size16 :
count = 2;
break;
case MA_Size32 :
count = 4;
break;
case MA_Size64 :
count = 8;
break;
default:
ASSERT(0);
}
region = piu_decode_region(sp, piup, pa, &addr);
switch (region) {
case PIU_REGION_CFGIO:
cfgio = GETMASK64(addr, 28, 28);
if (count == 8) {
DBGDEV( lprintf(sp->gid, "ERROR: illegal 8 byte access to PCI "
"Cfg/IO addr = 0x%llx on node %d\n", addr, node_id); );
return false;
}
if (cfgio) {
uint64_t ioaddr = addr & PCIE_IO_ADDR_MASK;
status = piu_io_access(piup, memop, ioaddr, count, regp);
} else {
status = piu_cfg_access(piup, memop, addr, count, regp);
}
break;
case PIU_REGION_MEM32:
status = piu_mem_access(piup, memop, addr, count, regp, PCIE_MEM32);
break;
case PIU_REGION_MEM64:
status = piu_mem_access(piup, memop, addr, count, regp, PCIE_MEM64);
break;
case PIU_REGION_8MB:
status = piu_csr_access(sp, piup, memop, addr, regp);
break;
default:
lprintf(sp->gid, "ERROR: out of range access to PCIE space: "
"pc=0x%llx pa=0x%llx on node %d\n", sp->pc, pa, node_id);
ASSERT(0);
}
return status;
}
/*
* Decode PCIE non-cacheable regions
*
* - 8MB region for CSRs
*
* - 64GB region partitioned into three subregions as
*
* * PCIE-A Cfg/IO (512 MB)
* * PCIE-A MEM32 (16 MB - 2 GB)
* * PCIE-A MEM64 (16 MB - 32 GB)
*/
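/*
 * Worked example: pa 0x8800000000 matches PHYS_ADDR_DMU, so it decodes
 * to PIU_REGION_8MB with *offset = 0 (the CSR at the base of the 8MB
 * region). A pa inside the 64GB region instead walks the three
 * sub-region decoders below in order: Cfg/IO, then MEM32, then MEM64.
 */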
piu_region_t piu_decode_region(simcpu_t *sp, pcie_model_t *piup, uint64_t pa, uint64_t *offset)
{
uint64_t reg_addr = pa & PHYS_ADDR_MASK;
int node_id=piup->config_procp->proc_id;
#ifdef VFALLS /* { */
if ((reg_addr == (PHYS_ADDR_DMU_REMOTE(node_id))) || (reg_addr == (PHYS_ADDR_DMU))) {
#else
if (reg_addr == (PHYS_ADDR_DMU)) {
#endif /* } VFALLS */
if ((pa & DMU_8MB_GAP_MASK) == 0) {
*offset = pa & DMU_8MB_OFFSET_MASK;
return PIU_REGION_8MB;
} else {
/* should be a fatal() */
lprintf(sp->gid, "ERROR: illegal access to PIU CSRs: "
"pc=0x%llx pa=0x%llx on node %d\n",
sp->pc, pa, node_id);
ASSERT(0);
}
}
if ((reg_addr >= PHYS_ADDR_PIU_LB) && (reg_addr <= PHYS_ADDR_PIU_UB)) {
if (piu_decode_cfgio(piup, pa, offset))
return PIU_REGION_CFGIO;
else if (piu_decode_mem32(piup, pa, offset))
return PIU_REGION_MEM32;
else if (piu_decode_mem64(piup, pa, offset))
return PIU_REGION_MEM64;
}
/* pa matched neither the DMU CSR region nor a mapped PCIE sub-region */
return PIU_REGION_UNMAPPED;
}
/*
* Decode PCIE Cfg/IO region
*/
bool_t piu_decode_cfgio(pcie_model_t *piup, uint64_t pa, uint64_t *offset)
{
ncu_t *ncup;
map_info_t *map;
piup->config_procp->proc_typep->get_pseudo_dev(piup->config_procp, "ncu", (void *)&ncup);
map = &ncup->map[PIU_REGION_CFGIO];
if (map->enable && ((pa & map->mask) == map->base)) {
*offset = pa & PCIE_IOCON_ADDR_MASK;
return true;
}
return false;
}
/*
* Decode PCIE MEM32 region
*/
bool_t piu_decode_mem32(pcie_model_t *piup, uint64_t pa, uint64_t *offset)
{
ncu_t *ncup;
map_info_t *map;
piup->config_procp->proc_typep->get_pseudo_dev(piup->config_procp, "ncu", (void *)&ncup);
map = &ncup->map[PIU_REGION_MEM32];
if (map->enable && ((pa & map->mask) == map->base)) {
*offset = pa & (map->size - 1);
return true;
}
return false;
}
/*
* Decode PCIE MEM64 region
*/
bool_t piu_decode_mem64(pcie_model_t *piup, uint64_t pa, uint64_t *offset)
{
ncu_t *ncup;
map_info_t *map;
piu_csr_t *csrs = &piup->csrs;
piup->config_procp->proc_typep->get_pseudo_dev(piup->config_procp, "ncu", (void *)&ncup);
map = &ncup->map[PIU_REGION_MEM64];
if (map->enable && ((pa & map->mask) == map->base)) {
uint64_t pcie_offset;
pcie_offset = csrs->Mem_64_PCIE_Offset_Register;
*offset = (pa & ~map->mask & PCIE_MEM64_ADDR_MASK) | pcie_offset;
return true;
}
return false;
}
/*
* Access PCIE CSRs (downbound)
*/
bool_t piu_csr_access(simcpu_t *sp, pcie_model_t *piup, maccess_t memop, uint64_t offset, uint64_t *regp)
{
uint64_t old_value, value, new_error, *csrs = (uint64_t *)&piup->csrs;
pcie_csr_t index;
int size, regx, wordx;
char regname[BUFSIZ];
int node_id=piup->config_procp->proc_id;
/*
* PIU only supports 8-byte accesses to registers
*/
size = memop & MA_Size_Mask;
if (size != MA_Size64) return false;
/*
* check illegal offset
*/
index = piu_offset2reg(offset, &regx);
if (index == UND_PCIE_CSRS) {
DBGDEV(lprintf(sp->gid, "Access illegal PCIE register at offset "
"= 0x%llx on node %d\n", offset, node_id); );
return false;
}
/*
* read/write PCIE registers
*/
wordx = regx - pcie_csrs[index].regx;
strcpy(regname, pcie_csrs[index].name);
if (pcie_csrs[index].nwords > 1)
sprintf(&regname[strlen(regname)], "[%d]", wordx);
switch (memop) {
case MA_st64:
value = *regp;
old_value = csrs[regx];
/*
* check on Read only registers
*/
switch (index) {
case PIU_Interrupt_State_Status_Register_1:
case PIU_Interrupt_State_Status_Register_2:
case PIU_INTX_Status_Register:
case PIU_Event_Queue_State_Register:
case PIU_IMU_Interrupt_Status_Register:
case PIU_DMC_Core_and_Block_Error_Status_Register:
case PIU_MMU_Interrupt_Status_Register:
case PIU_ILU_Interrupt_Status_Register:
case PIU_Packet_Scoreboard_DMA_Register_Set:
case PIU_Packet_Scoreboard_PIO_Register_Set:
case PIU_Transaction_Scoreboard_Register_Set:
case PIU_Transaction_Scoreboard_Status_Register:
case PIU_PEU_Egress_Credits_Consumed_Register:
case PIU_PEU_Egress_Credit_Limit_Register:
case PIU_PEU_Egress_Retry_Buffer_Register:
case PIU_PEU_Ingress_Credits_Allocated_Register:
case PIU_PEU_Ingress_Credits_Received_Register:
case PIU_PEU_Other_Event_Interrupt_Status_Register:
case PIU_PEU_Device_Capabilities_Register:
case PIU_PEU_Device_Status_Register:
case PIU_PEU_Link_Capabilities_Register:
case PIU_PEU_Link_Status_Register:
case PIU_PEU_Uncorrectable_Error_Interrupt_Status_Register:
case PIU_PEU_Correctable_Error_Interrupt_Status_Register:
case PIU_PEU_CXPL_SERDES_Revision_Register:
case PIU_PEU_CXPL_AckNak_Latency_Timer_Register:
case PIU_PEU_CXPL_Replay_Timer_Register:
case PIU_PEU_CXPL_Core_Status_Register:
case PIU_PEU_CXPL_Event_Error_Interrupt_Status_Register:
case PIU_PEU_Link_Bit_Error_Counter_II_Register:
case PIU_PEU_SERDES_Receiver_Lane_Status_Register:
case PIU_PEU_SERDES_Transmitter_Status_Register:
DBGDEV(lprintf(sp->gid, "Error: Write Read-Only Register "
"'%s' offset=0x%llx value=0x%llx on node %d\n",
pcie_csrs[index].name, offset, *regp, node_id); );
return false; /* FIXME: should trap on the error */
}
csrs[regx] = *regp;
DBGDEV(lprintf(sp->gid, "Write PIU register '%s' at offset = "
"0x%llx value = 0x%llx on node %d\n",
pcie_csrs[index].name, offset, *regp, node_id); );
/*
* act upon write to reg
*/
switch (index) {
case PIU_Interrupt_Clear_Registers:
piu_set_irq_state(piup, wordx+INO_INTA, (*regp & MASK64(1,0)));
break;
case PIU_INT_A_Clear_Register:
case PIU_INT_B_Clear_Register:
case PIU_INT_C_Clear_Register:
case PIU_INT_D_Clear_Register:
if (*regp & 1)
piu_set_intx_state(piup, index-PIU_INT_A_Clear_Register+INO_INTA, IRQ_IDLE);
break;
case PIU_Event_Queue_Control_Set_Register:
if (value & MASK64(57,57)) {
/*
* upon ENOVERR set, update the OVERR and STATE field
* of the EQ Tail and State register
*/
piup->csrs.Event_Queue_Tail_Register[wordx] |= MASK64(57,57);
piup->csrs.Event_Queue_State_Register[wordx] = EQ_ERROR;
}
if (value & MASK64(44,44))
/*
* upon EN bit set, update the STATE bit of
* the EQ State register
*/
piup->csrs.Event_Queue_State_Register[wordx] = EQ_ACTIVE;
break;
case PIU_Event_Queue_Control_Clear_Register:
if (value & MASK64(57,57))
/* COVERR */
piup->csrs.Event_Queue_Tail_Register[wordx] &= ~MASK64(57,57);
if (value & MASK64(47,47)) {
/* E2I */
if (piup->csrs.Event_Queue_State_Register[wordx] == EQ_ERROR)
piup->csrs.Event_Queue_State_Register[wordx] = EQ_IDLE;
}
if (value & MASK64(44,44))
/* DIS */
piup->csrs.Event_Queue_State_Register[wordx] = EQ_IDLE;
break;
case PIU_Event_Queue_Tail_Register:
case PIU_Event_Queue_Head_Register:
csrs[regx] = old_value;
WRITE_PIU_CSR(csrs[regx], value, MASK64(6,0));
break;
case PIU_MSI_Mapping_Register:
csrs[regx] = old_value;
WRITE_PIU_CSR(csrs[regx], value, MASK64(63,62)|MASK64(5,0));
break;
case PIU_MSI_Clear_Registers:
if (value & MASK64(62,62))
/* EQWR_N */
piup->csrs.MSI_Mapping_Register[wordx] &= ~MASK64(62,62);
csrs[regx] = 0;
break;
case PIU_MSI_32_bit_Address_Register:
csrs[regx] = old_value;
WRITE_PIU_CSR(csrs[regx], value, MASK64(31,16));
break;
case PIU_MSI_64_bit_Address_Register:
csrs[regx] = old_value;
WRITE_PIU_CSR(csrs[regx], value, MASK64(63,16));
break;
case PIU_IMU_Error_Status_Clear_Register:
/*
* W1C: clear IMU error
*/
piup->csrs.IMU_Error_Status_Set_Register &= ~value;
break;
case PIU_IMU_Error_Status_Set_Register:
/*
* W1S to simulate actual IMU error occurrence
*/
new_error = value & ~old_value;
if (new_error) {
csrs[regx] = new_error | old_value;
piu_simulate_imu_error(piup, new_error);
}
break;
case PIU_MMU_Error_Status_Clear_Register:
/*
* W1C: clear MMU error
*/
piup->csrs.MMU_Error_Status_Set_Register &= ~value;
break;
case PIU_MMU_Error_Status_Set_Register:
/*
* W1S to simulate actual MMU error occurrence
*/
new_error = value & ~old_value;
if (new_error) {
csrs[regx] = new_error | old_value;
piu_simulate_mmu_error(piup, new_error);
}
break;
default:
break;
}
break;
case MA_ldu64:
value = csrs[regx];
/*
* Check write-only CSRs whose loads always return 0
*/
switch (index) {
case PIU_Event_Queue_Control_Set_Register:
case PIU_Event_Queue_Control_Clear_Register:
value = 0;
break;
}
switch (index) {
case PIU_Interrupt_Clear_Registers:
value = piu_get_irq_state(piup, wordx+INO_INTA);
break;
case PIU_INT_A_Clear_Register:
case PIU_INT_B_Clear_Register:
case PIU_INT_C_Clear_Register:
case PIU_INT_D_Clear_Register:
value = piu_get_intx_state(piup, index-PIU_INT_A_Clear_Register+INO_INTA);
break;
case PIU_MSI_Clear_Registers:
/*
* return EQWR_N bit
*/
value = piup->csrs.MSI_Clear_Registers[wordx] & MASK64(62, 62);
break;
case PIU_IMU_Error_Status_Clear_Register:
value = piup->csrs.IMU_Error_Status_Set_Register;
break;
case PIU_IMU_Interrupt_Status_Register:
value = piup->csrs.IMU_Error_Status_Set_Register &
piup->csrs.IMU_Interrupt_Enable_Register;
break;
case PIU_MMU_Error_Status_Clear_Register:
value = piup->csrs.MMU_Error_Status_Set_Register;
break;
case PIU_MMU_Interrupt_Status_Register:
value = piup->csrs.MMU_Error_Status_Set_Register &
piup->csrs.MMU_Interrupt_Enable_Register;
break;
}
DBGDEV(lprintf(sp->gid, "Read PCIE register '%s' at offset = "
"0x%llx value = 0x%llx on node %d\n",
pcie_csrs[index].name, offset, value, node_id); );
if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
*regp = value;
break;
default:
lprintf(sp->gid, "ERROR: PIU only supports 8 byte CSR access, "
"node is %d\n", node_id);
ASSERT(0);
}
return true;
}
/*
* Access PCIE device's IO space (downbound)
*/
bool_t piu_io_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp)
{
pcie_dev_inst_t *pcie_devp;
pcie_dwbound_t *pcie_dp;
bool_t status;
/*
* find matching PCIE end device
*/
pcie_devp = piu_find_pcie_dev(piup, (void *)&ioaddr, PCIE_IO);
if (pcie_devp == NULL) {
DBGDEV( lprintf(-1, "PCIE device with mapped pa = 0x%lx not found!\n", ioaddr); );
return false;
}
pcie_dp = pcie_devp->pcie_dp;
switch (memop & MA_Op_Mask) {
case MA_Ld:
case MA_LdSigned:
status = pcie_dp->mem_access(pcie_devp->hdl, ioaddr, regp, count, DA_Load, PCIE_IO);
break;
case MA_St:
status = pcie_dp->mem_access(pcie_devp->hdl, ioaddr, regp, count, DA_Store, PCIE_IO);
break;
default:
ASSERT(0);
}
return status;
}
/*
* Access PCIE device's Configuration space (downbound)
*
* The data is byte-swapped between the big and little endian formats
* because PCI cfg space is structured as little endian
*/
bool_t piu_cfg_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp)
{
pcie_dev_inst_t *pcie_devp;
pcie_dwbound_t *pcie_dp;
uint8_t bus_no, dev_no, fun_no, reg_no;
uint16_t req_id;
uint64_t data;
bool_t status;
/*
* determine request Id
*/
bus_no = (ioaddr & PCIE_BUS_NO_MASK) >> PCIE_BUS_NO_SHIFT;
dev_no = (ioaddr & PCIE_DEV_NO_MASK) >> PCIE_DEV_NO_SHIFT;
fun_no = (ioaddr & PCIE_FUN_NO_MASK) >> PCIE_FUN_NO_SHIFT;
reg_no = ioaddr & PCIE_REG_NO_MASK;
req_id = PCIE_REQ_ID(bus_no, dev_no, fun_no);
/*
* find matching PCIE end device
*/
pcie_devp = piu_find_pcie_dev(piup, (void *)&req_id, PCIE_CFG);
if (pcie_devp == NULL) {
DBGDEV( lprintf(-1, "PCIE device with bus_no=0x%x dev_no=0x%x fun_no=0x%x not found!\n",
PCIE_BUS_NO(req_id), PCIE_DEV_NO(req_id), PCIE_FUN_NO(req_id)); );
return false;
}
/*
* read or write access to PCIE Cfg space
*/
pcie_dp = pcie_devp->pcie_dp;
switch (memop & MA_Op_Mask) {
case MA_Ld:
case MA_LdSigned:
status = pcie_dp->mem_access(pcie_devp->hdl, reg_no, regp, count, DA_Load, PCIE_CFG);
break;
case MA_St:
status = pcie_dp->mem_access(pcie_devp->hdl, reg_no, regp, count, DA_Store, PCIE_CFG);
break;
default:
ASSERT(0);
}
return status;
}
/*
* Access PCIE device's MEM32/MEM64 space (downbound)
*/
bool_t piu_mem_access(pcie_model_t *piup, maccess_t memop, uint64_t paddr, uint32_t count, uint64_t *regp,
pcie_space_t space_id)
{
pcie_dev_inst_t *pcie_devp;
pcie_dwbound_t *pcie_dp;
bool_t status;
/*
* find matching PCIE end device
*/
pcie_devp = piu_find_pcie_dev(piup, (void *)&paddr, space_id);
if (pcie_devp == NULL) {
DBGDEV( lprintf(-1, "PCIE device with mapped pa = 0x%lx not found!\n", paddr); );
return false;
}
pcie_dp = pcie_devp->pcie_dp;
switch (memop & MA_Op_Mask) {
case MA_Ld:
case MA_LdSigned:
status = pcie_dp->mem_access(pcie_devp->hdl, paddr, regp, count, DA_Load, space_id);
break;
case MA_St:
status = pcie_dp->mem_access(pcie_devp->hdl, paddr, regp, count, DA_Store, space_id);
break;
default:
ASSERT(0);
}
return status;
}
/*
* DMA transactions (upbound)
*
* Arguments:
* piup: handle to pcie_model structure
* va: host virtual address accessed by the DMA transaction
* datap: data read from/written to the host memory
* count: data size counted in byte
* req_id: 16 bit requester Id
* type: access type (load vs. store)
* mode: addressing mode, can be either 32 bit or 64 bit address
*/
bool_t piu_dma_access(pcie_model_t *piup, tvaddr_t va, uint8_t *datap, int count,
uint16_t req_id, dev_access_t type, dev_mode_t mode)
{
uint64_t msi_addr;
bool_t is_msi = false, status;
tpaddr_t pa;
/*
* decode MSI access
*/
if (mode == PCIE_IS32) {
msi_addr = piup->csrs.MSI_32_bit_Address_Register & MASK64(31, 16);
is_msi = ((va & MASK64(31,16)) == msi_addr);
}
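/*
 * Example of the 32-bit match: if software programmed bits 31:16 of
 * the MSI_32_bit_Address_Register to 0x3456, then any 32-bit-mode
 * access whose va bits 31:16 equal 0x3456 (e.g. va 0x34560008) is
 * flagged as an MSI target rather than sent to the IOMMU.
 */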
if (mode == PCIE_IS64) {
int sun4v, base, bit=63;
/*
* In sun4v mode, if EQ_base_addr_reg.bit[63]=0, msi_addr_reg.bit[63]
* is not used for comparison (see 16.3.9.8 of N2 PRM, rev 1.2)
*/
sun4v = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 2, 2);
base = GETMASK64(piup->csrs.Event_Queue_Base_Address_Register, 63,63);
if (sun4v)
bit = (base == 0) ? 62 : 63;
msi_addr = piup->csrs.MSI_64_bit_Address_Register & MASK64(bit, 16);
is_msi = ((va & MASK64(bit,16)) == msi_addr);
}
if (is_msi && type == DA_Store) {
status = piu_msi_write(piup, va, datap, count, req_id, mode);
return (status);
}
/*
* perform IOMMU operation
*/
status = piu_iommu(piup, va, req_id, type, mode, &pa);
/*
* VA -> PA translation is successful, do DMA transaction with pa
*/
if (status) {
config_proc_t *procp = piup->config_procp;
status = procp->proc_typep->dev_mem_access(procp, pa, datap, count, type);
}
return (status);
}
/*
* This function performs IOMMU operation upon each DMA request.
*/
bool_t piu_iommu(pcie_model_t *piup, tvaddr_t va, uint16_t req_id, dev_access_t type,
dev_mode_t mode, tpaddr_t *pa)
{
int te, be, sun4v;
bool_t status = false;
te = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 0, 0);
be = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 1, 1);
sun4v = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 2, 2);
/*
* check if it is an MMU bypass operation (allowed in both SUN4V and SUN4U modes)
*/
if (be && (mode == PCIE_IS64)) {
if ((va & MASK64(63, 39)) == MASK64(63, 50)) {
*pa = va & MASK64(38, 0);
DBGDEV(lprintf(-1, "piu_iommu: bypass va = 0x%llx pa = 0x%llx\n", va, *pa); );
return true;
} else {
if (!sun4v) {
/*
* FIXME: should raise the MMU BYP_OOR error in sun4u mode
*/
DBGDEV(lprintf(-1, "piu_iommu: IOMMU BYP_OOR error va = 0x%llx\n", va); );
ASSERT(0);
}
}
}
if (!be && (mode == PCIE_IS64)) {
if (!sun4v) {
/*
* FIXME: should raise the MMU BYP_ERR error in sun4u mode
*/
DBGDEV(lprintf(-1, "piu_iommu: IOMMU BYP_ERR error va = 0x%llx\n", va); );
ASSERT(0);
}
}
/*
* check whether MMU is disabled
*/
if (!te) {
/*
* FIXME: raise MMU TRN_ERR for both sun4v and sun4u mode
*/
DBGDEV(lprintf(-1, "piu_iommu: MMU is disabled, va = 0x%llx\n", va); );
ASSERT(0);
}
/*
* perform IOMMU operation under SUN4U or SUN4V mode
*/
if (sun4v) {
status = piu_iommu_sun4v(piup, va, req_id, type, mode, pa);
} else {
status = piu_iommu_sun4u(piup, va, req_id, type, mode, pa);
}
return (status);
}
/*
* Translate VA -> PA, SUN4U mode
*/
bool_t piu_iommu_sun4u(pcie_model_t *piup, tvaddr_t va, uint16_t req_id, dev_access_t type,
dev_mode_t mode, tpaddr_t *pa)
{
config_proc_t *procp = piup->config_procp;
int ts, ps, pg_bits, tsb_sz, tte_idx;
uint64_t tb, vpn, tte_addr, tte;
bool_t status;
ts = GETMASK64(piup->csrs.MMU_TSB_Control_Register, 3, 0);
ps = GETMASK64(piup->csrs.MMU_TSB_Control_Register, 8, 8);
tb = piup->csrs.MMU_TSB_Control_Register & MASK64(38, 13);
/*
* determine the tte index in terms of
*
* - page size: 8K (ps=0) and 64K (ps=1)
* - number of TSB entries (=1K*2^ts = 2^(10+ts))
*/
pg_bits = 13 + 3*ps;
vpn = (va & MASK64(31, pg_bits)) >> pg_bits;
tsb_sz = 1 << (ts+10);
tte_idx = (vpn & (tsb_sz-1)) << 3; /* each entry is 8 bytes */
tte_addr = tb + tte_idx;
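/*
 * Worked example, assuming ps = 0 (8K pages) and ts = 0 (1K entries):
 * pg_bits = 13 and tsb_sz = 1024. For va 0x12348000, vpn = 0x91A4,
 * tte_idx = (0x91A4 & 0x3FF) << 3 = 0xD20, so the TTE is fetched from
 * tb + 0xD20.
 */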
/*
* retrieve the tte entry
*/
status = procp->proc_typep->dev_mem_access(procp, tte_addr, (uint8_t *)&tte, 8, DA_Load);
if (!status) {
DBGDEV(lprintf(-1, "piu_iommu_sun4u: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
tte_addr, va); );
ASSERT(0);
}
/*
* finally do VA -> PA
*/
piu_iommu_va2pa(tte, ps, va, req_id, type, mode, pa);
DBGDEV(lprintf(-1, "piu_iommu_sun4u: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
va, *pa, tte_addr, tte); );
return true;
}
/*
* Translate VA -> PA, SUN4V mode
*/
bool_t piu_iommu_sun4v(pcie_model_t *piup, tvaddr_t va, uint16_t req_id, dev_access_t type,
dev_mode_t mode, tpaddr_t *pa)
{
config_proc_t *procp = piup->config_procp;
piu_csr_t *csrs = &piup->csrs;
int i, busid_sel, busid, ps, ts, pg_bits, tsb_sz, tte_idx;
uint8_t idx, iotsb_idx, iotsb_no;
uint64_t dev2iotsb, offset, base_pa, vpn, tte_addr, tte;
bool_t status;
/*
* Form 7 bit index id into the DEV2IOTSB table, which is implemented
* by a set of 16 x 64-bit registers, with each register containing
* 8 x 5-bit values to index into the IOTSBDESC table.
*/
busid = PCIE_BUS_NO(req_id);
busid_sel = GETMASK64(csrs->MMU_Control_and_Status_Register, 3, 3);
idx = (va >> 63) << 6;
idx |= busid_sel ? GETMASK64(busid, 5, 0) : GETMASK64(busid, 6, 1);
/*
* Use the 7 bit index id to extract the 5-bit iotsb_no from the
* DEV2IOTSB table (total of 128 index cells out of 16 regs).
*/
dev2iotsb = csrs->MMU_DEV2IOTSB_Registers[idx>>3];
iotsb_idx = GETMASK64(idx, 2, 0) << 3;
iotsb_no = GETMASK64(dev2iotsb, iotsb_idx + 4, iotsb_idx);
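/*
 * Worked example, assuming busid_sel = 0, bus id 0x30 and va bit 63
 * clear: idx = (0x30 >> 1) & 0x3f = 0x18, so dev2iotsb is read from
 * MMU_DEV2IOTSB_Registers[3] and iotsb_no comes from its lowest 5-bit
 * cell (iotsb_idx = 0).
 */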
/*
* Use iotsb_no as index to retrieve IOTSB info from IOTSBDESC table
* (implemented by a set of 32 x 64-bit registers)
*/
base_pa = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 59, 34);
offset = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 33, 7);
ps = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 6, 4);
ts = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 3, 0);
/*
* validate VA
*/
if ((va & MASK64(62, 40)) != 0) {
uint64_t error_code, trans_type;
/*
* log the error
*/
csrs->MMU_Translation_Fault_Address_Register = va & MASK64(63, 2);
if (mode == PCIE_IS32)
trans_type = (type == DA_Load) ? TLP_MRd_FMT_TYPE_IS32 : TLP_MWr_FMT_TYPE_IS32;
else
trans_type = (type == DA_Load) ? TLP_MRd_FMT_TYPE_IS64 : TLP_MWr_FMT_TYPE_IS64;
csrs->MMU_Translation_Fault_Status_Register = req_id | (trans_type << 16);
/*
* raise mmu sun4v_va_oor error
*/
error_code = 1ULL<<SUN4V_VA_OOR_P;
csrs->MMU_Error_Status_Set_Register |= error_code;
piu_raise_mmu_error(piup, error_code);
return true;
}
/*
* determine adjusted page number using encoded ps value
* and adjusted VA at a given offset
*
* FIXME: check underflow error on vpn (error = sun4v_va_adj_uf)
*/
pg_bits = 13 + 3*ps;
vpn = ((va & MASK64(39, pg_bits)) >> pg_bits) - offset;
/*
* calculate tte index in terms of TSB size
*
* FIXME: check out of range error on vpn (error = TRN_OOR)
*/
tsb_sz = 1 << (ts+10);
tte_idx = (vpn & (tsb_sz-1)) << 3;
tte_addr = (base_pa << 13) + tte_idx;
/*
* retrieve the tte entry
*/
status = procp->proc_typep->dev_mem_access(procp, tte_addr, (uint8_t *)&tte, 8, DA_Load);
if (!status) {
DBGDEV(lprintf(-1, "piu_iommu_sun4v: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
tte_addr, va); );
ASSERT(0);
}
/*
* finally do VA -> PA
*/
piu_iommu_va2pa(tte, ps, va, req_id, type, mode, pa);
DBGDEV(lprintf(-1, "piu_iommu_sun4v: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
va, *pa, tte_addr, tte); );
return true;
}
bool_t piu_iommu_va2pa(uint64_t tte, int ps, tvaddr_t va, uint16_t req_id, dev_access_t type,
dev_mode_t mode, tpaddr_t *pa)
{
bool_t tte_key_valid, tte_data_w, tte_data_v;
uint16_t tte_dev_key;
int pg_bits;
uint64_t pg_mask, tte_data_pa;
/*
* validate tte
*/
tte_data_v = GETMASK64(tte, 0, 0);
tte_key_valid = GETMASK64(tte, 2, 2);
tte_dev_key = GETMASK64(tte, 63, 48);
/*
* assert on invalid tte entry
*/
ASSERT(tte_data_v);
#ifdef VFALLS /* { */
/*
* check the RO (Relaxed Ordering) bit
*/
if (GETMASK64(tte, 6, 6))
DBGDEV(lprintf(-1, "Relaxed Ordering is not supported\n"); );
#endif /* } VFALLS */
/*
* compare tte's DEV_KEY field with req_id
*/
if (tte_key_valid) {
/*
* According to N2 PIU PRM, the function number portion of
* the tte_dev_key and the source req_id should be masked
* with the FNM field of the tte.
*/
uint16_t tte_fnm = MASK64(15,3) | GETMASK64(tte, 5, 3);
if ((tte_dev_key & tte_fnm) != (req_id & tte_fnm)) {
DBGDEV(lprintf(-1, "piu_iommu_va2pa: req_id=0x%lx not matching tte dev_key=0x%lx\n",
req_id, tte_dev_key); );
ASSERT(0);
}
}
/*
* check on DATA_W for the write request
*/
tte_data_w = GETMASK64(tte, 1, 1);
if ((tte_data_w == 0) && type == DA_Store) {
DBGDEV(lprintf(-1, "piu_iommu_sun4u: write to non-writable page: va = 0x%lx tte = 0x%lx\n",
va, tte); );
ASSERT(0);
}
/*
* finally translate VA to PA
*/
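/*
 * E.g. with ps = 0 (8K pages, pg_mask 0x1FFF), a tte whose pa field
 * (bits 38:13) yields tte_data_pa = 0x3E000000 and a va page offset of
 * 0x123 produce *pa = 0x3E000123.
 */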
pg_bits = 13 + 3*ps;
pg_mask = (1ULL << pg_bits) - 1; /* pg_bits can exceed 31, so shift in 64 bits */
tte_data_pa = tte & MASK64(38, pg_bits);
*pa = tte_data_pa | (va & pg_mask);
return true;
}
/*
* Assert INTx interrupt
*/
bool_t piu_assert_intx(pcie_model_t *piup, uint8_t pin_no, uint8_t dev_no)
{
uint8_t ino;
/*
* FIXME: check if PIU supports more than 4 devices
*/
ASSERT(pin_no < 4);
/*
* generate mondo interrupt
*/
ino = pin_no + INO_INTA;
piu_mondo_interrupt(piup, ino, IRQ_RECEIVED);
return true;
}
/*
* Deassert INTx interrupt
*/
bool_t piu_deassert_intx(pcie_model_t *piup, uint8_t pin_no, uint8_t dev_no)
{
uint8_t ino;
/*
* FIXME: check if PIU supports more than 4 devices
*/
ASSERT(pin_no < 4);
ino = pin_no + INO_INTA;
piu_mondo_interrupt(piup, ino, IRQ_IDLE);
return true;
}
/*
* Generate IRQ mondo interrupt and update the interrupt state accordingly
*/
void piu_mondo_interrupt(pcie_model_t *piup, uint8_t ino, irq_state_t new)
{
bool_t V;
int regx;
irq_state_t old;
DBGDEV( lprintf(-1, "piu_mondo_interrupt: ino = %d\n", ino); );
/*
* get the current IRQ mondo state
*/
regx = ino - INO_INTA;
old = piu_get_irq_state(piup, ino);
V = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 31, 31);
if ((old == IRQ_IDLE) && (new == IRQ_RECEIVED) && V) {
bool_t mdo_mode;
pcie_mondo_t *irq_mondo = (pcie_mondo_t *)Xcalloc(1, pcie_mondo_t);
irq_mondo->thread_id = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 30, 25);
mdo_mode = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 63, 63);
if (mdo_mode == 0) {
irq_mondo->data[0] = (irq_mondo->thread_id << 6) | ino;
irq_mondo->data[1] = 0;
} else {
uint64_t data0 = piup->csrs.Interrupt_Mondo_Data_0_Register;
irq_mondo->data[0] = (data0 & MASK64(63, 12)) | (irq_mondo->thread_id << 6) | ino;
irq_mondo->data[1] = piup->csrs.Interrupt_Mondo_Data_1_Register;
}
/*
* send IRQ mondo to target CPU
*/
piup->config_procp->proc_typep->ext_signal(piup->config_procp, ES_PCIE, (void *)irq_mondo);
new = IRQ_PENDING;
}
/*
* update interrupt state
*/
piu_set_irq_state(piup, ino, new);
}
/*
* Update INTx state
*/
void piu_set_intx_state(pcie_model_t *piup, uint8_t ino, irq_state_t new)
{
int bit, val;
switch (ino) {
case INO_INTA:
case INO_INTB:
case INO_INTC:
case INO_INTD:
bit = 3 - (ino - INO_INTA);
val = 1 << bit;
if (new == IRQ_IDLE)
piup->csrs.INTX_Status_Register &= ~val;
else
piup->csrs.INTX_Status_Register |= val;
}
}
/*
* Get INTx state
*/
int piu_get_intx_state(pcie_model_t *piup, uint8_t ino)
{
int val = 0, bit;
switch (ino) {
case INO_INTA:
case INO_INTB:
case INO_INTC:
case INO_INTD:
bit = 3 - (ino - INO_INTA);
val = (piup->csrs.INTX_Status_Register >> bit) & 1;
}
return val;
}
/*
* Update Interrupt State Status Register with the new mondo state 'new'
*/
void piu_set_irq_state(pcie_model_t *piup, uint8_t ino, irq_state_t new)
{
int bit;
uint64_t *regp;
/*
* determine which status register to use in terms of ino
*/
regp = ino<32 ? &piup->csrs.Interrupt_State_Status_Register_1 : &piup->csrs.Interrupt_State_Status_Register_2;
/*
* each mondo state is encoded via a 2 bit value:
*
* 00: IRQ_IDLE
* 01: IRQ_RECEIVED
* 10: IRQ_RESERVED
* 11: IRQ_PENDING
*/
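/*
 * For example, ino 34 maps to Interrupt_State_Status_Register_2
 * bits 5:4 (bit = 2 * (34 & 31) = 4).
 */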
bit = 2 * (ino&31);
/*
* update the approriate bits of the register
*/
*regp &= ~(IRQ_STATE_MASK << bit);
*regp |= ((uint64_t)new << bit);
/*
* update the INTx status register if ino = 20-23
*/
piu_set_intx_state(piup, ino, new);
}
/*
* Get mondo interrupt state
*/
int piu_get_irq_state(pcie_model_t *piup, uint8_t ino)
{
int bit;
uint64_t val;
irq_state_t state;
bit = 2* (ino&31);
val = ino<32 ? piup->csrs.Interrupt_State_Status_Register_1: piup->csrs.Interrupt_State_Status_Register_2;
state = (val >> bit) & IRQ_STATE_MASK;
return state;
}
/*
* This function handles memory write request for both MSI and MSI-X.
*
* Arguments:
* piup: handle to pcie_model structure
* msi_addr: host address for MSI/MSI-X write, can be either 32 bit or 64 bit
* msi_datap: pointer to the MSI/MSI-X data of size 2 or 4 byte
* count: can be either 2 byte or 4 byte to indicate MSI or MSI-X request
* req_id: 16 bit requester Id
* mode: addressing mode, can be either 32 bit or 64 bit address
*
*/
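/*
 * Worked example: a 2-byte MSI write with data 0x0045 yields
 * map_idx 0x45; if MSI_Mapping_Register[0x45] has V = 1 (bit 63) and
 * EQWR_N = 0 (bit 62), the record assembled below is queued on event
 * queue eqnum = bits 5:0 of that mapping register.
 */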
bool_t piu_msi_write(pcie_model_t *piup, uint64_t msi_addr, uint8_t *msi_datap,
int count, uint16_t req_id, dev_mode_t mode)
{
uint64_t mapping;
int map_idx, v, eqwr_n, eqnum;
uint32_t msi_vector=0;
/*
* validate count to determine MSI vs. MSI-X
*/
if ((count != 2) && (count != 4)) {
DBGDEV( lprintf(-1, "piu_msi_write: invalid msi_data size = %d \n", count); );
return false;
}
/*
* PIU implements a total of 256 MSI_Mapping_Registers as the mapping
* table to allow SW to map each MSI/MSI-X request to an event queue.
* The lower 8 bits of the MSI/MSI-X data field is used to index into
* the mapping table.
*/
memcpy((void *)&msi_vector, (void *)msi_datap, count);
map_idx = (count == 2) ? *(uint16_t *)&msi_vector : msi_vector;
DBGDEV( lprintf(-1, "piu_msi_write: addr = 0x%lx data = %d\n", msi_addr, map_idx); );
mapping = piup->csrs.MSI_Mapping_Register[map_idx];
v = GETMASK64(mapping, 63, 63);
eqwr_n = GETMASK64(mapping, 62, 62);
eqnum = GETMASK64(mapping, 5, 0);
if (v && !eqwr_n) {
/*
* assemble the event queue record
*/
eq_record_t *record = (eq_record_t *)Xcalloc(1, eq_record_t);
record->fmt_type = mode ? TLP_MSI_FMT_TYPE_IS64 : TLP_MSI_FMT_TYPE_IS32;
/*
* always one DW
*/
record->length = 1;
record->addr_15_2 = GETMASK64(msi_addr, 15, 2);
record->rid = req_id;
record->addr_hi = GETMASK64(msi_addr, 63, 16);
/*
* Remove byte swapping here, as per MSI-X RFE P1584,
* Fire 2.0 no longer loads MSI/MSI-X data into event
* queue record in the Little-Endian format.
*
* Note: the current PRM for both Fire rev 2.1 and
* N2 PIU rev 1.3 is not up-to-date with this
* change yet.
*/
if (count == 2)
record->data0 = *(uint16_t *)&msi_vector;
if (count == 4) {
record->data0 = msi_vector & MASK64(15,0);
record->data1 = (msi_vector & MASK64(31,16)) >> 16;
}
piu_eq_write(piup, eqnum, record, req_id);
}
return true;
}
/*
* Write event queue records to the queue and update the tail pointer of the queue.
*/
bool_t piu_eq_write(pcie_model_t *piup, int eqnum, eq_record_t *record, uint16_t req_id)
{
int overr, state;
bool_t status = false; /* default when the queue is inactive or overflowed */
overr = GETMASK64(piup->csrs.Event_Queue_Tail_Register[eqnum], 57, 57);
state = GETMASK64(piup->csrs.Event_Queue_State_Register[eqnum], 2, 0);
DBGDEV( lprintf(-1, "piu_eq_write: eqnum = %d state = %d\n", eqnum, state); );
if ((state == EQ_ACTIVE) && !overr) {
int head = GETMASK64(piup->csrs.Event_Queue_Head_Register[eqnum], 6, 0);
int tail = GETMASK64(piup->csrs.Event_Queue_Tail_Register[eqnum], 6, 0);
int next = (tail + 1) % EQ_NUM_ENTRIES;
bool_t full = (next == head);
bool_t empty = (head == tail);
if (full) {
/*
* set the overflow bit and generate a DMU internal interrupt (ino 62)
*/
piup->csrs.Event_Queue_Tail_Register[eqnum] |= MASK64(57,57);
piup->csrs.Event_Queue_State_Register[eqnum] = EQ_ERROR;
piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
}
else {
/*
* determine the EQ record address to write the event queue record
*/
uint64_t base, offset, rec_va, pa;
dev_mode_t mode = PCIE_IS64;
int sun4v;
base = piup->csrs.Event_Queue_Base_Address_Register & MASK64(63,19);
offset = (eqnum * EQ_NUM_ENTRIES + tail) * EQ_RECORD_SIZE;
rec_va = base + offset;
DBGDEV( lprintf(-1, "piu_eq_write: EQ record va = 0x%llx\n", rec_va); );
/*
* setup the access mode of the EQ record address in terms of the
* IOMMU mode (see N2 PRM rev 1.2, section 16.3.4)
*/
sun4v = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 2, 2);
if ((!sun4v) && ((rec_va & MASK64(63, 32)) == 0))
mode = PCIE_IS32;
/*
* pass the EQ address to IOMMU for translation
*/
status = piu_iommu(piup, rec_va, req_id, DA_Store, mode, &pa);
/*
* write the record to the event queue
*/
if (status) {
config_proc_t *procp = piup->config_procp;
status = procp->proc_typep->dev_mem_access(procp, pa, (uint8_t *)record,
sizeof(*record), DA_Store);
/*
* update the tail pointer
*/
piup->csrs.Event_Queue_Tail_Register[eqnum] = next & MASK64(6,0);
/*
* if the queue is empty, generate a mondo interrupt depending on state
*/
if (empty)
piu_mondo_interrupt(piup, INO_EQLO+eqnum, IRQ_RECEIVED);
}
}
}
return (status);
}
void piu_init_error_list()
{
int i;
imu_error_entry_t imu_error_init_list[] = {
{ PIU_ERR ( MSI_NOT_EN_P, 0) },
{ PIU_ERR ( COR_MES_NOT_EN_P, 1) },
{ PIU_ERR ( NONFATAL_MES_NOT_EN_P, 2) },
{ PIU_ERR ( FATAL_MES_NOT_EN_P, 3) },
{ PIU_ERR ( PMPME_MES_NOT_EN_P, 4) },
{ PIU_ERR ( PMEACK_MES_NOT_EN_P, 5) },
{ PIU_ERR ( MSI_PAR_ERR_P, 6) },
{ PIU_ERR ( MSI_MAL_ERR_P, 7) },
{ PIU_ERR ( EQ_NOT_EN_P, 8) },
{ PIU_ERR ( EQ_OVER_P, 9) },
{ PIU_ERR ( MSI_NOT_EN_S, 32) },
{ PIU_ERR ( COR_MES_NOT_EN_S, 33) },
{ PIU_ERR ( NONFATAL_MES_NOT_EN_S, 34) },
{ PIU_ERR ( FATAL_MES_NOT_EN_S, 35) },
{ PIU_ERR ( PMPME_MES_NOT_EN_S, 36) },
{ PIU_ERR ( PMEACK_MES_NOT_EN_S, 37) },
{ PIU_ERR ( MSI_PAR_ERR_S, 38) },
{ PIU_ERR ( MSI_MAL_ERR_S, 39) },
{ PIU_ERR ( EQ_NOT_EN_S, 40) },
{ PIU_ERR ( EQ_OVER_S, 41) },
{ -1, (char *)0 },
};
mmu_error_entry_t mmu_error_init_list[] = {
{ PIU_ERR ( BYP_ERR_P, 0) },
{ PIU_ERR ( BYP_OOR_P, 1) },
{ PIU_ERR ( SUN4V_INV_PG_SZ_P, 2) },
{ PIU_ERR ( SPARE1_P, 3) },
{ PIU_ERR ( TRN_ERR_P, 4) },
{ PIU_ERR ( TRN_OOR_P, 5) },
{ PIU_ERR ( TTE_INV_P, 6) },
{ PIU_ERR ( TTE_PRT_P, 7) },
{ PIU_ERR ( TTC_DPE_P, 8) },
{ PIU_ERR ( TTC_CAE_P, 9) },
{ PIU_ERR ( SPARE2_P, 10) },
{ PIU_ERR ( SPARE3_P, 11) },
{ PIU_ERR ( TBW_DME_P, 12) },
{ PIU_ERR ( TBW_UDE_P, 13) },
{ PIU_ERR ( TBW_ERR_P, 14) },
{ PIU_ERR ( TBW_DPE_P, 15) },
{ PIU_ERR ( IOTSBDESC_INV_P, 16) },
{ PIU_ERR ( IOTSBDESC_DPE_P, 17) },
{ PIU_ERR ( SUN4V_VA_OOR_P, 18) },
{ PIU_ERR ( SUN4V_VA_ADJ_UF_P, 19) },
{ PIU_ERR ( SUN4V_KEY_ERR_P, 20) },
{ PIU_ERR ( BYP_ERR_S, 32) },
{ PIU_ERR ( BYP_OOR_S, 33) },
{ PIU_ERR ( SUN4V_INV_PG_SZ_S, 34) },
{ PIU_ERR ( SPARE1_S, 35) },
{ PIU_ERR ( TRN_ERR_S, 36) },
{ PIU_ERR ( TRN_OOR_S, 37) },
{ PIU_ERR ( TTE_INV_S, 38) },
{ PIU_ERR ( TTE_PRT_S, 39) },
{ PIU_ERR ( TTC_DPE_S, 40) },
{ PIU_ERR ( TTC_CAE_S, 41) },
{ PIU_ERR ( SPARE2_S, 42) },
{ PIU_ERR ( SPARE3_S, 43) },
{ PIU_ERR ( TBW_DME_S, 44) },
{ PIU_ERR ( TBW_UDE_S, 45) },
{ PIU_ERR ( TBW_ERR_S, 46) },
{ PIU_ERR ( TBW_DPE_S, 47) },
{ PIU_ERR ( IOTSBDESC_INV_S, 48) },
{ PIU_ERR ( IOTSBDESC_DPE_S, 49) },
{ PIU_ERR ( SUN4V_VA_OOR_S, 50) },
{ PIU_ERR ( SUN4V_VA_ADJ_UF_S, 51) },
{ PIU_ERR ( SUN4V_KEY_ERR_S, 52) },
{ -1, (char *)0 },
};
for (i = 0; imu_error_init_list[i].error_type != -1; i ++)
imu_error_list[imu_error_init_list[i].error_type] = imu_error_init_list[i];
for (i = 0; mmu_error_init_list[i].error_type != -1; i ++)
mmu_error_list[mmu_error_init_list[i].error_type] = mmu_error_init_list[i];
}
/*
* imu error handler
*/
void piu_simulate_imu_error(pcie_model_t *piup, uint64_t imu_error)
{
uint64_t error_code, intr_enable, imu_ie;
int i;
/*
* loop over the error bits and raise the error only if
* the interrupt is enabled
*/
imu_ie = piup->csrs.IMU_Interrupt_Enable_Register;
for (i=0; i<IMU_ERROR_MAXNUM; i++) {
error_code = imu_error_list[i].error_code;
intr_enable = imu_error_list[i].intr_enable;
if ((imu_error & error_code) && (imu_ie & intr_enable))
piu_raise_imu_error(piup, error_code);
}
}
void piu_raise_imu_error(pcie_model_t *piup, uint64_t error_code)
{
piu_csr_t *csrs = &piup->csrs;
uint64_t dmc_cbie;
bool_t dmu, imu;
/*
* update the error status register
*/
csrs->IMU_Error_Status_Set_Register |= error_code;
/*
* generate INO_DMU mondo interrupt
*/
dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;
dmu = GETMASK64(dmc_cbie, 63, 63);
imu = GETMASK64(dmc_cbie, 0, 0);
if (dmu && imu) {
csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(0,0);
piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
}
}
/*
* mmu error handler
*/
void piu_simulate_mmu_error(pcie_model_t *piup, uint64_t mmu_error)
{
uint64_t error_code, intr_enable, mmu_ie;
int i;
/*
* loop over the error bits and raise the error only if
* the interrupt is enabled
*/
mmu_ie = piup->csrs.MMU_Interrupt_Enable_Register;
for (i=0; i<MMU_ERROR_MAXNUM; i++) {
error_code = mmu_error_list[i].error_code;
intr_enable = mmu_error_list[i].intr_enable;
if ((mmu_error & error_code) && (mmu_ie & intr_enable))
piu_raise_mmu_error(piup, error_code);
}
}
void piu_raise_mmu_error(pcie_model_t *piup, uint64_t error_code)
{
piu_csr_t *csrs = &piup->csrs;
uint64_t dmc_cbie;
bool_t dmu, mmu;
/*
* update the error status register
*/
csrs->MMU_Error_Status_Set_Register |= error_code;
/*
* generate INO_DMU mondo interrupt
*/
dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;
dmu = GETMASK64(dmc_cbie, 63, 63);
mmu = GETMASK64(dmc_cbie, 0, 0);
if (dmu && mmu) {
csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(1,1);
piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
}
}
/*
* Look up the PIU register type from its offset value
*/
pcie_csr_t piu_offset2reg(uint64_t offset, int *regx)
{
int i;
/*
* first check the few interrupt registers
* (PIU supports fewer of these registers than Niagara 1's Fire)
*/
if ((offset > 0x6011D8) && (offset < 0x6011F0))
return UND_PCIE_CSRS;
if ((offset > 0x6015D8) && (offset < 0x6015F0))
return UND_PCIE_CSRS;
for (i = 0; i < NUM_PCIE_CSRS; i++) {
int nwords, diff = offset - pcie_csrs[i].offset;
if (diff == 0) {
*regx = pcie_csrs[i].regx;
return i;
}
if (diff < 0)
return UND_PCIE_CSRS;
if ((nwords = pcie_csrs[i].nwords) != 1) {
int wordx = diff/8;
if (wordx < nwords) {
*regx = pcie_csrs[i].regx + wordx;
return i;
}
}
}
return UND_PCIE_CSRS;
}
/*
* Add the new PCIE device to the linked list
*/
void piu_register_pcie_device(pcie_model_t *piup, pcie_dev_inst_t *new)
{
pcie_dev_inst_t *temp;
if (piup->pcie_devices_list_head == NULL) {
piup->pcie_devices_list_head = new;
return;
}
temp = piup->pcie_devices_list_head;
while (temp->next != NULL)
temp = temp->next;
temp->next = new;
}
/*
* Get the PCIE end device with the given request ID
*/
pcie_dev_inst_t *
piu_find_pcie_dev(pcie_model_t *piup, void *dptr, pcie_space_t space_id)
{
pcie_dev_inst_t *pcie_dev;
int found = 0;
config_dev_t *config_devp;
pcie_dev = piup->pcie_devices_list_head;
if (space_id == PCIE_CFG) {
uint16_t req_id = *(uint16_t *)dptr;
while (pcie_dev != NULL) {
if (pcie_dev->req_id == req_id) {
found = 1;
break;
}
pcie_dev = pcie_dev->next;
}
} else {
while (pcie_dev != NULL) {
uint64_t paddr = *(uint64_t *)dptr;
if (pcie_dev->pcie_dp->bar_test(pcie_dev->hdl, paddr, space_id)) {
found = 1;
break;
}
pcie_dev = pcie_dev->next;
}
}
if (!found)
return(NULL);
return(pcie_dev);
}