* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: piu.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* ========== Copyright Header End ============================================
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
#pragma ident "@(#)piu.c 1.15 07/09/19 SMI"
#include "tsparcv9internal.h"
/*
 * Forward declarations for the PIU (PCI Express Interface Unit) device
 * model entry points exported to the legion simulator framework.
 * NOTE(review): this file appears to be lossily extracted -- statements
 * are split mid-token across lines and structural lines (braces,
 * preprocessor guards, comment markers) are missing. Comments added
 * below are hedged accordingly.
 */
static void piu_parse(config_dev_t
*);
static void piu_init(config_dev_t
*);
static void piu_dump(config_dev_t
*);
void piu_init_csr(pcie_model_t
*piup
);
static bool_t
piu_cpu_access(simcpu_t
*, config_addr_t
*,
tpaddr_t offset
, maccess_t op
, uint64_t * regp
);
static void parse_pcie_device(char *devname
, config_dev_t
*config_devp
);
extern dev_type_t
*find_dev_type(char * devnamep
);
/* Global list caching the PCIE end devices attached to this PIU model. */
LIST_DEF(dev_child_cache
, dev_child_type_t
);
#define PIU_NAME "piu_vf"
#define PIU_TYPE dev_type_piu_vf
/*
 * NOTE(review): PIU_TYPE is defined twice in the visible text. The two
 * definitions (and the two PIU_NAME variants they pair with) were
 * presumably selected by a stripped #if/#else -- confirm against the
 * original source before building.
 */
#define PIU_TYPE dev_type_piu
* PIU interfaces exported to legion
/*
 * NOTE(review): the two lines below look like residue of the dev_type_t
 * export table initializer (non-cacheable access vector member).
 */
generic_device_non_cacheable
,
* Complete the creation and parsing of the PCI Express Interface Unit (PIU).
* The 'piu' directive specifies the address mappings of two noncacheable
* - an 8 MB region for PIO access to the PCIE CSRs within PIU
* - an 64 GB region that maps to sub-regions for
* It also supports a 'pcie_device' directive which is used to define each PCIE
* device connected to piu.
* For Niagara2, the format in the conf file is as follows:
* pcie_device "dev_name" <pcie device properties>;
/*
 * Conf-file parse entry point for the 'piu' directive. Allocates the
 * pcie_model_t instance, binds it to config_devp->devp, then consumes
 * optional 'node_id', 'dmu_local' and 'pcie_device' sub-directives.
 * NOTE(review): lexer-driving calls (lex_get_token etc.) and the
 * enclosing loop/braces are missing from this extracted view.
 */
void piu_parse(config_dev_t
*config_devp
)
DBGDEV( lprintf(-1, "piu_parse: parsing device %d\n", config_devp
->device_id
); );
/* Allocate the model state and record the processor type name. */
piup
= (void *)Xcalloc(1, pcie_model_t
);
proc_type_namep
= LIST_ENTRY(config_devp
->domainp
->procs
, 0)->proc_typep
->proc_type_namep
;
piup
->proc_type_namep
= strdup(proc_type_namep
);
config_devp
->devp
= (void *)piup
;
* For N2 case where the node id is zero, and is not parsed from the
/* Default to processor 0 when no node_id directive bound one. */
if (!(piup
->config_procp
))
piup
->config_procp
= LIST_ENTRY(config_devp
->domainp
->procs
, 0);
* Continue parsing the 'piu' directive
return; /* nothing more to parse */
break; /* must be a pcie_device, so continue */
lex_fatal("expecting either ; or Left Brace when parsing PIU");
* We've found a Left Brace so lets continue the parsing - should find a
if (tok
== T_R_Brace
) break; /* We're done */
lex_fatal("expected pcie_device directive when parsing piu");
/* 'node_id' sub-directive: bind this PIU to the given node's proc. */
if (streq(lex
.strp
, "node_id")) {
NODE2IDX(lex
.val
, config_devp
->domainp
, idx
);
piup
->config_procp
= LIST_ENTRY(config_devp
->domainp
->procs
, idx
);
DBGDEV(lprintf(-1, "PIU addresses %llx to %llx assigned to nodeid %d\n",
config_devp
->addrp
->baseaddr
, config_devp
->addrp
->topaddr
, lex
.val
););
/* 'dmu_local' sub-directive: register the node-local DMU CSR window. */
if (streq(lex
.strp
, "dmu_local")) {
tpaddr_t baseaddr
, topaddr
;
static bool_t ins_once
= false;
if (tok
== T_Plus
) is_size
= true; else lex_unget();
lex_fatal("top address <= base address with device %s",
config_devp
->dev_typep
->dev_type_namep
);
/* only need to do this once since same local address for all nodes */
insert_domain_address(config_devp
->domainp
, config_devp
, baseaddr
, topaddr
);
/* Check if we've found a pcie_device */
if (streq(lex
.strp
, "pcie_device")) {
* Now parse the name of the PCIE device
pcie_dev_name
= Xstrdup(lex
.strp
);
lex_fatal("Expected a pcie_device name directive");
* We've found a PCIE device and we got it's name, so
* lets locate the library for that device and let it
DBGDEV( lprintf(-1, "piu_parse: found a [%s] device\n", pcie_dev_name
); );
parse_pcie_device(pcie_dev_name
, config_devp
);
/*
 * Upbound access vector handed to every attached PCIE end device so it
 * can call back into PIU (DMA, MSI).
 * NOTE(review): the initializer members are missing from this view.
 */
static struct pcie_upbound piu_up
= {
* We've found a pcie_device within the 'piu' directive, so we need to
* locate the library for that device. The PCIE device is implemented by
* the use of a new device type, 'pcie_dev_type_t', which is an extended
* format from the existing DEV_TYPE.
/*
 * Locate the device library for 'devnamep', instantiate it, let it parse
 * its own conf-file section, register it with this PIU, and add it to
 * the global dev_child_cache list.
 */
parse_pcie_device(char *devnamep
, config_dev_t
*config_devp
)
pcie_dev_type_t
*pcie_dev_typep
= NULL
;
pcie_dev_inst_t
*pcie_dev_instp
= NULL
;
pcie_dwbound_t
*pcie_dp
= NULL
;
dev_child_type_t
*childp
= NULL
;
piup
= (pcie_model_t
*)config_devp
->devp
;
* Load PCIE end point device model
pcie_dev_typep
= (pcie_dev_type_t
*)find_dev_type(devnamep
);
if (pcie_dev_typep
== NULL
) {
DBGDEV( lprintf(-1, "Error! Cannot find device for [%s]\n", devnamep
); );
pcie_dev_instp
= xmalloc(sizeof(*pcie_dev_instp
), __LINE__
, __FILE__
);
if (pcie_dev_instp
== NULL
) {
perror("couldn't allocate memory for pcie_dev_instp");
pcie_dev_instp
->pcie_modp
= piup
;
* Export PIU internal functions through the pcie_access_t interface
* to support upbound transactions (from PCIE end device to PIU)
pcie_dev_instp
->pcie_up
= &piu_up
;
pcie_dev_typep
->dev_init(pcie_dev_instp
);
* Call the device specific parse routine to continue parsing the conf file
pcie_dev_typep
->dev_parse(pcie_dev_instp
->hdl
);
* Register this PCIE device with the piu model
piu_register_pcie_device(piup
, pcie_dev_instp
);
* Create a global list for dev_child_cache and each entry
* contains a pointer to the device_id of the PCIE end device
* connected to the PIU model.
childp
= (dev_child_type_t
*)xmalloc(sizeof(dev_child_type_t
), __LINE__
, __FILE__
);
/* NOTE(review): "allocation" should read "allocate" -- runtime string,
 * left untouched here; fix at a code change, not a comment pass. */
perror("couldn't allocation memory for dev_child_type_t");
childp
->child_devicep
= (void *)pcie_dev_instp
;
childp
->parent_device_id
= config_devp
->device_id
;
LIST_ADD_PTR(dev_child_cache
, dev_child_type_t
, childp
);
* Initialize the PIU model after parsing is complete
/*
 * Post-parse init hook: initializes the child-device list, links the
 * model back to its config_dev_t, then (per the trailing comments)
 * applies power-on-reset CSR values and builds the error lookup table.
 */
void piu_init(config_dev_t
*config_devp
)
LIST_INIT(dev_child_cache
, dev_child_type_t
);
piup
= (pcie_model_t
*)config_devp
->devp
;
piup
->config_devp
= config_devp
;
* init PIU CSR with power on reset
* init error lookup table
/*
 * Debug dump hook for the PIU device.
 * NOTE(review): body is empty/stripped in this view.
 */
void piu_dump(config_dev_t
* config_devp
)
* Initialize PIU CSR with power on reset value
/*
 * Apply power-on-reset values to the PIU CSR block; the visible portion
 * marks every Event Queue State Register entry as EQ_IDLE.
 * NOTE(review): declarations of 'i'/'nwords' and the reset of the other
 * CSRs are missing from this extracted view.
 */
void piu_init_csr(pcie_model_t
*piup
)
piu_csr_t
*csrs
= &piup
->csrs
;
nwords
= pcie_csrs
[PIU_Event_Queue_State_Register
].nwords
;
for (i
= 0; i
< nwords
; i
++)
csrs
->Event_Queue_State_Register
[i
] = EQ_IDLE
;
/*
 * CPU (downbound PIO) access dispatcher for the PIU address windows.
 * Computes pa = cap->baseaddr + offset, normalizes local/remote DMU CSR
 * addresses in multi-node domains, then routes the access by decoded
 * region: Cfg/IO, MEM32, MEM64, or the 8MB PIU CSR window.
 * Returns bool_t success as reported by the per-region access routine.
 */
bool_t
piu_cpu_access(simcpu_t
*sp
, config_addr_t
*cap
, tpaddr_t offset
, maccess_t memop
, uint64_t *regp
)
uint64_t addr
, pa
= cap
->baseaddr
+ offset
;
* ACCESS DMU PCI-Express Space
* ---------------------------------------------------------------------
* Node 0 0x88.0000.0000 + 8M 0xC0.0000.0000 - 0xCF.FFFF.FFFF(64GB)
* ACCESS DMU PCI-Express Space
* ---------------------------------------------------------------------
* Local 0x88.0000.0000 + 8M none
* Node 0 0xD2.0000.0000 + 8M 0xC0.0000.0000 - 0xC3.FFFF.FFFF(16GB)
* Node 1 0xD6.0000.0000 + 8M 0xC4.0000.0000 - 0xC7.FFFF.FFFF(16GB)
* Node 2 0xDA.0000.0000 + 8M 0xC8.0000.0000 - 0xCB.FFFF.FFFF(16GB)
* Node 3 0xDE.0000.0000 + 8M 0xCC.0000.0000 - 0xCF.FFFF.FFFF(16GB)
domainp
= sp
->config_procp
->domainp
;
/* Multi-node domains need local->remote address normalization. */
if (domainp
->procs
.count
> 1) {
* If local DMU CSR access, need to convert to Node X(this node) DMU CSR
* Use the simcpu to get the correct node_id and then get the correct cap
if (cap
->baseaddr
== PHYS_ADDR_DMU
) {
node_id
= sp
->config_procp
->proc_id
;
pa
= PHYS_ADDR_DMU_REMOTE(node_id
) + offset
;
cap
= find_domain_address(domainp
, pa
);
* accessing 64GB region which may belong to a single node or divided
* into 16GB sections for each node
else if ((PHYS_ADDR_PIU_LB
<= cap
->baseaddr
) && \
(((cap
->topaddr
-1) & PHYS_ADDR_MASK
) <= PHYS_ADDR_PIU_UB
)) {
domainp
= cap
->config_devp
->domainp
;
/* each node gets 16GB - case where there are multiple nodes */
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (cap
->baseaddr
== PHYS_ADDR_PIU_REMOTE(node_id
))
* to catch case where trying to access address space
* of a node that is not populated
if (idx
== domainp
->procs
.count
)
fatal("PIU access to pa %llx failed. Parent "
"node for this address not present.\n",pa
);
* If remote DMU CSR access, use cap to get at the node_id.
domainp
= cap
->config_devp
->domainp
;
for (idx
= 0; idx
<domainp
->procs
.count
; idx
++) {
node_id
= LIST_ENTRY(domainp
->procs
, idx
)->proc_id
;
if (cap
->baseaddr
== PHYS_ADDR_DMU_REMOTE(node_id
))
* to catch case where trying to access address space
* of a node that is not populated
if (idx
== domainp
->procs
.count
)
/* NOTE(review): this fatal() has a %llx conversion but no matching
 * 'pa' argument (compare the earlier call above) -- undefined
 * behavior; should pass pa. */
fatal("PIU Access to pa %llx failed. Parent "
"node for this address not present.\n");
piup
= (pcie_model_t
*) cap
->config_devp
->devp
;
* N2 PIU only supports 1,2,4 and 8-byte aligned PIO access the 64GB region
* and 8-byte to the CSRs in the 8MB region (section 16.3.2.1, N2 PRM Rev. 1.0)
switch(memop
& MA_Size_Mask
) {
/* Route by decoded sub-region; addr receives the region-local offset. */
region
= piu_decode_region(sp
, piup
, pa
, &addr
);
/* addr bit 28 selects Cfg (0) vs IO (1) within the Cfg/IO region. */
cfgio
= GETMASK64(addr
, 28, 28);
DBGDEV( lprintf(sp
->gid
, "ERROR: illegal 8 byte access to PCI "
"Cfg/IO addr = 0x%llx\n on node %d", addr
, node_id
); );
uint64_t ioaddr
= addr
& PCIE_IO_ADDR_MASK
;
status
= piu_io_access(piup
, memop
, ioaddr
, count
, regp
);
status
= piu_cfg_access(piup
, memop
, addr
, count
, regp
);
status
= piu_mem_access(piup
, memop
, addr
, count
, regp
, PCIE_MEM32
);
status
= piu_mem_access(piup
, memop
, addr
, count
, regp
, PCIE_MEM64
);
status
= piu_csr_access(sp
, piup
, memop
, addr
, regp
);
lprintf(sp
->gid
, "ERROR: out of range access to PCIE space: "
"pc=0x%llx pa=0x%llx on node %d\n", sp
->pc
, pa
, node_id
);
* Decode PCIE non-cachable regions
* - 64GB region partitioned into three subregions as
* * PCIE-A Cfg/IO (512 MB)
* * PCIE-A MEM32 (16 MB - 2 GB)
* * PCIE-A MEM64 (16 MB - 32 GB)
/*
 * Classify a physical address into a PIU region and produce the
 * region-local offset in *offset. DMU (local or this node's remote
 * alias) addresses map to the 8MB CSR window; 64GB-window addresses are
 * tried against the Cfg/IO, MEM32 and MEM64 decoders in turn.
 * Returns PIU_REGION_UNMAPPED when nothing matches.
 */
piu_region_t
piu_decode_region(simcpu_t
*sp
, pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
uint64_t reg_addr
= pa
& PHYS_ADDR_MASK
;
int node_id
=piup
->config_procp
->proc_id
;
if ((reg_addr
== (PHYS_ADDR_DMU_REMOTE(node_id
))) || (reg_addr
== (PHYS_ADDR_DMU
))) {
if (reg_addr
== (PHYS_ADDR_DMU
)) {
/* Local DMU alias: only the 8MB-aligned window is legal. */
if ((pa
& DMU_8MB_GAP_MASK
) == 0) {
*offset
= pa
& DMU_8MB_OFFSET_MASK
;
/* should be a fatal() */
lprintf(sp
->gid
, "ERROR: illegal access to PIU CSRs: "
"pc=0x%llx pa=0x%llx on node %d\n",
if ((reg_addr
>=PHYS_ADDR_PIU_LB
) && (reg_addr
<=PHYS_ADDR_PIU_UB
)) {
if (piu_decode_cfgio(piup
, pa
, offset
)) {
} else if (piu_decode_mem32(piup
, pa
, offset
)) {
} else if (piu_decode_mem64(piup
, pa
, offset
)) {
return PIU_REGION_UNMAPPED
;
* Decode PCIE Cfg/IO region
/*
 * Match 'pa' against the NCU-programmed Cfg/IO mapping; on a hit,
 * store the Cfg/IO-local offset (masked with PCIE_IOCON_ADDR_MASK)
 * in *offset. Returns bool_t hit/miss.
 */
bool_t
piu_decode_cfgio(pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
/* Fetch the NCU pseudo-device that owns the region mapping table. */
piup
->config_procp
->proc_typep
->get_pseudo_dev(piup
->config_procp
, "ncu", (void *)&ncup
);
map
= &ncup
->map
[PIU_REGION_CFGIO
];
if (map
->enable
&& ((pa
& map
->mask
) == map
->base
)) {
*offset
= pa
& PCIE_IOCON_ADDR_MASK
;
* Decode PCIE MEM32 region
/*
 * Match 'pa' against the NCU-programmed MEM32 mapping; on a hit, store
 * the offset within the region (pa modulo the region size) in *offset.
 * Returns bool_t hit/miss.
 */
bool_t
piu_decode_mem32(pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
piup
->config_procp
->proc_typep
->get_pseudo_dev(piup
->config_procp
, "ncu", (void *)&ncup
);
map
= &ncup
->map
[PIU_REGION_MEM32
];
if (map
->enable
&& ((pa
& map
->mask
) == map
->base
)) {
*offset
= pa
& (map
->size
- 1);
* Decode PCIE MEM64 region
/*
 * Match 'pa' against the NCU-programmed MEM64 mapping; on a hit, the
 * PCIE-side address is formed from the unmapped low bits OR'ed with the
 * Mem_64_PCIE_Offset_Register CSR, stored in *offset.
 * Returns bool_t hit/miss.
 */
bool_t
piu_decode_mem64(pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
piu_csr_t
*csrs
= &piup
->csrs
;
piup
->config_procp
->proc_typep
->get_pseudo_dev(piup
->config_procp
, "ncu", (void *)&ncup
);
map
= &ncup
->map
[PIU_REGION_MEM64
];
if (map
->enable
&& ((pa
& map
->mask
) == map
->base
)) {
pcie_offset
= csrs
->Mem_64_PCIE_Offset_Register
;
*offset
= (pa
& ~map
->mask
& PCIE_MEM64_ADDR_MASK
) | pcie_offset
;
* Access PCIE CSRs (downbound)
/*
 * Read/write a PIU CSR at 'offset' (8-byte accesses only). Maps the
 * offset to a register index, enforces read-only registers on writes,
 * applies per-register write masks and side effects (interrupt clears,
 * event-queue state machine, MSI mapping, IMU/MMU error set/clear), and
 * synthesizes read values for load-only/status registers.
 * Returns false on illegal size, unknown offset or RO-write.
 * NOTE(review): "®x" / "®name" below are HTML-entity mangling of
 * "&regx" / "&regname" from the extraction -- confirm against the
 * original source.
 */
bool_t
piu_csr_access(simcpu_t
*sp
, pcie_model_t
*piup
, maccess_t memop
, uint64_t offset
, uint64_t *regp
)
uint64_t old_value
, value
, new_error
, *csrs
= (uint64_t *)&piup
->csrs
;
int node_id
=piup
->config_procp
->proc_id
;
* PIU only supports 8-byte accesses to registers
size
= memop
& MA_Size_Mask
;
if (size
!= MA_Size64
) return false;
index
= piu_offset2reg(offset
, ®x
);
if (index
== UND_PCIE_CSRS
) {
DBGDEV(lprintf(sp
->gid
, "Access illegal PCIE register at offset "
"= 0x%llx on node %d\n", offset
, node_id
); );
* read/write PCIE registers
wordx
= regx
- pcie_csrs
[index
].regx
;
/* Build a printable register name, with [wordx] for multi-word regs. */
strcpy(regname
, pcie_csrs
[index
].name
);
if (pcie_csrs
[index
].nwords
> 1)
sprintf(®name
[strlen(regname
)], "[%d]", wordx
);
* check on Read only registers
case PIU_Interrupt_State_Status_Register_1
:
case PIU_Interrupt_State_Status_Register_2
:
case PIU_INTX_Status_Register
:
case PIU_Event_Queue_State_Register
:
case PIU_IMU_Interrupt_Status_Register
:
case PIU_DMC_Core_and_Block_Error_Status_Register
:
case PIU_MMU_Interrupt_Status_Register
:
case PIU_ILU_Interrupt_Status_Register
:
case PIU_Packet_Scoreboard_DMA_Register_Set
:
case PIU_Packet_Scoreboard_PIO_Register_Set
:
case PIU_Transaction_Scoreboard_Register_Set
:
case PIU_Transaction_Scoreboard_Status_Register
:
case PIU_PEU_Egress_Credits_Consumed_Register
:
case PIU_PEU_Egress_Credit_Limit_Register
:
case PIU_PEU_Egress_Retry_Buffer_Register
:
case PIU_PEU_Ingress_Credits_Allocated_Register
:
case PIU_PEU_Ingress_Credits_Received_Register
:
case PIU_PEU_Other_Event_Interrupt_Status_Register
:
case PIU_PEU_Device_Capabilities_Register
:
case PIU_PEU_Device_Status_Register
:
case PIU_PEU_Link_Capabilities_Register
:
case PIU_PEU_Link_Status_Register
:
case PIU_PEU_Uncorrectable_Error_Interrupt_Status_Register
:
case PIU_PEU_Correctable_Error_Interrupt_Status_Register
:
case PIU_PEU_CXPL_SERDES_Revision_Register
:
case PIU_PEU_CXPL_AckNak_Latency_Timer_Register
:
case PIU_PEU_CXPL_Replay_Timer_Register
:
case PIU_PEU_CXPL_Core_Status_Register
:
case PIU_PEU_CXPL_Event_Error_Interrupt_Status_Register
:
case PIU_PEU_Link_Bit_Error_Counter_II_Register
:
case PIU_PEU_SERDES_Receiver_Lane_Status_Register
:
case PIU_PEU_SERDES_Transmitter_Status_Register
:
DBGDEV(lprintf(sp
->gid
, "Error: Write Read-Only Register "
"'%s' offset=0x%llx value=0x%llx on node %d\n",
pcie_csrs
[index
].name
, offset
, *regp
, node_id
); );
return false; /* FIXME: should trap on the error */
DBGDEV(lprintf(sp
->gid
, "Write PIU register '%s' at offset = "
"0x%llx value = 0x%llx on node %d\n",
pcie_csrs
[index
].name
, offset
, *regp
, node_id
); );
/* Write side effects, by register. */
case PIU_Interrupt_Clear_Registers
:
piu_set_irq_state(piup
, wordx
+INO_INTA
, (*regp
& MASK64(1,0)));
case PIU_INT_A_Clear_Register
:
case PIU_INT_B_Clear_Register
:
case PIU_INT_C_Clear_Register
:
case PIU_INT_D_Clear_Register
:
piu_set_intx_state(piup
, index
-PIU_INT_A_Clear_Register
+INO_INTA
, IRQ_IDLE
);
case PIU_Event_Queue_Control_Set_Register
:
if (value
& MASK64(57,57)) {
* upon ENOVERR set, update the OVERR and STATE field
* of the EQ Tail and State register
piup
->csrs
.Event_Queue_Tail_Register
[wordx
] |= MASK64(57,57);
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_ERROR
;
if (value
& MASK64(44,44))
* upon EN bit set, update the STATE bit of
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_ACTIVE
;
case PIU_Event_Queue_Control_Clear_Register
:
if (value
& MASK64(57,57))
piup
->csrs
.Event_Queue_Tail_Register
[wordx
] &= ~MASK64(57,57);
if (value
& MASK64(47,47)) {
if (piup
->csrs
.Event_Queue_State_Register
[wordx
] == EQ_ERROR
)
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_IDLE
;
if (value
& MASK64(44,44))
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_IDLE
;
case PIU_Event_Queue_Tail_Register
:
case PIU_Event_Queue_Head_Register
:
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(6,0));
case PIU_MSI_Mapping_Register
:
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(63,62)|MASK64(5,0));
case PIU_MSI_Clear_Registers
:
if (value
& MASK64(62,62))
piup
->csrs
.MSI_Mapping_Register
[wordx
] &= ~MASK64(62,62);
case PIU_MSI_32_bit_Address_Register
:
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(31,16));
case PIU_MSI_64_bit_Address_Register
:
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(63,16));
case PIU_IMU_Error_Status_Clear_Register
:
/* W1C against the corresponding Set register. */
piup
->csrs
.IMU_Error_Status_Set_Register
&= ~value
;
case PIU_IMU_Error_Status_Set_Register
:
* W1S to simulate actual IMU error occurence
new_error
= value
& ~old_value
;
csrs
[regx
] = new_error
| old_value
;
piu_simulate_imu_error(piup
, new_error
);
case PIU_MMU_Error_Status_Clear_Register
:
piup
->csrs
.MMU_Error_Status_Set_Register
&= ~value
;
case PIU_MMU_Error_Status_Set_Register
:
* W1S to simulate actual MMU error occurence
new_error
= value
& ~old_value
;
csrs
[regx
] = new_error
| old_value
;
piu_simulate_mmu_error(piup
, new_error
);
* Check on Load-Only (write-only but reads always return 0) CSRs
case PIU_Event_Queue_Control_Set_Register
:
case PIU_Event_Queue_Control_Clear_Register
:
/* Read side effects / synthesized values, by register. */
case PIU_Interrupt_Clear_Registers
:
value
= piu_get_irq_state(piup
, wordx
+INO_INTA
);
case PIU_INT_A_Clear_Register
:
case PIU_INT_B_Clear_Register
:
case PIU_INT_C_Clear_Register
:
case PIU_INT_D_Clear_Register
:
value
= piu_get_intx_state(piup
, index
-PIU_INT_A_Clear_Register
+INO_INTA
);
case PIU_MSI_Clear_Registers
:
value
= piup
->csrs
.MSI_Clear_Registers
[wordx
] & MASK64(62, 62);
case PIU_IMU_Error_Status_Clear_Register
:
value
= piup
->csrs
.IMU_Error_Status_Set_Register
;
case PIU_IMU_Interrupt_Status_Register
:
/* Interrupt status = errors pending AND enabled. */
value
= piup
->csrs
.IMU_Error_Status_Set_Register
&
piup
->csrs
.IMU_Interrupt_Enable_Register
;
case PIU_MMU_Error_Status_Clear_Register
:
value
= piup
->csrs
.MMU_Error_Status_Set_Register
;
case PIU_MMU_Interrupt_Status_Register
:
value
= piup
->csrs
.MMU_Error_Status_Set_Register
&
piup
->csrs
.MMU_Interrupt_Enable_Register
;
DBGDEV(lprintf(sp
->gid
, "Read PCIE register '%s' at offset = "
"0x%llx value = 0x%llx on node %d\n",
pcie_csrs
[index
].name
, offset
, value
, node_id
); );
/* Only warn when the destination is not %g0 (discarded result). */
if (&(sp
->intreg
[Reg_sparcv9_g0
]) != regp
)
lprintf(sp
->gid
, "ERROR: PIU only supports 8 byte CSR access, "
"node is %d\n", node_id
);
* Access PCIE device's IO space (downbound)
/*
 * Forward a downbound IO-space access to the end device that claims
 * 'ioaddr' (looked up via piu_find_pcie_dev with PCIE_IO), dispatching
 * load vs. store on the memop opcode. Returns the device's status.
 */
bool_t
piu_io_access(pcie_model_t
*piup
, maccess_t memop
, uint64_t ioaddr
, uint32_t count
, uint64_t *regp
)
pcie_dev_inst_t
*pcie_devp
;
* find matching PCIE end device
pcie_devp
= piu_find_pcie_dev(piup
, (void *)&ioaddr
, PCIE_IO
);
DBGDEV( lprintf(-1, "PCIE device with mapped pa = 0x%lx not found!\n", ioaddr
); );
pcie_dp
= pcie_devp
->pcie_dp
;
switch (memop
& MA_Op_Mask
) {
status
= pcie_dp
->mem_access(pcie_devp
->hdl
, ioaddr
, regp
, count
, DA_Load
, PCIE_IO
);
status
= pcie_dp
->mem_access(pcie_devp
->hdl
, ioaddr
, regp
, count
, DA_Store
, PCIE_IO
);
* Access PCIE device's Configuation space (downbound)
* The data is inverted between the big and little endian format
* because PCI cfg space is structured as little endian
/*
 * Forward a downbound configuration-space access: decompose 'ioaddr'
 * into bus/device/function/register fields, form the 16-bit requester
 * ID, look up the end device by req_id (PCIE_CFG), and dispatch
 * load/store of 'reg_no' to it. Returns the device's status.
 */
bool_t
piu_cfg_access(pcie_model_t
*piup
, maccess_t memop
, uint64_t ioaddr
, uint32_t count
, uint64_t *regp
)
pcie_dev_inst_t
*pcie_devp
;
uint8_t bus_no
, dev_no
, fun_no
, reg_no
;
bus_no
= (ioaddr
& PCIE_BUS_NO_MASK
) >> PCIE_BUS_NO_SHIFT
;
dev_no
= (ioaddr
& PCIE_DEV_NO_MASK
) >> PCIE_DEV_NO_SHIFT
;
fun_no
= (ioaddr
& PCIE_FUN_NO_MASK
) >> PCIE_FUN_NO_SHIFT
;
reg_no
= ioaddr
& PCIE_REG_NO_MASK
;
req_id
= PCIE_REQ_ID(bus_no
, dev_no
, fun_no
);
* find matching PCIE end device
pcie_devp
= piu_find_pcie_dev(piup
, (void *)&req_id
, PCIE_CFG
);
DBGDEV( lprintf(-1, "PCIE device with bus_no=0x%x dev_no=0x%x fun_no=0x%x not found!\n",
PCIE_BUS_NO(req_id
), PCIE_DEV_NO(req_id
), PCIE_FUN_NO(req_id
)); );
* read or write access to PCIE Cfg space
pcie_dp
= pcie_devp
->pcie_dp
;
switch (memop
& MA_Op_Mask
) {
status
= pcie_dp
->mem_access(pcie_devp
->hdl
, reg_no
, regp
, count
, DA_Load
, PCIE_CFG
);
status
= pcie_dp
->mem_access(pcie_devp
->hdl
, reg_no
, regp
, count
, DA_Store
, PCIE_CFG
);
* Access PCIE device's MEM32/MEM64 space (downbound)
/*
 * Forward a downbound memory-space access (space_id = PCIE_MEM32 or
 * PCIE_MEM64) to the end device claiming 'paddr', dispatching
 * load vs. store on the memop opcode. Returns the device's status.
 */
bool_t
piu_mem_access(pcie_model_t
*piup
, maccess_t memop
, uint64_t paddr
, uint32_t count
, uint64_t *regp
,
pcie_dev_inst_t
*pcie_devp
;
* find matching PCIE end device
pcie_devp
= piu_find_pcie_dev(piup
, (void *)&paddr
, space_id
);
DBGDEV( lprintf(-1, "PCIE device with mapped pa = 0x%lx not found!\n", paddr
); );
pcie_dp
= pcie_devp
->pcie_dp
;
switch (memop
& MA_Op_Mask
) {
status
= pcie_dp
->mem_access(pcie_devp
->hdl
, paddr
, regp
, count
, DA_Load
, space_id
);
status
= pcie_dp
->mem_access(pcie_devp
->hdl
, paddr
, regp
, count
, DA_Store
, space_id
);
* DMA transactions (upbound)
* piup: handle to pcie_model structure
* va: host virtual address accessed by the DMA transaction
* datap: data read from/written to the host memory
* count: data size counted in byte
* req_id: 16 bit requester Id
* type: access type (load vs. store)
* mode: addressing mode, can be either 32 bit or 64 bit address
/*
 * Upbound DMA entry point. A store whose address matches the MSI
 * address window (32- or 64-bit compare per 'mode') is routed to
 * piu_msi_write; otherwise the VA is translated through the IOMMU and
 * the resulting PA is accessed via the processor's dev_mem_access.
 */
bool_t
piu_dma_access(pcie_model_t
*piup
, tvaddr_t va
, uint8_t *datap
, int count
,
uint16_t req_id
, dev_access_t type
, dev_mode_t mode
)
bool_t is_msi
= false, status
;
/* 32-bit MSI window match: compare bits <31:16>. */
msi_addr
= piup
->csrs
.MSI_32_bit_Address_Register
& MASK64(31, 16);
is_msi
= ((va
& MASK64(31,16)) == msi_addr
);
* In sun4v mode, if EQ_base_addr_reg.bit[63]=0, msi_addr_reg.bit[63]
* is not used for comparison (see 16.3.9.8 of N2 PRM, rev 1.2)
sun4v
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 2, 2);
base
= GETMASK64(piup
->csrs
.Event_Queue_Base_Address_Register
, 63,63);
bit
= (base
== 0) ? 62 : 63;
msi_addr
= piup
->csrs
.MSI_64_bit_Address_Register
& MASK64(bit
, 16);
is_msi
= ((va
& MASK64(bit
,16)) == msi_addr
);
if (is_msi
&& type
== DA_Store
) {
status
= piu_msi_write(piup
, va
, datap
, count
, req_id
, mode
);
* perform IOMMU operation
status
= piu_iommu(piup
, va
, req_id
, type
, mode
, &pa
);
* VA -> PA translation is successful, do DMA transaction with pa
config_proc_t
*procp
= piup
->config_procp
;
status
= procp
->proc_typep
->dev_mem_access(procp
, pa
, datap
, count
, type
);
* This function performs IOMMU operation upon each DMA request.
/*
 * IOMMU front end: reads TE (translate enable), BE (bypass enable) and
 * the sun4v-mode bit from MMU_Control_and_Status_Register, handles
 * 64-bit bypass addresses, then delegates translation to
 * piu_iommu_sun4v or piu_iommu_sun4u. *pa receives the translation.
 */
bool_t
piu_iommu(pcie_model_t
*piup
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
)
te
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 0, 0);
be
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 1, 1);
sun4v
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 2, 2);
* check if it's MMU bypass opeartion (allowed in both SUN4V and SUN4U mode)
if (be
&& (mode
== PCIE_IS64
)) {
/* Bypass window: VA<63:39> must be all-ones pattern MASK64(63,50). */
if ((va
& MASK64(63, 39)) == MASK64(63, 50)) {
*pa
= va
& MASK64(38, 0);
DBGDEV(lprintf(-1, "piu_iommu: bypass va = 0x%llx pa = 0x%llx\n", va
, *pa
); );
* FIXME: should raise the MMU BYP_OOR error in sun4u mode
DBGDEV(lprintf(-1, "piu_iommu: IOMMU BYP_OOR error va = 0x%llx\n", va
); );
if (!be
&& (mode
== PCIE_IS64
)) {
* FIXME: should raise the MMU BYP_ERR error in sun4u mode
DBGDEV(lprintf(-1, "piu_iommu: IOMMU BYP_ERR error va = 0x%llx\n", va
); );
* check whether MMU is disabled
* FIXME: raise MMU TRN_ERR for both sun4v and sun4u mode
DBGDEV(lprintf(-1, "piu_iommu: MMU is disabled, va = 0x%llx\n", va
); );
* perform IOMMU operation under SUN4U or SUN4V mode
status
= piu_iommu_sun4v(piup
, va
, req_id
, type
, mode
, pa
);
status
= piu_iommu_sun4u(piup
, va
, req_id
, type
, mode
, pa
);
* Translate VA -> PA, SUN4U mode
/*
 * SUN4U IOMMU translation: derives TSB size (ts), page size (ps) and
 * TSB base (tb) from MMU_TSB_Control_Register, indexes the TSB by the
 * virtual page number, fetches the 8-byte TTE from host memory, and
 * hands it to piu_iommu_va2pa for the final VA->PA composition.
 */
bool_t
piu_iommu_sun4u(pcie_model_t
*piup
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
)
config_proc_t
*procp
= piup
->config_procp
;
int ts
, ps
, pg_bits
, tsb_sz
, tte_idx
;
uint64_t tb
, vpn
, tte_addr
, tte
, pg_mask
;
ts
= GETMASK64(piup
->csrs
.MMU_TSB_Control_Register
, 3, 0);
ps
= GETMASK64(piup
->csrs
.MMU_TSB_Control_Register
, 8, 8);
tb
= piup
->csrs
.MMU_TSB_Control_Register
& MASK64(38, 13);
* determine the tte index in terms of
* - page size: 8K (ps=0) and 64K (ps=1)
* - number of TSB entries (=1K*2^ts = 2^(10+ts))
vpn
= (va
& MASK64(31, pg_bits
)) >> pg_bits
;
tte_idx
= (vpn
& (tsb_sz
-1)) << 3; /* each entry of 8 byte */
status
= procp
->proc_typep
->dev_mem_access(procp
, tte_addr
, (uint8_t *)&tte
, 8, DA_Load
);
DBGDEV(lprintf(-1, "piu_iommu_sun4u: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
piu_iommu_va2pa(tte
, ps
, va
, req_id
, type
, mode
, pa
);
DBGDEV(lprintf(-1, "piu_iommu_sun4u: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
va
, *pa
, tte_addr
, tte
); );
* Translate VA -> PA, SUN4V mode
/*
 * SUN4V IOMMU translation: maps the requester's bus id through the
 * DEV2IOTSB table (16 regs x 8 five-bit fields) to an IOTSBDESC entry
 * (base_pa, offset, page size ps, TSB size ts), range-checks VA<62:40>
 * (raising SUN4V_VA_OOR_P on failure and logging fault address/status),
 * computes the offset-adjusted VPN, fetches the TTE from host memory,
 * and delegates to piu_iommu_va2pa.
 */
bool_t
piu_iommu_sun4v(pcie_model_t
*piup
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
)
config_proc_t
*procp
= piup
->config_procp
;
piu_csr_t
*csrs
= &piup
->csrs
;
int i
, busid_sel
, busid
, ps
, ts
, pg_bits
, tsb_sz
, tte_idx
;
uint8_t idx
, iotsb_idx
, iotsb_no
;
uint64_t dev2iotsb
, offset
, base_pa
, vpn
, tte_addr
, tte
;
* Form 7 bit index id into the DEV2IOTSB table, which is implemented
* by a set of 16 x 64-bit registers, with each register containing
* 8 x 5-bit values to index into the IOTSBDESC table.
busid
= PCIE_BUS_NO(req_id
);
busid_sel
= GETMASK64(csrs
->MMU_Control_and_Status_Register
, 3, 3);
idx
|= busid_sel
? GETMASK64(busid
, 5, 0) : GETMASK64(busid
, 6, 1);
* Use the 7 bit index id to extract the 5-bit iotsb_no from the
* DEV2IOTSB table (total of 128 index cells out of 16 regs).
dev2iotsb
= csrs
->MMU_DEV2IOTSB_Registers
[idx
>>3];
iotsb_idx
= GETMASK64(idx
, 2, 0) << 3;
iotsb_no
= GETMASK64(dev2iotsb
, iotsb_idx
+ 4, iotsb_idx
);
* Use iotsb_no as index to retrieve IOTSB info from IOTSBDESC table
* (implemented by a set of 32 x 64-bit registers)
base_pa
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 59, 34);
offset
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 33, 7);
ps
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 6, 4);
ts
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 3, 0);
/* VA out-of-range check: record fault info and raise MMU error. */
if ((va
& MASK64(62, 40)) != 0) {
uint64_t error_code
, trans_type
;
csrs
->MMU_Translation_Fault_Address_Register
= va
& MASK64(63, 2);
trans_type
= (type
== DA_Load
) ? TLP_MRd_FMT_TYPE_IS32
: TLP_MWr_FMT_TYPE_IS32
;
trans_type
= (type
== DA_Load
) ? TLP_MRd_FMT_TYPE_IS64
: TLP_MWr_FMT_TYPE_IS64
;
csrs
->MMU_Translation_Fault_Status_Register
= req_id
| (trans_type
<< 16);
* raise mmu sun4v_va_oor error
error_code
= 1ULL<<SUN4V_VA_OOR_P
;
csrs
->MMU_Error_Status_Set_Register
|= error_code
;
piu_raise_mmu_error(piup
, error_code
);
* determine adjusted page number using encoded ps value
* and adjusted VA at a given offset
* FIXME: check underflow error on vpn (error = sun4v_va_adj_uf)
vpn
= ((va
& MASK64(39, pg_bits
)) >> pg_bits
) - offset
;
* calculate tte index in terms of TSB size
* FIXME: check out of range error on vpn (error = TRN_OOR)
tte_idx
= (vpn
& (tsb_sz
-1)) << 3;
tte_addr
= (base_pa
<< 13) + tte_idx
;
status
= procp
->proc_typep
->dev_mem_access(procp
, tte_addr
, (uint8_t *)&tte
, 8, DA_Load
);
DBGDEV(lprintf(-1, "piu_iommu_sun4v: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
piu_iommu_va2pa(tte
, ps
, va
, req_id
, type
, mode
, pa
);
DBGDEV(lprintf(-1, "piu_iommu_sun4v: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
va
, *pa
, tte_addr
, tte
); );
/*
 * Final VA->PA composition from a fetched TTE: validates the entry
 * (V bit), checks DEV_KEY against req_id under the TTE's FNM mask,
 * enforces DATA_W for stores, then forms *pa from the TTE's PA field
 * plus the page-offset bits of 'va'.
 */
bool_t
piu_iommu_va2pa(uint64_t tte
, int ps
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
)
bool_t tte_key_valid
, tte_data_w
, tte_data_v
;
uint64_t pg_mask
, tte_data_pa
;
tte_data_v
= GETMASK64(tte
, 0, 0);
tte_key_valid
= GETMASK64(tte
, 2, 2);
tte_dev_key
= GETMASK64(tte
, 63, 48);
* assert on invalid tte entry
* check the RO (Relaxed Ordering) bit
if (GETMASK64(tte
, 6, 6))
DBGDEV(lprintf(-1, "Relaxed Ordering is not supported"); );
* compare tte's DEV_KEY field with req_id
* According to N2 PIU PRM, the function number portion of
* the tte_dev_key and the source req_id should be masked
* with the FNM field of the tte.
uint16_t tte_fnm
= MASK64(15,3) | GETMASK64(tte
, 5, 3);
if ((tte_dev_key
& tte_fnm
) != (req_id
& tte_fnm
)) {
DBGDEV(lprintf(-1, "piu_iommu_va2pa: req_id=0x%lx not matching tte dev_key=0x%lx\n",
* check on DATA_W for the write request
tte_data_w
= GETMASK64(tte
, 1, 1);
if ((tte_data_w
== 0) && type
== DA_Store
) {
DBGDEV(lprintf(-1, "piu_iommu_sun4u: write to non-writable page: va = 0x%lx tte = 0x%lx\n",
* finally translate VA to PA
pg_mask
= (1 << pg_bits
) - 1;
tte_data_pa
= tte
& MASK64(38, pg_bits
);
*pa
= tte_data_pa
| (va
& pg_mask
);
/*
 * Assert an INTx (legacy) interrupt from device 'dev_no', pin 'pin_no':
 * raises the corresponding mondo in IRQ_RECEIVED state.
 */
bool_t
piu_assert_intx(pcie_model_t
*piup
, uint8_t pin_no
, uint8_t dev_no
)
* FIXME: check if PIU supports more than 4 devices
* generate mondo interrupt
piu_mondo_interrupt(piup
, ino
, IRQ_RECEIVED
);
* Deassert INTx interrupt
/*
 * Deassert an INTx interrupt: returns the corresponding mondo to
 * IRQ_IDLE state.
 */
bool_t
piu_deassert_intx(pcie_model_t
*piup
, uint8_t pin_no
, uint8_t dev_no
)
* FIXME: check if PIU supports more than 4 devices
piu_mondo_interrupt(piup
, ino
, IRQ_IDLE
);
* Generate IRQ mondo interrupt and update the interrupt state accordingly
/*
 * Transition mondo 'ino' to state 'new'. On an IDLE->RECEIVED edge with
 * the mapping register's V (valid) bit set, builds a pcie_mondo_t with
 * the mapped thread id and data words and delivers it to the target CPU
 * via the processor's ext_signal(ES_PCIE) hook, then records the new
 * state via piu_set_irq_state.
 */
void piu_mondo_interrupt(pcie_model_t
*piup
, uint8_t ino
, irq_state_t
new)
DBGDEV( lprintf(-1, "piu_mondo_interrupt: ino = %d\n", ino
); );
* get the current IRQ mondo state
old
= piu_get_irq_state(piup
, ino
);
V
= GETMASK64(piup
->csrs
.Interrupt_Mapping_Registers
[regx
], 31, 31);
if ((old
== IRQ_IDLE
) && (new == IRQ_RECEIVED
) && V
) {
pcie_mondo_t
*irq_mondo
= (pcie_mondo_t
*)Xcalloc(1, pcie_mondo_t
);
irq_mondo
->thread_id
= GETMASK64(piup
->csrs
.Interrupt_Mapping_Registers
[regx
], 30, 25);
/* MDO_MODE selects raw vs. Interrupt_Mondo_Data_0-based payload. */
mdo_mode
= GETMASK64(piup
->csrs
.Interrupt_Mapping_Registers
[regx
], 63, 63);
irq_mondo
->data
[0] = (irq_mondo
->thread_id
<< 6) | ino
;
uint64_t data0
= piup
->csrs
.Interrupt_Mondo_Data_0_Register
;
irq_mondo
->data
[0] = (data0
& MASK64(63, 12)) | (irq_mondo
->thread_id
<< 6) | ino
;
irq_mondo
->data
[1] = piup
->csrs
.Interrupt_Mondo_Data_1_Register
;
* send IRQ mondo to target CPU
piup
->config_procp
->proc_typep
->ext_signal(piup
->config_procp
, ES_PCIE
, (void *)irq_mondo
);
piu_set_irq_state(piup
, ino
, new);
/*
 * Update the INTX_Status_Register bit for INTx line 'ino' (INTA..INTD
 * map to bits 3..0): cleared or set according to the new state.
 * NOTE(review): the computation of 'val' and the clear-vs-set branch
 * are missing from this extracted view.
 */
void piu_set_intx_state(pcie_model_t
*piup
, uint8_t ino
, irq_state_t
new)
bit
= 3 - (ino
- INO_INTA
);
piup
->csrs
.INTX_Status_Register
&= ~val
;
piup
->csrs
.INTX_Status_Register
|= val
;
/*
 * Return the INTX_Status_Register bit for INTx line 'ino'
 * (INTA..INTD map to bits 3..0).
 */
int piu_get_intx_state(pcie_model_t
*piup
, uint8_t ino
)
bit
= 3 - (ino
- INO_INTA
);
val
= (piup
->csrs
.INTX_Status_Register
>> bit
) & 1;
* Update Interrupt State Status Register with the new mondo state 'new'
/*
 * Record mondo state 'new' (2-bit encoding) for interrupt 'ino' in
 * Interrupt_State_Status_Register_1 (ino < 32) or _2 (ino >= 32),
 * then mirror into INTX status for the INTx range (ino 20-23).
 */
void piu_set_irq_state(pcie_model_t
*piup
, uint8_t ino
, irq_state_t
new)
* determine which status register to use in terms of ino
regp
= ino
<32 ? &piup
->csrs
.Interrupt_State_Status_Register_1
: &piup
->csrs
.Interrupt_State_Status_Register_2
;
* each mondo state is encoded via a 2 bit value:
* update the approriate bits of the register
*regp
&= ~(IRQ_STATE_MASK
<< bit
);
*regp
|= ((uint64_t)new << bit
);
* update the INTx status register if ino = 20-23
piu_set_intx_state(piup
, ino
, new);
* Get mondo interrupt state
/*
 * Read back the 2-bit mondo state for interrupt 'ino' from the
 * appropriate Interrupt State Status register (1 for ino < 32,
 * 2 otherwise).
 */
int piu_get_irq_state(pcie_model_t
*piup
, uint8_t ino
)
val
= ino
<32 ? piup
->csrs
.Interrupt_State_Status_Register_1
: piup
->csrs
.Interrupt_State_Status_Register_2
;
state
= (val
>> bit
) & IRQ_STATE_MASK
;
* This function handles memory write request for both MSI and MSI-X.
* piup: handle to pcie_model structure
* msi_addr: host address for MSI/MSI-X write, can be either 32 bit or 64 bit
* msi_datap: pointer to the MSI/MSI-X data of size 2 or 4 byte
* count: can be either 2 byte or 4 byte to indicate MSI or MSI-X request
* req_id: 16 bit requester Id
* mode: addressing mode, can be either 32 bit or 64 bit address
/*
 * MSI/MSI-X memory-write handler: validates the data size (2 = MSI,
 * 4 = MSI-X), uses the low bits of the vector to index the 256-entry
 * MSI mapping table, assembles an eq_record_t (TLP format/type,
 * address and data fields), and posts it to the mapped event queue via
 * piu_eq_write.
 */
bool_t
piu_msi_write(pcie_model_t
*piup
, uint64_t msi_addr
, uint8_t *msi_datap
,
int count
, uint16_t req_id
, dev_mode_t mode
)
int map_idx
, v
, eqwr_n
, eqnum
;
* validate count to determin MSI vs. MSI-X
if ((count
!= 2) && (count
!= 4)) {
DBGDEV( lprintf(-1, "piu_msi_write: invalid msi_data size = %d \n", count
); );
* PIU implements a total of 256 MSI_Mapping_Registers as the mapping
* table to allow SW to map each MSI/MSI-X request to an event queue.
* The lower 8 bits of the MSI/MSI-X data field is used to index into
memcpy((void *)&msi_vector
, (void *)msi_datap
, count
);
map_idx
= (count
== 2) ? *(uint16_t *)&msi_vector
: msi_vector
;
DBGDEV( lprintf(-1, "piu_msi_write: addr = 0x%lx data = %d\n", msi_addr
, map_idx
); );
/* Mapping entry fields: V (valid), EQWR_N (disable), EQNUM (queue#). */
mapping
= piup
->csrs
.MSI_Mapping_Register
[map_idx
];
v
= GETMASK64(mapping
, 63, 63);
eqwr_n
= GETMASK64(mapping
, 62, 62);
eqnum
= GETMASK64(mapping
, 5, 0);
* assemble the event queue record
eq_record_t
*record
= (eq_record_t
*)Xcalloc(1, eq_record_t
);
record
->fmt_type
= mode
? TLP_MSI_FMT_TYPE_IS64
: TLP_MSI_FMT_TYPE_IS32
;
record
->addr_15_2
= GETMASK64(msi_addr
, 15, 2);
record
->addr_hi
= GETMASK64(msi_addr
, 63, 16);
* Remove byte swapping here, as per MSI-X RFE P1584,
* Fire 2.0 no longer loads MSI/MSI-X data into event
* queue record in the Little-Endian format.
* Note: the current PRM for both Fire rev 2.1, and
* N2 PIU rev 1.3 is not up-to-date with this
record
->data0
= *(uint16_t *)&msi_vector
;
record
->data0
= msi_vector
& MASK64(15,0);
record
->data1
= (msi_vector
& MASK64(31,16)) >> 16;
piu_eq_write(piup
, eqnum
, record
, req_id
);
* Write event queue records to the queue and update the tail pointer of the queue.
bool_t
piu_eq_write(pcie_model_t
*piup
, int eqnum
, eq_record_t
*record
, uint16_t req_id
)
overr
= GETMASK64(piup
->csrs
.Event_Queue_Tail_Register
[eqnum
], 57, 57);
state
= GETMASK64(piup
->csrs
.Event_Queue_State_Register
[eqnum
], 2, 0);
DBGDEV( lprintf(-1, "piu_eq_write: eqnum = %d state = %d\n", eqnum
, state
); );
if ((state
== EQ_ACTIVE
) && !overr
) {
int head
= GETMASK64(piup
->csrs
.Event_Queue_Head_Register
[eqnum
], 6, 0);
int tail
= GETMASK64(piup
->csrs
.Event_Queue_Tail_Register
[eqnum
], 6, 0);
int next
= (tail
+ 1) % EQ_NUM_ENTRIES
;
bool_t full
= (next
== head
);
bool_t empty
= (head
== tail
);
* set the overflow bit and generate a DMU internal interrupt (ino 62)
piup
->csrs
.Event_Queue_Tail_Register
[eqnum
] |= MASK64(57,57);
piup
->csrs
.Event_Queue_State_Register
[eqnum
] = EQ_ERROR
;
piu_mondo_interrupt(piup
, INO_DMU
, IRQ_RECEIVED
);
* determine the EQ record address to write the event queue record
uint64_t base
, offset
, rec_va
, pa
;
dev_mode_t mode
= PCIE_IS64
;
base
= piup
->csrs
.Event_Queue_Base_Address_Register
& MASK64(63,19);
offset
= (eqnum
* EQ_NUM_ENTRIES
+ tail
) * EQ_RECORD_SIZE
;
DBGDEV( lprintf(-1, "piu_eq_write: EQ record va = 0x%llx\n", rec_va
); );
* setup the access mode of the EQ record address in terms of the
* IOMMU mode (see N2 PRM rev 1.2, section 16.3.4)
sun4v
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 2, 2);
if ((!sun4v
) && (rec_va
& MASK64(63, 32) == 0))
* pass the EQ address to IOMMU for translation
status
= piu_iommu(piup
, rec_va
, req_id
, DA_Store
, mode
, &pa
);
* write the record to the event queue
config_proc_t
*procp
= piup
->config_procp
;
status
= procp
->proc_typep
->dev_mem_access(procp
, pa
, (uint8_t *)record
,
sizeof(*record
), DA_Store
);
* update the tail pointer
piup
->csrs
.Event_Queue_Tail_Register
[eqnum
] = next
& MASK64(6,0);
* if the queue is empty, generate a mondo interrupt depending on state
piu_mondo_interrupt(piup
, INO_EQLO
+eqnum
, IRQ_RECEIVED
);
/*
 * Populate the global IMU and MMU error lookup tables (imu_error_list,
 * mmu_error_list): each init-list entry is copied into the global table
 * slot indexed by its error_type, so errors can later be looked up by type.
 *
 * NOTE(review): the copy loops terminate on an entry whose error_type is
 * -1; no such sentinel entry is visible in this extract -- confirm the
 * init lists end with one, otherwise the loops overrun the arrays.
 * NOTE(review): braces and the declaration of 'i' were lost in extraction;
 * tokens below are preserved as found.
 */
void piu_init_error_list()
/* primary (_P, bits 0-9) and secondary (_S, bits 32-41) IMU error entries */
imu_error_entry_t imu_error_init_list[] = {
{ PIU_ERR ( MSI_NOT_EN_P, 0) },
{ PIU_ERR ( COR_MES_NOT_EN_P, 1) },
{ PIU_ERR ( NONFATAL_MES_NOT_EN_P, 2) },
{ PIU_ERR ( FATAL_MES_NOT_EN_P, 3) },
{ PIU_ERR ( PMPME_MES_NOT_EN_P, 4) },
{ PIU_ERR ( PMEACK_MES_NOT_EN_P, 5) },
{ PIU_ERR ( MSI_PAR_ERR_P, 6) },
{ PIU_ERR ( MSI_MAL_ERR_P, 7) },
{ PIU_ERR ( EQ_NOT_EN_P, 8) },
{ PIU_ERR ( EQ_OVER_P, 9) },
{ PIU_ERR ( MSI_NOT_EN_S, 32) },
{ PIU_ERR ( COR_MES_NOT_EN_S, 33) },
{ PIU_ERR ( NONFATAL_MES_NOT_EN_S, 34) },
{ PIU_ERR ( FATAL_MES_NOT_EN_S, 35) },
/* NOTE(review): identifier below looks garbled (expected PMPME_MES_NOT_EN_S at bit 36) -- confirm */
{ PIU_ERR ( PMPME_MES_NOT_EN_SEQ_OVER_S, 36) },
{ PIU_ERR ( PMEACK_MES_NOT_EN_S, 37) },
{ PIU_ERR ( MSI_PAR_ERR_S, 38) },
{ PIU_ERR ( MSI_MAL_ERR_S, 39) },
{ PIU_ERR ( EQ_NOT_EN_S, 40) },
{ PIU_ERR ( EQ_OVER_S, 41) },
/* primary (_P, bits 0-20) and secondary (_S, bits 32-52) MMU error entries */
mmu_error_entry_t mmu_error_init_list[] = {
{ PIU_ERR ( BYP_ERR_P, 0) },
{ PIU_ERR ( BYP_OOR_P, 1) },
{ PIU_ERR ( SUN4V_INV_PG_SZ_P, 2) },
{ PIU_ERR ( SPARE1_P, 3) },
{ PIU_ERR ( TRN_ERR_P, 4) },
{ PIU_ERR ( TRN_OOR_P, 5) },
{ PIU_ERR ( TTE_INV_P, 6) },
{ PIU_ERR ( TTE_PRT_P, 7) },
{ PIU_ERR ( TTC_DPE_P, 8) },
{ PIU_ERR ( TTC_CAE_P, 9) },
{ PIU_ERR ( SPARE2_P, 10) },
{ PIU_ERR ( SPARE3_P, 11) },
{ PIU_ERR ( TBW_DME_P, 12) },
{ PIU_ERR ( TBW_UDE_P, 13) },
{ PIU_ERR ( TBW_ERR_P, 14) },
{ PIU_ERR ( TBW_DPE_P, 15) },
{ PIU_ERR ( IOTSBDESC_INV_P, 16) },
{ PIU_ERR ( IOTSBDESC_DPE_P, 17) },
{ PIU_ERR ( SUN4V_VA_OOR_P, 18) },
{ PIU_ERR ( SUN4V_VA_ADJ_UF_P, 19) },
{ PIU_ERR ( SUN4V_KEY_ERR_P, 20) },
{ PIU_ERR ( BYP_ERR_S, 32) },
{ PIU_ERR ( BYP_OOR_S, 33) },
{ PIU_ERR ( SUN4V_INV_PG_SZ_S, 34) },
{ PIU_ERR ( SPARE1_S, 35) },
{ PIU_ERR ( TRN_ERR_S, 36) },
{ PIU_ERR ( TRN_OOR_S, 37) },
{ PIU_ERR ( TTE_INV_S, 38) },
{ PIU_ERR ( TTE_PRT_S, 39) },
{ PIU_ERR ( TTC_DPE_S, 40) },
{ PIU_ERR ( TTC_CAE_S, 41) },
{ PIU_ERR ( SPARE2_S, 42) },
{ PIU_ERR ( SPARE3_S, 43) },
{ PIU_ERR ( TBW_DME_S, 44) },
{ PIU_ERR ( TBW_UDE_S, 45) },
{ PIU_ERR ( TBW_ERR_S, 46) },
{ PIU_ERR ( TBW_DPE_S, 47) },
{ PIU_ERR ( IOTSBDESC_INV_S, 48) },
{ PIU_ERR ( IOTSBDESC_DPE_S, 49) },
{ PIU_ERR ( SUN4V_VA_OOR_S, 50) },
{ PIU_ERR ( SUN4V_VA_ADJ_UF_S, 51) },
{ PIU_ERR ( SUN4V_KEY_ERR_S, 52) },
/* copy each IMU init entry into its slot (indexed by error_type) in the global table */
for (i = 0; imu_error_init_list[i].error_type != -1; i++)
imu_error_list[imu_error_init_list[i].error_type] = imu_error_init_list[i];
/* copy each MMU init entry into its slot (indexed by error_type) in the global table */
for (i = 0; mmu_error_init_list[i].error_type != -1; i++)
mmu_error_list[mmu_error_init_list[i].error_type] = mmu_error_init_list[i];
/*
 * Simulate IMU errors: walk the IMU error table and, for every error bit
 * set in 'imu_error' whose interrupt is enabled in the IMU Interrupt
 * Enable Register, raise the corresponding IMU error.
 *
 *   piup:      handle to the pcie_model structure
 *   imu_error: bit mask of IMU error codes to simulate
 */
void piu_simulate_imu_error(pcie_model_t *piup, uint64_t imu_error)
{
	uint64_t error_code, intr_enable, imu_ie;
	int i;

	/*
	 * loop over the error bits and raise the error only if
	 * the interrupt is enabled
	 */
	imu_ie = piup->csrs.IMU_Interrupt_Enable_Register;

	for (i = 0; i < IMU_ERROR_MAXNUM; i++) {
		error_code = imu_error_list[i].error_code;
		intr_enable = imu_error_list[i].intr_enable;

		if ((imu_error & error_code) && (imu_ie & intr_enable))
			piu_raise_imu_error(piup, error_code);
	}
}
/*
 * Raise a single IMU error: latch it in the IMU error status set register
 * and, when both the DMC core enable (bit 63) and the IMU block enable
 * (bit 0) are set, record the block error and post the DMU mondo.
 *
 *   piup:       handle to the pcie_model structure
 *   error_code: IMU error code bit to raise
 */
void piu_raise_imu_error(pcie_model_t *piup, uint64_t error_code)
{
	piu_csr_t *csrs = &piup->csrs;
	uint64_t dmc_cbie;
	int dmu, imu;

	/* update the error status register */
	csrs->IMU_Error_Status_Set_Register |= error_code;

	/* generate INO_DMU mondo interrupt */
	dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;

	dmu = GETMASK64(dmc_cbie, 63, 63);	/* DMC core interrupt enable */
	imu = GETMASK64(dmc_cbie, 0, 0);	/* IMU block interrupt enable */

	/*
	 * NOTE(review): gate restored — in the garbled original dmu/imu were
	 * computed but never tested, so the mondo fired unconditionally;
	 * confirm against the N2 PRM.
	 */
	if (dmu && imu) {
		/* bit 0 = IMU block error */
		csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(0,0);

		piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
	}
}
/*
 * Simulate MMU errors: walk the MMU error table and, for every error bit
 * set in 'mmu_error' whose interrupt is enabled in the MMU Interrupt
 * Enable Register, raise the corresponding MMU error.
 *
 *   piup:      handle to the pcie_model structure
 *   mmu_error: bit mask of MMU error codes to simulate
 */
void piu_simulate_mmu_error(pcie_model_t *piup, uint64_t mmu_error)
{
	uint64_t error_code, intr_enable, mmu_ie;
	int i;

	/*
	 * loop over the error bits and raise the error only if
	 * the interrupt is enabled
	 */
	mmu_ie = piup->csrs.MMU_Interrupt_Enable_Register;

	for (i = 0; i < MMU_ERROR_MAXNUM; i++) {
		error_code = mmu_error_list[i].error_code;
		intr_enable = mmu_error_list[i].intr_enable;

		if ((mmu_error & error_code) && (mmu_ie & intr_enable))
			piu_raise_mmu_error(piup, error_code);
	}
}
/*
 * Raise a single MMU error: latch it in the MMU error status set register
 * and, when both the DMC core enable (bit 63) and the MMU block enable
 * (bit 0) are set, record the block error and post the DMU mondo.
 *
 *   piup:       handle to the pcie_model structure
 *   error_code: MMU error code bit to raise
 */
void piu_raise_mmu_error(pcie_model_t *piup, uint64_t error_code)
{
	piu_csr_t *csrs = &piup->csrs;
	uint64_t dmc_cbie;
	int dmu, mmu;

	/* update the error status register */
	csrs->MMU_Error_Status_Set_Register |= error_code;

	/* generate INO_DMU mondo interrupt */
	dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;

	dmu = GETMASK64(dmc_cbie, 63, 63);	/* DMC core interrupt enable */
	mmu = GETMASK64(dmc_cbie, 0, 0);	/* MMU block interrupt enable */

	/*
	 * NOTE(review): gate restored — in the garbled original dmu/mmu were
	 * computed but never tested, so the mondo fired unconditionally;
	 * confirm against the N2 PRM.
	 */
	if (dmu && mmu) {
		/* bit 1 = MMU block error */
		csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(1,1);

		piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
	}
}
/*
 * Look up the PIU register type from its offset value.
 *
 * NOTE(review): this routine is incompletely extracted -- braces, the
 * bodies of the two interrupt-register range checks, the 'wordx'
 * computation and all return statements are missing; only the surviving
 * expressions are documented. Do not infer behavior beyond them.
 */
pcie_csr_t
piu_offset2reg(uint64_t offset, int *regx)
/*
 * first to check the few interrupt registers
 * (PIU supports less number of these regs than the Fire for Niagara 1)
 */
if ((offset > 0x6011D8) && (offset < 0x6011F0))
if ((offset > 0x6015D8) && (offset < 0x6015F0))
/* scan the CSR table; 'diff' is the byte distance from candidate register i */
for (i = 0; i < NUM_PCIE_CSRS; i++) {
int nwords, diff = offset - pcie_csrs[i].offset;
/* exact-offset hit: report the register index through *regx */
*regx = pcie_csrs[i].regx;
/* multi-word registers: the offset selects a word within the register block */
if ((nwords = pcie_csrs[i].nwords) != 1) {
*regx = pcie_csrs[i].regx + wordx;
* Add the new PCIE device to the linked list
void piu_register_pcie_device(pcie_model_t
*piup
, pcie_dev_inst_t
*new)
if (piup
->pcie_devices_list_head
== NULL
) {
piup
->pcie_devices_list_head
= new;
temp
= piup
->pcie_devices_list_head
;
while (temp
->next
!= NULL
)
* Get the PCIE end device with the given request ID
piu_find_pcie_dev(pcie_model_t
*piup
, void *dptr
, pcie_space_t space_id
)
pcie_dev_inst_t
*pcie_dev
;
config_dev_t
*config_devp
;
pcie_dev
= piup
->pcie_devices_list_head
;
if (space_id
== PCIE_CFG
) {
uint16_t req_id
= *(uint16_t *)dptr
;
while (pcie_dev
!= NULL
) {
if (pcie_dev
->req_id
== req_id
) {
pcie_dev
= pcie_dev
->next
;
while (pcie_dev
!= NULL
) {
uint64_t paddr
= *(uint64_t *)dptr
;
if (pcie_dev
->pcie_dp
->bar_test(pcie_dev
->hdl
, paddr
, space_id
)) {
pcie_dev
= pcie_dev
->next
;