// ========== Copyright Header Begin ==========================================
// OpenSPARC T2 Processor File: piu.cc
// Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
// The above named program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public
// License version 2 as published by the Free Software Foundation.
// The above named program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// You should have received a copy of the GNU General Public
// License along with this work; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
// ========== Copyright Header End ============================================
// Copyright 2006 Sun Microsystems, Inc. All rights reserved.
// Use is subject to license terms.
/*
 * Device-name/type bindings and local helper macros for the PIU model.
 *
 * NOTE(review): PIU_TYPE is defined twice in a row (dev_type_piu_vf,
 * then dev_type_piu) with no #undef between them.  The original source
 * presumably selected one of the two under a preprocessor conditional
 * (VF vs. non-VF build) that was lost in extraction -- as written, the
 * second definition is a redefinition; confirm against the real file.
 */
#define PIU_NAME "piu_vf"
#define PIU_TYPE dev_type_piu_vf
#define PIU_TYPE dev_type_piu
/* Thin wrapper over the standard assert(). */
#define ASSERT(X) assert(X)
/* Debug logging: routes through the owning samPiu object's debug_more().
   X must be a pcie_model_t* whose sam_piu member points at a samPiu. */
#define DBGDEV( X, ... ) ((samPiu*)(X->sam_piu))->debug_more(__VA_ARGS__)
/*
 * PIU CSR descriptor table, one entry per architected register group,
 * indexed by the PIU_* enum used in the switch statements below.
 *
 * Entry layout: { offset, nwords, regx, name }
 *   offset - byte offset of the group's first register in DMU CSR space
 *   nwords - number of consecutive 64-bit registers in the group
 *   regx   - index of the group's first word in the flat csrs[] array;
 *            each entry's regx equals the previous entry's regx + nwords
 *            (the chain is consistent for every entry listed here).
 *
 * NOTE(review): the closing "};" of this aggregate is not visible in
 * this chunk -- lost in extraction, not a table defect.
 */
pcie_csr_desc_t pcie_csrs
[NUM_PCIE_CSRS
] = {
{0x6010a0, 44, 0, "PIU_Interrupt_Mapping_Registers"},
{0x6014a0, 44, 44, "PIU_Interrupt_Clear_Registers"},
{0x601a00, 1, 88, "PIU_Interrupt_Retry_Timer_Register"},
{0x601a10, 1, 89, "PIU_Interrupt_State_Status_Register_1"},
{0x601a18, 1, 90, "PIU_Interrupt_State_Status_Register_2"},
{0x60b000, 1, 91, "PIU_INTX_Status_Register"},
{0x60b008, 1, 92, "PIU_INT_A_Clear_Register"},
{0x60b010, 1, 93, "PIU_INT_B_Clear_Register"},
{0x60b018, 1, 94, "PIU_INT_C_Clear_Register"},
{0x60b020, 1, 95, "PIU_INT_D_Clear_Register"},
{0x610000, 1, 96, "PIU_Event_Queue_Base_Address_Register"},
{0x611000, 36, 97, "PIU_Event_Queue_Control_Set_Register"},
{0x611200, 36, 133, "PIU_Event_Queue_Control_Clear_Register"},
{0x611400, 36, 169, "PIU_Event_Queue_State_Register"},
{0x611600, 36, 205, "PIU_Event_Queue_Tail_Register"},
{0x611800, 36, 241, "PIU_Event_Queue_Head_Register"},
{0x620000, 256, 277, "PIU_MSI_Mapping_Register"},
{0x628000, 256, 533, "PIU_MSI_Clear_Registers"},
{0x62c000, 1, 789, "PIU_Interrupt_Mondo_Data_0_Register"},
{0x62c008, 1, 790, "PIU_Interrupt_Mondo_Data_1_Register"},
{0x630000, 1, 791, "PIU_ERR_COR_Mapping_Register"},
{0x630008, 1, 792, "PIU_ERR_NONFATAL_Mapping_Register"},
{0x630010, 1, 793, "PIU_ERR_FATAL_Mapping_Register"},
{0x630018, 1, 794, "PIU_PM_PME_Mapping_Register"},
{0x630020, 1, 795, "PIU_PME_To_ACK_Mapping_Register"},
{0x631000, 1, 796, "PIU_IMU_Error_Log_Enable_Register"},
{0x631008, 1, 797, "PIU_IMU_Interrupt_Enable_Register"},
{0x631010, 1, 798, "PIU_IMU_Interrupt_Status_Register"},
{0x631018, 1, 799, "PIU_IMU_Error_Status_Clear_Register"},
{0x631020, 1, 800, "PIU_IMU_Error_Status_Set_Register"},
{0x631028, 1, 801, "PIU_IMU_RDS_Error_Log_Register"},
{0x631030, 1, 802, "PIU_IMU_SCS_Error_Log_Register"},
{0x631038, 1, 803, "PIU_IMU_EQS_Error_Log_Register"},
{0x631800, 1, 804, "PIU_DMC_Core_and_Block_Interrupt_Enable_Register"},
{0x631808, 1, 805, "PIU_DMC_Core_and_Block_Error_Status_Register"},
{0x632000, 1, 806, "PIU_IMU_Performance_Counter_Select_Register"},
{0x632008, 1, 807, "PIU_IMU_Performance_Counter_Zero_Register"},
{0x632010, 1, 808, "PIU_IMU_Performance_Counter_One_Register"},
{0x634000, 1, 809, "PIU_MSI_32_bit_Address_Register"},
{0x634008, 1, 810, "PIU_MSI_64_bit_Address_Register"},
{0x634018, 1, 811, "PIU_Mem_64_PCIE_Offset_Register"},
{0x640000, 1, 812, "PIU_MMU_Control_and_Status_Register"},
{0x640008, 1, 813, "PIU_MMU_TSB_Control_Register"},
{0x640108, 1, 814, "PIU_MMU_TTE_Cache_Invalidate_Register"},
{0x641000, 1, 815, "PIU_MMU_Error_Log_Enable_Register"},
{0x641008, 1, 816, "PIU_MMU_Interrupt_Enable_Register"},
{0x641010, 1, 817, "PIU_MMU_Interrupt_Status_Register"},
{0x641018, 1, 818, "PIU_MMU_Error_Status_Clear_Register"},
{0x641020, 1, 819, "PIU_MMU_Error_Status_Set_Register"},
{0x641028, 1, 820, "PIU_MMU_Translation_Fault_Address_Register"},
{0x641030, 1, 821, "PIU_MMU_Translation_Fault_Status_Register"},
{0x642000, 1, 822, "PIU_MMU_Performance_Counter_Select_Register"},
{0x642008, 1, 823, "PIU_MMU_Performance_Counter_Zero_Register"},
{0x642010, 1, 824, "PIU_MMU_Performance_Counter_One_Register"},
{0x646000, 64, 825, "PIU_MMU_TTE_Cache_Virtual_Tag_Registers"},
{0x647000, 64, 889, "PIU_MMU_TTE_Cache_Physical_Tag_Registers"},
{0x648000, 512, 953, "PIU_MMU_TTE_Cache_Data_Registers"},
{0x649000, 16, 1465, "PIU_MMU_DEV2IOTSB_Registers"},
{0x649100, 32, 1481, "PIU_MMU_IOTSBDESC_Registers"},
{0x651000, 1, 1513, "PIU_ILU_Error_Log_Enable_Register"},
{0x651008, 1, 1514, "PIU_ILU_Interrupt_Enable_Register"},
{0x651010, 1, 1515, "PIU_ILU_Interrupt_Status_Register"},
{0x651018, 1, 1516, "PIU_ILU_Error_Status_Clear_Register"},
{0x651020, 1, 1517, "PIU_ILU_Error_Status_Set_Register"},
{0x651800, 1, 1518, "PIU_PEU_Core_and_Block_Interrupt_Enable_Register"},
{0x651808, 1, 1519, "PIU_PEU_Core_and_Block_Interrupt_Status_Register"},
{0x652000, 1, 1520, "PIU_ILU_Diagnostic_Register"},
{0x653000, 1, 1521, "PIU_DMU_Debug_Select_Register_for_DMU_Debug_Bus_A"},
{0x653008, 1, 1522, "PIU_DMU_Debug_Select_Register_for_DMU_Debug_Bus_B"},
{0x653100, 1, 1523, "PIU_DMU_PCI_Express_Configuration_Register"},
{0x660000, 32, 1524, "PIU_Packet_Scoreboard_DMA_Register_Set"},
{0x664000, 16, 1556, "PIU_Packet_Scoreboard_PIO_Register_Set"},
{0x670000, 32, 1572, "PIU_Transaction_Scoreboard_Register_Set"},
{0x670100, 1, 1604, "PIU_Transaction_Scoreboard_Status_Register"},
{0x680000, 1, 1605, "PIU_PEU_Control_Register"},
{0x680008, 1, 1606, "PIU_PEU_Status_Register"},
{0x680010, 1, 1607, "PIU_PEU_PME_Turn_Off_Generate_Register"},
{0x680018, 1, 1608, "PIU_PEU_Ingress_Credits_Initial_Register"},
{0x680100, 1, 1609, "PIU_PEU_Diagnostic_Register"},
{0x680200, 1, 1610, "PIU_PEU_Egress_Credits_Consumed_Register"},
{0x680208, 1, 1611, "PIU_PEU_Egress_Credit_Limit_Register"},
{0x680210, 1, 1612, "PIU_PEU_Egress_Retry_Buffer_Register"},
{0x680218, 1, 1613, "PIU_PEU_Ingress_Credits_Allocated_Register"},
{0x680220, 1, 1614, "PIU_PEU_Ingress_Credits_Received_Register"},
{0x681000, 1, 1615, "PIU_PEU_Other_Event_Log_Enable_Register"},
{0x681008, 1, 1616, "PIU_PEU_Other_Event_Interrupt_Enable_Register"},
{0x681010, 1, 1617, "PIU_PEU_Other_Event_Interrupt_Status_Register"},
{0x681018, 1, 1618, "PIU_PEU_Other_Event_Status_Clear_Register"},
{0x681020, 1, 1619, "PIU_PEU_Other_Event_Status_Set_Register"},
{0x681028, 1, 1620, "PIU_PEU_Receive_Other_Event_Header1_Log_Register"},
{0x681030, 1, 1621, "PIU_PEU_Receive_Other_Event_Header2_Log_Register"},
{0x681038, 1, 1622, "PIU_PEU_Transmit_Other_Event_Header1_Log_Register"},
{0x681040, 1, 1623, "PIU_PEU_Transmit_Other_Event_Header2_Log_Register"},
{0x682000, 1, 1624, "PIU_PEU_Performance_Counter_Select_Register"},
{0x682008, 1, 1625, "PIU_PEU_Performance_Counter_Zero_Register"},
{0x682010, 1, 1626, "PIU_PEU_Performance_Counter_One_Register"},
{0x682018, 1, 1627, "PIU_PEU_Performance_Counter_Two_Register"},
{0x683000, 1, 1628, "PIU_PEU_Debug_Select_A_Register"},
{0x683008, 1, 1629, "PIU_PEU_Debug_Select_B_Register"},
{0x690000, 1, 1630, "PIU_PEU_Device_Capabilities_Register"},
{0x690008, 1, 1631, "PIU_PEU_Device_Control_Register"},
{0x690010, 1, 1632, "PIU_PEU_Device_Status_Register"},
{0x690018, 1, 1633, "PIU_PEU_Link_Capabilities_Register"},
{0x690020, 1, 1634, "PIU_PEU_Link_Control_Register"},
{0x690028, 1, 1635, "PIU_PEU_Link_Status_Register"},
{0x690030, 1, 1636, "PIU_PEU_Slot_Capabilities_Register"},
{0x691000, 1, 1637, "PIU_PEU_Uncorrectable_Error_Log_Enable_Register"},
{0x691008, 1, 1638, "PIU_PEU_Uncorrectable_Error_Interrupt_Enable_Register"},
{0x691010, 1, 1639, "PIU_PEU_Uncorrectable_Error_Interrupt_Status_Register"},
{0x691018, 1, 1640, "PIU_PEU_Uncorrectable_Error_Status_Clear_Register"},
{0x691020, 1, 1641, "PIU_PEU_Uncorrectable_Error_Status_Set_Register"},
{0x691028, 1, 1642, "PIU_PEU_Receive_Uncorrectable_Error_Header1_Log_Register"},
{0x691030, 1, 1643, "PIU_PEU_Receive_Uncorrectable_Error_Header2_Log_Register"},
{0x691038, 1, 1644, "PIU_PEU_Transmit_Uncorrectable_Error_Header1_Log_Register"},
{0x691040, 1, 1645, "PIU_PEU_Transmit_Uncorrectable_Error_Header2_Log_Register"},
{0x6a1000, 1, 1646, "PIU_PEU_Correctable_Error_Log_Enable_Register"},
{0x6a1008, 1, 1647, "PIU_PEU_Correctable_Error_Interrupt_Enable_Register"},
{0x6a1010, 1, 1648, "PIU_PEU_Correctable_Error_Interrupt_Status_Register"},
{0x6a1018, 1, 1649, "PIU_PEU_Correctable_Error_Status_Clear_Register"},
{0x6a1020, 1, 1650, "PIU_PEU_Correctable_Error_Status_Set_Register"},
{0x6e2000, 1, 1651, "PIU_PEU_CXPL_SERDES_Revision_Register"},
{0x6e2008, 1, 1652, "PIU_PEU_CXPL_AckNak_Latency_Threshold_Register"},
{0x6e2010, 1, 1653, "PIU_PEU_CXPL_AckNak_Latency_Timer_Register"},
{0x6e2018, 1, 1654, "PIU_PEU_CXPL_Replay_Timer_Threshold_Register"},
{0x6e2020, 1, 1655, "PIU_PEU_CXPL_Replay_Timer_Register"},
{0x6e2040, 1, 1656, "PIU_PEU_CXPL_Vendor_DLLP_Message_Register"},
{0x6e2050, 1, 1657, "PIU_PEU_CXPL_LTSSM_Control_Register"},
{0x6e2058, 1, 1658, "PIU_PEU_CXPL_DLL_Control_Register"},
{0x6e2060, 1, 1659, "PIU_PEU_CXPL_MACL_PCS_Control_Register"},
{0x6e2068, 1, 1660, "PIU_PEU_CXPL_MACL_Lane_Skew_Control_Register"},
{0x6e2070, 1, 1661, "PIU_PEU_CXPL_MACL_Symbol_Number_Register"},
{0x6e2078, 1, 1662, "PIU_PEU_CXPL_MACL_Symbol_Timer_Register"},
{0x6e2100, 1, 1663, "PIU_PEU_CXPL_Core_Status_Register"},
{0x6e2108, 1, 1664, "PIU_PEU_CXPL_Event_Error_Log_Enable_Register"},
{0x6e2110, 1, 1665, "PIU_PEU_CXPL_Event_Error_Interrupt_Enable_Register"},
{0x6e2118, 1, 1666, "PIU_PEU_CXPL_Event_Error_Interrupt_Status_Register"},
{0x6e2120, 1, 1667, "PIU_PEU_CXPL_Event_Error_Status_Clear_Register"},
{0x6e2128, 1, 1668, "PIU_PEU_CXPL_Event_Error_Set_Register"},
{0x6e2130, 1, 1669, "PIU_PEU_Link_Bit_Error_Counter_I_Register"},
{0x6e2138, 1, 1670, "PIU_PEU_Link_Bit_Error_Counter_II_Register"},
{0x6e2200, 1, 1671, "PIU_PEU_SERDES_PLL_Control_Register"},
{0x6e2300, 8, 1672, "PIU_PEU_SERDES_Receiver_Lane_Control_Register"},
{0x6e2380, 8, 1680, "PIU_PEU_SERDES_Receiver_Lane_Status_Register"},
{0x6e2400, 8, 1688, "PIU_PEU_SERDES_Transmitter_Control_Register"},
{0x6e2480, 8, 1696, "PIU_PEU_SERDES_Transmitter_Status_Register"},
{0x6e2500, 2, 1704, "PIU_PEU_SERDES_Test_Configuration_Register"},
/*
 * Dump routine for the PIU device state.
 * NOTE(review): only the signature fragment is visible in this chunk;
 * the body was lost in extraction.
 */
void piu_dump(config_dev_t
* config_devp
)
* Initialize PIU CSR with power on reset value
/*
 * Reset the PIU CSR block.  The visible portion resets every word of
 * the Event Queue State register group to EQ_IDLE; other power-on
 * reset assignments (and the function braces) were lost in extraction.
 */
void piu_init_csr(pcie_model_t
*piup
)
piu_csr_t
*csrs
= &piup
->csrs
;
/* Number of EQ state words comes from the descriptor table above. */
nwords
= pcie_csrs
[PIU_Event_Queue_State_Register
].nwords
;
for (i
= 0; i
< nwords
; i
++)
csrs
->Event_Queue_State_Register
[i
] = EQ_IDLE
;
/*
 * Downbound CPU PIO access into PCIE space.
 * Decodes the physical address into a region (Cfg/IO, MEM32, MEM64,
 * or the 8MB CSR region) and dispatches to the matching helper
 * (piu_io_access / piu_cfg_access / piu_mem_access / piu_csr_access).
 * Returns a pcieCompleter status; out-of-range accesses return UR
 * (Unsupported Request) and illegal 8-byte Cfg/IO accesses return CA
 * (Completer Abort).
 * NOTE(review): the switch arms, braces and several declarations
 * (region, cfgio, status, count, node_id) were lost in extraction.
 */
// bool_t piu_cpu_access(simcpu_t *sp, config_addr_t *cap, tpaddr_t offset, maccess_t memop, uint64_t *regp)
pcieCompleter
piu_cpu_access(pcie_model_t
*piup
, tpaddr_t paddr
, maccess_t memop
, uint64_t *regp
,SAM_DeviceId
* id
)
uint64_t addr
, pa
= paddr
;
* N2 PIU only supports 1,2,4 and 8-byte aligned PIO access the 64GB region
* and 8-byte to the CSRs in the 8MB region (section 16.3.2.1, N2 PRM Rev. 1.0)
switch(memop
& MA_Size_Mask
) {
region
= piu_decode_region(piup
, pa
, &addr
);
/* addr bit 28 distinguishes Cfg (0) from IO (1) within the
   Cfg/IO subregion -- presumably; confirm against the PRM. */
cfgio
= GETMASK64(addr
, 28, 28);
DBGDEV( piup
, "ERROR: illegal 8 byte access to PCI "
"Cfg/IO addr = 0x%llx\n on node %d", addr
, node_id
);
return pcieCompleter(CA
); // XXX ???
uint64_t ioaddr
= addr
& PCIE_IO_ADDR_MASK
;
status
= piu_io_access(piup
, memop
, ioaddr
, count
, regp
,id
);
status
= piu_cfg_access(piup
, memop
, addr
, count
, regp
,id
);
status
= piu_mem_access(piup
, memop
, addr
, count
, regp
, _PCIE_MEM32
,id
);
status
= piu_mem_access(piup
, memop
, addr
, count
, regp
, _PCIE_MEM64
,id
);
status
= piu_csr_access(piup
, memop
, addr
, regp
);
/* Report back the completer's SAM device id to the caller. */
*id
= ((samPiu
*)piup
->sam_piu
)->samId
;
printf("ERROR: out of range access to PCIE space: "
return pcieCompleter(UR
);
* Decode PCIE non-cachable regions
* - 64GB region partitioned into three subregions as
* * PCIE-A Cfg/IO (512 MB)
* * PCIE-A MEM32 (16 MB - 2 GB)
* * PCIE-A MEM64 (16 MB - 32 GB)
/*
 * Classify a physical address into a PIU region and return the
 * region-relative offset through *offset.  Checks the DMU CSR page
 * first, then the 64GB PIU window (Cfg/IO, MEM32, MEM64 in order).
 * Falls through to PIU_REGION_UNMAPPED when nothing matches.
 * NOTE(review): the region return statements inside each matching
 * branch were lost in extraction.
 */
// piu_region_t piu_decode_region(simcpu_t *sp, pcie_model_t *piup, uint64_t pa, uint64_t *offset)
piu_region_t
piu_decode_region(pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
uint64_t reg_addr
= pa
& PHYS_ADDR_MASK
;
if (reg_addr
== (PHYS_ADDR_DMU
)) {
/* Accesses in the 8MB DMU CSR page must fall in the mapped gap. */
if ((pa
& DMU_8MB_GAP_MASK
) == 0) {
*offset
= pa
& DMU_8MB_OFFSET_MASK
;
/* should be a fatal() */
printf("ERROR: illegal access to PIU CSRs: "
if ((reg_addr
>=PHYS_ADDR_PIU_LB
) && (reg_addr
<=PHYS_ADDR_PIU_UB
)) {
if (piu_decode_cfgio(piup
, pa
, offset
)) {
} else if (piu_decode_mem32(piup
, pa
, offset
)) {
} else if (piu_decode_mem64(piup
, pa
, offset
)) {
return PIU_REGION_UNMAPPED
;
* Decode PCIE Cfg/IO region
/*
 * Test whether pa falls inside the NCU-programmed Cfg/IO window.
 * On a hit, stores the window-relative offset (pa masked with
 * PCIE_IOCON_ADDR_MASK) through *offset.  Return statements for the
 * hit/miss paths were lost in extraction.
 */
bool piu_decode_cfgio(pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
const n2Ncu::map_info_t
*map
;
samPiu
* sp
= (samPiu
*)piup
->sam_piu
;
/* Window base/mask/enable come from the NCU's mapping table. */
map
= sp
->getNcu()->getMap(n2Ncu::PIU_REGION_CFGIO
);
if (map
->enable
&& ((pa
& map
->mask
) == map
->base
)) {
*offset
= pa
& PCIE_IOCON_ADDR_MASK
;
* Decode PCIE MEM32 region
/*
 * Test whether pa falls inside the NCU-programmed MEM32 window.
 * On a hit, stores the offset within the window (pa modulo the
 * window size) through *offset.  Return statements were lost in
 * extraction.
 */
bool piu_decode_mem32(pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
const n2Ncu::map_info_t
*map
;
samPiu
* sp
= (samPiu
*)piup
->sam_piu
;
map
= sp
->getNcu()->getMap(n2Ncu::PIU_REGION_MEM32
);
if (map
->enable
&& ((pa
& map
->mask
) == map
->base
)) {
/* size is assumed to be a power of two, so size-1 is the offset mask. */
*offset
= pa
& (map
->size
- 1);
* Decode PCIE MEM64 region
/*
 * Test whether pa falls inside the NCU-programmed MEM64 window.
 * On a hit, forms the PCIE-side address by OR-ing the window-relative
 * bits with the software-programmed Mem_64_PCIE_Offset_Register.
 * Return statements were lost in extraction.
 */
bool piu_decode_mem64(pcie_model_t
*piup
, uint64_t pa
, uint64_t *offset
)
const n2Ncu::map_info_t
*map
;
piu_csr_t
*csrs
= &piup
->csrs
;
samPiu
* sp
= (samPiu
*)piup
->sam_piu
;
map
= sp
->getNcu()->getMap(n2Ncu::PIU_REGION_MEM64
);
if (map
->enable
&& ((pa
& map
->mask
) == map
->base
)) {
/* Upper PCIE address bits are supplied by the offset CSR. */
pcie_offset
= csrs
->Mem_64_PCIE_Offset_Register
;
*offset
= (pa
& ~map
->mask
& PCIE_MEM64_ADDR_MASK
) | pcie_offset
;
* Access PCIE CSRs (downbound)
/*
 * Read or write one 64-bit PIU CSR at the given byte offset.
 * - Rejects any access that is not 8 bytes wide (returns CA).
 * - Maps offset -> (register group index, flat word index) via
 *   piu_offset2reg(); unknown offsets return CA.
 * - Writes: read-only registers are rejected; otherwise the register
 *   is updated with register-specific masking and side effects
 *   (mondo interrupts, event-queue state transitions, W1C/W1S error
 *   status registers).
 * - Reads: "load-only" registers return derived/live values instead
 *   of stored state.
 * NOTE(review): the switch keywords, break statements, braces and the
 * read/write dispatch around these case lists were lost in extraction.
 * NOTE(review): the "®" characters below are mis-encoded "&reg..."
 * sequences ("&regx", "&regname") from the extraction, not real code.
 */
// bool_t piu_csr_access(simcpu_t *sp, pcie_model_t *piup, maccess_t memop, uint64_t offset, uint64_t *regp)
pcieCompleter
piu_csr_access(pcie_model_t
*piup
, maccess_t memop
, uint64_t offset
, uint64_t *regp
)
uint64_t old_value
, value
, new_error
, *csrs
= (uint64_t *)&piup
->csrs
;
samPiu
*sp
= (samPiu
*)piup
->sam_piu
;
* PIU only supports 8-byte accesses to registers
size
= memop
& MA_Size_Mask
;
if (size
!= MA_Size64
) return pcieCompleter(CA
); // XXX ???
index
= piu_offset2reg(offset
, &regx
);
if (index
== UND_PCIE_CSRS
) {
DBGDEV(piup
, "Access illegal PCIE register at offset "
"= 0x%llx on node %d\n", offset
, node_id
);
return pcieCompleter(CA
); // XXX ???
* read/write PCIE registers
/* wordx = index of this word within its register group. */
wordx
= regx
- pcie_csrs
[index
].regx
;
strcpy(regname
, pcie_csrs
[index
].name
);
if (pcie_csrs
[index
].nwords
> 1)
sprintf(&regname
[strlen(regname
)], "[%d]", wordx
);
* check on Read only registers
/* Writes to any of the following registers are rejected with CA. */
case PIU_Interrupt_State_Status_Register_1
:
case PIU_Interrupt_State_Status_Register_2
:
case PIU_INTX_Status_Register
:
case PIU_Event_Queue_State_Register
:
case PIU_IMU_Interrupt_Status_Register
:
case PIU_DMC_Core_and_Block_Error_Status_Register
:
case PIU_MMU_Interrupt_Status_Register
:
case PIU_ILU_Interrupt_Status_Register
:
case PIU_Packet_Scoreboard_DMA_Register_Set
:
case PIU_Packet_Scoreboard_PIO_Register_Set
:
case PIU_Transaction_Scoreboard_Register_Set
:
case PIU_Transaction_Scoreboard_Status_Register
:
case PIU_PEU_Egress_Credits_Consumed_Register
:
case PIU_PEU_Egress_Credit_Limit_Register
:
case PIU_PEU_Egress_Retry_Buffer_Register
:
case PIU_PEU_Ingress_Credits_Allocated_Register
:
case PIU_PEU_Ingress_Credits_Received_Register
:
case PIU_PEU_Other_Event_Interrupt_Status_Register
:
case PIU_PEU_Device_Capabilities_Register
:
case PIU_PEU_Device_Status_Register
:
case PIU_PEU_Link_Capabilities_Register
:
case PIU_PEU_Link_Status_Register
:
case PIU_PEU_Uncorrectable_Error_Interrupt_Status_Register
:
case PIU_PEU_Correctable_Error_Interrupt_Status_Register
:
case PIU_PEU_CXPL_SERDES_Revision_Register
:
case PIU_PEU_CXPL_AckNak_Latency_Timer_Register
:
case PIU_PEU_CXPL_Replay_Timer_Register
:
case PIU_PEU_CXPL_Core_Status_Register
:
case PIU_PEU_CXPL_Event_Error_Interrupt_Status_Register
:
case PIU_PEU_Link_Bit_Error_Counter_II_Register
:
case PIU_PEU_SERDES_Receiver_Lane_Status_Register
:
case PIU_PEU_SERDES_Transmitter_Status_Register
:
DBGDEV(piup
, "Error: Write Read-Only Register "
"'%s' offset=0x%llx value=0x%llx on node %d\n",
pcie_csrs
[index
].name
, offset
, *regp
, node_id
);
// return false; /* FIXME: should trap on the error */
return pcieCompleter(CA
); // XXX ???
DBGDEV(piup
, "Write PIU register '%s' at offset = "
"0x%llx value = 0x%llx on node %d\n",
pcie_csrs
[index
].name
, offset
, *regp
, node_id
);
/* Write side: per-register side effects. */
case PIU_Interrupt_Mapping_Registers
:
/* Re-deliver the mondo with the (possibly retargeted) mapping. */
piu_mondo_interrupt(piup
,wordx
+INO_INTA
, \
(irq_state_t
)(piu_get_irq_state(piup
, wordx
+INO_INTA
)));
case PIU_Interrupt_Clear_Registers
:
// piu_set_irq_state(piup, wordx+INO_INTA, (irq_state_t)(*regp & MASK64(1,0)));
piu_mondo_interrupt(piup
,wordx
+INO_INTA
,(irq_state_t
)(*regp
& MASK64(1,0)));
case PIU_INT_A_Clear_Register
:
case PIU_INT_B_Clear_Register
:
case PIU_INT_C_Clear_Register
:
case PIU_INT_D_Clear_Register
:
/* Clearing an INTx drops the corresponding mondo back to idle. */
piu_mondo_interrupt(piup
, index
-PIU_INT_A_Clear_Register
+INO_INTA
, IRQ_IDLE
);
case PIU_Event_Queue_Control_Set_Register
:
if (value
& MASK64(57,57)) {
* upon ENOVERR set, update the OVERR and STATE field
* of the EQ Tail and State register
piup
->csrs
.Event_Queue_Tail_Register
[wordx
] |= MASK64(57,57);
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_ERROR
;
if (value
& MASK64(44,44))
* upon EN bit set, update the STATE bit of
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_ACTIVE
;
case PIU_Event_Queue_Control_Clear_Register
:
if (value
& MASK64(57,57))
piup
->csrs
.Event_Queue_Tail_Register
[wordx
] &= ~MASK64(57,57);
if (value
& MASK64(47,47)) {
if (piup
->csrs
.Event_Queue_State_Register
[wordx
] == EQ_ERROR
)
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_IDLE
;
if (value
& MASK64(44,44))
piup
->csrs
.Event_Queue_State_Register
[wordx
] = EQ_IDLE
;
case PIU_Event_Queue_Tail_Register
:
case PIU_Event_Queue_Head_Register
:
/* Head/Tail updates are serialized against MSI delivery; when the
   queue drains (head == tail) the pending EQ interrupt is cleared. */
pthread_mutex_lock(&sp
->msiMutex
[wordx
]);
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(6,0));
if(piup
->csrs
.Event_Queue_Tail_Register
[wordx
] == \
(piup
->csrs
.Event_Queue_Head_Register
[wordx
])){
samPiu
* sp
= (samPiu
*)piup
->sam_piu
;
sp
->setPending(INO_EQLO
+wordx
,false);
pthread_mutex_unlock(&sp
->msiMutex
[wordx
]);
case PIU_MSI_Mapping_Register
:
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(63,62)|MASK64(5,0));
case PIU_MSI_Clear_Registers
:
/* W1C of the EQWR_N bit in the matching mapping register. */
if (value
& MASK64(62,62))
piup
->csrs
.MSI_Mapping_Register
[wordx
] &= ~MASK64(62,62);
case PIU_MSI_32_bit_Address_Register
:
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(31,16));
case PIU_MSI_64_bit_Address_Register
:
WRITE_PIU_CSR(csrs
[regx
], value
, MASK64(63,16));
case PIU_IMU_Error_Status_Clear_Register
:
/* W1C aliases onto the Set register's stored state. */
piup
->csrs
.IMU_Error_Status_Set_Register
&= ~value
;
case PIU_IMU_Error_Status_Set_Register
:
* W1S to simulate actual IMU error occurence
new_error
= value
& ~old_value
;
csrs
[regx
] = new_error
| old_value
;
piu_simulate_imu_error(piup
, new_error
);
case PIU_MMU_Error_Status_Clear_Register
:
piup
->csrs
.MMU_Error_Status_Set_Register
&= ~value
;
case PIU_MMU_Error_Status_Set_Register
:
* W1S to simulate actual MMU error occurence
new_error
= value
& ~old_value
;
csrs
[regx
] = new_error
| old_value
;
piu_simulate_mmu_error(piup
, new_error
);
* Check on Load-Only (write-only but reads always return 0) CSRs
/* Read side: registers whose read value is derived, not stored. */
case PIU_Event_Queue_Control_Set_Register
:
case PIU_Event_Queue_Control_Clear_Register
:
case PIU_Interrupt_Clear_Registers
:
value
= piu_get_irq_state(piup
, wordx
+INO_INTA
);
case PIU_INT_A_Clear_Register
:
case PIU_INT_B_Clear_Register
:
case PIU_INT_C_Clear_Register
:
case PIU_INT_D_Clear_Register
:
value
= piu_get_intx_state(piup
, index
-PIU_INT_A_Clear_Register
+INO_INTA
);
case PIU_MSI_Clear_Registers
:
value
= piup
->csrs
.MSI_Clear_Registers
[wordx
] & MASK64(62, 62);
case PIU_IMU_Error_Status_Clear_Register
:
value
= piup
->csrs
.IMU_Error_Status_Set_Register
;
case PIU_IMU_Interrupt_Status_Register
:
/* Interrupt status = error status gated by the enable mask. */
value
= piup
->csrs
.IMU_Error_Status_Set_Register
&
piup
->csrs
.IMU_Interrupt_Enable_Register
;
case PIU_MMU_Error_Status_Clear_Register
:
value
= piup
->csrs
.MMU_Error_Status_Set_Register
;
case PIU_MMU_Interrupt_Status_Register
:
value
= piup
->csrs
.MMU_Error_Status_Set_Register
&
piup
->csrs
.MMU_Interrupt_Enable_Register
;
case PIU_PEU_Link_Status_Register
:
// return the hard coded value for link width=x8, speed=2.5Gbps
DBGDEV(piup
, "Read PCIE register '%s' at offset = "
"0x%llx value = 0x%llx on node %d\n",
pcie_csrs
[index
].name
, offset
, value
, node_id
);
// if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
printf("ERROR: PIU only supports 8 byte CSR access, "
"node is %d\n", node_id
);
return pcieCompleter(SC
);
* Access PCIE device's IO space (downbound)
/*
 * Forward a downbound IO-space access onto the PCIE bus interface.
 * Dispatches on the op kind (the two busif_access calls differ only
 * in the read/write flag); count is translated to a PCIE byte-enable
 * via pcie_countToBe().  NOTE(review): switch case labels, braces and
 * the return statement were lost in extraction.
 */
// bool_t piu_io_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp)
pcieCompleter
piu_io_access(pcie_model_t
*piup
, maccess_t memop
, uint64_t ioaddr
, uint32_t count
, uint64_t *regp
,SAM_DeviceId
* id
)
samPiu
* sp
= (samPiu
*)piup
->sam_piu
;
switch (memop
& MA_Op_Mask
) {
status
= sp
->busIf
->busif_access(PCIE_IO
,false,ioaddr
,(void*)regp
,1,pcie_countToBe(count
),sp
->getReqId(),mem_addr64
,id
);
// last arg is a don't care above.
status
= sp
->busIf
->busif_access(PCIE_IO
,true,ioaddr
,(void*)regp
,1,pcie_countToBe(count
),sp
->getReqId(),mem_addr64
,id
);
* Access PCIE device's Configuation space (downbound)
* The data is inverted between the big and little endian format
* because PCI cfg space is structured as little endian
/*
 * Forward a downbound configuration-space access onto the PCIE bus.
 * Extracts the bus number from the address to pick Type0 vs Type1
 * routing (t_type, set in code lost to extraction).  A CA completion
 * on a read of config offset 0 is presumably the "no device" case
 * handled specially -- confirm against the unmangled source.
 */
// bool_t piu_cfg_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp)
pcieCompleter
piu_cfg_access(pcie_model_t
*piup
, maccess_t memop
, uint64_t ioaddr
, uint32_t count
, uint64_t *regp
,SAM_DeviceId
* id
)
samPiu
* sp
= (samPiu
*)piup
->sam_piu
;
bus
= ioaddr
>> busShift
& 0xff;
if ( bus
!= sp
->devif_getBusNo() )
switch (memop
& MA_Op_Mask
) {
status
= sp
->busIf
->busif_access(PCIE_CFG
,false,ioaddr
,(void*)regp
,1,pcie_countToBe(count
),sp
->getReqId(),t_type
,id
);
if((status
.status
== CA
) && ((ioaddr
& 0xfff) == 0))
status
= sp
->busIf
->busif_access(PCIE_CFG
,true,ioaddr
,(void*)regp
,1,pcie_countToBe(count
),sp
->getReqId(),t_type
,id
);
* Access PCIE device's MEM32/MEM64 space (downbound)
/*
 * Forward a downbound memory-space access onto the PCIE bus.
 * space_id selects 32- vs 64-bit addressing for the TLP; counts
 * larger than 4 bytes are issued as count/4 dword beats, otherwise
 * as a single beat with a byte-enable from pcie_countToBe().
 * NOTE(review): switch case labels, braces and the return statement
 * were lost in extraction.
 */
//bool_t piu_mem_access(pcie_model_t *piup, maccess_t memop, uint64_t paddr, uint32_t count, uint64_t *regp,
pcieCompleter
piu_mem_access(pcie_model_t
*piup
, maccess_t memop
, uint64_t paddr
, uint32_t count
, uint64_t *regp
,
pcie_space_t space_id
,SAM_DeviceId
* id
)
samPiu
* sp
= (samPiu
*)piup
->sam_piu
;
switch (memop
& MA_Op_Mask
) {
status
= sp
->busIf
->busif_access(PCIE_MEM
,false,paddr
,(void*)regp
,(count
> 4)?count
/4:1,pcie_countToBe(count
),sp
->getReqId(), \
space_id
== _PCIE_MEM32
? mem_addr32
:mem_addr64
,id
);
status
= sp
->busIf
->busif_access(PCIE_MEM
,true,paddr
,(void*)regp
,(count
> 4)?count
/4:1,pcie_countToBe(count
),sp
->getReqId(), \
space_id
== _PCIE_MEM32
? mem_addr32
:mem_addr64
,id
);
* DMA transactions (upbound)
* piup: handle to pcie_model structure
* va: host virtual address accessed by the DMA transaction
* datap: data read from/written to the host memory
* count: data size counted in byte
* req_id: 16 bit requester Id
* type: access type (load vs. store)
* mode: addressing mode, can be either 32 bit or 64 bit address
/*
 * Upbound DMA entry point.  First checks whether the target address
 * matches the programmed MSI address (32- or 64-bit form depending on
 * mode); a matching store is routed to piu_msi_write().  Otherwise
 * the address is translated through the IOMMU and the resulting PA
 * is serviced with mmi_memread/mmi_memwrite.
 * NOTE(review): branches and several declarations (msi_addr, sun4v,
 * base, bit, pa) were lost in extraction.
 */
bool piu_dma_access(pcie_model_t
*piup
, tvaddr_t va
, uint8_t *datap
, int count
,
uint16_t req_id
, dev_access_t type
, dev_mode_t mode
,SAM_DeviceId id
)
bool is_msi
= false, status
;
/* 32-bit MSI match: compare VA[31:16] against the MSI address CSR. */
msi_addr
= piup
->csrs
.MSI_32_bit_Address_Register
& MASK64(31, 16);
is_msi
= ((va
& MASK64(31,16)) == msi_addr
);
* In sun4v mode, if EQ_base_addr_reg.bit[63]=0, msi_addr_reg.bit[63]
* is not used for comparison (see 16.3.9.8 of N2 PRM, rev 1.2)
sun4v
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 2, 2);
base
= GETMASK64(piup
->csrs
.Event_Queue_Base_Address_Register
, 63,63);
bit
= (base
== 0) ? 62 : 63;
msi_addr
= piup
->csrs
.MSI_64_bit_Address_Register
& MASK64(bit
, 16);
is_msi
= ((va
& MASK64(bit
,16)) == msi_addr
);
if (is_msi
&& type
== DA_Store
) {
status
= piu_msi_write(piup
, va
, datap
, count
, req_id
, mode
,id
);
* perform IOMMU operation
status
= piu_iommu(piup
, va
, req_id
, type
, mode
, &pa
,id
);
* VA -> PA translation is successful, do DMA transaction with pa
config_proc_t
*procp
= piup
->config_procp
;
//status = procp->proc_typep->dev_mem_access(procp, pa, datap, count, type);
mmi_memwrite(pa
,datap
,count
,id
);
mmi_memread(pa
,datap
,count
,id
);
printf("FATAL ERROR: don't know what to do with DVMA\n");
* This function performs IOMMU operation upon each DMA request.
/*
 * Top-level IOMMU dispatch for an upbound DMA address.
 * Reads the MMU control/status CSR bits: te (translation enable),
 * be (bypass enable) and sun4v (addressing mode), then routes to
 * piu_iommu_sun4v / piu_iommu_sun4u, or applies bypass address
 * arithmetic directly.  See the inline comments for the firmware
 * compatibility quirk around sun4v EQ bypass addresses.
 * NOTE(review): several branches/returns were lost in extraction.
 */
bool piu_iommu(pcie_model_t
*piup
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
,SAM_DeviceId id
)
te
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 0, 0);
be
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 1, 1);
sun4v
= GETMASK64(piup
->csrs
.MMU_Control_and_Status_Register
, 2, 2);
// check if this is a sun4v EQ bypass address
// the iommu is bypassed even if be bit in control status register is 0.
if(sun4v
&& ( (va
>> 39) == 0x1fff800) ){ // bits 63:50 all 1,s, 49:39 all 0's
// bypass is the correct behaviour here. However older fw releases
// enable the bypass, but still expect the address to be translated.
// the newer f/w releases (int12) disables the bypass. Hence
// continue to translate the addess for now, and hope newer f/w
// release never use the bypass mode in sun4v mode. XXX
// *pa = va & MASK64(38, 0);
return piu_iommu_sun4v(piup
, va
, req_id
, type
, mode
, pa
,id
);
}else if( (va
& MASK64(62,40)) != 0 ){
DBGDEV(piup
, "piu_iommu: sun4v mode, va = 0x%llx VA[62:40] != 0\n",va
);
DBGDEV(piup
, "piu_iommu: MMU is disabled, va = 0x%llx\n", va
);
return piu_iommu_sun4v(piup
, va
, req_id
, type
, mode
, pa
,id
);
if(sun4v
== 0 && mode
== PCIE_IS64
){ // be bit is a don't care in 32 bit mode
DBGDEV(piup
, "piu_iommu: sun4u bypass mode, 64bit addressing, be == 0, byp_err\n");
}else if( (va
& MASK64(63,50)) == (va
& MASK64(63,39)) ){
/* sun4u bypass: PA is simply VA[38:0]. */
*pa
= va
& MASK64(38, 0);
DBGDEV(piup
, "piu_iommu: sun4u bypass mode, 64bit addressing, byp_oor\n");
// check whether MMU is disabled
DBGDEV(piup
, "piu_iommu: MMU is disabled, va = 0x%llx\n", va
);
//perform IOMMU operation under SUN4U mode
return piu_iommu_sun4u(piup
, va
, req_id
, type
, mode
, pa
,id
);
* Translate VA -> PA, SUN4U mode
/*
 * SUN4U-mode IOMMU translation: walk the single in-memory TSB.
 * TSB base/size/page-size come from the MMU TSB Control CSR; the TTE
 * is fetched from guest memory with mmi_memread, byte-swapped (guest
 * TSB entries are big-endian relative to the host), and handed to
 * piu_iommu_va2pa() for validation and the final VA->PA composition.
 * NOTE(review): computation of pg_bits/tsb_sz/tte_addr and the
 * return paths were lost in extraction.
 */
bool piu_iommu_sun4u(pcie_model_t
*piup
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
, SAM_DeviceId id
)
config_proc_t
*procp
= piup
->config_procp
;
int ts
, ps
, pg_bits
, tsb_sz
, tte_idx
;
uint64_t tb
, vpn
, tte_addr
, tte
, pg_mask
;
ts
= GETMASK64(piup
->csrs
.MMU_TSB_Control_Register
, 3, 0);
ps
= GETMASK64(piup
->csrs
.MMU_TSB_Control_Register
, 8, 8);
tb
= piup
->csrs
.MMU_TSB_Control_Register
& MASK64(38, 13);
* determine the tte index in terms of
* - page size: 8K (ps=0) and 64K (ps=1)
* - number of TSB entries (=1K*2^ts = 2^(10+ts))
vpn
= (va
& MASK64(31, pg_bits
)) >> pg_bits
;
tte_idx
= (vpn
& (tsb_sz
-1)) << 3; /* each entry of 8 byte */
// status = procp->proc_typep->dev_mem_access(procp, tte_addr, (uint8_t *)&tte, 8, DA_Load);
mmi_memread(tte_addr
, (uint8_t *)&tte
, 8,id
);
tte
= ss_byteswap64(tte
);
// DBGDEV(lprintf(-1, "piu_iommu_sun4u: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
piu_iommu_va2pa(piup
,tte
, ps
, va
, req_id
, type
, mode
, pa
);
DBGDEV(piup
, "piu_iommu_sun4u: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
va
, *pa
, tte_addr
, tte
);
* Translate VA -> PA, SUN4V mode
/*
 * SUN4V-mode IOMMU translation.
 * 1. Form a 7-bit index from the requester's bus id (busid_sel picks
 *    which bus-id bits participate) plus a VA-derived bit lost in
 *    extraction, and look it up in the DEV2IOTSB register file to get
 *    a 5-bit IOTSB number.
 * 2. Use that number to read the IOTSB descriptor (base PA, VA offset,
 *    page size, TSB size) from the IOTSBDESC register file.
 * 3. Range-check VA[62:40]; on failure log the fault address/status
 *    CSRs, set the sun4v_va_oor error bit and raise an MMU error.
 * 4. Otherwise compute the offset-adjusted VPN, fetch and byte-swap
 *    the TTE from guest memory, and finish in piu_iommu_va2pa().
 * NOTE(review): computation of pg_bits/tsb_sz, mode branches and
 * return paths were lost in extraction.
 */
bool piu_iommu_sun4v(pcie_model_t
*piup
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
, SAM_DeviceId id
)
config_proc_t
*procp
= piup
->config_procp
;
piu_csr_t
*csrs
= &piup
->csrs
;
int i
, busid_sel
, busid
, ps
, ts
, pg_bits
, tsb_sz
, tte_idx
;
uint8_t idx
, iotsb_idx
, iotsb_no
;
uint64_t dev2iotsb
, offset
, base_pa
, vpn
, tte_addr
, tte
;
* Form 7 bit index id into the DEV2IOTSB table, which is implemented
* by a set of 16 x 64-bit registers, with each register containing
* 8 x 5-bit values to index into the IOTSBDESC table.
busid
= req_id
>> 0x8 & 0xff;
busid_sel
= GETMASK64(csrs
->MMU_Control_and_Status_Register
, 3, 3);
idx
|= busid_sel
? GETMASK64(busid
, 5, 0) : GETMASK64(busid
, 6, 1);
* Use the 7 bit index id to extract the 5-bit iotsb_no from the
* DEV2IOTSB table (total of 128 index cells out of 16 regs).
dev2iotsb
= csrs
->MMU_DEV2IOTSB_Registers
[idx
>>3];
iotsb_idx
= GETMASK64(idx
, 2, 0) << 3;
iotsb_no
= GETMASK64(dev2iotsb
, iotsb_idx
+ 4, iotsb_idx
);
* Use iotsb_no as index to retrieve IOTSB info from IOTSBDESC table
* (implemented by a set of 32 x 64-bit registers)
base_pa
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 59, 34);
offset
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 33, 7);
ps
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 6, 4);
ts
= GETMASK64(csrs
->MMU_IOTSBDESC_Registers
[iotsb_no
], 3, 0);
if ((va
& MASK64(62, 40)) != 0) {
uint64_t error_code
, trans_type
;
/* Log the faulting address and the TLP type of the failed access. */
csrs
->MMU_Translation_Fault_Address_Register
= va
& MASK64(63, 2);
trans_type
= (type
== DA_Load
) ? TLP_MRd_FMT_TYPE_IS32
: TLP_MWr_FMT_TYPE_IS32
;
trans_type
= (type
== DA_Load
) ? TLP_MRd_FMT_TYPE_IS64
: TLP_MWr_FMT_TYPE_IS64
;
csrs
->MMU_Translation_Fault_Status_Register
= req_id
| (trans_type
<< 16);
* raise mmu sun4v_va_oor error
error_code
= 1ULL<<SUN4V_VA_OOR_P
;
csrs
->MMU_Error_Status_Set_Register
|= error_code
;
piu_raise_mmu_error(piup
, error_code
);
* determine adjusted page number using encoded ps value
* and adjusted VA at a given offset
* FIXME: check underflow error on vpn (error = sun4v_va_adj_uf)
vpn
= ((va
& MASK64(39, pg_bits
)) >> pg_bits
) - offset
;
* calculate tte index in terms of TSB size
* FIXME: check out of range error on vpn (error = TRN_OOR)
tte_idx
= (vpn
& (tsb_sz
-1)) << 3;
tte_addr
= (base_pa
<< 13) + tte_idx
;
mmi_memread(tte_addr
,(uint8_t *)&tte
, 8,id
);
tte
= ss_byteswap64(tte
);
status
= true; //procp->proc_typep->dev_mem_access(procp, tte_addr, (uint8_t *)&tte, 8, DA_Load);
DBGDEV(piup
, "piu_iommu_sun4v: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
piu_iommu_va2pa(piup
, tte
, ps
, va
, req_id
, type
, mode
, pa
);
DBGDEV(piup
, "piu_iommu_sun4v: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
va
, *pa
, tte_addr
, tte
);
/*
 * Validate a fetched TTE and compose the final PA.
 * Checks, in order: TTE valid bit; DEV_KEY match against the
 * requester id (both sides masked with the TTE's FNM field when the
 * key-valid bit is set); and DATA_W for store requests.  On success
 * *pa = TTE's PA field | page offset of va.
 * NOTE(review): pg_bits derivation from ps, the ASSERT on the valid
 * bit, and the return paths were lost in extraction.
 */
bool piu_iommu_va2pa(pcie_model_t
*piup
, uint64_t tte
, int ps
, tvaddr_t va
, uint16_t req_id
, dev_access_t type
,
dev_mode_t mode
, tpaddr_t
*pa
)
bool tte_key_valid
, tte_data_w
, tte_data_v
;
uint64_t pg_mask
, tte_data_pa
;
tte_data_v
= GETMASK64(tte
, 0, 0);
tte_key_valid
= GETMASK64(tte
, 2, 2);
tte_dev_key
= GETMASK64(tte
, 63, 48);
* assert on invalid tte entry
* compare tte's DEV_KEY field with req_id
* According to N2 PIU PRM, the function number portion of
* the tte_dev_key and the source req_id should be masked
* with the FNM field of the tte.
uint16_t tte_fnm
= MASK64(15,3) | GETMASK64(tte
, 5, 3);
if ((tte_dev_key
& tte_fnm
) != (req_id
& tte_fnm
)) {
DBGDEV(piup
, "piu_iommu_va2pa: req_id=0x%lx not matching tte dev_key=0x%lx\n",
* check on DATA_W for the write request
tte_data_w
= GETMASK64(tte
, 1, 1);
if ((tte_data_w
== 0) && type
== DA_Store
) {
DBGDEV(piup
, "piu_iommu_sun4u: write to non-writable page: va = 0x%lx tte = 0x%lx\n",
* finally translate VA to PA
pg_mask
= (1 << pg_bits
) - 1;
tte_data_pa
= tte
& MASK64(38, pg_bits
);
*pa
= tte_data_pa
| (va
& pg_mask
);
/*
 * Assert an INTx (legacy) interrupt pin for a device: computes the
 * mondo INO (computation lost in extraction) and raises the mondo
 * with state IRQ_RECEIVED.
 */
bool piu_assert_intx(pcie_model_t
*piup
, uint8_t pin_no
, uint8_t dev_no
)
* FIXME: check if PIU supports more than 4 devices
* generate mondo interrupt
DBGDEV(piup
, "piu_mondo_interrupt assert: ino = %d\n", ino
);
piu_mondo_interrupt(piup
, ino
, IRQ_RECEIVED
);
* Deassert INTx interrupt
/*
 * Deassert an INTx interrupt pin.  Only logs the deassert; the mondo
 * state change is commented out (deliberately, per the surviving
 * comment -- presumably handled elsewhere; confirm in the full file).
 */
bool piu_deassert_intx(pcie_model_t
*piup
, uint8_t pin_no
, uint8_t dev_no
)
* FIXME: check if PIU supports more than 4 devices
DBGDEV(piup
, "piu_mondo_interrupt deassert: ino = %d\n", ino
);
// piu_mondo_interrupt(piup, ino, IRQ_IDLE);
/*
 * Generate IRQ mondo interrupt and update the interrupt state accordingly
 *
 * Implements the per-ino interrupt state machine (IDLE / RECEIVED /
 * PENDING), guarded by sp->piuMutex. For transitions that deliver an
 * interrupt, a pcie_mondo_t is assembled from the Interrupt Mapping
 * Register (thread id, mondo mode bit) plus the Mondo Data registers,
 * and sent via sp->sendMondo().
 *
 * NOTE(review): this chunk is missing the branch headers/braces that
 * select between the transitions, the declarations of 'old', 'V',
 * 'mdo_mode' and 'regx', and the mapping from 'ino' to 'regx'. Tokens
 * are preserved as found; the comments mark where each transition's
 * code begins.
 */
void piu_mondo_interrupt(pcie_model_t *piup, uint8_t ino, irq_state_t n)
    samPiu * sp = (samPiu *)piup->sam_piu;

    // DBGDEV( piup, "piu_mondo_interrupt: ino = %d\n", ino );

    /* get the current IRQ mondo state (under the PIU lock) */
    pthread_mutex_lock(&sp->piuMutex);
    old = (irq_state_t)piu_get_irq_state(piup, ino);

    /* V = valid bit of the ino's Interrupt Mapping Register */
    V = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 31, 31);

    // cannot send the interrupt as the valid bit is not set in the mapping
    // set the bit in sp->pendingIntr. The interrupt would be
    // sent as soon as the valid bit is set.

    /* --- transition: IDLE -> RECEIVED (valid bit clear: just record) --- */
    // printf("IDLE->RECV\n");
    piu_set_irq_state(piup, ino, n);

    /* --- transition: IDLE -> PENDING (valid bit set: build and send mondo) --- */
    pcie_mondo_t irq_mondo; // = (pcie_mondo_t *)Xcalloc(1, pcie_mondo_t);

    irq_mondo.thread_id = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 30, 25);
    mdo_mode = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 63, 63);

    /* data[0]: target thread id in bits 11:6, ino in bits 5:0 */
    irq_mondo.data[0] = (irq_mondo.thread_id << 6) | ino;

    /* alternate encoding: merge Mondo Data 0 upper bits with tid/ino
     * (NOTE(review): selection between the two data[0] forms — presumably
     * keyed on mdo_mode — is missing from this chunk) */
    uint64_t data0 = piup->csrs.Interrupt_Mondo_Data_0_Register;
    irq_mondo.data[0] = (data0 & MASK64(63, 12)) | (irq_mondo.thread_id << 6) | ino;
    irq_mondo.data[1] = piup->csrs.Interrupt_Mondo_Data_1_Register;

    /* send IRQ mondo to target CPU */
    // the valid bit is set in the interrupt mapping register.
    // printf("IDLE->PEND\n");
    piu_set_irq_state(piup, ino, n);
    sp->sendMondo(&irq_mondo);

    /* unexpected IDLE->PEND request: warn, then record the state */
    printf("warning piu_mondo_interrupt:transition IDLE->PEND for ino %d\n",ino);
    piu_set_irq_state(piup, ino, n);

    // printf("IDLE->IDLE\n");

    }else if(old == IRQ_RECEIVED) {

    /* --- transitions out of RECEIVED --- */
    printf("warning piu_mondo_interrupt:transition RECVD->IDLE for ino %d\n",ino);
    piu_set_irq_state(piup, ino, n);

    // should not be called from anywhere
    printf("warning piu_mondo_interrupt:transition RECVD->PEND for ino %d .. Ignored\n",ino);

    // check to see if the valid bit has been set
    // printf("RECV->RECV\n");

    // check to see if the interrupt is still pending
    if(sp->getPending(ino) == false){
        // printf("RECV->IDLE\n");
        piu_set_irq_state(piup, ino, n);

    // the interrupt is pending,
    // the mapping reg valid bit has been set. send the interrupt now.
    pcie_mondo_t irq_mondo; // = (pcie_mondo_t *)Xcalloc(1, pcie_mondo_t);

    irq_mondo.thread_id = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 30, 25);
    mdo_mode = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 63, 63);

    irq_mondo.data[0] = (irq_mondo.thread_id << 6) | ino;

    uint64_t data0 = piup->csrs.Interrupt_Mondo_Data_0_Register;
    irq_mondo.data[0] = (data0 & MASK64(63, 12)) | (irq_mondo.thread_id << 6) | ino;
    irq_mondo.data[1] = piup->csrs.Interrupt_Mondo_Data_1_Register;

    // the valid bit is set in the interrupt mapping register.
    sp->sendMondo(&irq_mondo);

    // printf("RECV->PENDING\n");
    piu_set_irq_state(piup, ino, n);

    }else if(old == IRQ_PENDING){

    // check the status of the interrupt. if it is still pending
    // there, raise the interrupt again.
    pcie_mondo_t irq_mondo; // = (pcie_mondo_t *)Xcalloc(1, pcie_mondo_t);

    irq_mondo.thread_id = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 30, 25);
    mdo_mode = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 63, 63);

    irq_mondo.data[0] = (irq_mondo.thread_id << 6) | ino;

    uint64_t data0 = piup->csrs.Interrupt_Mondo_Data_0_Register;
    irq_mondo.data[0] = (data0 & MASK64(63, 12)) | (irq_mondo.thread_id << 6) | ino;
    irq_mondo.data[1] = piup->csrs.Interrupt_Mondo_Data_1_Register;

    sp->sendMondo(&irq_mondo);

    // printf("PEND->PEND\n");
    piu_set_irq_state(piup, ino, n);

    // printf("PEND->IDLE\n");
    piu_set_irq_state(piup, ino, n);

    // printf("warning piu_mondo_interrupt:transition PEND->RCVD for ino %d .. Ignored\n",ino);
    // piu_set_irq_state(piup, ino, n);

    pthread_mutex_unlock(&sp->piuMutex);
/*
 * Mirror a mondo state change into the INTX Status Register for the
 * legacy interrupt inos (INTA..INTD map to status bits 3..0).
 *
 * NOTE(review): the computation of 'val' from 'bit' (presumably
 * 1 << bit) and the branch choosing between clear (&= ~val) and set
 * (|= val) based on 'n' are missing from this chunk.
 */
void piu_set_intx_state(pcie_model_t *piup, uint8_t ino, irq_state_t n)
    /* INTA is the highest status bit: bit = 3 for INTA, 0 for INTD */
    bit = 3 - (ino - INO_INTA);

    /* clear path */
    piup->csrs.INTX_Status_Register &= ~val;
    /* set path */
    piup->csrs.INTX_Status_Register |= val;
/*
 * Read the INTx status bit for a legacy interrupt ino
 * (INTA..INTD map to status bits 3..0).
 *
 * NOTE(review): declarations of 'bit'/'val' and the final
 * 'return val;' are missing from this chunk.
 */
int piu_get_intx_state(pcie_model_t *piup, uint8_t ino)
    bit = 3 - (ino - INO_INTA);
    val = (piup->csrs.INTX_Status_Register >> bit) & 1;
/*
 * Update Interrupt State Status Register with the new mondo state 'n'
 *
 * NOTE(review): the computation of 'bit' from 'ino' (2 bits per ino
 * within the selected register) and the declaration of 'regp' are
 * missing from this chunk.
 */
void piu_set_irq_state(pcie_model_t *piup, uint8_t ino, irq_state_t n)
    /* determine which status register to use in terms of ino */
    regp = ino<32 ? &piup->csrs.Interrupt_State_Status_Register_1
                  : &piup->csrs.Interrupt_State_Status_Register_2;

    /*
     * each mondo state is encoded via a 2 bit value:
     * update the appropriate bits of the register
     */
    *regp &= ~(IRQ_STATE_MASK << bit);
    *regp |= ((uint64_t)n << bit);

    /* update the INTx status register if ino = 20-23 */
    piu_set_intx_state(piup, ino, n);
/*
 * Get mondo interrupt state
 *
 * Reads the 2-bit state for 'ino' out of the appropriate Interrupt
 * State Status Register (register 1 for ino < 32, else register 2).
 *
 * NOTE(review): the computation of 'bit', the declarations of
 * 'val'/'state', and the final 'return state;' are missing from this
 * chunk.
 */
int piu_get_irq_state(pcie_model_t *piup, uint8_t ino)
    val = ino<32 ? piup->csrs.Interrupt_State_Status_Register_1
                 : piup->csrs.Interrupt_State_Status_Register_2;
    state = (irq_state_t)((val >> bit) & IRQ_STATE_MASK);
/*
 * This function handles memory write request for both MSI and MSI-X.
 *
 *   piup:      handle to pcie_model structure
 *   msi_addr:  host address for MSI/MSI-X write, can be either 32 bit or 64 bit
 *   msi_datap: pointer to the MSI/MSI-X data to write
 *   count:     always be 4 bytes for MSI or MSI-X request
 *   req_id:    16 bit requester Id
 *   mode:      addressing mode, can be either 32 bit or 64 bit address
 *   id:        SAM device id used for downstream memory access
 *
 * NOTE(review): the declaration of 'msi_data' and 'mapping', the
 * count-validation branch around the "invalid msi_data size" message,
 * and the error/return paths are missing from this chunk.
 */
bool piu_msi_write(pcie_model_t *piup, uint64_t msi_addr, uint8_t *msi_datap,
                   int count, uint16_t req_id, dev_mode_t mode, SAM_DeviceId id)
    int map_idx, v, eqwr_n, eqnum;

    /* validate count, should always be 4 */
    memcpy((void *)&msi_data, (void *)msi_datap, count);
    DBGDEV( piup, "piu_msi_write: invalid msi_data size = %d \n", count);

    /*
     * PIU implements a total of 256 MSI_Mapping_Registers as the mapping
     * table to allow SW to map each MSI/MSI-X request to an event queue.
     * The lower 8 bits of the MSI/MSI-X data field is used to index into
     * the mapping table.
     */
    DBGDEV( piup, "piu_msi_write: MSI addr = 0x%llx size = %d data = 0x%02x_%02x_%02x_%02x\n",
            msi_addr, msi_data[0], msi_data[1], msi_data[2], msi_data[3]);

    map_idx = msi_data[3] & MASK64(7,0);
    // map_idx = msi_data[0] & MASK64(7,0);

    mapping = piup->csrs.MSI_Mapping_Register[map_idx];

    v = GETMASK64(mapping, 63, 63);       /* mapping entry valid */
    eqwr_n = GETMASK64(mapping, 62, 62);  /* event queue write disable */
    eqnum = GETMASK64(mapping, 5, 0);     /* target event queue number */

    /* assemble the event queue record */
    // eq_record_t *record = (eq_record_t *)Xcalloc(1, eq_record_t);
    // reserved bit should always be initialized to 0
    record.fmt_type = mode ? TLP_MSI_FMT_TYPE_IS64 : TLP_MSI_FMT_TYPE_IS32;
    record.addr_15_2 = GETMASK64(msi_addr, 15, 2);

    /* data0, for lower 16 bits */
    record.data0 = (msi_data[2] << 8) | msi_data[3];

    record.addr_hi = GETMASK64(msi_addr, 63, 16);

    /* data1, for higher 16 bits */
    record.data1 = (msi_data[0] << 8) | msi_data[1];

    piu_eq_write(piup, eqnum, &record, req_id, mode,id);
/*
 * Write event queue records to the queue and update the tail pointer of the queue.
 *
 * On an active, non-overflowed queue: computes head/tail/next, writes the
 * record at the tail slot (VA derived from the EQ base register, then
 * IOMMU-translated), advances the tail, and — when the queue was empty —
 * raises the queue's mondo interrupt (INO_EQLO + eqnum). On overflow,
 * sets the tail overflow bit, moves the queue to EQ_ERROR and raises the
 * DMU internal interrupt (INO_DMU).
 *
 * NOTE(review): branch headers/braces (e.g. the 'full' overflow branch
 * and the 'empty' interrupt branch), the computation of 'rec_va'
 * (presumably base + offset), and declarations of 'overr'/'state'/
 * 'status' are missing from this chunk. 'sp' also appears declared
 * multiple times, reflecting lost scope boundaries.
 */
bool piu_eq_write(pcie_model_t *piup, int eqnum, eq_record_t *record,
                  uint16_t req_id, dev_mode_t mode, SAM_DeviceId id)
    /* queue overflow flag (tail reg bit 57) and 3-bit queue state */
    overr = GETMASK64(piup->csrs.Event_Queue_Tail_Register[eqnum], 57, 57);
    state = GETMASK64(piup->csrs.Event_Queue_State_Register[eqnum], 2, 0);

    DBGDEV( piup, "piu_eq_write: eqnum = %d state = %d\n", eqnum, state);

    if ((state == EQ_ACTIVE) && !overr) {
        int head = GETMASK64(piup->csrs.Event_Queue_Head_Register[eqnum], 6, 0);
        int tail = GETMASK64(piup->csrs.Event_Queue_Tail_Register[eqnum], 6, 0);
        int next = (tail + 1) % EQ_NUM_ENTRIES;

        bool full = (next == head);

        /* set the overflow bit and generate a DMU internal interrupt (ino 62) */
        piup->csrs.Event_Queue_Tail_Register[eqnum] |= MASK64(57,57);
        piup->csrs.Event_Queue_State_Register[eqnum] = EQ_ERROR;
        samPiu * sp = (samPiu *)piup->sam_piu;
        sp->setPending(INO_DMU,true);
        piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);

        /* determine the address (VA) to write the event queue record */
        uint64_t base, offset, rec_va, pa;

        base = piup->csrs.Event_Queue_Base_Address_Register & MASK64(63,19);
        offset = (eqnum * EQ_NUM_ENTRIES + tail) * EQ_RECORD_SIZE;
        /* NOTE(review): 'rec_va = base + offset;' appears to be missing here */

        DBGDEV( piup, "piu_eq_write: EQ record va = 0x%llx\n", rec_va);

        /* translate VA of the EQ record to PA */
        status = piu_iommu(piup, rec_va, req_id, DA_Store, mode, &pa,id);

        /* write the record to the event queue */
        config_proc_t *procp = piup->config_procp;
        //status = procp->proc_typep->dev_mem_access(procp, pa, (uint8_t *)record,
        //                                           sizeof(*record), DA_Store);
        mmi_memwrite(pa,(uint8_t *)record, sizeof(*record),id);

        // all the following actions (check queue, update tail, send interrupt)
        // should be done atomically
        samPiu *sp = (samPiu *)piup->sam_piu;
        pthread_mutex_lock(&sp->msiMutex[eqnum]);

        /* check if the queue is empty before adding the new entry */
        head = GETMASK64(piup->csrs.Event_Queue_Head_Register[eqnum], 6, 0);
        bool empty = (head == tail);

        /* update the tail pointer */
        piup->csrs.Event_Queue_Tail_Register[eqnum] = next & MASK64(6,0);

        /* if the queue is empty, generate a mondo interrupt depending on state */
        samPiu * sp = (samPiu *)piup->sam_piu;
        sp->setPending(INO_EQLO+eqnum,true);
        piu_mondo_interrupt(piup, INO_EQLO+eqnum, IRQ_RECEIVED);

        pthread_mutex_unlock(&sp->msiMutex[eqnum]);
/*
 * Populate the global imu_error_list / mmu_error_list lookup tables,
 * indexed by error_type, from the PIU_ERR initializer lists below
 * (primary "_P" errors in bits 0.., secondary "_S" errors from bit 32).
 *
 * NOTE(review): the sentinel entries terminating each init list (the
 * loops below stop at error_type == -1) and the declaration of 'i'
 * are missing from this chunk. The entry name
 * PMPME_MES_NOT_EN_SEQ_OVER_S at index 36 looks like two names fused
 * together — verify against the full source.
 */
void piu_init_error_list()
    /* IMU (interrupt/message unit) error descriptors */
    imu_error_entry_t imu_error_init_list[] = {
        { PIU_ERR ( MSI_NOT_EN_P, 0) },
        { PIU_ERR ( COR_MES_NOT_EN_P, 1) },
        { PIU_ERR ( NONFATAL_MES_NOT_EN_P, 2) },
        { PIU_ERR ( FATAL_MES_NOT_EN_P, 3) },
        { PIU_ERR ( PMPME_MES_NOT_EN_P, 4) },
        { PIU_ERR ( PMEACK_MES_NOT_EN_P, 5) },
        { PIU_ERR ( MSI_PAR_ERR_P, 6) },
        { PIU_ERR ( MSI_MAL_ERR_P, 7) },
        { PIU_ERR ( EQ_NOT_EN_P, 8) },
        { PIU_ERR ( EQ_OVER_P, 9) },
        { PIU_ERR ( MSI_NOT_EN_S, 32) },
        { PIU_ERR ( COR_MES_NOT_EN_S, 33) },
        { PIU_ERR ( NONFATAL_MES_NOT_EN_S, 34) },
        { PIU_ERR ( FATAL_MES_NOT_EN_S, 35) },
        { PIU_ERR ( PMPME_MES_NOT_EN_SEQ_OVER_S, 36) },
        { PIU_ERR ( PMEACK_MES_NOT_EN_S, 37) },
        { PIU_ERR ( MSI_PAR_ERR_S, 38) },
        { PIU_ERR ( MSI_MAL_ERR_S, 39) },
        { PIU_ERR ( EQ_NOT_EN_S, 40) },
        { PIU_ERR ( EQ_OVER_S, 41) },

    /* MMU (IOMMU) error descriptors */
    mmu_error_entry_t mmu_error_init_list[] = {
        { PIU_ERR ( BYP_ERR_P, 0) },
        { PIU_ERR ( BYP_OOR_P, 1) },
        { PIU_ERR ( SUN4V_INV_PG_SZ_P, 2) },
        { PIU_ERR ( SPARE1_P, 3) },
        { PIU_ERR ( TRN_ERR_P, 4) },
        { PIU_ERR ( TRN_OOR_P, 5) },
        { PIU_ERR ( TTE_INV_P, 6) },
        { PIU_ERR ( TTE_PRT_P, 7) },
        { PIU_ERR ( TTC_DPE_P, 8) },
        { PIU_ERR ( TTC_CAE_P, 9) },
        { PIU_ERR ( SPARE2_P, 10) },
        { PIU_ERR ( SPARE3_P, 11) },
        { PIU_ERR ( TBW_DME_P, 12) },
        { PIU_ERR ( TBW_UDE_P, 13) },
        { PIU_ERR ( TBW_ERR_P, 14) },
        { PIU_ERR ( TBW_DPE_P, 15) },
        { PIU_ERR ( IOTSBDESC_INV_P, 16) },
        { PIU_ERR ( IOTSBDESC_DPE_P, 17) },
        { PIU_ERR ( SUN4V_VA_OOR_P, 18) },
        { PIU_ERR ( SUN4V_VA_ADJ_UF_P, 19) },
        { PIU_ERR ( SUN4V_KEY_ERR_P, 20) },
        { PIU_ERR ( BYP_ERR_S, 32) },
        { PIU_ERR ( BYP_OOR_S, 33) },
        { PIU_ERR ( SUN4V_INV_PG_SZ_S, 34) },
        { PIU_ERR ( SPARE1_S, 35) },
        { PIU_ERR ( TRN_ERR_S, 36) },
        { PIU_ERR ( TRN_OOR_S, 37) },
        { PIU_ERR ( TTE_INV_S, 38) },
        { PIU_ERR ( TTE_PRT_S, 39) },
        { PIU_ERR ( TTC_DPE_S, 40) },
        { PIU_ERR ( TTC_CAE_S, 41) },
        { PIU_ERR ( SPARE2_S, 42) },
        { PIU_ERR ( SPARE3_S, 43) },
        { PIU_ERR ( TBW_DME_S, 44) },
        { PIU_ERR ( TBW_UDE_S, 45) },
        { PIU_ERR ( TBW_ERR_S, 46) },
        { PIU_ERR ( TBW_DPE_S, 47) },
        { PIU_ERR ( IOTSBDESC_INV_S, 48) },
        { PIU_ERR ( IOTSBDESC_DPE_S, 49) },
        { PIU_ERR ( SUN4V_VA_OOR_S, 50) },
        { PIU_ERR ( SUN4V_VA_ADJ_UF_S, 51) },
        { PIU_ERR ( SUN4V_KEY_ERR_S, 52) },

    /* scatter the init entries into the global tables, keyed by error_type */
    for (i = 0; imu_error_init_list[i].error_type != -1; i++)
        imu_error_list[imu_error_init_list[i].error_type] = imu_error_init_list[i];

    for (i = 0; mmu_error_init_list[i].error_type != -1; i++)
        mmu_error_list[mmu_error_init_list[i].error_type] = mmu_error_init_list[i];
/*
 * Simulate IMU errors: for every error bit set in 'imu_error' whose
 * interrupt is enabled in the IMU Interrupt Enable Register, raise the
 * corresponding IMU error via piu_raise_imu_error().
 *
 * NOTE(review): the declaration of loop index 'i' is missing from this
 * chunk.
 */
void piu_simulate_imu_error(pcie_model_t *piup, uint64_t imu_error)
    uint64_t error_code, intr_enable, imu_ie;

    /*
     * loop over the error bits and raise the error only if
     * the interrupt is enabled
     */
    imu_ie = piup->csrs.IMU_Interrupt_Enable_Register;

    for (i=0; i<IMU_ERROR_MAXNUM; i++) {
        error_code = imu_error_list[i].error_code;
        intr_enable = imu_error_list[i].intr_enable;

        if ((imu_error & error_code) && (imu_ie & intr_enable))
            piu_raise_imu_error(piup, error_code);
/*
 * Record an IMU error and raise the DMU mondo interrupt (INO_DMU).
 *
 * NOTE(review): declarations of 'dmc_cbie'/'dmu'/'imu' and the enable
 * check presumably gating the interrupt on those bits are missing from
 * this chunk.
 */
void piu_raise_imu_error(pcie_model_t *piup, uint64_t error_code)
    piu_csr_t *csrs = &piup->csrs;

    /* update the error status register */
    csrs->IMU_Error_Status_Set_Register |= error_code;

    /* generate INO_DMU mondo interrupt */
    dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;
    dmu = GETMASK64(dmc_cbie, 63, 63);  /* DMU core interrupt enable */
    imu = GETMASK64(dmc_cbie, 0, 0);    /* IMU block interrupt enable */

    /* flag the IMU block (bit 0) in the DMC block error status */
    csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(0,0);
    samPiu * sp = (samPiu *)piup->sam_piu;
    sp->setPending(INO_DMU,true);
    piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
/*
 * Simulate MMU errors: for every error bit set in 'mmu_error' whose
 * interrupt is enabled in the MMU Interrupt Enable Register, raise the
 * corresponding MMU error via piu_raise_mmu_error().
 *
 * NOTE(review): the declaration of loop index 'i' is missing from this
 * chunk.
 */
void piu_simulate_mmu_error(pcie_model_t *piup, uint64_t mmu_error)
    uint64_t error_code, intr_enable, mmu_ie;

    /*
     * loop over the error bits and raise the error only if
     * the interrupt is enabled
     */
    mmu_ie = piup->csrs.MMU_Interrupt_Enable_Register;

    for (i=0; i<MMU_ERROR_MAXNUM; i++) {
        error_code = mmu_error_list[i].error_code;
        intr_enable = mmu_error_list[i].intr_enable;

        if ((mmu_error & error_code) && (mmu_ie & intr_enable))
            piu_raise_mmu_error(piup, error_code);
/*
 * Record an MMU error and raise the DMU mondo interrupt (INO_DMU).
 * Parallel to piu_raise_imu_error(), but sets the MMU block bit (bit 1)
 * in the DMC block error status.
 *
 * NOTE(review): declarations of 'dmc_cbie'/'dmu'/'mmu' and the enable
 * check presumably gating the interrupt on those bits are missing from
 * this chunk.
 */
void piu_raise_mmu_error(pcie_model_t *piup, uint64_t error_code)
    piu_csr_t *csrs = &piup->csrs;

    /* update the error status register */
    csrs->MMU_Error_Status_Set_Register |= error_code;

    /* generate INO_DMU mondo interrupt */
    dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;
    dmu = GETMASK64(dmc_cbie, 63, 63);  /* DMU core interrupt enable */
    mmu = GETMASK64(dmc_cbie, 0, 0);    /* MMU block interrupt enable */

    /* flag the MMU block (bit 1) in the DMC block error status */
    csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(1,1);
    samPiu * sp = (samPiu *)piup->sam_piu;
    sp->setPending(INO_DMU,true);
    piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
* Look up the PIU register type from its offset value
pcie_csr_t
piu_offset2reg(uint64_t offset
, int *regx
)
* first to check the few interrupt registers
* (PIU supports less number of these regs than the Fire for Niagara 1)
if ((offset
> 0x6011D8) && (offset
< 0x6011F0))
if ((offset
> 0x6015D8) && (offset
< 0x6015F0))
for (i
= 0; i
< NUM_PCIE_CSRS
; i
++) {
int nwords
, diff
= offset
- pcie_csrs
[i
].offset
;
*regx
= pcie_csrs
[i
].regx
;
if ((nwords
= pcie_csrs
[i
].nwords
) != 1) {
*regx
= pcie_csrs
[i
].regx
+ wordx
;