Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / sam-t2 / sam / devices / n2_piu / piu.cc
CommitLineData
920dae64
AT
1// ========== Copyright Header Begin ==========================================
2//
3// OpenSPARC T2 Processor File: piu.cc
4// Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
5// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
6//
7// The above named program is free software; you can redistribute it and/or
8// modify it under the terms of the GNU General Public
9// License version 2 as published by the Free Software Foundation.
10//
11// The above named program is distributed in the hope that it will be
12// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14// General Public License for more details.
15//
16// You should have received a copy of the GNU General Public
17// License along with this work; if not, write to the Free Software
18// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
19//
20// ========== Copyright Header End ============================================
21/*
22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#include <assert.h>
27#include "sam_piu.h"
28#include "utils.h"
29
30#ifdef VFALLS /* { */
31#define PIU_NAME "piu_vf"
32#define PIU_TYPE dev_type_piu_vf
33
34#else
35
36#define PIU_NAME "piu"
37#define PIU_TYPE dev_type_piu
38
39#endif /* } VFALLS */
40
41#define ASSERT(X) assert(X)
42//#define DBGDEV(X) X
43#define DBGDEV( X, ... ) ((samPiu*)(X->sam_piu))->debug_more(__VA_ARGS__)
44
/*
 * Descriptor table for every PIU PCIe CSR (group), indexed by pcie_csr_t.
 *
 * Fields per entry: { offset, nwords, regx, name }
 *  - offset: byte offset of the (first) register within the 8MB DMU/PIU
 *            CSR region
 *  - nwords: number of consecutive 8-byte registers in the group
 *            (1 for a single register)
 *  - regx:   index of the group's first 64-bit word in the flat
 *            piu_csr_t register file (regx values are cumulative:
 *            each entry's regx = previous regx + previous nwords)
 *  - name:   register (group) name, used for debug/trace output
 */
pcie_csr_desc_t pcie_csrs[NUM_PCIE_CSRS] = {
    {0x6010a0, 44, 0, "PIU_Interrupt_Mapping_Registers"},
    {0x6014a0, 44, 44, "PIU_Interrupt_Clear_Registers"},
    {0x601a00, 1, 88, "PIU_Interrupt_Retry_Timer_Register"},
    {0x601a10, 1, 89, "PIU_Interrupt_State_Status_Register_1"},
    {0x601a18, 1, 90, "PIU_Interrupt_State_Status_Register_2"},
    {0x60b000, 1, 91, "PIU_INTX_Status_Register"},
    {0x60b008, 1, 92, "PIU_INT_A_Clear_Register"},
    {0x60b010, 1, 93, "PIU_INT_B_Clear_Register"},
    {0x60b018, 1, 94, "PIU_INT_C_Clear_Register"},
    {0x60b020, 1, 95, "PIU_INT_D_Clear_Register"},
    {0x610000, 1, 96, "PIU_Event_Queue_Base_Address_Register"},
    {0x611000, 36, 97, "PIU_Event_Queue_Control_Set_Register"},
    {0x611200, 36, 133, "PIU_Event_Queue_Control_Clear_Register"},
    {0x611400, 36, 169, "PIU_Event_Queue_State_Register"},
    {0x611600, 36, 205, "PIU_Event_Queue_Tail_Register"},
    {0x611800, 36, 241, "PIU_Event_Queue_Head_Register"},
    {0x620000, 256, 277, "PIU_MSI_Mapping_Register"},
    {0x628000, 256, 533, "PIU_MSI_Clear_Registers"},
    {0x62c000, 1, 789, "PIU_Interrupt_Mondo_Data_0_Register"},
    {0x62c008, 1, 790, "PIU_Interrupt_Mondo_Data_1_Register"},
    {0x630000, 1, 791, "PIU_ERR_COR_Mapping_Register"},
    {0x630008, 1, 792, "PIU_ERR_NONFATAL_Mapping_Register"},
    {0x630010, 1, 793, "PIU_ERR_FATAL_Mapping_Register"},
    {0x630018, 1, 794, "PIU_PM_PME_Mapping_Register"},
    {0x630020, 1, 795, "PIU_PME_To_ACK_Mapping_Register"},
    {0x631000, 1, 796, "PIU_IMU_Error_Log_Enable_Register"},
    {0x631008, 1, 797, "PIU_IMU_Interrupt_Enable_Register"},
    {0x631010, 1, 798, "PIU_IMU_Interrupt_Status_Register"},
    {0x631018, 1, 799, "PIU_IMU_Error_Status_Clear_Register"},
    {0x631020, 1, 800, "PIU_IMU_Error_Status_Set_Register"},
    {0x631028, 1, 801, "PIU_IMU_RDS_Error_Log_Register"},
    {0x631030, 1, 802, "PIU_IMU_SCS_Error_Log_Register"},
    {0x631038, 1, 803, "PIU_IMU_EQS_Error_Log_Register"},
    {0x631800, 1, 804, "PIU_DMC_Core_and_Block_Interrupt_Enable_Register"},
    {0x631808, 1, 805, "PIU_DMC_Core_and_Block_Error_Status_Register"},
    {0x632000, 1, 806, "PIU_IMU_Performance_Counter_Select_Register"},
    {0x632008, 1, 807, "PIU_IMU_Performance_Counter_Zero_Register"},
    {0x632010, 1, 808, "PIU_IMU_Performance_Counter_One_Register"},
    {0x634000, 1, 809, "PIU_MSI_32_bit_Address_Register"},
    {0x634008, 1, 810, "PIU_MSI_64_bit_Address_Register"},
    {0x634018, 1, 811, "PIU_Mem_64_PCIE_Offset_Register"},
    {0x640000, 1, 812, "PIU_MMU_Control_and_Status_Register"},
    {0x640008, 1, 813, "PIU_MMU_TSB_Control_Register"},
    {0x640108, 1, 814, "PIU_MMU_TTE_Cache_Invalidate_Register"},
    {0x641000, 1, 815, "PIU_MMU_Error_Log_Enable_Register"},
    {0x641008, 1, 816, "PIU_MMU_Interrupt_Enable_Register"},
    {0x641010, 1, 817, "PIU_MMU_Interrupt_Status_Register"},
    {0x641018, 1, 818, "PIU_MMU_Error_Status_Clear_Register"},
    {0x641020, 1, 819, "PIU_MMU_Error_Status_Set_Register"},
    {0x641028, 1, 820, "PIU_MMU_Translation_Fault_Address_Register"},
    {0x641030, 1, 821, "PIU_MMU_Translation_Fault_Status_Register"},
    {0x642000, 1, 822, "PIU_MMU_Performance_Counter_Select_Register"},
    {0x642008, 1, 823, "PIU_MMU_Performance_Counter_Zero_Register"},
    {0x642010, 1, 824, "PIU_MMU_Performance_Counter_One_Register"},
    {0x646000, 64, 825, "PIU_MMU_TTE_Cache_Virtual_Tag_Registers"},
    {0x647000, 64, 889, "PIU_MMU_TTE_Cache_Physical_Tag_Registers"},
    {0x648000, 512, 953, "PIU_MMU_TTE_Cache_Data_Registers"},
    {0x649000, 16, 1465, "PIU_MMU_DEV2IOTSB_Registers"},
    {0x649100, 32, 1481, "PIU_MMU_IOTSBDESC_Registers"},
    {0x651000, 1, 1513, "PIU_ILU_Error_Log_Enable_Register"},
    {0x651008, 1, 1514, "PIU_ILU_Interrupt_Enable_Register"},
    {0x651010, 1, 1515, "PIU_ILU_Interrupt_Status_Register"},
    {0x651018, 1, 1516, "PIU_ILU_Error_Status_Clear_Register"},
    {0x651020, 1, 1517, "PIU_ILU_Error_Status_Set_Register"},
    {0x651800, 1, 1518, "PIU_PEU_Core_and_Block_Interrupt_Enable_Register"},
    {0x651808, 1, 1519, "PIU_PEU_Core_and_Block_Interrupt_Status_Register"},
    {0x652000, 1, 1520, "PIU_ILU_Diagnostic_Register"},
    {0x653000, 1, 1521, "PIU_DMU_Debug_Select_Register_for_DMU_Debug_Bus_A"},
    {0x653008, 1, 1522, "PIU_DMU_Debug_Select_Register_for_DMU_Debug_Bus_B"},
    {0x653100, 1, 1523, "PIU_DMU_PCI_Express_Configuration_Register"},
    {0x660000, 32, 1524, "PIU_Packet_Scoreboard_DMA_Register_Set"},
    {0x664000, 16, 1556, "PIU_Packet_Scoreboard_PIO_Register_Set"},
    {0x670000, 32, 1572, "PIU_Transaction_Scoreboard_Register_Set"},
    {0x670100, 1, 1604, "PIU_Transaction_Scoreboard_Status_Register"},
    {0x680000, 1, 1605, "PIU_PEU_Control_Register"},
    {0x680008, 1, 1606, "PIU_PEU_Status_Register"},
    {0x680010, 1, 1607, "PIU_PEU_PME_Turn_Off_Generate_Register"},
    {0x680018, 1, 1608, "PIU_PEU_Ingress_Credits_Initial_Register"},
    {0x680100, 1, 1609, "PIU_PEU_Diagnostic_Register"},
    {0x680200, 1, 1610, "PIU_PEU_Egress_Credits_Consumed_Register"},
    {0x680208, 1, 1611, "PIU_PEU_Egress_Credit_Limit_Register"},
    {0x680210, 1, 1612, "PIU_PEU_Egress_Retry_Buffer_Register"},
    {0x680218, 1, 1613, "PIU_PEU_Ingress_Credits_Allocated_Register"},
    {0x680220, 1, 1614, "PIU_PEU_Ingress_Credits_Received_Register"},
    {0x681000, 1, 1615, "PIU_PEU_Other_Event_Log_Enable_Register"},
    {0x681008, 1, 1616, "PIU_PEU_Other_Event_Interrupt_Enable_Register"},
    {0x681010, 1, 1617, "PIU_PEU_Other_Event_Interrupt_Status_Register"},
    {0x681018, 1, 1618, "PIU_PEU_Other_Event_Status_Clear_Register"},
    {0x681020, 1, 1619, "PIU_PEU_Other_Event_Status_Set_Register"},
    {0x681028, 1, 1620, "PIU_PEU_Receive_Other_Event_Header1_Log_Register"},
    {0x681030, 1, 1621, "PIU_PEU_Receive_Other_Event_Header2_Log_Register"},
    {0x681038, 1, 1622, "PIU_PEU_Transmit_Other_Event_Header1_Log_Register"},
    {0x681040, 1, 1623, "PIU_PEU_Transmit_Other_Event_Header2_Log_Register"},
    {0x682000, 1, 1624, "PIU_PEU_Performance_Counter_Select_Register"},
    {0x682008, 1, 1625, "PIU_PEU_Performance_Counter_Zero_Register"},
    {0x682010, 1, 1626, "PIU_PEU_Performance_Counter_One_Register"},
    {0x682018, 1, 1627, "PIU_PEU_Performance_Counter_Two_Register"},
    {0x683000, 1, 1628, "PIU_PEU_Debug_Select_A_Register"},
    {0x683008, 1, 1629, "PIU_PEU_Debug_Select_B_Register"},
    {0x690000, 1, 1630, "PIU_PEU_Device_Capabilities_Register"},
    {0x690008, 1, 1631, "PIU_PEU_Device_Control_Register"},
    {0x690010, 1, 1632, "PIU_PEU_Device_Status_Register"},
    {0x690018, 1, 1633, "PIU_PEU_Link_Capabilities_Register"},
    {0x690020, 1, 1634, "PIU_PEU_Link_Control_Register"},
    {0x690028, 1, 1635, "PIU_PEU_Link_Status_Register"},
    {0x690030, 1, 1636, "PIU_PEU_Slot_Capabilities_Register"},
    {0x691000, 1, 1637, "PIU_PEU_Uncorrectable_Error_Log_Enable_Register"},
    {0x691008, 1, 1638, "PIU_PEU_Uncorrectable_Error_Interrupt_Enable_Register"},
    {0x691010, 1, 1639, "PIU_PEU_Uncorrectable_Error_Interrupt_Status_Register"},
    {0x691018, 1, 1640, "PIU_PEU_Uncorrectable_Error_Status_Clear_Register"},
    {0x691020, 1, 1641, "PIU_PEU_Uncorrectable_Error_Status_Set_Register"},
    {0x691028, 1, 1642, "PIU_PEU_Receive_Uncorrectable_Error_Header1_Log_Register"},
    {0x691030, 1, 1643, "PIU_PEU_Receive_Uncorrectable_Error_Header2_Log_Register"},
    {0x691038, 1, 1644, "PIU_PEU_Transmit_Uncorrectable_Error_Header1_Log_Register"},
    {0x691040, 1, 1645, "PIU_PEU_Transmit_Uncorrectable_Error_Header2_Log_Register"},
    {0x6a1000, 1, 1646, "PIU_PEU_Correctable_Error_Log_Enable_Register"},
    {0x6a1008, 1, 1647, "PIU_PEU_Correctable_Error_Interrupt_Enable_Register"},
    {0x6a1010, 1, 1648, "PIU_PEU_Correctable_Error_Interrupt_Status_Register"},
    {0x6a1018, 1, 1649, "PIU_PEU_Correctable_Error_Status_Clear_Register"},
    {0x6a1020, 1, 1650, "PIU_PEU_Correctable_Error_Status_Set_Register"},
    {0x6e2000, 1, 1651, "PIU_PEU_CXPL_SERDES_Revision_Register"},
    {0x6e2008, 1, 1652, "PIU_PEU_CXPL_AckNak_Latency_Threshold_Register"},
    {0x6e2010, 1, 1653, "PIU_PEU_CXPL_AckNak_Latency_Timer_Register"},
    {0x6e2018, 1, 1654, "PIU_PEU_CXPL_Replay_Timer_Threshold_Register"},
    {0x6e2020, 1, 1655, "PIU_PEU_CXPL_Replay_Timer_Register"},
    {0x6e2040, 1, 1656, "PIU_PEU_CXPL_Vendor_DLLP_Message_Register"},
    {0x6e2050, 1, 1657, "PIU_PEU_CXPL_LTSSM_Control_Register"},
    {0x6e2058, 1, 1658, "PIU_PEU_CXPL_DLL_Control_Register"},
    {0x6e2060, 1, 1659, "PIU_PEU_CXPL_MACL_PCS_Control_Register"},
    {0x6e2068, 1, 1660, "PIU_PEU_CXPL_MACL_Lane_Skew_Control_Register"},
    {0x6e2070, 1, 1661, "PIU_PEU_CXPL_MACL_Symbol_Number_Register"},
    {0x6e2078, 1, 1662, "PIU_PEU_CXPL_MACL_Symbol_Timer_Register"},
    {0x6e2100, 1, 1663, "PIU_PEU_CXPL_Core_Status_Register"},
    {0x6e2108, 1, 1664, "PIU_PEU_CXPL_Event_Error_Log_Enable_Register"},
    {0x6e2110, 1, 1665, "PIU_PEU_CXPL_Event_Error_Interrupt_Enable_Register"},
    {0x6e2118, 1, 1666, "PIU_PEU_CXPL_Event_Error_Interrupt_Status_Register"},
    {0x6e2120, 1, 1667, "PIU_PEU_CXPL_Event_Error_Status_Clear_Register"},
    {0x6e2128, 1, 1668, "PIU_PEU_CXPL_Event_Error_Set_Register"},
    {0x6e2130, 1, 1669, "PIU_PEU_Link_Bit_Error_Counter_I_Register"},
    {0x6e2138, 1, 1670, "PIU_PEU_Link_Bit_Error_Counter_II_Register"},
    {0x6e2200, 1, 1671, "PIU_PEU_SERDES_PLL_Control_Register"},
    {0x6e2300, 8, 1672, "PIU_PEU_SERDES_Receiver_Lane_Control_Register"},
    {0x6e2380, 8, 1680, "PIU_PEU_SERDES_Receiver_Lane_Status_Register"},
    {0x6e2400, 8, 1688, "PIU_PEU_SERDES_Transmitter_Control_Register"},
    {0x6e2480, 8, 1696, "PIU_PEU_SERDES_Transmitter_Status_Register"},
    {0x6e2500, 2, 1704, "PIU_PEU_SERDES_Test_Configuration_Register"},
};
193
194/*
195 * Piu configuration dump
196 */
197void piu_dump(config_dev_t * config_devp)
198{
199}
200
201
202/*
203 * Initialize PIU CSR with power on reset value
204 */
205void piu_init_csr(pcie_model_t *piup)
206{
207 piu_csr_t *csrs = &piup->csrs;
208 int i, nwords;
209
210 nwords = pcie_csrs[PIU_Event_Queue_State_Register].nwords;
211 for (i = 0; i < nwords; i++)
212 csrs->Event_Queue_State_Register[i] = EQ_IDLE;
213
214}
215
216
217/*
218 * Access PIU (downbound)
219 */
220// bool_t piu_cpu_access(simcpu_t *sp, config_addr_t *cap, tpaddr_t offset, maccess_t memop, uint64_t *regp)
221pcieCompleter piu_cpu_access(pcie_model_t *piup, tpaddr_t paddr, maccess_t memop, uint64_t *regp,SAM_DeviceId * id)
222{
223 bool cfgio;
224 pcieCompleter status;
225 piu_region_t region;
226 uint32_t count;
227 uint64_t addr, pa = paddr;
228 int node_id = 0;
229
230 /*
231 * N2 PIU only supports 1,2,4 and 8-byte aligned PIO access the 64GB region
232 * and 8-byte to the CSRs in the 8MB region (section 16.3.2.1, N2 PRM Rev. 1.0)
233 */
234 switch(memop & MA_Size_Mask) {
235 case MA_Size8 :
236 count = 1;
237 break;
238 case MA_Size16 :
239 count = 2;
240 break;
241 case MA_Size32 :
242 count = 4;
243 break;
244 case MA_Size64 :
245 count = 8;
246 break;
247 default:
248 ASSERT(0);
249 }
250
251 region = piu_decode_region(piup, pa, &addr);
252
253 switch (region) {
254 case PIU_REGION_CFGIO:
255 cfgio = GETMASK64(addr, 28, 28);
256
257 if (count == 8) {
258 DBGDEV( piup, "ERROR: illegal 8 byte access to PCI "
259 "Cfg/IO addr = 0x%llx\n on node %d", addr, node_id );
260 return pcieCompleter(CA); // XXX ???
261 }
262
263 if (cfgio) {
264 uint64_t ioaddr = addr & PCIE_IO_ADDR_MASK;
265
266 status = piu_io_access(piup, memop, ioaddr, count, regp,id);
267 } else {
268 status = piu_cfg_access(piup, memop, addr, count, regp,id);
269 }
270 break;
271 case PIU_REGION_MEM32:
272 status = piu_mem_access(piup, memop, addr, count, regp, _PCIE_MEM32,id);
273 break;
274 case PIU_REGION_MEM64:
275 status = piu_mem_access(piup, memop, addr, count, regp, _PCIE_MEM64,id);
276 break;
277 case PIU_REGION_8MB:
278 status = piu_csr_access(piup, memop, addr, regp);
279 *id = ((samPiu*)piup->sam_piu)->samId;
280 break;
281 default:
282 printf("ERROR: out of range access to PCIE space: "
283 "pa=0x%llx\n", pa);
284 // ASSERT(0);
285 return pcieCompleter(UR);
286 }
287
288 return status;
289}
290
291
292/*
293 * Decode PCIE non-cachable regions
294 *
295 * - 8MB region for CSRs
296 *
297 * - 64GB region partitioned into three subregions as
298 *
299 * * PCIE-A Cfg/IO (512 MB)
300 * * PCIE-A MEM32 (16 MB - 2 GB)
301 * * PCIE-A MEM64 (16 MB - 32 GB)
302 */
303// piu_region_t piu_decode_region(simcpu_t *sp, pcie_model_t *piup, uint64_t pa, uint64_t *offset)
304piu_region_t piu_decode_region(pcie_model_t *piup, uint64_t pa, uint64_t *offset)
305{
306 uint64_t reg_addr = pa & PHYS_ADDR_MASK;
307
308 if (reg_addr == (PHYS_ADDR_DMU)) {
309 if ((pa & DMU_8MB_GAP_MASK) == 0) {
310 *offset = pa & DMU_8MB_OFFSET_MASK;
311 return PIU_REGION_8MB;
312 } else {
313 /* should be a fatal() */
314 printf("ERROR: illegal access to PIU CSRs: "
315 "pa=0x%llx\n",
316 pa);
317 ASSERT(0);
318 }
319 }
320
321 if ((reg_addr>=PHYS_ADDR_PIU_LB) && (reg_addr<=PHYS_ADDR_PIU_UB)) {
322
323 if (piu_decode_cfgio(piup, pa, offset)) {
324 return PIU_REGION_CFGIO;
325 } else if (piu_decode_mem32(piup, pa, offset)) {
326 return PIU_REGION_MEM32;
327 } else if (piu_decode_mem64(piup, pa, offset)) {
328 return PIU_REGION_MEM64;
329 } else
330 return PIU_REGION_UNMAPPED;
331 }
332}
333
334
335/*
336 * Decode PCIE Cfg/IO region
337 */
338bool piu_decode_cfgio(pcie_model_t *piup, uint64_t pa, uint64_t *offset)
339{
340 const n2Ncu::map_info_t *map;
341
342 samPiu * sp = (samPiu*)piup->sam_piu;
343 map = sp->getNcu()->getMap(n2Ncu::PIU_REGION_CFGIO);
344
345 if (map->enable && ((pa & map->mask) == map->base)) {
346 *offset = pa & PCIE_IOCON_ADDR_MASK;
347 return true;
348 }
349
350 return false;
351}
352
353
354/*
355 * Decode PCIE MEM32 region
356 */
357bool piu_decode_mem32(pcie_model_t *piup, uint64_t pa, uint64_t *offset)
358{
359 const n2Ncu::map_info_t *map;
360
361 samPiu * sp = (samPiu*)piup->sam_piu;
362 map = sp->getNcu()->getMap(n2Ncu::PIU_REGION_MEM32);
363
364 if (map->enable && ((pa & map->mask) == map->base)) {
365 *offset = pa & (map->size - 1);
366 return true;
367 }
368
369 return false;
370}
371
372
373/*
374 * Decode PCIE MEM64 region
375 */
376bool piu_decode_mem64(pcie_model_t *piup, uint64_t pa, uint64_t *offset)
377{
378 const n2Ncu::map_info_t *map;
379 uint64_t pcie_offset;
380 piu_csr_t *csrs = &piup->csrs;
381
382 samPiu * sp = (samPiu*)piup->sam_piu;
383 map = sp->getNcu()->getMap(n2Ncu::PIU_REGION_MEM64);
384
385 if (map->enable && ((pa & map->mask) == map->base)) {
386 uint64_t pcie_offset;
387
388 pcie_offset = csrs->Mem_64_PCIE_Offset_Register;
389
390 *offset = (pa & ~map->mask & PCIE_MEM64_ADDR_MASK) | pcie_offset;
391 return true;
392 }
393
394 return false;
395}
396
397
398/*
399 * Access PCIE CSRs (downbound)
400 */
401// bool_t piu_csr_access(simcpu_t *sp, pcie_model_t *piup, maccess_t memop, uint64_t offset, uint64_t *regp)
/*
 * Access PCIE CSRs (downbound).
 *
 * Handles 8-byte loads/stores to the PIU register file.  Stores may
 * have side effects (interrupt mondo generation, event-queue state
 * machine updates, W1C/W1S error-status emulation); loads of several
 * registers are synthesized from other state instead of being read
 * back directly.
 *
 * Returns SC on success, CA for unsupported sizes, illegal offsets, or
 * writes to read-only registers.
 */
pcieCompleter piu_csr_access(pcie_model_t *piup, maccess_t memop, uint64_t offset, uint64_t *regp)
{
    /* 'csrs' views the whole piu_csr_t register file as a flat word array */
    uint64_t old_value, value, new_error, *csrs = (uint64_t *)&piup->csrs;
    pcie_csr_t index;
    int size, regx, wordx;
    char regname[BUFSIZ];
    int node_id=0;
    samPiu *sp = (samPiu*)piup->sam_piu;

    /*
     * PIU only supports 8-byte accesses to registers
     */
    size = memop & MA_Size_Mask;
    if (size != MA_Size64) return pcieCompleter(CA); // XXX ???

    /*
     * check illegal offset
     */
    index = piu_offset2reg(offset, &regx);

    if (index == UND_PCIE_CSRS) {
        DBGDEV(piup, "Access illegal PCIE register at offset "
            "= 0x%llx on node %d\n", offset, node_id);
        return pcieCompleter(CA); // XXX ???
    }

    /*
     * read/write PCIE registers
     */
    /* wordx = word index within a multi-word register group */
    wordx = regx - pcie_csrs[index].regx;

    strcpy(regname, pcie_csrs[index].name);
    if (pcie_csrs[index].nwords > 1)
        sprintf(&regname[strlen(regname)], "[%d]", wordx);

    switch (memop) {
    case MA_st64:
        value = *regp;
        old_value = csrs[regx];
        /*
         * check on Read only registers
         */
        switch (index) {
        case PIU_Interrupt_State_Status_Register_1:
        case PIU_Interrupt_State_Status_Register_2:
        case PIU_INTX_Status_Register:
        case PIU_Event_Queue_State_Register:
        case PIU_IMU_Interrupt_Status_Register:
        case PIU_DMC_Core_and_Block_Error_Status_Register:
        case PIU_MMU_Interrupt_Status_Register:
        case PIU_ILU_Interrupt_Status_Register:
        case PIU_Packet_Scoreboard_DMA_Register_Set:
        case PIU_Packet_Scoreboard_PIO_Register_Set:
        case PIU_Transaction_Scoreboard_Register_Set:
        case PIU_Transaction_Scoreboard_Status_Register:
        case PIU_PEU_Egress_Credits_Consumed_Register:
        case PIU_PEU_Egress_Credit_Limit_Register:
        case PIU_PEU_Egress_Retry_Buffer_Register:
        case PIU_PEU_Ingress_Credits_Allocated_Register:
        case PIU_PEU_Ingress_Credits_Received_Register:
        case PIU_PEU_Other_Event_Interrupt_Status_Register:
        case PIU_PEU_Device_Capabilities_Register:
        case PIU_PEU_Device_Status_Register:
        case PIU_PEU_Link_Capabilities_Register:
        case PIU_PEU_Link_Status_Register:
        case PIU_PEU_Uncorrectable_Error_Interrupt_Status_Register:
        case PIU_PEU_Correctable_Error_Interrupt_Status_Register:
        case PIU_PEU_CXPL_SERDES_Revision_Register:
        case PIU_PEU_CXPL_AckNak_Latency_Timer_Register:
        case PIU_PEU_CXPL_Replay_Timer_Register:
        case PIU_PEU_CXPL_Core_Status_Register:
        case PIU_PEU_CXPL_Event_Error_Interrupt_Status_Register:
        case PIU_PEU_Link_Bit_Error_Counter_II_Register:
        case PIU_PEU_SERDES_Receiver_Lane_Status_Register:
        case PIU_PEU_SERDES_Transmitter_Status_Register:
            DBGDEV(piup, "Error: Write Read-Only Register "
                "'%s' offset=0x%llx value=0x%llx on node %d\n",
                pcie_csrs[index].name, offset, *regp, node_id);

            // return false; /* FIXME: should trap on the error */
            return pcieCompleter(CA); // XXX ???
        }

        csrs[regx] = *regp;

        DBGDEV(piup, "Write PIU register '%s' at offset = "
            "0x%llx value = 0x%llx on node %d\n",
            pcie_csrs[index].name, offset, *regp, node_id);
        /*
         * act upon write to reg
         */
        switch (index) {
        case PIU_Interrupt_Mapping_Registers:
            /* re-deliver the mondo using the current irq state */
            piu_mondo_interrupt(piup,wordx+INO_INTA, \
                (irq_state_t)(piu_get_irq_state(piup, wordx+INO_INTA)));
            break;
        case PIU_Interrupt_Clear_Registers:
            // piu_set_irq_state(piup, wordx+INO_INTA, (irq_state_t)(*regp & MASK64(1,0)));
            piu_mondo_interrupt(piup,wordx+INO_INTA,(irq_state_t)(*regp & MASK64(1,0)));
            break;
        case PIU_INT_A_Clear_Register:
        case PIU_INT_B_Clear_Register:
        case PIU_INT_C_Clear_Register:
        case PIU_INT_D_Clear_Register:
            /* writing 1 to bit 0 clears the corresponding INTx mondo */
            if (*regp & 1)
                piu_mondo_interrupt(piup, index-PIU_INT_A_Clear_Register+INO_INTA, IRQ_IDLE);
            break;
        case PIU_Event_Queue_Control_Set_Register:
            if (value & MASK64(57,57)) {
                /*
                 * upon ENOVERR set, update the OVERR and STATE field
                 * of the EQ Tail and State register
                 */
                piup->csrs.Event_Queue_Tail_Register[wordx] |= MASK64(57,57);
                piup->csrs.Event_Queue_State_Register[wordx] = EQ_ERROR;
            }
            if (value & MASK64(44,44))
                /*
                 * upon EN bit set, update the STATE bit of
                 * the EQ State register
                 */
                piup->csrs.Event_Queue_State_Register[wordx] = EQ_ACTIVE;
            break;
        case PIU_Event_Queue_Control_Clear_Register:
            if (value & MASK64(57,57))
                /* COVERR */
                piup->csrs.Event_Queue_Tail_Register[wordx] &= ~MASK64(57,57);
            if (value & MASK64(47,47)) {
                /* E2I */
                if (piup->csrs.Event_Queue_State_Register[wordx] == EQ_ERROR)
                    piup->csrs.Event_Queue_State_Register[wordx] = EQ_IDLE;
            }
            if (value & MASK64(44,44))
                /* DIS */
                piup->csrs.Event_Queue_State_Register[wordx] = EQ_IDLE;
            break;
        case PIU_Event_Queue_Tail_Register:
        case PIU_Event_Queue_Head_Register:
            /* serialize against MSI delivery into this event queue */
            pthread_mutex_lock(&sp->msiMutex[wordx]);
            /* only the queue pointer bits [6:0] are software-writable */
            csrs[regx] = old_value;
            WRITE_PIU_CSR(csrs[regx], value, MASK64(6,0));
            if(piup->csrs.Event_Queue_Tail_Register[wordx] == \
                (piup->csrs.Event_Queue_Head_Register[wordx])){
                samPiu * sp = (samPiu*)piup->sam_piu;
                /* queue drained (head == tail): drop pending EQ interrupt */
                sp->setPending(INO_EQLO+wordx,false);
            }
            pthread_mutex_unlock(&sp->msiMutex[wordx]);
            break;
        case PIU_MSI_Mapping_Register:
            csrs[regx] = old_value;
            WRITE_PIU_CSR(csrs[regx], value, MASK64(63,62)|MASK64(5,0));
            break;
        case PIU_MSI_Clear_Registers:
            if (value & MASK64(62,62))
                /* EQWR_N */
                piup->csrs.MSI_Mapping_Register[wordx] &= ~MASK64(62,62);
            csrs[regx] = 0;
            break;
        case PIU_MSI_32_bit_Address_Register:
            csrs[regx] = old_value;
            WRITE_PIU_CSR(csrs[regx], value, MASK64(31,16));
            break;
        case PIU_MSI_64_bit_Address_Register:
            csrs[regx] = old_value;
            WRITE_PIU_CSR(csrs[regx], value, MASK64(63,16));
            break;
        case PIU_IMU_Error_Status_Clear_Register:
            /*
             * W1C: clear IMU error
             */
            piup->csrs.IMU_Error_Status_Set_Register &= ~value;
            break;
        case PIU_IMU_Error_Status_Set_Register:
            /*
             * W1S to simulate actual IMU error occurence
             */
            new_error = value & ~old_value;
            if (new_error) {
                csrs[regx] = new_error | old_value;
                piu_simulate_imu_error(piup, new_error);
            }
            break;
        case PIU_MMU_Error_Status_Clear_Register:
            /*
             * W1C: clear MMU error
             */
            piup->csrs.MMU_Error_Status_Set_Register &= ~value;
            break;
        case PIU_MMU_Error_Status_Set_Register:
            /*
             * W1S to simulate actual MMU error occurence
             */
            new_error = value & ~old_value;
            if (new_error) {
                csrs[regx] = new_error | old_value;
                piu_simulate_mmu_error(piup, new_error);
            }
            break;
        default:
            break;
        }


        break;
    case MA_ldu64:
        value = csrs[regx];

        /*
         * Check on Load-Only (write-only but reads always return 0) CSRs
         */
        switch (index) {
        case PIU_Event_Queue_Control_Set_Register:
        case PIU_Event_Queue_Control_Clear_Register:
            value = 0;
            break;
        }

        /* registers whose read value is derived from other state */
        switch (index) {
        case PIU_Interrupt_Clear_Registers:
            value = piu_get_irq_state(piup, wordx+INO_INTA);
            break;
        case PIU_INT_A_Clear_Register:
        case PIU_INT_B_Clear_Register:
        case PIU_INT_C_Clear_Register:
        case PIU_INT_D_Clear_Register:
            value = piu_get_intx_state(piup, index-PIU_INT_A_Clear_Register+INO_INTA);
            break;
        case PIU_MSI_Clear_Registers:
            /*
             * return EQWR_N bit
             */
            value = piup->csrs.MSI_Clear_Registers[wordx] & MASK64(62, 62);
            break;
        case PIU_IMU_Error_Status_Clear_Register:
            value = piup->csrs.IMU_Error_Status_Set_Register;
            break;
        case PIU_IMU_Interrupt_Status_Register:
            value = piup->csrs.IMU_Error_Status_Set_Register &
                piup->csrs.IMU_Interrupt_Enable_Register;
            break;
        case PIU_MMU_Error_Status_Clear_Register:
            value = piup->csrs.MMU_Error_Status_Set_Register;
            break;
        case PIU_MMU_Interrupt_Status_Register:
            value = piup->csrs.MMU_Error_Status_Set_Register &
                piup->csrs.MMU_Interrupt_Enable_Register;
            break;

        case PIU_PEU_Link_Status_Register:
            // return the hard coded value for link width=x8, speed=2.5Gbps
            value = 0x1081;
            break;
        }

        DBGDEV(piup, "Read PCIE register '%s' at offset = "
            "0x%llx value = 0x%llx on node %d\n",
            pcie_csrs[index].name, offset, value, node_id );

        // if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
        *regp = value;

        break;
    default:
        printf("ERROR: PIU only supports 8 byte CSR access, "
            "node is %d\n", node_id);
        ASSERT(0);
    }

    return pcieCompleter(SC);
}
672
673
674/*
675 * Access PCIE device's IO space (downbound)
676 */
677// bool_t piu_io_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp)
678pcieCompleter piu_io_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp,SAM_DeviceId * id)
679{
680 pcieCompleter status;
681
682 samPiu * sp = (samPiu*)piup->sam_piu;
683
684 switch (memop & MA_Op_Mask) {
685 case MA_Ld:
686 case MA_LdSigned:
687 status = sp->busIf->busif_access(PCIE_IO,false,ioaddr,(void*)regp,1,pcie_countToBe(count),sp->getReqId(),mem_addr64,id);
688 // last arg is a don't care above.
689 break;
690 case MA_St:
691 status = sp->busIf->busif_access(PCIE_IO,true,ioaddr,(void*)regp,1,pcie_countToBe(count),sp->getReqId(),mem_addr64,id);
692 break;
693 default:
694 ASSERT(0);
695 }
696
697 return status;
698}
699
700
701/*
702 * Access PCIE device's Configuation space (downbound)
703 *
704 * The data is inverted between the big and little endian format
705 * because PCI cfg space is structured as little endian
706 */
707// bool_t piu_cfg_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp)
708pcieCompleter piu_cfg_access(pcie_model_t *piup, maccess_t memop, uint64_t ioaddr, uint32_t count, uint64_t *regp,SAM_DeviceId * id)
709{
710 uint8_t bus, reg_no;
711 pcieCompleter status;
712 addrMd_xactnType t_type;
713
714 samPiu * sp = (samPiu*)piup->sam_piu;
715
716 bus = ioaddr >> busShift & 0xff;
717 if ( bus != sp->devif_getBusNo() )
718 t_type = conf_type1;
719 else
720 t_type = conf_type0;
721
722 switch (memop & MA_Op_Mask) {
723 case MA_Ld:
724 case MA_LdSigned:
725 status = sp->busIf->busif_access(PCIE_CFG,false,ioaddr,(void*)regp,1,pcie_countToBe(count),sp->getReqId(),t_type,id);
726 if((status.status == CA) && ((ioaddr & 0xfff) == 0))
727 *regp = 0xffff;
728 break;
729 case MA_St:
730 status = sp->busIf->busif_access(PCIE_CFG,true,ioaddr,(void*)regp,1,pcie_countToBe(count),sp->getReqId(),t_type,id);
731 break;
732 default:
733 ASSERT(0);
734 }
735
736
737 return status;
738}
739
740
741/*
742 * Access PCIE device's MEM32/MEM64 space (downbound)
743 */
744//bool_t piu_mem_access(pcie_model_t *piup, maccess_t memop, uint64_t paddr, uint32_t count, uint64_t *regp,
745pcieCompleter piu_mem_access(pcie_model_t *piup, maccess_t memop, uint64_t paddr, uint32_t count, uint64_t *regp,
746 pcie_space_t space_id,SAM_DeviceId * id)
747{
748 pcieCompleter status;
749 samPiu * sp = (samPiu*)piup->sam_piu;
750
751 assert( count <= 8 );
752
753 switch (memop & MA_Op_Mask) {
754 case MA_Ld:
755 case MA_LdSigned:
756 status = sp->busIf->busif_access(PCIE_MEM,false,paddr,(void*)regp,(count > 4)?count/4:1,pcie_countToBe(count),sp->getReqId(), \
757 space_id == _PCIE_MEM32? mem_addr32:mem_addr64,id);
758 break;
759 case MA_St:
760 status = sp->busIf->busif_access(PCIE_MEM,true,paddr,(void*)regp,(count > 4)?count/4:1,pcie_countToBe(count),sp->getReqId(), \
761 space_id == _PCIE_MEM32? mem_addr32:mem_addr64,id);
762 break;
763 default:
764 ASSERT(0);
765 }
766
767 return status;
768}
769
770
771/*
772 * DMA transactions (upbound)
773 *
774 * Arguments:
775 * piup: handle to pcie_model structure
776 * va: host virtual address accessed by the DMA transaction
777 * datap: data read from/written to the host memory
778 * count: data size counted in byte
779 * req_id: 16 bit requester Id
780 * type: access type (load vs. store)
781 * mode: addressing mode, can be either 32 bit or 64 bit address
782 */
783bool piu_dma_access(pcie_model_t *piup, tvaddr_t va, uint8_t *datap, int count,
784 uint16_t req_id, dev_access_t type, dev_mode_t mode,SAM_DeviceId id)
785{
786 uint64_t msi_addr;
787 bool is_msi = false, status;
788 tpaddr_t pa;
789
790 /*
791 * decode MSI access
792 */
793 if (mode == PCIE_IS32) {
794 msi_addr = piup->csrs.MSI_32_bit_Address_Register & MASK64(31, 16);
795 is_msi = ((va & MASK64(31,16)) == msi_addr);
796 }
797 if (mode == PCIE_IS64) {
798 int sun4v, base, bit=63;
799
800 /*
801 * In sun4v mode, if EQ_base_addr_reg.bit[63]=0, msi_addr_reg.bit[63]
802 * is not used for comparison (see 16.3.9.8 of N2 PRM, rev 1.2)
803 */
804 sun4v = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 2, 2);
805 base = GETMASK64(piup->csrs.Event_Queue_Base_Address_Register, 63,63);
806
807 if (sun4v)
808 bit = (base == 0) ? 62 : 63;
809
810 msi_addr = piup->csrs.MSI_64_bit_Address_Register & MASK64(bit, 16);
811 is_msi = ((va & MASK64(bit,16)) == msi_addr);
812 }
813
814 if (is_msi && type == DA_Store) {
815 status = piu_msi_write(piup, va, datap, count, req_id, mode,id);
816 return (status);
817 }
818
819 /*
820 * perform IOMMU operation
821 */
822 status = piu_iommu(piup, va, req_id, type, mode, &pa,id);
823
824
825 /*
826 * VA -> PA translation is successful, do DMA transaction with pa
827 */
828 if (status) {
829 config_proc_t *procp = piup->config_procp;
830
831 //status = procp->proc_typep->dev_mem_access(procp, pa, datap, count, type);
832 if(type == DA_Store)
833 mmi_memwrite(pa,datap,count,id);
834 else if(type == DA_Load)
835 mmi_memread(pa,datap,count,id);
836 else{
837 printf("FATAL ERROR: don't know what to do with DVMA\n");
838 exit(0);
839 }
840 }
841
842 return (true);
843}
844
845
846/*
847 * This function performs IOMMU operation upon each DMA request.
848 */
849bool piu_iommu(pcie_model_t *piup, tvaddr_t va, uint16_t req_id, dev_access_t type,
850 dev_mode_t mode, tpaddr_t *pa,SAM_DeviceId id)
851{
852 int te, be, sun4v;
853
854 te = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 0, 0);
855 be = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 1, 1);
856 sun4v = GETMASK64(piup->csrs.MMU_Control_and_Status_Register, 2, 2);
857
858
859 // check if this is a sun4v EQ bypass address
860 // the iommu is bypassed even if be bit in control status register is 0.
861 if(sun4v && ( (va >> 39) == 0x1fff800) ){ // bits 63:50 all 1,s, 49:39 all 0's
862 *pa = va & MASK64(38,0);
863 return true;
864 }
865
866 if( sun4v ){
867 if(be){
868 // bypass is the correct behaviour here. However older fw releases
869 // enable the bypass, but still expect the address to be translated.
870 // the newer f/w releases (int12) disables the bypass. Hence
871 // continue to translate the addess for now, and hope newer f/w
872 // release never use the bypass mode in sun4v mode. XXX
873 // *pa = va & MASK64(38, 0);
874 return piu_iommu_sun4v(piup, va, req_id, type, mode, pa,id);
875 }else if( (va & MASK64(62,40)) != 0 ){
876 // raise sun4v_va_oor
877 DBGDEV(piup, "piu_iommu: sun4v mode, va = 0x%llx VA[62:40] != 0\n",va);
878 ASSERT(0);
879 }else if(!te){
880 // raise MMU TRN_ERR
881 DBGDEV(piup, "piu_iommu: MMU is disabled, va = 0x%llx\n", va );
882 ASSERT(0);
883 }else
884 return piu_iommu_sun4v(piup, va, req_id, type, mode, pa,id);
885 }
886
887 if(sun4v == 0 && mode == PCIE_IS64){ // be bit is a don't care in 32 bit mode
888 // sun4u mode
889 if(be == 0){
890 // raise mmu_byp_err
891 DBGDEV(piup, "piu_iommu: sun4u bypass mode, 64bit addressing, be == 0, byp_err\n");
892 ASSERT(0);
893 }else if( (va & MASK64(63,50)) == (va & MASK64(63,39)) ){
894 *pa = va & MASK64(38, 0);
895 return true;
896 }else{
897 // raise mmu byp_oor
898 DBGDEV(piup, "piu_iommu: sun4u bypass mode, 64bit addressing, byp_oor\n");
899 ASSERT(0);
900 }
901 }
902
903 // sun4u mode, PCIE_IS32
904 // check whether MMU is disabled
905 if (!te) {
906 // raise MMU TRN_ERR
907 DBGDEV(piup, "piu_iommu: MMU is disabled, va = 0x%llx\n", va );
908 ASSERT(0);
909 }else
910 //perform IOMMU operation under SUN4U mode
911 return piu_iommu_sun4u(piup, va, req_id, type, mode, pa,id);
912}
913
914
915/*
916 * Translate VA -> PA, SUN4U mode
917 */
918bool piu_iommu_sun4u(pcie_model_t *piup, tvaddr_t va, uint16_t req_id, dev_access_t type,
919 dev_mode_t mode, tpaddr_t *pa, SAM_DeviceId id)
920{
921 config_proc_t *procp = piup->config_procp;
922 int ts, ps, pg_bits, tsb_sz, tte_idx;
923 uint64_t tb, vpn, tte_addr, tte, pg_mask;
924 bool status;
925
926 ts = GETMASK64(piup->csrs.MMU_TSB_Control_Register, 3, 0);
927 ps = GETMASK64(piup->csrs.MMU_TSB_Control_Register, 8, 8);
928 tb = piup->csrs.MMU_TSB_Control_Register & MASK64(38, 13);
929
930 /*
931 * determine the tte index in terms of
932 *
933 * - page size: 8K (ps=0) and 64K (ps=1)
934 * - number of TSB entries (=1K*2^ts = 2^(10+ts))
935 */
936 pg_bits = 13 + 3*ps;
937 vpn = (va & MASK64(31, pg_bits)) >> pg_bits;
938 tsb_sz = 1 << (ts+10);
939 tte_idx = (vpn & (tsb_sz-1)) << 3; /* each entry of 8 byte */
940 tte_addr = tb + tte_idx;
941
942 /*
943 * retrieve the tte entry
944 */
945 // status = procp->proc_typep->dev_mem_access(procp, tte_addr, (uint8_t *)&tte, 8, DA_Load);
946
947 mmi_memread(tte_addr, (uint8_t *)&tte, 8,id);
948#if defined(ARCH_X64)
949 tte = ss_byteswap64(tte);
950#endif
951
952 //if (!status) {
953 // DBGDEV(lprintf(-1, "piu_iommu_sun4u: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
954 // tte_addr, va); );
955 // ASSERT(0);
956 //}
957
958 /*
959 * finally do VA -> PA
960 */
961 piu_iommu_va2pa(piup,tte, ps, va, req_id, type, mode, pa);
962
963 DBGDEV(piup, "piu_iommu_sun4u: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
964 va, *pa, tte_addr, tte );
965
966 return true;
967}
968
969
970/*
971 * Translate VA -> PA, SUN4V mode
972 */
973bool piu_iommu_sun4v(pcie_model_t *piup, tvaddr_t va, uint16_t req_id, dev_access_t type,
974 dev_mode_t mode, tpaddr_t *pa, SAM_DeviceId id)
975{
976 config_proc_t *procp = piup->config_procp;
977 piu_csr_t *csrs = &piup->csrs;
978 int i, busid_sel, busid, ps, ts, pg_bits, tsb_sz, tte_idx;
979 uint8_t idx, iotsb_idx, iotsb_no;
980 uint64_t dev2iotsb, offset, base_pa, vpn, tte_addr, tte;
981 bool status;
982
983 /*
984 * Form 7 bit index id into the DEV2IOTSB table, which is implemented
985 * by a set of 16 x 64-bit registers, with each register containing
986 * 8 x 5-bit values to index into the IOTSBDESC table.
987 */
988 busid = req_id >> 0x8 & 0xff;
989 busid_sel = GETMASK64(csrs->MMU_Control_and_Status_Register, 3, 3);
990
991 idx = (va >> 63) << 6;
992 idx |= busid_sel ? GETMASK64(busid, 5, 0) : GETMASK64(busid, 6, 1);
993
994 /*
995 * Use the 7 bit index id to extract the 5-bit iotsb_no from the
996 * DEV2IOTSB table (total of 128 index cells out of 16 regs).
997 */
998 dev2iotsb = csrs->MMU_DEV2IOTSB_Registers[idx>>3];
999 iotsb_idx = GETMASK64(idx, 2, 0) << 3;
1000 iotsb_no = GETMASK64(dev2iotsb, iotsb_idx + 4, iotsb_idx);
1001
1002 /*
1003 * Use iotsb_no as index to retrieve IOTSB info from IOTSBDESC table
1004 * (implemented by a set of 32 x 64-bit registers)
1005 */
1006 base_pa = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 59, 34);
1007 offset = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 33, 7);
1008 ps = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 6, 4);
1009 ts = GETMASK64(csrs->MMU_IOTSBDESC_Registers[iotsb_no], 3, 0);
1010
1011 /*
1012 * validate VA
1013 */
1014 if ((va & MASK64(62, 40)) != 0) {
1015 uint64_t error_code, trans_type;
1016
1017 /*
1018 * log the error
1019 */
1020 csrs->MMU_Translation_Fault_Address_Register = va & MASK64(63, 2);
1021
1022 if (mode == PCIE_IS32)
1023 trans_type = (type == DA_Load) ? TLP_MRd_FMT_TYPE_IS32 : TLP_MWr_FMT_TYPE_IS32;
1024 else
1025 trans_type = (type == DA_Load) ? TLP_MRd_FMT_TYPE_IS64 : TLP_MWr_FMT_TYPE_IS64;
1026
1027 csrs->MMU_Translation_Fault_Status_Register = req_id | (trans_type << 16);
1028
1029 /*
1030 * raise mmu sun4v_va_oor error
1031 */
1032 error_code = 1ULL<<SUN4V_VA_OOR_P;
1033 csrs->MMU_Error_Status_Set_Register |= error_code;
1034
1035 piu_raise_mmu_error(piup, error_code);
1036
1037 return true;
1038 }
1039
1040 /*
1041 * determine adjusted page number using encoded ps value
1042 * and adjusted VA at a given offset
1043 *
1044 * FIXME: check underflow error on vpn (error = sun4v_va_adj_uf)
1045 */
1046 pg_bits = 13 + 3*ps;
1047 vpn = ((va & MASK64(39, pg_bits)) >> pg_bits) - offset;
1048
1049 /*
1050 * calculate tte index in terms of TSB size
1051 *
1052 * FIXME: check out of range error on vpn (error = TRN_OOR)
1053 */
1054 tsb_sz = 1 << (ts+10);
1055 tte_idx = (vpn & (tsb_sz-1)) << 3;
1056 tte_addr = (base_pa << 13) + tte_idx;
1057
1058 /*
1059 * retrieve the tte entry
1060 */
1061 mmi_memread(tte_addr,(uint8_t *)&tte, 8,id);
1062#if defined(ARCH_X64)
1063 tte = ss_byteswap64(tte);
1064#endif
1065 status = true; //procp->proc_typep->dev_mem_access(procp, tte_addr, (uint8_t *)&tte, 8, DA_Load);
1066 if (!status) {
1067 DBGDEV(piup, "piu_iommu_sun4v: illegal tte_addr: tte_addr = 0x%lx va = 0x%lx\n",
1068 tte_addr, va );
1069 ASSERT(0);
1070 }
1071
1072 /*
1073 * finally do VA -> PA
1074 */
1075 piu_iommu_va2pa(piup, tte, ps, va, req_id, type, mode, pa);
1076
1077 DBGDEV(piup, "piu_iommu_sun4v: translate va = 0x%lx to pa = 0x%llx, tte_addr = 0x%lx tte = 0x%llx\n",
1078 va, *pa, tte_addr, tte );
1079
1080 return true;
1081}
1082
1083
1084bool piu_iommu_va2pa(pcie_model_t *piup, uint64_t tte, int ps, tvaddr_t va, uint16_t req_id, dev_access_t type,
1085 dev_mode_t mode, tpaddr_t *pa)
1086{
1087 bool tte_key_valid, tte_data_w, tte_data_v;
1088 uint16_t tte_dev_key;
1089 int pg_bits;
1090 uint64_t pg_mask, tte_data_pa;
1091
1092 /*
1093 * validate tte
1094 */
1095 tte_data_v = GETMASK64(tte, 0, 0);
1096 tte_key_valid = GETMASK64(tte, 2, 2);
1097 tte_dev_key = GETMASK64(tte, 63, 48);
1098
1099 /*
1100 * assert on invalid tte entry
1101 */
1102 ASSERT(tte_data_v);
1103
1104 /*
1105 * compare tte's DEV_KEY field with req_id
1106 */
1107 if (tte_key_valid) {
1108 /*
1109 * According to N2 PIU PRM, the function number portion of
1110 * the tte_dev_key and the source req_id should be masked
1111 * with the FNM field of the tte.
1112 */
1113 uint16_t tte_fnm = MASK64(15,3) | GETMASK64(tte, 5, 3);
1114
1115 if ((tte_dev_key & tte_fnm) != (req_id & tte_fnm)) {
1116 DBGDEV(piup, "piu_iommu_va2pa: req_id=0x%lx not matching tte dev_key=0x%lx\n",
1117 req_id, tte_dev_key );
1118 ASSERT(0);
1119 }
1120 }
1121
1122 /*
1123 * check on DATA_W for the write request
1124 */
1125 tte_data_w = GETMASK64(tte, 1, 1);
1126 if ((tte_data_w == 0) && type == DA_Store) {
1127 DBGDEV(piup, "piu_iommu_sun4u: write to non-writable page: va = 0x%lx tte = 0x%lx\n",
1128 va, tte );
1129 ASSERT(0);
1130 }
1131
1132 /*
1133 * finally translate VA to PA
1134 */
1135 pg_bits = 13 + 3*ps;
1136 pg_mask = (1 << pg_bits) - 1;
1137 tte_data_pa = tte & MASK64(38, pg_bits);
1138 *pa = tte_data_pa | (va & pg_mask);
1139
1140 return true;
1141}
1142
1143
1144/*
1145 * Assert INTx interrupt
1146 */
1147bool piu_assert_intx(pcie_model_t *piup, uint8_t pin_no, uint8_t dev_no)
1148{
1149 uint8_t ino;
1150
1151 /*
1152 * FIXME: check if PIU supports more than 4 devices
1153 */
1154 ASSERT(pin_no < 4);
1155
1156 /*
1157 * generate mondo interrupt
1158 */
1159 ino = pin_no + INO_INTA;
1160 DBGDEV(piup, "piu_mondo_interrupt assert: ino = %d\n", ino);
1161 piu_mondo_interrupt(piup, ino, IRQ_RECEIVED);
1162
1163 return true;
1164}
1165
1166
1167/*
1168 * Deassert INTx interrupt
1169 */
1170bool piu_deassert_intx(pcie_model_t *piup, uint8_t pin_no, uint8_t dev_no)
1171{
1172 uint8_t ino;
1173
1174 /*
1175 * FIXME: check if PIU supports more than 4 devices
1176 */
1177 ASSERT(pin_no < 4);
1178
1179 ino = pin_no + INO_INTA;
1180 DBGDEV(piup, "piu_mondo_interrupt deassert: ino = %d\n", ino);
1181 // piu_mondo_interrupt(piup, ino, IRQ_IDLE);
1182
1183 return true;
1184}
1185
1186
1187/*
1188 * Generate IRQ mondo interrupt and update the interrupt state accordingly
1189 */
1190void piu_mondo_interrupt(pcie_model_t *piup, uint8_t ino, irq_state_t n)
1191{
1192 bool V;
1193 int regx;
1194 irq_state_t old;
1195
1196 samPiu * sp = (samPiu*)piup->sam_piu;
1197
1198 // DBGDEV( piup, "piu_mondo_interrupt: ino = %d\n", ino );
1199
1200 /*
1201 * get the current IRQ mondo state
1202 */
1203
1204 pthread_mutex_lock(&sp->piuMutex);
1205
1206 regx = ino - INO_INTA;
1207 old = (irq_state_t)piu_get_irq_state(piup, ino);
1208
1209 V = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 31, 31);
1210
1211 if ((old == IRQ_IDLE) ){
1212 switch(n){
1213 case IRQ_RECEIVED:
1214 {
1215 if( !V ){
1216 // cannot send the interrupt as the valid bit is not set in the mapping
1217 // register.
1218 // set the bit in sp->pendingIntr. The interrupt would be
1219 // sent as soon as the valid bit is set.
1220 // printf("IDLE->RECV\n");
1221 piu_set_irq_state(piup, ino, n);
1222 break;
1223 }
1224
1225 bool mdo_mode;
1226 pcie_mondo_t irq_mondo ; // = (pcie_mondo_t *)Xcalloc(1, pcie_mondo_t);
1227
1228 irq_mondo.thread_id = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 30, 25);
1229
1230 mdo_mode = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 63, 63);
1231
1232 if (mdo_mode == 0) {
1233 irq_mondo.data[0] = (irq_mondo.thread_id << 6) | ino;
1234 irq_mondo.data[1] = 0;
1235 } else {
1236 uint64_t data0 = piup->csrs.Interrupt_Mondo_Data_0_Register;
1237 irq_mondo.data[0] = (data0 & MASK64(63, 12)) | (irq_mondo.thread_id << 6) | ino;
1238 irq_mondo.data[1] = piup->csrs.Interrupt_Mondo_Data_1_Register;
1239 }
1240
1241 /*
1242 * send IRQ mondo to target CPU
1243 */
1244
1245 // the valid bit is set in the interrupt mapping register.
1246
1247 n = IRQ_PENDING;
1248 // printf("IDLE->PEND\n");
1249 piu_set_irq_state(piup, ino, n);
1250 sp->sendMondo(&irq_mondo);
1251 break;
1252 }
1253 case IRQ_PENDING:
1254 printf("warning piu_mondo_interrupt:transition IDLE->PEND for ino %d\n",ino);
1255 piu_set_irq_state(piup, ino, n);
1256 break;
1257 case IRQ_IDLE:
1258 // printf("IDLE->IDLE\n");
1259 break;
1260 default:
1261 assert(0);
1262 }
1263 }else if(old == IRQ_RECEIVED) {
1264 switch(n){
1265 case IRQ_IDLE:
1266 printf("warning piu_mondo_interrupt:transition RECVD->IDLE for ino %d\n",ino);
1267 piu_set_irq_state(piup, ino, n);
1268 break;
1269 case IRQ_PENDING:
1270 // should not be called from anywhere
1271 printf("warning piu_mondo_interrupt:transition RECVD->PEND for ino %d .. Ignored\n",ino);
1272 break;
1273 case IRQ_RECEIVED:
1274 {
1275 // check to see if the valid bit has been set
1276 if(!V){
1277 // printf("RECV->RECV\n");
1278 break;
1279 }
1280
1281 // check to see if the interrupt is still pending
1282 if(sp->getPending(ino) == false){
1283 n = IRQ_IDLE;
1284 // printf("RECV->IDLE\n");
1285 piu_set_irq_state(piup, ino, n);
1286 break;
1287 }
1288
1289 // the interrupt is pending,
1290 // the mapping reg valid bit has been set. send the interrupt now.
1291 bool mdo_mode;
1292 pcie_mondo_t irq_mondo ; // = (pcie_mondo_t *)Xcalloc(1, pcie_mondo_t);
1293 irq_mondo.thread_id = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 30, 25);
1294
1295 mdo_mode = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 63, 63);
1296
1297 if (mdo_mode == 0) {
1298 irq_mondo.data[0] = (irq_mondo.thread_id << 6) | ino;
1299 irq_mondo.data[1] = 0;
1300 } else {
1301 uint64_t data0 = piup->csrs.Interrupt_Mondo_Data_0_Register;
1302 irq_mondo.data[0] = (data0 & MASK64(63, 12)) | (irq_mondo.thread_id << 6) | ino;
1303 irq_mondo.data[1] = piup->csrs.Interrupt_Mondo_Data_1_Register;
1304 }
1305
1306 // the valid bit is set in the interrupt mapping register.
1307
1308 sp->sendMondo(&irq_mondo);
1309 n = IRQ_PENDING;
1310 // printf("RECV->PENDING\n");
1311 piu_set_irq_state(piup, ino, n);
1312 break;
1313 }
1314 break;
1315 default:
1316 assert(0);
1317 }
1318 }else if(old == IRQ_PENDING){
1319 switch(n){
1320 case IRQ_IDLE:
1321 // check the status of the interrupt. if it is still pending
1322 // there, raise the interrupt again.
1323 if(sp->getPending(ino)){
1324 bool mdo_mode;
1325 pcie_mondo_t irq_mondo ; // = (pcie_mondo_t *)Xcalloc(1, pcie_mondo_t);
1326
1327 irq_mondo.thread_id = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 30, 25);
1328
1329 mdo_mode = GETMASK64(piup->csrs.Interrupt_Mapping_Registers[regx], 63, 63);
1330
1331 if (mdo_mode == 0) {
1332 irq_mondo.data[0] = (irq_mondo.thread_id << 6) | ino;
1333 irq_mondo.data[1] = 0;
1334 } else {
1335 uint64_t data0 = piup->csrs.Interrupt_Mondo_Data_0_Register;
1336 irq_mondo.data[0] = (data0 & MASK64(63, 12)) | (irq_mondo.thread_id << 6) | ino;
1337 irq_mondo.data[1] = piup->csrs.Interrupt_Mondo_Data_1_Register;
1338 }
1339
1340 sp->sendMondo(&irq_mondo);
1341 n = IRQ_PENDING;
1342 // printf("PEND->PEND\n");
1343 piu_set_irq_state(piup, ino, n);
1344 }else{
1345 // printf("PEND->IDLE\n");
1346 piu_set_irq_state(piup, ino, n);
1347 }
1348 break;
1349 case IRQ_RECEIVED:
1350 // printf("warning piu_mondo_interrupt:transition PEND->RCVD for ino %d .. Ignored\n",ino);
1351 // piu_set_irq_state(piup, ino, n);
1352 break;
1353 case IRQ_PENDING:
1354 break;
1355 default:
1356 assert(0);
1357 }
1358 }else
1359 assert(0);
1360
1361 pthread_mutex_unlock(&sp->piuMutex);
1362
1363}
1364
1365
1366/*
1367 * Update INTx state
1368 */
1369void piu_set_intx_state(pcie_model_t *piup, uint8_t ino, irq_state_t n)
1370{
1371 int bit, val;
1372
1373 switch (ino) {
1374 case INO_INTA:
1375 case INO_INTB:
1376 case INO_INTC:
1377 case INO_INTD:
1378 bit = 3 - (ino - INO_INTA);
1379 val = 1 << bit;
1380 if (n == IRQ_IDLE)
1381 piup->csrs.INTX_Status_Register &= ~val;
1382 else
1383 piup->csrs.INTX_Status_Register |= val;
1384 }
1385}
1386
1387
1388/*
1389 * Get INTx state
1390 */
1391int piu_get_intx_state(pcie_model_t *piup, uint8_t ino)
1392{
1393 int val = 0, bit;
1394
1395 switch (ino) {
1396 case INO_INTA:
1397 case INO_INTB:
1398 case INO_INTC:
1399 case INO_INTD:
1400 bit = 3 - (ino - INO_INTA);
1401 val = (piup->csrs.INTX_Status_Register >> bit) & 1;
1402 }
1403 return val;
1404}
1405
1406
1407/*
1408 * Update Interrupt State Status Register with the new mondo state 'n'
1409 */
1410void piu_set_irq_state(pcie_model_t *piup, uint8_t ino, irq_state_t n)
1411{
1412 int bit;
1413 uint64_t *regp;
1414
1415 /*
1416 * determine which status register to use in terms of ino
1417 */
1418 regp = ino<32 ? &piup->csrs.Interrupt_State_Status_Register_1 : &piup->csrs.Interrupt_State_Status_Register_2;
1419
1420 /*
1421 * each mondo state is encoded via a 2 bit value:
1422 *
1423 * 00: IRQ_IDLE
1424 * 01: IRQ_RECEIVED
1425 * 10: IRQ_RESERVED
1426 * 11: IRQ_PENDING
1427 */
1428 bit = 2 * (ino&31);
1429
1430 /*
1431 * update the approriate bits of the register
1432 */
1433 *regp &= ~(IRQ_STATE_MASK << bit);
1434 *regp |= ((uint64_t)n << bit);
1435
1436 /*
1437 * update the INTx status register if ino = 20-23
1438 */
1439 piu_set_intx_state(piup, ino, n);
1440}
1441
1442
1443/*
1444 * Get mondo interrupt state
1445 */
1446int piu_get_irq_state(pcie_model_t *piup, uint8_t ino)
1447{
1448 int bit;
1449 uint64_t val;
1450 irq_state_t state;
1451
1452 bit = 2* (ino&31);
1453 val = ino<32 ? piup->csrs.Interrupt_State_Status_Register_1: piup->csrs.Interrupt_State_Status_Register_2;
1454 state = (irq_state_t)((val >> bit) & IRQ_STATE_MASK);
1455
1456 return state;
1457}
1458
1459
1460/*
1461 * This function handles memory write request for both MSI and MSI-X.
1462 *
1463 * Arguments:
1464 * piup: handle to pcie_model structure
1465 * msi_addr: host address for MSI/MSI-X write, can be either 32 bit or 64 bit
1466 * msi_datap: pointer to the MSI/MSI-X data to write
1467 * count: always be 4 bytes for MSI or MSI-X request
1468 * req_id: 16 bit requester Id
1469 * mode: addressing mode, can be either 32 bit or 64 bit address
1470 *
1471 */
1472bool piu_msi_write(pcie_model_t *piup, uint64_t msi_addr, uint8_t *msi_datap,
1473 int count, uint16_t req_id, dev_mode_t mode, SAM_DeviceId id)
1474{
1475 uint64_t mapping;
1476 int map_idx, v, eqwr_n, eqnum;
1477 uint8_t msi_data[4];
1478
1479 /*
1480 * validate count, should always be 4
1481 */
1482 if (count == 4) {
1483 memcpy((void *)&msi_data, (void *)msi_datap, count);
1484 } else {
1485 DBGDEV( piup, "piu_msi_write: invalid msi_data size = %d \n", count);
1486 return false;
1487 }
1488
1489 /*
1490 * PIU implements a total of 256 MSI_Mapping_Registers as the mapping
1491 * table to allow SW to map each MSI/MSI-X request to an event queue.
1492 * The lower 8 bits of the MSI/MSI-X data field is used to index into
1493 * the mapping table.
1494 */
1495 DBGDEV( piup, "piu_msi_write: MSI addr = 0x%llx size = %d data = 0x%02x_%02x_%02x_%02x\n",
1496 msi_addr, msi_data[0], msi_data[1], msi_data[2], msi_data[3]);
1497 map_idx = msi_data[3] & MASK64(7,0);
1498
1499 // map_idx = msi_data[0] & MASK64(7,0);
1500 mapping = piup->csrs.MSI_Mapping_Register[map_idx];
1501
1502 v = GETMASK64(mapping, 63, 63);
1503 eqwr_n = GETMASK64(mapping, 62, 62);
1504 eqnum = GETMASK64(mapping, 5, 0);
1505
1506 if (v && !eqwr_n) {
1507 /*
1508 * assemble the event queue record
1509 */
1510 // eq_record_t *record = (eq_record_t *)Xcalloc(1, eq_record_t);
1511 eq_record_t record;
1512
1513 // reserved bit should always be initialized to 0
1514 record.reserved = 0;
1515
1516 record.fmt_type = mode ? TLP_MSI_FMT_TYPE_IS64 : TLP_MSI_FMT_TYPE_IS32;
1517 /*
1518 * always one DW
1519 */
1520 record.length = 1;
1521 record.addr_15_2 = GETMASK64(msi_addr, 15, 2);
1522 record.rid = req_id;
1523
1524 /*
1525 * data0, for lower 16 bits
1526 */
1527 record.data0 = (msi_data[2] << 8) | msi_data[3];
1528
1529 record.addr_hi = GETMASK64(msi_addr, 63, 16);
1530
1531 /*
1532 * data1, for higher 16 bits
1533 */
1534 record.data1 = (msi_data[0] << 8) | msi_data[1];
1535
1536
1537 piu_eq_write(piup, eqnum, &record, req_id, mode,id);
1538 }
1539
1540 return true;
1541}
1542
1543
1544/*
1545 * Write event queue records to the queue and update the tail pointer of the queue.
1546 */
1547bool piu_eq_write(pcie_model_t *piup, int eqnum, eq_record_t *record, uint16_t req_id, dev_mode_t mode, SAM_DeviceId id)
1548{
1549 int overr, state = true;
1550 bool status;
1551
1552 overr = GETMASK64(piup->csrs.Event_Queue_Tail_Register[eqnum], 57, 57);
1553 state = GETMASK64(piup->csrs.Event_Queue_State_Register[eqnum], 2, 0);
1554
1555 DBGDEV( piup, "piu_eq_write: eqnum = %d state = %d\n", eqnum, state);
1556
1557 if ((state == EQ_ACTIVE) && !overr) {
1558 int head = GETMASK64(piup->csrs.Event_Queue_Head_Register[eqnum], 6, 0);
1559 int tail = GETMASK64(piup->csrs.Event_Queue_Tail_Register[eqnum], 6, 0);
1560 int next = (tail + 1) % EQ_NUM_ENTRIES;
1561 bool full = (next == head);
1562
1563 if (full) {
1564 /*
1565 * set the overflow bit and generate a DMU internal interrupt (ino 62)
1566 */
1567 piup->csrs.Event_Queue_Tail_Register[eqnum] |= MASK64(57,57);
1568 piup->csrs.Event_Queue_State_Register[eqnum] = EQ_ERROR;
1569 samPiu * sp = (samPiu*)piup->sam_piu;
1570 sp->setPending(INO_DMU,true);
1571 piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
1572 }
1573 else {
1574 /*
1575 * determine the address (VA) to write the event queue record
1576 */
1577 uint64_t base, offset, rec_va, pa;
1578
1579 base = piup->csrs.Event_Queue_Base_Address_Register & MASK64(63,19);
1580 offset = (eqnum * EQ_NUM_ENTRIES + tail) * EQ_RECORD_SIZE;
1581 rec_va = base + offset;
1582
1583 DBGDEV( piup, "piu_eq_write: EQ record va = 0x%llx\n", rec_va);
1584
1585 /*
1586 * translate VA of the EQ record to PA
1587 */
1588 status = piu_iommu(piup, rec_va, req_id, DA_Store, mode, &pa,id);
1589
1590 /*
1591 * write the record to the event queue
1592 */
1593 if (status) {
1594 config_proc_t *procp = piup->config_procp;
1595
1596 //status = procp->proc_typep->dev_mem_access(procp, pa, (uint8_t *)record,
1597 // sizeof(*record), DA_Store);
1598
1599 mmi_memwrite(pa,(uint8_t *)record, sizeof(*record),id);
1600
1601 // all the following actions (check queue, update tail, send interrupt)
1602 // should be done atomically
1603 samPiu *sp = (samPiu*)piup->sam_piu;
1604
1605 pthread_mutex_lock(&sp->msiMutex[eqnum]);
1606
1607 /* check if the queue is empty before adding the new entry
1608 *
1609 */
1610 head = GETMASK64(piup->csrs.Event_Queue_Head_Register[eqnum], 6, 0);
1611 bool empty = (head == tail);
1612
1613 /*
1614 * update the tail pointer
1615 */
1616 piup->csrs.Event_Queue_Tail_Register[eqnum] = next & MASK64(6,0);
1617
1618
1619 /*
1620 * if the queue is empty, generate a mondo interrupt depending on state
1621 */
1622 if (empty){
1623 samPiu * sp = (samPiu*)piup->sam_piu;
1624 sp->setPending(INO_EQLO+eqnum,true);
1625 piu_mondo_interrupt(piup, INO_EQLO+eqnum, IRQ_RECEIVED);
1626 }
1627
1628 pthread_mutex_unlock(&sp->msiMutex[eqnum]);
1629 }
1630 }
1631 }
1632
1633 return (status);
1634}
1635
1636
/*
 * Populate the global IMU and MMU error lookup tables.
 *
 * Each PIU_ERR(name, bit) entry describes one error bit of the
 * corresponding error status register; _P entries are the primary error
 * bits (0..) and _S entries the secondary bits (32..).  The sentinel
 * { -1, (char *)0 } terminates each init list.  Entries are copied into
 * the global imu_error_list/mmu_error_list arrays indexed by error_type.
 */
void piu_init_error_list()
{
    int i;

    /* IMU (interrupt/MSI unit) error descriptors */
    imu_error_entry_t imu_error_init_list[] = {
	{ PIU_ERR ( MSI_NOT_EN_P, 0) },
	{ PIU_ERR ( COR_MES_NOT_EN_P, 1) },
	{ PIU_ERR ( NONFATAL_MES_NOT_EN_P, 2) },
	{ PIU_ERR ( FATAL_MES_NOT_EN_P, 3) },
	{ PIU_ERR ( PMPME_MES_NOT_EN_P, 4) },
	{ PIU_ERR ( PMEACK_MES_NOT_EN_P, 5) },
	{ PIU_ERR ( MSI_PAR_ERR_P, 6) },
	{ PIU_ERR ( MSI_MAL_ERR_P, 7) },
	{ PIU_ERR ( EQ_NOT_EN_P, 8) },
	{ PIU_ERR ( EQ_OVER_P, 9) },
	{ PIU_ERR ( MSI_NOT_EN_S, 32) },
	{ PIU_ERR ( COR_MES_NOT_EN_S, 33) },
	{ PIU_ERR ( NONFATAL_MES_NOT_EN_S, 34) },
	{ PIU_ERR ( FATAL_MES_NOT_EN_S, 35) },
	{ PIU_ERR ( PMPME_MES_NOT_EN_SEQ_OVER_S, 36) },
	{ PIU_ERR ( PMEACK_MES_NOT_EN_S, 37) },
	{ PIU_ERR ( MSI_PAR_ERR_S, 38) },
	{ PIU_ERR ( MSI_MAL_ERR_S, 39) },
	{ PIU_ERR ( EQ_NOT_EN_S, 40) },
	{ PIU_ERR ( EQ_OVER_S, 41) },
	{ -1, (char *)0 },
    };

    /* MMU (IOMMU) error descriptors */
    mmu_error_entry_t mmu_error_init_list[] = {
	{ PIU_ERR ( BYP_ERR_P, 0) },
	{ PIU_ERR ( BYP_OOR_P, 1) },
	{ PIU_ERR ( SUN4V_INV_PG_SZ_P, 2) },
	{ PIU_ERR ( SPARE1_P, 3) },
	{ PIU_ERR ( TRN_ERR_P, 4) },
	{ PIU_ERR ( TRN_OOR_P, 5) },
	{ PIU_ERR ( TTE_INV_P, 6) },
	{ PIU_ERR ( TTE_PRT_P, 7) },
	{ PIU_ERR ( TTC_DPE_P, 8) },
	{ PIU_ERR ( TTC_CAE_P, 9) },
	{ PIU_ERR ( SPARE2_P, 10) },
	{ PIU_ERR ( SPARE3_P, 11) },
	{ PIU_ERR ( TBW_DME_P, 12) },
	{ PIU_ERR ( TBW_UDE_P, 13) },
	{ PIU_ERR ( TBW_ERR_P, 14) },
	{ PIU_ERR ( TBW_DPE_P, 15) },
	{ PIU_ERR ( IOTSBDESC_INV_P, 16) },
	{ PIU_ERR ( IOTSBDESC_DPE_P, 17) },
	{ PIU_ERR ( SUN4V_VA_OOR_P, 18) },
	{ PIU_ERR ( SUN4V_VA_ADJ_UF_P, 19) },
	{ PIU_ERR ( SUN4V_KEY_ERR_P, 20) },
	{ PIU_ERR ( BYP_ERR_S, 32) },
	{ PIU_ERR ( BYP_OOR_S, 33) },
	{ PIU_ERR ( SUN4V_INV_PG_SZ_S, 34) },
	{ PIU_ERR ( SPARE1_S, 35) },
	{ PIU_ERR ( TRN_ERR_S, 36) },
	{ PIU_ERR ( TRN_OOR_S, 37) },
	{ PIU_ERR ( TTE_INV_S, 38) },
	{ PIU_ERR ( TTE_PRT_S, 39) },
	{ PIU_ERR ( TTC_DPE_S, 40) },
	{ PIU_ERR ( TTC_CAE_S, 41) },
	{ PIU_ERR ( SPARE2_S, 42) },
	{ PIU_ERR ( SPARE3_S, 43) },
	{ PIU_ERR ( TBW_DME_S, 44) },
	{ PIU_ERR ( TBW_UDE_S, 45) },
	{ PIU_ERR ( TBW_ERR_S, 46) },
	{ PIU_ERR ( TBW_DPE_S, 47) },
	{ PIU_ERR ( IOTSBDESC_INV_S, 48) },
	{ PIU_ERR ( IOTSBDESC_DPE_S, 49) },
	{ PIU_ERR ( SUN4V_VA_OOR_S, 50) },
	{ PIU_ERR ( SUN4V_VA_ADJ_UF_S, 51) },
	{ PIU_ERR ( SUN4V_KEY_ERR_S, 52) },
	{ -1, (char *)0 },
    };

    /* index the descriptors by error_type into the global tables */
    for (i = 0; imu_error_init_list[i].error_type != -1; i ++)
	imu_error_list[imu_error_init_list[i].error_type] = imu_error_init_list[i];

    for (i = 0; mmu_error_init_list[i].error_type != -1; i ++)
	mmu_error_list[mmu_error_init_list[i].error_type] = mmu_error_init_list[i];

}
1718
1719
1720/*
1721 * imu error handler
1722 */
1723void piu_simulate_imu_error(pcie_model_t *piup, uint64_t imu_error)
1724{
1725 uint64_t error_code, intr_enable, imu_ie;
1726 int i;
1727
1728 /*
1729 * loop over the error bits and raise the error only if
1730 * the interrupt is enabled
1731 */
1732 imu_ie = piup->csrs.IMU_Interrupt_Enable_Register;
1733
1734 for (i=0; i<IMU_ERROR_MAXNUM; i++) {
1735 error_code = imu_error_list[i].error_code;
1736 intr_enable = imu_error_list[i].intr_enable;
1737
1738 if ((imu_error & error_code) && (imu_ie & intr_enable))
1739 piu_raise_imu_error(piup, error_code);
1740 }
1741}
1742
1743
1744void piu_raise_imu_error(pcie_model_t *piup, uint64_t error_code)
1745{
1746 piu_csr_t *csrs = &piup->csrs;
1747 uint64_t dmc_cbie;
1748 bool dmu, imu;
1749
1750 /*
1751 * update the error status register
1752 */
1753 csrs->IMU_Error_Status_Set_Register |= error_code;
1754
1755 /*
1756 * generate INO_DMU mondo interrupt
1757 */
1758 dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;
1759
1760 dmu = GETMASK64(dmc_cbie, 63, 63);
1761 imu = GETMASK64(dmc_cbie, 0, 0);
1762
1763 if (dmu && imu) {
1764 csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(0,0);
1765 samPiu * sp = (samPiu*)piup->sam_piu;
1766 sp->setPending(INO_DMU,true);
1767 piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
1768 }
1769}
1770
1771
1772/*
1773 * mmu error handler
1774 */
1775void piu_simulate_mmu_error(pcie_model_t *piup, uint64_t mmu_error)
1776{
1777 uint64_t error_code, intr_enable, mmu_ie;
1778 int i;
1779
1780 /*
1781 * loop over the error bits and raise the error only if
1782 * the interrupt is enabled
1783 */
1784 mmu_ie = piup->csrs.MMU_Interrupt_Enable_Register;
1785
1786 for (i=0; i<MMU_ERROR_MAXNUM; i++) {
1787 error_code = mmu_error_list[i].error_code;
1788 intr_enable = mmu_error_list[i].intr_enable;
1789
1790 if ((mmu_error & error_code) && (mmu_ie & intr_enable))
1791 piu_raise_mmu_error(piup, error_code);
1792 }
1793}
1794
1795
/*
 * Record an MMU error and, when enabled, flag the MMU block error bit
 * and generate an INO_DMU mondo interrupt.
 */
void piu_raise_mmu_error(pcie_model_t *piup, uint64_t error_code)
{
    piu_csr_t *csrs = &piup->csrs;
    uint64_t dmc_cbie;
    bool dmu, mmu;

    /*
     * update the error status register
     */
    csrs->MMU_Error_Status_Set_Register |= error_code;

    /*
     * generate INO_DMU mondo interrupt
     */
    dmc_cbie = csrs->DMC_Core_and_Block_Interrupt_Enable_Register;

    dmu = GETMASK64(dmc_cbie, 63, 63);
    /*
     * NOTE(review): this reads enable bit 0, exactly as the IMU variant
     * does, yet the block status bit set below is bit 1.  The MMU
     * block-enable is presumably bit 1 (looks like a copy-paste from
     * piu_raise_imu_error) -- confirm against the N2 PIU PRM before
     * changing.
     */
    mmu = GETMASK64(dmc_cbie, 0, 0);

    if (dmu && mmu) {
	csrs->DMC_Core_and_Block_Error_Status_Register |= MASK64(1,1);
	samPiu * sp = (samPiu*)piup->sam_piu;
	sp->setPending(INO_DMU,true);
	piu_mondo_interrupt(piup, INO_DMU, IRQ_RECEIVED);
    }
}
1822
1823
1824/*
1825 * Look up the PIU register type from its offset value
1826 */
1827pcie_csr_t piu_offset2reg(uint64_t offset, int *regx)
1828{
1829 int i;
1830
1831 /*
1832 * first to check the few interrupt registers
1833 * (PIU supports less number of these regs than the Fire for Niagara 1)
1834 */
1835 if ((offset > 0x6011D8) && (offset < 0x6011F0))
1836 return UND_PCIE_CSRS;
1837 if ((offset > 0x6015D8) && (offset < 0x6015F0))
1838 return UND_PCIE_CSRS;
1839
1840 for (i = 0; i < NUM_PCIE_CSRS; i++) {
1841 int nwords, diff = offset - pcie_csrs[i].offset;
1842
1843 if (diff == 0) {
1844 *regx = pcie_csrs[i].regx;
1845 return (pcie_csr_t)i;
1846 }
1847
1848 if (diff < 0)
1849 return UND_PCIE_CSRS;
1850
1851 if ((nwords = pcie_csrs[i].nwords) != 1) {
1852 int wordx = diff/8;
1853
1854 if (wordx < nwords) {
1855 *regx = pcie_csrs[i].regx + wordx;
1856 return (pcie_csr_t)i;
1857 }
1858 }
1859 }
1860
1861 return UND_PCIE_CSRS;
1862}
1863
1864