Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / legion / src / procs / sunsparc / libniagara2 / niagara2_device.c
/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T2 Processor File: niagara2_device.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "@(#)niagara2_device.c 1.35 07/10/12 SMI"

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <strings.h>

#include "ss_common.h"
#include "niagara2.h"
#include "niagara2_device.h"

#if INTERNAL_BUILD
#include "lfsr64.h"
#endif

/*
 * This file contains Niagara 2 specific pseudo device models
 */
static void ncu_init(config_dev_t *);
static void ccu_init(config_dev_t *);
static void mcu_init(config_dev_t *);
static void l2c_init(config_dev_t *);
static void ssi_init(config_dev_t *);
static void hwdbg_init(config_dev_t *);
static void rcu_init(config_dev_t *);
static void jtag_init(config_dev_t *);

static bool_t ncu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t ccu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t mcu_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp);
static bool_t l2c_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp);
static bool_t ssi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t hwdbg_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t rcu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp);
static bool_t jtag_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);

#ifdef VFALLS /* { */
static void ncx_init(config_dev_t *);
static void cou_init(config_dev_t *);
static void lfu_init(config_dev_t *);
static bool_t ncx_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t cou_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
static bool_t lfu_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t *regp);
#endif /* } VFALLS */

void niagara2_send_xirq(simcpu_t * sp, ss_proc_t * npp, uint64_t val);

static dev_type_t dev_type_ncu = {
    PSEUDO_DEV_NAME_NCU,
    NULL,        /* parse */
    ncu_init,
    NULL,        /* dump */
    generic_device_non_cacheable,
    ncu_access,
    DEV_MAGIC
};
static dev_type_t dev_type_ccu = {
    PSEUDO_DEV_NAME_CCU,
    NULL,        /* parse */
    ccu_init,
    NULL,        /* dump */
    generic_device_non_cacheable,
    ccu_access,
    DEV_MAGIC
};
static dev_type_t dev_type_mcu = {
    PSEUDO_DEV_NAME_MCU,
    NULL,        /* parse */
    mcu_init,
    NULL,        /* dump */
    generic_device_non_cacheable,
    mcu_access,
    DEV_MAGIC
};
static dev_type_t dev_type_dbg = {
    PSEUDO_DEV_NAME_HWDBG,
    NULL,        /* parse */
    hwdbg_init,
    NULL,        /* dump */
    generic_device_non_cacheable,
    hwdbg_access,
    DEV_MAGIC
};
static dev_type_t dev_type_l2c = {
    PSEUDO_DEV_NAME_L2C,
    NULL,        /* parse */
    l2c_init,
    NULL,        /* dump */
    generic_device_non_cacheable,
    l2c_access,
    DEV_MAGIC
};
static dev_type_t dev_type_ssi = {
    PSEUDO_DEV_NAME_SSI,
    NULL,        /* parse */
    ssi_init,
    NULL,        /* dump */
    generic_device_non_cacheable,
    ssi_access,
    DEV_MAGIC
};
static dev_type_t dev_type_rcu = {
    PSEUDO_DEV_NAME_RCU,
    NULL,        /* parse */
    rcu_init,
    NULL,        /* dump */
    generic_device_non_cacheable,
    rcu_access,
    DEV_MAGIC
};
static dev_type_t dev_type_jtag = {
    PSEUDO_DEV_NAME_JTAG,
    NULL,        /* parse */
    jtag_init,   /* init */
    NULL,        /* dump */
    generic_device_non_cacheable,
    jtag_access,
    DEV_MAGIC
};

#ifdef VFALLS /* { */
static dev_type_t dev_type_ncx = {
    PSEUDO_DEV_NAME_NCX,
    NULL,        /* parse */
    ncx_init,    /* init */
    NULL,        /* dump */
    generic_device_non_cacheable,
    ncx_access,
    DEV_MAGIC
};

static dev_type_t dev_type_cou = {
    PSEUDO_DEV_NAME_COU,
    NULL,        /* parse */
    cou_init,    /* init */
    NULL,        /* dump */
    generic_device_non_cacheable,
    cou_access,
    DEV_MAGIC
};

static dev_type_t dev_type_lfu = {
    PSEUDO_DEV_NAME_LFU,
    NULL,        /* parse */
    lfu_init,    /* init */
    NULL,        /* dump */
    generic_device_non_cacheable,
    lfu_access,
    DEV_MAGIC
};

#endif /* } VFALLS */

uint64_t gen_raw_entropy(double *phase, double *frequency, double *noise, double dutyfactor);

/*
 * Set up the pseudo physical devices that Niagara 2 has for its control
 * registers: things like the clock unit, the memory controllers, etc.
 */
void ss_setup_pseudo_devs(domain_t * domainp, ss_proc_t *procp)
{
    config_dev_t *pd, *overlapp;
    int node_id = procp->config_procp->proc_id;
    uint64_t phys_addr;
    config_addr_t *addrp;
    int i;
    static bool_t setup_once = false;

    if (!setup_once) {
        setup_once = true;
        /*
         * NCU, mapped at MSB[39:32] = 0x80
         */
        procp->ncup = Xcalloc(1, ncu_t);
        procp->ncup->node_id = node_id;

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_ncu;
        pd->devp = (void*)procp;
        procp->ncu_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_NCU, PHYS_ADDR_NCU + NCU_RANGE);

        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }
        /*
         * Clock Unit, mapped at MSB[39:32] = 0x83
         */
        procp->clockp = Xcalloc(1, ccu_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_ccu;
        pd->devp = (void*)procp;
        procp->clock_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_CCU, PHYS_ADDR_CCU + CCU_RANGE);

        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

        /*
         * Memory Controller Unit, mapped at MSB[39:32] = 0x84
         *
         * N2 supports 4 DRAM branches, each controlled by a separate MCU,
         * configured as
         *
         *    MCU0: addr[13:12] = 00b
         *    MCU1: addr[13:12] = 01b
         *    MCU2: addr[13:12] = 10b
         *    MCU3: addr[13:12] = 11b
         */
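        /*
         * Illustrative decode (a sketch, not used by the model): under
         * the addr[13:12] interleave above, the DRAM branch owning a
         * physical address pa would be
         *
         *     mcu = (pa >> 12) & 0x3;
         *
         * and the per-MCU CSR pages registered just below are laid out
         * at the matching 4KB (1 << 12) stride.
         */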
#ifdef VFALLS
        procp->num_mbanks = 2;
#else
        procp->num_mbanks = 4;
#endif
        procp->mbankp = Xcalloc(procp->num_mbanks, mcu_bank_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_mcu;
        pd->devp = (void*)procp;
        procp->mcu_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_MCU,
            PHYS_ADDR_MCU + 4096LL*(uint64_t)procp->num_mbanks);

        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

        /*
         * L2 Cache registers, mapped at MSB[39:32] = 0xA0
         */
        procp->num_l2banks = L2_BANKS;
        procp->l2p = Xcalloc(1, l2c_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_l2c;
        pd->devp = (void*)procp;
        procp->l2c_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_L2C, PHYS_ADDR_L2C + L2C_RANGE);

        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

        /*
         * HW Debug Unit, mapped at MSB[39:32] = 0x86
         */
        procp->hwdbgp = Xcalloc(1, hwdbg_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_dbg;
        pd->devp = (void*)procp;
        procp->hwdbg_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_HWDBG, PHYS_ADDR_HWDBG + 0x100000000LL);

        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

        /*
         * Reset Unit, mapped at MSB[39:32] = 0x89
         */
        procp->rcup = Xcalloc(1, rcu_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_rcu;
        pd->devp = (void*)procp;
        procp->rcu_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_RCU, PHYS_ADDR_RCU + RCU_RANGE);

        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

        /*
         * JTAG, mapped at MSB[39:32] = 0x90
         */

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_jtag;
        pd->devp = (void*)procp;
        procp->jtag_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_JTAG, PHYS_ADDR_JTAG + JTAG_RANGE);
        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

#ifdef VFALLS /* { */

        /*
         * NCX, mapped at MSB[39:32] = 0x81
         */
        procp->ncxp = Xcalloc(1, ncx_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_ncx;
        pd->devp = (void*)procp;
        procp->ncx_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_NCX, PHYS_ADDR_NCX + NCX_RANGE);
        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

        /*
         * COU, mapped at MSB[39:28] = 0x811
         */
        procp->coup = Xcalloc(1, cou_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_cou;
        pd->devp = (void*)procp;
        procp->cou_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_COU, PHYS_ADDR_COU + COU_RANGE);
        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

        /*
         * LFU, mapped at MSB[39:28] = 0x812
         */
        procp->lfup = Xcalloc(1, lfu_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_lfu;
        pd->devp = (void*)procp;
        procp->lfu_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_LFU, PHYS_ADDR_LFU + LFU_RANGE);
        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }

#endif /* } VFALLS */
        /*
         * SSI, mapped at MSB[39:32] = 0xff
         */
        procp->ssip = Xcalloc(1, ssi_t);

        pd = Xcalloc(1, config_dev_t);
        pd->is_implied = true;
        pd->dev_typep = &dev_type_ssi;
        pd->devp = (void*)procp;
        procp->ssi_devp = pd;

        insert_domain_address(domainp, pd, PHYS_ADDR_SSI, PHYS_ADDR_SSI + SSI_RANGE);
#ifdef VFALLS /* { */
        insert_domain_address(domainp, pd, MAGIC_SSI, MAGIC_SSI + 8);
#endif /* } VFALLS */

        overlapp = insert_domain_device(domainp, pd);
        if (overlapp != NULL) {
            lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
                overlapp->dev_typep->dev_type_namep,
                overlapp->addrp->baseaddr,
                pd->dev_typep->dev_type_namep,
                pd->addrp->baseaddr);
        }
    }

#ifdef VFALLS /* { */

    DBGMULNODE(lprintf(-1, "Setting up pseudo devices for node %d\n",
        node_id););
    /*
     * Rather than taking a fatal error later while allocating address
     * space for the pseudo devices of a duplicate node, catch the
     * problem here.
     */
    for (i = 0; i < (domainp->procs.count - 1); i++) {
        if (node_id == LIST_ENTRY(domainp->procs, i)->proc_id)
            lex_fatal("More than one node %d present",
                node_id);
    }
    /*
     * NCU, mapped at MSB[39:32] = 0x80
     */
    /* Need to allocate space only once for each node */
    if (!(procp->ncup)) {
        procp->ncup = Xcalloc(1, ncu_t);
        procp->ncup->node_id = node_id;
    }

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_ncu;
    pd->devp = (void*)procp;
    procp->ncu_devp = pd;

    phys_addr = PHYS_ADDR_NCU_REMOTE(node_id);
    insert_domain_address(domainp, pd, phys_addr, phys_addr + NCU_RANGE);
    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }
    /*
     * Clock Unit, mapped at MSB[39:32] = 0x83
     */

    /* Need to allocate space only once for each node */
    if (!(procp->clockp))
        procp->clockp = Xcalloc(1, ccu_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_ccu;
    pd->devp = (void*)procp;
    procp->clock_devp = pd;

    phys_addr = PHYS_ADDR_CCU_REMOTE(node_id);
    insert_domain_address(domainp, pd, phys_addr, phys_addr + CCU_RANGE);

    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }

    /*
     * Memory Controller Unit, mapped at MSB[39:32] = 0x84
     *
     * VF supports 2 DRAM branches, each controlled by a separate MCU,
     * configured as
     *
     *    MCU0: addr[12] = 0b
     *    MCU1: addr[12] = 1b
     */
    procp->num_mbanks = 2;

    /* Need to allocate space only once for each node */
    if (!(procp->mbankp))
        procp->mbankp = Xcalloc(procp->num_mbanks, mcu_bank_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_mcu;
    pd->devp = (void*)procp;
    procp->mcu_devp = pd;

    phys_addr = PHYS_ADDR_MCU_REMOTE(node_id);
    insert_domain_address(domainp, pd, phys_addr,
        phys_addr + 4096LL*(uint64_t)procp->num_mbanks);

    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }

    /*
     * L2 Cache registers, mapped at MSB[39:28] = 0xA00-0xBFF
     */
    /* L2CSR is local access only. See comments in l2c_access() */
    procp->num_l2banks = L2_BANKS;
    if (!(procp->l2p))
        procp->l2p = Xcalloc(1, l2c_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_l2c;
    pd->devp = (void*)procp;
    pd->domainp = domainp;
    procp->l2c_devp = pd;

    addrp = Xmalloc(sizeof(config_addr_t));
    addrp->config_devp = pd;
    addrp->baseaddr = PHYS_ADDR_L2C;
    addrp->topaddr = PHYS_ADDR_L2C + L2C_RANGE;
    addrp->range = L2C_RANGE;

    pd->addrp = addrp;

    /*
     * SSI, mapped at MSB[39:28] = 0xff0 - 0xfff
     */

    /* Need to allocate space only once for each node */
    if (!(procp->ssip))
        procp->ssip = Xcalloc(1, ssi_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_ssi;
    pd->devp = (void*)procp;
    pd->domainp = domainp;
    procp->ssi_devp = pd;

    addrp = Xmalloc(sizeof(config_addr_t));
    addrp->config_devp = pd;
    addrp->baseaddr = PHYS_ADDR_SSI;
    addrp->topaddr = PHYS_ADDR_SSI + SSI_RANGE;
    addrp->range = SSI_RANGE;

    pd->addrp = addrp;

    /* and now create an addrp struct for the MAGIC_SSI address */
    addrp = Xmalloc(sizeof(config_addr_t));
    addrp->config_devp = pd;
    addrp->baseaddr = MAGIC_SSI;
    addrp->topaddr = MAGIC_SSI + 8;
    addrp->range = 8;

    /*
     * JTAG, mapped at MSB[39:32] = 0x90
     */

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_jtag;
    pd->devp = (void*)procp;
    procp->jtag_devp = pd;

    phys_addr = PHYS_ADDR_JTAG_REMOTE(node_id);

    insert_domain_address(domainp, pd, phys_addr, phys_addr + JTAG_RANGE);

    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }

    /*
     * NCX, mapped at MSB[39:32] = 0x81
     */
    if (!(procp->ncxp))
        procp->ncxp = Xcalloc(1, ncx_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_ncx;
    pd->devp = (void*)procp;
    procp->ncx_devp = pd;

    phys_addr = PHYS_ADDR_NCX_REMOTE(node_id);

    insert_domain_address(domainp, pd, phys_addr, phys_addr + NCX_RANGE);

    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }


    /*
     * COU, mapped at MSB[39:28] = 0x811
     */
    if (!(procp->coup))
        procp->coup = Xcalloc(1, cou_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_cou;
    pd->devp = (void*)procp;
    procp->cou_devp = pd;

    phys_addr = PHYS_ADDR_COU_REMOTE(node_id);

    insert_domain_address(domainp, pd, phys_addr, phys_addr + COU_RANGE);

    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }

    /*
     * LFU, mapped at MSB[39:28] = 0x812
     */
    if (!(procp->lfup))
        procp->lfup = Xcalloc(1, lfu_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_lfu;
    pd->devp = (void*)procp;
    procp->lfu_devp = pd;

    phys_addr = PHYS_ADDR_LFU_REMOTE(node_id);

    insert_domain_address(domainp, pd, phys_addr, phys_addr + LFU_RANGE);

    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }

    /*
     * RCU, mapped at MSB[39:32] = 0x89
     */
    if (!(procp->rcup))
        procp->rcup = Xcalloc(1, rcu_t);

    pd = Xcalloc(1, config_dev_t);
    pd->is_implied = true;
    pd->dev_typep = &dev_type_rcu;
    pd->devp = (void*)procp;
    procp->rcu_devp = pd;

    phys_addr = PHYS_ADDR_RCU_REMOTE(node_id);

    insert_domain_address(domainp, pd, phys_addr, phys_addr + RCU_RANGE);

    overlapp = insert_domain_device(domainp, pd);
    if (overlapp != NULL) {
        lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx",
            overlapp->dev_typep->dev_type_namep,
            overlapp->addrp->baseaddr,
            pd->dev_typep->dev_type_namep,
            pd->addrp->baseaddr);
    }

#endif /* } VFALLS */

}

#ifndef NDEBUG /* { */
char * ssi_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case SSI_TIMEOUT: s="ssi_timeout"; break;
    case SSI_LOG: s="ssi_log"; break;
    default: s="Illegal ssi register"; break;
    }

    return s;
}

static char *ncu_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case INT_MAN: s="int_man"; break;
    case MONDO_INT_VEC: s="mondo_int_vec"; break;
    case SER_NUM: s="ser_num"; break;
    case EFU_STAT: s="efu_stat"; break;
    case CORE_AVAIL: s="core_avail"; break;
    case BANK_AVAIL: s="bank_avail"; break;
    case BANK_ENABLE: s="bank_enable"; break;
    case BANK_ENABLE_STATUS: s="bank_enable_status"; break;
    case L2_IDX_HASH_EN: s="l2_idx_hash_en"; break;
    case L2_IDX_HASH_EN_STATUS: s="l2_idx_hash_en_status"; break;
    case PCIE_A_MEM32_OFFSET_BASE: s="pcie_a_mem32_offset_base"; break;
    case PCIE_A_MEM32_OFFSET_MASK: s="pcie_a_mem32_offset_mask"; break;
    case PCIE_A_MEM64_OFFSET_BASE: s="pcie_a_mem64_offset_base"; break;
    case PCIE_A_MEM64_OFFSET_MASK: s="pcie_a_mem64_offset_mask"; break;
    case PCIE_A_IOCON_OFFSET_BASE: s="pcie_a_iocon_offset_base"; break;
    case PCIE_A_IOCON_OFFSET_MASK: s="pcie_a_iocon_offset_mask"; break;
    case PCIE_A_FSH: s="pcie_a_fsh"; break;
    case SOC_ESR: s="soc_error_status"; break;
    case SOC_LOG_ENABLE: s="soc_error_log_enable"; break;
    case SOC_INTERRUPT_ENABLE: s="soc_error_interrupt_enable"; break;
    case SOC_FATAL_ERROR_ENABLE: s="soc_fatal_error_enable"; break;
    case SOC_PENDING_ERROR_STATUS: s="soc_pending_error_status"; break;
    case SOC_ERROR_INJECTION: s="soc_error_injection"; break;
    case SOC_SII_ERROR_SYNDROME: s="soc_sii_error_syndrome"; break;
    case SOC_NCU_ERROR_SYNDROME: s="soc_ncu_error_syndrome"; break;
    case MONDO_INT_DATA0: s="mondo_int_data0"; break;
    case MONDO_INT_DATA1: s="mondo_int_data1"; break;
    case MONDO_INT_ADATA0: s="mondo_int_adata0"; break;
    case MONDO_INT_ADATA1: s="mondo_int_adata1"; break;
    case MONDO_INT_BUSY: s="mondo_int_busy"; break;
    case MONDO_INT_ABUSY: s="mondo_int_abusy"; break;
    default: s="Illegal NCU register"; break;
    }

    return s;
}

char * ccu_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case CLOCK_CONTROL: s="clock_control"; break;
    case RAND_GEN: s="rand_gen"; break;
    case RAND_CTL: s="rand_ctl"; break;
    default: s="Illegal clock register"; break;
    }

    return s;
}

char * l2c_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case L2_DIAG_DATA: s="diag_data"; break;
    case L2_DIAG_TAG: s="diag_tag"; break;
    case L2_DIAG_VUAD: s="diag_vuad"; break;
    case L2_CONTROL: s="control"; break;
    case L2_ERROR_ENABLE: s="error_enable"; break;
    case L2_ERROR_STATUS: s="error_status"; break;
#ifdef VFALLS
    case L2_ERROR_STATUS_II: s="error_status_ii"; break;
#endif
    case L2_ERROR_ADDRESS: s="error_address"; break;
    case L2_ERROR_INJECT: s="error_inject"; break;
    case L2_ERROR_NOTDATA: s="error_notdata"; break;
    default: s="Illegal L2 control register"; break;
    }

    return s;
}

char * hwdbg_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case DEBUG_PORT_CONFIG: s="debug_port_config"; break;
    case IO_QUIESCE_CONTROL: s="io_quiesce_control"; break;
    default: s="Illegal Debug control register"; break;
    }
    return s;
}

char * mcu_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case DRAM_CAS_ADDR_WIDTH: s="cas_addr_width"; break;
    case DRAM_RAS_ADDR_WIDTH: s="ras_addr_width"; break;
    case DRAM_CAS_LAT: s="cas_lat"; break;
    case DRAM_SCRUB_FREQ: s="scrub_frequency"; break;
    case DRAM_REFRESH_FREQ: s="refresh_frequency"; break;
    case DRAM_OPEN_BANK_MAX: s="open_bank_max"; break;
    case DRAM_REFRESH_COUNTER: s="refresh_counter"; break;
    case DRAM_SCRUB_ENABLE: s="scrub_enable"; break;
    case DRAM_PROG_TIME_CNTR: s="program_time_cntr"; break;
    case DRAM_TRRD: s="trrd"; break;
    case DRAM_TRC: s="trc"; break;
    case DRAM_TRCD: s="trcd"; break;
    case DRAM_TWTR: s="twtr"; break;
    case DRAM_TRTW: s="trtw"; break;
    case DRAM_TRTP: s="trtp"; break;
    case DRAM_TRAS: s="tras"; break;
    case DRAM_TRP: s="trp"; break;
    case DRAM_TWR: s="twr"; break;
    case DRAM_TRFC: s="trfc"; break;
    case DRAM_TMRD: s="tmrd"; break;
    case DRAM_FAWIN: s="fawin"; break;
    case DRAM_TIWTR: s="tiwtr"; break;
    case DRAM_DIMM_STACK: s="dimm_stack"; break;
    case DRAM_EXT_WR_MODE1: s="ext_wr_mode1"; break;
    case DRAM_EXT_WR_MODE2: s="ext_wr_mode2"; break;
    case DRAM_EXT_WR_MODE3: s="ext_wr_mode3"; break;
    case DRAM_8_BANK_MODE: s="8_bank_mode"; break;
    case DRAM_BRANCH_DISABLED: s="branch_disabled"; break;
    case DRAM_SEL_LO_ADDR_BITS: s="sel_lo_addr_bits"; break;
    case DRAM_SINGLE_CHNL_MODE: s="single_chnl_mode"; break;
#ifdef VFALLS
    case DRAM_MIRROR_MODE: s="mirror_mode"; break;
#endif
    case DRAM_DIMM_INIT: s="dimm_init"; break;
    case DRAM_INIT_STATUS: s="init_status"; break;
    case DRAM_DIMM_PRESENT: s="dimm_present"; break;
    case DRAM_FAILOVER_STATUS: s="failover_status"; break;
    case DRAM_FAILOVER_MASK: s="failover_mask"; break;
    case DRAM_DBG_TRG_EN: s="dbg_trg_en"; break;
    case DRAM_POWER_DOWN_MODE: s="power_down_mode"; break;
    case DRAM_ERROR_STATUS: s="error_status"; break;
    case DRAM_ERROR_ADDRESS: s="error_address"; break;
    case DRAM_ERROR_INJECT: s="error_inject"; break;
    case DRAM_ERROR_COUNTER: s="error_counter"; break;
    case DRAM_ERROR_LOCATION: s="error_location"; break;
    case DRAM_ERROR_RETRY: s="error_retry"; break;
    case DRAM_FBD_ERROR_SYND: s="fbd_error_synd"; break;
    case DRAM_FBD_INJ_ERROR_SRC: s="fbd_inj_error_src"; break;
    case DRAM_FBR_COUNT: s="fbr_count"; break;
    case DRAM_PERF_CTL: s="perf_ctl"; break;
    case DRAM_PERF_COUNT: s="perf_count"; break;
    case FBD_CHNL_STATE: s="fbd_channel_state"; break;
    case FBD_FAST_RESET_FLAG: s="fbd_fast_reset_flag"; break;
    case FBD_CHNL_RESET: s="fbd_channel_reset"; break;
    case TS1_SB_NB_MAPPING: s="ts1_sb_nb_mapping"; break;
    case TS1_TEST_PARAMETER: s="ts1_test_parameter"; break;
    case TS3_FAILOVER_CONFIG: s="ts3_failover_config"; break;
    case ELECTRICAL_IDLE_DETECTED: s="electrical_idle_detected"; break;
    case DISABLE_STATE_PERIOD: s="disable_state_period"; break;
    case DISABLE_STATE_PERIOD_DONE: s="disable_state_period_done"; break;
    case CALIBRATE_STATE_PERIOD: s="calibrate_state_period"; break;
    case CALIBRATE_STATE_PERIOD_DONE: s="calibrate_state_period_done"; break;
    case TRAINING_STATE_MIN_TIME: s="training_state_min_time"; break;
    case TRAINING_STATE_DONE: s="training_state_done"; break;
    case TRAINING_STATE_TIMEOUT: s="training_state_timeout"; break;
    case TESTING_STATE_DONE: s="testing_state_done"; break;
    case TESTING_STATE_TIMEOUT: s="testing_state_timeout"; break;
    case POLLING_STATE_DONE: s="polling_state_done"; break;
    case POLLING_STATE_TIMEOUT: s="polling_state_timeout"; break;
    case CONFIG_STATE_DONE: s="config_state_done"; break;
    case CONFIG_STATE_TIMEOUT: s="config_state_timeout"; break;
    case DRAM_PER_RANK_CKE: s="dram_per_rank_cke"; break;
    case L0S_DURATION: s="l0s_duration"; break;
    case CHNL_SYNC_FRAME_FREQ: s="channel_sync_frame_freq"; break;
    case CHNL_READ_LAT: s="channel_read_lat"; break;
    case CHNL_CAPABILITY: s="channel_capability"; break;
    case LOOPBACK_MODE_CNTL: s="loopback_mode_cntl"; break;
    case SERDES_CONFIG_BUS: s="serdes_config_bus"; break;
    case SERDES_INVPAIR: s="serdes_invpair"; break;
    case SERDES_TEST_CONFIG_BUS: s="serdes_test_config_bus"; break;
    case CONFIG_REG_ACCESS_ADDR: s="config_reg_access_addr"; break;
    case CONFIG_REG_ACCESS_DATA: s="config_reg_access_data"; break;
    case IBIST_NBFIB_CTL: s="ibist_nbfib_ctl"; break;
    case IBIST_SBFIB_CTL: s="ibist_sbfib_ctl"; break;
    default: s="Illegal DRAM control register"; break;
    }

    return s;
}

char * jtag_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case INT_VECTOR_DISPATCH: s="int_vector_dispatch"; break;
    case ASI_CORE_AVAILABLE: s="asi_core_available"; break;
    case ASI_CORE_ENABLE_STATUS: s="asi_core_enable_status"; break;
    case ASI_CORE_ENABLE: s="asi_core_enable"; break;
    case ASI_CORE_RUNNING_RW: s="asi_core_running_rw"; break;
    case ASI_CORE_RUNNING_STATUS: s="asi_core_running_status"; break;
    case ASI_CORE_RUNNING_W1S: s="asi_core_running_w1s"; break;
    case ASI_CORE_RUNNING_W1C: s="asi_core_running_w1c"; break;
    case SOC_ERROR_STEERING: s="soc_error_steering"; break;
    default: s="Illegal JTAG region register"; break;
    }

    return s;
}

#ifdef VFALLS /* { */
char * ncx_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case CF_SYS_MODE_REG: s="system_mode_reg"; break;
    case NCX_TIC_EN_SLOW: s="tick_en_slow"; break;
    case CF_SLOW_PULSE_WAIT: s="slow_pulse_wait"; break;
    case NCX_TWR: s="twr"; break;
    case NCX_TPESR: s="tpesr"; break;
    case NCX_TPELSE: s="tpelse"; break;
    case NCX_TPEAR: s="tpear"; break;
    default: s="Illegal NCX region register"; break;
    }

    return s;
}

char * cou_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case COU_ERR_ENABLE_REG: s="cou_err_enable"; break;
    case COU_ESR: s="cou_esr"; break;
    case COU_EAR: s="cou_ear"; break;
    default: s="Illegal COU region register"; break;
    }

    return s;
}

char * lfu_reg_name(int reg)
{
    char * s;
    switch (reg) {
    case CL_INIT_STATE: s="cl_init_state"; break;
    case CL_CFG_REG: s="cl_cfg_reg"; break;
    case CL_SERDES_CFG: s="cl_serdes_cfg"; break;
    case CL_SER_INVPAIR: s="cl_ser_invpair"; break;
    case CL_TEST_CFG: s="cl_test_cfg"; break;
    case CL_ERROR_STAT: s="cl_error_stat"; break;

    default: s="Illegal LFU region register"; break;
    }

    return s;
}

#endif /* } VFALLS */

#endif /* } NDEBUG */

static void ssi_init(config_dev_t * config_devp)
{
    ss_proc_t * npp;
#ifdef VFALLS /* { */
    domain_t * domainp;
    config_proc_t * config_procp;
    int i;
    int mode_shift;
    uint64_t val;
    int extern_hub;
    int node_id;
    bool_t zambezi_present = false;
    config_dev_t *devp;

    /*
     * Note that for VF, each node has its own SSI region and all of them
     * are addressed by the same physical address (0xFF.0000.0000 to
     * 0xFF.0FFF.FFFF). (Note that this does not refer to the ROM part of the
     * SSI, which *really* is a single entity shared by the different nodes.
     * Address range for the ROM is FF.F000.0000 to FF.FFFF.FFFF and its
     * config and setup is taken care of by an entry in the config file.)
     * Nodes can only access their local SSI regions and not those of other
     * nodes. So the domain structure's addressmap only contains that
     * physical address (0xFF.0000.0000 to 0xFF.0FFF.FFFF) and it is up to the
     * init and access routines to correctly map that PA to the correct
     * node's SSI region.
     * This is unlike NCU, CCU, MCU etc where there is a local CSR access
     * address which is common for all nodes and basically is translated by
     * hw to talk to the originating node's address space AND a remote CSR
     * access address which allows any node to access any other node's
     * address space. So in this case the domain address map will contain
     * both the local CSR address space as well as the remote CSR address
     * space.
     */

    domainp = config_devp->domainp;

    /* Zambezi present? */
    devp = domainp->device.listp;
    for (i = 0; i < domainp->device.count; i++) {
        if (streq(devp->dev_typep->dev_type_namep, "zambezi")) {
            zambezi_present = true;
            break;
        }
        devp = devp->nextp;
    }

    for (i = 0; i < domainp->procs.count; i++) {
        config_procp = LIST_ENTRY(domainp->procs, i);
        npp = (ss_proc_t *)config_procp->procp;
        npp->ssip->timeout = 0;
        npp->ssip->log = 0;

        /* and init the magic ssi location with node_id and way info */
        node_id = config_procp->proc_id;
        extern_hub = zambezi_present ? 1 : 0;
        mode_shift = 11 - domainp->procs.count;
        val = 1 << mode_shift | extern_hub << 6 | node_id << 4 | 0xf;
        val &= MASK64(9,0);
        npp->ssip->magic_ssi = val;
    }
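    /*
     * Worked example of the magic-word packing above (illustrative
     * only, not used by the model): in a two-node domain,
     * mode_shift = 11 - 2 = 9, so node 1 sitting behind a Zambezi hub
     * packs to
     *
     *     (1 << 9) | (1 << 6) | (1 << 4) | 0xf = 0x25f
     *
     * which already fits within MASK64(9,0).
     */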
#else
    npp = (ss_proc_t *)config_devp->devp;
    npp->ssip->timeout = 0;
    npp->ssip->log = 0;

#endif /* } VFALLS */
}

static void ncu_init(config_dev_t * config_devp)
{
    ss_proc_t *npp;
    ncu_t *ncup;
    uint64_t device;
    int i;

    npp = (ss_proc_t *)config_devp->devp;
    ncup = npp->ncup;

    pthread_mutex_init(&ncup->ncu_lock, NULL);

    /*
     * setup init value (NCU spec, v0.99)
     */
    for (device = 0; device < NCU_DEV_MAX; device++) {
        ncup->regs.int_man[device] = 0x0;
    }
    ncup->regs.mondo_int_vec = 0x0;
    ncup->regs.ser_num = 0xdeadbeef;
    ncup->regs.efu_stat = MASK64(63,0);
    ncup->regs.bank_enb = 0xff;
    ncup->regs.bank_enb_stat = 0x3cf;
    ncup->regs.l2_idx_hash_en_stat = false;
    ncup->regs.pcie_a_mem32_offset_base = 0x0;
    ncup->regs.pcie_a_mem32_offset_mask = MASK64(39,36);
    ncup->regs.pcie_a_mem64_offset_base = 0x0;
    ncup->regs.pcie_a_mem64_offset_mask = MASK64(39,36);
    ncup->regs.pcie_a_iocon_offset_base = 0x0;
    ncup->regs.pcie_a_iocon_offset_mask = MASK64(39,36);
    ncup->regs.pcie_a_fsh = 0x0;
    ncup->regs.soc_esr = 0x0;
    ncup->regs.soc_log_enb = 0x1fffffff;
    ncup->regs.soc_intr_enb = 0x0;
    ncup->regs.soc_err_inject = 0x0;
    ncup->regs.soc_fatal_enb = 0x0;
    ncup->regs.soc_sii_err_syndrome = 0x0;
    ncup->regs.soc_ncu_err_syndrome = 0x0;

    for (i = 0; i < NCU_TARGETS; i++) {
        ncup->regs.mondo_int_data0[i] = 0x0;
        ncup->regs.mondo_int_data1[i] = 0x0;
        ncup->regs.mondo_int_busy[i] = NCU_MONDO_INT_BUSY;
    }

}

static void hwdbg_init(config_dev_t * config_devp)
{
    ss_proc_t * npp;

    npp = (ss_proc_t *)config_devp->devp;
    npp->hwdbgp->debug_port_config = 0;
    npp->hwdbgp->io_quiesce_control = 0;
}

static void rcu_init(config_dev_t * config_devp)
{
    ss_proc_t * npp;

    npp = (ss_proc_t *)config_devp->devp;
    npp->rcup->reset_gen = 0;
    npp->rcup->reset_status = 0x4;
    npp->rcup->reset_source = 0x10;

#ifdef VFALLS /* { */
    npp->rcup->comt_divs = 0;
    npp->rcup->comt_cfg = 0x23;
    npp->rcup->clk_steer = 0;
    npp->rcup->comt_lock_time = 0x0;
#endif /* } */
}

static void jtag_init(config_dev_t * config_devp)
{
}

#ifdef VFALLS /* { */
static void ncx_init(config_dev_t * config_devp)
{
    ss_proc_t * npp;

    npp = (ss_proc_t *)config_devp->devp;
    npp->ncxp->sys_mode = 0x0;
    npp->ncxp->tick_en_slow = 0x0;
    npp->ncxp->slow_pulse_wait = 0x0;
    npp->ncxp->twr = 0xfffffc00;    /* Table 12-9, VF PRM 0.1 */
    npp->ncxp->tpesr = 0x0;
    npp->ncxp->tpelse = 0x0;
    npp->ncxp->tpear = 0x0;
}

static void cou_init(config_dev_t * config_devp)
{
    ss_proc_t * npp;
    int link;

    npp = (ss_proc_t *)config_devp->devp;

    for (link = 0; link < COU_LINK_MAX; link++) {
        npp->coup->cou_err_enable[link] = 0x0;
        npp->coup->cou_esr[link] = 0x0;
        npp->coup->cou_ear[link] = 0x0;
    }
}

static void lfu_init(config_dev_t * config_devp)
{
    ss_proc_t * npp;
    int link;

    npp = (ss_proc_t *)config_devp->devp;

    for (link = 0; link < LFU_MAX_LINKS; link++) {
        npp->lfup->cl_init_state[link] = 0x0;
        npp->lfup->cl_cfg_reg[link] = 0x28;
        npp->lfup->cl_serdes_cfg[link] = 0x1c1000000;
        npp->lfup->cl_ser_invpair[link] = 0x0;
        npp->lfup->cl_test_cfg[link] = 0x3;
        npp->lfup->cl_error_stat[link] = 0x0;
    }
}

#endif /* } VFALLS */

static void ccu_init(config_dev_t * config_devp)
{
    ss_proc_t * npp;
    ccu_t * clockp;

    npp = (ss_proc_t *)config_devp->devp;
    clockp = npp->clockp;

    clockp->control = 0x1002011c1;    /* table 11.1, section 11.1 of N2 PRM rev 1.0 */
    clockp->rand_state.ctl = (1 << RC_MODE_SHIFT) |
        (7 << RC_NOISE_CELL_SEL_SHIFT);
    /*
     * 0x3e is from N2 CCU MAS v1.61 11/01/05, Table 8.2. This is
     * decimal 62, which is way too small. But that's what the HW
     * does, so we model it here.
     */
    clockp->rand_state.ctl |= 0x3e << RC_DELAY_SHIFT;
}

static void l2c_init(config_dev_t * config_devp)
{
    int bank, idx;
    ss_proc_t * npp;
    l2c_t * l2p;


#ifdef VFALLS /* { */
    domain_t * domainp;
    config_proc_t * config_procp;
    int i;

    /*
     * Note that for VF, each node has its own L2CSR region and all of them
     * are addressed by the same physical address (0xA0.0000.0000 to
     * 0xBF.FFFF.FFFF). Nodes can only access their local L2CSR regions and
     * not those of other nodes. So the domain structure's addressmap only
     * contains that physical address (0xA0.0000.0000 to 0xBF.FFFF.FFFF) and
     * it is up to the init and access routines to correctly map that PA to
     * the correct node's L2CSR region. This is unlike NCU, CCU, MCU etc
     * where there is a local CSR access address which is common for all
     * nodes and basically is translated by hw to talk to the originating
     * node's address space AND a remote CSR access address which allows any
     * node to access any other node's address space.
     */
    domainp = config_devp->domainp;
    for (i = 0; i < domainp->procs.count; i++) {
        config_procp = LIST_ENTRY(domainp->procs, i);
        npp = (ss_proc_t *)config_procp->procp;
        l2p = npp->l2p;

        for (bank = 0; bank < npp->num_l2banks; bank++) {
            l2p->control[bank] = L2_DIS;
            l2p->bist_ctl[bank] = 0x0;
            l2p->error_enable[bank] = 0xfffffc00;    /* Table 12-3, VF PRM 0.1 */
            l2p->error_status[bank] = 0x0;
            l2p->error_status_ii[bank] = 0x0;
            l2p->error_address[bank] = 0x0;
            l2p->error_inject[bank] = 0x0;
        }

        l2p->diag_datap = Xmalloc(L2_DATA_SIZE);
        l2p->diag_tagp = Xmalloc(L2_TAG_SIZE);
        l2p->diag_vuadp = Xmalloc(L2_VUAD_SIZE);

        for (idx = 0; idx < L2_DATA_SIZE/8; idx++) {
            l2p->diag_datap[idx] = 0xdeadbeef;
        }

        for (idx = 0; idx < L2_TAG_SIZE/8; idx++) {
            l2p->diag_tagp[idx] = 0xdeadbeef;
        }

        for (idx = 0; idx < L2_VUAD_SIZE/8; idx++) {
            l2p->diag_vuadp[idx] = 0xdeadbeef;
        }
    }

#else
    npp = (ss_proc_t *)config_devp->devp;
    l2p = npp->l2p;

    for (bank = 0; bank < npp->num_l2banks; bank++) {
        l2p->control[bank] = L2_DIS;
        l2p->bist_ctl[bank] = 0x0;
        l2p->error_enable[bank] = 0x0;
        l2p->error_status[bank] = 0x0;
        l2p->error_address[bank] = 0x0;
        l2p->error_inject[bank] = 0x0;
    }

    l2p->diag_datap = Xmalloc(L2_DATA_SIZE);
    l2p->diag_tagp = Xmalloc(L2_TAG_SIZE);
    l2p->diag_vuadp = Xmalloc(L2_VUAD_SIZE);

    for (idx = 0; idx < L2_DATA_SIZE/8; idx++) {
        l2p->diag_datap[idx] = 0xdeadbeef;
    }

    for (idx = 0; idx < L2_TAG_SIZE/8; idx++) {
        l2p->diag_tagp[idx] = 0xdeadbeef;
    }

    for (idx = 0; idx < L2_VUAD_SIZE/8; idx++) {
        l2p->diag_vuadp[idx] = 0xdeadbeef;
    }

#endif /* } VFALLS */
}

static void mcu_init(config_dev_t * config_devp)
{
    int bidx;
    ss_proc_t * npp;
    mcu_bank_t * dbp;
    uint64_t i;

    npp = (ss_proc_t *)config_devp->devp;

    for (bidx = 0; bidx < npp->num_mbanks; bidx++) {
        dbp = &(npp->mbankp[bidx]);

        dbp->cas_addr_width = 0xb;
        dbp->ras_addr_width = 0xf;
        dbp->cas_lat = 0x3;
        dbp->scrub_freq = 0xfff;
        dbp->refresh_freq = 0x514;
        dbp->refresh_counter = 0x0;
        dbp->scrub_enable = 0x0;
        dbp->trrd = 0x2;
        dbp->trc = 0xc;
        dbp->trcd = 0x3;
        dbp->twtr = 0x0;
        dbp->trtw = 0x0;
        dbp->trtp = 0x2;
        dbp->tras = 0x9;
        dbp->trp = 0x3;
        dbp->twr = 0x3;
        dbp->trfc = 0x27;
        dbp->tmrd = 0x2;
#ifdef VFALLS
        dbp->fawin = 0xa;
#else
        dbp->fawin = 0x2;
#endif
        dbp->tiwtr = 0x2;
        dbp->dimm_stack = 0x0;
        dbp->ext_wr_mode2 = 0x0;
        dbp->ext_wr_mode1 = 0x18;
        dbp->ext_wr_mode3 = 0x0;
        dbp->eight_bank_mode = 0x1;
        dbp->sel_lo_addr_bits = 0x0;
        dbp->single_chnl_mode = 0x0;
#ifdef VFALLS
        dbp->mirror_mode = 0x0;
#endif
        dbp->dimm_init = 0x1;
        dbp->init_status = 0x0;
        dbp->dimm_present = 0x3;
        dbp->failover_status = 0x0;
        dbp->failover_mask = 0x0;
        dbp->power_down_mode = 0x0;
        dbp->fbd_chnl_state.val = 0x0;
        for (i = 0; i < MAX_AMBS; i++) {
            dbp->fbd_chnl_state.ambstate[i] = 0x0;
            dbp->amb[i].vid_did = 0x00E01033;    /* use e0 for now until we find valid nec did */
            dbp->amb[i].fbds = 0x0;    /* set all error stat bits to 0 */
            dbp->amb[i].emask = 0x36;
            dbp->amb[i].ferr = 0x0;
            dbp->amb[i].nerr = 0x0;
            dbp->amb[i].psbyte3_0 = 0x0;
            dbp->amb[i].psbyte7_4 = 0x0;
            dbp->amb[i].psbyte11_8 = 0x0;
            dbp->amb[i].psbyte13_12 = 0x0;
            dbp->amb[i].c2dincrcur_cmd2datanxt = 0x0;
            dbp->amb[i].mbcsr = 0x0;
            dbp->amb[i].dareftc = ((0x4e << 16) | 0x0c30);
            dbp->amb[i].mtr_dsreftc = ((1 << 16) |
                (0x56 << 8) |
                (0xf << 4) | 0x7);
            dbp->amb[i].drt = 0x0;
            dbp->amb[i].drc = ((0x1 << 18) |    /* set default value */
                (0x1 << 12) |
                (0x2 << 4) |
                (0x3 << 0));
            dbp->amb[i].dcalcsr = 0x0;
            dbp->amb[i].dcaladdr = 0x0;
            dbp->amb[i].ddr2odtc = 0x0;
        }
        dbp->fbd_fast_reset_flag = 0x0;
        dbp->fbd_chnl_reset = 0x0;
        dbp->ts1_sb_nb_mapping = 0x0;
        dbp->ts1_test_parameter = 0x0;
        dbp->ts3_failover_config = 0x0;
        dbp->electrical_idle_detected = 0x0;
        dbp->disable_state_period = 0x3f;
        dbp->disable_state_period_done = 0x0;
        dbp->calibrate_state_period = 0x0;
        dbp->calibrate_state_period_done = 0x0;
        dbp->training_state_min_time = 0xff;
        dbp->training_state_done = 0x0;
        dbp->training_state_timeout = 0xff;
        dbp->testing_state_done = 0x0;
        dbp->testing_state_timeout = 0xff;
        dbp->polling_state_done = 0x0;
        dbp->polling_state_timeout = 0xff;
        dbp->config_state_done = 0x0;
        dbp->config_state_timeout = 0xff;
        dbp->dram_per_rank_cke = 0xffff;
        dbp->l0s_duration = 0x2a;
        dbp->chnl_sync_frame_freq = 0x2a;
        dbp->chnl_read_lat = 0xffff;
        dbp->chnl_capability = 0x0;
        dbp->loopback_mode_cntl = 0x0;
        dbp->serdes_config_bus = 0x0;
        dbp->serdes_invpair = 0x0;
        dbp->config_reg_access_addr = 0x0;
        dbp->config_reg_access_data = 0x0;

        dbp->ibist_nbfib_ctl = 0x03c01e478LL;
        dbp->ibist_sbfib_ctl = 0x23c01e478LL;

        /*
         * Performance counter section 10.3 of PRM 0.9.1
         */
        dbp->perf_ctl = 0x0;
        dbp->perf_count = 0x0;

        /*
         * Error handling section 25.12 of PRM 1.2
         */
        dbp->error_status = 0x0;
        dbp->error_address = 0x0;
        dbp->error_inject = 0x0;
        dbp->error_counter = 0x0;
        dbp->error_location = 0x0;
        dbp->error_retry = 0x0;

        /*
         * Power management section 26.3 of PRM 0.9.1
         */
        dbp->open_bank_max = 0x1ffff;
        dbp->prog_time_cntr = 0xffff;

        /*
         * Hardware debug section 29.1 of PRM 0.9.1
         */
        dbp->dbg_trg_en = 0x0;
    }
}

/*
 * Access SSI registers (mapped at offset = 0xff00000000)
 */
static bool_t ssi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
    int reg;
    uint64_t val;
    ss_proc_t *npp;
    ssi_t * ssip;

#ifdef VFALLS /* { */
    /*
     * Note that for VF, each node has its own SSI region and all of them
     * are addressed by the same physical address (0xFF.0000.0000 to
     * 0xFF.0FFF.FFFF). (Note that this does not refer to the ROM part of the
     * SSI, which *really* is a single entity shared by the different nodes.
     * Address range for the ROM is FF.F000.0000 to FF.FFFF.FFFF and its
     * config and setup is taken care of by an entry in the config file.)
     * Nodes can only access their local SSI regions and not those of other
     * nodes. So the domain structure's addressmap only contains that
     * physical address (0xFF.0000.0000 to 0xFF.0FFF.FFFF) and it is up to
     * the init and access routines to correctly map that PA to the correct
     * node's SSI region.
     * This is unlike NCU, CCU, MCU etc where there is a local CSR access
     * address which is common for all nodes and basically is translated by
     * hw to talk to the originating node's address space AND a remote CSR
     * access address which allows any node to access any other node's
     * address space. So in this case the domain address map will contain
     * both the local CSR address space as well as the remote CSR address
     * space.
     */

    /* Redirect the common SSI PA to the correct node */
    npp = (ss_proc_t *)sp->config_procp->procp;

    /* magic SSI location: read by the reset code to set up the sys_mode reg */
    if (config_addrp->baseaddr == MAGIC_SSI) {
        if (op == MA_st64) {
            EXEC_WARNING( ("Attempted write to reserved magic field in ssi") );
            return false;
        }
        else if (op == MA_ldu64) {
            if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
                *regp = npp->ssip->magic_ssi;
            return true;
        }
        else
            return false;
    }
    /* or else this is a normal SSI register access */
    else
        config_addrp = npp->ssi_devp->addrp;

#endif /* } VFALLS */

    if (MA_ldu64 != op && MA_st64 != op) return false;

    npp = (ss_proc_t *)config_addrp->config_devp->devp;
    ssip = npp->ssip;

    reg = off & 0x1ffff;

    switch (op) {
    case MA_st64:
        val = *regp;

        switch (reg) {
        case SSI_TIMEOUT:
            RSVD_MASK(sp, (MASK64(24, 0)), val, 0, reg);
            ssip->timeout = val;
            break;
        case SSI_LOG:
            RSVD_MASK(sp, (MASK64(1, 0)), val, 0, reg);
            ssip->log &= ~val;
            break;
        default:
            /* illegal reg - an error */
            return false;
        }
        break;

    case MA_ldu64:
        switch (reg) {
        case SSI_TIMEOUT:
            val = ssip->timeout;
            break;
        case SSI_LOG:
            val = ssip->log;
            break;
        default:
            /* illegal reg - an error */
            return false;
        }
        if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
            *regp = val;
        break;

    default:
        ASSERT(0);
    }

    return true;
}

static bool_t hwdbg_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
    ss_proc_t *npp;
    hwdbg_t * hwdbgp;
    uint64_t val;
    int reg;

    if (MA_ldu64 != op && MA_st64 != op) return false;

    npp = (ss_proc_t *)config_addrp->config_devp->devp;
    hwdbgp = npp->hwdbgp;

    reg = off & ~0xfULL;

    switch (op) {
    case MA_st64:
        val = *regp;
        switch (reg) {
        case DEBUG_PORT_CONFIG:
            RSVD_MASK(sp, (MASK64(63,62)|MASK64(9,0)), val, 0, reg);
            lprintf(sp->gid, "DEBUG_PORT_CONFIG addr=0x%llx being written with val=0x%llx\n", reg, val);
            hwdbgp->debug_port_config = val;
            break;
        case IO_QUIESCE_CONTROL:
            RSVD_MASK(sp, MASK64(3,0), val, 0, reg);
            lprintf(sp->gid, "IO_QUIESCE_CONTROL addr=0x%llx being written with val=0x%llx\n", reg, val);
            hwdbgp->io_quiesce_control = val;
            break;
        default:
            /* illegal reg - an error */
            return false;
        }
        break;
    case MA_ldu64:
        switch (reg) {
        case DEBUG_PORT_CONFIG:
            val = hwdbgp->debug_port_config;
            break;
        case IO_QUIESCE_CONTROL:
            val = hwdbgp->io_quiesce_control;
            break;
        default:
            /* illegal reg - an error */
            return false;
        }
        if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
            *regp = val;
        break;

    default:
        ASSERT(0);
    }
    return true;
}


static bool_t rcu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
{
    ss_proc_t *npp;
    rcu_t * rcup;
    uint64_t val;
    int reg;
    int node_id = 0;

#ifdef VFALLS /* { */
    domain_t *domainp;
    tpaddr_t pa;
    int idx;

    if (config_addrp->baseaddr == PHYS_ADDR_RCU) {
        /*
         * if local RCU CSR access, need to convert to Node X (this node)
         * RCU CSR address
         */
        node_id = sp->config_procp->proc_id;
        domainp = sp->config_procp->domainp;
        pa = PHYS_ADDR_RCU_REMOTE(node_id) + off;
        config_addrp = find_domain_address(domainp, pa);

    } else {
        /*
         * If remote RCU CSR access, use config_addrp to get at the node_id.
         */

        /* first check if global addressing is allowed for this config */
        GLOBAL_ADDRESSING_CHECK(sp, "RCU");

        domainp = config_addrp->config_devp->domainp;

        for (idx = 0; idx < domainp->procs.count; idx++) {
            node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
            if (config_addrp->baseaddr == PHYS_ADDR_RCU_REMOTE(node_id))
                break;
        }
    }
#endif /* } VFALLS */

    if (MA_ldu64 != op && MA_st64 != op) return false;

    npp = (ss_proc_t *)config_addrp->config_devp->devp;
    rcup = npp->rcup;

    reg = off;
    switch (op) {
    case MA_st64:
        val = *regp;
        switch (reg) {
        case RESET_GEN:
#ifdef VFALLS /* { */
            /*
             * PRM states that software may only write a 1
             * to one of the 3 reset gen bit fields at a time.
             * .-------------------------------------------------------,
             * | .. | pb_as_dbgr | dbr_gen | rsvd | xir_gen | wmr_gen |
             * `-------------------------------------------------------'
             *  63:5       4          3       2        1         0
             */
            RSVD_MASK(sp, (MASK64(4,3) | MASK64(1,0)), val, 0, reg);

#else /* } { */
            /*
             * PRM states that software may only write a 1
             * to one of the 3 bit fields at a time.
             * .--------------------------------------------,
             * | .. | dbr_gen | rsvd | xir_gen | wmr_gen |
             * `--------------------------------------------'
             *  63:4      3       2        1         0
             */
            RSVD_MASK(sp, (MASK64(3,3) | MASK64(1,0)), val, 0, reg);


#endif /* } */
            switch ((val & (MASK64(3,0)))) {
            case 0x0:    /* no gen bits set */
            case 0x1:    /* wmr_gen set */
            case 0x2:    /* xir_gen set */
            case 0x8:    /* dbr_gen set */
                break;
            default:
                fatal("[0x%llx] (pc=0x%llx)\tAttempted write to more than "
                    "one reset generation bit in RESET_GEN val=0x%llx",
                    sp->gid, sp->pc, val);
            }
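            /*
             * Examples of the one-hot rule enforced above (illustrative
             * only): val = 0x1 (wmr_gen), 0x2 (xir_gen) or 0x8 (dbr_gen)
             * is accepted, while val = 0x3 (wmr_gen|xir_gen) takes the
             * fatal path.
             */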

            rcup->reset_gen = val;

            /*
             * update the RESET_SOURCE register if dbr_gen,
             * xir_gen or wmr_gen are set in RESET_GEN.
             * The lower 4 bits of both registers are the
             * same.
             */
            rcup->reset_source |= (val & (MASK64(3,3) | MASK64(1,1) | MASK64(0,0)));
            break;
        case RESET_STATUS:
            RSVD_MASK(sp, (MASK64(3,1)), val, 0, reg);
            rcup->reset_status = val;
            break;
        case RESET_SOURCE:
#ifdef VFALLS
            RSVD_MASK(sp, (MASK64(1,0) | MASK64(18,3)), val, 0, reg);
#else
            RSVD_MASK(sp, (MASK64(1,0) | MASK64(15,3)), val, 0, reg);
#endif
            /*
             * All the non-reserved bits are W1C so we update
             * the reset_source accordingly
             */
            rcup->reset_source &= ~(val);
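            /*
             * Worked example of the W1C behaviour (illustrative only):
             * with reset_source = 0xb, a write of val = 0x2 clears just
             * the xir bit, leaving reset_source = 0x9.
             */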
            break;
#ifdef VFALLS /* { */
        case COMT_DIVS:
            RSVD_MASK(sp, (MASK64(23,0)), val, 0, reg);
            rcup->comt_divs = val;
            break;

        case COMT_CFG:
            RSVD_MASK(sp, (MASK64(21,0)), val, 0, reg);
            rcup->comt_cfg = val;
            break;

        case CLK_STEER:
            RSVD_MASK(sp, (MASK64(3,0)), val, 0, reg);
            rcup->clk_steer = val;
            break;

        case COMT_LOCK_TIME:
            RSVD_MASK(sp, (MASK64(31,0)), val, 0, reg);
            rcup->comt_lock_time = val;
            break;
#endif /* } VFALLS */
        default:
            /* illegal reg - an error */
            return false;
        }
        break;
    case MA_ldu64:
        switch (reg) {
        case RESET_GEN:
            val = rcup->reset_gen;
            break;
        case RESET_STATUS:
            val = rcup->reset_status;
            break;
        case RESET_SOURCE:
            val = rcup->reset_source;
            break;
#ifdef VFALLS /* { */
        case COMT_DIVS:
            val = rcup->comt_divs;
            break;

        case COMT_CFG:
            val = rcup->comt_cfg;
            break;

        case CLK_STEER:
            val = rcup->clk_steer;
            break;

        case COMT_LOCK_TIME:
            val = rcup->comt_lock_time;
            break;
#endif /* } VFALLS */

        default:
            /* illegal reg - an error */
            return false;
        }
        if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
            *regp = val;
        break;

    default:
        ASSERT(0);
    }
    return true;
}

/*
 * Access registers in NCU Unit (mapped at offset = 0x8000000000)
 */
static bool_t ncu_access(simcpu_t *sp, config_addr_t * config_addrp,
    tpaddr_t off, maccess_t op, uint64_t * regp)
{
    int reg;
    uint64_t val;
    ss_proc_t *npp;
    ss_strand_t *nsp;
    ncu_t *ncup;
    sparcv9_cpu_t *v9p;
    int idx, target;
    int node_id = 0;
    bool_t self = true;

#ifdef VFALLS /* { */
    domain_t *domainp;
    tpaddr_t pa;

    if (config_addrp->baseaddr == PHYS_ADDR_NCU) {
        /*
         * If local NCU CSR access, need to convert to Node X (this node)
         * NCU CSR address. Use the simcpu to get the correct node_id and
         * then get the correct config_addrp.
         */
        node_id = sp->config_procp->proc_id;
        domainp = sp->config_procp->domainp;

        pa = PHYS_ADDR_NCU_REMOTE(node_id) + off;
        config_addrp = find_domain_address(domainp, pa);
        self = true;

    } else {
        /*
         * If remote NCU CSR access, use config_addrp to get at the node_id.
         */

        /* first check if global addressing is allowed for this config */
        GLOBAL_ADDRESSING_CHECK(sp, "NCU");

        domainp = config_addrp->config_devp->domainp;

        for (idx = 0; idx < domainp->procs.count; idx++) {
            node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
            if (config_addrp->baseaddr == PHYS_ADDR_NCU_REMOTE(node_id))
                break;
        }
        self = (node_id == sp->config_procp->proc_id) ? true : false;
    }

#endif /* } VFALLS */
    /*
     * FIXME: For the moment we only support 64bit accesses to registers.
     */
    if (MA_ldu64 != op && MA_st64 != op) return false;
    if (off & 7) return false;

    npp = (ss_proc_t *)config_addrp->config_devp->devp;
    v9p = sp->specificp;
    nsp = v9p->impl_specificp;
    ncup = npp->ncup;
    reg = off & NCU_REG_MASK;

    if (reg < MONDO_INT_VEC)
        reg = INT_MAN;
    else if (UINT64_RANGE_CHECK(MONDO_INT_DATA0, reg, MONDO_INT_ADATA0))
        reg &= ~NCU_INT_TGTOFFSET_MASK;
    else if (UINT64_RANGE_CHECK(MONDO_INT_BUSY, reg, MONDO_INT_ABUSY))
        reg &= ~NCU_INT_TGTOFFSET_MASK;
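    /*
     * Note on the decode above (illustrative): INT_MAN and the
     * MONDO_INT_* registers are per-device/per-target arrays, so any
     * offset within an array collapses to the base register here, and
     * the array index is recovered from the raw offset in the handlers
     * below, e.g. idx = (off >> 3) & (NCU_DEV_MAX-1) for INT_MAN.
     */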

    /*
     * Fast-path the serial number read - it is used in the N2
     * hypervisor CPU yield API.
     */
    if (op == MA_ldu64 && reg == SER_NUM) {
        val = ncup->regs.ser_num;

        DBGDEV(lprintf(sp->gid, "Read NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
            node_id, reg, ncu_reg_name(reg), off, val););

        if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
            *regp = val;

        return true;
    }

    pthread_mutex_lock( &ncup->ncu_lock );

    switch (op) {
    case MA_st64:
        val = *regp;

#define ASSIGN_NCU(_n, _m) do { \
    ncup->regs._n = val; \
    if (0LL != (val & ~(_m))) goto write_reserved; \
} while (0)
1886
1887 switch (reg) {
1888 case INT_MAN:
1889 idx = (off >> 3) & (NCU_DEV_MAX-1);
1890 ASSIGN_NCU( int_man[idx], MASK64(13,8)|MASK64(5,0) );
1891 break;
1892 case MONDO_INT_VEC:
1893 ASSIGN_NCU( mondo_int_vec, MASK64(5,0) );
1894 break;
1895 case SER_NUM:
1896 case EFU_STAT:
1897 case CORE_AVAIL:
1898 case BANK_AVAIL:
1899 case BANK_ENABLE_STATUS:
1900 case L2_IDX_HASH_EN_STATUS:
1901 case MONDO_INT_DATA0:
1902 case MONDO_INT_DATA1:
1903 case MONDO_INT_ADATA0:
1904 case MONDO_INT_ADATA1:
1905 EXEC_WARNING( ("Attempted write to RO register in NCU:"
1906 "Write 0x%llx to register %s (offset 0x%x)",
1907 val, ncu_reg_name(reg), reg ) );
1908 pthread_mutex_unlock( &ncup->ncu_lock );
1909 return false;
1910 case PCIE_A_MEM32_OFFSET_BASE:
1911 ASSIGN_NCU( pcie_a_mem32_offset_base, MASK64(63,63)|MASK64(35,24) );
1912 niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM32);
1913 break;
1914 case PCIE_A_MEM32_OFFSET_MASK:
1915 ASSIGN_NCU( pcie_a_mem32_offset_mask, MASK64(39,24) );
1916 niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM32);
1917 break;
1918 case PCIE_A_MEM64_OFFSET_BASE:
1919 ASSIGN_NCU( pcie_a_mem64_offset_base, MASK64(63,63)|MASK64(35,24) );
1920 niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM64);
1921 break;
1922 case PCIE_A_MEM64_OFFSET_MASK:
1923 ASSIGN_NCU( pcie_a_mem64_offset_mask, MASK64(39,24) );
1924 niagara2_pcie_mapping(sp, ncup, PIU_REGION_MEM64);
1925 break;
1926 case PCIE_A_IOCON_OFFSET_BASE:
1927 ASSIGN_NCU( pcie_a_iocon_offset_base, MASK64(63,63)|MASK64(35,24) );
1928 niagara2_pcie_mapping(sp, ncup, PIU_REGION_CFGIO);
1929 break;
1930 case PCIE_A_IOCON_OFFSET_MASK:
1931 ASSIGN_NCU( pcie_a_iocon_offset_mask, MASK64(39,24) );
1932 niagara2_pcie_mapping(sp, ncup, PIU_REGION_CFGIO);
1933 break;
1934 case BANK_ENABLE:
1935 case L2_IDX_HASH_EN:
1936 FIXME_WARNING(("NCU register %s (offset 0x%x) not implemented\n",
1937 ncu_reg_name(reg), reg ) );
1938 break;
1939 case PCIE_A_FSH:
1940 ncup->regs.pcie_a_fsh = val;
1941 break;
1942 case SOC_ESR:
1943 ASSIGN_NCU( soc_esr, MASK64(63,63)|NCU_SOC_MASK );
1944 break;
1945 case SOC_LOG_ENABLE:
1946 ASSIGN_NCU( soc_log_enb, MASK64(42,0) );
1947 break;
1948 case SOC_INTERRUPT_ENABLE:
1949 ASSIGN_NCU( soc_intr_enb, MASK64(42,0) );
1950 break;
1951 case SOC_ERROR_INJECTION:
1952 ASSIGN_NCU( soc_err_inject, MASK64(42,0) );
1953 break;
1954 case SOC_FATAL_ERROR_ENABLE:
1955 ASSIGN_NCU( soc_fatal_enb, MASK64(42,0) );
1956 break;
1957 case SOC_PENDING_ERROR_STATUS:
1958 /*
1959 * same as SOC_ESR
1960 */
1961 ASSIGN_NCU( soc_esr, MASK64(63,63)|NCU_SOC_MASK );
1962 break;
1963 case SOC_SII_ERROR_SYNDROME:
1964 ASSIGN_NCU( soc_sii_err_syndrome, MASK64(63,63)|MASK64(58,0) );
1965 break;
1966 case SOC_NCU_ERROR_SYNDROME:
1967 ASSIGN_NCU( soc_ncu_err_syndrome, MASK64(63,58)|MASK64(55,0) );
1968 break;
1969 case MONDO_INT_BUSY:
1970 target = (off >> 3) & (NCU_TARGETS-1);
1971 ASSIGN_NCU( mondo_int_busy[target], MASK64(6,6) );
1972 break;
1973 case MONDO_INT_ABUSY:
1974
1975 /* Note from the VF PRM author: N2's cpu-id based alias
1976 * registers work on VF only when the access comes from the
1977 * local node. A thread on a remote node should not access
1978 * these registers. Since interrupts can only be delivered to
1979 * the local node, the assumption is that the interrupted
1980 * thread (or some other thread on the same node) will read
1981 * these regs and message-pass to the remote node, if needed.
1982 */
1983 if (!self)
1984 fatal("[0x%llx] (pc=0x%llx)\tIllegal write to "
1985 "NCU alias register of remote node: "
1986 "Write 0x%llx to register offset 0x%x of node %d.\n",
1987 sp->gid, sp->pc, val, reg, node_id);
1988 target = nsp->vcore_id;
1989 ASSIGN_NCU( mondo_int_busy[target], MASK64(6,6) );
1990 break;
1991 default:
1992 EXEC_WARNING( ("Attempted write to illegal register in NCU:"
1993 "Write 0x%llx to register offset 0x%x on node %d\n",
1994 val, reg, node_id ) );
1995 goto access_failed; /* illegal reg - an error */
1996 }
1997
1998 DBGDEV(lprintf(sp->gid, "Write NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
1999 node_id, reg, ncu_reg_name(reg), off, val););
2000
2001 break;
2002
2003write_reserved:
2004 EXEC_WARNING( ("Attempted write to reserved field in NCU:"
2005 "Write 0x%llx to register %s (offset 0x%x) on node %d",
2006 val, ncu_reg_name(reg), reg, node_id ) );
2007 pthread_mutex_unlock( &ncup->ncu_lock );
2008 return true;
2009
2010 case MA_ldu64:
2011 switch (reg) {
2012 case INT_MAN:
2013 idx = (off >> 3) & (NCU_DEV_MAX-1);
2014 val = ncup->regs.int_man[idx];
2015 break;
2016 case MONDO_INT_VEC:
2017 val = ncup->regs.mondo_int_vec;
2018 break;
2019 case SER_NUM:
2020 val = ncup->regs.ser_num;
2021 break;
2022 case CORE_AVAIL:
2023 val = npp->cmp_regs.core_enable_status;
2024 break;
2025 case EFU_STAT:
2026 val = ncup->regs.efu_stat;
2027 break;
2028 case BANK_AVAIL:
2029 case BANK_ENABLE:
2030 val = ncup->regs.bank_enb;
2031 break;
2032 case BANK_ENABLE_STATUS:
2033 val = ncup->regs.bank_enb_stat;
2034 break;
2035 case PCIE_A_MEM32_OFFSET_BASE:
2036 val = ncup->regs.pcie_a_mem32_offset_base;
2037 break;
2038 case PCIE_A_MEM32_OFFSET_MASK:
2039 val = ncup->regs.pcie_a_mem32_offset_mask;
2040 break;
2041 case PCIE_A_MEM64_OFFSET_BASE:
2042 val = ncup->regs.pcie_a_mem64_offset_base;
2043 break;
2044 case PCIE_A_MEM64_OFFSET_MASK:
2045 val = ncup->regs.pcie_a_mem64_offset_mask;
2046 break;
2047 case PCIE_A_IOCON_OFFSET_BASE:
2048 val = ncup->regs.pcie_a_iocon_offset_base;
2049 break;
2050 case PCIE_A_IOCON_OFFSET_MASK:
2051 val = ncup->regs.pcie_a_iocon_offset_mask;
2052 break;
2053 case L2_IDX_HASH_EN:
2054 case L2_IDX_HASH_EN_STATUS:
2055 FIXME_WARNING(("NCU register %s (offset 0x%x) not implemented\n",
2056 ncu_reg_name(reg), reg ) );
2057 break;
2058 case PCIE_A_FSH:
2059 val = ncup->regs.pcie_a_fsh;
2060 break;
2061 case SOC_ESR:
2062 val = ncup->regs.soc_esr;
2063 break;
2064 case SOC_LOG_ENABLE:
2065 val = ncup->regs.soc_log_enb;
2066 break;
2067 case SOC_INTERRUPT_ENABLE:
2068 val = ncup->regs.soc_intr_enb;
2069 break;
2070 case SOC_ERROR_INJECTION:
2071 val = ncup->regs.soc_err_inject;
2072 break;
2073 case SOC_FATAL_ERROR_ENABLE:
2074 val = ncup->regs.soc_fatal_enb;
2075 break;
2076 case SOC_PENDING_ERROR_STATUS:
2077 /*
2078 * same as SOC_ESR
2079 */
2080 val = ncup->regs.soc_esr;
2081 break;
2082 case SOC_SII_ERROR_SYNDROME:
2083 val = ncup->regs.soc_sii_err_syndrome;
2084 break;
2085 case SOC_NCU_ERROR_SYNDROME:
2086 val = ncup->regs.soc_ncu_err_syndrome;
2087 break;
2088 case MONDO_INT_DATA0:
2089 target = (off >> 3) & (NCU_TARGETS-1);
2090 val = ncup->regs.mondo_int_data0[target];
2091 break;
2092 case MONDO_INT_DATA1:
2093 target = (off >> 3) & (NCU_TARGETS-1);
2094 val = ncup->regs.mondo_int_data1[target];
2095 break;
2096 case MONDO_INT_ADATA0:
2097
2098 /* Note from the VF PRM author: N2's cpu-id based alias
2099 * registers work on VF only when the access comes from the
2100 * local node. A thread on a remote node should not access
2101 * these registers. Since interrupts can only be delivered to
2102 * the local node, the assumption is that the interrupted
2103 * thread (or some other thread on the same node) will read
2104 * these regs and message-pass to the remote node, if needed.
2105 */
2106 if (!self)
2107 fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
2108 "NCU alias register of remote node: "
2109 "Read register offset 0x%x on node %d.\n",
2110 sp->gid, sp->pc, reg, node_id);
2111 target = nsp->vcore_id;
2112 val = ncup->regs.mondo_int_data0[target];
2113 break;
2114 case MONDO_INT_ADATA1:
2115 if (!self)
2116 /* see comment block above */
2117 fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
2118 "NCU alias register of remote node: "
2119 "Read register offset 0x%x on node %d.\n",
2120 sp->gid, sp->pc, reg, node_id);
2121 target = nsp->vcore_id;
2122 val = ncup->regs.mondo_int_data1[target];
2123 break;
2124 case MONDO_INT_BUSY:
2125 target = (off >> 3) & (NCU_TARGETS-1);
2126 val = ncup->regs.mondo_int_busy[target];
2127 break;
2128 case MONDO_INT_ABUSY:
2129 if (!self)
2130 /* see comment block above */
2131 fatal("[0x%llx] (pc=0x%llx)\tIllegal read from "
2132 "NCU alias register of remote node: "
2133 "Read register offset 0x%x on node %d.\n",
2134 sp->gid, sp->pc, reg, node_id);
2135 target = nsp->vcore_id;
2136 val = ncup->regs.mondo_int_busy[target];
2137 break;
2138 default:
2139 goto access_failed; /* illegal reg - an error */
2140 }
2141
2142 DBGDEV(lprintf(sp->gid, "Read NCU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
2143 node_id, reg, ncu_reg_name(reg), off, val););
2144
2145 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
2146 *regp = val;
2147 break;
2148
2149 default:
2150 ASSERT(0);
2151 }
2152
2153 pthread_mutex_unlock( &ncup->ncu_lock );
2154 return true;
2155
2156access_failed:;
2157 pthread_mutex_unlock( &ncup->ncu_lock );
2158 return false;
2159
2160}
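
/*
 * The ASSIGN_NCU pattern above stores the raw value first and only then
 * checks it against the mask of architecturally defined bits, warning
 * (but keeping the store) when reserved bits were written.  A minimal
 * standalone sketch of that check, assuming only that MASK64(msb,lsb)
 * builds a contiguous run of ones from bit msb down to bit lsb
 * (mask64() below is an illustrative stand-in, not the simulator's
 * macro):
 */
#if 0 /* illustrative sketch only - not compiled */
static uint64_t mask64(int msb, int lsb)
{
	return ((~0ULL) >> (63 - msb)) & ~((1ULL << lsb) - 1);
}

/* returns 0 when any bit outside wr_mask was set by the write */
static int write_with_rsvd_check(uint64_t *reg, uint64_t val, uint64_t wr_mask)
{
	*reg = val;			/* raw store, as ASSIGN_NCU does */
	return (val & ~wr_mask) == 0;
}
#endif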
2161
2162/*
2163 * Access registers in Clock Unit (mapped at offset = 0x8300000000)
2164 */
2165static bool_t ccu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
2166{
2167 int reg;
2168 uint64_t val;
2169 ss_proc_t *npp;
2170 ccu_t * clockp;
2171 int osc; /* oscillator number for RNG */
2172 int freqidx; /* frequency index of selected oscillator in RNG */
2173
2174#ifdef VFALLS /* { */
2175 domain_t *domainp;
2176 int node_id;
2177 tpaddr_t pa;
2178
2179 if (config_addrp->baseaddr == PHYS_ADDR_CCU) {
2180 /*
2181 * if local CCU CSR access, need to convert to Node X(this node) CCU CSR
2182 * address
2183 */
2184 node_id = sp->config_procp->proc_id;
2185 domainp = sp->config_procp->domainp;
2186
2187 pa = PHYS_ADDR_CCU_REMOTE(node_id) + off;
2188 config_addrp = find_domain_address(domainp, pa);
2189
2190 } else {
2191
2192 /* check if global addressing is allowed for this config */
2193 GLOBAL_ADDRESSING_CHECK(sp, "CCU");
2194
2195 }
2196#endif /* } VFALLS */
2197
2198 /*
2199 * FIXME: For the moment we only support 64bit accesses to registers.
2200 */
2201 if (MA_ldu64!=op && MA_st64!=op) return false;
2202
2203 npp = (ss_proc_t *)config_addrp->config_devp->devp;
2204 clockp = npp->clockp;
2205
2206 reg = off & ~0xfULL; /* collapse to basic register groups */
2207
2208 switch (op) {
2209 case MA_st64:
2210 val = *regp;
2211
2212#define ASSIGN_CLK(_n, _m) do { \
2213 if (0LL != (val & ~(_m))) goto write_reserved; \
2214 clockp->_n = val; \
2215 } while (0)
2216
2217 switch (reg) {
2218 case CLOCK_CONTROL:
2219 ASSIGN_CLK( control, MASK64(33,0) );
2220 FIXME_WARNING(("Clock register %s (offset 0x%x) not implemented\n",
2221 ccu_reg_name(reg), reg ) );
2222 break;
2223 case RAND_CTL:
2224 /* only the lower 25 bits are used */
2225 ASSIGN_CLK(rand_state.ctl, RC_REG_MASK);
2226 /* unary to binary */
2227 switch ((clockp->rand_state.ctl >>
2228 RC_NOISE_CELL_SEL_SHIFT) & RC_NOISE_CELL_SEL_MASK) {
2229 case 0:
2230 osc = 0; break; /* special: no osc. selected */
2231 case 1:
2232 osc = 1;
2233 break;
2234 case 2:
2235 osc = 2;
2236 break;
2237 case 4:
2238 osc = 3;
2239 break;
2240 default:
2241 /*
2242 * If more than one is selected, we
2243 * don't set anything. It is not clear
2244 * that this is exactly in line with
2245 * the CCU document.
2246 */
2247 osc = 0;
2248 }
2249 if (osc > 0) {
2250 clockp->rand_state.freqidx[osc-1] =
2251 (clockp->rand_state.ctl >>
2252 RC_ANALOG_SEL_SHIFT) & RC_ANALOG_SEL_MASK;
2253 }
2254 break;
2255 default:
2256 /* illegal reg - an error */
2257 return false;
2258 }
2259 break;
2260
2261write_reserved:
2262 EXEC_WARNING( ("Attempted write to reserved field in clock unit:"
2263 "Write 0x%llx to register %s (offset 0x%x)",
2264 val, ccu_reg_name(reg), reg ) );
2265 return false;
2266
2267 case MA_ldu64:
2268#define RETRIEVE_CLK(_n, _m) do { val = ((clockp->_n) & (_m)); } while (0)
2269 switch (reg) {
2270 case CLOCK_CONTROL:
2271 RETRIEVE_CLK( control, MASK64(33,0) );
2272 break;
2273#if INTERNAL_BUILD /* { */
2274 case RAND_GEN:
2275#define RNG_POLY 0x231dcee91262b8a3ULL
2276#define N2_RNG_OSC_DUTY_FACTOR 0.2
2277 if (((clockp->rand_state.ctl >> RC_MODE_SHIFT) & 1) == 0) {
2278#ifdef OLD_CONSTANT_WAY
2279/* Keeping this code, because I think Axis works this way */
2280 /*
2281 * Mode == 0: Shift out "raw" noise
2282 * cells, one value every 64
2283 * clocks. For now we assume the
2284 * following noise cell frequencies,
2285 * and that everything is phase
2286 * locked.
2287 *
2288 * Cell 1: 1/64.
2289 * Cell 2: 1/8.
2290 * Cell 3: 1/2.
2291 *
2292 * Selection is weird:
2293 * See N2 CCU MAS v.1.61, Table 7.
2294 */
2295 switch (clockp->
2296 rand_control.fields.rc_noise_cell_sel) {
2297 case 0:
2298 val = 0;
2299 break;
2300 case 1:
2301 val = 0xffffffff00000000ULL;
2302 break;
2303 case 2:
2304 val = 0xf0f0f0f0f0f0f0f0ULL;
2305 break;
2306 case 4:
2307 val = 0xaaaaaaaaaaaaaaaaULL;
2308 break;
2309 default:
2310 val = 0xffffffff00000000ULL ^
2311 0xf0f0f0f0f0f0f0f0ULL ^
2312 0xaaaaaaaaaaaaaaaaULL;
2313 }
2314#else /* !OLD_CONSTANT_WAY */
2315 val = 0;
2316 /* osc = clockp->rand_state.osc; */
2317 for (osc = 1; osc <= 3; ++osc) {
2318 if (((clockp->rand_state.ctl >>
2319 RC_NOISE_CELL_SEL_SHIFT) &
2320 RC_NOISE_CELL_SEL_MASK) &
2321 (1 << (osc - 1))) {
2322 freqidx = clockp->
2323 rand_state.freqidx[osc-1];
2324#if 0
2325 printf("osc=%d, freqidx=%d, "
2326 "freq=%f, noise=%f\n",
2327 osc, freqidx,
2328 clockp->rand_state.
2329 frequency[freqidx][osc-1],
2330 clockp->rand_state.
2331 noise[freqidx][osc-1]);
2332#endif
2333 val ^= gen_raw_entropy(
2334 &clockp->
2335 rand_state.
2336 phase[osc-1],
2337 &clockp->rand_state.
2338 frequency[freqidx][osc-1],
2339 &clockp->rand_state.
2340 noise[freqidx][osc-1],
2341 N2_RNG_OSC_DUTY_FACTOR);
2342 }
2343 }
2344#endif /* !OLD_CONSTANT_WAY */
2345 } else {
2346 /*
2347 * Mode is 1; The LFSR is in feedback mode.
2348 */
2349 if ((clockp->rand_state.ctl >>
2350 RC_NOISE_CELL_SEL_SHIFT) &
2351 RC_NOISE_CELL_SEL_MASK) {
2352 /*
2353 * For now, if any noise cells
2354 * are turned on, return a
2355 * 64-bit random value.
2356 */
2357 val = ((uint64_t)lrand48() << 32) |
2358 (uint64_t)lrand48();
2359 } else {
2360 /*
2361 * Deterministic test. The
2362 * RNG does 2 more cycles than
2363 * the delay value. After
2364 * each read, the register is
2365 * reset to ~0ULL. In
2366 * reality, delay+2 is only
2367 * the minimum delay, but we
2368 * gloss over that issue.
2369 */
2370 lfsr64_adv(RNG_POLY, ~0ULL,
2371 ((clockp->rand_state.ctl >>
2372 RC_DELAY_SHIFT) & RC_DELAY_MASK) +
2373 2,
2374 &val);
2375 }
2376 }
2377 break;
2378 case RAND_CTL:
2379 RETRIEVE_CLK(rand_state.ctl, RC_REG_MASK);
2380 break;
2381#endif /* INTERNAL_BUILD } */
2382 default:
2383 /* illegal reg - an error */
2384 return false;
2385 }
2386 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
2387 *regp = val;
2388 break;
2389
2390 default:
2391 ASSERT(0);
2392 }
2393
2394 return true;
2395}
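
/*
 * lfsr64_adv() is provided by lfsr64.h (INTERNAL_BUILD only) and is not
 * shown in this file.  To make the deterministic-test path above
 * concrete, the sketch below is one plausible Galois-style 64-bit LFSR
 * step loop over the same RNG_POLY; it is an assumption about the
 * behaviour, not the library's implementation (tap ordering may differ).
 */
#if 0 /* illustrative sketch only - not compiled */
static void lfsr64_adv_sketch(uint64_t poly, uint64_t seed,
    uint64_t nsteps, uint64_t *out)
{
	uint64_t s = seed;

	while (nsteps-- != 0)
		s = (s >> 1) ^ ((s & 1) ? poly : 0);	/* one Galois step */
	*out = s;
}
#endif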
2396
2397/*
2398 * Access registers in JTAG area (mapped at offset = 0x9000000000)
2399 */
2400static bool_t jtag_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
2401{
2402 ss_proc_t *npp;
2403 int node_id;
2404 uint64_t val;
2405 int reg;
2406
2407#ifdef VFALLS /* { */
2408 domain_t *domainp;
2409 tpaddr_t pa;
2410 int idx;
2411
2412 if (config_addrp->baseaddr == PHYS_ADDR_JTAG) {
2413 /*
2414 * if local JTAG CSR access, need to convert to Node X(this node) JTAG
2415 * CSR address.
2416 */
2417 node_id = sp->config_procp->proc_id;
2418 domainp = sp->config_procp->domainp;
2419
2420 pa = PHYS_ADDR_JTAG_REMOTE(node_id) + off;
2421 config_addrp = find_domain_address(domainp, pa);
2422
2423 } else {
2424 /*
2425 * If remote JTAG CSR access, use config_addrp to get at the node_id.
2426 */
2427
2428 /* first check if global addressing is allowed for this config */
2429 GLOBAL_ADDRESSING_CHECK(sp, "JTAG");
2430
2431 domainp = config_addrp->config_devp->domainp;
2432
2433 for (idx = 0; idx<domainp->procs.count ; idx++) {
2434 node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
2435 if (config_addrp->baseaddr == PHYS_ADDR_JTAG_REMOTE(node_id))
2436 break;
2437 }
2438 }
2439#endif /* } VFALLS */
2440
2441 if (MA_ldu64!=op && MA_st64!=op) return false;
2442
2443 npp = (ss_proc_t *)config_addrp->config_devp->devp; /* target proc */
2444
2445 reg = off;
2446
2447 switch (op) {
2448 case MA_st64:
2449 val = *regp;
2450 switch (reg) {
2451 case INT_VECTOR_DISPATCH:
2452 RSVD_MASK(sp, (MASK64(5,0) | MASK64(13,8)), val, 0, reg);
2453
2454 /* note sp=interrupt originator, npp=interrupt target */
2455 niagara2_send_xirq(sp, npp, val);
2456 break;
2457 case ASI_CORE_AVAILABLE:
2458 case ASI_CORE_ENABLE_STATUS:
2459 case ASI_CORE_RUNNING_STATUS:
2460 EXEC_WARNING( ("Attempted write to RO register in JTAG/TAP:"
2461 "Write 0x%llx attempted to register %s (offset 0x%x)",
2462 val, jtag_reg_name(reg), reg ) );
2463 return false;
2464 case ASI_CORE_ENABLE:
2465 IMPL_WARNING(("%s: not supported in JTAG/TAP.\n",jtag_reg_name(reg)));
2466 break;
2467 case ASI_CORE_RUNNING_RW:
2468 /*
2469 * WS: according to the CMP PRM, writing a '1' to a bit will be ignored
2470 * if the corresponding bit in the core enable reg is 0 (i.e., the
2471 * corresponding virtual core is not enabled)
2472 */
2473 pthread_mutex_lock(&npp->cmp_lock);
2474 npp->cmp_regs.core_running_status = val & npp->cmp_regs.core_enable_status;
2475 ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
2476 pthread_mutex_unlock(&npp->cmp_lock);
2477 break;
2478 case ASI_CORE_RUNNING_W1S:
2479 /*
2480 * W1S: new_value = old_value | new_value;
2481 */
2482 pthread_mutex_lock(&npp->cmp_lock);
2483 npp->cmp_regs.core_running_status |= val;
2484 /*
2485 * According to the CMP PRM, writing a '1' to a bit will be ignored
2486 * if the corresponding bit in the core enable reg is 0 (i.e., the
2487 * corresponding virtual core is not enabled)
2488 */
2489 npp->cmp_regs.core_running_status &= npp->cmp_regs.core_enable_status;
2490
2491 /*
2492 * FIXME: need to check if the virtual core is attempting to park
2493 * all the virtual cores (this is prevented by the hardware)
2494 */
2495 ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
2496 pthread_mutex_unlock(&npp->cmp_lock);
2497 break;
2498 case ASI_CORE_RUNNING_W1C:
2499 /*
2500 * W1C: new_value = old_value & ~new_value;
2501 */
2502 pthread_mutex_lock(&npp->cmp_lock);
2503 npp->cmp_regs.core_running_status &= ~val;
2504 ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
2505 pthread_mutex_unlock(&npp->cmp_lock);
2506 break;
2507 case SOC_ERROR_STEERING:
2508 RSVD_MASK(sp, MASK64(5,0), val, 0, reg);
2509 npp->ncup->regs.soc_err_steering = val;
2510 break;
2511 default:
2512 /* illegal reg - an error */
2513 return false;
2514 }
2515 DBGMULNODE(lprintf(sp->gid, "Write JTAG register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
2516 node_id, reg, jtag_reg_name(reg), off, val););
2517 break;
2518 case MA_ldu64:
2519 switch (reg) {
2520 case INT_VECTOR_DISPATCH:
2521 EXEC_WARNING( ("Attempted read from WO register in JTAG/TAP:"
2522 " Read attempted from register %s (offset 0x%x).\n",
2523 jtag_reg_name(reg), reg ) );
2524 return false;
2525 case ASI_CORE_AVAILABLE:
2526 case ASI_CORE_ENABLE_STATUS:
2527 case ASI_CORE_ENABLE:
2528 val = npp->cmp_regs.core_enable_status;
2529 break;
2530 case ASI_CORE_RUNNING_RW:
2531 case ASI_CORE_RUNNING_STATUS:
2532 val = npp->cmp_regs.core_running_status;
2533 break;
2534 case ASI_CORE_RUNNING_W1S:
2535 case ASI_CORE_RUNNING_W1C:
2536 EXEC_WARNING( ("Attempted read from WO register in JTAG/TAP:"
2537 " Read attempted from register %s (offset 0x%x).\n",
2538 jtag_reg_name(reg), reg ) );
2539 return false;
2540 case SOC_ERROR_STEERING:
2541 val = npp->ncup->regs.soc_err_steering;
2542 break;
2543 default:
2544 /* illegal reg - an error */
2545 return false;
2546 }
2547
2548 DBGMULNODE(lprintf(sp->gid, "Read JTAG register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
2549 node_id, reg, jtag_reg_name(reg), off, val););
2550 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
2551 *regp = val;
2552 break;
2553
2554 default:
2555 ASSERT(0);
2556 }
2557
2558 return true;
2559}
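
/*
 * The three writable views of the core running status above differ only
 * in how a store combines with the old value; a compact restatement
 * (the enable register acts as a write filter per the CMP PRM notes
 * above; helper names are illustrative, not simulator API):
 */
#if 0 /* illustrative sketch only - not compiled */
static uint64_t running_rw(uint64_t wr, uint64_t enable)
{
	return wr & enable;			/* direct write, filtered */
}

static uint64_t running_w1s(uint64_t old, uint64_t wr, uint64_t enable)
{
	return (old | wr) & enable;		/* write 1 to set */
}

static uint64_t running_w1c(uint64_t old, uint64_t wr)
{
	return old & ~wr;			/* write 1 to clear */
}
#endif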
2560
2561#ifdef VFALLS /* { */
2562static bool_t ncx_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
2563{
2564 uint64_t val;
2565 ss_proc_t *npp;
2566 bool_t self = true;
2567 domain_t *domainp;
2568 int node_id;
2569 tpaddr_t pa;
2570 int idx;
2571 ncx_t *ncxp;
2572 int mode_shift;
2573 int reg;
2574 uint core_num;
2575 sparcv9_cpu_t * tv9p;
2576 simcpu_t * tsp;
2577 config_dev_t *config_devp = NULL;
2578 bool_t zambezi_present = false;
2579
2580 if (config_addrp->baseaddr == PHYS_ADDR_NCX) {
2581 /*
2582 * if local NCX CSR access, need to convert to Node X(this node)
2583 * NCX CSR address
2584 */
2585 node_id = sp->config_procp->proc_id;
2586 domainp = sp->config_procp->domainp;
2587 pa = PHYS_ADDR_NCX_REMOTE(node_id) + off;
2588 config_addrp = find_domain_address(domainp, pa);
2589 self = true;
2590 } else {
2591 /*
2592 * If remote NCX CSR access, use config_addrp to get at the node_id.
2593 */
2594
2595 /* first check if global addressing is allowed for this config */
2596 GLOBAL_ADDRESSING_CHECK(sp, "NCX");
2597
2598 domainp = config_addrp->config_devp->domainp;
2599
2600 for (idx = 0; idx<domainp->procs.count ; idx++) {
2601 node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
2602 if (config_addrp->baseaddr == PHYS_ADDR_NCX_REMOTE(node_id))
2603 break;
2604 }
2605 self = (node_id == sp->config_procp->proc_id) ? true : false;
2606 }
2607
2608 if (MA_ldu64!=op && MA_st64!=op) return false;
2609 npp = (ss_proc_t *)config_addrp->config_devp->devp;
2610 ncxp = npp->ncxp;
2611
2612 reg = off;
2613
2614 switch (op) {
2615 case MA_ldu64:
2616 switch (reg) {
2617 case CF_SYS_MODE_REG:
2618 if (!self){
2619 EXEC_WARNING( ("[0x%llx] (pc=0x%llx) Access "
2620 "attempt to %s of remote "
2621 "node %d. PRM recommends only local "
2622 "access\n", sp->gid, sp->pc,
2623 ncx_reg_name(off), node_id));
2624 }
2625 val = ncxp->sys_mode;
2626 break;
2627 case NCX_TIC_EN_SLOW:
2628 val = ncxp->tick_en_slow;
2629 break;
2630 case CF_SLOW_PULSE_WAIT:
2631 val = 0x0;
2632 break;
2633 case NCX_TWR:
2634 val = ncxp->twr;
2635 break;
2636 case NCX_TPESR:
2637 val = ncxp->tpesr;
2638 break;
2639 case NCX_TPELSE:
2640 val = ncxp->tpelse;
2641 break;
2642 case NCX_TPEAR:
2643 val = ncxp->tpear;
2644 break;
2645 default:
2646 /* illegal reg - an error */
2647 return false;
2648 }
2649 DBGMULNODE(lprintf(sp->gid, "Read NCX register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
2650 node_id, reg, ncx_reg_name(reg), off, val););
2651
2652 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
2653 *regp = val;
2654 break;
2655 case MA_st64:
2656 val = *regp;
2657
2658#define ASSIGN_NCX(_n, _m) do { \
2659 ncxp->_n = val & (_m); \
2660 if (0LL != (val & ~(_m))) \
2661 EXEC_WARNING( ("Attempted write to reserved field in NCX being masked out : " \
2662 "Attempted write was 0x%llx to register %s (offset 0x%x) on node %d", \
2663 val, ncx_reg_name(off), off, node_id ) ); \
2664} while (0)
2665 switch (reg) {
2666 case CF_SYS_MODE_REG:
2667 if (!self){
2668 EXEC_WARNING( ("[0x%llx] (pc=0x%llx) Access "
2669 "attempt to %s of remote node %d. PRM "
2670 "recommends only local access.\n",
2671 sp->gid, sp->pc, ncx_reg_name(off),
2672 node_id));
2673 }
2674 ASSIGN_NCX(sys_mode, SM_REG_MASK);
2675
2676 /* check that correct node_id value being written */
2677 if (SM_2_NODE(ncxp->sys_mode) != node_id)
2678 fatal("[0x%llx] (pc=0x%llx)\tAttempt to write %d"
2679 " to node_id field of CF_SYS_MODE_REG "
2680 "(offset 0x%x) on node %d.\n",
2681 sp->gid, sp->pc, SM_2_NODE(ncxp->sys_mode),
2682 off, node_id);
2683
2684 /* check if zambezi present or glueless */
2685 config_devp = domainp->device.listp;
2686 for (idx = 0; idx < domainp->device.count; idx++) {
2687 if (streq(config_devp->dev_typep->dev_type_namep, "zambezi")) {
2688 zambezi_present = true;
2689 break;
2690 }
2691 config_devp = config_devp->nextp;
2692 }
2693 if (zambezi_present ^ ((ncxp->sys_mode >> SM_EXTERN_HUB_SEL_SHIFT) & 1))
2694 fatal("[0x%llx] (pc=0x%llx)\tAttempt to write %d"
2695 " to extern_hub field of CF_SYS_MODE_REG "
2696 "(offset 0x%x) on a %d-node system.\n",
2697 sp->gid, sp->pc,
2698 ((ncxp->sys_mode >> SM_EXTERN_HUB_SEL_SHIFT)
2699 & 1), off, domainp->procs.count);
2700
2701 /* check that way info is correct and only one bit asserted too */
2702 if ((domainp->procs.count == 1) &&
2703 ((SM_EWAY_BITS(ncxp->sys_mode)) != 0))
2704 fatal("[0x%llx] (pc=0x%llx)\tMultiple ways mode"
2705 " selected in CF_SYS_MODE_REG (offset 0x%x) "
2706 "on node %d.\nAttempted write was 0x%llx on "
2707 "a %d-way system.\n", sp->gid, sp->pc,
2708 off, node_id, ncxp->sys_mode,
2709 domainp->procs.count);
2710
2711 else if (domainp->procs.count > 1) {
2712 mode_shift = 11 - domainp->procs.count;
2713 if ((SM_EWAY_BITS(MASK64(mode_shift, mode_shift)) != (SM_EWAY_BITS(ncxp->sys_mode))))
2714 fatal("[0x%llx] (pc=0x%llx)\tAttempt to "
2715 "write wrong way info in "
2716 "CF_SYS_MODE_REG (offset 0x%x) on "
2717 "node %d.\nAttempted write was "
2718 "0x%llx on a %d-way system.\n",
2719 sp->gid, sp->pc, off, node_id,
2720 ncxp->sys_mode, domainp->procs.count);
2721 }
2722 DBGMULNODE(lprintf(-1, "SYS_MODE_REG for node %d set "
2723 "to 0x%llx\n", node_id, ncxp->sys_mode););
2724 break;
2725 case NCX_TIC_EN_SLOW:
2726 RSVD_MASK(sp, (MASK64(0,0)), val, 0, reg);
2727 pthread_mutex_lock(&npp->tick_en_lock);
2728
2729 if (!val && !npp->cmp_regs.tick_enable && !npp->tick_stop) {
2730 ss_strand_t * tnsp;
2731
2732 npp->tick_stop = true;
2733
2734 /* now stop all tick counters */
2735 core_num = (uint_t) -1;
2736 for (idx = 0; idx < npp->nstrands; idx++) {
2737 tv9p = npp->strand[idx];
2738 tnsp = &(npp->ss_strandp[idx]);
2739 if (tnsp->core != core_num) {
2740 tv9p->tick->offset += RAW_TICK(tv9p);
2741 core_num = tnsp->core;
2742 }
2743 tsp = tv9p->simp;
2744 ss_recomp_tick_target(tsp);
2745
2746 }
2747 }
2748
2749 if (val && npp->tick_stop) {
2750 ss_strand_t * tnsp;
2751
2752 npp->tick_stop = false;
2753
2754 /* now start all tick counters */
2755 core_num = (uint_t) -1;
2756 for (idx = 0; idx < npp->nstrands; idx++) {
2757 tv9p = npp->strand[idx];
2758 tnsp = &(npp->ss_strandp[idx]);
2759 if (tnsp->core != core_num) {
2760 tv9p->tick->offset -= RAW_TICK(tv9p);
2761 core_num = tnsp->core;
2762 }
2763 tsp = tv9p->simp;
2764 ss_recomp_tick_target(tsp);
2765
2766 }
2767 }
2768
2769 ncxp->tick_en_slow = val;
2770 pthread_mutex_unlock(&npp->tick_en_lock);
2771 break;
2772 case CF_SLOW_PULSE_WAIT:
2773 EXEC_WARNING( ("Attempted write to RO register in NCX:"
2774 "Write 0x%llx to register %s (offset 0x%x)",
2775 val, ncx_reg_name(reg), reg ) );
2776 return false;
2777 case NCX_TWR:
2778 ASSIGN_NCX(twr, MASK64(31,10));
2779 break;
2780 case NCX_TPESR:
2781 /*
2782 * bit[6:0]: W1C
2783 */
2784 RSVD_MASK(sp, (MASK64(6,0)), val, 0, reg);
2785 ncxp->tpesr &= ~val;
2786 break;
2787 case NCX_TPELSE:
2788 ASSIGN_NCX(tpelse, MASK64(6,2));
2789 break;
2790 case NCX_TPEAR:
2791 /*
2792 * bit[63:61]: W1C
2793 */
2794 RSVD_MASK(sp, MASK64(60,0), val, 0, reg);
2795 ncxp->tpear &= ~val;
2796 break;
2797 default:
2798 /* illegal reg - an error */
2799 return false;
2800 }
2801
2802 DBGMULNODE(lprintf(sp->gid, "Write NCX register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
2803 node_id, reg, ncx_reg_name(reg), off, val););
2804
2805 break;
2806 default:
2807 ASSERT(0);
2808 }
2809
2810 return true;
2811
2812}
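
/*
 * Sketch of the tick stop/start bookkeeping in the NCX_TIC_EN_SLOW case
 * above, under the assumption that a strand's %tick reads as the raw
 * host counter plus a per-tick offset while running, and as the offset
 * alone while stopped.  Folding the raw value into the offset at stop
 * time freezes the visible count; subtracting it again at restart lets
 * the count resume without a jump.  Names here are illustrative.
 */
#if 0 /* illustrative sketch only - not compiled */
typedef struct {
	uint64_t offset;
} tick_sketch_t;

static void tick_freeze(tick_sketch_t *t, uint64_t raw_now)
{
	t->offset += raw_now;	/* visible value becomes constant */
}

static void tick_resume(tick_sketch_t *t, uint64_t raw_now)
{
	t->offset -= raw_now;	/* raw + offset resumes seamlessly */
}
#endif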
2813
2814static bool_t cou_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
2815{
2816 ss_proc_t *npp;
2817 cou_t * coup;
2818 uint64_t val;
2819 uint64_t reg;
2820 int node_id;
2821 tpaddr_t pa;
2822 int idx;
2823 domain_t *domainp;
2824 int link;
2825
2826 if (config_addrp->baseaddr == PHYS_ADDR_COU) {
2827 /*
2828 * if local COU CSR access, need to convert to Node X(this node)
2829 * COU CSR address
2830 */
2831 node_id = sp->config_procp->proc_id;
2832 domainp = sp->config_procp->domainp;
2833
2834 pa = PHYS_ADDR_COU_REMOTE(node_id) + off;
2835 config_addrp = find_domain_address(domainp, pa);
2836
2837 /*
2838 * No access is allowed for single node
2839 */
2840 if (domainp->procs.count == 1) return false;
2841
2842 } else {
2843 /*
2844 * If remote COU CSR access, use config_addrp to get at the node_id.
2845 */
2846
2847 /* first check if global addressing is allowed for this config */
2848 GLOBAL_ADDRESSING_CHECK(sp, "COU");
2849
2850 domainp = config_addrp->config_devp->domainp;
2851
2852 for (idx = 0; idx<domainp->procs.count ; idx++) {
2853 node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
2854 if (config_addrp->baseaddr == PHYS_ADDR_COU_REMOTE(node_id))
2855 break;
2856 }
2857 }
2858
2859 if (MA_ldu64!=op && MA_st64!=op) return false;
2860
2861 npp = (ss_proc_t *)config_addrp->config_devp->devp;
2862 coup = npp->coup;
2863
2864 reg = off & ~COU_LINK_MASK;
2865 link = (off & COU_LINK_MASK) >> COU_LINK_SHIFT;
2866
2867 switch (op) {
2868 case MA_st64:
2869 val = *regp;
2870 switch (reg) {
2871 case COU_ERR_ENABLE_REG:
2872 RSVD_MASK(sp, MASK64(2,0), val, 0, reg);
2873 coup->cou_err_enable[link] = val;
2874 break;
2875 case COU_ESR:
2876 /*
2877 * bit[4:0]: W1C
2878 */
2879 RSVD_MASK(sp, MASK64(4,0), val, 0, reg);
2880 coup->cou_esr[link] &= ~val;
2881 break;
2882 case COU_EAR:
2883 EXEC_WARNING( ("Attempted write to RO register in COU:"
2884 "Write 0x%llx to register %s (offset 0x%x)",
2885 val, cou_reg_name(reg), reg ) );
2886 return false;
2887
2888 default:
2889 /* illegal reg - an error */
2890 return false;
2891 }
2892
2893 DBGMULNODE(lprintf(sp->gid, "Write COU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
2894 node_id, reg, cou_reg_name(reg), off, val););
2895 break;
2896 case MA_ldu64:
2897 switch (reg) {
2898 case COU_ERR_ENABLE_REG:
2899 val = coup->cou_err_enable[link];
2900 break;
2901 case COU_ESR:
2902 val = coup->cou_esr[link];
2903 break;
2904 case COU_EAR:
2905 val = coup->cou_ear[link];
2906 break;
2907 default:
2908 /* illegal reg - an error */
2909 return false;
2910 }
2911
2912 DBGMULNODE(lprintf(sp->gid, "Read COU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
2913 node_id, reg, cou_reg_name(reg), off, val););
2914
2915 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
2916 *regp = val;
2917 break;
2918
2919 default:
2920 ASSERT(0);
2921 }
2922 return true;
2923}
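
/*
 * Both the COU and the LFU map one copy of their register set per
 * coherency link, so the offset decode above splits into a link index
 * and a register selector.  A sketch with caller-supplied constants
 * (lfu_access() below uses a shift of 12, i.e. a 4KB per-link stride;
 * the real COU values live in the headers and may differ):
 */
#if 0 /* illustrative sketch only - not compiled */
static void link_csr_decode(uint64_t off, uint64_t link_mask,
    int link_shift, int *linkp, uint64_t *regselp)
{
	*linkp = (int)((off & link_mask) >> link_shift);
	*regselp = off & ~link_mask;	/* register within the link */
}
#endif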
2924
2925static bool_t lfu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
2926{
2927 ss_proc_t *npp, *tnpp;
2928 lfu_t *lfup, *tlfup;
2929 uint64_t val;
2930 int reg;
2931 int node_id;
2932 tpaddr_t pa;
2933 int idx, lnk;
2934 domain_t *domainp;
2935 int link;
2936 config_dev_t *devp = NULL;
2937 bool_t zambezi_present = false;
2938 bool_t lfu_ok = true;
2939
2940 if (config_addrp->baseaddr == PHYS_ADDR_LFU) {
2941 /*
2942 * if local LFU CSR access, need to convert to Node X(this node)
2943 * LFU CSR address
2944 */
2945
2946 node_id = sp->config_procp->proc_id;
2947 domainp = sp->config_procp->domainp;
2948
2949 /* single node configs can't access LFU CSRs */
2950 if (domainp->procs.count == 1) return false;
2951
2952 pa = PHYS_ADDR_LFU_REMOTE(node_id) + off;
2953 config_addrp = find_domain_address(domainp, pa);
2954
2955 } else {
2956 /*
2957 * If remote LFU CSR access, use config_addrp to get at the node_id.
2958 */
2959
2960 /* first check if global addressing is allowed for this config */
2961 GLOBAL_ADDRESSING_CHECK(sp, "LFU");
2962
2963 domainp = config_addrp->config_devp->domainp;
2964
2965 for (idx = 0; idx<domainp->procs.count ; idx++) {
2966 node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
2967 if (config_addrp->baseaddr == PHYS_ADDR_LFU_REMOTE(node_id))
2968 break;
2969 }
2970 }
2971
2972 if (MA_ldu64!=op && MA_st64!=op) return false;
2973
2974 npp = (ss_proc_t *)config_addrp->config_devp->devp;
2975 lfup = npp->lfup;
2976
2977 reg = off & ~LFU_LINK_MASK;
2978 link = (off & LFU_LINK_MASK) >> 12;
2979
2980 switch (op) {
2981 case MA_st64:
2982 val = *regp;
2983 switch (reg) {
2984 case CL_INIT_STATE:
2985 EXEC_WARNING( ("Attempted write to RO register in LFU:"
2986 "Write 0x%llx to register %s (offset 0x%x)",
2987 val, lfu_reg_name(reg), reg ) );
2988 return false;
2989
2990 case CL_CFG_REG:
2991 RSVD_MASK(sp, (MASK64(5,0)), val, 0, reg);
2992 lfup->cl_cfg_reg[link] = val;
2993
2994 if (val & MASK64(0,0))
2995 lfup->cl_init_state[link] = LFU_LINK_L0;
2996
2997 /* Zambezi present? */
2998 devp = domainp->device.listp;
2999 for (idx = 0; idx < domainp->device.count; idx++) {
3000 if (streq(devp->dev_typep->dev_type_namep, "zambezi")) {
3001 zambezi_present = true;
3002 break;
3003 }
3004 devp = devp->nextp;
3005 }
3006
3007 if (zambezi_present) {
3008 /* check if all set to master */
3009 for (lnk=0; lnk<LFU_MAX_LINKS; lnk++) {
3010 if ((lfup->cl_cfg_reg[lnk] & MASK64(1,0)) != LFU_LINK_MASTER_EN)
3011 lfu_ok = false;
3012 }
3013
3014 } else {
3015
3016 /*
3017 * The table below enumerates the legal and
3018 * illegal cases for glueless mode.
3019 * nb: node A is current node whose cl_cfg_reg
3020 * is being written to and node B is the other
3021 * node whose register value is just being
3022 * checked.
3023 *
3024 * Legal combinations (of cl_cfg_reg bits 1, 0)
3025 * -------------------------------------------
3026 *
3027 * node A node B
3028 * ------ ------
3029 * 11 - master en'ed 01 - slave en'ed
3030 * 11 - master en'ed 00 - register not yet written to
3031 * 01 - slave en'ed 11 - master en'ed
3032 * 01 - slave en'ed 00 - register not yet written to
3033 *
3034 * Illegal combinations (of cl_cfg_reg bits 1, 0)
3035 * ---------------------------------------------
3036 *
3037 * node A node B
3038 * ------ ------
3039 * 11 - master en'ed 10 - also master but not en'ed
3040 * 11 - master en'ed 11 - also master en'ed
3041 * 01 - slave en'ed 10 - master but not en'ed
3042 * 01 - slave en'ed 01 - also slave
3043 *
3044 * Note : Any other values written to Node A (00 or 10)
3045 * are illegal.
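 * A compact restatement of this table appears in a sketch
 * after this function.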
3046 */
3047
3048 int link_mode = lfup->cl_cfg_reg[0] & (MASK64(1,0));
3049 if ((link_mode != LFU_LINK_MASTER_EN) && (link_mode != LFU_LINK_SLAVE_EN)) {
3050 lfu_ok = false;
3051
3052 } else {
3053 /* all links for that node must be the same mode */
3054 for (lnk = 1; lnk < LFU_MAX_LINKS; lnk++) {
3055 if (lfup->cl_cfg_reg[0] ^ lfup->cl_cfg_reg[lnk]) {
3056 lfu_ok = false;
3057 break;
3058 }
3059 }
3060 }
3061
3062 if (lfu_ok) {
3063 /*
3064 * now get hold of other node to make sure that
3065 * we don't have 2 slaves or 2 masters.
3066 */
3067 for (idx = 0; idx < 2; idx++) {
3068 tnpp = LIST_ENTRY(domainp->procs, idx)->procp;
3069 if (tnpp->lfup != lfup) {
3070 tlfup = tnpp->lfup;
3071 break;
3072 }
3073 }
3074
3075 if (link_mode == LFU_LINK_MASTER_EN) {
3076 if ((tlfup->cl_cfg_reg[0] & (MASK64(1,1))))
3077 fatal("[0x%llx] (pc=0x%llx)\t"
3078 " Attempt to write master"
3079 " bit in %s of both nodes"
3080 " in glueless config.\n",
3081 sp->gid, sp->pc,
3082 lfu_reg_name(reg));
3083 } else {
3084 /* ie. node A is slave */
3085 switch (tlfup->cl_cfg_reg[0] & MASK64(1, 0)) {
3086 case 0x1:
3087 fatal("[0x%llx] (pc=0x%llx)\t"
3088 "Attempt to write slave"
3089 " bit in %s of both nodes"
3090 " in glueless config.\n",
3091 sp->gid, sp->pc,
3092 lfu_reg_name(reg));
3093 break;
3094 case 0x2:
3095 fatal("[0x%llx] (pc=0x%llx)\t"
3096 "Enable bit not set in"
3097 " %s of node %d.\n",
3098 sp->gid, sp->pc,
3099 lfu_reg_name(reg),
3100 tnpp->config_procp->proc_id);
3101 break;
3102
3103 default:
3104 /* all other cases ok */
3105 break;
3106 }
3107 }
3108 }
3109 }
3110
3111 /*
3112 * Strictly speaking, global addressing should only be allowed when
3113 * ALL nodes have their lfu's set up correctly. But because Legion
3114 * cycles through the different threads by granting each cpu a
3115 * quantum of instructions, doing that would raise false errors.
3116 * So we just check and update one node at a time.
3117 */
3118
3119 npp->global_addressing_ok.flags.lfu = lfu_ok ? GLOBAL_ADDRESSING_FLAG_EN : GLOBAL_ADDRESSING_FLAG_DIS;
3120
3121 break;
3122 case CL_SERDES_CFG:
3123 RSVD_MASK(sp, ((MASK64(32, 28)|MASK64(26, 20)|MASK64(14, 8)|MASK64(1, 0))),
3124 val, 0, reg);
3125 lfup->cl_serdes_cfg[link] = val;
3126 break;
3127 case CL_SER_INVPAIR:
3128 RSVD_MASK(sp, (MASK64(27,0)), val, 0, reg);
3129 lfup->cl_ser_invpair[link] = val;
3130 break;
3131 case CL_TEST_CFG:
3132 RSVD_MASK(sp, (MASK64(15, 10)|MASK64(7,0)), val, 0, reg);
3133 lfup->cl_test_cfg[link] = val;
3134 break;
3135 case CL_ERROR_STAT:
3136 RSVD_MASK(sp, (MASK64(24,0)), val, 0, reg);
3137 lfup->cl_error_stat[link] &= ~val;
3138 break;
3139 default:
3140 /* illegal reg - an error */
3141 return false;
3142 }
3143
3144 DBGMULNODE(lprintf(sp->gid, "Write LFU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
3145 node_id, reg, lfu_reg_name(reg), off, val););
3146 break;
3147 case MA_ldu64:
3148 switch (reg) {
3149 case CL_INIT_STATE:
3150 val = lfup->cl_init_state[link];
3151 break;
3152 case CL_CFG_REG:
3153 val = lfup->cl_cfg_reg[link];
3154 break;
3155 case CL_SERDES_CFG:
3156 val = lfup->cl_serdes_cfg[link];
3157 break;
3158 case CL_SER_INVPAIR:
3159 val = lfup->cl_ser_invpair[link];
3160 break;
3161 case CL_TEST_CFG:
3162 val = lfup->cl_test_cfg[link];
3163 break;
3164 case CL_ERROR_STAT:
3165 val = lfup->cl_error_stat[link];
3166 break;
3167 default:
3168 /* illegal reg - an error */
3169 return false;
3170 }
3171
3172 DBGMULNODE(lprintf(sp->gid, "Read LFU register on node %d 0x%lx '%s' offset = 0x%llx value = 0x%llx\n",
3173 node_id, reg, lfu_reg_name(reg), off, val););
3174
3175 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
3176 *regp = val;
3177 break;
3178
3179 default:
3180 ASSERT(0);
3181 }
3182 return true;
3183}
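
/*
 * Compact restatement of the glueless legality table documented in the
 * CL_CFG_REG case above: one node must be an enabled master (low
 * cl_cfg_reg bits == 11) while its peer is an enabled slave (01) or
 * still unwritten (00), and vice versa.  (Illustrative helper, not
 * simulator API; arguments are bits 1:0 of each node's cl_cfg_reg.)
 */
#if 0 /* illustrative sketch only - not compiled */
static int glueless_cfg_legal(unsigned node_a, unsigned node_b)
{
	if (node_a == 0x3)	/* master, enabled */
		return (node_b == 0x1 || node_b == 0x0);
	if (node_a == 0x1)	/* slave, enabled */
		return (node_b == 0x3 || node_b == 0x0);
	return 0;		/* writing 00 or 10 locally is illegal */
}
#endif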
3184
3185#endif /* } VFALLS */
3186/*
3187 * Access L2 Cache registers, mapped at offset = 0xA000000000
3188 */
3189static bool_t l2c_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
3190{
3191 ss_proc_t * npp;
3192 int reg, bank;
3193 uint64_t val;
3194 l2c_t * l2p;
3195 uint dbgen_bit;
3196#ifdef VFALLS /* { */
3197
3198 /*
3199 * Note that for VF, each node has its own L2CSR region and all of them
3200 * are addressed by the same physical address(0xA0.0000.0000 to
3201 * 0xBF.FFFF.FFFF). Nodes can only access their local L2CSR regions and
3202 * not those of other nodes. So the domain structure's addressmap only
3203 * contains that physical address(0xA0.0000.0000 to 0xBF.FFFF.FFFF) and
3204 * it is up to the init and access routines to correctly map that PA to
3205 * the correct node's L2CSR region.
3206 * This is unlike NCU, CCU, MCU etc., where there is both a local CSR
3207 * access address, common to all nodes and translated by hw to the
3208 * originating node's address space, and a remote CSR access
3209 * address, which allows any node to access any other
3210 * node's address space. So in this case the domain address map will
3211 * contain both the local CSR address space as well as the remote CSR
3212 * address space.
3213 */
3214
3215 /* Redirect the common L2CSR PA to the correct node */
3216
3217 npp = (ss_proc_t *)sp->config_procp->procp;
3218 config_addrp = npp->l2c_devp->addrp;
3219#endif /* } VFALLS */
3220
3221 /*
3222 * FIXME: For the moment we only support 64bit accesses to registers.
3223 */
3224 if (MA_ldu64!=op && MA_st64!=op) return false;
3225
3226 npp = (ss_proc_t *)config_addrp->config_devp->devp;
3227 l2p = npp->l2p;
3228
3229 bank = (off >> 6) & 0x7; /* N2 supports 8-banked L2 cache */
3230 reg = (off >> 32) & 0xf;
3231
3232 switch (op) {
3233 case MA_st64:
3234 val = *regp;
3235
3236 if (reg >= 0x8) {
3237#define ASSIGN_L2(_n, _m) do { \
3238 if (0LL != (val & ~(_m))) goto write_reserved; \
3239 l2p->_n[bank] = val; \
3240 } while (0)
3241 switch (reg) {
3242 /*
3243 * L2 BIST Control Reg section 28.18 of N2 PRM 0.9.1
3244 */
3245 case L2_TAG_BIST:
3246 ASSIGN_L2( bist_ctl, MASK64(6,0) );
3247 if (val & 1) l2p->bist_ctl[bank] |= 0x400;
3248 break;
3249#ifdef VFALLS /* { */
3250 case L2_CONTROL:
3251 /*
3252 * L2 Control Register section 28.1.2 of VF PRM 0.9
3253 */
3254 ASSIGN_L2( control, (MASK64(37, 36)|MASK64(34, 0)) );
3255 if (((l2p->control[bank] & L2_NODEID) >> L2_NODEID_SHIFT) != sp->config_procp->proc_id)
3256 fatal("[0x%llx] (pc=0x%llx)\tIncorrect "
3257 "node_id being set in L2 CONTROL REG"
3258 "(offset 0x%x). Attempted write was "
3259 "0x%llx on node 0x%x.", sp->gid, sp->pc,
3260 off, val, sp->config_procp->proc_id);
3261
3262 /*
3263 * Please see Victoria Falls Bug 124014 for details.
3264 * Bug court approval 1/10/2006. The fix that was approved
3265 * involves the addition of an L2 Control Register bit
3266 * specifying the configuration, rather than bringing in
3267 * ncx_2way into all 8 l2t's.
3268 * There is an idle bit in the L2 CSR (bit 21), which was
3269 * designated as DBG_EN in the PRM but no longer used by N2
3270 * or VF. This bit will now need to be used to indicate a
3271 * system with 3 or 4 VF nodes configured.
3272 */
3273 dbgen_bit = (l2p->control[bank] & L2_DBGEN) >> L2_DBGEN_SHIFT;
3274 if ((sp->config_procp->domainp->procs.count>2) ^ dbgen_bit)
3275 fatal("[0x%llx] (pc=0x%llx)\tIncorrect "
3276 "value being set for L2 DBGEN bit"
3277 " in the L2 CONTROL reg(offset 0x%x)."
3278 " Attempted write was 0x%llx on node "
3279 "0x%x.", sp->gid, sp->pc,
3280 off, val, sp->config_procp->proc_id);
3281 break;
3282 case L2_ERROR_ENABLE:
3283 /*
3284 * Table 12-3 of VF PRM 0.1
3285 */
3286 ASSIGN_L2( error_enable, MASK64(31,0));
3287 break;
3288 case L2_ERROR_STATUS:
3289 /*
3290 * Table 12-1 of VF PRM 0.1
3291 *
3292 * RW1C: bit [63:56], [53:33]
3293 * RW: bit [27:0]
3294 */
3295 RSVD_MASK(sp, (MASK64(63,56)|MASK64(53,33)|MASK64(27,0)), val, 0, reg);
3296 l2p->error_status[bank] &= ~val;
3297 l2p->error_status[bank] &= MASK64(63,56)|MASK64(53,33);
3298 l2p->error_status[bank] |= val & MASK64(27,0);
3299 break;
3300 case L2_ERROR_STATUS_II:
3301 ASSIGN_L2( error_status_ii, MASK64(63,40));
3302 break;
3303#else
3304 case L2_CONTROL:
3305 /*
3306 * L2 Control Register section 28.15 of N2 PRM 0.9.1
3307 */
3308 ASSIGN_L2( control, MASK64(21,0) );
3309 break;
3310 case L2_ERROR_ENABLE:
3311 /*
3312 * Error handling section 25.10 of N2 PRM 1.2
3313 */
3314 ASSIGN_L2( error_enable, MASK64(2,0) );
3315 break;
3316 case L2_ERROR_STATUS:
3317 /*
3318 * Table 25-21 of N2 PRM 1.2
3319 *
3320 * RW1C: bit [53:34]
3321 * RW: bit [63:54], [27:0]
3322 */
3323 RSVD_MASK(sp, (MASK64(63,34)|MASK64(27,0)), val, 0, reg);
3324 l2p->error_status[bank] &= ~val;
3325 l2p->error_status[bank] &= MASK64(63,34);
3326 l2p->error_status[bank] |= val & MASK64(27,0);
3327 break;
3328#endif /* } VFALLS */
3329 case L2_ERROR_ADDRESS:
3330 ASSIGN_L2( error_address, MASK64(39,4) );
3331 break;
3332 case L2_ERROR_INJECT:
3333 ASSIGN_L2( error_inject, MASK64(1,0) );
3334 break;
3335 case L2_ERROR_NOTDATA:
3336 ASSIGN_L2( error_notdata, MASK64(51,48)|MASK64(45,4) );
3337 break;
3338 default:
3339 /* illegal reg - an error */
3340 return false;
3341 }
3342 } else {
3343 uint64_t idx;
3344 /*
3345 * L2 Cache Diagnostic Access section 28.17 of N2 PRM 0.9.1
3346 */
3347 if (reg < 0x4) {
3348 /*
3349 * index stores to a 32bit word and its ECC+rsvd bits
3350 */
3351 idx = (off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD)) >> 2;
3352 /*
3353 * put oddeven select bit low so data is in addr order
3354 */
3355 idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
3356 l2p->diag_datap[idx] = val;
3357
3358 } else {
3359 if (reg < 0x6) {
3360 /*
3361 * index stores to a tag and its ECC+rsvd bits
3362 */
3363 idx = (off & (L2_WAY | L2_LINE | L2_BANK)) >> 6;
3364 l2p->diag_tagp[idx] = val;
3365 } else {
3366 /*
3367 * index valid/dirty or alloc/used bits and parity
3368 */
3369 idx = (off & (L2_LINE | L2_BANK)) >> 6;
3370 idx |= ((off & L2_VDSEL) >> 10);
3371 l2p->diag_vuadp[idx] = val;
3372 }
3373 }
3374 }
3375 break;
3376
3377write_reserved:
3378 EXEC_WARNING( ("Attempted write to reserved field in l2 cache controller:"
3379 "Write 0x%llx to bank %d, register %s (offset 0x%x)",
3380 val, bank, l2c_reg_name(reg), reg ) );
3381 return false;
3382
3383 case MA_ldu64:
3384 if (reg >= 0x8) {
3385#define RETRIEVE_L2(_n, _m) do { val = ((l2p->_n[bank]) & (_m)); } while (0)
3386 switch (reg) {
3387 /*
3388 * L2 BIST Control Reg section 28.18 of N2 PRM 0.9.1
3389 */
3390 case L2_TAG_BIST:
3391 RETRIEVE_L2( bist_ctl, MASK64(10,0) );
3392 break;
3393#ifdef VFALLS /* { */
3394 case L2_CONTROL:
3395 /*
3396 * L2 Control Register section 28.1.2 of VF PRM 0.9
3397 */
3398 RETRIEVE_L2( control, MASK64(33,0));
3399 break;
3400 case L2_ERROR_ENABLE:
3401 /*
3402 * Error handling section 12.25.1 of VF PRM 0.1
3403 */
3404 RETRIEVE_L2( error_enable, MASK64(31,0) );
3405 break;
3406 case L2_ERROR_STATUS:
3407 RETRIEVE_L2( error_status,
3408 MASK64(63,56)|MASK64(53,33)|MASK64(27,0));
3409 break;
3410 case L2_ERROR_STATUS_II:
3411 RETRIEVE_L2( error_status_ii, MASK64(63,40));
3412 break;
3413#else
3414 case L2_CONTROL:
3415 /*
3416 * L2 Control Register section 28.15 of N2 PRM 0.9.1
3417 */
3418 RETRIEVE_L2( control, MASK64(21,0) );
3419 break;
3420 case L2_ERROR_ENABLE:
3421 /*
3422 * Error handling section 25.10 of N2 PRM 1.2
3423 */
3424 RETRIEVE_L2( error_enable, MASK64(2,0) );
3425 break;
3426 case L2_ERROR_STATUS:
3427 RETRIEVE_L2( error_status, MASK64(63,34)|MASK64(27,0));
3428 break;
3429#endif /* } VFALLS */
3430 case L2_ERROR_ADDRESS:
3431 RETRIEVE_L2( error_address, MASK64(39,4) );
3432 break;
3433 case L2_ERROR_INJECT:
3434 RETRIEVE_L2( error_inject, MASK64(1,0) );
3435 break;
3436 case L2_ERROR_NOTDATA:
3437 RETRIEVE_L2( error_notdata, MASK64(51,48)|MASK64(45,4) );
3438 break;
3439 default:
3440 /* illegal reg - an error */
3441 return false;
3442 }
3443 } else {
3444 uint64_t idx;
3445 /*
3446 * L2 Cache Diagnostic Access section 28.17 of N2 PRM 0.9.1
3447 */
3448 if (reg < 0x4) {
3449 /*
3450 * index retrieves a 32bit word and its ECC+rsvd bits
3451 */
3452 idx = (off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD)) >> 2;
3453 /*
3454 * put oddeven select bit low so data is in addr order
3455 */
3456 idx |= ((off >> L2_ODDEVEN_SHIFT) & 1);
3457 val = l2p->diag_datap[idx];
3458
3459 } else {
3460 if (reg < 0x6) {
3461 /*
3462 * index retrieves a tag and its ECC+rsvd bits
3463 */
3464 idx = (off & (L2_WAY | L2_LINE | L2_BANK)) >> 6;
3465 val = l2p->diag_tagp[idx];
3466 } else {
3467 /*
3468 * index valid/dirty or alloc/used bits and parity
3469 */
3470 idx = (off & (L2_LINE | L2_BANK)) >> 6;
3471 idx |= ((off & L2_VDSEL) >> 10);
3472 val = l2p->diag_vuadp[idx];
3473 }
3474 }
3475 }
3476 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
3477 *regp = val;
3478
3479 break;
3480 default:
3481 ASSERT(0);
3482 }
3483
3484 return true;
3485}
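
/*
 * The diagnostic-array indexing above turns a byte offset into an array
 * index: mask off the way/line/bank/word select fields, then shift the
 * result right (by 2 for 32-bit data words, by 6 for per-line tag and
 * VUAD entries).  Minimal sketch of that mask-then-shift order (the
 * field mask is caller-supplied here, not the real L2_* constants):
 */
#if 0 /* illustrative sketch only - not compiled */
static uint64_t l2_diag_index(uint64_t off, uint64_t field_mask, int shift)
{
	return (off & field_mask) >> shift;	/* mask first, then shift */
}
#endif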
3486
3487/*
3488 * Access DRAM Control and Status Registers (mapped at offset = 0x8400000000)
3489 */
3490static bool_t mcu_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp)
3491{
3492 ss_proc_t * npp;
3493 int reg, bank;
3494 uint64_t val;
3495 mcu_bank_t * dbp;
3496 uint64_t i, ambid;
3497 uint32_t val32;
3498 int node_id = 0;
3499
3500#ifdef VFALLS /* { */
3501 domain_t *domainp;
3502 tpaddr_t pa;
3503 int idx;
3504
3505 if (config_addrp->baseaddr == PHYS_ADDR_MCU) {
3506 /*
3507 * If local MCU CSR access, need to convert to Node X(this node) MCU CSR
3508 * address. Use the simcpu to get the correct node_id and then get the
3509 * correct config_addrp
3510 */
3511 node_id = sp->config_procp->proc_id;
3512 domainp = sp->config_procp->domainp;
3513
3514 pa = PHYS_ADDR_MCU_REMOTE(node_id) + off;
3515 config_addrp = find_domain_address(domainp, pa);
3516
3517 } else {
3518 /*
3519 * If remote MCU CSR access, use config_addrp to get at the node_id.
3520 */
3521
3522 /* first check if global addressing is allowed for this config */
3523 GLOBAL_ADDRESSING_CHECK(sp, "MCU");
3524
3525 domainp = config_addrp->config_devp->domainp;
3526
3527 for (idx = 0; idx<domainp->procs.count ; idx++) {
3528 node_id = LIST_ENTRY(domainp->procs, idx)->proc_id;
3529 if (config_addrp->baseaddr == PHYS_ADDR_MCU_REMOTE(node_id))
3530 break;
3531 }
3532 }
3533#endif /* } VFALLS */
3534 /*
3535 * FIXME: For the moment we only support 64bit accesses to registers.
3536 */
3537 npp = (ss_proc_t *)config_addrp->config_devp->devp;
3538
3539 if (MA_ldu64!=op && MA_st64!=op) return false;
3540
3541 bank = off >> 12;
3542 ASSERT (bank < npp->num_mbanks); /* this should be enforced by the config_dev range */
3543
3544 dbp = &(npp->mbankp[bank]);
3545
3546 reg = off & ((1<<12)-1);
3547
3548 switch (op) {
3549 case MA_st64:
3550 val = *regp;
3551
3552#define ASSIGN_DB(_n, _m) do { \
3553 dbp->_n &= ~(_m); \
3554 dbp->_n |= (val & (_m)); \
3555 } while (0)
3556
3557 DBGMC(lprintf(sp->gid, "Memory controller bank %d : Write register 0x%lx '%s' value= 0x%llx on node %d\n",
3558 bank, off, mcu_reg_name(reg), val, node_id); );
3559
3560 switch (reg) {
3561 /*
3562 * DRAM controller section 25.10 of N2 RPM 0.9.1
3563 */
3564 case DRAM_CAS_ADDR_WIDTH: ASSIGN_DB( cas_addr_width, MASK64(3, 0) ); break;
3565 case DRAM_RAS_ADDR_WIDTH: ASSIGN_DB( ras_addr_width, MASK64(3, 0) ); break;
3566 case DRAM_CAS_LAT: ASSIGN_DB( cas_lat, MASK64(2, 0) ); break;
3567 case DRAM_SCRUB_FREQ: ASSIGN_DB( scrub_freq, MASK64(11, 0) ); break;
3568 case DRAM_REFRESH_FREQ: ASSIGN_DB( refresh_freq, MASK64(12, 0) ); break;
3569 case DRAM_REFRESH_COUNTER: ASSIGN_DB( refresh_counter, MASK64(12, 0) ); break;
3570 case DRAM_SCRUB_ENABLE: ASSIGN_DB( scrub_enable, MASK64(0, 0) ); break;
3571 case DRAM_TRRD: ASSIGN_DB( trrd, MASK64(3, 0) ); break;
3572 case DRAM_TRC: ASSIGN_DB( trc, MASK64(4, 0) ); break;
3573 case DRAM_TRCD: ASSIGN_DB( trcd, MASK64(3, 0) ); break;
3574 case DRAM_TWTR: ASSIGN_DB( twtr, MASK64(3, 0) ); break;
3575 case DRAM_TRTW: ASSIGN_DB( trtw, MASK64(3, 0) ); break;
3576 case DRAM_TRTP: ASSIGN_DB( trtp, MASK64(2, 0) ); break;
3577 case DRAM_TRAS: ASSIGN_DB( tras, MASK64(3, 0) ); break;
3578 case DRAM_TRP: ASSIGN_DB( trp, MASK64(3, 0) ); break;
3579 case DRAM_TWR: ASSIGN_DB( twr, MASK64(3, 0) ); break;
3580 case DRAM_TRFC: ASSIGN_DB( trfc, MASK64(6, 0) ); break;
3581 case DRAM_TMRD: ASSIGN_DB( tmrd, MASK64(1, 0) ); break;
3582 case DRAM_FAWIN: ASSIGN_DB( fawin, MASK64(4, 0) ); break;
3583 case DRAM_TIWTR: ASSIGN_DB( tiwtr, MASK64(1, 0) ); break;
3584 case DRAM_DIMM_STACK: ASSIGN_DB( dimm_stack, MASK64(0, 0) ); break;
3585 case DRAM_EXT_WR_MODE2: ASSIGN_DB( ext_wr_mode2, MASK64(14, 0) ); break;
3586 case DRAM_EXT_WR_MODE1: ASSIGN_DB( ext_wr_mode1, MASK64(14, 0) ); break;
3587 case DRAM_EXT_WR_MODE3: ASSIGN_DB( ext_wr_mode3, MASK64(14, 0) ); break;
3588 case DRAM_8_BANK_MODE: ASSIGN_DB( eight_bank_mode, MASK64(0, 0) ); break;
3589 case DRAM_BRANCH_DISABLED: ASSIGN_DB( branch_disabled, MASK64(0, 0) ); break;
3590 case DRAM_SEL_LO_ADDR_BITS: ASSIGN_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
3591#ifdef VFALLS
3592 case DRAM_SINGLE_CHNL_MODE: ASSIGN_DB( single_chnl_mode, MASK64(1, 0) ); break;
3593 case DRAM_MIRROR_MODE: ASSIGN_DB( mirror_mode, MASK64(0, 0) ); break;
3594#else
3595 case DRAM_SINGLE_CHNL_MODE: ASSIGN_DB( single_chnl_mode, MASK64(0, 0) ); break;
3596#endif
3597 case DRAM_DIMM_INIT:
3598 if (0LL != (val & ~(3))) goto write_reserved;
3599 dbp->dimm_init = val;
3600 /* DRAM Init sequence done is instantaneous */
3601 dbp->init_status = 1;
3602 break;
3603 case DRAM_INIT_STATUS: ASSIGN_DB( init_status, MASK64(0, 0) ); break;
3604 case DRAM_DIMM_PRESENT: ASSIGN_DB( dimm_present, MASK64(3, 0) ); break;
3605 case DRAM_FAILOVER_STATUS: ASSIGN_DB( failover_status, MASK64(0, 0) ); break;
3606 case DRAM_FAILOVER_MASK: ASSIGN_DB( failover_mask, MASK64(34, 0) ); break;
3607 case DRAM_POWER_DOWN_MODE: ASSIGN_DB( power_down_mode, MASK64(0, 0) ); break;
3608 case FBD_CHNL_STATE:
3609 ASSIGN_DB( fbd_chnl_state.val, MASK64(7, 0) );
3610
3611 /* Update the appropriate _done register */
3612 switch( (val & MASK64(2, 0)) ) {
3613 case 0:
3614 dbp->disable_state_period_done = 1; break;
3615 case 1:
3616 dbp->calibrate_state_period_done = 1; break;
3617 case 2:
3618 dbp->training_state_done = 1; break;
3619 case 3:
3620 dbp->testing_state_done = 1; break;
3621 case 4:
3622 dbp->polling_state_done = 1; break;
3623 case 5:
3624 dbp->config_state_done = 1; break;
3625 default:
3626 lprintf(sp->gid, "Unknown val (0x%llx) being stored to FBD_CHNL_STATE reg on node %d\n",
3627 val, node_id);
3628 }
3629 break;
3630 case FBD_FAST_RESET_FLAG: ASSIGN_DB( fbd_fast_reset_flag, MASK64(3, 0) ); break;
3631 case FBD_CHNL_RESET:
3632 dbp->fbd_chnl_reset = val & 0x3;
3633 /* if FBDINIT is set channel initialization starts */
3634 if ((val & 0x1) == 1){
3635 /* set the proper state value in amb link status */
3636 for (i=0; i<MAX_AMBS; i++)
3637 dbp->fbd_chnl_state.ambstate[i] = L0_STATE;
3638 /* hw clears the bit after init is done */
3639 dbp->fbd_chnl_reset &= ~(1ULL);
3640 }
3641 break;
3642 case TS1_SB_NB_MAPPING: ASSIGN_DB( ts1_sb_nb_mapping, MASK64(2, 0) ); break;
3643 case TS1_TEST_PARAMETER: ASSIGN_DB( ts1_test_parameter, MASK64(23, 0) ); break;
3644 case TS3_FAILOVER_CONFIG: ASSIGN_DB( ts3_failover_config, MASK64(15, 0) ); break;
3645 case DISABLE_STATE_PERIOD: ASSIGN_DB( disable_state_period,MASK64(5, 0) ); break;
3646 case DISABLE_STATE_PERIOD_DONE: ASSIGN_DB( disable_state_period_done, MASK64(0, 0) ); break;
3647 case CALIBRATE_STATE_PERIOD: ASSIGN_DB( calibrate_state_period, MASK64(19, 0) ); break;
3648 case CALIBRATE_STATE_PERIOD_DONE: ASSIGN_DB( calibrate_state_period_done, MASK64(0, 0) ); break;
3649 case TRAINING_STATE_MIN_TIME: ASSIGN_DB( training_state_min_time, MASK64(15, 0) ); break;
3650 case TRAINING_STATE_DONE: ASSIGN_DB( training_state_done, MASK64(1, 0) ); break;
3651 case TRAINING_STATE_TIMEOUT: ASSIGN_DB( training_state_timeout, MASK64(7, 0) ); break;
3652 case TESTING_STATE_DONE: ASSIGN_DB( testing_state_done, MASK64(1, 0) ); break;
3653 case TESTING_STATE_TIMEOUT: ASSIGN_DB( testing_state_timeout, MASK64(7, 0) ); break;
3654 case POLLING_STATE_DONE: ASSIGN_DB( polling_state_done, MASK64(1, 0) ); break;
3655 case POLLING_STATE_TIMEOUT: ASSIGN_DB( polling_state_timeout, MASK64(7, 0) ); break;
3656 case CONFIG_STATE_DONE: ASSIGN_DB( config_state_done, MASK64(1, 0) ); break;
3657 case CONFIG_STATE_TIMEOUT: ASSIGN_DB( config_state_timeout, MASK64(7, 0) ); break;
3658 case DRAM_PER_RANK_CKE: ASSIGN_DB( dram_per_rank_cke, MASK64(15, 0) ); break;
3659 case L0S_DURATION: ASSIGN_DB( l0s_duration, MASK64(6, 0) ); break;
3660 case CHNL_SYNC_FRAME_FREQ: ASSIGN_DB( chnl_sync_frame_freq, MASK64(5, 0) ); break;
3661 case CHNL_READ_LAT: ASSIGN_DB( chnl_read_lat, MASK64(15, 0) ); break;
3662 case CHNL_CAPABILITY: ASSIGN_DB( chnl_capability, MASK64(9, 0) ); break;
3663 case LOOPBACK_MODE_CNTL: ASSIGN_DB( loopback_mode_cntl, MASK64(1, 0) ); break;
3664 case SERDES_CONFIG_BUS: ASSIGN_DB( serdes_config_bus, MASK64(24, 0) ); break;
3665 case SERDES_INVPAIR: ASSIGN_DB( serdes_invpair, MASK64(47, 0) ); break;
3666 case SERDES_TEST_CONFIG_BUS: ASSIGN_DB( serdes_test_config_bus, MASK64(31, 0) ); break;
3667 case CONFIG_REG_ACCESS_ADDR: ASSIGN_DB( config_reg_access_addr, MASK64(15, 0) ); break;
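 /*
 * CONFIG_REG_ACCESS_ADDR/_DATA form an indirect address/data pair:
 * software first writes an AMB id plus register address into the ADDR
 * register, then reads or writes DATA, which the case below routes to
 * the selected AMB's shadow register.
 */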
3668 case CONFIG_REG_ACCESS_DATA:
3669
3670 val32 = (uint32_t) val;
3671 ambid = AMBID(dbp->config_reg_access_addr);
3672 ASSIGN_DB(config_reg_access_data, MASK64(31, 0) );
3673 switch (AMBADDR(dbp->config_reg_access_addr)){
3674#define ASSIGN_AMB(_n, _m) do { \
3675 dbp->amb[ambid]._n = (_m); \
3676 } while (0)
3677 case FBD_VID_DID:
3678 case FBDS:
3679 goto write_reserved;
3680 case EMASK:
3681 ASSIGN_AMB(emask, val32 & 0x3f);
3682 break;
3683 case FERR:
3684 ASSIGN_AMB(ferr, val32 & 0x3f);
3685 break;
3686 case NERR:
3687 ASSIGN_AMB(nerr, val32 & 0x3f);
3688 break;
3689 case PSBYTE3_0:
3690 ASSIGN_AMB(psbyte3_0, val32);
3691 break;
3692 case PSBYTE7_4:
3693 ASSIGN_AMB(psbyte7_4, val32);
3694 break;
3695 case PSBYTE11_8:
3696 ASSIGN_AMB(psbyte11_8, val32);
3697 break;
3698 case PSBYTE13_12:
3699 ASSIGN_AMB(psbyte13_12, val32 & 0x0000ffff);
3700 break;
3701 case C2DINCRCUR_CMD2DATANXT:
3702 ASSIGN_AMB(c2dincrcur_cmd2datanxt, val32 & 0x00ff00ff);
3703 break;
3704 case MBCSR:
3705 /* clear START bit immediately */
3706 ASSIGN_AMB(mbcsr, val32 & 0x7fffffff);
3707 break;
3708 case DAREFTC:
3709 ASSIGN_AMB(dareftc, val32 & 0x00ffffff);
3710 break;
3711 case MTR_DSREFTC:
3712 ASSIGN_AMB(mtr_dsreftc, val32 & 0x7f01fff7);
3713 break;
3714 case DRT:
3715 ASSIGN_AMB(drt, val32 & 0x7f77ffff);
3716 break;
3717 case DRC:
3718 ASSIGN_AMB(drc, val32 & 0x2f87ffff);
3719 break;
3720 case DCALCSR:
3721 ASSIGN_AMB(dcalcsr, val32 & 0xf0607fff);
3722 switch (dbp->amb[ambid].dcalcsr & 0xf) {
3723 case 0:
3724 case 1:
3725 case 2:
3726 case 3:
3727 case 5:
3728 case 0xc:
3729 case 0xd:
3730			/* if START (bit 31) is set, clear bits [31:28] so the operation reports complete immediately */
3731 if (dbp->amb[ambid].dcalcsr & 0x80000000) {
3732 dbp->amb[ambid].dcalcsr &= 0x0fffffff;
3733 }
3734 break;
3735 default:
3736 EXEC_WARNING(("Invalid DCALCSR opcode: 0x%x",
3737 dbp->amb[ambid].dcalcsr & 0xf));
3738 return false;
3739 }
3740 break;
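			/*
			 * Illustrative DCAL flow as modeled here (register
			 * encodings are whatever AMBID()/AMBADDR() decode):
			 * software writes DCALADDR through the indirect
			 * window, then DCALCSR with START (bit 31) plus a
			 * valid opcode, and polls until START reads back 0;
			 * this model clears it in the same store.
			 */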
3741 case DCALADDR:
3742 ASSIGN_AMB(dcaladdr, val32);
3743 break;
3744 case DDR2ODTC:
3745 ASSIGN_AMB(ddr2odtc, val32);
3746 break;
3747 default:
3748 /* illegal reg - an error */
3749			EXEC_WARNING( ("Unimplemented write to AMB register 0x%x, ambid=0x%x",
3750			    AMBADDR(dbp->config_reg_access_addr), ambid) );
3751 return false;
3752 }
3753 break;
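		/*
		 * CONFIG_REG_ACCESS_ADDR/_DATA form a classic address/data
		 * indirection window onto the per-AMB registers. A minimal
		 * sketch of a helper a caller might layer on top of it
		 * (hypothetical names and accessors, not part of this file):
		 *
		 *   static void
		 *   amb_write(uint64_t addr, uint32_t data)
		 *   {
		 *           mcu_store(CONFIG_REG_ACCESS_ADDR, addr); // <ambid, reg>
		 *           mcu_store(CONFIG_REG_ACCESS_DATA, data); // lands in AMB
		 *   }
		 */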
3754 /*
3755 * Performance counter section 10.3 of N2 PRM 1.1
3756 */
3757 case DRAM_PERF_CTL: ASSIGN_DB( perf_ctl, MASK64(7, 0) ); break;
3758 case DRAM_PERF_COUNT: ASSIGN_DB( perf_count, MASK64(63, 0) ); break;
3759 /*
3760 * Error handling section 25.12 of N2 PRM 1.2
3761 */
3762 case DRAM_ERROR_STATUS:
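			/*
			 * Mixed write semantics: bits [63:54] are
			 * write-one-to-clear error flags, bits [15:0] are
			 * plain read/write, and bits [53:16] always clear.
			 */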
3763 dbp->error_status &= ~val;
3764 dbp->error_status &= MASK64(63,54);
3765 dbp->error_status |= val & MASK64(15,0);
3766 break;
3767 case DRAM_ERROR_ADDRESS: ASSIGN_DB( error_address, MASK64(39, 4) ); break;
3768 case DRAM_ERROR_INJECT: ASSIGN_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
3769 case DRAM_ERROR_COUNTER: ASSIGN_DB( error_counter, MASK64(15, 0) ); break;
3770 case DRAM_ERROR_LOCATION: ASSIGN_DB( error_location, MASK64(35, 0) ); break;
3771 case DRAM_ERROR_RETRY: ASSIGN_DB( error_retry, MASK64(63, 63)|MASK64(49,32)|MASK64(17,0) ); break;
3772 case DRAM_FBD_ERROR_SYND: ASSIGN_DB( fbd_error_synd, MASK64(63, 63)|MASK64(29,0) ); break;
3773 case DRAM_FBD_INJ_ERROR_SRC: ASSIGN_DB( fbd_inj_error_src, MASK64(1, 0) ); break;
3774 case DRAM_FBR_COUNT: ASSIGN_DB( fbr_count, MASK64(16, 0) ); break;
3775 /*
3776 * Power management section 26.3 of N2 PRM 0.9.1
3777 */
3778 case DRAM_OPEN_BANK_MAX: ASSIGN_DB( open_bank_max, MASK64(16, 0) ); break;
3779 case DRAM_PROG_TIME_CNTR: ASSIGN_DB( prog_time_cntr, MASK64(15, 0) ); break;
3780 /*
3781 * Hardware debug section 29.2.2 of N2 PRM 0.9.1
3782 */
3783 case DRAM_DBG_TRG_EN: ASSIGN_DB( dbg_trg_en, MASK64(2, 2) ); break;
3784 case IBIST_NBFIB_CTL:
3785 /*
3786 * Set done bit (34) immediately if start bit (32)
3787 * is set.
3788 */
3789 if (val & (1LL << 32)) {
3790 dbp->ibist_nbfib_ctl = (val | (1LL << 34));
3791			ambid = (dbp->fbd_chnl_state.val & (0xF << 3)) >> 3;
3793 dbp->fbd_chnl_state.ambstate[ambid] = L0_STATE;
3794 } else {
3795 dbp->ibist_nbfib_ctl = val;
3796 }
3797 break;
3798 case IBIST_SBFIB_CTL:
3799 /*
3800 * Set done bit (34) immediately if start bit (32)
3801 * is set.
3802 */
3803 if (val & (1LL << 32)) {
3804 dbp->ibist_sbfib_ctl = (val | (1LL << 34));
3805 } else {
3806 dbp->ibist_sbfib_ctl = val;
3807 }
3808 break;
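		/*
		 * Both IBIST registers share a start/done handshake that this
		 * model completes instantly. Illustrative caller sequence
		 * (hypothetical accessors):
		 *
		 *   mcu_store(IBIST_NBFIB_CTL, ctl | (1ULL << 32));  // START
		 *   while (!(mcu_load(IBIST_NBFIB_CTL) & (1ULL << 34)))
		 *           ;                                         // DONE
		 */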
3809 default:
3810 /* illegal reg - an error */
3811 return false;
3812 }
3813 break;
3814
3815write_reserved:
3816 EXEC_WARNING( ("Attempted write to reserved field in dram controller: Write 0x%llx to bank %d, register %s (offset 0x%x) on node %d",
3817 val, bank, mcu_reg_name(reg), reg, node_id ) );
3818 return false;
3819
3820 case MA_ldu64:
3821#define RETRIEVE_DB(_n, _m) do { val = ((dbp->_n) & (_m)); } while (0)
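	/* Load-side mirror of ASSIGN_DB: RETRIEVE_DB(trc, MASK64(4, 0))
	 * expands to val = ((dbp->trc) & (MASK64(4, 0))), re-applying the
	 * register's architected width on read-back. */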
3822 switch (reg) {
3823 /*
3824	 * DRAM controller section 25.10 of N2 PRM 0.9.1
3825 */
3826 case DRAM_CAS_ADDR_WIDTH: RETRIEVE_DB( cas_addr_width, MASK64(3, 0) ); break;
3827 case DRAM_RAS_ADDR_WIDTH: RETRIEVE_DB( ras_addr_width, MASK64(3, 0) ); break;
3828 case DRAM_CAS_LAT: RETRIEVE_DB( cas_lat, MASK64(2, 0) ); break;
3829 case DRAM_SCRUB_FREQ: RETRIEVE_DB( scrub_freq, MASK64(11, 0) ); break;
3830 case DRAM_REFRESH_FREQ: RETRIEVE_DB( refresh_freq, MASK64(12, 0) ); break;
3831 case DRAM_REFRESH_COUNTER: RETRIEVE_DB( refresh_counter, MASK64(12, 0) ); break;
3832 case DRAM_SCRUB_ENABLE: RETRIEVE_DB( scrub_enable, MASK64(0, 0) ); break;
3833 case DRAM_TRRD: RETRIEVE_DB( trrd, MASK64(3, 0) ); break;
3834 case DRAM_TRC: RETRIEVE_DB( trc, MASK64(4, 0) ); break;
3835 case DRAM_TRCD: RETRIEVE_DB( trcd, MASK64(3, 0) ); break;
3836 case DRAM_TWTR: RETRIEVE_DB( twtr, MASK64(3, 0) ); break;
3837 case DRAM_TRTW: RETRIEVE_DB( trtw, MASK64(3, 0) ); break;
3838 case DRAM_TRTP: RETRIEVE_DB( trtp, MASK64(2, 0) ); break;
3839 case DRAM_TRAS: RETRIEVE_DB( tras, MASK64(3, 0) ); break;
3840 case DRAM_TRP: RETRIEVE_DB( trp, MASK64(3, 0) ); break;
3841 case DRAM_TWR: RETRIEVE_DB( twr, MASK64(3, 0) ); break;
3842 case DRAM_TRFC: RETRIEVE_DB( trfc, MASK64(6, 0) ); break;
3843 case DRAM_TMRD: RETRIEVE_DB( tmrd, MASK64(1, 0) ); break;
3844 case DRAM_FAWIN: RETRIEVE_DB( fawin, MASK64(4, 0) ); break;
3845 case DRAM_TIWTR: RETRIEVE_DB( tiwtr, MASK64(1, 0) ); break;
3846 case DRAM_DIMM_STACK: RETRIEVE_DB( dimm_stack, MASK64(0, 0) ); break;
3847 case DRAM_EXT_WR_MODE2: RETRIEVE_DB( ext_wr_mode2, MASK64(14, 0) ); break;
3848 case DRAM_EXT_WR_MODE1: RETRIEVE_DB( ext_wr_mode1, MASK64(14, 0) ); break;
3849 case DRAM_EXT_WR_MODE3: RETRIEVE_DB( ext_wr_mode3, MASK64(14, 0) ); break;
3850 case DRAM_8_BANK_MODE: RETRIEVE_DB( eight_bank_mode, MASK64(0, 0) ); break;
3851 case DRAM_BRANCH_DISABLED: RETRIEVE_DB( branch_disabled, MASK64(0, 0) ); break;
3852 case DRAM_SEL_LO_ADDR_BITS: RETRIEVE_DB( sel_lo_addr_bits, MASK64(0, 0) ); break;
3853#ifdef VFALLS
3854 case DRAM_SINGLE_CHNL_MODE: RETRIEVE_DB( single_chnl_mode, MASK64(1, 0) ); break;
3855 case DRAM_MIRROR_MODE: RETRIEVE_DB( mirror_mode, MASK64(0, 0) ); break;
3856#else
3857 case DRAM_SINGLE_CHNL_MODE: RETRIEVE_DB( single_chnl_mode, MASK64(0, 0) ); break;
3858#endif
3859 case DRAM_DIMM_INIT: RETRIEVE_DB( dimm_init, MASK64(2, 0) ); break;
3860 case DRAM_INIT_STATUS: RETRIEVE_DB( init_status, MASK64(0, 0) ); break;
3861 case DRAM_DIMM_PRESENT: RETRIEVE_DB( dimm_present, MASK64(3, 0) ); break;
3862 case DRAM_FAILOVER_STATUS: RETRIEVE_DB( failover_status, MASK64(0, 0) ); break;
3863 case DRAM_FAILOVER_MASK: RETRIEVE_DB( failover_mask, MASK64(34, 0) ); break;
3864 case DRAM_POWER_DOWN_MODE: RETRIEVE_DB( power_down_mode, MASK64(0, 0) ); break;
3865 case FBD_CHNL_STATE:
3866		/* return the link state of the AMB currently selected */
3867		/* extract ambid from bits [6:3] */
3868 ambid = (dbp->fbd_chnl_state.val & (0xF << 3)) >> 3;
3869 /* use it to index into state value */
3870 val = (((dbp->fbd_chnl_state.val) & ~(0x7)) |
3871 (dbp->fbd_chnl_state.ambstate[ambid]));
3872 break;
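		/*
		 * Worked example (selector value assumed): if software last
		 * wrote 0x28, bits [6:3] give ambid 5, so the read returns
		 * (0x28 & ~0x7) | ambstate[5], i.e. the stored selector with
		 * that AMB's 3-bit link state merged into bits [2:0].
		 */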
3873 case FBD_FAST_RESET_FLAG: RETRIEVE_DB( fbd_fast_reset_flag, MASK64(3, 0) ); break;
3874 case FBD_CHNL_RESET: RETRIEVE_DB( fbd_chnl_reset, MASK64(1, 0) ); break;
3875 case TS1_SB_NB_MAPPING: RETRIEVE_DB( ts1_sb_nb_mapping, MASK64(2, 0) ); break;
3876 case TS1_TEST_PARAMETER: RETRIEVE_DB( ts1_test_parameter, MASK64(23, 0) ); break;
3877 case TS3_FAILOVER_CONFIG: RETRIEVE_DB( ts3_failover_config, MASK64(15, 0) ); break;
3878 case ELECTRICAL_IDLE_DETECTED: RETRIEVE_DB( electrical_idle_detected, MASK64(27,0) ); break;
3879 case DISABLE_STATE_PERIOD: RETRIEVE_DB( disable_state_period, MASK64(5, 0) ); break;
3880 case DISABLE_STATE_PERIOD_DONE: RETRIEVE_DB( disable_state_period_done, MASK64(0, 0) ); break;
3881 case CALIBRATE_STATE_PERIOD: RETRIEVE_DB( calibrate_state_period, MASK64(19, 0) ); break;
3882 case CALIBRATE_STATE_PERIOD_DONE: RETRIEVE_DB( calibrate_state_period_done, MASK64(0, 0) ); break;
3883 case TRAINING_STATE_MIN_TIME: RETRIEVE_DB( training_state_min_time, MASK64(15, 0) ); break;
3884 case TRAINING_STATE_DONE: RETRIEVE_DB( training_state_done, MASK64(1, 0) ); break;
3885 case TRAINING_STATE_TIMEOUT: RETRIEVE_DB( training_state_timeout, MASK64(7, 0) ); break;
3886 case TESTING_STATE_DONE: RETRIEVE_DB( testing_state_done, MASK64(1, 0) ); break;
3887 case TESTING_STATE_TIMEOUT: RETRIEVE_DB( testing_state_timeout, MASK64(7, 0) ); break;
3888 case POLLING_STATE_DONE: RETRIEVE_DB( polling_state_done, MASK64(1, 0) ); break;
3889 case POLLING_STATE_TIMEOUT: RETRIEVE_DB( polling_state_timeout, MASK64(7, 0) ); break;
3890 case CONFIG_STATE_DONE: RETRIEVE_DB( config_state_done, MASK64(1, 0) ); break;
3891 case CONFIG_STATE_TIMEOUT: RETRIEVE_DB( config_state_timeout, MASK64(7, 0) ); break;
3892 case DRAM_PER_RANK_CKE: RETRIEVE_DB( dram_per_rank_cke, MASK64(15, 0) ); break;
3893 case L0S_DURATION: RETRIEVE_DB( l0s_duration, MASK64(6, 0) ); break;
3894 case CHNL_SYNC_FRAME_FREQ: RETRIEVE_DB( chnl_sync_frame_freq, MASK64(5, 0) ); break;
3895 case CHNL_READ_LAT: RETRIEVE_DB( chnl_read_lat, MASK64(15, 0) ); break;
3896 case CHNL_CAPABILITY: RETRIEVE_DB( chnl_capability, MASK64(9, 0) ); break;
3897 case LOOPBACK_MODE_CNTL: RETRIEVE_DB( loopback_mode_cntl, MASK64(1, 0) ); break;
3898 case SERDES_CONFIG_BUS: RETRIEVE_DB( serdes_config_bus, MASK64(24, 0) ); break;
3899 case SERDES_INVPAIR: RETRIEVE_DB( serdes_invpair, MASK64(47, 0) ); break;
3900 case SERDES_TEST_CONFIG_BUS: RETRIEVE_DB( serdes_test_config_bus, MASK64(31, 0) ); break;
3901 case CONFIG_REG_ACCESS_ADDR: RETRIEVE_DB( config_reg_access_addr, MASK64(15, 0) ); break;
3902 case CONFIG_REG_ACCESS_DATA:
3903 ambid = AMBID(dbp->config_reg_access_addr);
3904 switch (AMBADDR(dbp->config_reg_access_addr)){
3905 case FBD_VID_DID: val=(uint64_t)dbp->amb[ambid].vid_did; break;
3906 case FBDS: val=dbp->amb[ambid].fbds; break;
3907 case EMASK: val=dbp->amb[ambid].emask; break;
3908 case FERR: val=dbp->amb[ambid].ferr; break;
3909 case NERR: val=dbp->amb[ambid].nerr; break;
3910 case PSBYTE3_0: val=dbp->amb[ambid].psbyte3_0; break;
3911 case PSBYTE7_4: val=dbp->amb[ambid].psbyte7_4; break;
3912 case PSBYTE11_8: val=dbp->amb[ambid].psbyte11_8; break;
3913 case PSBYTE13_12: val=dbp->amb[ambid].psbyte13_12; break;
3914 case C2DINCRCUR_CMD2DATANXT: val=dbp->amb[ambid].c2dincrcur_cmd2datanxt;
3915 break;
3916 case MBCSR: val=dbp->amb[ambid].mbcsr; break;
3917 case DAREFTC: val=dbp->amb[ambid].dareftc; break;
3918 case MTR_DSREFTC: val=dbp->amb[ambid].mtr_dsreftc; break;
3919 case DRT: val=dbp->amb[ambid].drt; break;
3920 case DRC: val=dbp->amb[ambid].drc; break;
3921 case DCALCSR: val=dbp->amb[ambid].dcalcsr; break;
3922 case DCALADDR: val=dbp->amb[ambid].dcaladdr; break;
3923 case DDR2ODTC: val=dbp->amb[ambid].ddr2odtc; break;
3924 default:
3925 /* illegal reg - an error */
3926			EXEC_WARNING( ("Unimplemented read of AMB register 0x%x, ambid=0x%x",
3927			    AMBADDR(dbp->config_reg_access_addr), ambid) );
3928 return false;
3929 }
3930 break;
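		/* Reads mirror the write path above: program
		 * CONFIG_REG_ACCESS_ADDR with <ambid, register>, then a load
		 * from CONFIG_REG_ACCESS_DATA returns that AMB register. */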
3931
3932 /*
3933 * Performance counter section 10.3 of N2 PRM 1.1
3934 */
3935 case DRAM_PERF_CTL: RETRIEVE_DB( perf_ctl, MASK64(7, 0) ); break;
3936 case DRAM_PERF_COUNT: RETRIEVE_DB( perf_count, MASK64(63, 0) ); break;
3937 /*
3938 * Error handling section 25.12 of N2 PRM 1.2
3939 */
3940 case DRAM_ERROR_STATUS: RETRIEVE_DB( error_status, MASK64(63, 0) ); break;
3941 case DRAM_ERROR_ADDRESS: RETRIEVE_DB( error_address, MASK64(39, 4) ); break;
3942 case DRAM_ERROR_INJECT: RETRIEVE_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break;
3943 case DRAM_ERROR_COUNTER: RETRIEVE_DB( error_counter, MASK64(15, 0) ); break;
3944 case DRAM_ERROR_LOCATION: RETRIEVE_DB( error_location, MASK64(35, 0) ); break;
3945 case DRAM_ERROR_RETRY: RETRIEVE_DB( error_retry, MASK64(63, 63)|MASK64(49,32)|MASK64(17,0) ); break;
3946 case DRAM_FBD_ERROR_SYND: RETRIEVE_DB( fbd_error_synd, MASK64(63, 63)|MASK64(29,0) ); break;
3947 case DRAM_FBD_INJ_ERROR_SRC: RETRIEVE_DB( fbd_inj_error_src, MASK64(1, 0) ); break;
3948 case DRAM_FBR_COUNT: RETRIEVE_DB( fbr_count, MASK64(16, 0) ); break;
3949 /*
3950 * Power management section 26.3 of N2 PRM 0.9.1
3951 */
3952 case DRAM_OPEN_BANK_MAX: RETRIEVE_DB( open_bank_max, MASK64(16, 0) ); break;
3953 case DRAM_PROG_TIME_CNTR: RETRIEVE_DB( prog_time_cntr, MASK64(15, 0) ); break;
3954 /*
3955 * Hardware debug section 29.2.2 of N2 PRM 0.9.1
3956 */
3957 case DRAM_DBG_TRG_EN: RETRIEVE_DB( dbg_trg_en, MASK64(2, 2) ); break;
3958 case IBIST_NBFIB_CTL: RETRIEVE_DB( ibist_nbfib_ctl, MASK64(53, 0) ); break;
3959 case IBIST_SBFIB_CTL: RETRIEVE_DB( ibist_sbfib_ctl, MASK64(55, 0) ); break;
3960 default:
3961 /* illegal reg - an error */
3962 return false;
3963 }
3964
3965 DBGMC(lprintf(sp->gid, "Memory controller bank %d : Read register 0x%lx '%s' value= 0x%llx on node %d\n",
3966 bank, off, mcu_reg_name(reg), val, node_id); );
3967
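	/* SPARC %g0 always reads as zero, so a load whose destination is
	 * the global-zero register discards the value instead of writing
	 * it back. */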
3968 if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
3969 *regp = val;
3970 break;
3971
3972 default:
3973 ASSERT(0);
3974 }
3975
3976 return true;
3977}
3978
3979
3980/*
3981 * Create address mapping to access PCIE Cfg/IO, MEM32 and MEM64 space
3982 */
3983void niagara2_pcie_mapping(simcpu_t *sp, ncu_t *ncup, piu_region_t region)
3984{
3985 uint64_t base, mask, size;
3986 bool_t enable;
3987 const char *name[3] = {"Cfg/IO", "Mem32", "Mem64"};
3988
3989 switch (region) {
3990 case PIU_REGION_CFGIO:
3991 base = ncup->regs.pcie_a_iocon_offset_base;
3992 mask = ncup->regs.pcie_a_iocon_offset_mask;
3993 break;
3994 case PIU_REGION_MEM32:
3995 base = ncup->regs.pcie_a_mem32_offset_base;
3996 mask = ncup->regs.pcie_a_mem32_offset_mask;
3997 break;
3998 case PIU_REGION_MEM64:
3999 base = ncup->regs.pcie_a_mem64_offset_base;
4000 mask = ncup->regs.pcie_a_mem64_offset_mask;
4001 break;
4002 default:
4003 ASSERT(0);
4004 }
4005
4006 enable = GETMASK64(base,63,63);
4007 base &= PIU_REGION_OFFSET_MASK;
4008 mask &= PIU_REGION_OFFSET_MASK;
4009
4010 if (enable) {
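		/*
		 * BAR-style size derivation: MASK64(63,36) covers everything
		 * above the 36-bit PA window, so ~(MASK64(63,36)|mask) + 1 is
		 * the span the mask leaves variable. Worked example (mask
		 * value assumed): mask = MASK64(35,28) gives
		 * ~(MASK64(63,36)|mask) = MASK64(27,0), so size = 1 << 28,
		 * i.e. a 256MB region.
		 */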
4011 size = ~(MASK64(63,36)|mask) + 1;
4012
4013 ncup->map[region].base = base;
4014 ncup->map[region].mask = mask;
4015 ncup->map[region].size = size;
4016 ncup->map[region].enable = enable;
4017
4018 DBGDEV(lprintf(sp->gid, "PCIE %s is mapped at 0x%llx - 0x%llx of node %d\n",
4019 name[region], base, base+size-1, ncup->node_id); );
4020 }
4021}