Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / ontario / src / setup.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: setup.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)setup.s 1.50 07/05/29 SMI"
50
51/*
52 * Routines that configure the hypervisor
53 */
54
55#include <sys/asm_linkage.h>
56#include <sys/htypes.h>
57#include <hprivregs.h>
58#include <asi.h>
59#include <fpga.h>
60#include <intr.h>
61#include <sun4v/traps.h>
62#include <sun4v/mmu.h>
63#include <sun4v/asi.h>
64#include <sun4v/queue.h>
65#include <devices/pc16550.h>
66
67#include <config.h>
68#include <guest.h>
69#include <offsets.h>
70#include <md.h>
71#include <dram.h>
72#include <cpu_errs.h>
73#include <svc.h>
74#include <vdev_intr.h>
75#include <abort.h>
76#include <util.h>
77#include <debug.h>
78#include <mmu.h>
79#include <ldc.h>
80#include <fire.h>
81
/*
 * HVALLOC(root, size, ptr, tmp)
 * Bump-allocate 'size' bytes from the hypervisor heap: loads the
 * current break (CONFIG_BRK field of the config struct at 'root')
 * into 'ptr' and stores back break+size.  'tmp' is clobbered.
 * NOTE(review): no alignment or overflow checking is done here —
 * presumably callers pass pre-aligned sizes; confirm at call sites.
 */
82#define HVALLOC(root, size, ptr, tmp) \
83 ldx [root + CONFIG_BRK], ptr ;\
84 add ptr, size, tmp ;\
85 stx tmp, [root + CONFIG_BRK]
86
/*
 * STRANDID_2_MAUPID(cpu, mau)
 * Convert a strand id in 'cpu' to its MAU parent id in 'mau' by
 * shifting out the per-core strand bits (STRANDID_2_COREID_SHIFT),
 * i.e. the MAU id equals the core id.
 */
87#define STRANDID_2_MAUPID(cpu, mau) \
88 srlx cpu, STRANDID_2_COREID_SHIFT, mau
89
90
/*
 * Register aliases for the setup routines in this file: the global
 * config pointer arrives in %i0 and the base of the guest structures
 * in %i1 (see the "in:" blocks of the entry points below).
 */
91#define CONFIG %i0
92#define GUESTS %i1
93
94
95
96
97#if defined(CONFIG_FIRE)
98/*
99 * setup_fire: Initialize Fire
100 *
101 * in:
102 * %i0 - global config pointer
103 * %i1 - base of guests
104 * %i2 - base of cpus
105 * %g7 - return address
106 *
107 * volatile:
108 * %locals
109 * %outs
110 * %globals
111 */
112 ENTRY_NP(setup_fire)
113
114 mov %g7, %l7 /* save return address */
115 PRINT("HV:setup_fire\r\n")
116 /*
117 * Relocate Fire TSB base pointers
118 */
	/*
	 * %o0 = CONFIG_RELOC, the relocation bias for this image; every
	 * absolute pointer stored in the fire cookies below is rebased
	 * by subtracting it.  Each fixup is done twice, once per Fire
	 * leaf: the second cookie sits FIRE_COOKIE_SIZE past the first,
	 * and %o1 is stepped forward and back between the pairs.
	 */
119 ldx [%i0 + CONFIG_RELOC], %o0
120 setx fire_dev, %o2, %o1
121 sub %o1, %o0, %o1
122 ldx [%o1 + FIRE_COOKIE_IOTSB], %o2
123 sub %o2, %o0, %o2
124 stx %o2, [%o1 + FIRE_COOKIE_IOTSB]
125 add %o1, FIRE_COOKIE_SIZE, %o1
126 ldx [%o1 + FIRE_COOKIE_IOTSB], %o2
127 sub %o2, %o0, %o2
128 stx %o2, [%o1 + FIRE_COOKIE_IOTSB]
129
130 sub %o1, FIRE_COOKIE_SIZE, %o1
131
132 /*
133 * Relocate Fire MSI EQ base pointers
134 */
135 ldx [%o1 + FIRE_COOKIE_MSIEQBASE], %o2
136 sub %o2, %o0, %o2
137 stx %o2, [%o1 + FIRE_COOKIE_MSIEQBASE]
138 add %o1, FIRE_COOKIE_SIZE, %o1
139 ldx [%o1 + FIRE_COOKIE_MSIEQBASE], %o2
140 sub %o2, %o0, %o2
141 stx %o2, [%o1 + FIRE_COOKIE_MSIEQBASE]
142
143 sub %o1, FIRE_COOKIE_SIZE, %o1
144
145 /*
146 * Relocate Fire Virtual Interrupt pointer
147 */
148 ldx [%o1 + FIRE_COOKIE_VIRTUAL_INTMAP], %o2
149 sub %o2, %o0, %o2
150 stx %o2, [%o1 + FIRE_COOKIE_VIRTUAL_INTMAP]
151 add %o1, FIRE_COOKIE_SIZE, %o1
152 ldx [%o1 + FIRE_COOKIE_VIRTUAL_INTMAP], %o2
153 sub %o2, %o0, %o2
154 stx %o2, [%o1 + FIRE_COOKIE_VIRTUAL_INTMAP]
155
156 sub %o1, FIRE_COOKIE_SIZE, %o1
157
158 /*
159 * Relocate Fire MSI and ERR Cookies
160 */
	/*
	 * These two are double-indirect: the cookie pointer itself is
	 * rebased (%o2), then the FIRE_*_COOKIE_FIRE back-pointer stored
	 * inside the pointed-to structure is rebased as well (%o4).
	 */
161
162 ldx [%o1 + FIRE_COOKIE_ERRCOOKIE], %o2
163 sub %o2, %o0, %o2
164 stx %o2, [%o1 + FIRE_COOKIE_ERRCOOKIE]
165 ldx [%o2 + FIRE_ERR_COOKIE_FIRE], %o4
166 sub %o4, %o0, %o4
167 stx %o4, [%o2 + FIRE_ERR_COOKIE_FIRE]
168 add %o1, FIRE_COOKIE_SIZE, %o1
169 ldx [%o1 + FIRE_COOKIE_ERRCOOKIE], %o2
170 sub %o2, %o0, %o2
171 stx %o2, [%o1 + FIRE_COOKIE_ERRCOOKIE]
172 ldx [%o2 + FIRE_ERR_COOKIE_FIRE], %o4
173 sub %o4, %o0, %o4
174 stx %o4, [%o2 + FIRE_ERR_COOKIE_FIRE]
175
176 sub %o1, FIRE_COOKIE_SIZE, %o1
177
178 ldx [%o1 + FIRE_COOKIE_MSICOOKIE], %o2
179 sub %o2, %o0, %o2
180 stx %o2, [%o1 + FIRE_COOKIE_MSICOOKIE]
181 ldx [%o2 + FIRE_MSI_COOKIE_FIRE], %o4
182 sub %o4, %o0, %o4
183 stx %o4, [%o2 + FIRE_MSI_COOKIE_FIRE]
184 add %o1, FIRE_COOKIE_SIZE, %o1
185 ldx [%o1 + FIRE_COOKIE_MSICOOKIE], %o2
186 sub %o2, %o0, %o2
187 stx %o2, [%o1 + FIRE_COOKIE_MSICOOKIE]
188 ldx [%o2 + FIRE_MSI_COOKIE_FIRE], %o4
189 sub %o4, %o0, %o4
190 stx %o4, [%o2 + FIRE_MSI_COOKIE_FIRE]
191
	/*
	 * Rebase the FIRE_NEQS per-EQ base pointers inside each of the
	 * two fire_msi cookies (first leaf's array, then the second's,
	 * FIRE_MSI_COOKIE_SIZE further on).  %o3 counts EQs remaining;
	 * %o2 walks the EQ array in FIRE_MSIEQ_SIZE steps.
	 */
192 setx fire_msi, %o2, %o1
193 sub %o1, %o0, %o1
194
195 mov FIRE_NEQS, %o3
196 add %o1, FIRE_MSI_COOKIE_EQ, %o2
1970:
198 ldx [%o2 + FIRE_MSIEQ_BASE], %o4
199 sub %o4, %o0, %o4
200 stx %o4, [%o2 + FIRE_MSIEQ_BASE]
201 add %o2, FIRE_MSIEQ_SIZE, %o2
202 subcc %o3, 1, %o3
203 bnz 0b
204 nop
205
206 add %o1, FIRE_MSI_COOKIE_SIZE, %o1
207
208 mov FIRE_NEQS, %o3
209 add %o1, FIRE_MSI_COOKIE_EQ, %o2
2100:
211 ldx [%o2 + FIRE_MSIEQ_BASE], %o4
212 sub %o4, %o0, %o4
213 stx %o4, [%o2 + FIRE_MSIEQ_BASE]
214 add %o2, FIRE_MSIEQ_SIZE, %o2
215 subcc %o3, 1, %o3
216 bnz 0b
217 nop
218
	! Tail-call into fire_init with the caller's return address
	! restored to %g7 in the delay slot; fire_init returns for us.
219 ba fire_init
220 mov %l7, %g7
221 SET_SIZE(setup_fire)
222#endif
223
224
225 /*
226 * The FPGA interrupt output is an active-low level interrupt.
227 * The Niagara SSI interrupt input is falling-edge-triggered.
228 * We can lose an interrupt across a warm reset so workaround
229 * that by injecting a fake SSI interrupt at start-up time.
230 */
231#ifdef CONFIG_FPGA /* Don't touch fpga hardware if it isn't there */
	/*
	 * fake_ssiirq: inject a fake SSI interrupt (see comment above —
	 * the falling-edge SSI input can lose a level interrupt across a
	 * warm reset).  Reads the SSI INT_MAN mapping (target cpu |
	 * vector) from the IOB and writes it to INT_VEC_DIS, which
	 * presumably dispatches that vector to the mapped cpu — confirm
	 * against the IOB register spec.
	 *
	 * in:    %g7 - return address (HVRET)
	 * clobbers: %o1, %o2
	 */
232 ENTRY_NP(fake_ssiirq)
233 setx IOBBASE, %o1, %o2
234 ldx [%o2 + INT_MAN + INT_MAN_DEV_OFF(IOBDEV_SSI)], %o1
235 stx %o1, [%o2 + INT_VEC_DIS]
236 HVRET
237 SET_SIZE(fake_ssiirq)
238#endif /* CONFIG_FPGA */
239
240
241
242
243/*
244 * dummy tsb for the hypervisor to use
245 */
246BSS_GLOBAL(dummytsb, DUMMYTSB_SIZE, DUMMYTSB_ALIGN)
247
248
249/*
250 * setup_iob
251 *
252 * in:
253 * %i0 - global config pointer
254 * %i1 - base of guests
255 * %i2 - base of cpus
256 * %g7 - return address
257 *
258 * volatile:
259 * %locals
260 */
261 ENTRY_NP(setup_iob)
262#ifdef CONFIG_FPGA
263 ldx [CONFIG + CONFIG_INTRTGT], %g1
264 setx IOBBASE, %g3, %g2
265 ! %g1 = intrtgt CPUID array (8-bits per INT_MAN target)
266 ! %g2 = IOB Base address
267
268 /*
269 * Clear interrupts for both SSIERR and SSI
270 *
271 * PRM: "After setting the MASK bit, software needs to issue a
272 * read on the INT_CTL register to guarantee the masking write
273 * is completed."
274 */
	! The ldx into %g0 is the PRM-mandated flushing read; the loaded
	! value is deliberately discarded.
275 mov INT_CTL_MASK, %g4
276 stx %g4, [%g2 + INT_CTL + INT_CTL_DEV_OFF(IOBDEV_SSIERR)]
277 ldx [%g2 + INT_CTL + INT_CTL_DEV_OFF(IOBDEV_SSIERR)], %g0
278 stx %g4, [%g2 + INT_CTL + INT_CTL_DEV_OFF(IOBDEV_SSI)]
279 ldx [%g2 + INT_CTL + INT_CTL_DEV_OFF(IOBDEV_SSI)], %g0
280
281 /*
282 * setup the map registers for the SSI
283 */
	! Each srl below consumes another INTRTGT_DEVSHIFT-bit field of
	! the packed intrtgt array in %g1; the shifts are cumulative
	! (dev1 first, then dev2).
284
285 /* SSI Error interrupt */
286 srl %g1, INTRTGT_DEVSHIFT, %g1 ! get dev1 bits in bottom
287 and %g1, INTRTGT_CPUMASK, %g3
288 sllx %g3, INT_MAN_CPU_SHIFT, %g3 ! int_man.cpu
289 or %g3, VECINTR_SSIERR, %g3 ! int_man.vecnum
290 stx %g3, [%g2 + INT_MAN + INT_MAN_DEV_OFF(IOBDEV_SSIERR)]
291
292 /* SSI Interrupt */
293 srl %g1, INTRTGT_DEVSHIFT, %g1 ! get dev2 bits in bottom
294 and %g1, INTRTGT_CPUMASK, %g3
295 sllx %g3, INT_MAN_CPU_SHIFT, %g3 ! int_man.cpu
296 or %g3, VECINTR_FPGA, %g3 ! int_man.vecnum
297 stx %g3, [%g2 + INT_MAN + INT_MAN_DEV_OFF(IOBDEV_SSI)]
	! Unmask SSI (clear INT_CTL mask).  NOTE(review): SSIERR is left
	! masked here — it is unmasked later by setup_jbi_err_interrupts.
298 stx %g0, [%g2 + INT_CTL + INT_CTL_DEV_OFF(IOBDEV_SSI)]
299
300#endif /* CONFIG_FPGA */
301
302 /*
303 * Set J_INT_VEC to target all JBus interrupts to vec# VECINTR_DEV
304 */
305 setx IOBBASE + J_INT_VEC, %l2, %l1
306 mov VECINTR_DEV, %l2
307 stx %l2, [%l1]
308
	! Return to caller (%g7 + 4, skipping the caller's delay slot).
309 jmp %g7 + 4
310 nop
311 SET_SIZE(setup_iob)
312
313
314/*
315 * JBI_TRANS_TIMEOUT_VALUE - number of JBus clocks transactions must be
316 * completed in
317 *
318 * We need a JBus transaction timeout that's at least as long as Fire's
319 * transaction timeout. The Fire TLU CTO is currently set to 67.1ms.
320 * 80ms seems like a fine value.
321 *
322 * This value is dependent on the vpci_fire.s fire_leaf_init_table entry
323 * for the FIRE_PLC_TLU_CTB_TLR_TLU_CTL register. Changes to either value
324 * may require the other value to change as well.
325 */
326#define JBI_TRANS_TIMEOUT_MS 80
327
328#define JBI_FREQUENCY (200 MHZ) /* assumed */
329#define JBI_NS_PER_CLOCK (NS_PER_S / JBI_FREQUENCY)
330#if JBI_NS_PER_CLOCK == 0
331#error "Invalid JBI_FREQUENCY"
332#endif
333#define JBI_TRANS_TIMEOUT_VALUE \
334 (JBI_TRANS_TIMEOUT_MS * MS_PER_NS / JBI_NS_PER_CLOCK)
335
336/*
337 * setup_jbi - configure JBI global settings
338 *
339 * in:
340 * %i0 - global config pointer
341 * %i1 - base of guests
342 * %i2 - base of cpus
343 * %g7 - return address
344 *
345 * volatile:
346 * %locals
347 */
348 ENTRY_NP(setup_jbi)
349 /*
350 * The JBI transaction timeout (JBI_TRANS_TIMEOUT) must be at
351 * least as long as the Fire transaction timeout (TLU CTO).
352 */
	! Write JBI_TRANS_TIMEOUT_VALUE (JBus clocks; derived from the
	! 80ms / 200MHz constants above) to the JBI_TRANS_TIMEOUT csr.
	! Clobbers %g1, %g2; returns via HVRET (%g7).
353 setx JBI_TRANS_TIMEOUT, %g2, %g1
354 set JBI_TRANS_TIMEOUT_VALUE, %g2
355 stx %g2, [%g1]
356
357 HVRET
358 SET_SIZE(setup_jbi)
359
360
361/*
362 * Enable JBI error interrupts
363 *
364 * %g1 - errors to be enabled
365 * %g2 - clear SSIERR mask (true/false)
366 * %g3, %g4 - clobbered
367 * %g7 return address
368 */
369 ENTRY(setup_jbi_err_interrupts)
370#ifdef CONFIG_FPGA /* { */
371 /*
372 * Enable All JBUS errors which generate an SSI interrupt
373 */
374 ENABLE_JBI_INTR_ERRS(%g1, %g3, %g4)
375
376 /*
377 * Enable interrupts for SSIERR by clearing the MASK bit
378 */
379
	! Skip the unmask when %g2 == 0 (caller did not request the
	! SSIERR mask be cleared).
380 brz,a %g2, 1f
381 nop
382
383 setx IOBBASE, %g3, %g4
384 stx %g0, [%g4 + INT_CTL + INT_CTL_DEV_OFF(IOBDEV_SSIERR)]
3851:
386#endif /* } CONFIG_FPGA */
387 HVRET
388 SET_SIZE(setup_jbi_err_interrupts)
389
390#ifdef CONFIG_SVC /* { */
391
392/*
393 * c_svc_register() requires that we have these 2 null functions
394 * declared here.
395 */
396/*
397 * error_svc_rx
398 *
399 * %g1 callback cookie
400 * %g2 svc pointer
401 * %g7 return address
402 */
403 ENTRY(error_svc_rx)
404 /*
405 * Done with this packet
406 */
	! Receive completion is a no-op beyond acknowledging the packet:
	! clear the SVC_FLAGS_RI (receive-pending) bit in the svc state.
407 ld [%g2 + SVC_CTRL_STATE], %g5
408 andn %g5, SVC_FLAGS_RI, %g5
409 st %g5, [%g2 + SVC_CTRL_STATE] ! clear RECV pending
410
	! Save/restore the return address around PRINT — PRINT
	! presumably clobbers %g7 internally (same pattern as the
	! STRAND_PUSH in error_svc_tx); confirm against the debug macros.
411 mov %g7, %g6
412 PRINT("error_svc_rx\r\n")
413 mov %g6, %g7
414
415 jmp %g7 + 4
416 nop
417 SET_SIZE(error_svc_rx)
418
419
420/*
421 * error_svc_tx - error report transmission completion interrupt
422 *
423 * While the SRAM was busy, another error may have occurred. In that case, we
424 * increase the send pkt counter and mark such packet for delivery.
425 * In this function, we check to see if there are any packets to be transmitted.
426 *
427 * We search in the following way:
428 * Look at fire A jbi err buffer
429 * Look at fire A pcie err buffer
430 * Look at fire B jbi err buffer
431 * Look at fire B pcie err buffer
432 * For each strand in NSTRANDS
433 * Look at CE err buffer
434 * Look at UE err buffer
435 *
436 * We only send a packet at a time, and in the previously described order.
437 * Since we are running in the intr completion routine, svc_internal_send
438 * has already acquired the locks. For that reason, this routine needs to use
439 * send_diag_buf_noblock.
440 *
441 * %g1 callback cookie
442 * %g2 packet
443 * %g7 return address
444 */
445 ENTRY(error_svc_tx)
446 STRAND_PUSH(%g7, %g1, %g2)
447 PRINT("error_svc_tx\r\n")
448
	! The sram error-report buffer is free again now that the
	! transmit completed; clear the in-use flag in the config struct.
449 VCPU_STRUCT(%g1) /* FIXME: strand */
450 ldx [%g1 + CPU_ROOT], %g1 /* FIXME: CPU2ROOT */
451 stx %g0, [%g1 + CONFIG_SRAM_ERPT_BUF_INUSE] ! clear the inuse flag
452
453 /*
454 * See if we need to send more packets
455 */
456 ldx [%g1 + CONFIG_ERRS_TO_SEND], %g2
457 brz %g2, 4f
458 nop
459
460 PRINT("NEED TO SEND ANOTHER PACKET\r\n")
461#ifdef CONFIG_FIRE
462 /*
463 * search vpci to see if we need to send errors
464 */
	/*
	 * Each candidate buffer is checked in the fixed order described
	 * in the block comment above.  On a hit we branch to 2f with,
	 * apparently, %g1 = erpt buffer, %g2 = unsent-pkt flag address,
	 * %g3 = packet size — confirm against send_diag_erpt_nolock.
	 */
465
466 /* Look at fire_a jbi */
467 GUEST_STRUCT(%g1)
468 mov FIRE_A_AID, %g2
469 DEVINST2INDEX(%g1, %g2, %g2, %g3, 4f)
470 DEVINST2COOKIE(%g1, %g2, %g2, %g3, 4f)
471 mov %g2, %g1
472 add %g1, FIRE_COOKIE_JBC_ERPT, %g5
473 add %g5, PCI_UNSENT_PKT, %g2 ! %g2 needed at 2f
474 ldsw [%g5 + PCI_UNSENT_PKT], %g4
475 mov PCIERPT_SIZE - EPKTSIZE, %g3
476 brnz %g4, 2f
477 nop
478
479 /* Look at fire_a pcie */
480 add %g1, FIRE_COOKIE_PCIE_ERPT, %g1
481 add %g1, PCI_UNSENT_PKT, %g2 ! %g2 needed at 2f
482 ldsw [%g1 + PCI_UNSENT_PKT], %g4
483 mov PCIERPT_SIZE - EPKTSIZE, %g3
484 brnz %g4, 2f
485 add %g1, PCI_ERPT_U, %g1
486
487 /* Look at fire_b jbc */
	/*
	 * NOTE(review): unlike the fire_a paths above (which load %g2
	 * with the PCI_UNSENT_PKT address before branching — "%g2 needed
	 * at 2f"), the two fire_b paths below branch to 2f without
	 * re-loading %g2, leaving it holding the cookie pointer from
	 * DEVINST2COOKIE.  Verify whether this is a latent bug or an
	 * intentional difference in send_diag_erpt_nolock's arguments.
	 */
488 GUEST_STRUCT(%g1)
489 mov FIRE_B_AID, %g2
490 DEVINST2INDEX(%g1, %g2, %g2, %g3, 4f)
491 DEVINST2COOKIE(%g1, %g2, %g2, %g3, 4f)
492 mov %g2, %g1
493 add %g1, FIRE_COOKIE_JBC_ERPT, %g5
494 ldsw [%g5 + PCI_UNSENT_PKT], %g4
495 mov PCIERPT_SIZE - EPKTSIZE, %g3
496 cmp %g4, %g0
497 bnz %xcc, 2f
498 nop
499
500 /* Look at fire_b pcie */
501 add %g1, FIRE_COOKIE_PCIE_ERPT, %g1
502 ldsw [%g1 + PCI_UNSENT_PKT], %g4
503 mov PCIERPT_SIZE - EPKTSIZE, %g3
504 cmp %g4, %g0
505 bnz %xcc, 2f
506 add %g1, PCI_ERPT_U, %g1
507#endif /* CONFIG_FIRE */
508
509 /* Now look at the strand erpts */
	/*
	 * Walk the NSTRANDS strand structures (%g6 = current, %g5 = end
	 * sentinel), checking each strand's CE then UE report buffer for
	 * a marked (nonzero STRAND_UNSENT_PKT) packet.
	 */
510
511 STRAND_STRUCT(%g6)
512 ldx [%g6 + STRAND_CONFIGP], %g6
513 ldx [%g6 + CONFIG_STRANDS], %g6
514 set STRAND_SIZE * NSTRANDS, %g5
515 add %g6, %g5, %g5 ! last cpu ptr
516
5171:
518 ! Check in the CE err buf for marked pkt
519 add %g6, STRAND_CE_RPT + STRAND_UNSENT_PKT, %g2
520 mov EVBSC_SIZE, %g3
521 ldx [%g2], %g4
522 cmp %g0, %g4
523 bnz %xcc, 2f
524 add %g6, STRAND_CE_RPT + STRAND_VBSC_ERPT, %g1
525
526 ! Check in the UE err buf for marked pkt
527 set STRAND_UE_RPT + STRAND_UNSENT_PKT, %g2
528 add %g6, %g2, %g2
529 ldx [%g2], %g4
530 set STRAND_UE_RPT + STRAND_VBSC_ERPT, %g3
531 add %g6, %g3, %g1
532 cmp %g0, %g4
533 bnz %xcc, 2f
534 mov EVBSC_SIZE, %g3
535
536 ! %g6 = current strand ptr
5373:
538 set STRAND_SIZE, %g4
539 add %g4, %g6, %g6
540 cmp %g6, %g5 ! new ptr == last ptr?
541 bl %xcc, 1b
542 nop
543
	! Nothing marked anywhere — fall through to the exit.
544 ba 4f
545 nop
546
5472:
548 PRINT("FOUND THE PACKAGE TO SEND\r\n")
549 ! We found it. We have all the args in place, so just sent the pkt
550 HVCALL(send_diag_erpt_nolock)
551
552 ! Mark as one less pkt to send
	! Atomic decrement of CONFIG_ERRS_TO_SEND: casx retry loop,
	! re-reading the counter (via the annulled delay slot) until the
	! compare-and-swap succeeds.
553 STRAND_STRUCT(%g6)
554 ldx [%g6 + STRAND_CONFIGP], %g6 /* config data */
555 add %g6, CONFIG_ERRS_TO_SEND, %g6
556 ldx [%g6], %g1
5570: sub %g1, 1, %g3
558 casx [%g6], %g1, %g3
559 cmp %g1, %g3
560 bne,a,pn %xcc, 0b
561 mov %g3, %g1
562
5634:
564 ! Pop return pc. Done
565 STRAND_POP(%g7, %g1)
566 HVRET
567 SET_SIZE(error_svc_tx)
568
569#endif /* CONFIG_SVC } */
570
571/*
572 * Clear memory sub-system and other error status registers
573 * ready to start overall system
574 *
575 * %g1-%g5 clobbered
576 * %g7 = return address
577 */
578
579 ENTRY(clear_error_status_registers)
580
581 ! clear the l2 esr regs
582 ! XXX need to log the nonzero error status
	! Loop over banks NO_L2_BANKS-1 .. 0; each ESR is write-one-to-
	! clear, so reading the status and storing it back clears it.
583 set (NO_L2_BANKS - 1), %g5 ! bank select
5842:
585 setx L2_ESR_BASE, %g2, %g4 ! access the L2 csr
586 sllx %g5, L2_BANK_SHIFT, %g2
587 or %g4, %g2, %g4
588 ldx [%g4], %g3 ! read status
589 stx %g3, [%g4] ! clear status (RW1C)
590 subcc %g5, 1, %g5
591 bge %xcc, 2b
592 nop
593
594 ! clear the DRAM esr regs
595 ! XXX need to log the nonzero error status
596 set (NO_DRAM_BANKS - 1), %g5 ! bank select
5972:
598 setx DRAM_ESR_BASE, %g2, %g4 ! access the dram csr
599 sllx %g5, DRAM_BANK_SHIFT, %g2
600 or %g4, %g2, %g4
601 ldx [%g4], %g3 ! read status
602 stx %g3, [%g4] ! clear status (RW1C)
603 subcc %g5, 1, %g5
604 bge %xcc, 2b
605 nop
606
607 ! clear CEs logged in SPARC ESR also
608 setx SPARC_CE_BITS, %g1, %g2
609 stxa %g2, [%g0]ASI_SPARC_ERR_STATUS
610
611 ! enable all errors, UEs should already be enabled
612 mov (CEEN | NCEEN), %g1
613 stxa %g1, [%g0]ASI_SPARC_ERR_EN
614
615 ! enable L2 errors
	! NOTE(review): banks 0-3 are hard-coded here while the clearing
	! loops above use NO_L2_BANKS — confirm these stay in sync.
616 mov (CEEN | NCEEN), %g1
617 mov 0, %g2
618 BSET_L2_BANK_EEN(%g2, %g1, %g3, %g4)
619 mov 1, %g2
620 BSET_L2_BANK_EEN(%g2, %g1, %g3, %g4)
621 mov 2, %g2
622 BSET_L2_BANK_EEN(%g2, %g1, %g3, %g4)
623 mov 3, %g2
624 BSET_L2_BANK_EEN(%g2, %g1, %g3, %g4)
625
626 HVRET
627
628 SET_SIZE(clear_error_status_registers)
629
630
631
632#ifdef RESETCONFIG_ENABLEHWSCRUBBERS /* { */
633
634/*
635 * Configuration
636 */
637#define DEFAULT_L2_SCRUBINTERVAL 0x100
638#define DEFAULT_DRAM_SCRUBFREQ 0xfff
639
640/*
641 * Helper macros which check if the scrubbers should be enabled, if so
642 * they get enabled with the default scrub rates.
643 */
/*
 * DRAM_SCRUB_ENABLE(dram_base, bank, reg1, reg2)
 * If DRAM 'bank' is enabled (its channel-disable register reads zero),
 * program the default scrub frequency and then set the scrub-enable
 * bit for that bank.  Disabled banks are skipped via the 1f local
 * label.  reg1 and reg2 are clobbered.
 */
644#define DRAM_SCRUB_ENABLE(dram_base, bank, reg1, reg2) \
645 .pushlocals ;\
646 set DRAM_CHANNEL_DISABLE_REG + ((bank) * DRAM_BANK_STEP), reg1 ;\
647 ldx [dram_base + reg1], reg1 ;\
648 brnz,pn reg1, 1f ;\
649 nop ;\
650 set DRAM_SCRUB_ENABLE_REG + ((bank) * DRAM_BANK_STEP), reg1 ;\
651 mov DEFAULT_DRAM_SCRUBFREQ, reg2 ;\
652 stx reg2, [dram_base + reg1] ;\
653 set DRAM_SCRUB_ENABLE_REG + ((bank) * DRAM_BANK_STEP), reg1 ;\
654 mov DRAM_SCRUB_ENABLE_REG_ENAB, reg2 ;\
655 stx reg2, [dram_base + reg1] ;\
656 1: .poplocals
657
/*
 * L2_SCRUB_ENABLE(l2cr_base, bank, reg1, reg2)
 * If the L2 scrubber for 'bank' is not already enabled in its control
 * register, rewrite the register with the scrub-interval field
 * replaced by DEFAULT_L2_SCRUBINTERVAL and L2_SCRUBENABLE set; all
 * other control bits are preserved (read-modify-write).  reg1 and
 * reg2 are clobbered.
 */
658#define L2_SCRUB_ENABLE(l2cr_base, bank, reg1, reg2) \
659 .pushlocals ;\
660 set bank << L2_BANK_SHIFT, reg1 ;\
661 ldx [l2cr_base + reg1], reg2 ;\
662 btst L2_SCRUBENABLE, reg2 ;\
663 bnz,pt %xcc, 1f ;\
664 nop ;\
665 set L2_SCRUBINTERVAL_MASK, reg1 ;\
666 andn reg2, reg1, reg2 ;\
667 set DEFAULT_L2_SCRUBINTERVAL, reg1 ;\
668 sllx reg1, L2_SCRUBINTERVAL_SHIFT, reg1 ;\
669 or reg1, L2_SCRUBENABLE, reg1 ;\
670 or reg2, reg1, reg2 ;\
671 set bank << L2_BANK_SHIFT, reg1 ;\
672 stx reg2, [l2cr_base + reg1] ;\
673 1: .poplocals
674
675
676
	/*
	 * enable_hw_scrubbers: turn on the L2 and DRAM hardware
	 * scrubbers for banks 0-3 using the default scrub rates defined
	 * above.  The macros skip banks that are disabled (DRAM) or
	 * already scrubbing (L2).
	 *
	 * in:       %g7 - return address (HVRET)
	 * clobbers: %g1-%g3 (plus whatever the macros use internally)
	 */
677 ENTRY(enable_hw_scrubbers)
678 /*
679 * Enable the l2$ scrubber for each of the four l2$ banks
680 */
681 setx L2_CONTROL_REG, %g2, %g1
682 L2_SCRUB_ENABLE(%g1, /* bank */ 0, %g2, %g3)
683 L2_SCRUB_ENABLE(%g1, /* bank */ 1, %g2, %g3)
684 L2_SCRUB_ENABLE(%g1, /* bank */ 2, %g2, %g3)
685 L2_SCRUB_ENABLE(%g1, /* bank */ 3, %g2, %g3)
686
687 /*
688 * Enable the Niagara memory scrubber for each enabled DRAM
689 * bank
690 */
691 setx DRAM_BASE, %g2, %g1
692 DRAM_SCRUB_ENABLE(%g1, /* bank */ 0, %g2, %g3)
693 DRAM_SCRUB_ENABLE(%g1, /* bank */ 1, %g2, %g3)
694 DRAM_SCRUB_ENABLE(%g1, /* bank */ 2, %g2, %g3)
695 DRAM_SCRUB_ENABLE(%g1, /* bank */ 3, %g2, %g3)
696
697 HVRET
698
699 SET_SIZE(enable_hw_scrubbers)
700
701#endif /* } */