/*
 * Imported from the initial commit of the OpenSPARC T2 architecture model
 * [OpenSPARC-T2-SAM]: hypervisor/src/greatlakes/common/src/subr.s
 * (commit 920dae64)
 */
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: subr.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)subr.s 1.30 07/09/11 SMI"
50
51/*
52 * Various support routines
53 */
54
55#include <sys/asm_linkage.h>
56#include <devices/pc16550.h>
57#include <sys/htypes.h>
58#include <hprivregs.h>
59#include <sun4v/asi.h>
60#include <sun4v/queue.h>
61#include <asi.h>
62#include <offsets.h>
63#include <strand.h>
64#include <util.h>
65#include <abort.h>
66#include <debug.h>
67#include <fpga.h>
68
69
70/*
71 * memscrub - zero memory using Niagara blk-init stores
72 * Assumes cache-line alignment and counts
73 *
74 * %g1 address
75 * %g2 length
76 *
77 * Note that the block initializing store only zeros the
78 * whole cacheline if the address is at the start of the
79 * cacheline and the line is not in the L2 cache. Otherwise
80 * the existing cacheline contents are retained other
81 * than the specifically stored value.
82 */
	ENTRY_NP(memscrub)
#if defined(CONFIG_FPGA) || defined(T1_FPGA) /* running on real hardware */
#ifndef T1_FPGA_MEMORY_PREINIT
	brz	%g2, 2f			! zero length: nothing to scrub
	add	%g1, %g2, %g2		! %g2 = end address (delay slot; harmless if branch taken)
	mov	ASI_BLK_INIT_P, %asi	! block-initializing stores: no read-allocate of the line
1:
	! zero one 64-byte cacheline, 8 bytes at a time
	stxa	%g0, [%g1 + 0x00]%asi
	stxa	%g0, [%g1 + 0x08]%asi
	stxa	%g0, [%g1 + 0x10]%asi
	stxa	%g0, [%g1 + 0x18]%asi
	stxa	%g0, [%g1 + 0x20]%asi
	stxa	%g0, [%g1 + 0x28]%asi
	stxa	%g0, [%g1 + 0x30]%asi
	stxa	%g0, [%g1 + 0x38]%asi
	inc	0x40, %g1		! advance to next cacheline

	cmp	%g1, %g2
	blu,pt	%xcc, 1b		! unsigned compare: loop until end address reached
	nop
2:
	membar	#Sync			! make sure all the stores have completed
#endif /* ifndef T1_FPGA_MEMORY_PREINIT */
#endif /* if defined(CONFIG_FPGA) || defined(T1_FPGA) */
	jmp	%g7 + 4			! return to caller (link address in %g7)
	nop
	SET_SIZE(memscrub)
110
111
112/*
113 * xcopy - copy xwords
114 * Assumes 8-byte alignment and counts
115 *
116 * %g1 source (clobbered)
117 * %g2 dest (clobbered)
118 * %g3 size (clobbered)
119 * %g4 temp (clobbered)
120 * %g7 return address
121 */
	ENTRY_NP(xcopy)
#ifdef CONFIG_LEGIONBCOPY
	/*
	 * Use a legion magic-trap to do the copy;
	 * do alignment test to catch programming errors.
	 */
	or	%g1, %g2, %g4
	or	%g4, %g3, %g4
	btst	7, %g4			! src|dst|len not all 8-byte aligned?
	bnz,pt	%xcc, 1f		! misaligned: fall through to manual loop
	nop
	ta	%xcc, LEGION_MAGICTRAP_PABCOPY
	brz	%g4, 2f			! %g4 == 0 successful
	nop
1:
#endif
	sub	%g1, %g2, %g1		! %g1 = src - dst, so src xword = [%g1 + %g2]
1:
	ldx	[%g1 + %g2], %g4
	deccc	8, %g3			! size -= 8, setting condition codes
	stx	%g4, [%g2]
	bgu,pt	%xcc, 1b		! loop while remaining size > 0 (unsigned)
	inc	8, %g2			! advance dst (delay slot)
#ifdef CONFIG_LEGIONBCOPY
2:
#endif
	jmp	%g7 + 4			! return to caller
	nop
	SET_SIZE(xcopy)
151
152
153/*
154 * bcopy - short byte-aligned copies
155 *
156 * %g1 source (clobbered)
157 * %g2 dest (clobbered)
158 * %g3 size (clobbered)
159 * %g4 temp (clobbered)
160 * %g7 return address
161 */
	ENTRY_NP(bcopy)
	! alignment test: if src, dst and len are all 8-byte aligned,
	! hand off to the faster xword copy
	or	%g1, %g2, %g4
	or	%g4, %g3, %g4
	btst	7, %g4
	bz,pt	%xcc, xcopy		! tail-jump; xcopy returns via %g7 for us
	nop

#ifdef CONFIG_LEGIONBCOPY
	/*
	 * Use a legion magic-trap to do the copy
	 */
	ta	%xcc, LEGION_MAGICTRAP_PABCOPY
	brz	%g4, 2f			! %g4 == 0 successful
	nop
#endif
	sub	%g1, %g2, %g1		! %g1 = src - dst, so src byte = [%g1 + %g2]
1:
	ldub	[%g1 + %g2], %g4
	deccc	%g3			! size -= 1, setting condition codes
	stb	%g4, [%g2]
	bgu,pt	%xcc, 1b		! loop while remaining size > 0 (unsigned)
	inc	%g2			! advance dst (delay slot)
#ifdef CONFIG_LEGIONBCOPY
2:
#endif
	jmp	%g7 + 4			! return to caller
	nop
	SET_SIZE(bcopy)
191
192
193
194/*
195 * bzero - short byte-aligned zero operations
196 *
197 * NOTE: If we ever need to bzero larger chunks of memory
198 * we need to adapt this to do a more optimized bzero operation
199 *
200 * %g1 dest (clobbered)
201 * %g2 size (clobbered)
202 * %g7 return address
203 */
204
	ENTRY_NP(bzero)

	! SMALL_ZERO_MACRO zeroes %g2 bytes starting at %g1
	! (macro defined in util.h; intended for short regions only)
	SMALL_ZERO_MACRO(%g1, %g2)

	HVRET				! return via %g7
	SET_SIZE(bzero)
211
212
213
214/*
215 * Puts - print a string on the debug uart
216 *
217 * %g1 string (clobbered)
218 * %g7 return address
219 *
220 * %g2-%g4 clobbered
221 */
	ENTRY_NP(puts)

#ifdef CONFIG_VBSC_SVC

	/*
	 * Check if enough initialization has
	 * taken place to send the message to
	 * the vbsc hypervisor console.
	 */
	STRAND_STRUCT(%g2)
	brz,pn	%g2, .puts_try_hvuart	! no strand struct yet: try the raw uart
	nop
	STRAND_PUSH(%g7, %g2, %g3)	! preserve return address across the call

	HVCALL(vbsc_puts)

	STRAND_POP(%g7, %g2)

	ba,a	.puts_done
	nop

.puts_try_hvuart:

#endif

#ifdef CONFIG_HVUART

	setx	FPGA_UART_BASE, %g3, %g2	! %g2 = uart base address
1:
	ldub	[%g2 + LSR_ADDR], %g3
	btst	LSR_THRE, %g3		! spin until transmit holding reg empty
	bz	1b
	nop

1:
	ldub	[%g1], %g3		! next character of the string
	cmp	%g3, 0
	inc	%g1
	bne,a,pt %icc, 2f		! annulled: stb executes only if char != NUL
	stb	%g3, [%g2]		! write character to uart data register
	ba,a	.puts_done		! hit the NUL terminator: done
	nop
2:
	ldub	[%g2 + LSR_ADDR], %g3
	btst	LSR_TEMT, %g3		! spin until transmitter fully empty
	bz	2b
	nop
	ba,a	1b			! go fetch the next character
	nop
#endif

.puts_done:

	jmp	%g7 + 4			! return to caller
	nop
	SET_SIZE(puts)
278
279
280/*
281 * putx - print a 64-bit xword on the debug uart
282 * %g1 value (clobbered)
283 * %g7 return address
284 *
285 * %g2-%g5 clobbered
286 */
	ENTRY_NP(putx)

#ifdef CONFIG_VBSC_SVC

	/*
	 * Check if enough initialization has
	 * taken place to send the message to
	 * the vbsc hypervisor console.
	 */
	STRAND_STRUCT(%g2)
	brz,pn	%g2, .putx_try_hvuart	! no strand struct yet: try the raw uart
	nop

	STRAND_PUSH(%g7, %g2, %g3)	! preserve return address and %g6
	STRAND_PUSH(%g6, %g2, %g3)

	HVCALL(vbsc_putx)

	STRAND_POP(%g6, %g2)
	STRAND_POP(%g7, %g2)

	ba,a	.putx_done
	nop

.putx_try_hvuart:

#endif

#ifdef CONFIG_HVUART

	setx	FPGA_UART_BASE, %g3, %g2	! %g2 = uart base address
1:
	ldub	[%g2 + LSR_ADDR], %g4
	btst	LSR_THRE, %g4		! spin until transmit holding reg empty
	bz	1b
	nop

	mov	60, %g3			! %g3 = shift count, start at top nibble
	ba	2f
	rd	%pc, %g4		! delay slot: %g4 = addr of this rd insn
	.ascii	"0123456789abcdef"	! hex digit lookup table (follows the rd)
	.align	4
2:
	add	%g4, 4, %g4		! step %g4 past the rd: now -> digit table
4:
	! skip leading zero nibbles (the last nibble always prints)
	srlx	%g1, %g3, %g5
	andcc	%g5, 0xf, %g5
	bne	%xcc, 3f		! first non-zero nibble found: print it
	nop
	subcc	%g3, 4, %g3
	bne	%xcc, 4b
	nop
	! fall thru - value was all leading zeros; print final nibble
1:
	srlx	%g1, %g3, %g5
	and	%g5, 0xf, %g5		! isolate current nibble
3:
	ldub	[%g4 + %g5], %g5	! translate nibble -> ascii via table
	stb	%g5, [%g2]		! write digit to uart data register
	subcc	%g3, 4, %g3		! step down to next nibble
	bge	2f
	nop

	ba,a	.putx_done		! shift count went negative: all printed
	nop
2:
	ldub	[%g2 + LSR_ADDR], %g5
	btst	LSR_TEMT, %g5		! spin until transmitter fully empty
	bz	2b
	nop
	ba,a	1b			! print next nibble
	nop
#endif

.putx_done:

	jmp	%g7 + 4			! return to caller
	nop
	SET_SIZE(putx)
366
367
368
369#ifdef CONFIG_HVUART
370/*
371 * uart_init - initialize the debug uart
372 * Supports only 16550 UART
373 *
374 * %g1 is UART base address
375 * %g2,%g3 clobbered
376 * %g7 return address
377 */
	ENTRY_NP(uart_init)
	ldub	[%g1 + LSR_ADDR], %g2	! read LSR (clears stale line status)
	stb	%g0, [%g1 + IER_ADDR]	! clear IER - all uart interrupts off
	stb	%g0, [%g1 + FCR_ADDR]	! clear FCR, disable FIFO
	mov	(FCR_XMIT_RESET | FCR_RCVR_RESET), %g3
	stb	%g3, [%g1 + FCR_ADDR]	! reset FIFOs in FCR
	mov	FCR_FIFO_ENABLE, %g3
	stb	%g3, [%g1 + FCR_ADDR]	! FCR enable FIFO
	mov	(LCR_DLAB | LCR_8N1), %g3
	stb	%g3, [%g1 + LCR_ADDR]	! set LCR for 8-n-1, set DLAB
					! DLAB = 1: divisor latch now accessible
	mov	DLL_9600, %g3
#ifdef UART_CLOCK_MULTIPLIER
	mulx	%g3, UART_CLOCK_MULTIPLIER, %g3	! scale divisor for faster clock
#endif
	stb	%g3, [%g1 + DLL_ADDR]	! set baud rate = 9600
	stb	%g0, [%g1 + DLM_ADDR]	! set MS = 0 (divisor high byte)
					! disable DLAB
	mov	LCR_8N1, %g3		! set LCR for 8-n-1, unset DLAB
	jmp	%g7 + 4			! return to caller
	stb	%g3, [%g1 + LCR_ADDR]	! delay slot: write final LCR value
	SET_SIZE(uart_init)
400#endif /* CONFIG_HVUART */
401
402
403
404/*
405 * These routines are called from softtrap handlers.
406 *
407 * We do this so that debug printing does not trample all over
408 * the registers you are using.
409 */
	ENTRY_NP(hprint)
	mov	%o0, %g1		! string pointer argument for puts
	ba	puts
	rd	%pc, %g7		! delay slot: puts returns to %g7+4 = done
	done				! retire the softtrap
	SET_SIZE(hprint)
416
	ENTRY_NP(hprintx)
	mov	%o0, %g1		! xword value argument for putx
	ba	putx
	rd	%pc, %g7		! delay slot: putx returns to %g7+4 = done
	done				! retire the softtrap
	SET_SIZE(hprintx)
423
424
425/*
426 * Save the state of a virtual cpu into a save area
427 *
428 * FIXME: To be done:
429 * * Move this code into platform specific area - or at least a
430 * portion of it. Platform specific CPUs may have additional state
431 * that requires saving.
432 *
433 * * Clobber tick interrupts - don't care about tick_cmr register.
434 *
435 * * Handle stick interrupts .. a stick interrupt may have retired while
436 * state was saved - in which case we must manually create the
437 * interrupt in softint.
438 *
439 * * Save and restore the floating point registers .. dont forget to
440 * to look and see if pstate.pef etc are enabled ...
441 * ... capture any deferred traps if there are any on a given cpu.
442 *
443 * * Save and restore graphics status.
444 *
445 * * Save and restore the 4v queue registers.
446 *
447 * * Fix to save all the G's and trap stack registers from tl=0 to maxptl
448 *
449 * clobbers: Everything - returns back with TL & GL=0 and
450 * a clean slate.
451 */
	ENTRY_NP(vcpu_state_save)

	VCPU_STRUCT(%g6)		! %g6 = current vcpu struct

	! save vcpu state
	set	CPU_STATE_SAVE_AREA, %g1
	add	%g6, %g1, %g1

	!! %g1 = vcpu save area

	rdpr	%tl, %g3
	stx	%g3, [%g1 + VS_TL]	! remember entry TL for the restore path

	! First step - save trap stack and registers.
	! Walk TL downward, saving the trap registers at each level.
	add	%g1, VS_TRAPSTACK, %g2
1:
	wrpr	%g3, 0, %tl		! select trap level %g3 to read its regs
	brz,pn	%g3, 2f			! finished once we reach TL=0
	nop
	sub	%g3, 1, %g3
	mulx	%g3, VCPUTRAPSTATE_SIZE, %g4
	add	%g4, %g2, %g4		! %g4 = save slot for this trap level

	rdpr	%tpc, %g5
	stx	%g5, [%g4 + VCTS_TPC]
	rdpr	%tnpc, %g5
	stx	%g5, [%g4 + VCTS_TNPC]
	rdpr	%tstate, %g5
	stx	%g5, [%g4 + VCTS_TSTATE]
	rdpr	%tt, %g5
	stx	%g5, [%g4 + VCTS_TT]
	rdhpr	%htstate, %g5
	stx	%g5, [%g4 + VCTS_HTSTATE]
	ba,pt	%xcc, 1b
	nop
2:
	ldx	[%g1 + VS_TL], %g4
	wrpr	%g4, %tl		! loop left %tl at 0; put back entry TL

	! Save the misc state
	rdpr	%tba, %g2
	stx	%g2, [%g1 + VS_TBA]
	rd	%y, %g2
	stx	%g2, [%g1 + VS_Y]
	rd	%asi, %g2
	stx	%g2, [%g1 + VS_ASI]
#if 0 /* { FIXME: workaround fp-diabled trap */
	rd	%gsr, %g2
	stx	%g2, [%g1 + VS_GSR]
#endif /* } */
	rdpr	%pil, %g2
	stx	%g2, [%g1 + VS_PIL]

	! Timer state
	rd	%tick, %g2
	stx	%g2, [%g1 + VS_TICK]
	rd	STICK, %g2
	stx	%g2, [%g1 + VS_STICK]
	rd	STICKCMP, %g2
	stx	%g2, [%g1 + VS_STICKCOMPARE]

	! IMPORTANT: We save softint last just incase a tick compare
	! got triggered between when we saved stick and stick compare
	rd	%softint, %g2
	stx	%g2, [%g1 + VS_SOFTINT]

	! Save scratchpads
	! (each scratchpad is read through ASI_SCRATCHPAD at offset regnum*8)
#define STORESCRATCH(regnum) \
	mov	((regnum) * 8), %g3	;\
	ldxa	[%g3]ASI_SCRATCHPAD, %g2	;\
	stx	%g2, [%g1 + VS_SCRATCHPAD + ((regnum) * 8)]

	STORESCRATCH(0)
	STORESCRATCH(1)
	STORESCRATCH(2)
	STORESCRATCH(3)
	! scratchpads 4 & 5 dont exist for a Niagara
	STORESCRATCH(6)
	STORESCRATCH(7)

#undef STORESCRATCH


	/*
	 * NOTE: FIXME saving and restoring the Q registers is postoned until
	 *	we actually want to context switch. The reason is simply that
	 *	the current LDC and x-call code deliver their mondos by
	 *	manipulating the head and tail registers of the local strand
	 *	and if we end up sending a message to ourselves (say in the
	 *	hvctl code) then we end up restoring the old Q values and not
	 *	the updated ones.
	 *	So for now the Q values stay on the chip until the mondo
	 *	delivery schemes (LDC x-call etc.) have been modified
	 *	accordingly.
	 */

#if 0 /* { FIXME: */
	! Save the queue registers

	! Now we restore the queue registers
#define STOREQ(_name)	\
	set	_name/**/_QUEUE_HEAD, %g2	;\
	ldxa	[%g2]ASI_QUEUE, %g2	;\
	sth	%g2, [%g1 + VS_/**/_name/**/_HEAD]	;\
	set	_name/**/_QUEUE_TAIL, %g2	;\
	ldxa	[%g2]ASI_QUEUE, %g2	;\
	sth	%g2, [%g1 + VS_/**/_name/**/_TAIL]

	STOREQ(CPU_MONDO)
	STOREQ(DEV_MONDO)
	STOREQ(ERROR_RESUMABLE)
	STOREQ(ERROR_NONRESUMABLE)

#undef STOREQ
#endif /* } */

	! Save the window state
	rdpr	%wstate, %g2
	stx	%g2, [%g1 + VS_WSTATE]
	rdpr	%cansave, %g2
	stx	%g2, [%g1 + VS_CANSAVE]
	rdpr	%canrestore, %g2
	stx	%g2, [%g1 + VS_CANRESTORE]
	rdpr	%otherwin, %g2
	stx	%g2, [%g1 + VS_OTHERWIN]
	rdpr	%cleanwin, %g2
	stx	%g2, [%g1 + VS_CLEANWIN]

	rdpr	%cwp, %g2
	stx	%g2, [%g1 + VS_CWP]

	! Save the windows: rotate through every window via %cwp and
	! store its ins and locals (outs are the next window's ins).

	add	%g1, VS_WINS, %g3
	mov	0, %g4
1:	wrpr	%g4, %cwp
	stx	%i0, [%g3 + (0 * 8)]
	stx	%i1, [%g3 + (1 * 8)]
	stx	%i2, [%g3 + (2 * 8)]
	stx	%i3, [%g3 + (3 * 8)]
	stx	%i4, [%g3 + (4 * 8)]
	stx	%i5, [%g3 + (5 * 8)]
	stx	%i6, [%g3 + (6 * 8)]
	stx	%i7, [%g3 + (7 * 8)]
	stx	%l0, [%g3 + (8 * 8)]
	stx	%l1, [%g3 + (9 * 8)]
	stx	%l2, [%g3 + (10 * 8)]
	stx	%l3, [%g3 + (11 * 8)]
	stx	%l4, [%g3 + (12 * 8)]
	stx	%l5, [%g3 + (13 * 8)]
	stx	%l6, [%g3 + (14 * 8)]
	stx	%l7, [%g3 + (15 * 8)]
	add	%g3, RWINDOW_SIZE, %g3
	inc	%g4
	cmp	%g4, NWINDOWS
	bne,pt	%xcc, 1b
	nop

	! restore %cwp
	ldx	[%g1 + VS_CWP], %g2
	wrpr	%g2, %cwp

	mov	%g7, %l1		! preserve callers return address

	rdpr	%gl, %l2		! preserve original %gl
	stx	%l2, [%g1+VS_GL]

	! Stash all the globals except the current ones.
	! Locals (%l1-%l4) carry state across the %gl switches since
	! the globals themselves change with each wrpr %gl.
	add	%g1, VS_GLOBALS, %l4
	mov	0, %l3
1:
	wrpr	%l3, %gl
	cmp	%l3, %l2
	be,pn	%xcc, 2f		! stop at (and skip) the current GL
	nop
	stx	%g1, [%l4 + (0 * 8)]
	stx	%g2, [%l4 + (1 * 8)]
	stx	%g3, [%l4 + (2 * 8)]
	stx	%g4, [%l4 + (3 * 8)]
	stx	%g5, [%l4 + (4 * 8)]
	stx	%g6, [%l4 + (5 * 8)]
	stx	%g7, [%l4 + (6 * 8)]
	inc	%l3
	add	%l4, VCPU_GLOBALS_SIZE, %l4
	ba,pt	%xcc, 1b
	nop
2:

	! leave with a clean slate: GL=0 and TL=0
	wrpr	%g0, %gl
	wrpr	%g0, %tl

	! Return to the caller
	mov	%l1, %g7
	HVRET
	SET_SIZE(vcpu_state_save)
647
648
649
650/*
651 * Restore guest partition from save area
652 *
653 * clobbers: Everything ..
654 *
655 * We retore in the reverse order to the save, and
656 * We return back to the caller using the address in %g7
657 * This function changes gl and tl back to the values
658 * stored in the vcpus save area.
659 *
660 * We enter with the pointer to the vcpu to be restored
661 * in the vcpu hscratch register - the assumption is that that
662 * has already been set correctly.
663 */
	ENTRY_NP(vcpu_state_restore)

	VCPU_STRUCT(%l0)		! vcpu to restore (from hscratch, see header)
	set	CPU_STATE_SAVE_AREA, %l1
	add	%l0, %l1, %l1
	mov	%g7, %l7		! stash return address in a local

	!! %l0 = vcpu
	!! %l1 = vcpu save area
	!! %l7 = return address

	! Restore all the globals up to but NOT including the save GL
	mov	0, %l3
	add	%l1, VS_GLOBALS, %l4
	ldx	[%l1 + VS_GL], %l2
1:
	wrpr	%l3, %gl
	cmp	%l3, %l2
	be,pn	%xcc, 2f		! reached the saved GL: stop
	nop
	ldx	[%l4 + (0 * 8)], %g1
	ldx	[%l4 + (1 * 8)], %g2
	ldx	[%l4 + (2 * 8)], %g3
	ldx	[%l4 + (3 * 8)], %g4
	ldx	[%l4 + (4 * 8)], %g5
	ldx	[%l4 + (5 * 8)], %g6
	ldx	[%l4 + (6 * 8)], %g7
	inc	%l3
	add	%l4, VCPU_GLOBALS_SIZE, %l4
	ba,pt	%xcc, 1b
	nop
2:

	! We land here with the globals restored
	! and gl set to the hypervisors Gs above
	! the vcpu context - move all the register
	! values from locals back to Gs.

	mov	%l7, %g7		! return address
	mov	%l0, %g6		! vcpu struct
	mov	%l1, %g1		! vcpu struct save area

	! Now restore all the register windows

	add	%g1, VS_WINS, %g3
	mov	0, %g4
1:	wrpr	%g4, %cwp
	ldx	[%g3 + (0 * 8)], %i0
	ldx	[%g3 + (1 * 8)], %i1
	ldx	[%g3 + (2 * 8)], %i2
	ldx	[%g3 + (3 * 8)], %i3
	ldx	[%g3 + (4 * 8)], %i4
	ldx	[%g3 + (5 * 8)], %i5
	ldx	[%g3 + (6 * 8)], %i6
	ldx	[%g3 + (7 * 8)], %i7
	ldx	[%g3 + (8 * 8)], %l0
	ldx	[%g3 + (9 * 8)], %l1
	ldx	[%g3 + (10 * 8)], %l2
	ldx	[%g3 + (11 * 8)], %l3
	ldx	[%g3 + (12 * 8)], %l4
	ldx	[%g3 + (13 * 8)], %l5
	ldx	[%g3 + (14 * 8)], %l6
	ldx	[%g3 + (15 * 8)], %l7
	add	%g3, RWINDOW_SIZE, %g3
	inc	%g4
	cmp	%g4, NWINDOWS
	bne,pt	%xcc, 1b
	nop

	! restore the window management registers
	ldx	[%g1 + VS_CWP], %g2
	wrpr	%g2, %cwp

	ldx	[%g1 + VS_CLEANWIN], %g2
	wrpr	%g2, %cleanwin
	ldx	[%g1 + VS_OTHERWIN], %g2
	wrpr	%g2, %otherwin
	ldx	[%g1 + VS_CANRESTORE], %g2
	wrpr	%g2, %canrestore
	ldx	[%g1 + VS_CANSAVE], %g2
	wrpr	%g2, %cansave
	ldx	[%g1 + VS_WSTATE], %g2
	wrpr	%g2, %wstate

#if 0 /* { FIXME: See note in state_save about Q registers */

	! Now we restore the queue registers
#define RESTOREQ(_name)	\
	lduh	[%g1 + VS_/**/_name/**/_HEAD], %g2	;\
	set	_name/**/_QUEUE_HEAD, %g3	;\
	stxa	%g2, [%g3]ASI_QUEUE	;\
	lduh	[%g1 + VS_/**/_name/**/_TAIL], %g2	;\
	set	_name/**/_QUEUE_TAIL, %g3	;\
	stxa	%g2, [%g3]ASI_QUEUE

	RESTOREQ(CPU_MONDO)
	RESTOREQ(DEV_MONDO)
	RESTOREQ(ERROR_RESUMABLE)
	RESTOREQ(ERROR_NONRESUMABLE)

#undef RESTOREQ
#endif /* } */

	! Restore the scratchpads
#define RESTORESCRATCH(regnum) \
	ldx	[%g1 + VS_SCRATCHPAD + ((regnum) * 8)], %g2	;\
	mov	((regnum) * 8), %g3	;\
	stxa	%g2, [%g3]ASI_SCRATCHPAD

	RESTORESCRATCH(0)
	RESTORESCRATCH(1)
	RESTORESCRATCH(2)
	RESTORESCRATCH(3)
	! scratchpads 4 & 5 dont exist for a Niagara
	RESTORESCRATCH(6)
	RESTORESCRATCH(7)

#undef RESTORESCRATCH

	! Restore the misc state
	ldx	[%g1 + VS_TBA], %g2
	wrpr	%g2, %tba
	ldx	[%g1 + VS_Y], %g2
	wr	%g2, %y
	ldx	[%g1 + VS_ASI], %g2
	wr	%g2, %asi
#if 0 /* { FIXME: workaround fp disabled trap */
	ldx	[%g1 + VS_GSR], %g2
	wr	%g2, %gsr
#endif /* } */
	ldx	[%g1 + VS_SOFTINT], %g2
	wr	%g2, %softint
	ldx	[%g1 + VS_PIL], %g2
	wrpr	%g2, %pil

	! Timer state
	ldx	[%g1 + VS_STICKCOMPARE], %g2	! FIXME: check me
	wr	%g2, STICKCMP

	! restoration has side effects ... if stick has passed stick compare
	! since we saved, then we manually set the softint bit, since
	! the HW will have missed the event while the vcpu was
	! descheduled.
	!
	! NOTE softint has to already be setup first
	!
	! NOTE stick compare is already setup so we get a match while were
	! fiddling with this, the HW will set the match bit for us
	!
	! We ignore tick_cmpr since it's not part of the sun4v
	! architecture

	! If stick cmp int_dis is 1 then stick interrupts are
	! disabled, so no further action is necessary
	brlz	%g2, 1f			! int_dis is bit 63 (sign bit)
	nop

	! Nothing to do if stick had already passed stick_cmpr
	ldx	[%g1 + VS_STICK], %g3
	sllx	%g3, 1, %g3
	srlx	%g3, 1, %g3		! ignore the npt bit
	cmp	%g3, %g2
	bg,pt	%xcc, 1f
	nop

	! Nothing to do if stick hasn't reached stick_cmpr yet
	rd	STICK, %g3
	sllx	%g3, 1 ,%g3
	srlx	%g3, 1 ,%g3		! again strip the npt bit before comparing
	cmp	%g3, %g2
	bl,pt	%xcc, 1f
	nop

	! Set bit 16 in softint - the missed stick-match interrupt
	sethi	%hi(SOFTINT_SM_BIT), %g4
	wr	%g4, SOFTINT_SET
1:

	! Now we restore the trapstack back up to TL

	mov	0, %g4
	ldx	[%g1 + VS_TL], %g2
	brz,pt	%g2, 2f			! saved TL was 0: no trap stack to restore
	nop
	add	%g1, VS_TRAPSTACK, %g3
1:
	add	%g4, 1, %g4
	wrpr	%g4, %tl		! select trap level %g4 to write its regs
	ldx	[%g3 + VCTS_TPC], %g5
	wrpr	%g5, %tpc
	ldx	[%g3 + VCTS_TNPC], %g5
	wrpr	%g5, %tnpc
	ldx	[%g3 + VCTS_TSTATE], %g5
	wrpr	%g5, %tstate
	ldx	[%g3 + VCTS_TT], %g5
	wrpr	%g5, %tt
	ldx	[%g3 + VCTS_HTSTATE], %g5
	wrhpr	%g5, %htstate
	add	%g3, VCPUTRAPSTATE_SIZE, %g3
	cmp	%g4, %g2
	bne,pt	%xcc, 1b
	nop
2:

	ldx	[%g1 + VS_TL], %g4
	wrpr	%g4, %tl		! finally set TL to the saved value

	!wait for changes to take effect
	membar	#Sync

	HVRET
	SET_SIZE(vcpu_state_restore)
876
877
878/*
879 * Print contents of important registers.
880 */
	ENTRY_NP(dump_regs)
	mov	%g7, %g6		! preserve return address (%g7 is
					! clobbered by the PRINT/PRINTX macros)
	! trap and window-management state
	PRINT("tl 0x"); rdpr %tl, %g1; PRINTX(%g1)
	PRINT(" gl 0x"); rdpr %gl, %g1; PRINTX(%g1)
	PRINT(" tt 0x"); rdpr %tt, %g1; PRINTX(%g1)
	PRINT(" tpc 0x"); rdpr %tpc, %g1; PRINTX(%g1)
	PRINT(" tnpc 0x"); rdpr %tnpc, %g1; PRINTX(%g1)
	PRINT(" tstate 0x"); rdpr %tstate, %g1; PRINTX(%g1)
	PRINT(" htstate 0x"); rdhpr %htstate, %g1; PRINTX(%g1)
	PRINT("\r\n");
	PRINT(" wstate 0x"); rdpr %wstate, %g1; PRINTX(%g1)
	PRINT(" cansave 0x"); rdpr %cansave, %g1; PRINTX(%g1)
	PRINT(" canrestore 0x");rdpr %canrestore, %g1;PRINTX(%g1)
	PRINT(" otherwin 0x"); rdpr %otherwin, %g1; PRINTX(%g1)
	PRINT(" cleanwin 0x"); rdpr %cleanwin, %g1; PRINTX(%g1)
	PRINT(" cwp 0x"); rdpr %cwp, %g1; PRINTX(%g1)
	PRINT("\r\n");
	! misc ancillary state
	PRINT(" tba 0x"); rdpr %tba, %g1; PRINTX(%g1)
	PRINT(" y 0x"); rd %y, %g1; PRINTX(%g1)
	PRINT(" asi 0x"); rd %asi, %g1; PRINTX(%g1)
#if 0 /* { FIXME: work around fp disabled trap*/
	PRINT(" gsr 0x"); rd %gsr, %g1; PRINTX(%g1)
#endif /* } */
	PRINT(" pil 0x"); rdpr %pil, %g1; PRINTX(%g1)
	PRINT(" stickcmp 0x"); rd STICKCMP, %g1; PRINTX(%g1)
	PRINT(" softint 0x"); rd %softint, %g1; PRINTX(%g1)
	PRINT("\r\n");
	! scratchpad registers (4 & 5 omitted; see save/restore code above)
	PRINT(" sc0 0x"); mov (0*8),%g1;ldxa [%g1]ASI_SCRATCHPAD,%g1;PRINTX(%g1)
	PRINT(" sc1 0x"); mov (1*8),%g1;ldxa [%g1]ASI_SCRATCHPAD,%g1;PRINTX(%g1)
	PRINT(" sc2 0x"); mov (2*8),%g1;ldxa [%g1]ASI_SCRATCHPAD,%g1;PRINTX(%g1)
	PRINT(" sc3 0x"); mov (3*8),%g1;ldxa [%g1]ASI_SCRATCHPAD,%g1;PRINTX(%g1)
	PRINT(" sc6 0x"); mov (6*8),%g1;ldxa [%g1]ASI_SCRATCHPAD,%g1;PRINTX(%g1)
	PRINT(" sc7 0x"); mov (7*8),%g1;ldxa [%g1]ASI_SCRATCHPAD,%g1;PRINTX(%g1)

	! dump ins and locals of all 8 windows by rotating %cwp
	rdpr	%cwp, %g3		! preserve
	mov	0, %g2
1:
	PRINT("Window 0x"); PRINTX(%g2); PRINT("\r\n");
	wrpr	%g2, %cwp
	PRINT("i0 0x"); PRINTX(%i0); PRINT(" ")
	PRINT("i1 0x"); PRINTX(%i1); PRINT(" ")
	PRINT("i2 0x"); PRINTX(%i2); PRINT(" ")
	PRINT("i3 0x"); PRINTX(%i3); PRINT(" ")
	PRINT("i4 0x"); PRINTX(%i4); PRINT(" ")
	PRINT("i5 0x"); PRINTX(%i5); PRINT(" ")
	PRINT("i6 0x"); PRINTX(%i6); PRINT(" ")
	PRINT("i7 0x"); PRINTX(%i7); PRINT(" ")
	PRINT("\r\n");

	PRINT("l0 0x"); PRINTX(%l0); PRINT(" ")
	PRINT("l1 0x"); PRINTX(%l1); PRINT(" ")
	PRINT("l2 0x"); PRINTX(%l2); PRINT(" ")
	PRINT("l3 0x"); PRINTX(%l3); PRINT(" ")
	PRINT("l4 0x"); PRINTX(%l4); PRINT(" ")
	PRINT("l5 0x"); PRINTX(%l5); PRINT(" ")
	PRINT("l6 0x"); PRINTX(%l6); PRINT(" ")
	PRINT("l7 0x"); PRINTX(%l7); PRINT(" ")
	PRINT("\r\n");
	inc	%g2
	cmp	%g2, 8
	bne,pt	%xcc, 1b
	nop

	wrpr	%g3, %cwp		! restore original window

	mov	%g6, %g7		! restore return address
	HVRET
	SET_SIZE(dump_regs)
949
950
951 /*
952 * spinlock_enter(uint64_t *lock)
953 * For calling from C code. In asm code use the SPINLOCK_ENTER macro.
954 */
	ENTRY_NP(spinlock_enter)
	STRAND_STRUCT(%o1)
	ldub	[%o1 + STRAND_ID], %o2
	inc	%o2			! lock value = strand id + 1 (always nonzero)
1:	mov	%o2, %o1
	casx	[%o0], %g0, %o1		! atomically claim lock iff it is 0
	brnz,pn	%o1, 1b			! old value nonzero: lock held, spin
	nop
	MEMBAR_ENTER			! acquire-side memory barrier
	retl
	nop
	SET_SIZE(spinlock_enter)
967
968
969 /*
970 * spinlock_exit(uint64_t *lock)
971 * For calling from C code. In asm code use the SPINLOCK_EXIT macro.
972 */
	ENTRY_NP(spinlock_exit)
	MEMBAR_EXIT			! release-side memory barrier
	stx	%g0, [%o0]		! clear the lock word
	retl
	nop
	SET_SIZE(spinlock_exit)
979
980
981 /*
982 * Get the stick value from the current strand
983 */
	ENTRY(c_get_stick)
	retl
	rd	STICK, %o0		! delay slot: return stick value in %o0
	SET_SIZE(c_get_stick)