Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / common / src / hv_common_cmds.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: hv_common_cmds.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)hv_common_cmds.s 1.3 07/05/03 SMI"
50
51/*
52 * Niagara-family common startup code
53 */
54
55#include <sys/asm_linkage.h>
56#include <sys/stack.h> /* For C environ : FIXME */
57#include <sys/htypes.h>
58#include <hprivregs.h>
59#include <sun4v/traps.h>
60#include <sparcv9/asi.h>
61#include <sun4v/asi.h>
62#include <asi.h>
63#include <hypervisor.h>
64#include <guest.h>
65#include <errs_common.h>
66#include <offsets.h>
67#include <vcpu.h>
68#include <sun4v/errs_defs.h>
69#include <debug.h>
70#include <util.h>
71#include <abort.h>
72#include <hvctl.h>
73
74
75/*
76 * This function handles the receipt of a command mondo from another
77 * strand. Should only get here if we know that a mondo is waiting.
 *
 * Dispatches on the HVM_CMD field of the received mondo. Each command
 * handler is entered with:
 *	%g1 = ptr to this strand's strand struct
 *	%g2 = ptr to the received command mondo
 * None of the handlers return here; an unrecognized command code
 * aborts the hypervisor.
78 */
79 ENTRY(handle_hvmondo)
80
81 /* Collect the pending mondo */
82 HVCALL(hvmondo_recv)
83 mov %g1, %g2 ! ptr to mondo
84 STRAND_STRUCT(%g1)
85
! Compare/branch chain: each branch's delay slot sets up the next
! comparison, so execution reaches the abort only when no command
! code matched.
86 ldx [%g2 + HVM_CMD], %g3
87 cmp %g3, HXCMD_SCHED_VCPU
88 be,pn %xcc, schedule_vcpu_cmd
89 cmp %g3, HXCMD_DESCHED_VCPU
90 be,pn %xcc, deschedule_vcpu_cmd
91 cmp %g3, HXCMD_STOP_VCPU
92 be,pn %xcc, stop_vcpu_cmd
93 cmp %g3, HXCMD_GUEST_SHUTDOWN
94 be,pt %xcc, shutdown_guest_cmd
95 cmp %g3, HXCMD_GUEST_PANIC
96 be,pt %xcc, panic_guest_cmd
97 cmp %g3, HXCMD_STOP_GUEST
98 be,pt %xcc, stop_guest_cmd
99 nop
100
! No handler matched the command code: unrecoverable.
101 HVABORT(-1, "Unknown HV mondo command")
102
103 SET_SIZE(handle_hvmondo)
104
105/*
106 * shutdown_guest_cmd
107 *
108 * Handler for the HV xcall to send a shutdown request to
109 * a particular vcpu. This does not return to the caller.
110 *
111 * %g1 = ptr to strand
112 * %g2 = ptr to command mondo
 *
 * Exits via retry into the restored vcpu context after queueing a
 * resumable shutdown erpt (see guest_shutdown below).
113 */
114 ENTRY_NP(shutdown_guest_cmd)
115
116#ifdef DEBUG
117 PRINT("shutdown_guest_cmd: strand=0x")
118 ldub [%g1 + STRAND_ID], %g3
119 PRINTX(%g3)
120 PRINT("\r\n")
121#endif /* DEBUG */
122
123 /*
124 * Processing of the shutdown request will occur
125 * on the vcpu specified in the mondo structure.
126 *
127 * By bypassing the strand work wheel and restoring
128 * the vcpu state directly, this assumes a one to
129 * one mapping between strands and vcpus. This will
130 * have to be fixed once multiple vcpus can be
131 * scheduled on the same strand.
132 */
133 ldx [%g2 + HVM_ARGS + HVM_GUESTCMD_VCPUP], %g3
134 SET_VCPU_STRUCT(%g3, %g4)
135
136 /*
137 * Restore the vcpu state. Since this will result
138 * in the loss of current register state, save the
139 * mondo arg into the strand structure.
140 */
! Mondo pointer (%g2) survives vcpu_state_restore on the strand's
! private push/pop stack.
141 STRAND_PUSH(%g2, %g3, %g4)
142 HVCALL(vcpu_state_restore)
143 STRAND_POP(%g2, %g3)
144
145 ! load the grace period timeout
146 ldx [%g2 + HVM_ARGS + HVM_GUESTCMD_ARG], %g1
147 HVCALL(guest_shutdown)
! Resume the restored guest context; never returns to handle_hvmondo.
148 retry
149
150 SET_SIZE(shutdown_guest_cmd)
151
152
153/*
154 * panic_guest_cmd
155 *
156 * Handler for the HV xcall to send a panic request to a
157 * particular vcpu. This does not return to the caller.
158 *
159 * %g1 = ptr to strand
160 * %g2 = ptr to command mondo
 *
 * Queues a non-resumable (forced panic) erpt on the target vcpu and
 * then delivers the non-resumable error trap to the guest.
161 */
162 ENTRY_NP(panic_guest_cmd)
163
164#ifdef DEBUG
165 PRINT("panic_guest_cmd: strand=0x")
166 ldub [%g1 + STRAND_ID], %g3
167 PRINTX(%g3)
168 PRINT("\r\n")
169#endif /* DEBUG */
170
171 /*
172 * Processing of the panic request will occur on
173 * the vcpu specified in the mondo structure.
174 *
175 * By bypassing the strand work wheel and restoring
176 * the vcpu state directly, this assumes a one to
177 * one mapping between strands and vcpus. This will
178 * have to be fixed once multiple vcpus can be
179 * scheduled on the same strand.
180 */
! NOTE(review): this uses HVM_SCHED_VCPUP while shutdown_guest_cmd
! uses HVM_ARGS + HVM_GUESTCMD_VCPUP for the same purpose -- the two
! offsets are presumably equal (union/overlay in the mondo struct);
! confirm against the mondo layout in the headers.
181 ldx [%g2 + HVM_ARGS + HVM_SCHED_VCPUP], %g3
182
183 ! restore the vcpu state
184 SET_VCPU_STRUCT(%g3, %g4)
185 HVCALL(vcpu_state_restore)
186
! guest_panic queues the erpt and sets the strand's NRPENDING flag.
187 HVCALL(guest_panic)
188
189 /*
190 * Send the trap to the guest. Clear the pending
191 * flag on the way out.
192 */
193 STRAND_STRUCT(%g1)
194 set STRAND_NRPENDING, %g2
! Delay slot stores 0 to strand->nrpending before the branch target
! executes; nonresumable_error_trap completes delivery to the guest.
195 ba nonresumable_error_trap
196 stx %g0, [%g1 + %g2]
197
198 /*NOTREACHED*/
199 SET_SIZE(panic_guest_cmd)
200
201
202/*
203 * guest_shutdown
204 *
205 * Queue up a resumable error packet on the current vcpu,
206 * requesting that the guest perform an orderly shutdown.
207 *
208 * %g1 = grace period for shutdown (seconds)
 *
 * Returns to the caller via HVRET (%g7 = return address, preserved
 * across the nested HVCALL on the strand push/pop stack).
209 */
210 ENTRY_NP(guest_shutdown)
211
/*
 * Carve a Q_EL_SIZE scratch erpt buffer directly out of the
 * instruction stream: branch over the .skip'd space while the
 * delay-slot "rd %pc" captures the address of the rd itself.
 * NOTE(review): this buffer lives in the text section and is shared
 * by any strand running this code -- presumably such requests are
 * serialized; confirm before reusing this pattern.
 */
212 ba,pt %xcc, 1f
213 rd %pc, %g2
214 .align 8
215 .skip Q_EL_SIZE
! %g2 holds the address of the rd instruction: step past its 4 bytes,
! then round up to the next 8-byte boundary (the .align 8 above
! guarantees that is where the .skip'd buffer starts).
2161: inc 4 + (8 - 1), %g2
217 andn %g2, 0x7, %g2 ! align
218
219 !! %g2 = sun4v erpt buffer
220
221 /*
222 * Fill in the only additional data in this erpt,
223 * the grace period before shutdown (seconds).
224 */
225 sth %g1, [%g2 + ESUN4V_G_SECS]
226
227 /*
228 * Fill in the generic parts of the erpt.
229 */
230 GEN_SEQ_NUMBER(%g4, %g5)
231 stx %g4, [%g2 + ESUN4V_G_EHDL]
232
! Timestamp the report with the system tick counter.
233 rd STICK, %g4
234 stx %g4, [%g2 + ESUN4V_G_STICK]
235
236 set EDESC_WARN_RESUMABLE, %g4
237 stw %g4, [%g2 + ESUN4V_EDESC]
238
239 set (ERR_ATTR_MODE(ERR_MODE_UNKNOWN) | EATTR_SECS), %g4
240 stw %g4, [%g2 + ESUN4V_ATTR]
241
! No faulting address / size / cpu id applies to a shutdown request.
242 stx %g0, [%g2 + ESUN4V_ADDR]
243 stw %g0, [%g2 + ESUN4V_SZ]
244 sth %g0, [%g2 + ESUN4V_G_CPUID]
245
! Save our own return address (%g7) around the nested HVCALL.
246 STRAND_PUSH(%g7, %g3, %g4)
247 HVCALL(queue_resumable_erpt)
248 STRAND_POP(%g7, %g3)
249
250 HVRET
251 SET_SIZE(guest_shutdown)
252
253
254/*
255 * guest_panic
256 *
257 * Queue up a non-resumable error packet on the current
258 * vcpu, requesting that the guest panic immediately.
 *
 * No arguments. Returns via HVRET (%g7 preserved across the nested
 * HVCALL on the strand push/pop stack). Sets strand->nrpending = 1
 * so the caller (or vbsc_rx path) knows delivery is outstanding.
259 */
260 ENTRY_NP(guest_panic)
261
/*
 * Same in-text scratch buffer trick as guest_shutdown: branch over a
 * Q_EL_SIZE .skip'd region while the delay-slot "rd %pc" captures
 * the address of the rd instruction.
 * NOTE(review): buffer is in the text section and shared between
 * strands -- presumably serialized; confirm before reuse.
 */
262 ba,pt %xcc, 1f
263 rd %pc, %g2
264 .align 8
265 .skip Q_EL_SIZE
! Step past the 4-byte rd instruction and round up to the 8-aligned
! start of the .skip'd buffer.
2661: inc 4 + (8 - 1), %g2
267 andn %g2, 0x7, %g2 ! align
268
269 !! %g2 = sun4v erpt buffer
270
271 /*
272 * Fill in the generic parts of the erpt
273 */
274 GEN_SEQ_NUMBER(%g4, %g5)
275 stx %g4, [%g2 + ESUN4V_G_EHDL]
276
! Timestamp the report with the system tick counter.
277 rd STICK, %g4
278 stx %g4, [%g2 + ESUN4V_G_STICK]
279
280 set EDESC_FORCED_PANIC, %g4
281 stw %g4, [%g2 + ESUN4V_EDESC]
282
283 set (ERR_ATTR_MODE(ERR_MODE_UNKNOWN)), %g4
284 stw %g4, [%g2 + ESUN4V_ATTR]
285
! No faulting address / size / cpu id applies to a forced panic.
286 stx %g0, [%g2 + ESUN4V_ADDR]
287 stw %g0, [%g2 + ESUN4V_SZ]
288 sth %g0, [%g2 + ESUN4V_G_CPUID]
289
! Save our own return address (%g7) around the nested HVCALL.
290 STRAND_PUSH(%g7, %g3, %g4)
291 HVCALL(queue_nonresumable_erpt)
292 STRAND_POP(%g7, %g3)
293
294 /*
295 * Set the pending flag used when the vbsc_rx
296 * handler calls this directly, i.e. when a HV
297 * xcall is not required.
298 */
299 STRAND_STRUCT(%g1)
300 set STRAND_NRPENDING, %g3
301 mov 1, %g4
302 stx %g4, [%g1 + %g3]
303
304 HVRET
305 SET_SIZE(guest_panic)
306
307
308/*
309 * We get here as a HV mondo X-call
310 * Our working assumption is that the currently running vcpu state
311 * (if any) has been stashed back into its vcpu structure, and so at
312 * the end of this operation we bail back to start_work.
313 *
314 * The argument in the strand x-call mail box is the pointer
315 * to the vcpu to schedule. Plus a set of optional parameters
316 * to setup in the vcpu struct. FIXME: these parameters should
317 * be being set by the sender not us...
318 *
319 * %g1 = ptr to strand
320 * %g2 = ptr to command mondo
 *
 * Writes the vcpu into its assigned scheduler slot on this strand
 * (arg = vcpu ptr, action = RUN_VCPU), then re-enters the work loop.
321 */
322 ENTRY(schedule_vcpu_cmd)
323 add %g2, HVM_ARGS, %g3
324
325 /* FIXME: validate args ! */
326 ldx [%g3 + HVM_SCHED_VCPUP], %g4 ! vcpu ptr
327 ! assert vcpup->strand == %g1
328 ldub [%g4 + CPU_STRAND_SLOT], %g5 ! %g5 = slot index assigned to vcpu
329
330#if DBG_SCHEDULE /* { */
331 PRINT("schedule vcpu @ 0x")
332 PRINTX(%g4)
333 PRINT(" id = 0x")
334 lduw [%g4 + CPU_RES_ID], %g6
335 PRINTX(%g6)
336 PRINT(" vid = 0x")
337 ldub [%g4 + CPU_VID], %g6
338 PRINTX(%g6)
339 PRINT(" in slot 0x")
340 PRINTX(%g5)
341 PRINT("\r\n")
342#endif /* } */
! %g3 = &strand->slot[slot_index] : base of slot array plus
! slot_index * SCHED_SLOT_SIZE.
343 mulx %g5, SCHED_SLOT_SIZE, %g3
344 add %g1, STRAND_SLOT, %g1
345 add %g1, %g3, %g3
! Arm the slot: arg = vcpu pointer, action = run this vcpu.
346 stx %g4, [%g3 + SCHED_SLOT_ARG]
347 mov SLOT_ACTION_RUN_VCPU, %g2
348 stx %g2, [%g3 + SCHED_SLOT_ACTION]
349 ba start_work
350 nop
351 SET_SIZE(schedule_vcpu_cmd)
352
353
354/*
355 * We get here as a HV mondo X-call
356 * Our working assumption is that the currently running vcpu state
357 * (if any) has been stashed back into its vcpu structure, and so
358 * at the end of this operation we bail back to start_work.
359 *
360 * The argument in the strand x-call mail box is the pointer
361 * to the vcpu to deschedule.
362 *
363 * %g1 = ptr to strand
364 * %g2 = ptr to command mondo
 *
 * Thin wrapper: extracts the vcpu pointer into %g4, calls
 * deschedule_vcpu to clear its scheduler slot, then re-enters the
 * work loop.
365 */
366 ENTRY(deschedule_vcpu_cmd)
367 add %g2, HVM_ARGS, %g3
368 ldx [%g3 + HVM_SCHED_VCPUP], %g4
369 HVCALL(deschedule_vcpu)
370 ba,a,pt %xcc, start_work
371 SET_SIZE(deschedule_vcpu_cmd)
372
373
374/*
375 * %g4 - vcpup
376 * Assumes running on localstrand to vcpu
377 * NOTE: called from deschedule_vcpu_cmd and hcall_mach_exit
 *
 * Clears the vcpu's scheduler slot on this strand: action is set to
 * NOP and the slot arg is poisoned with 1 (a misaligned pointer) so
 * any stale use of the arg traps immediately.
 * Returns via HVRET. Clobbers %g1, %g2, %g3, %g5 (%g6 too if
 * DBG_DESCHEDULE).
378 */
379 ENTRY(deschedule_vcpu)
380 STRAND_STRUCT(%g1)
381#if DEBUG /* { */
382 /* validate arg; vcpup->strand == this strand */
383 ldx [%g4 + CPU_STRAND], %g5
384 cmp %g5, %g1
385 beq,pt %xcc, 1f
386 nop
387
388 HVABORT(-1, "deschedule cpu - vcpu not scheduled on my strand")
3891:
390#endif /* } */
391
392 ldub [%g4 + CPU_STRAND_SLOT], %g5 ! %g5 = slot index to clear
393#if DBG_DESCHEDULE /* { */
! PRINT/PRINTX clobber %g7, so the HVCALL return address is parked
! in %g3 for the duration of the debug output.
394 mov %g7, %g3 /* preserve return addr */
395 PRINT("deschedule vcpu @ 0x")
396 PRINTX(%g4)
397 PRINT(" id = 0x")
398 lduw [%g4 + CPU_RES_ID], %g6
399 PRINTX(%g6)
400 PRINT(" vid = 0x")
401 ldub [%g4 + CPU_VID], %g6
402 PRINTX(%g6)
403 PRINT(" from slot 0x")
404 PRINTX(%g5)
405 PRINT("\r\n")
406 mov %g3, %g7 /* restore return addr */
407#endif /* } */
! %g3 = &strand->slot[slot_index]
408 mulx %g5, SCHED_SLOT_SIZE, %g3
409 add %g1, STRAND_SLOT, %g1
410 add %g1, %g3, %g3
411 set SLOT_ACTION_NOP, %g2
412 stx %g2, [%g3 + SCHED_SLOT_ACTION]
413 mov 1, %g2 ! force alignment trap
414 stx %g2, [%g3 + SCHED_SLOT_ARG]
415
416 HVRET
417 SET_SIZE(deschedule_vcpu)
418
419
420/*
421 * This operation terminates the execution of the specified vcpu.
422 *
423 * Our working assumption is that the currently
424 * running vcpu state (if any) has been stashed back into
425 * its vcpu structure, and so at the end of this operation
426 * we bail back to start_work.
427 *
428 * The argument in the strand x-call mail box is the pointer
429 * to the vcpu to deschedule.
430 *
431 * %g1 = ptr to strand
432 * %g2 = ptr to command mondo
433 */
434 ENTRY(stop_vcpu_cmd)
435 add %g2, HVM_ARGS, %g3
436
! desched_n_stop_vcpu expects %g1 = vcpu ptr, %g2 = strand ptr,
! so swap the incoming strand pointer into %g2 first.
437 mov %g1, %g2
438 ldx [%g3 + HVM_SCHED_VCPUP], %g1
439
440 HVCALL(desched_n_stop_vcpu)
441
442 /*
443 * We dont care about the current running state.
444 * It's over with. Just set the vcpu state and
445 * go and get more work.
446 */
447 wrpr %g0, 0, %tl
448 wrpr %g0, 0, %gl
449
! Poison the current-vcpu pointer with 1 (misaligned) so any stale
! dereference traps instead of silently using a dead vcpu.
450 mov 1, %g1
451 SET_VCPU_STRUCT(%g1, %g2) /* force a alignment trap */
452
453#if DBG_STOP
454 DEBUG_SPINLOCK_ENTER(%g1, %g2, %g3)
455 PRINT("Back to work\r\n")
456 DEBUG_SPINLOCK_EXIT(%g1)
457#endif
458 ba start_work
459 nop
460 SET_SIZE(stop_vcpu_cmd)
461
462
463/*
464 * desched_n_stop_vcpu
465 *
466 * Removes a vcpu from the corresponding strand slot. It then
467 * calls stop_vcpu to stop a virtual cpu and clear out associated
468 * state.
469 *
470 * Expects:
471 * %g1 : vcpu pointer
472 * %g2 : strand pointer
473 * Returns:
474 * %g1 : vcpu pointer
475 * Register Usage:
476 * %g1..%g6
477 * %g7 return address
478 */
479 ENTRY_NP(desched_n_stop_vcpu)
480
481 STRAND_PUSH(%g7, %g3, %g4) ! save return address
482
! Clear the vcpu's scheduler slot in-line (same pattern as
! deschedule_vcpu): action = NOP, arg poisoned with a misaligned 1.
483 ldub [%g1 + CPU_STRAND_SLOT], %g4
484 mulx %g4, SCHED_SLOT_SIZE, %g3
485 add %g2, STRAND_SLOT, %g5
486 add %g5, %g3, %g3
487 set SLOT_ACTION_NOP, %g2
488 stx %g2, [%g3 + SCHED_SLOT_ACTION]
489 mov 1, %g2 ! force alignment trap
490 stx %g2, [%g3 + SCHED_SLOT_ARG]
491
492#ifdef CONFIG_CRYPTO
493 /*
494 * Stop crypto
495 */
496 VCPU2GUEST_STRUCT(%g1, %g2)
497
498 ! %g1 = cpu struct
499 ! %g2 = guest struct
500 !
501 HVCALL(stop_crypto)
502#endif /* CONFIG_CRYPTO */
503
504 /*
505 * Clean up the (active) running state
506 */
507 HVCALL(stop_vcpu)
508
509 ! %g1 - vcpu
510 ! Default setup entry point for next time we start cpu
511 ! Strictly speaking we should not need to do this here since
512 ! there are only two ways a cpu can start from stopped
513 ! 1. as the boot cpu in which case we force the start address
514 ! 2. via a cpu_start API call in which case the start address
515 ! is set there.
516
! start_pc defaults to the vcpu's real trap base address plus the
! power-on-reset trap-table entry offset.
517 ldx [%g1 + CPU_RTBA], %g3
518 inc (TT_POR * TRAPTABLE_ENTRY_SIZE), %g3 ! Power-on-reset vector
519 stx %g3, [%g1 + CPU_START_PC]
520
521 STRAND_POP(%g7, %g2) ! retrieve return address
522 HVRET
523
524 SET_SIZE(desched_n_stop_vcpu)
525
526
527/*
528 * c_desched_n_stop_vcpu
529 *
530 * C Wrapper around desched_n_stop_vcpu(). Deschedules and
531 * stops the vcpu passed in as a parameter.
532 *
533 * Expects:
534 * %o0 : vcpu pointer
535 * Returns:
536 * nothing
 *
 * Callable from C: %g2..%g4 are saved/restored on the strand
 * push/pop stack around the HVCALL, since the assembly-side helper
 * clobbers globals the C environment may be using.
537 */
538 ENTRY(c_desched_n_stop_vcpu)
539 STRAND_PUSH(%g2, %g6, %g7)
540 STRAND_PUSH(%g3, %g6, %g7)
541 STRAND_PUSH(%g4, %g6, %g7)
542
! Marshal the C argument into the register convention expected by
! desched_n_stop_vcpu: %g1 = vcpu ptr, %g2 = its strand ptr.
543 mov %o0, %g1
544 VCPU2STRAND_STRUCT(%g1, %g2)
545 HVCALL(desched_n_stop_vcpu)
546
! Pops must mirror the pushes above, in reverse order.
547 STRAND_POP(%g4, %g6)
548 STRAND_POP(%g3, %g6)
549 STRAND_POP(%g2, %g6)
550
551 retl
552 nop
553 SET_SIZE(c_desched_n_stop_vcpu)
554
555
556/*
557 * stop_guest_cmd
558 *
559 * Called from xcall context usually from the control domain
560 * to exit a guest remotely. Assumes any running vcpu assigned
561 * to this strand already had its state saved so we can clobber
562 * all the registers.
563 *
564 * %g1 - pointer to the current strand
565 * %g2 - HV xcall command mondo pointer
 *
 * Builds a C environment, calls c_guest_exit(guestp,
 * GUEST_EXIT_MACH_EXIT), then re-enters the work loop.
566 */
567 ENTRY(stop_guest_cmd)
568 ldx [%g2 + HVM_ARGS + HVM_STOPGUEST_GUESTP], %g2
569
570 /*
571 * Save the guest parameter because all registers
572 * get clobbered when setting up the C environment.
573 */
574 STRAND_PUSH(%g2, %g3, %g4)
575
! setup_c_environ requires %gl == 0 and %tl == 0 on entry.
576 wrpr %g0, 0, %gl
577 wrpr %g0, 0, %tl
578 HVCALL(setup_c_environ)
579
! Pop the guest pointer straight into the first C argument register.
580 ! retrieve the pointer to the guest to stop
581 STRAND_POP(%o0, %g3)
582
! Second argument (%o1) is set in the call's delay slot.
583 call c_guest_exit
584 mov GUEST_EXIT_MACH_EXIT, %o1
585
586 ba,pt %xcc, start_work
587 nop
588
589 SET_SIZE(stop_guest_cmd)
590
591
592 /*
593 * Configures a basic C compatible environment
594 * based on the current vcpu setup.
595 * The only working assumption here is that coming in
596 * the caller has already set gl and tl to 0, so we do not
597 * have to worry about preserving the parameters.
 *
 * Resets the register-window state machine, masks interrupts
 * (PIL = 0xf), zeroes frame/return-address registers, and points
 * %sp at the top of this strand's private C stack (biased, with a
 * minimum frame reserved). Returns via HVRET. Clobbers %g1, %g2.
598 */
599 ENTRY(setup_c_environ)
! Fresh window state: all windows available to save, all clean,
! nothing to restore, no other-world windows, window 0 current.
600 wrpr %g0, NWINDOWS-2, %cansave
601 wrpr %g0, NWINDOWS-1, %cleanwin
602 wrpr %g0, 0, %canrestore
603 wrpr %g0, 0, %otherwin
604 wrpr %g0, 0, %cwp
605 wrpr %g0, 0, %wstate
! NOTE(review): two-operand "wr %g0, %y" relies on the assembler
! accepting it as shorthand for "wr %g0, %g0, %y" -- confirm the
! canonical three-operand form if this is ever ported.
606 wr %g0, %y
607 wrpr %g0, 0xf, %pil
608
609 ! Other stuff here ... pstate, gis etc. FIXME
610
611 ! Setup up the C stack
! %g1 = strand base + STRAND_STACK offset + (stack size - bias)
! = biased address just past the top of the strand's C stack.
612 STRAND_STRUCT(%g1)
613 set STRAND_STACK, %g2
614 add %g1, %g2, %g1
615 set STRAND_STACK_SIZE - STACK_BIAS, %g2
616 add %g1, %g2, %g1
617
! Null frame pointer / return addresses so a C backtrace terminates.
618 mov %g0, %i6
619 mov %g0, %i7
620 mov %g0, %o6
621 mov %g0, %o7
! Reserve a minimum aligned frame below the stack top.
622 add %g1, -SA(MINFRAME), %sp
623 HVRET
624 SET_SIZE(setup_c_environ)
625
626
/*
 * c_puts - C-callable wrapper around the HV puts routine.
 * %o0 = string pointer (moved to %g1, the register puts expects).
 * The branch's delay slot loads %g7 with the address of the rd
 * instruction itself; puts' HVRET then resumes at the following
 * retl, which returns to the C caller.
 */
627 ENTRY(c_puts)
628 mov %o0, %g1
629 ba puts
630 rd %pc, %g7
631 retl
632 nop
633 SET_SIZE(c_puts)
634
635
636 ! basic compare and swap
637 ! %o0 = address of 32 bit value
638 ! %o1 = value to compare against
639 ! %o2 = value to store
640 ! - returns:
641 ! %o0 = value stored in location
! (Per SPARC V9 CASA semantics %o2 always receives the previous
! memory contents; the store of the new value happens only when the
! old contents equaled %o1 -- so the return value is the old value,
! the standard CAS result.)

642 ENTRY(c_cas32)
643 casa [%o0]ASI_P, %o1, %o2
644 retl
645 mov %o2, %o0
646 SET_SIZE(c_cas32)
648
649
650 ! basic compare and swap
651 ! %o0 = address of 64 bit value
652 ! %o1 = value to compare against
653 ! %o2 = value to store
654 ! - returns:
655 ! %o0 = value stored in location
! (Per SPARC V9 CASXA semantics %o2 always receives the previous
! memory contents; the store of the new value happens only when the
! old contents equaled %o1 -- so the return value is the old value,
! the standard CAS result.)

656 ENTRY(c_cas64)
657 casxa [%o0]ASI_P, %o1, %o2
658 retl
659 mov %o2, %o0
660 SET_SIZE(c_cas64)
662
663
664 ! atomic swap ... compare and swap until we succeed.
665 ! %o0 = address of 64 bit value
666 ! %o1 = value to store
667 ! - returns:
668 ! %o0 = value stored in location
! (i.e. the value that was in the location before the swap: the loop
! reloads the current contents into %o2 and retries the CASXA until
! it observes no intervening store, then returns the old value.)

669 ENTRY(c_atomic_swap64)
6701:
671 mov %o1, %o3 ! %o3 = new value (CASXA overwrites it)
672 ldx [%o0], %o2 ! %o2 = snapshot of current contents
! CASXA stores %o3 only if memory still equals %o2; %o3 gets the
! old memory value either way.
673 casxa [%o0]ASI_P, %o2, %o3
674 cmp %o2, %o3 ! mismatch => another strand raced us
675 bne,pn %xcc, 1b
676 nop
677 retl
678 mov %o3, %o0
679 SET_SIZE(c_atomic_swap64)
681
682
683 ! Returns a pointer to the strand_t struct
684 ! for the strand we are currently executing on
685 ! %o0 = strand struct
! C-callable leaf: expands the STRAND_STRUCT macro directly into the
! C return register; no globals are disturbed.
686 ENTRY(c_mystrand)
687 STRAND_STRUCT(%o0)
688 retl
689 nop
690 SET_SIZE(c_mystrand)