Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / ontario / src / main.s
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: main.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do not alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)main.s 1.85 07/07/17 SMI"
50
51/*
52 * Niagara startup code
53 */
54
55#include <sys/asm_linkage.h>
56#include <sys/stack.h>
57#include <sys/htypes.h>
58#include <sparcv9/misc.h>
59#include <sparcv9/asi.h>
60#include <hprivregs.h>
61#include <asi.h>
62#include <traps.h>
63#include <sun4v/traps.h>
64#include <dram.h>
65#include <sun4v/mmu.h>
66#include <mmu.h>
67#include <sun4v/asi.h>
68#include <sun4v/queue.h>
69#include <devices/pc16550.h>
70#include <hypervisor.h>
71#include <clock.h>
72#include <guest.h>
73#include <strand.h>
74#include <offsets.h>
75#include <md.h>
76#include <vcpu.h>
77#include <sun4v/errs_defs.h>
78#include <errs_common.h>
79#include <cpu_errs.h>
80#include <config.h>
81#include <cyclic.h>
82#include <util.h>
83#include <abort.h>
84#include <hvctl.h>
85#include <debug.h>
86#include <fpga.h>
87#include <ldc.h>
88#include <iob.h>
89
90#define DBG_SCHEDULE 1
91#define DBG_DESCHEDULE 1
92#define DBG_STOP 1
93#define DBG_LAUNCH 0
94
95 ENTRY_NP(start_master)
96 ! save incoming arguments
97 mov %g1, %i0 ! membase
98 mov %g2, %i1 ! memsize
99 mov %g3, %i2 ! hypervisor description
100 mov %g4, %i3 ! strandstartset
101 mov %g5, %i4 ! total physical memory size
102
103 ! init scratch pad register to a known state
104 SET_VCPU_STRUCT(%g0, %g1)
105 SET_STRAND_STRUCT(%g0, %g1)
106
107#ifdef CONFIG_HVUART
108 ! initialize HV console UART
109 setx FPGA_UART_BASE, %g2, %g1
110 HVCALL(uart_init) ! clobbers %g1,%g2,%g3,%g7
111#endif
112
113 /*
114 * Determine if we're running in RAM or ROM
115 */
116 rd %pc, %g4
117 srlx %g4, 32, %g4 ! in rom?
118 cmp %g4, 0x80 ! bits <39:32>
119 blu,pt %xcc, .master_nocopy ! no, in ram already
120 nop
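/*
 * For illustration, the ROM-vs-RAM test above expressed as a C sketch
 * (the pc argument stands in for the "rd %pc" above; nothing below is
 * part of the real sources): the hypervisor decides whether it is still
 * executing out of the boot ROM by checking whether physical address
 * bits <39:32> of the current PC are 0x80 or above, which places it in
 * the ROM/IO space rather than in RAM.
 *
 *    #include <stdint.h>
 *    #include <stdbool.h>
 *
 *    static bool running_from_rom(uint64_t pc)
 *    {
 *        return (pc >> 32) >= 0x80;    // PA bits <39:32> in ROM/IO space
 *    }
 */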
121
122 /*
123 * Running from ROM
124 *
125 * Scrub the memory that we're going to copy ourselves
126 * into.
127 */
128 mov %i0, %g1
129 setx htraptable, %g7, %g2
130 setx _edata, %g7, %g3
131 brnz %g3, 0f
132 nop
133 setx _etext, %g7, %g3
1340:
135 ! align to next 64-byte boundary
136 inc (64 - 1), %g3
137 andn %g3, (64 - 1), %g3
138 sub %g3, %g2, %g2
139 HVCALL(memscrub)
140
141 /*
142 * Currently executing in ROM, copy to RAM
143 */
144 RELOC_OFFSET(%g1, %g5) ! %g5 = offset
145
146 mov %i0, %g2 ! %g2 = membase
147 setx htraptable, %g7, %g1
148 sub %g1, %g5, %g1
149 setx _edata, %g7, %g3
150 brnz %g3, 0f
151 nop
152 setx _etext, %g7, %g3
1530:
154 sub %g3, %g5, %g3
155
156 sub %g3, %g1, %g3
157 inc 7, %g3
158 andn %g3, 7, %g3
159 HVCALL(xcopy)
160
161 mov %i0, %g1 ! membase
162 mov %i1, %g2 ! memsize
163 mov %i2, %g3 ! hypervisor md address
164 mov %i3, %g4 ! strandstartset
165 mov %i4, %g5 ! total physical memory size
166
167 add %i0, (TT_POR * TRAPTABLE_ENTRY_SIZE), %g6 ! master offset
168 jmp %g6
169 nop
170
171.master_nocopy:
172 wrpr %g0, 1, %tl
173 wrpr %g0, 1, %gl
174 wrhpr %g0, (HPSTATE_ENB | HPSTATE_HPRIV), %hpstate
175 wrpr %g0, NWINDOWS - 2, %cansave
176 wrpr %g0, NWINDOWS - 2, %cleanwin
177 wrpr %g0, 0, %canrestore
178 wrpr %g0, 0, %otherwin
179 wrpr %g0, 0, %cwp
180 wrpr %g0, 0, %wstate
181
182 ! save parameters for memory scrub which is done later
183 mov %i0, %l0 ! membase
184 mov %i1, %l1 ! memsize
185 mov %i3, %l2 ! strandstartset
186 mov %i4, %l3 ! total physical memory size
187
188 RELOC_OFFSET(%g1, %g5) ! %g5 = offset
189 setx htraptable, %g3, %g1
190 sub %g1, %g5, %g1
191 wrhpr %g1, %htba
192
193 setx _edata, %g7, %g1
194 brnz %g1, 0f
195 nop
196 setx _etext, %g7, %g1
1970:
198 ! align to next 64-byte boundary
199 add %g1, (64 - 1), %g1
200 andn %g1, (64 - 1), %g1
201 sub %g1, %g5, %g1 ! Start address
202 add %i0, %i1, %g2 ! end address + 1
203 sub %g2, %g1, %g2 ! length = end+1 - start
204 HVCALL(memscrub)
205
206 RELOC_OFFSET(%g1, %g5) ! %g5 = offset
207 setx config, %g6, %g1
208 sub %g1, %g5, %g6 ! %g6 = global config
209
210 ! save the boot parameters in the global config structure
211 stx %i0, [%g6 + CONFIG_MEMBASE]
212 stx %i1, [%g6 + CONFIG_MEMSIZE]
213 stx %i3, [%g6 + CONFIG_STRAND_STARTSET]
214 stx %i4, [%g6 + CONFIG_PHYSMEMSIZE]
215
216 /*
217 * Find first strand, and use it as the default
218 * target of system interrupts.
219 *
220 * For now, we simply pick the lowest numbered functional strand
221 * as the host for SSI and error interrupts.
222 */
223 brnz %i3, 1f
224 nop
225 HVABORT(-1, "No live strands defined");
2261:
227 ! Find first bit set !
228 mov 0, %g1
2292:
230 srlx %i3, %g1, %g2
231 btst 1, %g2
232 beq,a,pt %xcc, 2b
233 inc %g1
234 sllx %g1, 1*INTRTGT_DEVSHIFT, %g2
235 sllx %g1, 2*INTRTGT_DEVSHIFT, %g1
236 or %g1, %g2, %g1
237 stx %g1, [%g6 + CONFIG_INTRTGT]
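/*
 * A C sketch of the interrupt-target selection above (INTRTGT_DEVSHIFT
 * is the real constant; the function and argument names are
 * illustrative): the index of the lowest set bit in strandstartset is
 * found by shifting, and that strand id is written into the device 1
 * and device 2 fields of config.intrtgt so SSI and error interrupts are
 * steered to it.  The caller has already aborted if the set is empty.
 *
 *    #include <stdint.h>
 *
 *    static uint64_t pick_intrtgt(uint64_t strandstartset, int devshift)
 *    {
 *        int id = 0;
 *        while (((strandstartset >> id) & 1) == 0)  // find first bit set
 *            id++;
 *        return ((uint64_t)id << (1 * devshift)) |
 *               ((uint64_t)id << (2 * devshift));
 *    }
 */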
238
239 mov %g6, %i0 ! %i0 - global config
240
241 stx %g5, [%i0 + CONFIG_RELOC]
242
243 ! Stash away the boot config's HV MD.
244 stx %i2, [%i0 + CONFIG_PARSE_HVMD]
245 mov %i2, %i4
246 ! %i4 - hypervisor description
247
248 setx guests, %g6, %g1
249 sub %g1, %g5, %i1
250 ! %i1 - guests base
251 stx %i1, [%i0 + CONFIG_GUESTS]
252
253 setx vcpus, %g6, %g1
254 sub %g1, %g5, %i2
255 ! %i2 - vcpu base
256 stx %i2, [%i0 + CONFIG_VCPUS]
257
258 setx strands, %g6, %g1
259 sub %g1, %g5, %i3
260 ! %i3 - strands base
261 stx %i3, [%i0 + CONFIG_STRANDS]
262
263 setx hv_ldcs, %g6, %g1
264 sub %g1, %g5, %g1
265 stx %g1, [%i0 + CONFIG_HV_LDCS]
266
267 setx sp_ldcs, %g6, %g1
268 sub %g1, %g5, %g1
269 stx %g1, [%i0 + CONFIG_SP_LDCS]
270
271 ! Perform some basic setup for this strand.
272 rd STR_STATUS_REG, %g3
273 srlx %g3, STR_STATUS_CPU_ID_SHIFT, %g3
274 and %g3, STR_STATUS_CPU_ID_MASK, %g3
275
276 ! %g3 = strand id
277
278 SET_VCPU_STRUCT(%g0, %g2)
279
280 set STRAND_SIZE, %g2
281 mulx %g3, %g2, %g4
282 ldx [%i0 + CONFIG_STRANDS], %g1
283 add %g1, %g4, %g1
284 SET_STRAND_STRUCT(%g1, %g2)
285 stx %i0, [%g1 + STRAND_CONFIGP]
286
287 ! initialize the strand mini-stack
288 stx %g0, [%g1 + STRAND_MINI_STACK + MINI_STACK_PTR]
289
290 PRINT("Alive and well ...\r\n")
291
292 PRINT_REGISTER("Strand start set", %l2)
293 PRINT_REGISTER("Total physical mem", %l3)
294
295#ifndef T1_FPGA
296#ifdef RESETCONFIG_BROKENTICK
297
298 /*
299 * The %tick registers on all strands are not properly
300 * synchronized coming out of reset. Use one of the JBUS
301 * performance counters as a common reference point for
302 * this task.
303 *
304 * As each strand is initialized, it reads a value from
305 * the counter and applies the appropriate scaling factor
306 * to convert it from JBUS cycles to a tick value in sync
307 * with all the other strands.
308 */
309
310 ! start counting JBUS cycles in perf counter 2
311 setx JBI_PERF_CTL, %g2, %g1
312 set (JBI_PERF1_EVT_OFF | JBI_PERF2_EVT_CYCLES), %g2
313 stx %g2, [%g1]
314
315 ! reset counter
316 setx JBI_PERF_COUNT, %g2, %g1
317 stx %g0, [%g1]
318 !! %g1 = JBI performance counter reg
319
320 ! calculate JBUS clock multiplier
321 setx CLK_BASE, %g3, %g2
322 ldx [%g2 + CLK_DIV_REG], %g2 ! %g2 = clock divider reg
323 and %g2, CLK_DIV_MASK, %g3 ! %g3 = CMP divisor
324 srlx %g2, CLK_DIV_JDIV_SHIFT, %g5
325 and %g5, CLK_DIV_MASK, %g5
326 sllx %g5, CLK_DIV_SCALE_SHIFT, %g5 ! %g5 = scaled JBUS divisor
327 udivx %g5, %g3, %g2
328 !! %g2 = JBUS clock multiplier
329
330 ! convert to ticks and set %tick
331 ldx [%g1], %g3 ! %g3 = JBUS cycle count
332 mulx %g3, %g2, %g3
333 srlx %g3, CLK_DIV_SCALE_SHIFT, %g3
334 wrpr %g3, %tick
335
336#endif /* RESETCONFIG_BROKENTICK */
337#endif /* ifndef T1_FPGA */
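/*
 * The %tick derivation above as a C sketch (register layout per
 * clock.h; the function and argument names are illustrative): the CMP
 * and JBUS clocks are derived from the same reference, so the ratio of
 * their dividers converts a JBUS cycle count into cpu ticks, with a
 * fixed-point scale factor to avoid losing precision in the integer
 * divide.
 *
 *    #include <stdint.h>
 *
 *    static uint64_t jbus_cycles_to_ticks(uint64_t jbus_count,
 *                                         uint64_t clk_div_reg,
 *                                         uint64_t div_mask,
 *                                         unsigned jdiv_shift,
 *                                         unsigned scale_shift)
 *    {
 *        uint64_t cmp_div  = clk_div_reg & div_mask;
 *        uint64_t jbus_div = (clk_div_reg >> jdiv_shift) & div_mask;
 *        uint64_t mult     = (jbus_div << scale_shift) / cmp_div;
 *        return (jbus_count * mult) >> scale_shift;
 *    }
 */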
338
339 ! Before we can start using C compiled PIC code
340 ! we have to adjust the GLOBAL_OFFSET_TABLE
341
342 setx _GLOBAL_OFFSET_TABLE_, %g7, %g1
343 setx _start_data, %g7, %g2
344 RELOC_OFFSET(%g7, %g3)
345 sub %g1, %g3, %g1
346 sub %g2, %g3, %g2
3471:
348 ldx [%g1], %g4
349 sub %g4, %g3, %g4
350 stx %g4, [%g1]
351 add %g1, 8, %g1
352 cmp %g1, %g2
353 blt,pt %xcc, 1b
354 nop
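/*
 * What the loop above does, as a C sketch (the linker symbols are real,
 * the pointer walk is illustrative): the hypervisor is built as PIC but
 * has just been copied to a load address that differs from its link
 * address by "reloc", so every absolute address stored in the GOT is
 * rebased before any compiled C code runs.
 *
 *    #include <stdint.h>
 *
 *    static void relocate_got(uint64_t *got, uint64_t *got_end,
 *                             uint64_t reloc)
 *    {
 *        for (uint64_t *p = got; p < got_end; p++)
 *            *p -= reloc;             // link address -> load address
 *    }
 *
 *    // usage: relocate_got(&_GLOBAL_OFFSET_TABLE_, &_start_data, reloc)
 *    // with both bounds already rebased, exactly as above.
 */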
355
356#ifndef T1_FPGA
357 PRINT("setup iob\r\n");
358 HVCALL(setup_iob)
359
360 PRINT("setup jbi\r\n");
361 HVCALL(setup_jbi)
362#endif /* ifndef T1_FPGA */
363
364#ifdef CONFIG_VBSC_SVC
365 PRINT("Sending HV start message to vbsc\r\n")
366 HVCALL(vbsc_hv_start)
367#endif
368
369#ifdef CONFIG_FIRE
370 PRINT("setup fire\r\n")
371 HVCALL(setup_fire)
372#endif
373
374 ! Scrub all of memory, except for the hypervisor.
375 ! This starts all other strands.
376 STRAND_STRUCT(%g1)
377 STRAND2CONFIG_STRUCT(%g1, %i0)
378 HVCALL(scrub_all_memory)
379
380 ! Setup and run the initial C environment
381 wrpr %g0, 0, %gl
382 wrpr %g0, 0, %tl
383 HVCALL(setup_c_environ)
384 call c_start
385 nop
386
387 ! Recover and run the old initialization code
388
389 STRAND_STRUCT(%g1)
390 STRAND2CONFIG_STRUCT(%g1, %i0)
391 ldx [%i0 + CONFIG_GUESTS], %i1
392 ldx [%i0 + CONFIG_VCPUS], %i2
393
394 /*
395 * Setup everything else
396 */
397 PRINT("setup everything else\r\n");
398
399 /*
400 * Enable JBI error interrupts and clear SSIERROR
401 * mask (%g1 = 1)
402 */
403 setx JBI_INTR_ONLY_ERRS, %g2, %g1
404 mov 1, %g2
405 HVCALL(setup_jbi_err_interrupts)
406
407 /*
408 * The SSI interrupts are generated by the FPGA, which has a level
409 * output, while the SSI input on N1 is edge triggered.
410 * So if an interrupt is still pending from the FPGA while
411 * the HV is reset, N1 never sees an edge transition from the
412 * FPGA, and the interrupt is never taken by N1.
413 * To work around that, we fake a pending interrupt for N1 so that the
414 * interrupt service routine is run, anything that may be pending
415 * is cleared appropriately, and the FPGA is serviced correctly.
416 * Ugh!
417 */
418#ifdef CONFIG_FPGA
419 HVCALL(fake_ssiirq)
420#endif
421
422#ifdef CONFIG_SVC
423 /* initialize the service channel */
424 call c_svc_init
425 nop
426#endif /* CONFIG_SVC */
427
428 PRINT("Setting remaining details\r\n")
429
430#ifndef T1_FPGA
431
432 /*
433 * Setup the Error Steer & Start the Polling Daemon:
434 */
435 setx L2_CONTROL_REG, %g1, %g4
436 ldx [%g4], %g3
437 setx (NSTRANDS -1) << L2_ERRORSTEER_SHIFT, %g1, %g2
438 andn %g3, %g2, %g3 ! remove current
439 rd STR_STATUS_REG, %g1 ! this cpu
440 srlx %g1, STR_STATUS_CPU_ID_SHIFT, %g1 ! right justify
441 sllx %g1, L2_ERRORSTEER_SHIFT, %g1 ! position for CReg
442 and %g1, %g2, %g1 ! mask
443 or %g3, %g1, %g3 ! insert
444 stx %g3, [%g4] ! set to this cpu
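/*
 * The error-steer update above, sketched in C (L2_ERRORSTEER_SHIFT and
 * NSTRANDS are the real constants; the function name is illustrative):
 * the ERRORSTEER field of the L2 control register is replaced with this
 * strand's id so that L2/DRAM error interrupts are delivered to the
 * strand that runs the error poll daemon.
 *
 *    #include <stdint.h>
 *
 *    static uint64_t l2_steer_to(uint64_t l2ctl, unsigned my_strand,
 *                                unsigned nstrands, unsigned steer_shift)
 *    {
 *        uint64_t mask = (uint64_t)(nstrands - 1) << steer_shift;
 *        l2ctl &= ~mask;                               // remove current
 *        l2ctl |= ((uint64_t)my_strand << steer_shift) & mask;
 *        return l2ctl;                    // caller writes this back
 *    }
 */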
445
446 /*
447 * Initialize the poll daemon cyclic time.
448 */
449 PRINT("Start error poll daemon\r\n")
450 HVCALL(err_poll_daemon_start) ! start the daemon
451#endif /* ifndef T1_FPGA */
452
453 /*
454 * FIXME: Start heartbeat for the control domain.
455 * Eventually, this will need to be aware of multiple
456 * guests.
457 */
458 PRINT("Start heart beat for control domain\r\n")
459 HVCALL(heartbeat_enable)
460
461 /*
462 * Final cleanup before we can consider the hypervisor truly
463 * running.
464 */
465
466 DEBUG_SPINLOCK_ENTER(%g1, %g2, %g3)
467
468#ifndef T1_FPGA
469 /*
470 * Ensure all zeroed memory is flushed from the L2$
471 */
472 PRINT_NOTRAP("Flush the L2 cache\r\n");
473 HVCALL(l2_flush_cache)
474
475#ifdef RESETCONFIG_ENABLEHWSCRUBBERS
476 PRINT_NOTRAP("Enable L2 and DRAM HW scrubbers\r\n");
477 HVCALL(enable_hw_scrubbers)
478#endif
479
480 PRINT_NOTRAP("Clear error status registers\r\n");
481 HVCALL(clear_error_status_registers)
482#endif /* ifndef T1_FPGA */
483
484/*
485 * XXXLDOMS: Disabled due to an intermittent INTACK TIMEOUT
486 * being seen.
487 * Complete hack; revisit and fix before FW putback.
488 */
489#if 0 /* FIXME */
490 /*
491 * Enable JBI Interrupt timeout errors before entering the guest
492 * Don't clear the SSIERR mask bit (%g2 = 0) as we might already
493 * have a pending JBI error interrupt and we don't want to lose
494 * it.
495 */
496 set JBI_INTR_TO, %g1
497 clr %g2
498 HVCALL(setup_jbi_err_interrupts)
499#endif
500
501 DEBUG_SPINLOCK_EXIT(%g1)
502
503#if defined(CONFIG_SVC) && defined(CONFIG_VBSC_SVC)
504 PRINT("Sending guest start message to vbsc\r\n")
505
506 call c_vbsc_guest_start
507 mov 0, %o0 ! ID of guest started
508#endif /* defined(CONFIG_SVC) && defined(CONFIG_VBSC_SVC) */
509
510 ba,a start_work
511 nop
512 SET_SIZE(start_master)
513
514
515
516 ENTRY_NP(start_slave)
517 mov %g1, %i0 ! membase
518
519 ! init scratch pad registers to a known state
520 SET_VCPU_STRUCT(%g0, %g4)
521 SET_STRAND_STRUCT(%g0, %g4)
522
523 rd %pc, %g4
524 srlx %g4, 32, %g4 ! in rom?
525 cmp %g4, 0x80 ! bits <39:32>
526 blu,pt %xcc, 1f ! no, in ram already
527 nop
528 add %i0, (TT_POR * TRAPTABLE_ENTRY_SIZE) + 0x10, %g4 ! slave offset
529 jmp %g4 ! goto ram traptable
530 nop
5311:
532 wrhpr %i0, %htba
533
534 ! Setup slave scratchpad for own identity
535
536.reloc2:
537 rd %pc, %g1
538 setx .reloc2, %g3, %g2
539 sub %g2, %g1, %g3 ! %g3 = offset
540 setx config, %g4, %g2
541 sub %g2, %g3, %g2
542 ! %g2 = &config
543
544 rd STR_STATUS_REG, %g1
545 srlx %g1, STR_STATUS_CPU_ID_SHIFT, %g1
546 and %g1, STR_STATUS_CPU_ID_MASK, %i3
547 ! %i3 = current cpu id
548
549 ! Set up the scratchpad registers
550
551 ldx [%g2 + CONFIG_STRANDS], %i2
552 set STRAND_SIZE, %g1
553 mulx %g1, %i3, %g1
554 add %i2, %g1, %i2
555 SET_STRAND_STRUCT(%i2, %g1)
556
557 SET_VCPU_STRUCT(%g0, %g1)
558
559 ! initialize the strand mini-stack
560 ! FIXME: should already be done
561 stx %g0, [%i2 + STRAND_MINI_STACK + MINI_STACK_PTR]
562
563 ! save &config on mini-stack since it cannot be retrieved
564 ! via CONFIG_STRUCT() until the master has run c_start()
565 STRAND_PUSH(%g2, %g3, %g4)
566
567 ! Get us a sane tl & gl and out of red state asap
568 wrpr %g0, 0, %gl
569 wrpr %g0, 0, %tl
570 wrhpr %g0, (HPSTATE_ENB | HPSTATE_HPRIV), %hpstate
571 wrpr %g0, NWINDOWS - 2, %cansave
572 wrpr %g0, NWINDOWS - 2, %cleanwin
573 wrpr %g0, 0, %canrestore
574 wrpr %g0, 0, %otherwin
575 wrpr %g0, 0, %cwp
576 wrpr %g0, 0, %wstate
577
578 STRAND_POP(%g4, %g3) ! restore %g4 = &config
579
580#ifndef T1_FPGA
581#ifdef RESETCONFIG_BROKENTICK
582
583 /*
584 * Synchronize the %tick register with all other
585 * strands. One of the JBUS performance counters
586 * is used as a common reference point to calculate
587 * an appropriate tick value. The initialization
588 * of the counter has already been performed by
589 * the master.
590 */
591
592 ! calculate JBUS clock multiplier
593 setx CLK_BASE, %g3, %g2
594 ldx [%g2 + CLK_DIV_REG], %g2 ! %g2 = clock divider reg
595 and %g2, CLK_DIV_MASK, %g3 ! %g3 = CMP divisor
596 srlx %g2, CLK_DIV_JDIV_SHIFT, %g5
597 and %g5, CLK_DIV_MASK, %g5
598 sllx %g5, CLK_DIV_SCALE_SHIFT, %g5 ! %g5 = scaled JBUS divisor
599 udivx %g5, %g3, %g2
600 !! %g2 = JBUS clock multiplier
601
602 ! convert to ticks and set %tick
603 setx JBI_PERF_COUNT, %g3, %g1
604 ldx [%g1], %g3 ! %g3 = jbus cycle count
605 mulx %g3, %g2, %g3
606 srlx %g3, CLK_DIV_SCALE_SHIFT, %g3
607 wrpr %g3, %tick
608
609#endif /* RESETCONFIG_BROKENTICK */
610#endif /* ifndef T1_FPGA */
611
612 /* Slave now does its bit of the memory scrubbing */
613#if defined(CONFIG_FPGA) || defined(T1_FPGA)
614 STRAND_STRUCT(%g1)
615 set STRAND_SCRUB_SIZE, %g3
616 ldx [%g1 + %g3], %g2
617 set STRAND_SCRUB_BASEPA, %g3
618 ldx [%g1 + %g3], %g1
619
620 HVCALL(memscrub)
621
622 STRAND_STRUCT(%g1)
623 ldub [%g1 + STRAND_ID], %i3
624 mov 1, %i0
625 sllx %i0, %i3, %i0
626 add %g4, CONFIG_SCRUB_SYNC, %g4
6271:
628 ldx [ %g4 ], %g2
629 andn %g2, %i0, %g3
630 casx [ %g4 ], %g2, %g3
631 cmp %g2, %g3
632 bne,pt %xcc, 1b
633 nop
634#endif /* if defined(CONFIG_FPGA) || defined(T1_FPGA) */
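/*
 * The scrub handshake above in C form (a sketch; the casx retry loop is
 * what the assembly does, the GCC atomic builtin is just one way to
 * express it): each slave clears its own bit in config.scrub_sync once
 * its slice has been scrubbed, and the master spins in
 * scrub_all_memory() until the mask reaches zero.
 *
 *    #include <stdint.h>
 *
 *    static void scrub_done(volatile uint64_t *sync_mask, unsigned id)
 *    {
 *        uint64_t bit = 1ULL << id;
 *        uint64_t old, new_val;
 *        do {
 *            old = *sync_mask;
 *            new_val = old & ~bit;
 *        } while (!__sync_bool_compare_and_swap(sync_mask, old, new_val));
 *    }
 */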
635
636#ifndef T1_FPGA
637 HVCALL(clear_error_status_registers)
638#endif /* ifndef T1_FPGA */
639
640 ba,a,pt %xcc, start_work
641 nop
642 SET_SIZE(start_slave)
643
644
645 !
646 ! The main work section for each CPU strand.
647 !
648 ! We basically look for things to do in the strand
649 ! structure's work wheel. If we can find nothing to
650 ! do there, we simply suspend the strand and wait
651 ! for HV mondos which would request this strand to
652 ! add or remove something from its work wheel.
653 !
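/*
 * The slot-scanning logic below, sketched in C (SLOT_ACTION_* and
 * NUM_SCHED_SLOTS mirror the real definitions; the struct layouts and
 * helper declarations are simplified for illustration): each strand
 * walks its wheel of schedule slots, launches the vcpu named by the
 * first RUN_VCPU slot it finds, and if a whole pass turns up nothing
 * but NOP slots it parks in hvmondo_wait() until another strand sends
 * it work.
 *
 *    #include <stdint.h>
 *
 *    struct vcpu;
 *    extern void launch_vcpu(struct vcpu *v);      // does not return
 *    extern void hvmondo_wait(void);
 *    extern void handle_hvmondo(void);
 *
 *    struct sched_slot { uint64_t action, arg; };
 *    struct strand {
 *        uint16_t current_slot;
 *        struct sched_slot slot[NUM_SCHED_SLOTS];
 *    };
 *
 *    static void work_loop(struct strand *s)
 *    {
 *        for (;;) {
 *            for (int n = 0; n < NUM_SCHED_SLOTS; n++) {
 *                struct sched_slot *sl = &s->slot[s->current_slot];
 *                if (sl->action == SLOT_ACTION_RUN_VCPU)
 *                    launch_vcpu((struct vcpu *)(uintptr_t)sl->arg);
 *                // SLOT_ACTION_NOP: advance, wrapping at the end
 *                s->current_slot = (s->current_slot + 1) % NUM_SCHED_SLOTS;
 *            }
 *            hvmondo_wait();      // nothing to do: sleep for a mondo
 *            handle_hvmondo();
 *        }
 *    }
 */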
654
655 ENTRY_NP(start_work)
656 !
657 ! This loop works through the schedule list looking for
658 ! something to do.
659 ! If an entire pass is made without an action, then we
660 ! simply go to sleep waiting for an X-call mondo.
661 !
662 mov 0, %g4
663.work_loop:
664 STRAND_STRUCT(%g1)
665 lduh [%g1 + STRAND_CURRENT_SLOT], %g2
666 mulx %g2, SCHED_SLOT_SIZE, %g3
667 add %g1, %g3, %g3
668 add %g3, STRAND_SLOT, %g3
669
670 ldx [%g3 + SCHED_SLOT_ACTION], %g6
671 cmp %g6, SLOT_ACTION_RUN_VCPU
672 be,a,pt %xcc, launch_vcpu
673 ldx [%g3 + SCHED_SLOT_ARG], %g1 ! get arg in annulled ds
674 cmp %g6, SLOT_ACTION_NOP
675 be,pt %xcc, 1f
676 nop
677
678 HVABORT(-1, "Illegal slot code")
6791:
680 inc %g2
681 cmp %g2, NUM_SCHED_SLOTS
682 move %xcc, %g0, %g2
683 sth %g2, [%g1 + STRAND_CURRENT_SLOT]
684 inc %g4
685 cmp %g4, NUM_SCHED_SLOTS
686 bne,pt %xcc, .work_loop
687 nop
688
689 ! OK, nothing found to do; wait for a wake-up call
690
691 /* Wait for a HVXCALL PYN */
692 HVCALL(hvmondo_wait)
693
694 ba,pt %xcc, handle_hvmondo
695 nop
696 SET_SIZE(start_work)
697
698/*
699 * stop_vcpu
700 *
701 * Stop a virtual cpu and tear down
702 * all associated state.
703 * Resets it so that, if started again, it will have a clean state;
704 * associated interrupts and memory mappings are unconfigured.
705 *
706 * NOTE: we go to some lengths to NOT get the vcpup from the
707 * scratchpad registers so we can call this even when the vcpu
708 * is not currently active.
709 *
710 * Expects:
711 * %g1 : vcpu pointer
712 * Returns:
713 * %g1 : vcpu pointer
714 * Register Usage:
715 * %g1..%g6
716 * %g7 return address
717 */
718 ENTRY_NP(stop_vcpu)
719
720 VCPU2GUEST_STRUCT(%g1, %g2)
721
722#ifdef DEBUG
723 brnz %g2, 1f ! paranoia. expect this to be nz
724 nop
725 HVABORT(-1, "vcpu has no assigned guest")
7261:
727#endif
728
729 !
730 ! Save the vcpu ptr - we need it again later
731 !
732 STRAND_PUSH(%g1, %g3, %g4)
733
734 !
735 ! Remove the strand's permanent mappings
736 !
737 add %g2, GUEST_PERM_MAPPINGS_LOCK, %g3
738 SPINLOCK_ENTER(%g3, %g4, %g5)
739
740 ! Discover the bit for this cpu in the cpuset
741 ldub [%g1 + CPU_VID], %g3
742 and %g3, MAPPING_XWORD_MASK, %g5
743 mov 1, %g4
744 sllx %g4, %g5, %g4
745 srlx %g3, MAPPING_XWORD_SHIFT, %g5
746 sllx %g5, MAPPING_XWORD_BYTE_SHIFT_BITS, %g5 ! offset into xword array
747 add %g2, GUEST_PERM_MAPPINGS + GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1), %g2
748 mov -(GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1)), %g3
749
7501:
751 add %g3, %g2, %g1 ! Ptr to this perm mapping
752 add %g1, %g5, %g1 ! Xword in a specific cpu set
753 ! Unset bit fields for this cpu
754 ldx [ %g1 + MAPPING_ICPUSET ], %g6
755 andn %g6, %g4, %g6
756 stx %g6, [%g1 + MAPPING_ICPUSET]
757 ldx [ %g1 + MAPPING_DCPUSET ], %g6
758 andn %g6, %g4, %g6
759 stx %g6, [%g1 + MAPPING_DCPUSET]
760
761 ! If entry is completely null, invalidate entry
762 mov MAPPING_XWORD_SIZE*(NVCPU_XWORDS-1), %g1
7632:
764 add %g3, %g2, %g6 ! Grr out of registers
765 add %g6, %g1, %g6
766 ldx [%g6 + MAPPING_ICPUSET], %g6
767 brnz %g6, 3f
768 add %g3, %g2, %g6 ! Grr out of registers
769 add %g6, %g1, %g6
770 ldx [%g6 + MAPPING_DCPUSET], %g6
771 brnz %g6, 3f
772 nop
773 brgz,pt %g1, 2b
774 sub %g1, MAPPING_XWORD_SIZE, %g1
775
776 add %g3, %g2, %g6 ! Grr out of registers
777 stx %g0, [%g6 + MAPPING_TTE] ! Invalidate TTE first
778 stx %g0, [%g6 + MAPPING_VA]
7793:
780 brlz,pt %g3, 1b
781 add %g3, GUEST_PERM_MAPPINGS_INCR, %g3
782
783 membar #Sync ! needed ?
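/*
 * A C sketch of the permanent-mapping teardown just performed (field
 * names follow the MAPPING_* offsets; the struct layout shown is
 * simplified): the stopping vcpu's bit is cleared from the insn and
 * data cpusets of every permanent mapping of its guest, and a mapping
 * whose cpusets become completely empty is invalidated, TTE first and
 * then VA.
 *
 *    #include <stdint.h>
 *    #include <stdbool.h>
 *
 *    struct perm_mapping {
 *        uint64_t va;
 *        uint64_t tte;
 *        uint64_t icpuset[NVCPU_XWORDS];
 *        uint64_t dcpuset[NVCPU_XWORDS];
 *    };
 *
 *    static void perm_unmap_vcpu(struct perm_mapping *map, int nmaps,
 *                                unsigned vid)
 *    {
 *        unsigned word = vid / 64, bit = vid % 64;
 *        for (int i = 0; i < nmaps; i++) {
 *            map[i].icpuset[word] &= ~(1ULL << bit);
 *            map[i].dcpuset[word] &= ~(1ULL << bit);
 *
 *            bool live = false;
 *            for (int w = 0; w < NVCPU_XWORDS; w++)
 *                live |= (map[i].icpuset[w] | map[i].dcpuset[w]) != 0;
 *            if (!live) {
 *                map[i].tte = 0;       // invalidate the TTE first
 *                map[i].va = 0;
 *            }
 *        }
 *    }
 */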
784
785 !
786 ! demap all unlocked tlb entries
787 !
788 set TLB_DEMAP_ALL_TYPE, %g3
789 stxa %g0, [%g3]ASI_IMMU_DEMAP
790 stxa %g0, [%g3]ASI_DMMU_DEMAP
791
792 membar #Sync ! needed ?
793
794 ! Reload guest and cpu struct pointers
795 STRAND_POP(%g1, %g2)
796 VCPU2GUEST_STRUCT(%g1, %g2)
797 add %g2, GUEST_PERM_MAPPINGS_LOCK, %g3
798 SPINLOCK_EXIT(%g3)
799
800
801 !
802 ! remove this cpu as the target of any ldc interrupts
803 !
804 set GUEST_LDC_ENDPOINT, %g3
805 add %g2, %g3, %g3
806 set (GUEST_LDC_ENDPOINT_INCR * MAX_LDC_CHANNELS), %g5
807 add %g3, %g5, %g4
808 ! %g3 = ldc endpoint array base address
809 ! %g4 = current offset into array
810
811.next_ldc:
812 sub %g4, GUEST_LDC_ENDPOINT_INCR, %g4
813 cmp %g4, %g3
814 bl %xcc, .ldc_disable_loop_done
815 nop
816
817 ldub [%g4 + LDC_IS_LIVE], %g5
818 brz %g5, .next_ldc
819 nop
820
821 ! %g1 = the vcpu to stop
822
823 !
824 ! Only clear out the Q CPU so that no interrupts
825 ! will be targeted to this CPU. The LDC channel is
826 ! still live and incoming packets will still be
827 ! queued up.
828 !
829 ldx [%g4 + LDC_TX_MAPREG + LDC_MAPREG_CPUP], %g5
830 cmp %g5, %g1
831 bne %xcc, .check_rx
832 nop
833 stx %g0, [%g4 + LDC_TX_MAPREG + LDC_MAPREG_CPUP]
834.check_rx:
835 ldx [%g4 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g5
836 cmp %g5, %g1
837 bne %xcc, .next_ldc
838 nop
839 stx %g0, [%g4 + LDC_RX_MAPREG + LDC_MAPREG_CPUP]
840
841 ba .next_ldc
842 nop
843
844.ldc_disable_loop_done:
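/*
 * Equivalent C sketch of the LDC loop above (struct layout simplified;
 * field names follow the LDC_* offsets): only the per-channel interrupt
 * target is cleared, the channel itself stays live so incoming packets
 * continue to be queued.
 *
 *    struct vcpu;
 *    struct ldc_mapreg { struct vcpu *cpup; };
 *    struct ldc_endpoint {
 *        unsigned char is_live;
 *        struct ldc_mapreg tx_mapreg, rx_mapreg;
 *    };
 *
 *    static void ldc_untarget_vcpu(struct ldc_endpoint *ep, int nchannels,
 *                                  struct vcpu *v)
 *    {
 *        for (int i = nchannels - 1; i >= 0; i--) {
 *            if (!ep[i].is_live)
 *                continue;
 *            if (ep[i].tx_mapreg.cpup == v)
 *                ep[i].tx_mapreg.cpup = 0;
 *            if (ep[i].rx_mapreg.cpup == v)
 *                ep[i].rx_mapreg.cpup = 0;
 *        }
 *    }
 */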
845
846 ! FIXME: must cancel device interrupts targeted at this cpu
847 ! HOW?
848
849 ! FIXME: Do we have to do all this, or does it happen on
850 ! the way back in when the cpu is started again?
851
852 stx %g0, [%g1 + CPU_MMU_AREA_RA] ! erase remaining info
853 stx %g0, [%g1 + CPU_MMU_AREA]
854 stx %g0, [%g1 + CPU_TTRACEBUF_RA]
855 stx %g0, [%g1 + CPU_TTRACEBUF_PA]
856 stx %g0, [%g1 + CPU_TTRACEBUF_SIZE]
857 stx %g0, [%g1 + CPU_NTSBS_CTX0]
858 stx %g0, [%g1 + CPU_NTSBS_CTXN]
859
860 ! Unconfig all the interrupt and error queues
861 stx %g0, [%g1 + CPU_ERRQNR_BASE]
862 stx %g0, [%g1 + CPU_ERRQNR_BASE_RA]
863 stx %g0, [%g1 + CPU_ERRQNR_SIZE]
864 stx %g0, [%g1 + CPU_ERRQNR_MASK]
865
866 mov ERROR_NONRESUMABLE_QUEUE_HEAD, %g3
867 stxa %g0, [%g3]ASI_QUEUE
868 mov ERROR_NONRESUMABLE_QUEUE_TAIL, %g3
869 stxa %g0, [%g3]ASI_QUEUE
870
871 stx %g0, [%g1 + CPU_ERRQR_BASE]
872 stx %g0, [%g1 + CPU_ERRQR_BASE_RA]
873 stx %g0, [%g1 + CPU_ERRQR_SIZE]
874 stx %g0, [%g1 + CPU_ERRQR_MASK]
875
876 mov ERROR_RESUMABLE_QUEUE_HEAD, %g3
877 stxa %g0, [%g3]ASI_QUEUE
878 mov ERROR_RESUMABLE_QUEUE_TAIL, %g3
879 stxa %g0, [%g3]ASI_QUEUE
880
881 stx %g0, [%g1 + CPU_DEVQ_BASE]
882 stx %g0, [%g1 + CPU_DEVQ_BASE_RA]
883 stx %g0, [%g1 + CPU_DEVQ_SIZE]
884 stx %g0, [%g1 + CPU_DEVQ_MASK]
885
886 mov DEV_MONDO_QUEUE_HEAD, %g3
887 stxa %g0, [%g3]ASI_QUEUE
888 mov DEV_MONDO_QUEUE_TAIL, %g3
889 stxa %g0, [%g3]ASI_QUEUE
890
891 stx %g0, [%g1 + CPU_CPUQ_BASE]
892 stx %g0, [%g1 + CPU_CPUQ_BASE_RA]
893 stx %g0, [%g1 + CPU_CPUQ_SIZE]
894 stx %g0, [%g1 + CPU_CPUQ_MASK]
895
896 mov CPU_MONDO_QUEUE_HEAD, %g3
897 stxa %g0, [%g3]ASI_QUEUE
898 mov CPU_MONDO_QUEUE_TAIL, %g3
899 stxa %g0, [%g3]ASI_QUEUE
900
901
902 ! FIXME
903 ! just an off-the-cuff list
904 ! what else of this cpu struct should be cleared/cleaned?
905 !
906 ! FIXME: All this stuff goes away if we call reset_vcpu_state
907 ! in reconf.c - except that maybe we do this in startvcpu instead ?
908
909 ! indicate cpu is unconfigured
910 mov CPU_STATE_STOPPED, %g3
911 ldx [%g1 + CPU_STATUS], %g4 ! do not change status to
912 cmp %g4, CPU_STATE_ERROR ! STATE_STOPPED if the CPU
913 bne,a,pn %xcc, 1f ! is in error
914 stx %g3, [%g1 + CPU_STATUS]
915 membar #Sync
9161:
917 HVRET
918 SET_SIZE(stop_vcpu)
919
920
921 !
922 ! Enter from start_work loop
923 ! Expects no register setups (except hv scratchpads)
924 ! Provides register setups for master_start
925 !
926 ! Argument in %g1 points to vcpu struct
927 !
928
929 ENTRY_NP(launch_vcpu)
930
931 /*
932 * quick set of sanity checks.
933 */
934#ifdef DEBUG
935 /* is it assigned to this strand ? */
936 STRAND_STRUCT(%g2)
937 ldx [%g1 + CPU_STRAND], %g3
938 cmp %g2, %g3
939 be,pt %xcc, 1f
940 nop
941
942 HVABORT(-1, "Scheduled vcpu not assigned to this strand")
9431:
944
945 /*
946 * is the cpu configured ?
947 * is it stopped or running and not in error ?
948 */
949 ldx [%g1 + CPU_STATUS], %g3
950 cmp %g3, CPU_STATE_STOPPED
951 be,pt %xcc, 1f
952 cmp %g3, CPU_STATE_SUSPENDED
953 be,pt %xcc, 1f
954 cmp %g3, CPU_STATE_RUNNING
955 be,pt %xcc, 1f
956 cmp %g3, CPU_STATE_STARTING
957 be,pt %xcc, 1f
958 nop
959
960 PRINT("\tvcpu state = 0x")
961 PRINTX(%g3)
962 PRINT("\r\n")
963 HVABORT(-1, "Scheduled vcpu is in an illegal state or not configured")
9641:
965
966#endif
967
968 /*
969 * OK let fly ...
970 */
971
972 !
973 ! The vcpu should be fully configured and ready to
974 ! go even if it has never been run before.
975 ! However, because the vcpu state save and restore is not
976 ! complete, and because we're not (re)scheduling vcpus yet,
977 ! then the very first time the vcpu gets kicked off we try to
978 ! initialize some of the basic registers that are not
979 ! (re)stored into place with the state restoration.
980 !
981 ! We figure this all out from the cpu state. If it was
982 ! stopped, then we need to configure registers to bring it
983 ! alive. If it is RUNNING or SUSPENDED then we just
984 ! restore the registers and launch into it.
985 !
986 ! An additional wrinkle - if the cpu is stopped, then it
987 ! may be that the guest too is stopped, in which case we
988 ! assume we're the boot cpu and do the appropriate reset setup
989 ! for the guest too. This can result in an async status update
990 ! message on the HVCTL channel if it is configured.
991 !
992
993 SET_VCPU_STRUCT(%g1, %g2)
994
995 ldx [%g1 + CPU_STATUS], %g3
996 cmp %g3, CPU_STATE_STOPPED
997 be,pn %xcc, slow_start
998 cmp %g3, CPU_STATE_STARTING
999 be,pn %xcc, slow_start
1000 nop
1001
1002 ! Fast start ...
1003 PRINT("About to restore\r\n")
1004 HVCALL(vcpu_state_restore)
1005 PRINT_NOTRAP("Completed restore\r\n")
1006fast_start:
1007
1008 VCPU_STRUCT(%g1)
1009 mov CPU_STATE_RUNNING, %g2
1010 stx %g2, [%g1 + CPU_STATUS] ! it's running now
1011
1012 /*
1013 * Now that the vcpu is running, set the starting stick
1014 * value for the first utilization query.
1015 */
1016 rd %tick, %g3
1017 sllx %g3, 1, %g3 ! remove npt bit
1018 srax %g3, 1, %g3
1019 stx %g3, [%g1 + CPU_UTIL_STICK_LAST]
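/*
 * The "remove npt bit" idiom above, in C (a sketch): bit 63 of %tick is
 * the NPT control bit, not part of the counter, so it is stripped by
 * shifting it out and back in before the value is stored for
 * utilization accounting.  The assembly's arithmetic shift back is
 * equivalent because bit 62 of a real tick count is never set.
 *
 *    #include <stdint.h>
 *
 *    static uint64_t tick_counter(uint64_t tick_reg)
 *    {
 *        return (tick_reg << 1) >> 1;   // clear bit 63 (NPT)
 *    }
 */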
1020
1021 set CPU_LAUNCH_WITH_RETRY, %g2
1022 ldub [%g1 + %g2], %g1
1023 brnz,pt %g1, 1f
1024 nop
1025 done
10261:
1027 retry
1028
1029slow_start:
1030 !
1031 ! This section is to formally start a virtual CPU
1032 ! from the stopped state.
1033 !
1034 ! There are a number of additional things we want to do
1035 ! if this is the very first time we're entering a guest.
1036 !
1037
1038 VCPU_GUEST_STRUCT(%g1, %g5)
1039
1040#ifdef CONFIG_CRYPTO
1041
1042 /*
1043 * Start crypto
1044 */
1045 mov %g5, %g2
1046 !
1047 ! %g1 = cpu struct
1048 ! %g2 = guest struct
1049 !
1050 HVCALL(start_crypto)
1051#endif /* CONFIG_CRYPTO */
1052
1053 lduw [%g5 + GUEST_STATE], %g3
1054 cmp %g3, GUEST_STATE_NORMAL
1055 be,pt %xcc, .launch_non_boot_cpu
1056 nop
1057 cmp %g3, GUEST_STATE_RESETTING
1058 be,pt %xcc, .launch_boot_cpu
1059 nop
1060 cmp %g3, GUEST_STATE_SUSPENDED
1061 bne,pt %xcc, 1f
1062 nop
1063 HVABORT(-1, "guest suspend not yet supported")
1064 ! when it is supported we need to move the guest
1065 ! from suspended back to its prior state ...
1066 ! which raises the question of whether we want to have
1067 ! the suspended state or a separate flag ?
10681:
1069 cmp %g3, GUEST_STATE_STOPPED
1070 bne,pt %xcc, 1f
1071 nop
1072 HVABORT(-1, "guest in STOPPED state in launch_vcpu")
10731:
1074 HVABORT(-1, "invalid guest state in launch_vcpu")
1075
1076.launch_boot_cpu:
1077
1078 /*
1079 * FIXME: This scrub needs to go away
1080 *
1081 * Only scrub guest memory if reset reason is POR
1082 *
1083 * %g1 - vcpu
1084 * %g5 - guest
1085 */
1086
1087 set GUEST_RESET_REASON, %g3
1088 ldx [%g5 + %g3], %g3
1089 cmp %g3, RESET_REASON_POR
1090 bne,pt %xcc, .master_guest_scrub_done
1091 nop
1092
1093 mov (NUM_RA2PA_SEGMENTS - 1) * RA2PA_SEGMENT_SIZE, %g3
1094 set (-1), %g6
10951:
1096 add %g3, GUEST_RA2PA_SEGMENT, %g4
1097 add %g4, %g5, %g4 ! &guest.ra2pa_segment
1098
1099 ! only scrub memory segments (obviously ...)
1100 ldub [%g4 + RA2PA_SEGMENT_FLAGS], %g1
1101 btst MEM_SEGMENT, %g1
1102 bz,pn %xcc, 2f
1103 nop
1104
1105 ldx [%g4 + RA2PA_SEGMENT_BASE], %g1 ! RA of base of
1106 ! memory segment
1107 brlz,pn %g1, 2f
1108 nop
1109 ldx [%g4 + RA2PA_SEGMENT_LIMIT], %g2 ! limit of memory
1110 ! segment
1111 sub %g2, %g1, %g2 ! %g2
1112 ! (limit - base)->size
1113 brlez,pn %g2, 2f
1114 nop
1115
1116 ldx [%g4 + RA2PA_SEGMENT_OFFSET], %g7 ! offset of memory
1117 ! segment
1118 add %g1, %g7, %g1 ! RA -> PA
1119
1120 /*
1121 * It's possible that two (or more) contiguous segments describe
1122 * the same physical area in memory so we keep track of the
1123 * last segment PA scrubbed and skip this segment scrub if it's
1124 * the same. Note that all the segments will have the same size
1125 * (> 16GB) so one scrub fits all.
1126 */
1127 cmp %g1, %g6
1128 be,pn %xcc, 2f
1129 mov %g1, %g6
1130
1131 HVCALL(memscrub)
11322:
1133 brgz,pt %g3, 1b
1134 sub %g3, RA2PA_SEGMENT_SIZE, %g3
1135
1136.master_guest_scrub_done:
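/*
 * The boot-time guest scrub above, sketched in C (segment fields follow
 * the RA2PA_SEGMENT_* offsets; memscrub() is assumed to take a physical
 * address and a length, as it is called elsewhere in this file): every
 * RA2PA segment is walked, only memory segments with a sane base and
 * size are scrubbed, and a segment whose physical base matches the one
 * just scrubbed is skipped, since contiguous segments may alias the
 * same physical range.
 *
 *    #include <stdint.h>
 *
 *    struct ra2pa_segment {
 *        unsigned char flags;          // MEM_SEGMENT, ...
 *        int64_t base, limit;          // RA range covered
 *        int64_t offset;               // RA -> PA offset
 *    };
 *
 *    extern void memscrub(uint64_t pa, uint64_t len);
 *
 *    static void scrub_guest(struct ra2pa_segment *seg, int nsegs)
 *    {
 *        int64_t last_pa = -1;
 *        for (int i = nsegs - 1; i >= 0; i--) {
 *            if (!(seg[i].flags & MEM_SEGMENT))
 *                continue;
 *            int64_t size = seg[i].limit - seg[i].base;
 *            if (seg[i].base < 0 || size <= 0)
 *                continue;
 *            int64_t pa = seg[i].base + seg[i].offset;
 *            if (pa == last_pa)
 *                continue;           // same physical area, already done
 *            last_pa = pa;
 *            memscrub((uint64_t)pa, (uint64_t)size);
 *        }
 *    }
 */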
1137
1138 /*
1139 * Copy guest's firmware image into the partition
1140 */
1141 VCPU_GUEST_STRUCT(%g1, %g2)
1142
1143 set GUEST_ROM_SIZE, %g7
1144 ldx [%g2 + %g7], %g3
1145 set GUEST_ROM_BASE, %g7
1146 ldx [%g5 + %g7], %g1
1147 set GUEST_ROM_SIZE, %g7
1148 ldx [%g5 + %g7], %g3
1149
1150 ! find segment for the guest which contains GUEST_REAL_BASE
1151 ldx [%g5 + GUEST_REAL_BASE], %g2 ! guest real base addr
1152 srlx %g2, RA2PA_SHIFT, %g2
1153 sllx %g2, RA2PA_SEGMENT_SHIFT, %g2 ! ra2pa_segment
1154 add %g2, GUEST_RA2PA_SEGMENT, %g2
1155 add %g5, %g2, %g4 ! %g4 &
1156 ! guest.ra2pa_segment
1157 ldx [%g4 + RA2PA_SEGMENT_BASE], %g2 ! RA of segment base
1158 ldx [%g4 + RA2PA_SEGMENT_OFFSET], %g4 ! Offset of segment base
1159 add %g2, %g4, %g2 ! PA of segment
1160
1161 ! %g1 ROM base
1162 ! %g2 GUEST base (destination PA)
1163 ! %g3 ROM size
1164
1165 HVCALL(xcopy)
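/*
 * The address computation feeding the xcopy above, as a C sketch (the
 * RA2PA_SHIFT indexing mirrors the assembly; the segment struct is
 * simplified and the xcopy() argument order of src, dst, len is taken
 * from its use here): the guest's real base is translated to a physical
 * address through its RA2PA segment table, and the firmware image is
 * copied to that location.
 *
 *    #include <stdint.h>
 *
 *    struct ra2pa_segment { int64_t base, limit, offset; };
 *
 *    // PA of the base of the segment containing real address "ra"
 *    static uint64_t segment_base_pa(struct ra2pa_segment *segs,
 *                                    uint64_t ra)
 *    {
 *        struct ra2pa_segment *s = &segs[ra >> RA2PA_SHIFT];
 *        return (uint64_t)(s->base + s->offset);
 *    }
 *
 *    // usage: xcopy(rom_base, segment_base_pa(segs, real_base), rom_size)
 */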
1166
1167
1168#ifdef CONFIG_FIRE
1169
1170 GUEST_STRUCT(%g3)
1171
1172 ! %g3 guest struct
1173
1174 !
1175 ! Does this guest have control over Fire leaf A?
1176 ! If so, we need to reset and unconfigure the leaf.
1177 !
1178
1179 CONFIG_STRUCT(%g1)
1180 ldx [%g1 + CONFIG_PCIE_BUSSES], %g2
1181 ldx [%g2 + PCIE_DEVICE_GUESTP], %g2 /* bus 0 */
1182 cmp %g2, %g3
1183 bne,pt %xcc, 1f
1184 nop
1185
1186 wrpr %g0, 0, %tl
1187 wrpr %g0, 0, %gl
1188 HVCALL(setup_c_environ)
1189 mov 0, %o0 ! PCI bus A = 0
1190 call pcie_bus_reset
1191 nop
1192
1193 CONFIG_STRUCT(%g1)
1194 setx fire_dev, %g7, %g5
1195 ldx [%g1 + CONFIG_RELOC], %g7
1196 sub %g5, %g7, %g1 ! ptr to fire_dev[0]
1197
1198 mov 0, %g2 ! PCI bus A = 0
1199
1200 ! %g1 - fire cookie
1201 ! %g2 - root complex (0=A, 1=B)
1202 HVCALL(fire_leaf_soft_reset)
1203
12041:
1205 GUEST_STRUCT(%g3)
1206
1207 !
1208 ! Does this guest have control over Fire leaf B?
1209 ! If so, we need to reset and unconfigure the leaf.
1210 !
1211
1212 CONFIG_STRUCT(%g1)
1213 ldx [%g1 + CONFIG_PCIE_BUSSES], %g2
1214 ldx [%g2 + PCIE_DEVICE_GUESTP + PCIE_DEVICE_SIZE], %g2 /* bus 1 */
1215 cmp %g2, %g3
1216 bne,pt %xcc, 1f
1217 nop
1218
1219 wrpr %g0, 0, %tl
1220 wrpr %g0, 0, %gl
1221 HVCALL(setup_c_environ)
1222 mov 1, %o0 ! PCI bus B = 1
1223 call pcie_bus_reset
1224 nop
1225
1226 CONFIG_STRUCT(%g1)
1227 setx fire_dev, %g7, %g5
1228 ldx [%g1 + CONFIG_RELOC], %g7
1229 sub %g5, %g7, %g5 ! ptr to fire_dev[]
1230 add %g5, FIRE_COOKIE_SIZE, %g1 ! &fire_dev[1]
1231
1232 mov 1, %g2 ! PCI bus B = 1
1233
1234 ! %g1 - fire cookie
1235 ! %g2 - root complex (0=A, 1=B)
1236 HVCALL(fire_leaf_soft_reset)
1237
12381:
1239bus_failed:
1240#endif /* CONFIG_FIRE */
1241
1242 VCPU_GUEST_STRUCT(%g6, %g5)
1243
1244 ! Back to original reg assignments
1245 ! %g6 = cpu
1246 ! %g5 = guest
1247
1248 ! For the boot CPU we must set the launch point - which is in
1249 ! the real trap table. Since we have now copied in a new
1250 ! firmware image, we must also reset the rtba to point to
1251 ! this location.
1252 ! There are only two ways a cpu can start from stopped
1253 ! 1. as the boot cpu in which case we force the start address
1254 ! 2. via a cpu_start API call in which case the start address
1255 ! is set there.
1256
1257 ldx [%g5 + GUEST_REAL_BASE], %g2
1258 stx %g2, [%g6 + CPU_RTBA]
1259 inc (TT_POR * TRAPTABLE_ENTRY_SIZE), %g2 ! Power-on-reset vector
1260 stx %g2, [%g6 + CPU_START_PC]
1261
1262 /*
1263 * Set the guest state to normal, and signal this to Zeus
1264 * on the hvctl channel if it is configured.
1265 */
1266 mov GUEST_STATE_NORMAL, %g1
1267 stw %g1, [%g5 + GUEST_STATE]
1268
1269 mov SIS_TRANSITION, %g1
1270 stub %g1, [%g5 + GUEST_SOFT_STATE]
1271
1272 add %g5, GUEST_SOFT_STATE_STR, %g1
1273 mov SOFT_STATE_SIZE, %g2
1274 HVCALL(bzero)
1275
1276 wrpr %g0, 0, %tl
1277 wrpr %g0, 0, %gl
1278 HVCALL(setup_c_environ)
1279 GUEST_STRUCT(%o0)
1280 call guest_state_notify
1281 nop
1282
1283 /*
1284 * Now that the guest is officially up and running,
1285 * initialize the utilization statistics.
1286 */
1287 rd %tick, %g1
1288 sllx %g1, 1, %g1 ! remove npt bit
1289 srax %g1, 1, %g1
1290
1291 GUEST_STRUCT(%g2)
1292 set GUEST_START_STICK, %g3
1293 add %g2, %g3, %g3
1294 stx %g1, [%g3]
1295
1296 set GUEST_UTIL, %g3
1297 add %g2, %g3, %g3
1298 stx %g1, [%g3 + GUTIL_STICK_LAST]
1299 stx %g0, [%g3 + GUTIL_STOPPED_CYCLES]
1300
1301 ba 1f
1302 nop
1303
1304.launch_non_boot_cpu:
1305
1306 wrpr %g0, 0, %tl
1307 wrpr %g0, 0, %gl
1308 HVCALL(setup_c_environ)
13091:
1310 VCPU_STRUCT(%o0)
1311 call reset_vcpu_state
1312 nop
1313
1314 HVCALL(vcpu_state_restore)
1315
1316 !
1317 ! This nastiness should be replaced by vcpu_state_restore
1318 !
1319
1320 ! clear NPT
1321 rdpr %tick, %g3
1322 cmp %g3, 0
1323 bge %xcc, 1f
1324 nop
1325 sllx %g3, 1, %g3
1326 srlx %g3, 1, %g3
1327 wrpr %g3, %tick
13281:
1329
1330#define INITIAL_PSTATE (PSTATE_PRIV | PSTATE_MM_TSO)
1331#define INITIAL_TSTATE ((INITIAL_PSTATE << TSTATE_PSTATE_SHIFT) | \
1332 (MAXPGL << TSTATE_GL_SHIFT))
1333
1334 VCPU_GUEST_STRUCT(%g6, %g5)
1335
1336 setx INITIAL_TSTATE, %g2, %g1
1337 wrpr %g1, %tstate
1338 wrhpr %g0, %htstate
1339
1340 ldub [%g6 + CPU_PARTTAG], %g2
1341 set IDMMU_PARTITION_ID, %g1
1342 stxa %g2, [%g1]ASI_DMMU
1343 mov MMU_PCONTEXT, %g1
1344 stxa %g0, [%g1]ASI_MMU
1345 mov MMU_SCONTEXT, %g1
1346 stxa %g0, [%g1]ASI_MMU
1347
1348 HVCALL(set_dummytsb_ctx0)
1349 HVCALL(set_dummytsb_ctxN)
1350
1351 /*
1352 * A strand must enter the guest with MMUs disabled.
1353 * The guest assumes responsibility for establishing
1354 * any mappings it requires and enabling the MMU.
1355 */
1356 ldxa [%g0]ASI_LSUCR, %g1
1357 set (LSUCR_DM | LSUCR_IM), %g2
1358 btst %g1, %g2
1359 be,pn %xcc, 0f ! already disabled
1360 nop
1361 andn %g1, %g2, %g1 ! mask out enable bits
1362 stxa %g1, [%g0]ASI_LSUCR
13630:
1364 stx %g0, [%g6 + CPU_MMU_AREA_RA]
1365 stx %g0, [%g6 + CPU_MMU_AREA]
1366
1367 wr %g0, 0, SOFTINT
1368 wrpr %g0, PIL_15, %pil
1369 mov CPU_MONDO_QUEUE_HEAD, %g1
1370 stxa %g0, [%g1]ASI_QUEUE
1371 mov CPU_MONDO_QUEUE_TAIL, %g1
1372 stxa %g0, [%g1]ASI_QUEUE
1373 mov DEV_MONDO_QUEUE_HEAD, %g1
1374 stxa %g0, [%g1]ASI_QUEUE
1375 mov DEV_MONDO_QUEUE_TAIL, %g1
1376 stxa %g0, [%g1]ASI_QUEUE
1377
1378 mov ERROR_RESUMABLE_QUEUE_HEAD, %g1
1379 stxa %g0, [%g1]ASI_QUEUE
1380 mov ERROR_RESUMABLE_QUEUE_TAIL, %g1
1381 stxa %g0, [%g1]ASI_QUEUE
1382 mov ERROR_NONRESUMABLE_QUEUE_HEAD, %g1
1383 stxa %g0, [%g1]ASI_QUEUE
1384 mov ERROR_NONRESUMABLE_QUEUE_TAIL, %g1
1385 stxa %g0, [%g1]ASI_QUEUE
1386
1387 ! FIXME: This should be part of the restore_state call
1388 ! initialize fp regs
1389 rdpr %pstate, %g1
1390 or %g1, PSTATE_PEF, %g1
1391 wrpr %g1, %g0, %pstate
1392 wr %g0, FPRS_FEF, %fprs
1393 stx %g0, [%g6 + CPU_SCR0]
1394 ldd [%g6 + CPU_SCR0], %f0
1395 ldd [%g6 + CPU_SCR0], %f2
1396 ldd [%g6 + CPU_SCR0], %f4
1397 ldd [%g6 + CPU_SCR0], %f6
1398 ldd [%g6 + CPU_SCR0], %f8
1399 ldd [%g6 + CPU_SCR0], %f10
1400 ldd [%g6 + CPU_SCR0], %f12
1401 ldd [%g6 + CPU_SCR0], %f14
1402 ldd [%g6 + CPU_SCR0], %f16
1403 ldd [%g6 + CPU_SCR0], %f18
1404 ldd [%g6 + CPU_SCR0], %f20
1405 ldd [%g6 + CPU_SCR0], %f22
1406 ldd [%g6 + CPU_SCR0], %f24
1407 ldd [%g6 + CPU_SCR0], %f26
1408 ldd [%g6 + CPU_SCR0], %f28
1409 ldd [%g6 + CPU_SCR0], %f30
1410
1411 ldd [%g6 + CPU_SCR0], %f32
1412 ldd [%g6 + CPU_SCR0], %f34
1413 ldd [%g6 + CPU_SCR0], %f36
1414 ldd [%g6 + CPU_SCR0], %f38
1415 ldd [%g6 + CPU_SCR0], %f40
1416 ldd [%g6 + CPU_SCR0], %f42
1417 ldd [%g6 + CPU_SCR0], %f44
1418 ldd [%g6 + CPU_SCR0], %f46
1419 ldd [%g6 + CPU_SCR0], %f48
1420 ldd [%g6 + CPU_SCR0], %f50
1421 ldd [%g6 + CPU_SCR0], %f52
1422 ldd [%g6 + CPU_SCR0], %f54
1423 ldd [%g6 + CPU_SCR0], %f56
1424 ldd [%g6 + CPU_SCR0], %f58
1425 ldd [%g6 + CPU_SCR0], %f60
1426 ldd [%g6 + CPU_SCR0], %f62
1427
1428 ldx [%g6 + CPU_SCR0], %fsr
1429 wr %g0, 0, %gsr
1430 wr %g0, 0, %fprs
1431
1432 ! %g6 cpu
1433 VCPU2GUEST_STRUCT(%g6, %g5)
1434 ! %g5 guest
1435
1436 /*
1437 * Initial arguments for the guest
1438 */
1439 mov CPU_STATE_RUNNING, %o0
1440 stx %o0, [%g6 + CPU_STATUS]
1441 membar #Sync
1442
1443 /*
1444 * Start at the correct POR vector entry point
1445 */
1446 set CPU_LAUNCH_WITH_RETRY, %g2
1447 stb %g0, [%g6 + %g2]
1448
1449 set CPU_START_PC, %g2
1450 ldx [%g6 + %g2], %g2
1451 wrpr %g2, %tnpc
1452
1453 ldx [%g6 + CPU_START_ARG], %o0 ! argument
1454 ldx [%g5 + GUEST_REAL_BASE], %i0 ! memory base
1455
1456 ! find size of base memory segment
1457 mov %i0, %g2
1458 srlx %g2, RA2PA_SHIFT, %g2
1459 sllx %g2, RA2PA_SEGMENT_SHIFT, %g2 ! ra2pa_segment
1460 add %g2, GUEST_RA2PA_SEGMENT, %g2
1461 add %g5, %g2, %g4 ! %g4 &guest.ra2pa_segment
1462 ldx [%g4 + RA2PA_SEGMENT_BASE], %g1
1463 ldx [%g4 + RA2PA_SEGMENT_LIMIT], %g2
1464 sub %g2, %g1, %i1 ! memory size = limit - base
1465
1466 membar #Sync
1467
1468 ba fast_start
1469 nop
1470 SET_SIZE(launch_vcpu)
1471
1472/*
1473 * Scrub all of memory except for the HV.
1474 * Only scrub if running on real hardware, i.e. if the system FPGA is present.
1475 *
1476 * Parallelize the scrubbing activity by breaking the total
1477 * amount into chunks that each CPU can handle, and require them to
1478 * do their bit as part of their initial startup activity.
1479 *
1480 * Inputs:
1481 * %i0 global config pointer
1482 */
1483 ENTRY_NP(scrub_all_memory)
1484 mov %g7, %l7 ! save return address
1485 ldx [%i0 + CONFIG_MEMBASE], %l0
1486 ldx [%i0 + CONFIG_MEMSIZE], %l1
1487 ldx [%i0 + CONFIG_STRAND_STARTSET], %l2
1488 ldx [%i0 + CONFIG_PHYSMEMSIZE], %l3
1489
1490#if defined(CONFIG_FPGA) || defined(T1_FPGA)
1491 ! How many functional strands do we have available?
1492 mov %l2, %o7
1493 mov 1, %o1
1494 mov %g0, %o2
14951:
1496 andcc %o7, %o1, %g0
1497 beq,pt %xcc, 2f
1498 nop
1499 add %o2, 1, %o2
15002:
1501 sllx %o1, 1, %o1
1502 brnz,pt %o1, 1b
1503 nop
1504
1505 ! %o2 = number of available strands
1506 PRINT("Scrubbing the rest of memory\r\n")
1507 PRINT_REGISTER("Number of strands", %o2)
1508
1509 PRINT_REGISTER("membase", %l0)
1510 PRINT_REGISTER("memsize", %l1)
1511 PRINT_REGISTER("physmem", %l3)
1512
1513 mov %l0, %g1 ! membase
1514 mov %l1, %g2 ! memsize
1515 add %g1, %g2, %g1 ! start of rest of memory
1516 mov %l3, %g2 ! total size
1517 sub %g2, %g1, %g3
1518 ! %g1 = start address
1519 ! %g3 = size to scrub
1520
1521 ! Figure a chunk per strand (round up to 64 bytes)
1522 udivx %g3, %o2, %g3
1523 add %g3, 63, %g3
1524 andn %g3, 63, %g3
1525
1526 ! Now allocate a slice per strand (phys cpu)
1527 ! %i0 = config struct
1528 ! %o7 = live strand bit mask
1529 ! %g1 = scrub start address
1530 ! %g2 = max scrub address
1531 ! %g3 = size for each chunk
1532
1533 ldx [%i0 + CONFIG_STRANDS], %o3
1534 mov %g0, %g6
15351:
1536 mov 1, %o1
1537 sllx %o1, %g6, %o1
1538 andcc %o7, %o1, %g0
1539 beq,pt %xcc, 2f
1540 nop
1541 set STRAND_ID, %g5
1542 stub %g6, [ %o3 + %g5 ]
1543 set STRAND_SCRUB_BASEPA, %g5
1544 stx %g1, [ %o3 + %g5 ]
1545 sub %g2, %g1, %g4
1546 cmp %g4, %g3
1547 movg %xcc, %g3, %g4
1548 set STRAND_SCRUB_SIZE, %g5
1549 stx %g4, [ %o3 + %g5 ]
1550 add %g1, %g4, %g1
15512:
1552 set STRAND_SIZE, %g5
1553 add %o3, %g5, %o3
1554 inc %g6
1555 cmp %g6, NSTRANDS
1556 blt,pt %xcc, 1b
1557 nop
1558
1559 ! Master removes itself from the completed set
1560 STRAND_STRUCT(%o3)
1561 ldub [%o3 + STRAND_ID], %g1
1562 mov 1, %g2
1563 sllx %g2, %g1, %g2
1564 andn %o7, %g2, %o7
1565
1566 ! strand bits get cleared as their scrub is completed
1567 stx %o7, [ %i0 + CONFIG_SCRUB_SYNC ]
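/*
 * The chunk assignment above as a C sketch (field names follow the
 * STRAND_SCRUB_* offsets; popcount replaces the assembly's bit-counting
 * loop for brevity): the memory above the hypervisor is split into one
 * roughly equal, 64-byte aligned chunk per live strand, with the final
 * chunk clamped to the end of physical memory.
 *
 *    #include <stdint.h>
 *
 *    struct strand_scrub { uint64_t basepa, size; };
 *
 *    static void assign_scrub_chunks(struct strand_scrub *s, int nstrands,
 *                                    uint64_t live_mask,
 *                                    uint64_t start, uint64_t end)
 *    {
 *        int nlive = __builtin_popcountll(live_mask);
 *        uint64_t chunk = ((end - start) / nlive + 63) & ~63ULL;
 *        uint64_t pa = start;
 *        for (int id = 0; id < nstrands; id++) {
 *            if (!(live_mask & (1ULL << id)))
 *                continue;
 *            uint64_t size = end - pa;
 *            if (size > chunk)
 *                size = chunk;         // clamp the last chunk
 *            s[id].basepa = pa;
 *            s[id].size   = size;
 *            pa += size;
 *        }
 *    }
 */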
1568#endif /* if defined(CONFIG_FPGA) || defined(T1_FPGA) */
1569
1570 /*
1571 * Start all the other strands. They will scrub their slice of memory
1572 * and then go into start work.
1573 */
1574
1575#ifdef T1_FPGA
1576
1577 mov %l2, %g2 ! %g2 = strandstartset
1578 mov 0, %g1
1579 setx IOBBASE + INT_VEC_DIS, %g4, %g5
1580
15811: mov 1, %g3
1582 btst %g2, %g3
1583 bz,pn %xcc, 2f
1584 nop
1585
1586/* skip the current cpu */
1587
1588 rd STR_STATUS_REG, %g3
1589 srlx %g3, STR_STATUS_CPU_ID_SHIFT, %g3
1590 and %g3, STR_STATUS_CPU_ID_MASK, %g3 ! %g3 = current cpu
1591 cmp %g1, %g3
1592 beq,pt %xcc, 3f ! skip the current cpu
1593 nop
1594
1595/* Send a power-on reset to the other core-master strands. */
1596/* The lowest numbered cpu in a core is the core master. */
1597
1598 mov INT_VEC_DIS_TYPE_RESET, %g4
1599 sllx %g4, INT_VEC_DIS_TYPE_SHIFT, %g4
1600 or %g4, INT_VEC_DIS_VECTOR_RESET, %g4
1601 sllx %g1, INT_VEC_DIS_VCID_SHIFT, %g3 ! target strand
1602 or %g4, %g3, %g3 ! int_vec_dis value
1603 stx %g3, [%g5]
1604
16053:
1606/* Skip the slave strands in a core; the core master wakes up the other strands in the reset code. */
1607 srlx %g2, 1, %g2
1608 inc %g1
1609 and %g1, 3, %g3
1610 brnz,pt %g3, 3b
1611 nop
1612 cmp %g1, (NSTRANDS-1)
1613 bleu,pt %xcc, 1b
1614 nop
1615 brz,pt %g0, 4f
1616 nop
1617
16182:
1619 srlx %g2, 1, %g2
1620 inc %g1
1621 cmp %g1, (NSTRANDS-1)
1622 bleu,pt %xcc, 1b
1623 nop
1624
16254:
1626
1627
1628
1629/* The current cpu wakes up the slave strands in its own core */
1630
1631 mov %l2, %g2 ! %g2 = strandstartset
1632
1633 rd STR_STATUS_REG, %g3
1634 srlx %g3, STR_STATUS_CPU_ID_SHIFT, %g3
1635 and %g3, STR_STATUS_CPU_ID_MASK, %g3 ! %g3 = current cpu
1636
1637 add %g3, 1, %g1
1638 srlx %g2, %g1, %g2
1639 setx IOBBASE + INT_VEC_DIS, %g4, %g5
1640
16411:
1642 and %g1, 3, %g3
1643 brz,pt %g3, 3f ! reached next core
1644 nop
1645
1646 mov 1, %g3
1647 btst %g2, %g3
1648 bz,pn %xcc, 2f
1649 nop
1650
1651/* Send a power-on reset to the slave strands in the master's own core. */
1652
1653 mov INT_VEC_DIS_TYPE_RESET, %g4
1654 sllx %g4, INT_VEC_DIS_TYPE_SHIFT, %g4
1655 or %g4, INT_VEC_DIS_VECTOR_RESET, %g4
1656 sllx %g1, INT_VEC_DIS_VCID_SHIFT, %g3 ! target strand
1657 or %g4, %g3, %g3 ! int_vec_dis value
1658 stx %g3, [%g5]
1659
16602:
1661 srlx %g2, 1, %g2
1662 inc %g1
1663 cmp %g1, (NSTRANDS-1)
1664 bleu,pt %xcc, 1b
1665 nop
16663:
1667
1668#else /* ifdef T1_FPGA */
1669
1670 mov %l2, %g2 ! %g2 = strandstartset
1671
1672 rd STR_STATUS_REG, %g3
1673 srlx %g3, STR_STATUS_CPU_ID_SHIFT, %g3
1674 and %g3, STR_STATUS_CPU_ID_MASK, %g3 ! %g3 = current cpu
1675 mov 1, %g4
1676 sllx %g4, %g3, %g3
1677 andn %g2, %g3, %g2 ! remove current cpu from set
1678 mov NSTRANDS - 1, %g1
1679
1680 setx IOBBASE + INT_VEC_DIS, %g4, %g5
16811: mov 1, %g3
1682 sllx %g3, %g1, %g3
1683 btst %g2, %g3
1684 bz,pn %xcc, 2f
1685 mov INT_VEC_DIS_TYPE_RESUME, %g4
1686 sllx %g4, INT_VEC_DIS_TYPE_SHIFT, %g4
1687 sllx %g1, INT_VEC_DIS_VCID_SHIFT, %g3 ! target strand
1688 or %g4, %g3, %g3 ! int_vec_dis value
1689 stx %g3, [%g5]
1690
16912: deccc %g1
1692 bgeu,pt %xcc, 1b
1693 nop
1694#endif
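/*
 * A C sketch of the strand wake-up above (the INT_VEC_DIS_* constants
 * are real; the MMIO pointer and function name are illustrative): every
 * remaining strand in the start set is kicked by writing a resume (or,
 * on the FPGA build, power-on reset) vector-dispatch word naming the
 * target virtual core id to the IOB's INT_VEC_DIS register.
 *
 *    #include <stdint.h>
 *
 *    static void wake_strands(volatile uint64_t *int_vec_dis,
 *                             uint64_t startset, unsigned self,
 *                             unsigned nstrands)
 *    {
 *        startset &= ~(1ULL << self);          // skip the current strand
 *        for (unsigned id = 0; id < nstrands; id++) {
 *            if (!(startset & (1ULL << id)))
 *                continue;
 *            uint64_t v = ((uint64_t)INT_VEC_DIS_TYPE_RESUME
 *                              << INT_VEC_DIS_TYPE_SHIFT) |
 *                         ((uint64_t)id << INT_VEC_DIS_VCID_SHIFT);
 *            *int_vec_dis = v;                 // MMIO store to the IOB
 *        }
 *    }
 */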
1695
1696 /*
1697 * Master now does its bit of the memory scrubbing.
1698 */
1699#if defined(CONFIG_FPGA) || defined(T1_FPGA)
1700 clr %g1
1701 mov %l0, %g2 ! %g2 = membase
1702 HVCALL(memscrub) ! scrub below hypervisor
1703
1704 STRAND_STRUCT(%g3)
1705 ldx [%g3 + STRAND_SCRUB_BASEPA], %g1
1706 ldx [%g3 + STRAND_SCRUB_SIZE], %g2
1707 HVCALL(memscrub) ! scrub master's slice above hypervisor
1708
1709 ! Now wait until all the other strands are done
1710#ifdef T1_FPGA
1711 mov 0, %o1
1712 set 0xFFFFFF, %o2
17131:
1714 ldx [ %i0 + CONFIG_SCRUB_SYNC ], %g2
1715 inc %o1
1716 andcc %o1, %o2, %g0
1717 bne,pt %xcc, 3f
1718 nop
1719 PRINT(" ")
1720 PRINTX(%g2)
17213:
1722 brnz,pt %g2, 1b
1723 nop
1724 PRINT(" done\r\n")
1725#else /* ifdef T1_FPGA */
17261:
1727 ldx [ %i0 + CONFIG_SCRUB_SYNC ], %g2
1728 PRINT(" ")
1729 PRINTX(%g2)
1730 brnz,pt %g2, 1b
1731 nop
1732 PRINT(" done\r\n")
1733#endif /* ifdef T1_FPGA */
1734#endif /* if defined(CONFIG_FPGA) || defined(T1_FPGA) */
1735
1736 mov %l7, %g7 ! restore return address
1737 HVRET
1738 SET_SIZE(scrub_all_memory)