Commit | Line | Data |
---|---|---|
920dae64 AT |
1 | /* |
2 | * ========== Copyright Header Begin ========================================== | |
3 | * | |
4 | * Hypervisor Software File: main.s | |
5 | * | |
6 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. | |
7 | * | |
8 | * - Do not alter or remove copyright notices | 
9 | * | |
10 | * - Redistribution and use of this software in source and binary forms, with | |
11 | * or without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistribution of source code must retain the above copyright notice, | |
15 | * this list of conditions and the following disclaimer. | |
16 | * | |
17 | * - Redistribution in binary form must reproduce the above copyright notice, | |
18 | * this list of conditions and the following disclaimer in the | |
19 | * documentation and/or other materials provided with the distribution. | |
20 | * | |
21 | * Neither the name of Sun Microsystems, Inc. or the names of contributors | |
22 | * may be used to endorse or promote products derived from this software | |
23 | * without specific prior written permission. | |
24 | * | |
25 | * This software is provided "AS IS," without a warranty of any kind. | |
26 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, | |
27 | * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A | |
28 | * PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN | |
29 | * MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR | |
30 | * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR | |
31 | * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN | |
32 | * OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR | |
33 | * FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE | |
34 | * DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, | |
35 | * ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF | |
36 | * SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. | |
37 | * | |
38 | * You acknowledge that this software is not designed, licensed or | |
39 | * intended for use in the design, construction, operation or maintenance of | |
40 | * any nuclear facility. | |
41 | * | |
42 | * ========== Copyright Header End ============================================ | |
43 | */ | |
44 | /* | |
45 | * Copyright 2007 Sun Microsystems, Inc. All rights reserved. | |
46 | * Use is subject to license terms. | |
47 | */ | |
48 | ||
49 | .ident "@(#)main.s 1.8 07/07/17 SMI" | |
50 | ||
51 | /* | |
52 | * Niagara2 startup code | |
53 | */ | |
54 | #include <sys/asm_linkage.h> | |
55 | #include <sys/stack.h> /* For C environ : FIXME */ | |
56 | #include <sys/htypes.h> | |
57 | #include <sparcv9/misc.h> | |
58 | #include <sparcv9/asi.h> | |
59 | #include <hprivregs.h> | |
60 | #include <asi.h> | |
61 | #include <traps.h> | |
62 | #include <sun4v/traps.h> | |
63 | #include <dram.h> | |
64 | #include <sun4v/mmu.h> | |
65 | #include <sun4v/asi.h> | |
66 | #include <sun4v/queue.h> | |
67 | #include <devices/pc16550.h> | |
68 | #include <hypervisor.h> | |
69 | #include <cache.h> | |
70 | ||
71 | #include <guest.h> | |
72 | #include <strand.h> | |
73 | #include <offsets.h> | |
74 | #include <md.h> | |
75 | #include <vcpu.h> | |
76 | #include <config.h> | |
77 | #include <cyclic.h> | |
78 | #include <util.h> | |
79 | #include <abort.h> | |
80 | #include <hvctl.h> | |
81 | #include <debug.h> | |
82 | #include <fpga.h> | |
83 | #include <ldc.h> | |
84 | #include <cmp.h> | |
85 | ||
/*
 * start_master - cold-start entry point for the boot (master) strand.
 *
 * Entered from the power-on/reset trap vector on the boot strand only.
 *
 * Incoming arguments (from the reset handler):
 *	%g1 = membase (base PA of hypervisor memory)
 *	%g2 = memsize (size of hypervisor memory)
 *	%g3 = hypervisor machine description (MD) pointer
 *	%g4 = strandstartset (bitmask of functional strands)
 *	%g5 = total physical memory size
 *
 * If still executing from ROM, scrubs the target RAM, copies the
 * hypervisor image into RAM and re-enters itself through the RAM
 * trap table's power-on vector.  Once in RAM it performs the one-time
 * global initialization: trap-table takeover, error enables, global
 * config population, GOT adjustment for PIC C code, device setup,
 * then falls into start_work.
 */
ENTRY_NP(start_master)

	!! save incoming arguments
	mov	%g1, %i0			! membase
	mov	%g2, %i1			! memsize
	mov	%g3, %i2			! hypervisor description
	mov	%g4, %i3			! strandstartset
	mov	%g5, %i4			! total physical memory size

	! init scratch pad registers to a known state
	SET_VCPU_STRUCT(%g0, %g1)
	SET_STRAND_STRUCT(%g0, %g1)

#ifdef CONFIG_HVUART
	! init hv console UART XXX we don't know the address yet!
	setx	FPGA_UART_BASE, %g2, %g1
	/*
	 * clobbers %g1,%g2,%g3,%g7
	 */
	HVCALL(uart_init)
#endif

	PRINT_NOTRAP("Entering hypervisor\r\nLSUCR 0x")
	ldxa	[%g0]ASI_LSUCR, %g4
	PRINTX_NOTRAP(%g4)
	PRINT_NOTRAP("\r\nmembase 0x")
	PRINTX_NOTRAP(%i0)
	PRINT_NOTRAP("\r\nmemsize 0x")
	PRINTX_NOTRAP(%i1)
	PRINT_NOTRAP("\r\nMD 0x")
	PRINTX_NOTRAP(%i2)
	PRINT_NOTRAP("\r\nCPU StartSet 0x")
	PRINTX_NOTRAP(%i3)
	PRINT_NOTRAP("\r\nTotal Phys Mem Size 0x")
	PRINTX_NOTRAP(%i4)
	PRINT_NOTRAP("\r\n")
	mov	%i0, %g1
	mov	%i1, %g2
	mov	%i2, %g3

	/*
	 * Determine if we're running in RAM or ROM
	 */
	rd	%pc, %g4
	srlx	%g4, 32, %g4			! in rom?
	cmp	%g4, 0x80			! bits <39,32>
	blu,pt	%xcc, .master_nocopy		! no, in ram already
	nop

	/*
	 * Running from ROM
	 *
	 * Scrub the memory that we're going to copy ourselves
	 * into.
	 */
	PRINT_NOTRAP("\r\nScrubbing initial RAM\r\n")
	mov	%i0, %g1
	setx	htraptable, %g7, %g2
	setx	_edata, %g7, %g3
	brnz	%g3, 0f
	nop
	setx	_etext, %g7, %g3		! no data segment: use end of text
0:
	! align to next 64-byte boundary
	inc	(64 - 1), %g3
	andn	%g3, (64 - 1), %g3
	sub	%g3, %g2, %g2			! %g2 = image size to scrub
	HVCALL(memscrub)

	/*
	 * Currently executing in ROM, copy to RAM
	 */
	PRINT_NOTRAP("Copying from ROM to RAM\r\n")
	RELOC_OFFSET(%g1, %g5)			! %g5 = offset

	mov	%i0, %g2			! membase
	setx	htraptable, %g7, %g1
	sub	%g1, %g5, %g1			! current (ROM) image start
	setx	_edata, %g7, %g3
	brnz	%g3, 0f
	nop
	setx	_etext, %g7, %g3		! no data segment: use end of text
0:
	sub	%g3, %g5, %g3			! current (ROM) image end

	sub	%g3, %g1, %g3			! %g3 = image length
	inc	7, %g3				! round up to 8-byte multiple
	andn	%g3, 7, %g3
	HVCALL(xcopy)

	! reload the original entry arguments for the RAM image's entry point
	mov	%i0, %g1			! membase
	mov	%i1, %g2			! memsize
	mov	%i2, %g3			! hypervisor description
	mov	%i3, %g4			! strandstartset
	mov	%i4, %g5			! total physical memory size

	add	%i0, (TT_POR * TRAPTABLE_ENTRY_SIZE), %g6 ! master offset
	jmp	%g6				! re-enter via RAM trap table
	nop

.master_nocopy:
	! get a sane trap/global level and register-window state
	wrpr	%g0, 1, %tl
	wrpr	%g0, 1, %gl
	wrhpr	%g0, (HPSTATE_ENB | HPSTATE_HPRIV), %hpstate
	wrpr	%g0, NWINDOWS - 2, %cansave
	wrpr	%g0, NWINDOWS - 2, %cleanwin
	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, 0, %cwp
	wrpr	%g0, 0, %wstate

	! save parameters for memory scrub which is done later
	mov	%i0, %l0			! membase
	mov	%i1, %l1			! memsize
	mov	%i3, %l2			! strandstartset
	mov	%i4, %l3			! total physical memory size

	/*
	 * relocate the error table function pointers before taking over the
	 * trap table
	 */
	RELOC_OFFSET(%g1, %g5)			! %g5 = offset
	HVCALL(relocate_error_tables)

	/*
	 * Error handling is ready, now take over the trap table
	 */
	RELOC_OFFSET(%g1, %g5)			! %g5 = offset
	setx	htraptable, %g3, %g1
	sub	%g1, %g5, %g1			! run-time trap table address
	wrhpr	%g1, %htba

	/*
	 * Enable per-strand error reporting
	 */
	HVCALL(enable_errors_strand)

	/*
	 * enable per-chip errors
	 */
	HVCALL(enable_errors_chip)

	RELOC_OFFSET(%g1, %g5)
	!! %g5 offset

#ifdef DEBUG
	PRINT_NOTRAP("Running from RAM\r\n")
	PRINT_NOTRAP("Hypervisor version: ")
	HVCALL(printversion)
#endif
	PRINT_NOTRAP("Scrubbing remaining hypervisor RAM\r\n")
	setx	_edata, %g7, %g1
	brnz	%g1, 0f
	nop
	setx	_etext, %g7, %g1		! no data segment: use end of text
0:
	! align to next 64-byte boundary
	add	%g1, (64 - 1), %g1
	andn	%g1, (64 - 1), %g1
	sub	%g1, %g5, %g1			! Start address
	add	%i0, %i1, %g2			! end address + 1
	sub	%g2, %g1, %g2			! length = end+1 - start
#ifndef CONFIG_FPGA
	/* Don't erase last 64 bytes, used by dumbreset */
	dec	64, %g2
#endif
	HVCALL(memscrub)

	RELOC_OFFSET(%g1, %g5)			! %g5 = offset
	setx	config, %g6, %g1
	sub	%g1, %g5, %g6			! %g6 - global config

	! set global memory base/size
	stx	%i0, [%g6 + CONFIG_MEMBASE]
	stx	%i1, [%g6 + CONFIG_MEMSIZE]
	stx	%i3, [%g6 + CONFIG_STRAND_STARTSET]
	stx	%i4, [%g6 + CONFIG_PHYSMEMSIZE]

	/*
	 * Find first strand, and use it as the default
	 * target of system interrupts.
	 *
	 * Simply, for now, we just pick the lowest functional strand
	 * as the host for SSI and error interrupts.
	 */
	brnz	%i3, 1f
	nop
	HVABORT(-1, "No live strands defined");
1:
	! Find first bit set !
	mov	0, %g1
2:
	srlx	%i3, %g1, %g2
	btst	1, %g2
	beq,a,pt %xcc, 2b
	inc	%g1				! annulled: only when bit clear
	! pack the chosen strand id into the dev and err intrtgt fields
	sllx	%g1, 1*INTRTGT_DEVSHIFT, %g2
	sllx	%g1, 2*INTRTGT_DEVSHIFT, %g1
	or	%g1, %g2, %g1
	stx	%g1, [%g6 + CONFIG_INTRTGT]


	mov	%g6, %i0			! %i0 - global config

	stx	%g5, [%i0 + CONFIG_RELOC]

	! Stash away the boot configs HV md.
	stx	%i2, [%i0 + CONFIG_PARSE_HVMD]
	mov	%i2, %i4
	! %i4 - hypervisor description

	setx	guests, %g6, %g1
	sub	%g1, %g5, %i1
	! %i1 - guests base
	stx	%i1, [%i0 + CONFIG_GUESTS]

	setx	vcpus, %g6, %g1
	sub	%g1, %g5, %i2
	! %i2 - cpu base
	! FIX: store the relocated vcpus base (%i2), not the link-time
	! address still in %g1 - consistent with the CONFIG_GUESTS and
	! CONFIG_STRANDS stores, and with the later
	! "ldx [%i0 + CONFIG_VCPUS]" consumers which expect a run-time
	! address.
	stx	%i2, [%i0 + CONFIG_VCPUS]

	setx	strands, %g6, %g1
	sub	%g1, %g5, %i3
	! %i3 - strands base
	stx	%i3, [%i0 + CONFIG_STRANDS]

	setx	hv_ldcs, %g6, %g1
	sub	%g1, %g5, %g1
	stx	%g1, [%i0 + CONFIG_HV_LDCS]

	setx	sp_ldcs, %g6, %g1
	sub	%g1, %g5, %g1
	stx	%g1, [%i0 + CONFIG_SP_LDCS]

	! Perform some basic setup for this strand.
	PHYS_STRAND_ID(%g3)

	! %g3 = strand id

	SET_VCPU_STRUCT(%g0, %g2)

	set	STRAND_SIZE, %g2
	mulx	%g3, %g2, %g4			! byte offset of our strand slot
	ldx	[%i0 + CONFIG_STRANDS], %g1
	add	%g1, %g4, %g1			! %g1 = &strands[strand id]
	SET_STRAND_STRUCT(%g1, %g2)
	stx	%i0, [%g1 + STRAND_CONFIGP]

	! initialize the strand mini-stack
	stx	%g0, [%g1 + STRAND_MINI_STACK + MINI_STACK_PTR]

	PRINT_REGISTER("Strand startset", %l2)
	PRINT_REGISTER("Total physical mem", %l3)

	! Before we can start using C compiled PIC code
	! we have to adjust the GLOBAL_OFFSET_TABLE

	setx	_GLOBAL_OFFSET_TABLE_, %g7, %g1
	setx	_start_data, %g7, %g2
	RELOC_OFFSET(%g7, %g3)
	sub	%g1, %g3, %g1			! run-time GOT start
	sub	%g2, %g3, %g2			! run-time GOT limit
1:
	ldx	[%g1], %g4			! %g1 _GLOBAL_OFFSET_TABLE_
	sub	%g4, %g3, %g4			! rebase each GOT entry
	stx	%g4, [%g1]
	add	%g1, 8, %g1
	cmp	%g1, %g2
	blt,pt	%xcc, 1b
	nop

	PRINT("setup_ncu\r\n")
	HVCALL(setup_ncu)

#ifdef CONFIG_VBSC_SVC
	PRINT("Sending HV start message to vbsc\r\n")
	HVCALL(vbsc_hv_start)
#endif

	HVCALL(setup_niu)

#ifdef CONFIG_PIU
	HVCALL(setup_piu)
#endif

	! Scrub all of memory, except for the hypervisor.
	! This starts all other strands.
	STRAND_STRUCT(%g1)
	STRAND2CONFIG_STRUCT(%g1, %i0)
	HVCALL(scrub_all_memory)

	! Setup and run the initial C environment
	wrpr	%g0, 0, %gl
	wrpr	%g0, 0, %tl
	HVCALL(setup_c_environ)
	call	c_start
	nop

	! Recover and run the old initialization code

	STRAND_STRUCT(%g1)
	STRAND2CONFIG_STRUCT(%g1, %i0)
	ldx	[%i0 + CONFIG_GUESTS], %i1
	! FIX: restored the missing whitespace between mnemonic and operand
	ldx	[%i0 + CONFIG_VCPUS], %i2

	/*
	 * Initialize error_lock
	 */
	stx	%g0, [%i0 + CONFIG_ERRORLOCK]

	/*
	 * Initialize the error buffer in use flag
	 */
	stx	%g0, [%i0 + CONFIG_SRAM_ERPT_BUF_INUSE]

	/*
	 * enable per-chip errors
	 */
	HVCALL(enable_errors_chip)

	/*
	 * Setup everything else
	 */
#ifdef SUPPORT_NIAGARA2_1x
	HVCALL(init_cpu_yield_table)
#endif

#ifdef CONFIG_FPGA
	/*
	 * The FPGA interrupt output is an active-low level interrupt.
	 * The Niagara SSI interrupt input is falling-edge-triggered.
	 * We can lose an interrupt across a warm reset so workaround
	 * that by injecting a fake SSI interrupt at start-up time.
	 */
	HVCALL(fake_ssiirq)

	/*
	 * Setup the FPGA LDC registers
	 */
	HVCALL(setup_fpga_ldc)
#endif

#ifdef CONFIG_SVC
	/* initialize the service channel */
	call	c_svc_init
	nop
#endif /* CONFIG_SVC */

	/*
	 * Start heartbeat
	 */
	HVCALL(heartbeat_enable)

#ifdef CONFIG_CLEANSER
	/*
	 * kick-off the L2 cache cleanser cyclic
	 * starting with way 0 (%g1 = 0)
	 */
	mov	%g0, %g1
	HVCALL(l2_cache_cleanser_setup)
#endif /* CONFIG_CLEANSER */

	/*
	 * Final cleanup before we can consider the hypervisor truly
	 * running.
	 */

	DEBUG_SPINLOCK_ENTER(%g1, %g2, %g3)

	/*
	 * Ensure all zero'd memory is flushed from the l2$
	 */
	PRINT_NOTRAP("Flush the L2 cache\r\n");
	HVCALL(l2_flush_cache)

#ifdef RESETCONFIG_ENABLEHWSCRUBBERS
	PRINT_NOTRAP("Enable L2 and DRAM HW scrubbers\r\n");
	HVCALL(enable_hw_scrubbers)
#endif

	DEBUG_SPINLOCK_EXIT(%g1)

#if defined(CONFIG_SVC) && defined(CONFIG_VBSC_SVC)
	PRINT("Sending guest start message to vbsc\r\n")

	call	c_vbsc_guest_start
	mov	0, %o0				! ID of guest started
#endif /* defined(CONFIG_SVC) && defined(CONFIG_VBSC_SVC) */

	ba,a	start_work
	nop
	SET_SIZE(start_master)
478 | ||
/*
 * start_slave - entry point for every non-boot (slave) strand.
 *
 * Incoming:
 *	%g1 = membase (base PA of hypervisor memory)
 *
 * If running from ROM, re-enters via the slave slot of the RAM trap
 * table at membase.  Otherwise points %htba at the RAM trap table,
 * establishes this strand's scratchpad identity, scrubs this strand's
 * share of memory (CONFIG_FPGA only) and joins start_work.
 */
ENTRY_NP(start_slave)
	mov	%g1, %i0			! membase

	! init scratch pad registers to a known state
	SET_VCPU_STRUCT(%g0, %g4)
	SET_STRAND_STRUCT(%g0, %g4)

	rd	%pc, %g4
	srlx	%g4, 32, %g4			! in rom?
	cmp	%g4, 0x80			! bits <39,32>
	blu,pt	%xcc, 1f			! no, in ram already
	nop
	add	%i0, (TT_POR * TRAPTABLE_ENTRY_SIZE) + 0x10, %g4 ! slave offset
	jmp	%g4				! goto ram traptable
	nop
1:
	! trap table lives at the start of hypervisor memory
	wrhpr	%i0, %htba

	! Setup slave scratchpad for own identity

.reloc2:
	! compute link-time vs run-time offset from our own %pc
	rd	%pc, %g1
	setx	.reloc2, %g3, %g2
	sub	%g2, %g1, %g3			! %g3 = offset
	setx	config, %g4, %g2
	sub	%g2, %g3, %g2
	! %g2 = &config

	PHYS_STRAND_ID(%i3)
	! %i3 = current cpu id

	/*
	 * Enable per-strand error reporting
	 */
	HVCALL(enable_errors_strand)

	! Set up the scratchpad registers

	! %i2 = &strands[current cpu id]
	ldx	[%g2 + CONFIG_STRANDS], %i2
	set	STRAND_SIZE, %g1
	mulx	%g1, %i3, %g1
	add	%i2, %g1, %i2
	SET_STRAND_STRUCT(%i2, %g1)

	SET_VCPU_STRUCT(%g0, %g1)		! no vcpu bound yet

	! initialize the strand mini-stack
	stx	%g0, [%i2 + STRAND_MINI_STACK + MINI_STACK_PTR]

	! save &config on mini-stack since it cannot be retrieved
	! via CONFIG_STRUCT() until the master has run c_start()
	STRAND_PUSH(%g2, %g3, %g4)

	! Get us a sane tl & gl and out of red state asap
	wrpr	%g0, 0, %gl
	wrpr	%g0, 0, %tl
	wrhpr	%g0, (HPSTATE_ENB | HPSTATE_HPRIV), %hpstate
	wrpr	%g0, NWINDOWS - 2, %cansave
	wrpr	%g0, NWINDOWS - 2, %cleanwin
	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, 0, %cwp
	wrpr	%g0, 0, %wstate

	STRAND_POP(%g4, %g3)			! restore %g4 = &config

	/* Slave now does its bit of the memory scrubbing */
#ifdef CONFIG_FPGA
	! fetch this strand's scrub region assigned by the master
	STRAND_STRUCT(%g1)
	set	STRAND_SCRUB_SIZE, %g3
	ldx	[%g1 + %g3], %g2		! %g2 = scrub length
	set	STRAND_SCRUB_BASEPA, %g3
	ldx	[%g1 + %g3], %g1		! %g1 = scrub base PA

	HVCALL(memscrub)

	! atomically clear our bit in config.scrub_sync to report
	! scrub completion to the master (casx retry loop)
	STRAND_STRUCT(%g1)
	ldub	[%g1 + STRAND_ID], %i3
	mov	1, %i0
	sllx	%i0, %i3, %i0			! %i0 = our sync bit
	add	%g4, CONFIG_SCRUB_SYNC, %g4
1:
	ldx	[ %g4 ], %g2
	andn	%g2, %i0, %g3
	casx	[ %g4 ], %g2, %g3
	cmp	%g2, %g3
	bne,pt	%xcc, 1b			! raced with another strand; retry
	nop
#endif

	ba,a,pt	%xcc, start_work
	nop
	SET_SIZE(start_slave)
572 | ||
573 | ! | |
574 | ! The main work section for each CPU strand. | |
575 | ! | |
576 | ! We basically look for things to do in the strand | |
577 | ! structures work wheel. If we can find nothing to | |
578 | ! do there, we simple suspend the strand and wait | |
579 | ! for HV mondos which would request this strand to | |
580 | ! add or remove something from its work wheel. | |
581 | ! | |
582 | ||
/*
 * start_work - per-strand idle/dispatch loop.
 *
 * Walks the strand's schedule-slot wheel looking for something to do.
 * A RUN_VCPU slot dispatches to launch_vcpu (slot arg -> %g1); a NOP
 * slot advances to the next slot.  After one full pass with no work,
 * the strand suspends in hvmondo_wait and re-enters via
 * handle_hvmondo when another strand sends it work.
 *
 * Expects no register setup beyond the HV scratchpad registers.
 */
ENTRY_NP(start_work)
	!
	! This loop works through the schedule list looking for
	! something to do.
	! If an entire pass is made without an action, then we
	! simply go to sleep waiting for a X-call mondo.
	!
	mov	0, %g4				! %g4 = slots examined this pass
.work_loop:
	STRAND_STRUCT(%g1)			! %g1 = strand struct
	lduh	[%g1 + STRAND_CURRENT_SLOT], %g2 ! %g2 = current slot index
	mulx	%g2, SCHED_SLOT_SIZE, %g3
	add	%g1, %g3, %g3
	add	%g3, STRAND_SLOT, %g3		! %g3 = &strand->slot[%g2]

	ldx	[%g3 + SCHED_SLOT_ACTION], %g6
	cmp	%g6, SLOT_ACTION_RUN_VCPU
	be,a,pt	%xcc, launch_vcpu
	ldx	[%g3 + SCHED_SLOT_ARG], %g1	! get arg in annulled ds
	cmp	%g6, SLOT_ACTION_NOP
	be,pt	%xcc, 1f
	nop

	HVABORT(-1, "Illegal slot code")
1:
	! advance to the next slot, wrapping to 0 at NUM_SCHED_SLOTS
	inc	%g2
	cmp	%g2, NUM_SCHED_SLOTS
	move	%xcc, %g0, %g2			! conditional move: wrap to 0
	sth	%g2, [%g1 + STRAND_CURRENT_SLOT]
	inc	%g4
	cmp	%g4, NUM_SCHED_SLOTS		! full pass completed?
	bne,pt	%xcc, .work_loop
	nop

	! OK nothing found to do wait for wake up call

	/* Wait for a HVXCALL PYN */
	HVCALL(hvmondo_wait)

	ba,pt	%xcc, handle_hvmondo
	nop
	SET_SIZE(start_work)
625 | ||
626 | ||
627 | /* | |
628 | * stop_vcpu | |
629 | * | |
630 | * stop a virtual cpu | |
631 | * and all associated state. | |
632 | * resets it so if started again, it will have a clean state | |
633 | * associated interrupts and memory mappings are unconfigured. | |
634 | * | |
635 | * NOTE: we go to some lengths to NOT get the vcpup from the | |
636 | * scratchpad registers so we can call this even when the vcpu | |
637 | * is not currently active. | |
638 | * | |
639 | * Expects: | |
640 | * %g1 : vcpu pointer | |
641 | * Returns: | |
642 | * %g1 : vcpu pointer | |
643 | * Register Usage: | |
644 | * %g1..%g6 | |
645 | * %g7 return address | |
646 | */ | |
/*
 * stop_vcpu
 *
 * stop a virtual cpu
 * and all associated state.
 * resets it so if started again, it will have a clean state
 * associated interrupts and memory mappings are unconfigured.
 *
 * NOTE: we go to some lengths to NOT get the vcpup from the
 * scratchpad registers so we can call this even when the vcpu
 * is not currently active.
 *
 * Expects:
 *	%g1 : vcpu pointer
 * Returns:
 *	%g1 : vcpu pointer
 * Register Usage:
 *	%g1..%g6
 *	%g7 return address
 */
ENTRY_NP(stop_vcpu)

	VCPU2GUEST_STRUCT(%g1, %g2)		! %g2 = owning guest

#ifdef DEBUG
	brnz	%g2, 1f			! paranoia. expect this to be nz
	nop
	HVABORT(-1, "vcpu has no assigned guest")
1:
#endif

	!
	! Save the vcpu ptr - we need it again later
	!
	STRAND_PUSH(%g1, %g3, %g4)

	!
	! Remove the strands permanent mappings
	!
	add	%g2, GUEST_PERM_MAPPINGS_LOCK, %g3
	SPINLOCK_ENTER(%g3, %g4, %g5)

	! Discover the bit for this cpu in the cpuset
	ldub	[%g1 + CPU_VID], %g3
	and	%g3, MAPPING_XWORD_MASK, %g5
	mov	1, %g4
	sllx	%g4, %g5, %g4			! %g4 = this cpu's bit mask
	srlx	%g3, MAPPING_XWORD_SHIFT, %g5
	sllx	%g5, MAPPING_XWORD_BYTE_SHIFT_BITS, %g5	! offset into xword array

	! Walk all perm mapping entries:
	! %g2 = &last entry, %g3 = negative byte offset stepping up to 0
	add	%g2, GUEST_PERM_MAPPINGS + GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1), %g2
	mov	-(GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1)), %g3
1:
	add	%g3, %g2, %g1			! Ptr to this perm mapping
	add	%g1, %g5, %g1			! Xword in a specific cpu set
	! Unset bit fields for this cpu
	ldx	[ %g1 + MAPPING_ICPUSET ], %g6
	andn	%g6, %g4, %g6
	stx	%g6, [%g1 + MAPPING_ICPUSET]
	ldx	[ %g1 + MAPPING_DCPUSET ], %g6
	andn	%g6, %g4, %g6
	stx	%g6, [%g1 + MAPPING_DCPUSET]

	! If entry is completely null, invalidate entry
	! (%g1 = xword byte offset, scanned high to low)
	mov	MAPPING_XWORD_SIZE*(NVCPU_XWORDS-1), %g1
2:
	add	%g3, %g2, %g6		! recalculate %g6 - out of registers
	add	%g6, %g1, %g6
	ldx	[%g6 + MAPPING_ICPUSET], %g6
	brnz	%g6, 3f				! still referenced: keep entry
	add	%g3, %g2, %g6		! recalculate %g6 - out of registers
	add	%g6, %g1, %g6
	ldx	[%g6 + MAPPING_DCPUSET], %g6
	brnz	%g6, 3f				! still referenced: keep entry
	nop
	brgz,pt	%g1, 2b
	sub	%g1, MAPPING_XWORD_SIZE, %g1	! next xword (delay slot)

	! no cpu references this mapping: invalidate it
	add	%g3, %g2, %g6		! recalculate %g6 out of registers
	stx	%g0, [%g6 + MAPPING_VA]
	! clear TTE_V, bit 63
	ldx	[%g6 + MAPPING_TTE], %g1
	sllx	%g1, 1, %g1
	srlx	%g1, 1, %g1
	stx	%g1, [%g6 + MAPPING_TTE]
3:
	brlz,pt	%g3, 1b				! more entries while offset < 0
	add	%g3, GUEST_PERM_MAPPINGS_INCR, %g3

	membar	#Sync			! needed ?

	!
	! demap all unlocked tlb entries
	!
	set	TLB_DEMAP_ALL_TYPE, %g3
	stxa	%g0, [%g3]ASI_IMMU_DEMAP
	stxa	%g0, [%g3]ASI_DMMU_DEMAP

	membar	#Sync			! needed ?

	! Reload guest and cpu struct pointers
	STRAND_POP(%g1, %g2)
	VCPU2GUEST_STRUCT(%g1, %g2)

	add	%g2, GUEST_PERM_MAPPINGS_LOCK, %g3
	SPINLOCK_EXIT(%g3)

	!
	! remove this cpu as the target of any ldc interrupts
	!
	set	GUEST_LDC_ENDPOINT, %g3
	add	%g2, %g3, %g3
	set	(GUEST_LDC_ENDPOINT_INCR * MAX_LDC_CHANNELS), %g5
	add	%g3, %g5, %g4
	! %g3 = ldc endpoint array base address
	! %g4 = current offset into array

.next_ldc:
	! iterate from the last endpoint down to the first
	sub	%g4, GUEST_LDC_ENDPOINT_INCR, %g4
	cmp	%g4, %g3
	bl	%xcc, .ldc_disable_loop_done
	nop

	ldub	[%g4 + LDC_IS_LIVE], %g5
	brz	%g5, .next_ldc			! skip dead channels
	nop


	! %g1 = the vcpu to stop

	!
	! Only clear out the Q CPU so that no interrupts
	! will be targeted to this CPU. The LDC channel is
	! still live and incoming packets will still be
	! queued up.
	!
	ldx	[%g4 + LDC_TX_MAPREG + LDC_MAPREG_CPUP], %g5
	cmp	%g5, %g1
	bne	%xcc, .check_rx
	nop
	stx	%g0, [%g4 + LDC_TX_MAPREG + LDC_MAPREG_CPUP]
.check_rx:
	ldx	[%g4 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g5
	cmp	%g5, %g1
	bne	%xcc, .next_ldc
	nop
	stx	%g0, [%g4 + LDC_RX_MAPREG + LDC_MAPREG_CPUP]

	ba	.next_ldc
	nop

.ldc_disable_loop_done:

	! FIXME: must cancel device interrupts targeted at this cpu
	! HOW?

	! FIXME; Do we have to do all this or does it happen on
	! the way back in on starting the cpu again ?

	stx	%g0, [%g1 + CPU_MMU_AREA_RA]	! erase remaining info
	stx	%g0, [%g1 + CPU_MMU_AREA]
	stx	%g0, [%g1 + CPU_TTRACEBUF_RA]
	stx	%g0, [%g1 + CPU_TTRACEBUF_PA]
	stx	%g0, [%g1 + CPU_TTRACEBUF_SIZE]
	stx	%g0, [%g1 + CPU_NTSBS_CTX0]
	stx	%g0, [%g1 + CPU_NTSBS_CTXN]

	! Unconfig all the interrupt and error queues
	stx	%g0, [%g1 + CPU_ERRQNR_BASE]
	stx	%g0, [%g1 + CPU_ERRQNR_BASE_RA]
	stx	%g0, [%g1 + CPU_ERRQNR_SIZE]
	stx	%g0, [%g1 + CPU_ERRQNR_MASK]

	mov	ERROR_NONRESUMABLE_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE
	mov	ERROR_NONRESUMABLE_QUEUE_TAIL, %g3
	stxa	%g0, [%g3]ASI_QUEUE

	stx	%g0, [%g1 + CPU_ERRQR_BASE]
	stx	%g0, [%g1 + CPU_ERRQR_BASE_RA]
	stx	%g0, [%g1 + CPU_ERRQR_SIZE]
	stx	%g0, [%g1 + CPU_ERRQR_MASK]

	mov	ERROR_RESUMABLE_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE
	mov	ERROR_RESUMABLE_QUEUE_TAIL, %g3
	stxa	%g0, [%g3]ASI_QUEUE

	stx	%g0, [%g1 + CPU_DEVQ_BASE]
	stx	%g0, [%g1 + CPU_DEVQ_BASE_RA]
	stx	%g0, [%g1 + CPU_DEVQ_SIZE]
	stx	%g0, [%g1 + CPU_DEVQ_MASK]

	mov	DEV_MONDO_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE
	mov	DEV_MONDO_QUEUE_TAIL, %g3
	stxa	%g0, [%g3]ASI_QUEUE

	stx	%g0, [%g1 + CPU_CPUQ_BASE]
	stx	%g0, [%g1 + CPU_CPUQ_BASE_RA]
	stx	%g0, [%g1 + CPU_CPUQ_SIZE]
	stx	%g0, [%g1 + CPU_CPUQ_MASK]

	mov	CPU_MONDO_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE
	mov	CPU_MONDO_QUEUE_TAIL, %g3
	stxa	%g0, [%g3]ASI_QUEUE


	! FIXME
	! just an off-the-cuff list
	! what else of this cpu struct should be cleared/cleaned?
	!
	! FIXME: All this stuff goes away if we call reset_vcpu_state
	! in reconf.c - except that maybe we do this in startvcpu instead ?

	! indicate cpu is unconfigured
	! (store in annulled delay slot: skipped when state is ERROR)
	mov	CPU_STATE_STOPPED, %g3
	ldx	[%g1 + CPU_STATUS], %g4		! do not change status to
	cmp	%g4, CPU_STATE_ERROR		! STATE_STOPPPED if in CPU
	bne,a,pn %xcc, 1f			! is in error
	stx	%g3, [%g1 + CPU_STATUS]
	membar	#Sync
1:

	HVRET
	SET_SIZE(stop_vcpu)
854 | ||
855 | ! | |
856 | ! Enter from start_work loop | |
857 | ! Expects no register setups (except hv scratchpads) | |
858 | ! Provides register setups for master_start | |
859 | ! | |
860 | ! Argument in %g1 points to vcpu struct | |
861 | ! | |
862 | ||
863 | ENTRY_NP(launch_vcpu) | |
864 | /* | |
865 | * quick set of sanity checks. | |
866 | */ | |
867 | #ifdef DEBUG | |
868 | /* is it assigned to this strand ? */ | |
869 | STRAND_STRUCT(%g2) | |
870 | ldx [%g1 + CPU_STRAND], %g3 | |
871 | cmp %g2, %g3 | |
872 | be,pt %xcc, 1f | |
873 | nop | |
874 | ||
875 | HVABORT(-1, "Scheduled vcpu not assigned to this strand") | |
876 | 1: | |
877 | ||
878 | /* | |
879 | * is the cpu configured ? | |
880 | * is it stopped or running and not in error ? | |
881 | */ | |
882 | ldx [%g1 + CPU_STATUS], %g3 | |
883 | cmp %g3, CPU_STATE_STOPPED | |
884 | be,pt %xcc, 1f | |
885 | cmp %g3, CPU_STATE_SUSPENDED | |
886 | be,pt %xcc, 1f | |
887 | cmp %g3, CPU_STATE_RUNNING | |
888 | be,pt %xcc, 1f | |
889 | cmp %g3, CPU_STATE_STARTING | |
890 | be,pt %xcc, 1f | |
891 | nop | |
892 | ||
893 | HVABORT(-1, "Scheduled vcpu is in an illegal state or not configured") | |
894 | 1: | |
895 | ||
896 | #endif | |
897 | ||
898 | /* | |
899 | * OK let fly ... | |
900 | */ | |
901 | ||
902 | ! | |
903 | ! The vcpu should be fully configured and ready to | |
904 | ! go even if it has never been run before. | |
905 | ! However, because the vcpu state save and restore is not | |
906 | ! complete, and because we're not (re)scheduling vcpus yet | |
907 | ! then the very first time the vcpu gets kicked off we try and | |
908 | ! initialize some of the basic registers that are not | |
909 | ! (re)stored into place with the state restoration. | |
910 | ! | |
911 | ! We figure this all out from the cpu state. If it was | |
912 | ! stopped, then we need to configure registers to bring it | |
913 | ! alive. If it is RUNNING or SUSPENDED then we just | |
914 | ! restore the registers and launch into it. | |
915 | ! | |
916 | ! An additional wrinkle - if the cpu is stopped, then it | |
917 | ! may be that the guest too is stopped, in which case we | |
918 | ! assume we're the boot cpu and do the appropriate reset setup | |
919 | ! for the guest too. This can result in an aync status update | |
920 | ! message on the HVCTL channel if it is configured. | |
921 | ! | |
922 | ||
923 | SET_VCPU_STRUCT(%g1, %g2) | |
924 | ||
925 | ldx [%g1 + CPU_STATUS], %g3 | |
926 | cmp %g3, CPU_STATE_STOPPED | |
927 | be,pn %xcc, slow_start | |
928 | cmp %g3, CPU_STATE_STARTING | |
929 | be,pn %xcc, slow_start | |
930 | nop | |
931 | ||
932 | ! Fast start ... | |
933 | PRINT("About to restore\r\n") | |
934 | HVCALL(vcpu_state_restore) | |
935 | PRINT_NOTRAP("Completed restore\r\n") | |
936 | fast_start: | |
937 | ||
938 | VCPU_STRUCT(%g1) | |
939 | mov CPU_STATE_RUNNING, %g2 | |
940 | stx %g2, [%g1 + CPU_STATUS] ! it's running now | |
941 | ||
942 | /* | |
943 | * Now that the vcpu is running, set the starting stick | |
944 | * value for the first utilization query. | |
945 | */ | |
946 | rd %tick, %g3 | |
947 | sllx %g3, 1, %g3 ! remove npt bit | |
948 | srax %g3, 1, %g3 | |
949 | stx %g3, [%g1 + CPU_UTIL_STICK_LAST] | |
950 | ||
951 | set CPU_LAUNCH_WITH_RETRY, %g2 | |
952 | ldub [%g1 + %g2], %g1 | |
953 | brnz,pt %g1, 1f | |
954 | nop | |
955 | done | |
956 | 1: | |
957 | retry | |
958 | ||
slow_start:
	!
	! This section is to formally start a virtual CPU
	! from the stopped state.
	!
	! There are a number of additional things we want to do
	! if this is the very first time we're entering a guest.
	!

	VCPU_GUEST_STRUCT(%g1, %g5)		! %g1 = vcpu, %g5 = guest

#ifdef CONFIG_CRYPTO

	/*
	 * Start crypto
	 */
	mov	%g5, %g2
	!
	! %g1 = cpu struct
	! %g2 = guest struct
	!
	HVCALL(start_crypto)
#endif /* CONFIG_CRYPTO */

	! Dispatch on the guest's current state:
	!   NORMAL    - guest already up; just bring this vcpu in
	!   RESETTING - first vcpu in; perform the full guest boot
	!   SUSPENDED - not implemented (abort)
	!   STOPPED   - must never reach launch_vcpu (abort)
	!   anything else is corruption (abort)
	lduw	[%g5 + GUEST_STATE], %g3
	cmp	%g3, GUEST_STATE_NORMAL
	be,pt	%xcc, .launch_non_boot_cpu
	nop
	cmp	%g3, GUEST_STATE_RESETTING
	be,pt	%xcc, .launch_boot_cpu
	nop
	cmp	%g3, GUEST_STATE_SUSPENDED
	bne,pt	%xcc, 1f
	nop
	HVABORT(-1, "guest suspend not yet supported")
	! when it is supported we need to move the guest
	! from suspended back to its prior state ...
	! which begs the question of whether we want to have
	! the suspended state or a separate flag ?
1:
	cmp	%g3, GUEST_STATE_STOPPED
	bne,pt	%xcc, 1f
	nop
	HVABORT(-1, "guest in STOPPED state in launch_vcpu")
1:
	HVABORT(-1, "invalid guest state in launch_vcpu")
.launch_boot_cpu:

	/*
	 * FIXME: This scrub needs to go away
	 *
	 * Only scrub guest memory if reset reason is POR
	 *
	 * %g1 - vcpu
	 * %g5 - guest
	 */
	set	GUEST_RESET_REASON, %g3
	ldx	[%g5 + %g3], %g3
	cmp	%g3, RESET_REASON_POR
	bne,pt	%xcc, .master_guest_scrub_done
	nop

	! Walk the guest's ra2pa segment array from the highest entry
	! down to entry 0, scrubbing each real memory segment.
	! %g3 = byte offset of the current segment entry
	! %g6 = PA of the last segment scrubbed (dedup; -1 = none yet)
	mov	(NUM_RA2PA_SEGMENTS - 1) * RA2PA_SEGMENT_SIZE, %g3
	set	(-1), %g6
1:
	add	%g3, GUEST_RA2PA_SEGMENT, %g4
	add	%g4, %g5, %g4			! &guest.ra2pa_segment

	! only scrub memory segments (obviously ...)
	ldub	[%g4 + RA2PA_SEGMENT_FLAGS], %g1
	btst	MEM_SEGMENT, %g1
	bz,pn	%xcc, 2f
	nop

	ldx	[%g4 + RA2PA_SEGMENT_BASE], %g1	! RA of base of
						! memory segment
	brlz,pn	%g1, 2f				! negative base = unset
	nop
	ldx	[%g4 + RA2PA_SEGMENT_LIMIT], %g2 ! limit of memory
						! segment
	sub	%g2, %g1, %g2			! %g2 =
						! (limit - base)->size
	brlez,pn %g2, 2f			! skip empty segments
	nop

	ldx	[%g4 + RA2PA_SEGMENT_OFFSET], %g7 ! offset of memory
						! segment
	add	%g1, %g7, %g1			! RA -> PA

	/*
	 * It's possible that two (or more) contiguous segments describe
	 * the same physical area in memory so we keep track of the
	 * last segment PA scrubbed and skip this segment scrub if it's
	 * the same. Note that all the segments will have the same size
	 * (> 16GB) so one scrub fits all.
	 */
	cmp	%g1, %g6
	be,pn	%xcc, 2f
	mov	%g1, %g6			! (delay slot) remember PA

	HVCALL(memscrub)			! scrub [%g1, %g1 + %g2)
2:
	brgz,pt	%g3, 1b				! next (lower) segment
	sub	%g3, RA2PA_SEGMENT_SIZE, %g3	! (delay slot)

.master_guest_scrub_done:

	PRINT("Post memscrub scrub OK\r\n");
1068 | ||
1069 | /* | |
1070 | * Copy guest's firmware image into the partition | |
1071 | */ | |
1072 | VCPU_GUEST_STRUCT(%g1, %g2) | |
1073 | ||
1074 | set GUEST_ROM_BASE, %g7 | |
1075 | ldx [%g5 + %g7], %g1 | |
1076 | set GUEST_ROM_SIZE, %g7 | |
1077 | ldx [%g5 + %g7], %g3 | |
1078 | ||
1079 | ! find segment for the guest which contains GUEST_REAL_BASE | |
1080 | ldx [%g5 + GUEST_REAL_BASE], %g2 ! guest real base addr | |
1081 | srlx %g2, RA2PA_SHIFT, %g2 | |
1082 | sllx %g2, RA2PA_SEGMENT_SHIFT, %g2 ! ra2pa_segment | |
1083 | add %g2, GUEST_RA2PA_SEGMENT, %g2 | |
1084 | add %g5, %g2, %g4 ! %g4 & | |
1085 | ! guest.ra2pa_segment | |
1086 | ldx [%g4 + RA2PA_SEGMENT_BASE], %g2 ! RA of segment base | |
1087 | ldx [%g4 + RA2PA_SEGMENT_OFFSET], %g4 ! Offset of segment base | |
1088 | add %g2, %g4, %g2 ! PA of segment | |
1089 | ||
1090 | ! %g1 ROM base | |
1091 | ! %g2 GUEST base | |
1092 | ! %g3 ROM size | |
1093 | ||
1094 | PRINT("Copying guest firmware from 0x") | |
1095 | PRINTX(%g1) | |
1096 | PRINT(" to 0x") | |
1097 | PRINTX(%g2) | |
1098 | PRINT(" size 0x") | |
1099 | PRINTX(%g3) | |
1100 | PRINTX("\r\n") | |
1101 | HVCALL(xcopy) | |
1102 | ||
1103 | ||
#ifdef CONFIG_PIU

	GUEST_STRUCT(%g3)

	! %g3 guest struct

	PRINT("--- Guest is 0x");
	PRINTX(%g3)

	!
	! Does this guest have control over PIU ?
	! If so, we need to reset and unconfigure the leaf.
	!

	CONFIG_STRUCT(%g1)
	ldx	[%g1 + CONFIG_PCIE_BUSSES], %g2
	ldx	[%g2 + PCIE_DEVICE_GUESTP], %g2	/* bus 0 */
	PRINT(" pcie guestp= 0x");
	PRINTX(%g2)
	PRINT("\r\n")
	cmp	%g2, %g3			! bus 0 owned by this guest?
	bne,pt	%xcc, 1f
	nop

	PRINT("Soft Reset PCI leaf\r\n");

	! Drop to a C-callable environment (TL=0, GL=0) so we can
	! call into the C pcie reset code.
	wrpr	%g0, 0, %tl
	wrpr	%g0, 0, %gl
	HVCALL(setup_c_environ)

	mov	0, %o0				! PCI bus A = 0
	call	pcie_bus_reset
	nop
	brz	%o0, bus_failed			! Bail on Fail
	nop

	! Build the piu cookie: unrelocated piu_dev address minus the
	! hypervisor relocation offset.
	CONFIG_STRUCT(%g1)
	setx	piu_dev, %g7, %g5
	ldx	[%g1 + CONFIG_RELOC], %g7
	sub	%g5, %g7, %g1			! ptr to piu_dev[0]

	mov	0, %g2				! PCI bus A = 0

	! %g1 - piu cookie
	HVCALL(piu_leaf_soft_reset)
1:
bus_failed:
#endif /* CONFIG_PIU */
1152 | ||
	VCPU_GUEST_STRUCT(%g6, %g5)

	! Back to original reg assignments
	! %g6 = cpu
	! %g5 = guest

	! For the boot CPU we must set the launch point - which is in
	! the real trap table. Since we have now copied in a new
	! firmware image, we must also reset the rtba to point to
	! this location.
	! There are only two ways a cpu can start from stopped
	! 1. as the boot cpu in which case we force the start address
	! 2. via a cpu_start API call in which case the start address
	!    is set there.

	ldx	[%g5 + GUEST_REAL_BASE], %g2
	stx	%g2, [%g6 + CPU_RTBA]		! rtba = base of new image
	inc	(TT_POR * TRAPTABLE_ENTRY_SIZE), %g2	! Power-on-reset vector
	stx	%g2, [%g6 + CPU_START_PC]

	/*
	 * Set the guest state to normal, and signal this to Zeus
	 * on the hvctl channel if it is configured.
	 */
	mov	GUEST_STATE_NORMAL, %g1
	stw	%g1, [%g5 + GUEST_STATE]

	mov	SIS_TRANSITION, %g1
	stub	%g1, [%g5 + GUEST_SOFT_STATE]

	! Clear the guest's soft-state description string.
	add	%g5, GUEST_SOFT_STATE_STR, %g1
	mov	SOFT_STATE_SIZE, %g2
	HVCALL(bzero)

	! C environment (TL=0, GL=0) needed for guest_state_notify().
	wrpr	%g0, 0, %tl
	wrpr	%g0, 0, %gl
	HVCALL(setup_c_environ)
	GUEST_STRUCT(%o0)
	call	guest_state_notify
	nop

	/*
	 * Now that the guest is officially up and running,
	 * initialize the utilization statistics.
	 */
	rd	%tick, %g1
	sllx	%g1, 1, %g1			! remove npt bit
	srax	%g1, 1, %g1

	GUEST_STRUCT(%g2)
	set	GUEST_START_STICK, %g3
	add	%g2, %g3, %g3
	stx	%g1, [%g3]

	set	GUEST_UTIL, %g3
	add	%g2, %g3, %g3
	stx	%g1, [%g3 + GUTIL_STICK_LAST]
	stx	%g0, [%g3 + GUTIL_STOPPED_CYCLES]

	ba	1f				! join the common vcpu
	nop					! reset/restore path below
1214 | ||
.launch_non_boot_cpu:

	! C environment (TL=0, GL=0) needed for reset_vcpu_state().
	wrpr	%g0, 0, %tl
	wrpr	%g0, 0, %gl
	HVCALL(setup_c_environ)
1:
	! Common path for boot and non-boot cpus from here on.
	VCPU_STRUCT(%o0)
	call	reset_vcpu_state
	nop

	HVCALL(vcpu_state_restore)

	!
	! This nastyness should be replaced by vcpu_state_restore
	!

	! clear NPT
	rdpr	%tick, %g3
	cmp	%g3, 0
	bge	%xcc, 1f			! NPT (bit 63) already clear
	nop
	sllx	%g3, 1, %g3			! shift NPT out ...
	srlx	%g3, 1, %g3			! ... and zero-fill it back
	wrpr	%g3, %tick
1:
1240 | ||
/* Guest entry trap state: privileged, TSO memory model, max guest GL. */
#define	INITIAL_PSTATE	(PSTATE_PRIV | PSTATE_MM_TSO)
#define	INITIAL_TSTATE	((INITIAL_PSTATE << TSTATE_PSTATE_SHIFT) | \
			    (MAXPGL << TSTATE_GL_SHIFT))

	VCPU_GUEST_STRUCT(%g6, %g5)		! %g6 = vcpu, %g5 = guest

	setx	INITIAL_TSTATE, %g2, %g1
	wrpr	%g1, %tstate
	wrhpr	%g0, %htstate			! clean hyperprivileged tstate

	! Program this vcpu's MMU partition id and zero both contexts.
	ldub	[%g6 + CPU_PARTTAG], %g2
	set	IDMMU_PARTITION_ID, %g1
	stxa	%g2, [%g1]ASI_DMMU
	mov	MMU_PCONTEXT, %g1
	stxa	%g0, [%g1]ASI_MMU
	mov	MMU_SCONTEXT, %g1
	stxa	%g0, [%g1]ASI_MMU

	HVCALL(set_dummytsb_ctx0)
	HVCALL(set_dummytsb_ctxN)
	HVCALL(mmu_hwtw_init)

	/*
	 * A strand must enter the guest with MMUs disabled.
	 * The guest assumes responsibility for establishing
	 * any mappings it requires and enabling the MMU.
	 */
	ldxa	[%g0]ASI_LSUCR, %g1
	set	(LSUCR_DM | LSUCR_IM), %g2
	btst	%g1, %g2
	be,pn	%xcc, 0f			! already disabled
	nop
	andn	%g1, %g2, %g1			! mask out enable bits
	stxa	%g1, [%g0]ASI_LSUCR
0:
	stx	%g0, [%g6 + CPU_MMU_AREA_RA]
	stx	%g0, [%g6 + CPU_MMU_AREA]

	! Clear pending soft interrupts, mask to PIL 15, and empty all
	! mondo and error queues before handing the strand to the guest.
	wr	%g0, 0, SOFTINT
	wrpr	%g0, PIL_15, %pil
	mov	CPU_MONDO_QUEUE_HEAD, %g1
	stxa	%g0, [%g1]ASI_QUEUE
	mov	CPU_MONDO_QUEUE_TAIL, %g1
	stxa	%g0, [%g1]ASI_QUEUE
	mov	DEV_MONDO_QUEUE_HEAD, %g1
	stxa	%g0, [%g1]ASI_QUEUE
	mov	DEV_MONDO_QUEUE_TAIL, %g1
	stxa	%g0, [%g1]ASI_QUEUE

	mov	ERROR_RESUMABLE_QUEUE_HEAD, %g1
	stxa	%g0, [%g1]ASI_QUEUE
	mov	ERROR_RESUMABLE_QUEUE_TAIL, %g1
	stxa	%g0, [%g1]ASI_QUEUE
	mov	ERROR_NONRESUMABLE_QUEUE_HEAD, %g1
	stxa	%g0, [%g1]ASI_QUEUE
	mov	ERROR_NONRESUMABLE_QUEUE_TAIL, %g1
	stxa	%g0, [%g1]ASI_QUEUE
1298 | ||
	! FIXME: This should be part of the restore_state call
	! initialize fp regs
	! Enable FP access (PSTATE.PEF + FPRS.FEF), zero a scratch
	! doubleword, then load it into every FP register pair so the
	! guest never sees stale FP state from a previous owner.
	rdpr	%pstate, %g1
	or	%g1, PSTATE_PEF, %g1
	wrpr	%g1, %g0, %pstate
	wr	%g0, FPRS_FEF, %fprs
	stx	%g0, [%g6 + CPU_SCR0]		! zero the scratch slot
	ldd	[%g6 + CPU_SCR0], %f0
	ldd	[%g6 + CPU_SCR0], %f2
	ldd	[%g6 + CPU_SCR0], %f4
	ldd	[%g6 + CPU_SCR0], %f6
	ldd	[%g6 + CPU_SCR0], %f8
	ldd	[%g6 + CPU_SCR0], %f10
	ldd	[%g6 + CPU_SCR0], %f12
	ldd	[%g6 + CPU_SCR0], %f14
	ldd	[%g6 + CPU_SCR0], %f16
	ldd	[%g6 + CPU_SCR0], %f18
	ldd	[%g6 + CPU_SCR0], %f20
	ldd	[%g6 + CPU_SCR0], %f22
	ldd	[%g6 + CPU_SCR0], %f24
	ldd	[%g6 + CPU_SCR0], %f26
	ldd	[%g6 + CPU_SCR0], %f28
	ldd	[%g6 + CPU_SCR0], %f30

	! upper half of the FP file (double-precision only)
	ldd	[%g6 + CPU_SCR0], %f32
	ldd	[%g6 + CPU_SCR0], %f34
	ldd	[%g6 + CPU_SCR0], %f36
	ldd	[%g6 + CPU_SCR0], %f38
	ldd	[%g6 + CPU_SCR0], %f40
	ldd	[%g6 + CPU_SCR0], %f42
	ldd	[%g6 + CPU_SCR0], %f44
	ldd	[%g6 + CPU_SCR0], %f46
	ldd	[%g6 + CPU_SCR0], %f48
	ldd	[%g6 + CPU_SCR0], %f50
	ldd	[%g6 + CPU_SCR0], %f52
	ldd	[%g6 + CPU_SCR0], %f54
	ldd	[%g6 + CPU_SCR0], %f56
	ldd	[%g6 + CPU_SCR0], %f58
	ldd	[%g6 + CPU_SCR0], %f60
	ldd	[%g6 + CPU_SCR0], %f62

	ldx	[%g6 + CPU_SCR0], %fsr		! clear %fsr
	wr	%g0, 0, %gsr
	wr	%g0, 0, %fprs			! leave FP disabled for guest
1343 | ||
	! %g6 cpu
	VCPU2GUEST_STRUCT(%g6, %g5)
	! %g5 guest

	/*
	 * Initial arguments for the guest
	 */
	mov	CPU_STATE_RUNNING, %o0
	stx	%o0, [%g6 + CPU_STATUS]
	membar	#Sync

	/*
	 * Start at the correct POR vector entry point
	 */
	set	CPU_LAUNCH_WITH_RETRY, %g2
	stb	%g0, [%g6 + %g2]		! 0 => exit via done, not retry

	set	CPU_START_PC, %g2
	ldx	[%g6 + %g2], %g2
	wrpr	%g2, %tnpc			! "done" resumes at %tnpc

	ldx	[%g6 + CPU_START_ARG], %o0	! argument
	ldx	[%g5 + GUEST_REAL_BASE], %i0	! memory base

	! find size of base memory segment
	mov	%i0, %g2
	srlx	%g2, RA2PA_SHIFT, %g2
	sllx	%g2, RA2PA_SEGMENT_SHIFT, %g2	! ra2pa_segment
	add	%g2, GUEST_RA2PA_SEGMENT, %g2
	add	%g5, %g2, %g4			! %g4 &guest.ra2pa_segment
	ldx	[%g4 + RA2PA_SEGMENT_BASE], %g1
	ldx	[%g4 + RA2PA_SEGMENT_LIMIT], %g2
	sub	%g2, %g1, %i1			! memory size = limit - base

	membar	#Sync

	ba	fast_start			! mark running and enter guest
	nop
	SET_SIZE(launch_vcpu)
1383 | ||
1384 | ||
#ifdef RESETCONFIG_ENABLEHWSCRUBBERS
/*
 * Configuration
 */
#define	DEFAULT_L2_SCRUBINTERVAL	0x100
#define	DEFAULT_DRAM_SCRUBFREQ		0xfff

/*
 * Helper macros which check if the scrubbers should be enabled, if so
 * they get enabled with the default scrub rates.
 *
 * DRAM_SCRUB_ENABLE(dram_base, bank, reg1, reg2)
 *	Program the scrub frequency and then the enable bit for one
 *	DRAM bank; reg1/reg2 are scratch.
 *
 *	NOTE(review): both "set" lines compute the same
 *	DRAM_SCRUB_ENABLE_REG offset, so the frequency store is
 *	immediately overwritten by the enable store.  The first store
 *	was presumably meant for the scrub-frequency register -
 *	confirm against the chip's DRAM controller register spec.
 */
#define	DRAM_SCRUB_ENABLE(dram_base, bank, reg1, reg2)			\
	.pushlocals							;\
	/* skip banks which are disabled. causes hang. */		;\
	SKIP_DISABLED_DRAM_BANK(bank, reg1, reg2, 1f)			;\
	set	DRAM_SCRUB_ENABLE_REG + ((bank) * DRAM_BANK_STEP), reg1	;\
	mov	DEFAULT_DRAM_SCRUBFREQ, reg2				;\
	stx	reg2, [dram_base + reg1]				;\
	set	DRAM_SCRUB_ENABLE_REG + ((bank) * DRAM_BANK_STEP), reg1	;\
	mov	DRAM_SCRUB_ENABLE_REG_ENAB, reg2			;\
	stx	reg2, [dram_base + reg1]				;\
1:	.poplocals
1407 | ||
/*
 * L2_SCRUB_ENABLE(l2cr_base, bank, reg1, reg2)
 *	Enable the L2$ scrubber on one bank with the default interval.
 *	Skips disabled banks, and banks already scrubbing (so an
 *	operator-chosen interval is not clobbered).  reg1/reg2 are
 *	scratch; the control register is read-modify-written.
 */
#define	L2_SCRUB_ENABLE(l2cr_base, bank, reg1, reg2)			\
	.pushlocals							;\
	SKIP_DISABLED_L2_BANK(bank, reg1, reg2, 1f)			;\
	set	bank << L2_BANK_SHIFT, reg1				;\
	ldx	[l2cr_base + reg1], reg2				;\
	btst	L2_SCRUBENABLE, reg2					;\
	bnz,pt	%xcc, 1f						;\
	nop								;\
	set	L2_SCRUBINTERVAL_MASK, reg1				;\
	andn	reg2, reg1, reg2					;\
	set	DEFAULT_L2_SCRUBINTERVAL, reg1				;\
	sllx	reg1, L2_SCRUBINTERVAL_SHIFT, reg1			;\
	or	reg1, L2_SCRUBENABLE, reg1				;\
	or	reg2, reg1, reg2					;\
	set	bank << L2_BANK_SHIFT, reg1				;\
	stx	reg2, [l2cr_base + reg1]				;\
1:	.poplocals
1425 | ||
	/*
	 * Ensure all zero'd memory is flushed from the l2$
	 *
	 * NOTE(review): this fragment sits outside any ENTRY/SET_SIZE
	 * pair, between the scrubber macros and enable_hw_scrubbers
	 * below; anything executing it falls straight through into
	 * enable_hw_scrubbers.  It looks like it belongs at a call
	 * site of enable_hw_scrubbers - confirm intended placement.
	 * %g5/%g6 are parked in %o0/%o1 across l2_flush_cache.
	 */
	mov	%g5, %o0
	mov	%g6, %o1
	HVCALL(l2_flush_cache)
	PRINT_NOTRAP("Enable Hardware Scrubber\r\n");
	mov	%o1, %g6
	mov	%o0, %g5
1435 | ||
/*
 * enable_hw_scrubbers
 *
 * Turn on the L2$ scrubber for every enabled L2 bank and the DRAM
 * scrubber for every enabled DRAM bank, using the default rates
 * defined above.  Disabled banks (and L2 banks already scrubbing)
 * are skipped by the helper macros.
 *
 * Clobbers: %g1 - %g3
 */
ENTRY(enable_hw_scrubbers)
	/*
	 * Enable the l2$ scrubber for each of the enabled l2$ banks
	 */
	setx	L2_CONTROL_REG, %g2, %g1
	L2_SCRUB_ENABLE(%g1, /* bank */ 0, %g2, %g3)
	L2_SCRUB_ENABLE(%g1, /* bank */ 1, %g2, %g3)
	L2_SCRUB_ENABLE(%g1, /* bank */ 2, %g2, %g3)
	L2_SCRUB_ENABLE(%g1, /* bank */ 3, %g2, %g3)
	L2_SCRUB_ENABLE(%g1, /* bank */ 4, %g2, %g3)
	L2_SCRUB_ENABLE(%g1, /* bank */ 5, %g2, %g3)
	L2_SCRUB_ENABLE(%g1, /* bank */ 6, %g2, %g3)
	L2_SCRUB_ENABLE(%g1, /* bank */ 7, %g2, %g3)

	/*
	 * Enable the Niagara memory scrubber for each enabled DRAM
	 * bank
	 */
	setx	DRAM_BASE, %g2, %g1
	DRAM_SCRUB_ENABLE(%g1, /* bank */ 0, %g2, %g3)
	DRAM_SCRUB_ENABLE(%g1, /* bank */ 1, %g2, %g3)
	DRAM_SCRUB_ENABLE(%g1, /* bank */ 2, %g2, %g3)
	DRAM_SCRUB_ENABLE(%g1, /* bank */ 3, %g2, %g3)

	HVRET
	SET_SIZE(enable_hw_scrubbers)
#endif
1462 | #endif | |
1463 | ||
1464 | ||
/*
 * scrub_all_memory
 *
 * Scrub all of memory except for the HV.
 * Only scrub when running on real hardware, i.e. when the HW FPGA is
 * present (CONFIG_FPGA).
 *
 * Parallelize the scrubbing activity by breaking the total
 * amount into chunks that each CPU can handle, and require them to
 * do their bit as part of their initial startup activity.
 *
 * Inputs:
 *	%i0 global config pointer
 * Clobbers: %g1 - %g7, %o1 - %o3, %o7, %l0 - %l3, %l7
 */

ENTRY_NP(scrub_all_memory)
	mov	%g7, %l7			! save return address

	ldx	[%i0 + CONFIG_MEMBASE], %l0
	ldx	[%i0 + CONFIG_MEMSIZE], %l1
	ldx	[%i0 + CONFIG_STRAND_STARTSET], %l2
	ldx	[%i0 + CONFIG_PHYSMEMSIZE], %l3

#ifdef CONFIG_FPGA
	! How many functional strands do we have available?
	! (popcount of the strand-start set: walk a probe bit across
	! all 64 positions; %o2 accumulates the count)
	mov	%l2, %o7
	mov	1, %o1
	mov	%g0, %o2
1:
	andcc	%o7, %o1, %g0
	beq,pt	%xcc, 2f
	nop
	add	%o2, 1, %o2
2:
	sllx	%o1, 1, %o1
	brnz,pt	%o1, 1b				! until the bit shifts out
	nop

	! %o2 = number of available strands
	PRINT("Scrubbing the rest of memory\r\n")
	PRINT_REGISTER("Number of strands", %o2)

	PRINT_REGISTER("membase", %l0)
	PRINT_REGISTER("memsize", %l1)
	PRINT_REGISTER("physmem", %l3)

	mov	%l0, %g1			! membase
	mov	%l1, %g2			! memsize
	add	%g1, %g2, %g1			! start of rest of memory
	mov	%l3, %g2			! total size
	sub	%g2, %g1, %g3
	! %g1 = start address
	! %g3 = size to scrub

	! Figure a chunk per strand (round up to 64 bytes)
	udivx	%g3, %o2, %g3
	add	%g3, 63, %g3
	andn	%g3, 63, %g3

	! Now allocate a slice per strand (phys cpu)
	! %i0 = config struct
	! %o7 = live strand bit mask
	! %g1 = scrub start address
	! %g2 = max scrub address
	! %g3 = size for each chunk

	ldx	[%i0 + CONFIG_STRANDS], %o3	! %o3 = strand struct array
	mov	%g0, %g6			! %g6 = strand index
1:
	mov	1, %o1
	sllx	%o1, %g6, %o1
	andcc	%o7, %o1, %g0
	beq,pt	%xcc, 2f			! strand not in start set
	nop
	set	STRAND_ID, %g5
	stub	%g6, [ %o3 + %g5 ]
	set	STRAND_SCRUB_BASEPA, %g5
	stx	%g1, [ %o3 + %g5 ]
	sub	%g2, %g1, %g4			! bytes left to the limit
	cmp	%g4, %g3
	movg	%xcc, %g3, %g4			! %g4 = min(left, chunk)
	set	STRAND_SCRUB_SIZE, %g5
	stx	%g4, [ %o3 + %g5 ]
	add	%g1, %g4, %g1			! advance slice base
2:
	set	STRAND_SIZE, %g5
	add	%o3, %g5, %o3			! next strand struct
	inc	%g6
	cmp	%g6, NSTRANDS
	blt,pt	%xcc, 1b
	nop

	! Master removes itself from the completed set
	STRAND_STRUCT(%o3)
	ldub	[%o3 + STRAND_ID], %g1
	mov	1, %g2
	sllx	%g2, %g1, %g2
	andn	%o7, %g2, %o7

	! strand bits get cleared as their scrub is completed
	stx	%o7, [ %i0 + CONFIG_SCRUB_SYNC ]
#endif /* CONFIG_FPGA */

	/*
	 * Start all the other strands. They will scrub their slice of memory
	 * and then go into start work.
	 */
	mov	%l2, %g2			! %g2 = strandstartset

	/*
	 * Start other processors
	 * Pass 1: set the core-running bit for every enabled strand
	 * in the start set (other than ourselves).
	 */
	mov	CMP_CORE_ENABLE_STATUS, %g6
	ldxa	[%g6]ASI_CMP_CHIP, %g6		! %g6 = enabled strands
	PHYS_STRAND_ID(%g3)			! %g3 = current cpu

	mov	1, %g4
	sllx	%g4, %g3, %g3
	andn	%g2, %g3, %g2			! remove curcpu from set
	mov	NSTRANDS - 1, %g1

	mov	CMP_CORE_RUNNING_W1S, %g5
1:	mov	1, %g3
	sllx	%g3, %g1, %g3
	btst	%g6, %g3			! cpu enabled ?
	bz,pn	%xcc, 2f
	btst	%g2, %g3			! in set ? (delay slot)
	bz,pn	%xcc, 2f
	nop
	stxa	%g3, [%g5]ASI_CMP_CHIP		! write-1-to-set: start it

2:	deccc	%g1
	bgeu,pt	%xcc, 1b			! falls out when %g1 wraps
	nop

	! Pass 2: send a resume interrupt to each strand in the set.
	PHYS_STRAND_ID(%g3)			! %g3 = current cpu
	mov	1, %g4
	sllx	%g4, %g3, %g3
	andn	%g2, %g3, %g2			! remove current cpu from set
	mov	NSTRANDS - 1, %g1
1:
	mov	1, %g3
	sllx	%g3, %g1, %g3
	btst	%g2, %g3
	bz,pn	%xcc, 2f
	mov	INT_VEC_DIS_TYPE_RESUME, %g4	! (delay slot)
	sllx	%g4, INT_VEC_DIS_TYPE_SHIFT, %g4
	sllx	%g1, INT_VEC_DIS_VCID_SHIFT, %g3 ! target strand
	or	%g4, %g3, %g3			! int_vec_dis value
	stxa	%g3, [%g0]ASI_INTR_UDB_W

2:	deccc	%g1
	bgeu,pt	%xcc, 1b
	nop

	/*
	 * Master now does its bit of the memory scrubbing.
	 */
#ifdef CONFIG_FPGA
	clr	%g1
	mov	%l0, %g2			! %g2 = membase
	HVCALL(memscrub)			! scrub below hypervisor

	STRAND_STRUCT(%g3)
	ldx	[%g3 + STRAND_SCRUB_BASEPA], %g1
	ldx	[%g3 + STRAND_SCRUB_SIZE], %g2
	HVCALL(memscrub)			! scrub masters slice above hypervisor

	! Now wait until all the other strands are done
	! (each clears its bit in CONFIG_SCRUB_SYNC when finished)
1:
	ldx	[ %i0 + CONFIG_SCRUB_SYNC ], %g2
	PRINT(" ")
	PRINTX(%g2)
	brnz,pt	%g2, 1b
	nop
	PRINT(" done\r\n")
#endif

	mov	%l7, %g7			! restore return address
	HVRET
	SET_SIZE(scrub_all_memory)