Commit | Line | Data |
---|---|---|
920dae64 AT |
1 | /* |
2 | * ========== Copyright Header Begin ========================================== | |
3 | * | |
4 | * Hypervisor Software File: hcall_core.s | |
5 | * | |
6 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. | |
7 | * | |
8 | * - Do no alter or remove copyright notices | |
9 | * | |
10 | * - Redistribution and use of this software in source and binary forms, with | |
11 | * or without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistribution of source code must retain the above copyright notice, | |
15 | * this list of conditions and the following disclaimer. | |
16 | * | |
17 | * - Redistribution in binary form must reproduce the above copyright notice, | |
18 | * this list of conditions and the following disclaimer in the | |
19 | * documentation and/or other materials provided with the distribution. | |
20 | * | |
21 | * Neither the name of Sun Microsystems, Inc. or the names of contributors | |
22 | * may be used to endorse or promote products derived from this software | |
23 | * without specific prior written permission. | |
24 | * | |
25 | * This software is provided "AS IS," without a warranty of any kind. | |
26 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, | |
27 | * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A | |
28 | * PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN | |
29 | * MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR | |
30 | * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR | |
31 | * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN | |
32 | * OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR | |
33 | * FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE | |
34 | * DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, | |
35 | * ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF | |
36 | * SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. | |
37 | * | |
38 | * You acknowledge that this software is not designed, licensed or | |
39 | * intended for use in the design, construction, operation or maintenance of | |
40 | * any nuclear facility. | |
41 | * | |
42 | * ========== Copyright Header End ============================================ | |
43 | */ | |
44 | /* | |
45 | * Copyright 2007 Sun Microsystems, Inc. All rights reserved. | |
46 | * Use is subject to license terms. | |
47 | */ | |
48 | ||
49 | .ident "@(#)hcall_core.s 1.104 07/08/02 SMI" | |
50 | ||
51 | #include <sys/asm_linkage.h> | |
52 | #include <sys/htypes.h> | |
53 | #include <sun4v/traps.h> | |
54 | #include <sun4v/asi.h> | |
55 | #include <sparcv9/asi.h> | |
56 | #include <asi.h> | |
57 | #include <hprivregs.h> | |
58 | #include <guest.h> | |
59 | #include <offsets.h> | |
60 | #include <util.h> | |
61 | #include <debug.h> | |
62 | #include <traptrace.h> | |
63 | #include <vdev_intr.h> | |
64 | #include <vdev_ops.h> | |
65 | #include <intr.h> | |
66 | #include <cache.h> | |
67 | ||
68 | /* | |
69 | * guest_exit | |
70 | * | |
71 | * Invoked by hcall_mach_exit, or by a strand in error. In the | |
72 | * mach_exit case the strand goes on to wait for more work; in | |
73 | * the error case, on return the strand idles itself. | |
74 | */ | |
75 | ENTRY(guest_exit) | |
76 | ||
77 | STRAND_PUSH(%g7, %g2, %g3) ! save return address on strand stack | |
78 | ||
79 | /* | |
80 | * Loop over all guests and check if there is more | |
81 | * than one guest configured in the system. If not, | |
82 | * call vbsc_guest_exit. | |
83 | */ | |
84 | ROOT_STRUCT(%g1) | |
85 | GUEST_STRUCT(%g5) ! this guest | |
86 | ldx [%g1 + CONFIG_GUESTS], %g1 ! &guest[0] | |
87 | set NGUESTS - 1, %g3 ! guest loop counter | |
88 | set GUEST_SIZE, %g2 ! stride for guest++ below | |
89 | 1: | |
90 | cmp %g1, %g5 | |
91 | beq %xcc, 2f ! skip this guest | |
92 | nop | |
93 | lduw [%g1 + GUEST_STATE], %g4 | |
94 | cmp %g4, GUEST_STATE_UNCONFIGURED ! if another guest, and if | |
95 | bne,pt %xcc, 3f ! it is not unconfigured | |
96 | nop ! do not poweroff | |
97 | 2: | |
98 | add %g1, %g2, %g1 ! guest++ | |
99 | brnz,pt %g3, 1b | |
100 | dec %g3 ! nguests-- (delay slot) | |
101 | ||
102 | /* | |
103 | * If this is the last guest and there is a delayed reconfig in | |
104 | * progress, do not poweroff | |
105 | */ | |
106 | ROOT_STRUCT(%g1) | |
107 | ldx [%g1 + CONFIG_DEL_RECONF_GID], %g1 ! gid with pending reconfig | |
108 | ldx [%g5 + GUEST_GID], %g2 ! our gid | |
109 | cmp %g1, %g2 | |
110 | beq,pn %xcc, 3f ! delayed reconfig - no poweroff | |
111 | nop | |
112 | ||
113 | PRINT("\tfor the last guest ...\r\n") | |
114 | #ifdef CONFIG_VBSC_SVC | |
115 | ba,pt %xcc, vbsc_guest_exit ! last guest: ask vBSC to power off | |
116 | nop | |
117 | #else | |
118 | LEGION_EXIT(%o0) ! simulator exit when no vBSC service | |
119 | #endif | |
120 | 3: | |
121 | wrpr %g0, 0, %gl ! global reg level 0 before C code | |
122 | wrpr %g0, 0, %tl ! trap level 0 before C code | |
123 | HVCALL(setup_c_environ) ! build environment for C calls | |
124 | ||
125 | GUEST_STRUCT(%o0) | |
126 | add %o0, GUEST_STATE_LOCK, %g2 | |
127 | SPINLOCK_ENTER(%g2, %g1, %g3) | |
128 | !! %o0 current guest | |
129 | !! %g2 guest state lock | |
130 | ||
131 | ! check the state of the guest; bail to 6f if already in transition | |
132 | lduw [%o0 + GUEST_STATE], %g3 | |
133 | cmp %g3, GUEST_STATE_RESETTING | |
134 | be,pn %xcc, 6f | |
135 | cmp %g3, GUEST_STATE_EXITING ! compare sits in delay slot | |
136 | be,pn %xcc, 6f | |
137 | nop | |
138 | ||
139 | ! check if this is the control domain | |
140 | CTRL_DOMAIN(%g1, %g3, %g4) !! %g1 control domain guestp | |
141 | cmp %g1, %o0 | |
142 | beq,pn %xcc, 4f | |
143 | nop | |
144 | ||
145 | ! not the control domain - mark EXITING and exit | |
146 | set GUEST_STATE_EXITING, %g4 | |
147 | stuw %g4, [%o0 + GUEST_STATE] | |
148 | SPINLOCK_EXIT(%g2) | |
149 | ||
150 | mov GUEST_EXIT_MACH_EXIT, %o1 ! %o0 = guestp, %o1 = reason | |
151 | call c_guest_exit | |
152 | nop | |
153 | ||
154 | mov 1, %g1 | |
155 | SET_VCPU_STRUCT(%g1, %g2) ! force alignment trap on stale vcpu use | |
156 | ba,pt %xcc, 5f | |
157 | nop | |
158 | 4: | |
159 | ! control domain - do a sir (reset rather than exit) | |
160 | set GUEST_STATE_RESETTING, %g4 | |
161 | stuw %g4, [%o0 + GUEST_STATE] | |
162 | SPINLOCK_EXIT(%g2) | |
163 | ||
164 | mov GUEST_EXIT_MACH_SIR, %o1 ! %o0 = guestp, %o1 = reason | |
165 | call c_guest_exit | |
166 | nop | |
167 | ||
168 | 5: | |
169 | STRAND_POP(%g7, %g2) ! restore return address | |
170 | HVRET | |
171 | 6: | |
172 | /* | |
173 | * The guest is already in the process of being | |
174 | * stopped or started. Deschedule the current vcpu | |
175 | * and send it off to wait for the xcall that will | |
176 | * tell it what to do next. | |
177 | */ | |
178 | VCPU_STRUCT(%g1) | |
179 | VCPU2STRAND_STRUCT(%g1, %g3) | |
180 | !! %g1 current vcpu | |
181 | !! %g2 guest state lock | |
182 | !! %g3 current strand | |
183 | ||
184 | ldub [%g1 + CPU_STRAND_SLOT], %g4 ! this vcpu's slot index | |
185 | mulx %g4, SCHED_SLOT_SIZE, %g4 ! index -> byte offset | |
186 | add %g3, STRAND_SLOT, %g5 | |
187 | add %g5, %g4, %g4 ! %g4 = &strand->slot[idx] | |
188 | set SLOT_ACTION_NOP, %g3 | |
189 | stx %g3, [%g4 + SCHED_SLOT_ACTION] ! deschedule: slot does nothing | |
190 | mov 1, %g3 ! force alignment trap | |
191 | stx %g3, [%g4 + SCHED_SLOT_ARG] | |
192 | ||
193 | SPINLOCK_EXIT(%g2) | |
194 | ||
195 | STRAND_POP(%g7, %g2) ! restore return address | |
196 | HVRET | |
197 | ||
198 | SET_SIZE(guest_exit) | |
199 | ||
200 | ||
201 | /* | |
202 | * mach_exit | |
203 | * | |
204 | * arg0 exit code (%o0) | |
205 | * -- | |
206 | * does not return | |
207 | */ | |
208 | ENTRY(hcall_mach_exit) | |
209 | ||
210 | PRINT("hcall_mach_exit called\r\n") | |
211 | ||
212 | HVCALL(guest_exit) ! tear down this guest (poweroff path may not return) | |
213 | ||
214 | ba,a,pt %xcc, start_work ! strand waits for more work; annulled delay slot | |
215 | nop | |
216 | ||
217 | SET_SIZE(hcall_mach_exit) | |
218 | ||
219 | ||
220 | /* | |
221 | * mach_sir | |
222 | * | |
223 | * In the world of SIR the domain is merely asking for a reset. | |
224 | * This can simply be a plain reboot/reset of the domain, or an | |
225 | * opportunity to trigger a delayed reconfigure. | |
226 | * | |
227 | * -- | |
228 | * does not return | |
229 | */ | |
230 | ENTRY_NP(hcall_mach_sir) | |
231 | PRINT("hcall_mach_sir called\r\n") | |
232 | ||
233 | /* | |
234 | * Solaris/OS reboot triggers an SIR | |
235 | * | |
236 | * We cannot request a power cycle from the SP here because | |
237 | * we will lose the current configuration of the domain(s) | |
238 | * consequently all SIR actions must result in a simple HV | |
239 | * reset of the domain - the SP/vBSC is never involved. | |
240 | * | |
241 | * Note: For LDoms 1.0 we decommit the hot reset of the last | |
242 | * guest and instead request a power cycle of the system. The | |
243 | * presumption is that the last guest is the control domain | |
244 | * as will be recommended by best practices. | |
245 | */ | |
246 | ||
247 | #ifdef LDOMS_1_0_ERRATUM_POWER_CYCLE | |
248 | /* | |
249 | * Loop over all guests and check if there is more | |
250 | * than one guest configured in the system. If not, | |
251 | * call vbsc_guest_sir. | |
252 | * (Same last-guest scan as in guest_exit above.) | |
253 | */ | |
254 | ROOT_STRUCT(%g1) | |
255 | GUEST_STRUCT(%g5) ! this guest | |
256 | ldx [%g1 + CONFIG_GUESTS], %g1 ! &guest[0] | |
257 | set NGUESTS - 1, %g3 ! guest loop counter | |
258 | set GUEST_SIZE, %g2 ! stride for guest++ below | |
259 | 1: | |
260 | cmp %g1, %g5 | |
261 | beq %xcc, 2f ! skip this guest | |
262 | nop | |
263 | lduw [%g1 + GUEST_STATE], %g4 | |
264 | cmp %g4, GUEST_STATE_UNCONFIGURED ! if another guest, and if | |
265 | bne,pt %xcc, 3f ! it is not unconfigured | |
266 | nop ! do not poweroff | |
267 | 2: | |
268 | add %g1, %g2, %g1 ! guest++ | |
269 | brnz,pt %g3, 1b | |
270 | dec %g3 ! nguests-- (delay slot) | |
271 | ||
272 | /* | |
273 | * If this is the last guest and there is a delayed reconfig in | |
274 | * progress, do not poweroff | |
275 | */ | |
276 | ROOT_STRUCT(%g1) | |
277 | ldx [%g1 + CONFIG_DEL_RECONF_GID], %g1 ! gid with pending reconfig | |
278 | ldx [%g5 + GUEST_GID], %g2 ! our gid | |
279 | cmp %g1, %g2 | |
280 | beq,pn %xcc, 3f ! delayed reconfig - no poweroff | |
281 | nop | |
282 | ||
283 | PRINT("\tfor the last guest ...\r\n") | |
284 | #ifdef CONFIG_VBSC_SVC | |
285 | ba,pt %xcc, vbsc_guest_sir ! last guest: request power cycle | |
286 | nop | |
287 | #else | |
288 | LEGION_EXIT(%o0) ! simulator exit when no vBSC service | |
289 | #endif | |
290 | 3: | |
291 | #endif /* LDOMS_1_0_ERRATUM_POWER_CYCLE */ | |
292 | ||
293 | wrpr %g0, 0, %gl ! global reg level 0 before C code | |
294 | wrpr %g0, 0, %tl ! trap level 0 before C code | |
295 | HVCALL(setup_c_environ) ! build environment for C calls | |
296 | GUEST_STRUCT(%o0) | |
297 | !! %o0 current guest pointer | |
298 | ||
299 | add %o0, GUEST_STATE_LOCK, %g2 | |
300 | SPINLOCK_ENTER(%g2, %g3, %g4) | |
301 | !! %g2 guest state lock | |
302 | ||
303 | ! check the state of the guest; bail to 4f if already in transition | |
304 | lduw [%o0 + GUEST_STATE], %g3 | |
305 | cmp %g3, GUEST_STATE_RESETTING | |
306 | be,pn %xcc, 4f | |
307 | cmp %g3, GUEST_STATE_EXITING ! compare sits in delay slot | |
308 | be,pn %xcc, 4f | |
309 | nop | |
310 | ||
311 | mov GUEST_STATE_RESETTING, %g3 ! mark guest resetting | |
312 | stuw %g3, [%o0 + GUEST_STATE] | |
313 | SPINLOCK_EXIT(%g2) | |
314 | ||
315 | mov GUEST_EXIT_MACH_SIR, %o1 ! %o0 = guestp, %o1 = reason | |
316 | call c_guest_exit | |
317 | nop | |
318 | ||
319 | ba,a,pt %xcc, start_work ! strand waits for more work | |
320 | nop | |
321 | 4: | |
322 | /* | |
323 | * The guest is already in the process of being | |
324 | * stopped or started. Deschedule the current vcpu | |
325 | * and send it off to wait for the xcall that will | |
326 | * tell it what to do next. | |
327 | */ | |
328 | VCPU_STRUCT(%g1) | |
329 | VCPU2STRAND_STRUCT(%g1, %g3) | |
330 | !! %g1 current vcpu | |
331 | !! %g2 guest state lock | |
332 | !! %g3 current strand | |
333 | ||
334 | ldub [%g1 + CPU_STRAND_SLOT], %g4 ! this vcpu's slot index | |
335 | mulx %g4, SCHED_SLOT_SIZE, %g4 ! index -> byte offset | |
336 | add %g3, STRAND_SLOT, %g5 | |
337 | add %g5, %g4, %g4 ! %g4 = &strand->slot[idx] | |
338 | set SLOT_ACTION_NOP, %g3 | |
339 | stx %g3, [%g4 + SCHED_SLOT_ACTION] ! deschedule: slot does nothing | |
340 | mov 1, %g3 ! force alignment trap | |
341 | stx %g3, [%g4 + SCHED_SLOT_ARG] | |
342 | ||
343 | SPINLOCK_EXIT(%g2) | |
344 | ||
345 | ba,a,pt %xcc, start_work ! strand waits for more work | |
346 | nop | |
347 | ||
348 | SET_SIZE(hcall_mach_sir) | |
348 | ||
349 | ||
350 | /* | |
351 | * mach_desc | |
352 | * | |
353 | * arg0 buffer (%o0) | |
354 | * arg1 len (%o1) | |
355 | * -- | |
356 | * ret0 status (%o0) | |
357 | * ret1 actual len (%o1) (for EOK or EINVAL) | |
358 | * | |
359 | * guest uses this sequence to get the machine description: | |
360 | * mach_desc(0, 0) | |
361 | * if %o0 != EINVAL, failed | |
362 | * len = %o1 | |
363 | * buf = allocate(len) | |
364 | * mach_desc(buf, len) | |
365 | * if %o0 != EOK, failed | |
366 | * so the EINVAL case is the first error check | |
367 | */ | |
368 | ENTRY_NP(hcall_mach_desc) | |
369 | VCPU_GUEST_STRUCT(%g1, %g6) | |
370 | set GUEST_MD_SIZE, %g7 | |
371 | ldx [%g6 + %g7], %g3 ! %g3 = size of guest's MD | |
372 | ! paranoia for xcopy - should already be 16byte multiple | |
373 | add %g3, MACH_DESC_ALIGNMENT - 1, %g3 ! round size up to | |
374 | andn %g3, MACH_DESC_ALIGNMENT - 1, %g3 ! MACH_DESC_ALIGNMENT | |
375 | cmp %g3, %o1 ! buffer large enough? | |
376 | bgu,pn %xcc, herr_inval | |
377 | mov %g3, %o1 ! return PD size for success or EINVAL | |
378 | ||
379 | btst MACH_DESC_ALIGNMENT - 1, %o0 ! check buffer alignment | |
380 | bnz,pn %xcc, herr_badalign | |
381 | nop | |
382 | ||
383 | RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o0, %g3, herr_noraddr, %g2, %g4) | |
384 | ||
385 | ! %g3 = size of pd | |
386 | ! %g4 = pa of guest buffer | |
387 | /* xcopy(pd, buf[%o0], size[%g3]) */ | |
388 | set GUEST_MD_PA, %g7 | |
389 | ldx [%g6 + %g7], %g1 ! %g1 = source pa of guest's MD | |
390 | mov %g4, %g2 ! %g2 = destination pa | |
391 | HVCALL(xcopy) | |
392 | ||
393 | ! %o1 was set above to the guest's PD size | |
394 | HCALL_RET(EOK) | |
395 | SET_SIZE(hcall_mach_desc) | |
396 | ||
397 | ||
398 | /* | |
399 | * tod_get - Time-of-day get | |
400 | * | |
401 | * no arguments | |
402 | * -- | |
403 | * ret0 status (%o0) | |
404 | * ret1 tod (%o1) in seconds, including the guest's offset | |
405 | */ | |
406 | ENTRY_NP(hcall_tod_get) | |
407 | GUEST_STRUCT(%g1) | |
408 | ROOT_STRUCT(%g2) | |
409 | ! %g1 guestp | |
410 | ! %g2 configp | |
411 | ldx [%g1 + GUEST_TOD_OFFSET], %g3 | |
412 | ldx [%g2 + CONFIG_TOD], %g4 | |
413 | ldx [%g2 + CONFIG_TODFREQUENCY], %g5 | |
414 | ! %g3 guest's tod offset (seconds) | |
415 | ! %g4 address of system tod counter (0 if none) | |
416 | ! %g5 tod frequency (ticks per second) | |
417 | #ifdef CONFIG_STATICTOD | |
418 | ! If the PD says no TOD then start with 0 | |
419 | brz,pn %g4, hret_ok | |
420 | clr %o1 ! delay slot: tod = 0 | |
421 | #else | |
422 | brz,pn %g4, herr_notsupported | |
423 | clr %o1 ! In case error status not checked | |
424 | #endif | |
425 | ||
426 | ldx [%g4], %o1 ! read raw tod counter | |
427 | udivx %o1, %g5, %o1 ! Convert to seconds | |
428 | add %o1, %g3, %o1 ! Add partition's tod offset | |
429 | HCALL_RET(EOK) | |
430 | SET_SIZE(hcall_tod_get) | |
431 | ||
432 | /* | |
433 | * tod_set - Time-of-day set | |
434 | * | |
435 | * arg0 tod (%o0) in seconds | |
436 | * -- | |
437 | * ret0 status (%o0) | |
438 | * | |
439 | * Stores the new guest tod offset and, if it changed, sends an | |
440 | * asynchronous hvctl notification (and informs vBSC on the | |
441 | * control domain). | |
442 | */ | |
443 | ENTRY_NP(hcall_tod_set) | |
444 | ROOT_STRUCT(%g1) ! %g1 = configp | |
445 | ldx [%g1 + CONFIG_TOD], %g2 ! %g2 = address of TOD counter | |
446 | ||
447 | #ifdef CONFIG_STATICTOD | |
448 | /* | |
449 | * If no hardware TOD then tod-get returned 0 the first time | |
450 | * and will continue to do so. | |
451 | */ | |
452 | brz,pn %g2, hret_ok | |
453 | nop | |
454 | #else | |
455 | brz,pn %g2, herr_notsupported | |
456 | nop | |
457 | #endif | |
458 | ||
459 | GUEST_STRUCT(%g6) ! %g6 = guestp | |
460 | ||
461 | ! acquire the guest's asynchronous lock | |
462 | set GUEST_ASYNC_LOCK, %g5 | |
463 | add %g6, %g5, %g7 | |
464 | SPINLOCK_ENTER(%g7, %g3, %g5) | |
465 | ||
466 | ! compare new tod with current | |
467 | ldx [%g1 + CONFIG_TODFREQUENCY], %g5 | |
468 | ldx [%g2], %g4 ! %g4 = system tod | |
469 | udivx %g4, %g5, %g4 ! convert to seconds | |
470 | sub %o0, %g4, %g4 ! %g4 = new delta | |
471 | ldx [%g6 + GUEST_TOD_OFFSET], %g3 ! current delta | |
472 | cmp %g4, %g3 ! check if tod changed | |
473 | beq,pn %xcc, 1f ! unchanged - just drop the lock | |
474 | nop | |
475 | ||
476 | ! tod has changed | |
477 | ||
478 | stx %g4, [%g6 + GUEST_TOD_OFFSET] ! store new tod | |
479 | ||
480 | ! check if async notification for tod is busy or not | |
481 | set GUEST_ASYNC_BUSY, %g5 | |
482 | add %g6, %g5, %g3 ! %g3 = base of busy flags array | |
483 | ldub [%g3 + ENUM_HVctl_info_guest_tod], %g1 | |
484 | brnz,pn %g1, 1f ! busy - skip notification | |
485 | ! not busy, set busy flag and send asynchronous notification | |
486 | mov 1, %g1 ! NB: delay slot of brnz above | |
487 | stub %g1, [%g3 + ENUM_HVctl_info_guest_tod] | |
488 | set GUEST_ASYNC_BUF, %g5 | |
489 | add %g6, %g5, %g3 | |
490 | add %g3, HVCTL_MSG_MSG, %g3 ! %g3 = base of hvctl msg field | |
491 | ! zero out data part of message | |
492 | add %g3, HVCTL_RES_STATUS_DATA, %g1 | |
493 | set HVCTL_RES_STATUS_DATA_SIZE, %g2 | |
494 | HVCALL(bzero) | |
495 | ! fill in message fields | |
496 | set ENUM_HVctl_res_guest, %g5 | |
497 | stuw %g5, [%g3 + HVCTL_RES_STATUS_RES] ! resource type | |
498 | ldx [%g6 + GUEST_GID], %g5 | |
499 | stuw %g5, [%g3 + HVCTL_RES_STATUS_RESID] ! resource id | |
500 | set ENUM_HVctl_info_guest_tod, %g5 | |
501 | stuw %g5, [%g3 + HVCTL_RES_STATUS_INFOID] ! info id | |
502 | ! code field is initialized to zero in init_guest() and never changed | |
503 | ! fill in the info specific data, i.e. the tod | |
504 | #if (HVCTL_RES_STATUS_DATA & 0x7) != 0 | |
505 | #error data field in hvctl_res_status struct needs to be 8 byte aligned | |
506 | #endif | |
507 | stx %g4, [%g3 + HVCTL_RES_STATUS_DATA + 0 /* aschk ignore */] | |
508 | ! send the message (hvctl LDC lock nests inside the async lock) | |
509 | CONFIG_STRUCT(%g3) | |
510 | ldx [%g3 + CONFIG_HVCTL_LDC], %g1 | |
511 | set GUEST_ASYNC_BUF, %g5 | |
512 | add %g6, %g5, %g2 | |
513 | add %g3, CONFIG_HVCTL_LDC_LOCK, %g7 | |
514 | SPINLOCK_ENTER(%g7, %g4, %g5) | |
515 | HVCALL(hv_ldc_send_pkt) | |
516 | CONFIG_STRUCT(%g3) ! re-derive; %g3 clobbered by call | |
517 | add %g3, CONFIG_HVCTL_LDC_LOCK, %g7 | |
518 | SPINLOCK_EXIT(%g7) | |
519 | GUEST_STRUCT(%g6) ! restore %g6 = guestp | |
520 | 1: | |
521 | ! release guest's asynchronous notification lock | |
522 | set GUEST_ASYNC_LOCK, %g5 | |
523 | add %g6, %g5, %g7 | |
524 | SPINLOCK_EXIT(%g7) | |
525 | ||
526 | #ifdef CONFIG_VBSC_SVC | |
527 | /* | |
528 | * Send the new offset to vbsc on control domain only. | |
529 | */ | |
530 | GUEST_STRUCT(%g1) | |
531 | CTRL_DOMAIN(%g2, %g3, %g4) | |
532 | cmp %g1, %g2 ! is this the control domain ? | |
533 | bne,pn %xcc, 1f | |
534 | nop | |
535 | HVCALL(vbsc_guest_tod_offset) | |
536 | 1: | |
537 | #endif | |
538 | PRINT("Warning TOD has been set\r\n") | |
539 | HCALL_RET(EOK) | |
540 | SET_SIZE(hcall_tod_set) | |
537 | ||
538 | ||
539 | /* | |
540 | * mmu_enable - toggle the guest's MMU via LSUCR D/I enable bits | |
541 | * | |
542 | * arg0 enable (%o0) non-zero = enable, zero = disable | |
543 | * arg1 return address (%o1) | |
544 | * -- | |
545 | * ret0 status (%o0) | |
546 | */ | |
547 | ENTRY_NP(hcall_mmu_enable) | |
548 | /* | |
549 | * Check requested return address for instruction | |
550 | * alignment | |
551 | */ | |
552 | btst (INSTRUCTION_ALIGNMENT - 1), %o1 | |
553 | bnz,pn %xcc, herr_badalign | |
554 | nop | |
555 | ||
556 | ldxa [%g0]ASI_LSUCR, %g1 | |
557 | set (LSUCR_DM | LSUCR_IM), %g2 | |
558 | ! %g1 = current lsucr value | |
559 | ! %g2 = mmu enable mask (D-MMU and I-MMU bits) | |
560 | ||
561 | brz,pn %o0, 1f ! enable or disable? | |
562 | btst %g1, %g2 ! ccr indicates current status (delay slot) | |
563 | ||
564 | /* | |
565 | * Trying to enable | |
566 | * | |
567 | * The return address will be virtual and we cannot | |
568 | * check its range, the alignment has already been | |
569 | * checked. | |
570 | */ | |
571 | bnz,pn %xcc, herr_inval ! it's already enabled | |
572 | or %g1, %g2, %g1 ! enable MMU (delay slot) | |
573 | ||
574 | ba,pt %xcc, 2f | |
575 | nop | |
576 | ||
577 | 1: | |
578 | /* | |
579 | * Trying to disable | |
580 | * | |
581 | * The return address is a real address so we check | |
582 | * its range, the alignment has already been checked. | |
583 | */ | |
584 | bz,pn %xcc, herr_inval ! it's already disabled | |
585 | andn %g1, %g2, %g1 ! disable MMU (delay slot) | |
586 | ||
587 | /* Check RA range */ | |
588 | GUEST_STRUCT(%g3) | |
589 | RA2PA_RANGE_CONV(%g3, %o1, INSTRUCTION_SIZE, herr_noraddr, %g4, %g5) | |
590 | ||
591 | 2: | |
592 | wrpr %o1, %tnpc ! return to caller-supplied address | |
593 | stxa %g1, [%g0]ASI_LSUCR ! commit new MMU enable state | |
594 | HCALL_RET(EOK) | |
595 | SET_SIZE(hcall_mmu_enable) | |
596 | ||
597 | ||
598 | /* | |
599 | * mmu_fault_area_conf - register the vcpu's MMU fault status area | |
600 | * | |
601 | * arg0 raddr (%o0) 0 unconfigures the area | |
602 | * -- | |
603 | * ret0 status (%o0) | |
604 | * ret1 oldraddr (%o1) | |
605 | */ | |
606 | ENTRY_NP(hcall_mmu_fault_area_conf) | |
607 | btst (MMU_FAULT_AREA_ALIGNMENT - 1), %o0 ! check alignment | |
608 | bnz,pn %xcc, herr_badalign | |
609 | VCPU_GUEST_STRUCT(%g1, %g4) ! delay slot: %g1 vcpu, %g4 guest | |
610 | brz,a,pn %o0, 1f ! raddr 0: unconfigure, | |
611 | mov 0, %g2 ! annulled slot sets pa = 0 | |
612 | ||
613 | RA2PA_RANGE_CONV(%g4, %o0, MMU_FAULT_AREA_SIZE, herr_noraddr, %g3, %g2) | |
614 | 1: | |
615 | ldx [%g1 + CPU_MMU_AREA_RA], %o1 ! return previous raddr | |
616 | stx %o0, [%g1 + CPU_MMU_AREA_RA] ! store new raddr | |
617 | stx %g2, [%g1 + CPU_MMU_AREA] ! store translated pa | |
618 | ||
619 | HCALL_RET(EOK) | |
620 | SET_SIZE(hcall_mmu_fault_area_conf) | |
621 | ||
622 | /* | |
623 | * mmu_fault_area_info - query the vcpu's configured fault area | |
624 | * | |
625 | * -- | |
626 | * ret0 status (%o0) | |
627 | * ret1 fault area raddr (%o1) 0 if unconfigured | |
628 | */ | |
629 | ENTRY_NP(hcall_mmu_fault_area_info) | |
630 | VCPU_STRUCT(%g1) | |
631 | ldx [%g1 + CPU_MMU_AREA_RA], %o1 ! raddr stored by fault_area_conf | |
632 | HCALL_RET(EOK) | |
633 | SET_SIZE(hcall_mmu_fault_area_info) | |
634 | ||
635 | ||
636 | /* | |
637 | * cpu_qconf - configure one of the vcpu's mondo/error queues | |
638 | * | |
639 | * arg0 queue (%o0) | |
640 | * arg1 base raddr (%o1) | |
641 | * arg2 size (#entries, not #bytes) (%o2) 0 unconfigures | |
642 | * -- | |
643 | * ret0 status (%o0) | |
644 | */ | |
645 | ENTRY_NP(hcall_cpu_qconf) | |
646 | sllx %o2, Q_EL_SIZE_SHIFT, %g4 ! convert #entries to bytes | |
647 | VCPU_STRUCT(%g1) | |
648 | ||
649 | ! size of 0 unconfigures queue | |
650 | brnz,pt %o2, 1f | |
651 | nop | |
652 | ||
653 | /* | |
654 | * Set the stored configuration to relatively safe values | |
655 | * when un-initializing the queue | |
656 | */ | |
657 | mov %g0, %g2 ! pa = 0 | |
658 | mov %g0, %o1 ! base ra = 0 | |
659 | ba,pt %xcc, 2f | |
660 | mov %g0, %g4 ! mask = 0 (delay slot) | |
661 | ||
662 | 1: | |
663 | cmp %o2, MIN_QUEUE_ENTRIES | |
664 | blu,pn %xcc, herr_inval | |
665 | .empty | |
666 | ||
667 | setx MAX_QUEUE_ENTRIES, %g3, %g2 | |
668 | cmp %o2, %g2 | |
669 | bgu,pn %xcc, herr_inval | |
670 | .empty | |
671 | ||
672 | ! check that size is a power of two | |
673 | sub %o2, 1, %g2 | |
674 | andcc %o2, %g2, %g0 ! n & (n-1) == 0 for powers of two | |
675 | bnz,pn %xcc, herr_inval | |
676 | .empty | |
677 | ||
678 | ! Check base raddr alignment (must be size-in-bytes aligned) | |
679 | sub %g4, 1, %g2 ! size in bytes to mask | |
680 | btst %o1, %g2 | |
681 | bnz,pn %xcc, herr_badalign | |
682 | .empty | |
683 | ||
684 | VCPU2GUEST_STRUCT(%g1, %g6) | |
685 | RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g4, herr_noraddr, %g3, %g2) | |
686 | ||
687 | ! %g2 - queue paddr | |
688 | ! %g4 - queue size (#bytes) | |
689 | dec %g4 | |
690 | ! %g4 - queue mask | |
691 | ||
692 | 2: | |
693 | ! dispatch on queue id; fall through to errnrq for the last case | |
694 | cmp %o0, CPU_MONDO_QUEUE | |
695 | be,pn %xcc, qconf_cpuq | |
696 | cmp %o0, DEV_MONDO_QUEUE | |
697 | be,pn %xcc, qconf_devq | |
698 | cmp %o0, ERROR_RESUMABLE_QUEUE | |
699 | be,pn %xcc, qconf_errrq | |
700 | cmp %o0, ERROR_NONRESUMABLE_QUEUE | |
701 | bne,pn %xcc, herr_inval | |
702 | nop | |
703 | ||
704 | qconf_errnrq: | |
705 | stx %g2, [%g1 + CPU_ERRQNR_BASE] | |
706 | stx %o1, [%g1 + CPU_ERRQNR_BASE_RA] | |
707 | stx %o2, [%g1 + CPU_ERRQNR_SIZE] | |
708 | stx %g4, [%g1 + CPU_ERRQNR_MASK] | |
709 | mov ERROR_NONRESUMABLE_QUEUE_HEAD, %g3 | |
710 | stxa %g0, [%g3]ASI_QUEUE ! reset head = 0 | |
711 | mov ERROR_NONRESUMABLE_QUEUE_TAIL, %g3 | |
712 | ba,pt %xcc, 4f | |
713 | stxa %g0, [%g3]ASI_QUEUE ! reset tail = 0 (delay slot) | |
714 | ||
715 | qconf_errrq: | |
716 | stx %g2, [%g1 + CPU_ERRQR_BASE] | |
717 | stx %o1, [%g1 + CPU_ERRQR_BASE_RA] | |
718 | stx %o2, [%g1 + CPU_ERRQR_SIZE] | |
719 | stx %g4, [%g1 + CPU_ERRQR_MASK] | |
720 | mov ERROR_RESUMABLE_QUEUE_HEAD, %g3 | |
721 | stxa %g0, [%g3]ASI_QUEUE ! reset head = 0 | |
722 | mov ERROR_RESUMABLE_QUEUE_TAIL, %g3 | |
723 | ba,pt %xcc, 4f | |
724 | stxa %g0, [%g3]ASI_QUEUE ! reset tail = 0 (delay slot) | |
725 | ||
726 | qconf_devq: | |
727 | stx %g2, [%g1 + CPU_DEVQ_BASE] | |
728 | stx %o1, [%g1 + CPU_DEVQ_BASE_RA] | |
729 | stx %o2, [%g1 + CPU_DEVQ_SIZE] | |
730 | stx %g4, [%g1 + CPU_DEVQ_MASK] | |
731 | stx %g0, [%g1 + CPU_DEVQ_SHDW_TAIL] ! reset shadow tail too | |
732 | mov DEV_MONDO_QUEUE_HEAD, %g3 | |
733 | stxa %g0, [%g3]ASI_QUEUE ! reset head = 0 | |
734 | mov DEV_MONDO_QUEUE_TAIL, %g3 | |
735 | ba,pt %xcc, 4f | |
736 | stxa %g0, [%g3]ASI_QUEUE ! reset tail = 0 (delay slot) | |
737 | ||
738 | qconf_cpuq: | |
739 | stx %g2, [%g1 + CPU_CPUQ_BASE] | |
740 | stx %o1, [%g1 + CPU_CPUQ_BASE_RA] | |
741 | stx %o2, [%g1 + CPU_CPUQ_SIZE] | |
742 | stx %g4, [%g1 + CPU_CPUQ_MASK] | |
743 | mov CPU_MONDO_QUEUE_HEAD, %g3 | |
744 | stxa %g0, [%g3]ASI_QUEUE ! reset head = 0 | |
745 | mov CPU_MONDO_QUEUE_TAIL, %g3 | |
746 | stxa %g0, [%g3]ASI_QUEUE ! reset tail = 0 | |
747 | ||
748 | 4: | |
749 | HCALL_RET(EOK) | |
750 | SET_SIZE(hcall_cpu_qconf) | |
750 | ||
751 | ||
752 | /* | |
753 | * cpu_qinfo - report the current configuration of a vcpu queue | |
754 | * | |
755 | * arg0 queue (%o0) | |
756 | * -- | |
757 | * ret0 status (%o0) | |
758 | * ret1 base raddr (%o1) | |
759 | * ret2 size (#entries) (%o2) | |
760 | */ | |
761 | ENTRY_NP(hcall_cpu_qinfo) | |
762 | VCPU_STRUCT(%g1) | |
763 | ||
764 | ! dispatch on queue id; fall through to errnrq for the last case | |
765 | cmp %o0, CPU_MONDO_QUEUE | |
766 | be,pn %xcc, qinfo_cpuq | |
767 | cmp %o0, DEV_MONDO_QUEUE | |
768 | be,pn %xcc, qinfo_devq | |
769 | cmp %o0, ERROR_RESUMABLE_QUEUE | |
770 | be,pn %xcc, qinfo_errrq | |
771 | cmp %o0, ERROR_NONRESUMABLE_QUEUE | |
772 | bne,pn %xcc, herr_inval | |
773 | nop | |
774 | qinfo_errnrq: | |
775 | ldx [%g1 + CPU_ERRQNR_BASE_RA], %o1 | |
776 | ba,pt %xcc, 1f | |
777 | ldx [%g1 + CPU_ERRQNR_SIZE], %o2 ! delay slot | |
778 | ||
779 | qinfo_errrq: | |
780 | ldx [%g1 + CPU_ERRQR_BASE_RA], %o1 | |
781 | ba,pt %xcc, 1f | |
782 | ldx [%g1 + CPU_ERRQR_SIZE], %o2 ! delay slot | |
783 | ||
784 | qinfo_devq: | |
785 | ldx [%g1 + CPU_DEVQ_BASE_RA], %o1 | |
786 | ba,pt %xcc, 1f | |
787 | ldx [%g1 + CPU_DEVQ_SIZE], %o2 ! delay slot | |
788 | ||
789 | qinfo_cpuq: | |
790 | ldx [%g1 + CPU_CPUQ_BASE_RA], %o1 | |
791 | ldx [%g1 + CPU_CPUQ_SIZE], %o2 ! falls through to 1f | |
792 | ||
793 | 1: | |
794 | HCALL_RET(EOK) | |
795 | SET_SIZE(hcall_cpu_qinfo) | |
795 | ||
796 | ||
797 | /* | |
798 | * cpu_start - start a stopped vcpu of this guest | |
799 | * | |
800 | * arg0 cpu (%o0) | |
801 | * arg1 pc (%o1) real address of start pc | |
802 | * arg2 rtba (%o2) real address of trap table | |
803 | * arg3 arg (%o3) passed through to the started vcpu | |
804 | * -- | |
805 | * ret0 status (%o0) | |
806 | */ | |
807 | ENTRY_NP(hcall_cpu_start) | |
808 | VCPU_GUEST_STRUCT(%g6, %g7) | |
809 | ! %g6 = CPU | |
810 | ! %g7 = guest | |
811 | ||
812 | cmp %o0, NVCPUS ! vcpu id in range? | |
813 | bgeu,pn %xcc, herr_nocpu | |
814 | nop | |
815 | ||
816 | ! Check pc (real) and tba (real) for validity | |
817 | RA2PA_RANGE_CONV(%g7, %o1, INSTRUCTION_SIZE, herr_noraddr, %g1, %g2) | |
818 | RA2PA_RANGE_CONV(%g7, %o2, REAL_TRAPTABLE_SIZE, herr_noraddr, %g1, %g2) | |
819 | btst (INSTRUCTION_ALIGNMENT - 1), %o1 ! Check pc alignment | |
820 | bnz,pn %xcc, herr_badalign | |
821 | set REAL_TRAPTABLE_SIZE - 1, %g1 ! delay slot | |
822 | btst %o2, %g1 ! Check rtba alignment | |
823 | bnz,pn %xcc, herr_badalign | |
824 | nop | |
825 | ||
826 | ! Validate requested cpu | |
827 | sllx %o0, 3, %g1 ! vcpu id -> pointer-array offset | |
828 | add %g7, %g1, %g1 | |
829 | add %g1, GUEST_VCPUS, %g1 | |
830 | ldx [%g1], %g1 ! %g1 = guest.vcpus[id] | |
831 | brz,pn %g1, herr_nocpu ! no such vcpu in this guest | |
832 | nop | |
833 | ||
834 | add %g7, GUEST_STATE_LOCK, %g2 | |
835 | SPINLOCK_ENTER(%g2, %g3, %g4) | |
836 | !! %g2 guest state lock | |
837 | ||
838 | lduw [%g7 + GUEST_STATE], %g3 | |
839 | cmp %g3, GUEST_STATE_NORMAL ! guest must be in normal state | |
840 | bne,pn %xcc, .start_wouldblock | |
841 | nop | |
842 | ||
843 | !! %g1 requested CPU struct | |
844 | ||
845 | ldx [%g1 + CPU_STATUS], %g3 | |
846 | cmp %g3, CPU_STATE_STOPPED ! target must be stopped | |
847 | bne,pn %xcc, .start_inval | |
848 | nop | |
849 | ||
850 | set CPU_STATE_STARTING, %g3 ! mark in transition | |
851 | stx %g3, [%g1 + CPU_STATUS] | |
852 | SPINLOCK_EXIT(%g2) | |
853 | ||
854 | /* | |
855 | * OK we setup the target vcpu before it gets | |
856 | * launched, so we put the arguments into the | |
857 | * appropriate locations. | |
858 | * %g1 - our target cpu | |
859 | */ | |
860 | ||
861 | stx %o1, [%g1 + CPU_START_PC] | |
862 | stx %o2, [%g1 + CPU_RTBA] | |
863 | stx %o3, [%g1 + CPU_START_ARG] /*FIXME: direct to reg ? */ | |
864 | ||
865 | /* force a launch by done - this should be an assert */ | |
866 | ||
867 | set CPU_LAUNCH_WITH_RETRY, %g2 | |
868 | stub %g0, [%g1 + %g2] ! false | |
869 | ||
870 | /* | |
871 | * The setup arguments for the virtual cpu | |
872 | * should have been placed in its vcpu struct | |
873 | * so we only need to identify which vcpu to schedule | |
874 | * the strand we're sending the mondo to. | |
875 | */ | |
876 | ||
877 | STRAND_STRUCT(%g4) | |
878 | add %g4, STRAND_HV_TXMONDO, %g2 ! %g2 = this strand's tx mondo buf | |
879 | ||
880 | mov HXCMD_SCHED_VCPU, %g3 ! mondop->cmd = SCHED_VCPU | |
881 | stx %g3, [%g2 + HVM_CMD] | |
882 | stx %g4, [%g2 + HVM_FROM_STRANDP] ! mondop->from_strandp = me | |
883 | add %g2, HVM_ARGS, %g3 | |
884 | stx %g1, [%g3 + HVM_SCHED_VCPUP] ! mondop->pkt.sched.vcpup = vp | |
885 | ||
886 | ldx [%g1 + CPU_STRAND], %g1 ! shipit ! | |
887 | HVCALL(hvmondo_send) | |
888 | ||
889 | HCALL_RET(EOK) | |
890 | ||
891 | .start_wouldblock: | |
892 | !! %g2 guest state lock | |
893 | SPINLOCK_EXIT(%g2) ! drop lock before returning error | |
894 | ba,pt %xcc, herr_wouldblock | |
895 | nop | |
896 | ||
897 | .start_inval: | |
898 | !! %g2 guest state lock | |
899 | SPINLOCK_EXIT(%g2) ! drop lock before returning error | |
900 | ba,pt %xcc, herr_inval | |
901 | nop | |
902 | ||
903 | SET_SIZE(hcall_cpu_start) | |
904 | ||
905 | ||
906 | /* | |
907 | * cpu_stop - stop another running vcpu of this guest | |
908 | * | |
909 | * arg0 cpu (%o0) may not be the calling vcpu | |
910 | * -- | |
911 | * ret0 status (%o0) | |
912 | */ | |
913 | ENTRY_NP(hcall_cpu_stop) | |
914 | VCPU_GUEST_STRUCT(%g6, %g7) | |
915 | ! %g6 = vcpup | |
916 | ! %g7 = guestp | |
917 | ||
918 | cmp %o0, NVCPUS ! vcpu id in range? | |
919 | bgeu,pn %xcc, herr_nocpu | |
920 | nop | |
921 | ||
922 | /* | |
923 | * This HV only runs 1 vcpu per strand, so the | |
924 | * guest vcpu check is sufficient to ensure we're | |
925 | * not stopping ourselves | |
926 | */ | |
927 | ||
928 | ldub [%g6 + CPU_VID], %g1 | |
929 | cmp %o0, %g1 ! stopping self is invalid | |
930 | be,pn %xcc, herr_inval | |
931 | nop | |
932 | ||
933 | ! Check current state of requested cpu | |
934 | sllx %o0, 3, %g1 ! vcpu id -> pointer-array offset | |
935 | mov GUEST_VCPUS, %g2 | |
936 | add %g1, %g2, %g1 ! %g1 = vcpus[n] offset | |
937 | ldx [%g7 + %g1], %g1 ! %g1 = guest.vcpus[n] | |
938 | brz,pn %g1, herr_nocpu ! no such vcpu in this guest | |
939 | nop | |
940 | !! %g1 targeted vcpu cpu struct | |
941 | !! %g6 vcpup | |
942 | !! %g7 guestp | |
943 | ||
944 | /* | |
945 | * Prevent stopping a vcpu while the guest | |
946 | * is being stopped. | |
947 | */ | |
948 | add %g7, GUEST_STATE_LOCK, %g4 | |
949 | SPINLOCK_ENTER(%g4, %g5, %g3) | |
950 | !! %g4 guest state lock | |
951 | ||
952 | lduw [%g7 + GUEST_STATE], %g3 | |
953 | cmp %g3, GUEST_STATE_EXITING | |
954 | be,pn %xcc, .stop_wouldblock | |
955 | nop | |
956 | ||
957 | /* | |
958 | * Check if the current vcpu is stopping. | |
959 | * Returning in that case prevents a deadlock | |
960 | * if the target vcpu is trying to stop the | |
961 | * current vcpu. | |
962 | */ | |
963 | ldx [%g6 + CPU_STATUS], %g3 | |
964 | cmp %g3, CPU_STATE_STOPPING | |
965 | be,pn %xcc, .stop_wouldblock | |
966 | nop | |
967 | ||
968 | /* | |
969 | * Examine the target vcpu state. It must be in | |
970 | * the running or suspended state in order to | |
971 | * proceed. Return EWOULDBLOCK if the CPU is in | |
972 | * transition. | |
973 | */ | |
974 | ldx [%g1 + CPU_STATUS], %g3 | |
975 | cmp %g3, CPU_STATE_INVALID | |
976 | be,pn %xcc, .stop_inval | |
977 | cmp %g3, CPU_STATE_STOPPED ! compares chained in delay slots | |
978 | be,pn %xcc, .stop_inval | |
979 | cmp %g3, CPU_STATE_ERROR | |
980 | be,pn %xcc, .stop_inval | |
981 | cmp %g3, CPU_STATE_STOPPING | |
982 | be,pn %xcc, .stop_wouldblock | |
983 | cmp %g3, CPU_STATE_STARTING | |
984 | be,pn %xcc, .stop_wouldblock | |
985 | nop | |
986 | ||
987 | ! mark the vcpu in transition | |
988 | set CPU_STATE_STOPPING, %g3 | |
989 | stx %g3, [%g1 + CPU_STATUS] | |
990 | SPINLOCK_EXIT(%g4) | |
991 | ||
992 | /* | |
993 | * Send a command to the strand running the vcpu | |
994 | * to clean up and stop the vcpu. | |
995 | */ | |
996 | STRAND_STRUCT(%g4) | |
997 | add %g4, STRAND_HV_TXMONDO, %g2 ! %g2 = this strand's tx mondo buf | |
998 | ||
999 | mov HXCMD_STOP_VCPU, %g3 ! mondop->cmd = STOP_VCPU | |
1000 | stx %g3, [%g2 + HVM_CMD] | |
1001 | stx %g4, [%g2 + HVM_FROM_STRANDP] ! mondop->from_strandp = me | |
1002 | add %g2, HVM_ARGS, %g3 | |
1003 | stx %g1, [%g3 + HVM_SCHED_VCPUP] ! target vcpu pointer | |
1004 | ||
1005 | STRAND_PUSH(%g1, %g3, %g4) ! remember the cpu | |
1006 | ||
1007 | ldx [%g1 + CPU_STRAND], %g1 ! shipit ! | |
1008 | HVCALL(hvmondo_send) | |
1009 | ||
1010 | STRAND_POP(%g1, %g2) ! pop the vcpup | |
1011 | ||
1012 | /* FIXME: This should time out in case we get no response */ | |
1013 | 1: | |
1014 | membar #Sync ! spin until target leaves STOPPING | |
1015 | ldx [%g1 + CPU_STATUS], %g2 | |
1016 | cmp %g2, CPU_STATE_STOPPING | |
1017 | be,pt %xcc, 1b | |
1018 | nop | |
1019 | ||
1020 | HCALL_RET(EOK) | |
1021 | ||
1022 | .stop_wouldblock: | |
1023 | !! %g4 guest state lock | |
1024 | SPINLOCK_EXIT(%g4) ! drop lock before returning error | |
1025 | ba,pt %xcc, herr_wouldblock | |
1026 | nop | |
1027 | ||
1028 | .stop_inval: | |
1029 | !! %g4 guest state lock | |
1030 | SPINLOCK_EXIT(%g4) ! drop lock before returning error | |
1031 | ba,pt %xcc, herr_inval | |
1032 | nop | |
1033 | ||
1034 | SET_SIZE(hcall_cpu_stop) | |
1035 | ||
1036 | ||
/*
 * cpu_get_state - return the public state of a target vcpu
 *
 * arg0 cpu (%o0)
 * --
 * ret0 status (%o0)
 *	EOK	: success, state in ret1
 *	ENOCPU	: invalid vcpu id
 * ret1 state (%o1)
 *
 * The hypervisor's internal transitional states are folded into the
 * public states defined by the sun4v HV API before being returned.
 */
	ENTRY_NP(hcall_cpu_get_state)
	GUEST_STRUCT(%g1)
	! Translate guest-relative vcpu id to vcpu pointer; ENOCPU if bad
	VCPUID2CPUP(%g1, %o0, %g2, herr_nocpu, %g3)
	!! %g2	target vcpup

	ldx	[%g2 + CPU_STATUS], %o1

	/*
	 * Convert the transitional CPU states to one
	 * of the public states defined by the HV API.
	 */
	! STOPPING: the stop has not completed, so still report RUNNING
	cmp	%o1, CPU_STATE_STOPPING
	be,a,pn	%xcc, 1f
	mov	CPU_STATE_RUNNING, %o1

	! STARTING: the start has not completed, so still report STOPPED
	cmp	%o1, CPU_STATE_STARTING
	be,a,pn	%xcc, 1f
	mov	CPU_STATE_STOPPED, %o1

	! ASSERT(%o1 != CPU_STATE_INVALID)
	cmp	%o1, CPU_STATE_LAST_PUBLIC
	movgu	%xcc, CPU_STATE_ERROR, %o1	! Any non-API state is ERROR
1:

	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_get_state)
1071 | ||
1072 | ||
/*
 * mem_scrub - zero a range of real memory, L2-line granular
 *
 * arg0 real address (%o0)
 * arg1 length (%o1)
 * --
 * ret0 status (%o0)
 *	EOK	  : success or partial success
 *	EINVAL	  : length of zero
 *	ENORADDR  : invalid (bad) address
 *	EBADALIGN : bad alignment
 * ret1 length scrubbed (%o1)
 *
 * The amount scrubbed per call is clamped at the platform's
 * CONFIG_MEMSCRUB_MAX cache lines; the caller must inspect ret1
 * and re-issue the call for any remainder.
 */
	ENTRY_NP(hcall_mem_scrub)
	brz,pn	%o1, herr_inval		! length 0 invalid
	or	%o0, %o1, %g1		! address and length checked together
	btst	L2_LINE_SIZE - 1, %g1	! both must be L2-line aligned
	bnz,pn	%xcc, herr_badalign	! no: error
	nop

	VCPU_GUEST_STRUCT(%g6, %g5)

	/* Check input arguments with guest map: error ret: r0=ENORADDR */
	RA2PA_RANGE_CONV_UNK_SIZE(%g5, %o0, %o1, herr_noraddr, %g1, %g2)
	mov	%g2, %o0		! %o0 now the PA of the range

	/* Get Max length: */
	VCPU2ROOT_STRUCT(%g6, %g2)
	ldx	[%g2 + CONFIG_MEMSCRUB_MAX], %g5 ! limit (# cache lines)

	/* Compute max # lines: */
	srlx	%o1, L2_LINE_SHIFT, %g2	! # input cache lines
	cmp	%g5, %g2		! g2 = min(inp, max)
	movlu	%xcc, %g5, %g2		! ..
	sllx	%g2, L2_LINE_SHIFT, %o1	! ret1 = count scrubbed

	/*
	 * This is the core of this function.
	 * All of the code before and after has been optimized to make this
	 * and the most common path the fastest.
	 *
	 * Block-init stores avoid fetching the old line contents; one
	 * iteration zeroes a full 64-byte L2 line (8 x 8-byte stores).
	 */
	wr	%g0, ASI_BLK_INIT_P, %asi
.ms_clear_mem:
	stxa	%g0, [%o0 + (0 * 8)]%asi
	stxa	%g0, [%o0 + (1 * 8)]%asi
	stxa	%g0, [%o0 + (2 * 8)]%asi
	stxa	%g0, [%o0 + (3 * 8)]%asi
	stxa	%g0, [%o0 + (4 * 8)]%asi
	stxa	%g0, [%o0 + (5 * 8)]%asi
	stxa	%g0, [%o0 + (6 * 8)]%asi
	stxa	%g0, [%o0 + (7 * 8)]%asi
	deccc	1, %g2			! one line done
	bnz,pt	%xcc, .ms_clear_mem
	inc	64, %o0			! delay slot: advance to next line
	HCALL_RET(EOK)			! ret0=status, ret1=count
	SET_SIZE(hcall_mem_scrub)
1128 | ||
1129 | ||
/*
 * mem_sync - flush a range of real memory from the L2 cache to memory
 *
 * arg0 real address (%o0)
 * arg1 length (%o1)
 * --
 * ret0 (%o0):
 *	EOK	  : success, partial success
 *	EINVAL	  : length of zero
 *	ENORADDR  : bad address
 *	EBADALIGN : bad alignment
 * ret1 (%o1):
 *	length synced
 *
 * Like mem_scrub, the amount processed per call is clamped at
 * CONFIG_MEMSCRUB_MAX lines; ret1 reports how much was done.
 */
	ENTRY_NP(hcall_mem_sync)
	brz,pn	%o1, herr_inval		! len 0 not valid
	or	%o0, %o1, %g2		! addr and len checked together
	set	MEMSYNC_ALIGNMENT - 1, %g3
	btst	%g3, %g2		! check for alignment of addr/len
	bnz,pn	%xcc, herr_badalign
	.empty

	VCPU_GUEST_STRUCT(%g5, %g6)
	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o0, %o1, herr_noraddr, %g1, %g2)
	mov	%g2, %o0

	! %o0	pa
	! %o1	length

	/*
	 * Clamp requested length at MEMSCRUB_MAX
	 */
	VCPU2ROOT_STRUCT(%g5, %g2)
	ldx	[%g2 + CONFIG_MEMSCRUB_MAX], %g3

	sllx	%g3, L2_LINE_SHIFT, %g3	! convert line limit to bytes
	cmp	%o1, %g3
	movgu	%xcc, %g3, %o1
	! %o1	MIN(requested length, max length)

	/*
	 * Push cache lines to memory, last line first.
	 *
	 * l2_flush_line is invoked with a manual "call": the branch is
	 * taken with %g7 holding the %pc of the branch itself, which the
	 * callee uses to return to the instruction at %g7 + 8 (the deccc).
	 */
	sub	%o1, L2_LINE_SIZE, %o5
	! %o5	loop counter (byte offset of the line being flushed)
	add	%o0, %o5, %g1		! hoisted delay slot (see below)
1:
	ba	l2_flush_line		! %g1 is the pa to flush
	rd	%pc, %g7		! delay slot: return address for callee
	deccc	L2_LINE_SIZE, %o5	! get to next line
	bgeu,pt	%xcc, 1b
	add	%o0, %o5, %g1		! delay slot: %g1 is pa to flush

	HCALL_RET(EOK)
	SET_SIZE(hcall_mem_sync)
1184 | ||
/*
 * cpu_myid - return the virtual cpu id of the calling vcpu
 *
 * --
 * ret0 status (%o0)
 *	EOK	: always succeeds
 * ret1 mycpuid (%o1)
 */
	ENTRY_NP(hcall_cpu_myid)
	VCPU_STRUCT(%g1)
	ldub	[%g1 + CPU_VID], %o1	! guest-visible virtual cpu id
	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_myid)
1197 | ||
/*
 * dump_buf_update - (re)configure the guest's machine-description dump buffer
 *
 * arg0 ra of dump buffer (%o0)
 * arg1 size of dump buffer (%o1)
 * --
 * ret0 status (%o0)
 *	EOK	  : configured (or unconfigured when size is 0)
 *	EINVAL	  : size too small (minimum returned in ret1)
 *	EBADALIGN : buffer ra misaligned
 *	ENORADDR  : ra range not valid guest real memory
 * ret1 size on success (%o1), min size on EINVAL
 */
	ENTRY_NP(hcall_dump_buf_update)
	GUEST_STRUCT(%g1)

	/*
	 * XXX What locking is required between multiple strands
	 * XXX making simultaneous conf calls?
	 */

	/*
	 * Any error unconfigures any currently configured dump buf
	 * so set to unconfigured now to avoid special error exit code.
	 */
	set	GUEST_DUMPBUF_SIZE, %g4
	stx	%g0, [%g1 + %g4]
	set	GUEST_DUMPBUF_RA, %g4
	stx	%g0, [%g1 + %g4]
	set	GUEST_DUMPBUF_PA, %g4
	stx	%g0, [%g1 + %g4]

	! Size of 0 unconfigures the dump
	brz,pn	%o1, hret_ok
	nop

	! Enforce the minimum buffer size
	set	DUMPBUF_MINSIZE, %g2
	cmp	%o1, %g2
	blu,a,pn %xcc, herr_inval
	mov	%g2, %o1		! return min size on EINVAL

	! Check alignment
	btst	(DUMPBUF_ALIGNMENT - 1), %o0
	bnz,pn	%xcc, herr_badalign
	nop

	! Validate the whole [ra, ra+size) range and get its PA
	RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o0, %o1, herr_noraddr, %g3, %g2)
	! %g2	pa of dump buffer
	set	GUEST_DUMPBUF_SIZE, %g4
	stx	%o1, [%g1 + %g4]
	set	GUEST_DUMPBUF_RA, %g4
	stx	%o0, [%g1 + %g4]
	set	GUEST_DUMPBUF_PA, %g4
	stx	%g2, [%g1 + %g4]

	! XXX Need to put something in the buffer

	HCALL_RET(EOK)
	SET_SIZE(hcall_dump_buf_update)
1253 | ||
1254 | ||
/*
 * dump_buf_info - report the currently configured dump buffer
 *
 * --
 * ret0 status (%o0)
 *	EOK	: always succeeds (ra/size are 0 if unconfigured)
 * ret1 current dumpbuf ra (%o1)
 * ret2 current dumpbuf size (%o2)
 */
	ENTRY_NP(hcall_dump_buf_info)
	GUEST_STRUCT(%g1)
	set	GUEST_DUMPBUF_SIZE, %g4
	ldx	[%g1 + %g4], %o2
	set	GUEST_DUMPBUF_RA, %g4
	ldx	[%g1 + %g4], %o1
	HCALL_RET(EOK)
	SET_SIZE(hcall_dump_buf_info)
1271 | ||
1272 | ||
/*
 * cpu_mondo_send - deliver a 64-byte mondo to a list of target vcpus
 *
 * arg0/1 cpulist (%o0/%o1)
 *	arg0 is the number of entries, arg1 the ra of the list.
 *	Entries successfully sent to are overwritten with
 *	CPULIST_ENTRYDONE so the guest can retry the same list.
 * arg2 ptr to 64-byte-aligned data to send (%o2)
 * --
 * ret0 status (%o0)
 *	EOK	    : all entries delivered
 *	EWOULDBLOCK : one or more targets busy; retry with same list
 *	EBADALIGN   : cpulist or mondo data misaligned
 *	ENORADDR    : cpulist or mondo data ra invalid
 *	ENOCPU	    : bad vcpu id in the list
 *	EINVAL	    : target is the sending vcpu itself
 *	ECPUERROR   : target vcpu is in the error state
 */
	ENTRY(hcall_cpu_mondo_send)
	btst	CPULIST_ALIGNMENT - 1, %o1
	bnz,pn	%xcc, herr_badalign
	btst	MONDO_DATA_ALIGNMENT - 1, %o2	! delay slot: test mondo align
	bnz,pn	%xcc, herr_badalign
	nop

	VCPU_GUEST_STRUCT(%g3, %g6)
	! %g3	cpup
	! %g6	guestp

	! cpulist byte length = #entries << entry-size shift
	sllx	%o0, CPULIST_ENTRYSIZE_SHIFT, %g5

	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g5, herr_noraddr, %g7, %g1)
	RA2PA_RANGE_CONV(%g6, %o2, MONDO_DATA_SIZE, herr_noraddr, %g7, %g2)
	! %g1	cpulistpa
	! %g2	mondopa

	clr	%g4
	! %g4	true for EWOULDBLOCK
.cpu_mondo_continue:
	! %g1	pa of current entry in cpulist
	! %g3	cpup
	! %g4	ewouldblock flag
	! %o0	number of entries remaining in the list
	deccc	%o0
	blu,pn	%xcc, .cpu_mondo_break	! list exhausted
	nop

	ldsh	[%g1], %g6
	! %g6	tcpuid
	! Entries already delivered on a previous pass are skipped
	cmp	%g6, CPULIST_ENTRYDONE
	be,a,pn	%xcc, .cpu_mondo_continue
	inc	CPULIST_ENTRYSIZE, %g1	! delay slot: next entry in list

	ldx	[%g3 + CPU_GUEST], %g5
	VCPUID2CPUP(%g5, %g6, %g6, herr_nocpu, %g7)
	! %g6	tcpup

	/* Sending to one's self is not allowed */
	cmp	%g3, %g6		! cpup <?> tcpup
	be,pn	%xcc, herr_inval
	nop

	IS_CPU_IN_ERROR(%g6, %g5)
	be,pn	%xcc, herr_cpuerror
	nop

	/*
	 * Check to see if the recipient's mailbox is available:
	 * atomically claim CPU_COMMAND (0 -> CPU_CMD_BUSY).  %g7 gets
	 * the previous value; zero means we now own the mailbox.
	 */
	add	%g6, CPU_COMMAND, %g5
	mov	CPU_CMD_BUSY, %g7
	casxa	[%g5]ASI_P, %g0, %g7
	brz,pt	%g7, .cpu_mondo_send_one
	nop

	! %g1	pa of current entry in cpulist
	! %g2	is our mondo dont corrupt it.
	! %g3	cpup
	! %g4	ewouldblock flag
	! %g6	tcpup
	! %o0	number of entries remaining in the list

	/*
	 * If the mailbox isn't available then the queue could
	 * be full. Poke the target cpu to check if the queue
	 * is still full since we cannot read its head/tail
	 * registers.
	 */
	inc	%g4			! ewouldblock flag

	! Only poke if the mailbox holds an undelivered guest mondo
	cmp	%g7, CPU_CMD_GUESTMONDO_READY
	bne,a,pt %xcc, .cpu_mondo_continue
	inc	CPULIST_ENTRYSIZE, %g1	! next entry in list

	/*
	 * Only send another if CPU_POKEDELAY ticks have elapsed since the
	 * last poke.
	 */
	ldx	[%g6 + CPU_CMD_LASTPOKE], %g7
	inc	CPU_POKEDELAY, %g7
	rd	%tick, %g5
	cmp	%g5, %g7
	blu,a,pt %xcc, .cpu_mondo_continue
	inc	CPULIST_ENTRYSIZE, %g1
	stx	%g5, [%g6 + CPU_CMD_LASTPOKE]

	/*
	 * Send the target cpu a dummy vecintr so it checks
	 * to see if the guest removed entries from the queue
	 */
	VCPU2STRAND_STRUCT(%g6, %g7)
	ldub	[%g7 + STRAND_ID], %g7
	sllx	%g7, INT_VEC_DIS_VCID_SHIFT, %g5
	or	%g5, VECINTR_XCALL, %g5
	stxa	%g5, [%g0]ASI_INTR_UDB_W

	ba,pt	%xcc, .cpu_mondo_continue
	inc	CPULIST_ENTRYSIZE, %g1	! next entry in list

	/*
	 * Copy the mondo data into the target cpu's incoming buffer.
	 * We own the mailbox (CPU_COMMAND == CPU_CMD_BUSY) at this point.
	 */
.cpu_mondo_send_one:
	ldx	[%g2 + 0x00], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG0]
	ldx	[%g2 + 0x08], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG1]
	ldx	[%g2 + 0x10], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG2]
	ldx	[%g2 + 0x18], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG3]
	ldx	[%g2 + 0x20], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG4]
	ldx	[%g2 + 0x28], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG5]
	ldx	[%g2 + 0x30], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG6]
	ldx	[%g2 + 0x38], %g7
	stx	%g7, [%g6 + CPU_CMD_ARG7]
	membar	#Sync			! data visible before READY flag
	mov	CPU_CMD_GUESTMONDO_READY, %g7
	stx	%g7, [%g6 + CPU_COMMAND]

	/*
	 * Send a xcall vector interrupt to the target cpu
	 */
	VCPU2STRAND_STRUCT(%g6, %g7)
	ldub	[%g7 + STRAND_ID], %g7
	sllx	%g7, INT_VEC_DIS_VCID_SHIFT, %g5
	or	%g5, VECINTR_XCALL, %g5
	stxa	%g5, [%g0]ASI_INTR_UDB_W

	! Mark this list entry delivered so a retry pass skips it
	mov	CPULIST_ENTRYDONE, %g7
	sth	%g7, [%g1]

	ba	.cpu_mondo_continue
	inc	CPULIST_ENTRYSIZE, %g1	! next entry in list

.cpu_mondo_break:
	brnz,pn	%g4, herr_wouldblock	! any busy target => EWOULDBLOCK
	nop
	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_mondo_send)
1426 | ||
1427 | ||
/*
 * TTRACE_RELOC_ADDR - convert a link-time address to its run-time address
 *
 * scr1 gets the link-time address of a point in this very sequence
 * (via setx), scr0 gets the corresponding run-time %pc; their
 * difference is the relocation displacement, which is then subtracted
 * from addr.  NOTE(review): the `.+8` offset assumes a fixed distance
 * between the setx expansion and the rd %pc instruction — confirm
 * against the assembler's setx expansion if this macro is moved.
 */
#define	TTRACE_RELOC_ADDR(addr, scr0, scr1)	\
	setx	.+8, scr0, scr1			;\
	rd	%pc, scr0			;\
	sub	scr1, scr0, scr0		;\
	sub	addr, scr0, addr
1433 | ||
/*
 * hcall_ttrace_buf_conf - configure the per-vcpu trap trace buffer
 *
 * arg0 ra of traptrace buffer (%o0)
 * arg1 size of traptrace buffer in entries (%o1)
 * --
 * ret0 status (%o0)
 *	EOK	  : configured (or disabled when arg1 is 0)
 *	EINVAL	  : too few entries (minimum returned in ret1)
 *	EBADALIGN : buffer misaligned
 *	ENORADDR  : buffer ra not valid guest real memory
 * ret1 minimum #entries on EINVAL, #entries on success (%o1)
 */
	ENTRY_NP(hcall_ttrace_buf_conf)
	VCPU_GUEST_STRUCT(%g1, %g2)

	/*
	 * Disable traptrace by restoring %htba to original traptable
	 * always do this first to make error returns easier.
	 */
	setx	htraptable, %g3, %g4
	TTRACE_RELOC_ADDR(%g4, %g3, %g5)
	wrhpr	%g4, %htba

	! Clear buffer description
	stx	%g0, [%g1 + CPU_TTRACEBUF_SIZE]	! size must be first
	stx	%g0, [%g1 + CPU_TTRACEBUF_PA]
	stx	%g0, [%g1 + CPU_TTRACEBUF_RA]

	/*
	 * nentries (arg1) > 0 configures the buffer
	 * nentries == 0 disables traptrace and cleans up buffer config
	 */
	brz,pn	%o1, hret_ok
	nop

	! Check alignment
	btst	TTRACE_ALIGNMENT - 1, %o0
	bnz,pn	%xcc, herr_badalign
	nop

	! Check that #entries is >= TTRACE_MINIMUM_ENTRIES
	cmp	%o1, TTRACE_MINIMUM_ENTRIES
	blu,a,pn %xcc, herr_inval
	mov	TTRACE_MINIMUM_ENTRIES, %o1	! return min on EINVAL

	sllx	%o1, TTRACE_RECORD_SZ_SHIFT, %g6 ! convert #entries to bytes

	RA2PA_RANGE_CONV_UNK_SIZE(%g2, %o0, %g6, herr_noraddr, %g4, %g3)
	! %g3	pa of traptrace buffer
	stx	%o0, [%g1 + CPU_TTRACEBUF_RA]
	stx	%g3, [%g1 + CPU_TTRACEBUF_PA]
	stx	%g6, [%g1 + CPU_TTRACEBUF_SIZE]	! size must be last

	! Initialize traptrace buffer header: first record slot follows
	! the header record at offset TTRACE_RECORD_SIZE
	mov	TTRACE_RECORD_SIZE, %g2
	stx	%g2, [%g1 + CPU_TTRACE_OFFSET]
	stx	%g2, [%g3 + TTRACE_HEADER_OFFSET]
	stx	%g2, [%g3 + TTRACE_HEADER_LAST_OFF]
	! %o1 return is the same as that passed in
	HCALL_RET(EOK)
	SET_SIZE(hcall_ttrace_buf_conf)
1492 | ||
1493 | ||
/*
 * ttrace_buf_info - report the current trap trace buffer configuration
 *
 * --
 * ret0 status (%o0)
 *	EOK	: always succeeds (ra/size are 0 if unconfigured)
 * ret1 current traptrace buf ra (%o1)
 * ret2 current traptrace buf size, in entries (%o2)
 */
	ENTRY_NP(hcall_ttrace_buf_info)
	VCPU_STRUCT(%g1)

	ldx	[%g1 + CPU_TTRACEBUF_RA], %o1
	ldx	[%g1 + CPU_TTRACEBUF_SIZE], %o2
	srlx	%o2, TTRACE_RECORD_SZ_SHIFT, %o2 ! convert bytes to #entries
	movrz	%o2, %g0, %o1		! ensure RA zero if size is zero

	HCALL_RET(EOK)
	SET_SIZE(hcall_ttrace_buf_info)
1512 | ||
1513 | ||
/*
 * ttrace_enable - switch trap tracing on or off for the calling cpu
 *
 * Tracing is enabled by pointing %htba at the tracing trap table
 * (htraptracetable) and disabled by restoring the normal table
 * (htraptable).
 *
 * arg0 boolean: 0 = disable, non-zero = enable (%o0)
 * --
 * ret0 status (%o0)
 *	EOK	: switched
 *	EINVAL	: no trace buffer configured for this cpu
 * ret1 previous enable state (0=disabled, 1=enabled) (%o1)
 */
	ENTRY_NP(hcall_ttrace_enable)
	setx	htraptracetable, %g1, %g2 ! %g2 = reloc'd &htraptracetable
	TTRACE_RELOC_ADDR(%g2, %g1, %g3)

	setx	htraptable, %g1, %g3	! %g3 = reloc'd &htraptable
	TTRACE_RELOC_ADDR(%g3, %g1, %g4)

	mov	%g3, %g1		! %g1 = (%o0 ? %g2 : %g3)
	movrnz	%o0, %g2, %g1		! i.e. trace table iff enabling

	rdhpr	%htba, %g4		! %o1 = (%htba == %g2)
	mov	%g0, %o1		! i.e. 1 iff tracing was enabled
	cmp	%g4, %g2
	move	%xcc, 1, %o1

	/*
	 * Check that the guest has previously provided a buf for this cpu
	 * Check here since by now %o1 will be properly set
	 */
	VCPU_STRUCT(%g2)
	TTRACE_CHK_BUF(%g2, %g3, herr_inval)

	wrhpr	%g1, %htba		! install the selected trap table

	HCALL_RET(EOK)
	SET_SIZE(hcall_ttrace_enable)
1548 | ||
1549 | ||
/*
 * ttrace_freeze - freeze/unfreeze trap tracing guest-wide
 *
 * Unlike ttrace_enable this is a guest-global flag, not per-cpu.
 *
 * arg0 boolean: 0 = disable, non-zero = enable (%o0)
 * --
 * ret0 status (%o0)
 *	EOK	: switched
 *	EINVAL	: no trace buffer configured for the calling cpu
 * ret1 previous freeze state (0=disabled, 1=enabled) (%o1)
 */
	ENTRY_NP(hcall_ttrace_freeze)
	VCPU_GUEST_STRUCT(%g1, %g3)

	! Require a configured buffer on the calling cpu
	ldx	[%g1 + CPU_TTRACEBUF_SIZE], %g2
	brz,pn	%g2, herr_inval
	.empty

	movrnz	%o0, 1, %o0		! normalize to formal bool

	! race conditions for two CPUs updating this not harmful
	ldx	[%g3 + GUEST_TTRACE_FRZ], %o1	! current val for ret1
	stx	%o0, [%g3 + GUEST_TTRACE_FRZ]

	HCALL_RET(EOK)
	SET_SIZE(hcall_ttrace_freeze)
1573 | ||
1574 | ||
/*
 * ttrace_addentry - add a guest-supplied entry to the trap trace buffer
 *
 * arg0 lower 16 bits stored in TTRACE_ENTRY_TAG (%o0)
 * arg1 stored in TTRACE_ENTRY_F1 (%o1)
 * arg2 stored in TTRACE_ENTRY_F2 (%o2)
 * arg3 stored in TTRACE_ENTRY_F3 (%o3)
 * arg4 stored in TTRACE_ENTRY_F4 (%o4)
 * --
 * ret0 status (%o0)
 *	EOK	: entry recorded (or silently dropped while frozen)
 *	EINVAL	: no trace buffer configured for this cpu
 */
	ENTRY_NP(hcall_ttrace_addentry)
	/*
	 * Check that the guest has previously provided a buf for this cpu
	 * return EINVAL if not configured, ignore (EOK) if frozen
	 */
	TTRACE_PTR(%g3, %g2, herr_inval, hret_ok)

	/*
	 * Record the entry at the caller's trap level (current %tl - 1,
	 * clamped to a minimum of 1), so TTRACE_STATE captures the
	 * caller's trap state rather than this hypercall's.
	 */
	rdpr	%tl, %g4		! %g4 holds current tl
	sub	%g4, 1, %g3		! %g3 holds tl of caller
	mov	%g3, %g1		! save for TL field fixup
	movrz	%g3, 1, %g3		! minimum is TL=1
	wrpr	%g3, %tl

	TTRACE_STATE(%g2, TTRACE_TYPE_GUEST, %g3, %g5)
	stb	%g1, [%g2 + TTRACE_ENTRY_TL]	! overwrite with calc'd TL

	wrpr	%g4, %tl		! restore trap level

	! Fill in the caller-supplied payload fields
	sth	%o0, [%g2 + TTRACE_ENTRY_TAG]
	stx	%o1, [%g2 + TTRACE_ENTRY_F1]
	stx	%o2, [%g2 + TTRACE_ENTRY_F2]
	stx	%o3, [%g2 + TTRACE_ENTRY_F3]
	stx	%o4, [%g2 + TTRACE_ENTRY_F4]

	TTRACE_NEXT(%g2, %g3, %g4, %g5)	! advance the buffer offset

	HCALL_RET(EOK)
	SET_SIZE(hcall_ttrace_addentry)
1614 | ||
1615 | ||
/*
 * cpu_set_rtba - set the current cpu's rtba (real trap base address)
 *
 * arg0 rtba (%o0)
 * --
 * ret0 status (%o0)
 *	EOK	  : rtba updated
 *	ENORADDR  : rtba range not valid guest real memory
 *	EBADALIGN : rtba not REAL_TRAPTABLE_SIZE aligned
 * ret1 previous rtba (%o1)
 *	NOTE(review): %o1 is loaded before validation, so the previous
 *	rtba is also in %o1 on the error returns — confirm callers
 *	ignore ret1 on error.
 */
	ENTRY_NP(hcall_cpu_set_rtba)
	VCPU_GUEST_STRUCT(%g1, %g2)
	! %g1 = cpup
	! %g2 = guestp

	! Return prior rtba value
	ldx	[%g1 + CPU_RTBA], %o1

	! Check rtba for validity: whole trap table must be valid RA,
	! and must be naturally aligned to the trap table size
	RA2PA_RANGE_CONV(%g2, %o0, REAL_TRAPTABLE_SIZE, herr_noraddr, %g7, %g3)
	set	REAL_TRAPTABLE_SIZE - 1, %g3
	btst	%o0, %g3
	bnz,pn	%xcc, herr_badalign
	nop
	stx	%o0, [%g1 + CPU_RTBA]
	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_set_rtba)
1641 | ||
1642 | ||
/*
 * cpu_get_rtba - return the current cpu's rtba (real trap base address)
 *
 * --
 * ret0 status (%o0)
 *	EOK	: always succeeds
 * ret1 rtba (%o1)
 */
	ENTRY_NP(hcall_cpu_get_rtba)
	VCPU_STRUCT(%g1)
	ldx	[%g1 + CPU_RTBA], %o1
	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_get_rtba)
1655 | ||
1656 | ||
/*
 * hcall_set_watchdog - configure the guest's watchdog timer
 *
 * This implementation has a granularity of 1s. Arguments are rounded up
 * to the nearest second.  A timeout of 0 disables the watchdog.
 *
 * arg0 timeout in milliseconds (%o0)
 * --
 * ret0 status (%o0)
 *	EOK	: timer (re)armed or disabled
 *	EINVAL	: timeout exceeds WATCHDOG_MAX_TIMEOUT
 * ret1 time remaining in milliseconds (%o1); also returned on EINVAL
 */
	ENTRY_NP(hcall_set_watchdog)
	GUEST_STRUCT(%g2)
	set	GUEST_WATCHDOG + WATCHDOG_TICKS, %g3
	add	%g2, %g3, %g2
	! %g2 = address of the guest's watchdog tick counter

	/*
	 * Round up arg0, convert to seconds, and validate.
	 * arg0 == 0 keeps %g1 = 0 (disable) and skips validation.
	 */
	brz,pn	%o0, 1f
	mov	0, %g1			! delay slot: 0 ticks = disabled
	add	%o0, MSEC_PER_SEC - 1, %g1
	udivx	%g1, MSEC_PER_SEC, %g1	! %g1 = ceil(ms / 1000) seconds
	set	WATCHDOG_MAX_TIMEOUT, %g3
	cmp	%g1, %g3
	bleu,pn	%xcc, 1f
	inc	%g1	/* take care of a heartbeat about to happen */
	! (the inc above is in a non-annulled delay slot: it executes on
	! both paths, but %g1 is dead on the EINVAL path below)

	ldx	[%g2], %o1
	ba,pt	%xcc, herr_inval	! return remaining time even for EINVAL
	mulx	%o1, MSEC_PER_SEC, %o1	! delay slot: ticks(s) -> ms

1:
	/*
	 * Replace the current ticks with the new value, calculate
	 * the return value
	 */
	ATOMIC_SWAP_64(%g2, %g1, %g4, %g5)
	mulx	%g4, MSEC_PER_SEC, %o1	! previous ticks(s) -> ms remaining

	HCALL_RET(EOK)
	SET_SIZE(hcall_set_watchdog)
1699 | ||
1700 | ||
1701 | #ifdef CONFIG_BRINGUP | |
1702 | ||
/*
 * vdev_genintr - generate a virtual interrupt for a virtual device
 *
 * arg0 sysino (%o0)
 * --
 * ret0 status (%o0)
 *	EOK	: interrupt generated
 *	EINVAL	: sysino not a valid vdev interrupt number
 */
	ENTRY_NP(hcall_vdev_genintr)
	GUEST_STRUCT(%g1)
	! %g1 = guestp
	! Decode the device instance from the sysino; must be a vdev
	VINO2DEVINST(%g1, %o0, %g2, herr_inval)
	cmp	%g2, DEVOPS_VDEV
	bne,pn	%xcc, herr_inval
	nop
	GUEST2VDEVSTATE(%g1, %g2)
	add	%g2, VDEV_STATE_MAPREG, %g2
	! %g2 = mapreg array
	and	%o0, VINTR_INO_MASK, %o0 ! get INO bits
	mulx	%o0, MAPREG_SIZE, %g1	! index mapreg array by INO
	add	%g2, %g1, %g1
	! %g1 = mapreg
	HVCALL(vdev_intr_generate)
	HCALL_RET(EOK)
	SET_SIZE(hcall_vdev_genintr)
1727 | ||
1728 | #endif /* CONFIG_BRINGUP */ | |
1729 | ||
/*
 * cpu_yield - halt the calling strand until the next interrupt,
 * accounting the idle time against the vcpu's yield counter
 *
 * --
 * ret0 status (%o0)
 *	EOK	: always succeeds (returns when the strand resumes)
 */
	ENTRY_NP(hcall_cpu_yield)

	rd	%tick, %g6
	! Shift out bit 63 (the NPT bit) and sign-extend bit 62 back
	! down so tick deltas compute correctly
	sllx	%g6, 1, %g6		! remove npt bit
	srax	%g6, 1, %g6		! sign extend for correct delta comp

	! store the start tick
	VCPU_STRUCT(%g1)
	stx	%g6, [%g1 + CPU_UTIL_YIELD_START]

	! preserve the start tick across the halt
	STRAND_PUSH(%g6, %g2, %g3)

	HVCALL(plat_halt_strand)	! sleep until next interrupt

	STRAND_POP(%g2, %g3)
	!! %g2 = tick prior to strand de-activate

	rd	%tick, %g3
	sllx	%g3, 1, %g3		! remove npt bit
	srax	%g3, 1, %g3		! sign extend for correct delta comp
	sub	%g3, %g2, %g2
	!! %g2 = tick delta for yield time

	/*
	 * Add the tick delta to the total yielded cycles for this
	 * vcpu. The value of this counter is never reset as long
	 * as the vcpu is bound to a guest.
	 *
	 * As there is a 1:1 relationship between vcpus and physical
	 * strands, exclusive access to the vcpu struct can be assumed.
	 * If this relationship changes and this assumption becomes
	 * invalid, the code must be modified to ensure this counter
	 * is updated atomically.
	 */
	VCPU_STRUCT(%g1)
	ldx	[%g1 + CPU_UTIL_YIELD_COUNT], %g3
	add	%g3, %g2, %g3
	!! %g3 = updated yielded cycle count

	/*
	 * Clear the yield start variable just before updating the
	 * counter. This minimizes the window where the cycles from
	 * the current yield are not accounted for.
	 */
	stx	%g0, [%g1 + CPU_UTIL_YIELD_START]
	stx	%g3, [%g1 + CPU_UTIL_YIELD_COUNT]

	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_yield)