Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / common / src / hvcontrol.c
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: hvcontrol.c
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49#pragma ident "@(#)hvcontrol.c 1.12 07/07/10 SMI"
50
51#include <sys/htypes.h>
52#include <traps.h>
53#include <cache.h>
54#include <mmu.h>
55#include <vdev_ops.h>
56#include <vdev_intr.h>
57#include <ncs.h>
58#include <cyclic.h>
59#include <config.h>
60#include <vcpu.h>
61#include <strand.h>
62#include <guest.h>
63#include <support.h>
64#include <ldc.h>
65#include <hvctl.h>
66#include <md.h>
67#include <abort.h>
68#include <proto.h>
69#include <debug.h>
70
71#if DEBUG
72void dump_control_pkt();
73#endif
74
75void hvctl_send_pkt(hvctl_msg_t *replyp);
76void bad_sequence_number(int seqn, hvctl_msg_t *fromp);
77void reply_cmd(hvctl_msg_t *replyp, hvctl_status_t status);
78void op_start_hello(hvctl_msg_t *rcptp, hvctl_msg_t *replyp);
79void op_start_hello2(hvctl_msg_t *rcptp, hvctl_msg_t *replyp);
80void op_get_configp(hvctl_msg_t *replyp);
81hvctl_status_t op_reconfig(hvctl_msg_t *cmdp, hvctl_msg_t *replyp,
82 bool_t isdelayed);
83hvctl_status_t op_cancel_reconfig(hvctl_msg_t *cmdp, hvctl_msg_t *replyp);
84hvctl_status_t op_get_hvconfig(hvctl_msg_t *replyp);
85hvctl_status_t op_guest_start(hvctl_msg_t *cmdp, hvctl_msg_t *replyp);
86hvctl_status_t op_guest_stop(hvctl_msg_t *cmdp, hvctl_msg_t *replyp);
87hvctl_status_t op_guest_panic(hvctl_msg_t *cmdp, hvctl_msg_t *replyp);
88hvctl_status_t op_get_res_stat(hvctl_msg_t *cmdp, hvctl_msg_t *replyp);
89
90void get_guest_utilisation(guest_t *guestp, rs_guest_util_t *statp);
91
92extern void c_hvldc_send(int hv_endpt, void *payload);
93
94/*
95 * This function is essentially a callback when a HV control
96 * packet is received.
97 *
98 * It receives the request, performs the required action, and then
99 * formulates the appropriate response.
100 *
101 * Eventually the response will be returned via an ldc_send to the
102 * contributing domain, but for the moment the response packet is built,
103 * then finally copied into the temp buffer in the config structure - from
104 * where it is returned by the calling assembler layer.
105 */
void
hv_control_pkt()
{
	hvctl_msg_t *rcptp;		/* received command (input buffer) */
	hvctl_msg_t *replyp;		/* reply under construction (output buffer) */
	hvctl_op_t op;
	int seqn;
	hvctl_status_t status;

	/*
	 * The command arrives in config.hvctl_ibuf and the reply is
	 * built in config.hvctl_obuf; the surrounding assembler layer
	 * performs the actual channel receive/send.
	 */
	rcptp = (hvctl_msg_t *)&config.hvctl_ibuf[0];
	replyp = (hvctl_msg_t *)&config.hvctl_obuf[0];

	DBGHL(c_printf("HV control interface\n"));
	DBGHL(dump_control_pkt(rcptp));

	/* Header fields are carried in network byte order. */
	op = ntoh16(rcptp->hdr.op);
	seqn = ntoh16(rcptp->hdr.seqn);

	/*
	 * Prime response by copying over command
	 */
	replyp->hdr.op = hton16(op);

	/*
	 * Connection state machine:
	 *   UNCONNECTED -> (hello)    -> CHALLENGED
	 *   CHALLENGED  -> (response) -> CONNECTED
	 *   CONNECTED   -> command dispatch below
	 * A hello received in any state restarts the handshake.
	 */
	switch (config.hvctl_state) {
	case HVCTL_STATE_UNCONNECTED:
		DBGHL(c_printf("\t\tstate: UNKNOWN\n"));
		/* Only a hello is acceptable before authentication. */
		if (op != HVctl_op_hello) {
			reply_cmd(replyp, HVctl_st_eauth);
			return;
		}
		/*
		 * We special case the Hello command.
		 * It should have sequence number 1, and resets the
		 * rest of the command channel state machine.
		 */
hello_cmd:;
		op_start_hello(rcptp, replyp);
		return;

	case HVCTL_STATE_CHALLENGED:
		DBGHL(c_printf("\t\tstate: STATE_1\n"));
		if (op == HVctl_op_hello) goto hello_cmd;

		/* A sequence gap drops the connection back to square one. */
		if (config.hvctl_zeus_seq != seqn) {
			config.hvctl_state = HVCTL_STATE_UNCONNECTED;
			bad_sequence_number(seqn, replyp);
			return;
		}
		config.hvctl_zeus_seq++;	/* ready for next CMD packet */

		/* Only the challenge response may complete the handshake. */
		if (op != HVctl_op_response) {
			config.hvctl_state = HVCTL_STATE_UNCONNECTED;
			reply_cmd(replyp, HVctl_st_eauth);
			return;
		}
		op_start_hello2(rcptp, replyp);
		return;

	case HVCTL_STATE_CONNECTED:
		DBGHL(c_printf("\t\tstate: STATE_2\n"));
		if (op == HVctl_op_hello) goto hello_cmd;

		if (config.hvctl_zeus_seq != seqn) {
			config.hvctl_state = HVCTL_STATE_UNCONNECTED;
			bad_sequence_number(seqn, replyp);
			return;
		}
		config.hvctl_zeus_seq++;	/* ready for next CMD packet */
		break;

	default:
		/* Unreachable unless hvctl_state has been corrupted. */
		DBGHL(c_printf("Internal HVCTL error - reached state 0x%x\n",
		    config.hvctl_state));
		config.hvctl_state = HVCTL_STATE_UNCONNECTED;
		return;
	}

	/* Only the STATE_2 case makes it here */

	/*
	 * We get here in the normal case because the hvctl channel
	 * state indicates that the communication path has authenticated
	 * and is in fact now open.
	 *
	 * What remains is to handle each of the incoming control
	 * commands.  Ops that are unrecognized (or recognized but not
	 * implemented, e.g. suspend/resume) fall through with the
	 * default HVctl_st_eauth status.
	 */

	status = HVctl_st_eauth;
	switch (op) {
	case HVctl_op_get_hvconfig:
		DBGHL(c_printf("HVctl_op_get_hvconfig\n"));
		status = op_get_hvconfig(replyp);
		break;
	case HVctl_op_reconfigure:
		DBGHL(c_printf("HVctl_op_reconfigure\n"));
		status = op_reconfig(rcptp, replyp, false);
		break;
	case HVctl_op_guest_delayed_reconf:
		DBGHL(c_printf("HVctl_op_guest_delayed_reconf\n"));
		status = op_reconfig(rcptp, replyp, true);
		break;
	case HVctl_op_guest_start:
		DBGHL(c_printf("HVctl_op_guest_start\n"));
		status = op_guest_start(rcptp, replyp);
		break;
	case HVctl_op_guest_stop:
		DBGHL(c_printf("HVctl_op_guest_stop\n"));
		status = op_guest_stop(rcptp, replyp);
		break;
	case HVctl_op_guest_suspend:
		/* not implemented - replies with the default status */
		DBGHL(c_printf("HVctl_op_guest_suspend\n"));
		break;
	case HVctl_op_guest_resume:
		/* not implemented - replies with the default status */
		DBGHL(c_printf("HVctl_op_guest_resume\n"));
		break;
	case HVctl_op_guest_panic:
		DBGHL(c_printf("HVctl_op_guest_panic\n"));
		status = op_guest_panic(rcptp, replyp);
		break;
	case HVctl_op_get_res_stat:
		DBGHL(c_printf("HVctl_op_get_res_stat\n"));
		status = op_get_res_stat(rcptp, replyp);
		break;
	case HVctl_op_cancel_reconf:
		DBGHL(c_printf("HVctl_op_cancel_reconf\n"));
		status = op_cancel_reconfig(rcptp, replyp);
		break;
	default:
		break;
	}

	reply_cmd(replyp, status);
}
239
240/*
241 * This function is used to start a guest that is in the stopped state.
242 *
243 * We ack the command, and then when the guest actually gets going the
244 * domain manager should get an async state update indicating that the
245 * guest has actually been entered.
246 *
247 * That way we don't do things like memory scrub and prom copying in this
248 * function.
249 */
hvctl_status_t
op_guest_start(hvctl_msg_t *cmdp, hvctl_msg_t *replyp)
{
	int guestid;
	guest_t *guestp = (guest_t *)config.guests;	/* base of guest array */
	hvctl_res_error_t errcode;
	hvctl_status_t status;

	/*
	 * NOTE(review): guestid is read without ntoh32(), whereas
	 * op_guest_panic() byte-swaps it.  On the big-endian SPARC
	 * target the two are equivalent; confirm intent if this is
	 * ever built for another byte order.
	 */
	guestid = cmdp->msg.guestop.guestid;
	errcode = 0;
	status = HVctl_st_ok;

	/* Reject out-of-range guest IDs outright. */
	if (guestid < 0 || guestid >= NGUESTS) {
		errcode = HVctl_e_guest_invalid_id;
		status = HVctl_st_einval;
		goto done;
	}

	guestp = &guestp[guestid];

	/* The state check and the transition below must be atomic. */
	spinlock_enter(&guestp->state_lock);

	switch (guestp->state) {
	case GUEST_STATE_SUSPENDED:
	case GUEST_STATE_NORMAL:
	case GUEST_STATE_EXITING:
	case GUEST_STATE_RESETTING:
		/* Guest is already active in some form - cannot start. */
		errcode = HVctl_e_guest_active;
		status = HVctl_st_eillegal;
		spinlock_exit(&guestp->state_lock);
		goto done;
	case GUEST_STATE_STOPPED:
		/* only a stopped guest may be started */
		break;
	case GUEST_STATE_UNCONFIGURED:
	default:
		errcode = HVctl_e_guest_invalid_id;
		status = HVctl_st_einval;
		spinlock_exit(&guestp->state_lock);
		goto done;
	}

	/* Claim the guest for reset before dropping the lock. */
	guestp->state = GUEST_STATE_RESETTING;

	spinlock_exit(&guestp->state_lock);

	/* Kick off the actual boot; fails if the guest has no vcpus. */
	if (!guest_ignition(guestp)) {
		errcode = HVctl_e_guest_nocpus;
		status = HVctl_st_einval;
	}

done:
	replyp->msg.guestop.code = errcode;
	return (status);
}
304
305
306/*
307 * Find an appropriate target strand to drive the guest exit
308 * operation. The criteria for a valid target is a strand that
309 * is not in error and not in transition.
310 *
311 * Returns the ID of the selected strand, or -1 if no suitable
312 * strand could be found.
313 */
314static uint8_t
315find_target_strand_for_exit(guest_t *guestp)
316{
317 int i;
318 vcpu_t **vcpulistp;
319 vcpu_t *vcpup;
320
321 vcpulistp = &(guestp->vcpus[0]);
322
323 for (i = 0; i < NVCPUS; i++) {
324
325 vcpup = vcpulistp[i];
326
327 if (vcpup == NULL)
328 continue;
329
330 ASSERT(vcpup->guest == guestp);
331
332 if ((vcpup->status != CPU_STATE_RUNNING) &&
333 (vcpup->status != CPU_STATE_STOPPED) &&
334 (vcpup->status != CPU_STATE_SUSPENDED))
335 continue;
336
337 return (vcpup->strand->id);
338 }
339
340 return (-1);
341}
342
343/*
344 * This function is used to stop a guest that is in the running or
345 * suspended state.
346 *
347 * We ack the command, and then when the guest actually shuts down the
348 * domain manager should get an async state update indicating that the
349 * guest has actually been entered.
350 *
351 * This avoids having to busy wait in this function while other parts of
352 * the hypervisor shuts down.
353 *
354 * If Zeus doesn't get a timely response to this command it should assume
355 * that some or all of the strands associated with this command are dead
356 * or off in the weeds ...
357 */
358hvctl_status_t
359op_guest_stop(hvctl_msg_t *cmdp, hvctl_msg_t *replyp)
360{
361 int guestid;
362 guest_t *guestp = (guest_t *)config.guests;
363 hvctl_res_error_t errcode;
364 hvctl_status_t status;
365 uint8_t tgt_strand;
366 hvm_t hvxcmsg;
367
368 guestid = cmdp->msg.guestop.guestid;
369 errcode = 0;
370 status = HVctl_st_ok;
371
372 if (guestid < 0 || guestid >= NGUESTS) {
373 errcode = HVctl_e_guest_invalid_id;
374 status = HVctl_st_einval;
375 goto done;
376 }
377
378 guestp = &guestp[guestid];
379
380 spinlock_enter(&guestp->state_lock);
381
382 switch (guestp->state) {
383 case GUEST_STATE_NORMAL:
384 case GUEST_STATE_SUSPENDED:
385 /* state is fine to proceed */
386 break;
387
388 case GUEST_STATE_STOPPED:
389 errcode = HVctl_e_guest_stopped;
390 status = HVctl_st_eillegal;
391 spinlock_exit(&guestp->state_lock);
392 goto done;
393
394 case GUEST_STATE_RESETTING:
395 /*
396 * The guest is already resetting, so it cannot
397 * be stopped and it is not appropriate to wait
398 * for the reset to complete. Fail the operation.
399 */
400 status = HVctl_st_stop_failed;
401 spinlock_exit(&guestp->state_lock);
402 goto done;
403
404 case GUEST_STATE_EXITING:
405 /*
406 * The guest is already stopping, so return
407 * success and let the LDom manager wait for
408 * the asynchronous notification that the
409 * prior stop has completed.
410 */
411 spinlock_exit(&guestp->state_lock);
412 goto done;
413
414 case GUEST_STATE_UNCONFIGURED:
415 default:
416 errcode = HVctl_e_guest_invalid_id;
417 status = HVctl_st_einval;
418 spinlock_exit(&guestp->state_lock);
419 goto done;
420 }
421
422 /*
423 * The stop operation must be driven by a strand
424 * in the guest being stopped. Find an appropriate
425 * strand and send it a xcall to do the real work.
426 */
427 tgt_strand = find_target_strand_for_exit(guestp);
428 if (tgt_strand == -1) {
429 status = HVctl_st_stop_failed;
430 spinlock_exit(&guestp->state_lock);
431 goto done;
432 }
433
434 guestp->state = GUEST_STATE_EXITING;
435 spinlock_exit(&guestp->state_lock);
436
437 DBG(c_printf("sending xcall to stop guest...\n"));
438
439 /* pack and send the xcall */
440 hvxcmsg.cmd = HXCMD_STOP_GUEST;
441 hvxcmsg.args.stopguest.guestp = (uint64_t)guestp;
442
443 DBG(c_printf("stop guest target 0x%x ..\n", tgt_strand));
444
445 c_hvmondo_send(&strands[tgt_strand], &hvxcmsg);
446
447done:
448 replyp->msg.guestop.code = errcode;
449 return (status);
450}
451
452
/*
 * Force a (non-control) guest domain to panic by sending a
 * HXCMD_GUEST_PANIC mondo to one of its running vcpus.
 */
hvctl_status_t
op_guest_panic(hvctl_msg_t *cmdp, hvctl_msg_t *replyp)
{
	int i;
	uint32_t guestid;
	guest_t *guestp;
	ldc_endpoint_t *hvctl_ep;
	vcpu_t *vcpup;
	hvm_t hvxcmsg;

	/* Unlike op_guest_start/stop, this path byte-swaps its fields. */
	guestid = ntoh32(cmdp->msg.guestop.guestid);

	/* guestid is unsigned, so a single upper-bound check suffices. */
	if (guestid >= NGUESTS) {
		replyp->msg.guestop.code = hton32(HVctl_e_guest_invalid_id);
		return (HVctl_st_einval);
	}

	guestp = &((guest_t *)config.guests)[guestid];
	hvctl_ep = &((ldc_endpoint_t *)config.hv_ldcs)[config.hvctl_ldc];

	/*
	 * Prevent attempts to panic the control domain.
	 * By definition, that is the domain initiating
	 * this request.
	 */
	if (guestp == hvctl_ep->target_guest) {
		replyp->msg.guestop.code = hton32(HVctl_e_guest_invalid_id);
		return (HVctl_st_einval);
	}

	/*
	 * NOTE(review): guestp->state is examined here without taking
	 * guestp->state_lock (op_guest_start/op_guest_stop do take it),
	 * so a concurrent state change could race this check - confirm
	 * whether that is acceptable on this path.
	 */
	switch (guestp->state) {
	case GUEST_STATE_NORMAL:
	case GUEST_STATE_SUSPENDED:
	case GUEST_STATE_EXITING:
		/* an active guest can be told to panic */
		break;
	case GUEST_STATE_STOPPED:
		replyp->msg.guestop.code = hton32(HVctl_e_guest_stopped);
		return (HVctl_st_eillegal);
	case GUEST_STATE_UNCONFIGURED:
	default:
		replyp->msg.guestop.code = hton32(HVctl_e_guest_invalid_id);
		return (HVctl_st_einval);
	}

	/* find a running vcpu in the guest domain */
	for (i = 0; i < NVCPUS; i++) {
		vcpup = guestp->vcpus[i];
		if (vcpup == NULL)
			continue;

		if (vcpup->status == CPU_STATE_RUNNING)
			break;
	}

	/* loop ran off the end: no runnable vcpu to deliver the panic to */
	if (i == NVCPUS) {
		replyp->msg.guestop.code = hton32(HVctl_e_guest_nocpus);
		return (HVctl_st_einval);
	}

	/* send a mondo to the chosen vcpu */
	hvxcmsg.cmd = HXCMD_GUEST_PANIC;
	hvxcmsg.args.guestcmd.vcpup = (uint64_t)vcpup;

	c_hvmondo_send(vcpup->strand, &hvxcmsg);

	return (HVctl_st_ok);
}
520
/*
 * Return the requested status info for a guest resource directly in
 * the reply packet.  Reading an info type also clears the matching
 * async_busy flag, re-arming the asynchronous notification for it
 * (see guest_state_notify()/guest_soft_state_notify()).
 */
hvctl_status_t
get_guest_status(hvctl_msg_t *cmdp, hvctl_msg_t *replyp)
{
	int guestid;
	int infoid;
	int status;
	guest_t *guestp;
	void *dptr;

	guestid = cmdp->msg.resstat.resid;
	infoid = cmdp->msg.resstat.infoid;

	if (guestid < 0 || guestid >= NGUESTS) {
		replyp->msg.resstat.code = HVctl_e_guest_invalid_id;
		return (HVctl_st_einval);
	}

	status = HVctl_st_ok;
	guestp = &((guest_t *)config.guests)[guestid];

	/* status payload is written directly into the reply packet */
	dptr = &(replyp->msg.resstat.data[0]);

	/* range-check infoid before using it to index the lock array */
	if (infoid < 0 || infoid >= HVctl_info_guest_max) {
		replyp->msg.resstat.code = HVctl_e_invalid_infoid;
		status = HVctl_st_einval;
	} else {
		/* each info type has its own async lock */
		spinlock_enter(&guestp->async_lock[infoid]);

		switch (infoid) {
		case HVctl_info_guest_state: {
			rs_guest_state_t *statp = dptr;

			statp->state = guestp->state;
			guestp->async_busy[infoid] = 0;
			break;
		}
		case HVctl_info_guest_soft_state: {
			rs_guest_soft_state_t *statp = dptr;

			statp->soft_state = guestp->soft_state;
			c_memcpy(statp->soft_state_str, guestp->soft_state_str,
			    SOFT_STATE_SIZE);
			guestp->async_busy[infoid] = 0;
			break;
		}
		case HVctl_info_guest_tod: {
			rs_guest_tod_t *statp = dptr;

			statp->tod = guestp->tod_offset;
			guestp->async_busy[infoid] = 0;
			break;
		}
		case HVctl_info_guest_utilisation:
			/* no async_busy flag is associated with this one */
			get_guest_utilisation(guestp, dptr);
			break;
		default:
			replyp->msg.resstat.code = HVctl_e_invalid_infoid;
			status = HVctl_st_einval;
		}

		spinlock_exit(&guestp->async_lock[infoid]);
	}

	return (status);
}
586
587/*
588 * Returns the number of yielded cycles for the specified vcpu since
589 * the last time the utilization statistics were gathered.
590 *
591 * Each vcpu maintains a count of the yielded cycles since the guest
592 * was bound to it. By tracking only the delta from the last time the
593 * count was read, it is not necessary to reset the yield count or
594 * use atomic operations to update the yielded cycles per-guest and
595 * yielded cycles per-vcpu counters.
596 */
597static uint64_t
598get_vcpu_yielded_cycle_delta(vcpu_t *vcpup, uint64_t now, uint64_t last_count)
599{
600 uint64_t yield_count;
601 uint64_t yield_curr;
602
603 /* start with the total yielded cycles */
604 yield_count = vcpup->util.yield_count;
605
606 /* check if the vcpu is currently yielded */
607 if ((yield_curr = vcpup->util.yield_start) == 0) {
608 /*
609 * The vcpu is not currently yielded. Read
610 * the yield count again to make sure that
611 * if a yield just completed, those cycles
612 * are accounted for.
613 */
614 yield_count = vcpup->util.yield_count;
615
616 } else if (yield_curr < now) {
617 /* add the cycles for the current yield */
618 yield_count += (now - yield_curr);
619 }
620
621 /*
622 * Return the change in the number of yielded cycles
623 * since the last time the yield stats were gathered
624 * for this vcpu.
625 */
626 return (yield_count - last_count);
627}
628
629/*
630 * Returns the utilisation stats for a guest since the last
631 * time they were read. The side effect of this call is to
632 * reset the stat collecting again.
633 */
634void
635get_guest_utilisation(guest_t *guestp, rs_guest_util_t *statp)
636{
637 uint64_t now;
638 vcpu_t *vcpup;
639 int i;
640
641 now = GET_STICK_TIME();
642
643 /*
644 * When this HV supports sub-cpu scheduling
645 * these figures have to come from the guest struct
646 */
647 statp->lifespan = now - guestp->start_stick;
648 statp->wallclock_delta = now - guestp->util.stick_last;
649 statp->active_delta = now - guestp->util.stick_last;
650 /*
651 * Number of cycles CPUs have been stopped for
652 * - not the same as yielded cycles.
653 * FIXME: assume zero for now.
654 */
655 statp->stopped_cycles = 0;
656
657 /*
658 * Aggregate the yield cycles for each vcpu assigned to
659 * this guest for the last timing interval.
660 */
661 statp->yielded_cycles = 0;
662 vcpup = &((vcpu_t *)config.vcpus)[0];
663
664 for (i = 0; i < NVCPUS; i++) {
665
666 if (vcpup->guest == guestp) {
667 uint64_t delta;
668
669 /*
670 * Get the number of yielded cycles since the
671 * guest stats were last gathered.
672 */
673 delta = get_vcpu_yielded_cycle_delta(vcpup, now,
674 vcpup->util.last_yield_count_guest);
675
676 /* set the guest last yield count to 'now' */
677 vcpup->util.last_yield_count_guest += delta;
678
679 /*
680 * Aggregate the vcpu delta with the total guest
681 * yielded cycle count.
682 */
683 statp->yielded_cycles += delta;
684 }
685 vcpup++;
686 }
687
688 guestp->util.stick_last = now;
689}
690
691/*
692 * Return the status of a vcpu resource
693 */
694hvctl_status_t
695get_vcpu_status(hvctl_msg_t *cmdp, hvctl_msg_t *replyp)
696{
697 int vcpuid;
698 int infoid;
699 int status;
700 vcpu_t *vcpup;
701 void *dptr;
702
703 vcpuid = cmdp->msg.resstat.resid;
704 infoid = cmdp->msg.resstat.infoid;
705
706 if (vcpuid < 0 || vcpuid >= NVCPUS) {
707 replyp->msg.resstat.code = HVctl_e_vcpu_invalid_id;
708 return (HVctl_st_einval);
709 }
710
711 status = HVctl_st_ok;
712 vcpup = &((vcpu_t *)config.vcpus)[vcpuid];
713
714 dptr = &(replyp->msg.resstat.data[0]);
715
716 switch (infoid) {
717 case HVctl_info_vcpu_state: {
718 rs_vcpu_state_t *statp = dptr;
719 uint64_t now;
720 uint64_t delta;
721
722 now = GET_STICK_TIME();
723
724 statp->state = vcpup->status;
725 statp->lifespan = now - vcpup->start_stick;
726 statp->wallclock_delta = now - vcpup->util.stick_last;
727 statp->active_delta = now - vcpup->util.stick_last;
728
729 /*
730 * Get the number of yielded cycles since the
731 * vcpu stats were last gathered.
732 */
733 delta = get_vcpu_yielded_cycle_delta(vcpup, now,
734 vcpup->util.last_yield_count_vcpu);
735
736 /* set the vcpu last yield count to 'now' */
737 vcpup->util.last_yield_count_vcpu += delta;
738
739 /* clamp if necessary */
740 statp->yielded_cycles = (delta > statp->active_delta) ?
741 statp->active_delta : delta;
742
743 vcpup->util.stick_last = now;
744 break;
745 }
746 default:
747 replyp->msg.resstat.code = HVctl_e_invalid_infoid;
748 status = HVctl_st_einval;
749 }
750
751 return (status);
752}
753
754/*
755 * This function is used to request the status of a resource.
756 */
757static hvctl_status_t
758op_get_res_stat(hvctl_msg_t *cmdp, hvctl_msg_t *replyp)
759{
760 hvctl_status_t status;
761
762 switch (cmdp->msg.resstat.res) {
763 case HVctl_res_guest:
764 status = get_guest_status(cmdp, replyp);
765 break;
766
767 case HVctl_res_vcpu:
768 status = get_vcpu_status(cmdp, replyp);
769 break;
770
771 case HVctl_res_memory:
772 case HVctl_res_mau:
773#ifdef CONFIG_PCIE
774 case HVctl_res_pcie_bus:
775#endif
776 case HVctl_res_ldc:
777 case HVctl_res_hv_ldc:
778 case HVctl_res_guestmd:
779#ifdef STANDALONE_NET_DEVICES
780 case HVctl_res_network_device:
781#endif
782 status = HVctl_st_enotsupp;
783 break;
784
785 default:
786 status = HVctl_st_einval;
787 break;
788 }
789
790 return (status);
791}
792
793
794void
795bad_sequence_number(int seqn, hvctl_msg_t *replyp)
796{
797 DBGHL(c_printf("Bad sequence number received 0x%x - expected 0x%x\n",
798 seqn, config.hvctl_zeus_seq));
799
800 config.hvctl_state = HVCTL_STATE_UNCONNECTED;
801 reply_cmd(replyp, HVctl_st_bad_seqn);
802}
803
/*
 * Finish a reply: stamp the (network byte order) status into the
 * header, tear down the connection on authentication/sequence
 * failures, and hand the packet to hvctl_send_pkt().
 */
void
reply_cmd(hvctl_msg_t *replyp, hvctl_status_t status)
{
	replyp->hdr.status = hton16(status);

	switch (status) {
	case HVctl_st_ok:
		break;
	case HVctl_st_bad_seqn:
	case HVctl_st_eauth:
		/* auth/sequence failures drop the connection ... */
		config.hvctl_state = HVCTL_STATE_UNCONNECTED;
		/* FALLTHROUGH - ... and are logged like other failures */
	default:
		DBGHL(c_printf(
		    "\tCommand failed with error code %x\n", status));
		break;
	}

	hvctl_send_pkt(replyp);
}
823
824
825/*
826 * Initial hello handshake from the Domain Manager
827 *
828 * We check the major and minor version numbers offered for the protocol
829 * negotiation, and note the sequence number offered to us by the domain
830 * manager ... this is how we'll detect that a message got dropped later.
831 * We also "invent" our own sequence number so the domain
832 * manager can spot "dropped" packets later also.
833 */
834#define RANDOM_SEQ_OFFSET 2909 /* ! */
835
void
op_start_hello(hvctl_msg_t *rcptp, hvctl_msg_t *replyp)
{
	/*
	 * Adopt Zeus' offered sequence number and derive our own from
	 * it.  RANDOM_SEQ_OFFSET is a fixed constant, so this gives
	 * dropped-packet detection, not security.
	 */
	config.hvctl_zeus_seq = ntoh16(rcptp->hdr.seqn);
	config.hvctl_zeus_seq++;	/* For the next packet */

	config.hvctl_hv_seq = config.hvctl_zeus_seq + RANDOM_SEQ_OFFSET;

	/*
	 * NOTE(review): the version fields are 64-bit after ntoh64()
	 * but printed with %d - confirm c_printf's conversion handling.
	 */
	DBGHL(c_printf("Requested HV channel version %d.%d\n",
	    ntoh64(rcptp->msg.hello.major),
	    ntoh64(rcptp->msg.hello.minor)));

	if (ntoh64(rcptp->msg.hello.major) != HVCTL_VERSION_MAJOR_NUMBER) {
		/* Currently only support 1 version */
		replyp->hdr.op = hton16(HVctl_op_hello);
		replyp->hdr.status = hton16(HVctl_st_enotsupp);
		/* tell Zeus which version we do support */
		replyp->msg.hello.major = hton64(HVCTL_VERSION_MAJOR_NUMBER);
		replyp->msg.hello.minor = hton64(HVCTL_VERSION_MINOR_NUMBER);

		config.hvctl_state = HVCTL_STATE_UNCONNECTED;

		DBGHL(c_printf("Version refused\n"));

	} else {
		/*
		 * "Random" challenge seed: __LINE__ is a compile-time
		 * constant, not random at all - the FIXME stands.
		 */
		config.hvctl_rand_num = __LINE__; /* FIXME */

		replyp->hdr.op = hton16(HVctl_op_challenge);
		replyp->hdr.status = hton16(HVctl_st_ok);
		replyp->msg.clnge.code =
		    hton64(HVCTL_HV_CHALLENGE_K ^ config.hvctl_rand_num);

		/* await the XOR-ed response in op_start_hello2() */
		config.hvctl_state = HVCTL_STATE_CHALLENGED;

		DBGHL(c_printf("Version accepted; challenge = 0x%x\n",
		    ntoh64(replyp->msg.clnge.code)));
	}

	/*
	 * Reply is sent directly rather than via reply_cmd(); the
	 * header op/status fields were filled in explicitly above.
	 */
	hvctl_send_pkt(replyp);
}
875
876/*
877 * Second stage in the handshake process .. try and retrieve our key
878 * from the packet
879 */
880void
881op_start_hello2(hvctl_msg_t *rcptp, hvctl_msg_t *replyp)
882{
883 uint64_t key;
884
885 DBGHL(c_printf("\tHello 2:\n"));
886
887 key = ntoh64(rcptp->msg.clnge.code);
888
889 if ((key ^ HVCTL_ZEUS_CHALLENGE_K) != config.hvctl_rand_num) {
890 DBGHL(c_printf("\t\tFailed key check\n"));
891 reply_cmd(replyp, HVctl_st_eauth);
892 return;
893 }
894
895 DBGHL(c_printf("\t\tPassed key check\n"));
896 config.hvctl_state = HVCTL_STATE_CONNECTED;
897
898 reply_cmd(replyp, HVctl_st_ok);
899}
900
901
902/*
903 * For the moment simply fills in the required reply
904 * fields, and lets the outer asm layer do the send.
905 */
void
hvctl_send_pkt(hvctl_msg_t *replyp)
{
	/* Stamp the reply with the HV-side sequence number (network order). */
	replyp->hdr.seqn = hton16(config.hvctl_hv_seq);
	/* Advance for the next outbound packet. */
	config.hvctl_hv_seq++;
}
912
913
914/*
915 * Used by Zeus to pull out the hypervisor's machine description plus the
916 * current delayed reconfiguration status and machine description if any.
917 *
918 * Membase and memsize is the range of memory that the HV has reserved
919 * for itself.
920 */
921hvctl_status_t
922op_get_hvconfig(hvctl_msg_t *replyp)
923{
924 replyp->msg.hvcnf.hv_membase = hton64(config.membase);
925 replyp->msg.hvcnf.hv_memsize = hton64(config.memsize);
926
927 spinlock_enter(&config.del_reconf_lock);
928
929 replyp->msg.hvcnf.hvmdp = hton64((uint64_t)config.active_hvmd);
930 replyp->msg.hvcnf.del_reconf_gid = hton32(config.del_reconf_gid);
931 if (config.del_reconf_gid != INVALID_GID)
932 replyp->msg.hvcnf.del_reconf_hvmdp =
933 hton64((uint64_t)config.parse_hvmd);
934 else
935 replyp->msg.hvcnf.del_reconf_hvmdp = hton64((uint64_t)NULL);
936
937 spinlock_exit(&config.del_reconf_lock);
938
939 return (HVctl_st_ok);
940}
941
942/*
943 * Send an asynchronous notification on the HVctl channel that
944 * a guest's soft state has changed.
945 */
946void
947guest_soft_state_notify(guest_t *guestp)
948{
949 hvctl_msg_t ssmsg;
950
951 c_bzero(&ssmsg, sizeof (ssmsg));
952
953 spinlock_enter(&guestp->async_lock[HVctl_info_guest_soft_state]);
954 if (guestp->async_busy[HVctl_info_guest_soft_state] == 0) {
955 guestp->async_busy[HVctl_info_guest_soft_state] = 1;
956
957 ssmsg.hdr.op = HVctl_op_new_res_stat;
958 ssmsg.msg.resstat.res = HVctl_res_guest;
959 ssmsg.msg.resstat.resid = guestp->guestid;
960 ssmsg.msg.resstat.infoid = HVctl_info_guest_soft_state;
961 ssmsg.msg.resstat.code = 0;
962 ((rs_guest_soft_state_t *)ssmsg.msg.resstat.data)->soft_state =
963 guestp->soft_state;
964 c_memcpy(((rs_guest_soft_state_t *)ssmsg.msg.resstat.data)->
965 soft_state_str, guestp->soft_state_str, SOFT_STATE_SIZE);
966
967 spinlock_enter(&config.hvctl_ldc_lock);
968 c_hvldc_send(config.hvctl_ldc, &ssmsg);
969 spinlock_exit(&config.hvctl_ldc_lock);
970 }
971 spinlock_exit(&guestp->async_lock[HVctl_info_guest_soft_state]);
972}
973
974/*
975 * Send an asynchronous notification on the HVctl channel that
976 * a guest's state has changed.
977 */
void
guest_state_notify(guest_t *guestp)
{
	hvctl_msg_t smsg;

	spinlock_enter(&guestp->async_lock[HVctl_info_guest_state]);
	/* skip if a prior state notification is still outstanding */
	if (guestp->async_busy[HVctl_info_guest_state] != 0) {
		spinlock_exit(&guestp->async_lock[HVctl_info_guest_state]);
		return;
	}

	/* cleared when the Domain Manager reads the state back */
	guestp->async_busy[HVctl_info_guest_state] = 1;

	c_bzero(&smsg, sizeof (smsg));

	/* async status packet; multi-byte fields in network byte order */
	smsg.hdr.op = hton16(HVctl_op_new_res_stat);
	smsg.msg.resstat.res = hton32(HVctl_res_guest);
	smsg.msg.resstat.resid = hton32(guestp->guestid);
	smsg.msg.resstat.infoid = hton32(HVctl_info_guest_state);
	smsg.msg.resstat.code = hton32(0);
	((rs_guest_state_t *)smsg.msg.resstat.data)->state =
	    hton64(guestp->state);

	/*
	 * Take the hvctl_ldc_lock while holding the async lock to ensure that
	 * state notifications maintain ordering and then release the async
	 * lock to minimize the time it is held.
	 */
	spinlock_enter(&config.hvctl_ldc_lock);
	spinlock_exit(&guestp->async_lock[HVctl_info_guest_state]);

	c_hvldc_send(config.hvctl_ldc, &smsg);

	spinlock_exit(&config.hvctl_ldc_lock);
}
1013
1014
1015#if DEBUG
1016
1017void
1018dump_control_pkt()
1019{
1020 hvctl_msg_t *rcptp;
1021 char *sp;
1022
1023 rcptp = (hvctl_msg_t *)&config.hvctl_ibuf[0];
1024
1025 DBGHL(c_printf("\tCommand op 0x%x : seq# 0x%x : chksum # 0x%x\n",
1026 rcptp->hdr.op, rcptp->hdr.seqn, rcptp->hdr.chksum));
1027
1028#define OP(_s, _n) case _s : sp = #_s##" : "##_n; break;
1029 switch (rcptp->hdr.op) {
1030 OP(HVctl_op_hello, "Initial request to open hvctl channel")
1031 OP(HVctl_op_challenge, "challenge returned from HV to Zeus")
1032 OP(HVctl_op_response, "Response from Zeus")
1033 OP(HVctl_op_get_hvconfig, "Get the HV config pointers")
1034 OP(HVctl_op_reconfigure, "Reconfigure request")
1035 OP(HVctl_op_guest_start, "Start a guest")
1036 OP(HVctl_op_guest_stop, "Stop a guest")
1037 OP(HVctl_op_guest_delayed_reconf, "Delayed reconfigure on guest exit")
1038 OP(HVctl_op_guest_suspend, "Suspend a guest")
1039 OP(HVctl_op_guest_resume, "Resume a guest")
1040 OP(HVctl_op_guest_panic, "Panic a guest")
1041 OP(HVctl_op_get_res_stat, "Get resource status if supported")
1042 OP(HVctl_op_new_res_stat, "Async resource status update if supported")
1043#undef OP
1044 default:
1045 sp = "Unknown command";
1046 break;
1047 }
1048 DBGHL(c_printf("\t%s\n", sp));
1049}
1050
1051#endif