Initial commit of OpenSPARC T2 architecture model.
OpenSPARC-T2-SAM/hypervisor/src/common/src/init.c
/*
* ========== Copyright Header Begin ==========================================
*
* Hypervisor Software File: init.c
*
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
*
* - Do not alter or remove copyright notices
*
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of Sun Microsystems, Inc. nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
*
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* any nuclear facility.
*
* ========== Copyright Header End ============================================
*/
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"@(#)init.c	1.19	07/09/20 SMI"

#include <stdarg.h>
#include <sys/htypes.h>
#include <vdev_ops.h>
#include <vdev_intr.h>
#include <ncs.h>
#include <cyclic.h>
#include <vcpu.h>
#include <strand.h>
#include <guest.h>
#include <memory.h>
#include <pcie.h>
#include <fpga.h>
#include <hvctl.h>
#include <md.h>
#include <proto.h>
#include <debug.h>
#ifdef STANDALONE_NET_DEVICES
#include <network.h>
#endif

hvctl_status_t op_guest_start(hvctl_msg_t *cmdp, hvctl_msg_t *replyp);
void config_hv_ldcs();
void config_a_hvldc(bin_md_t *mdp, md_element_t *hvldc_nodep);
void config_vcpus();
void config_vcpu_state(vcpu_t *vp);
#ifdef CONFIG_FPGA
void config_sp_ldcs();
void c_fpga_uninit();
#endif
void config_guests();
void config_a_guest(bin_md_t *mdp, md_element_t *guest_nodep);
void config_guest_md(guest_t *guestp);
void config_a_guest_ldc_endpoint(guest_t *guestp, bin_md_t *mdp,
    md_element_t *ldce_nodep);
#ifdef CONFIG_PCIE
extern void config_platform_pcie();
extern void reset_platform_pcie_busses(guest_t *guestp, pcie_device_t *pciep);
#endif
#ifdef STANDALONE_NET_DEVICES
extern void reset_platform_network_devices(guest_t *guestp,
    network_device_t *netp);
#endif
void kickoff_guests();
void reset_ldc_endpoint(ldc_endpoint_t *ldc_ep);
void c_ldc_cpu_notify(ldc_endpoint_t *t_endpt, vcpu_t *t_vcpup);
void config_svcchans();

#ifdef CONFIG_SVC
extern void config_svcchans();
#endif

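/*
 * Build a local HVctl_op_reconfigure request for the given hypervisor MD
 * (guestid 0) and feed it straight to op_reconfig(), as if the command had
 * arrived over the hvctl channel.  Under DEBUG builds the returned status
 * is printed; otherwise it is ignored.
 */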
void
fake_reconfig(bin_md_t *hvmdp)
{
	hvctl_msg_t cmd;
	hvctl_msg_t reply;

	cmd.hdr.op = HVctl_op_reconfigure;
	cmd.hdr.status = 0;
	cmd.msg.reconfig.hvmdp = (uint64_t)hvmdp;
	cmd.msg.reconfig.guestid = 0;
	reply = cmd;

	DBGINIT(c_printf("Fake reconfig:\n"));
#ifdef DEBUG
	c_printf("returned status 0x%x\n", op_reconfig(&cmd, &reply, false));
#else
	(void) op_reconfig(&cmd, &reply, false);
#endif /* DEBUG */
}


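/*
 * Build a local HVctl_op_guest_start request for guest id 'i' and pass it
 * straight to op_guest_start(), bypassing the hvctl channel.  Under DEBUG
 * builds the returned status is printed; otherwise it is ignored.
 */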
void
fake_hvm_guest_start(int i)
{
	hvctl_msg_t cmd;
	hvctl_msg_t reply;

	cmd.hdr.op = HVctl_op_guest_start;
	cmd.hdr.status = 0;
	cmd.msg.guestop.guestid = i;
	reply = cmd;

	DBGINIT(c_printf("Fake guest start 0x%x\n", i));
#ifdef DEBUG
	c_printf("returned status 0x%x\n", op_guest_start(&cmd, &reply));
#else
	(void) op_guest_start(&cmd, &reply);
#endif /* DEBUG */
}


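/*
 * Top-level C routine for hypervisor initialisation.  Wires the global
 * config structure up to the statically allocated guest/mblock/vcpu/strand/
 * LDC tables, initialises the internal HV data structures, loads and
 * pre-parses the hypervisor MD, configures LDCs, guests and vcpus from the
 * MD contents, and finally schedules the boot cpu of each configured guest.
 */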
void
c_start(void)
{
	/* LINTED */
	void *pres_hvmd;	/* FIXME: to go away */

	DBGINIT(c_printf(
	    "\n\n\t\t\tHypervisor 2.0 (LDoms capable + console)\n\n"));

	DBGINIT(c_printf("relocation is 0x%x :\n", config.reloc));
	DBGINIT(c_printf("\tso PROM c_start is at 0x%x\n",
	    config.reloc + (uint64_t)&c_start));
	DBGINIT(c_printf("\tRAM c_start is at 0x%x\n", (uint64_t)c_start));

	/*
	 * The following setup need only be done once at the
	 * beginning of time.
	 */
	config.guests = &guests[0];
	config.mblocks = &mblocks[0];
	config.vcpus = &vcpus[0];
	config.strands = &strands[0];
	config.hv_ldcs = &hv_ldcs[0];
	config.sp_ldcs = &sp_ldcs[0];
#ifdef CONFIG_PCIE
	config.pcie_busses = &pcie_bus[0];
#endif
#ifdef STANDALONE_NET_DEVICES
	config.network_devices = &network_device[0];
#endif
	DBGINIT(c_printf("root config @ 0x%x (0x%x)\n", (uint64_t)&config,
	    sizeof (struct config)));
	DBGINIT(c_printf("%d guest(s) @ 0x%x (0x%x)\n", NGUESTS, config.guests,
	    sizeof (struct guest)));
	DBGINIT(c_printf("%d mblock(s) @ 0x%x (0x%x)\n",
	    NMBLOCKS, config.mblocks, sizeof (struct mblock)));
	DBGINIT(c_printf("%d vcpu(s) @ 0x%x (0x%x)\n", NVCPUS, config.vcpus,
	    sizeof (struct vcpu)));
	DBGINIT(c_printf("%d strand(s) @ 0x%x (0x%x)\n",
	    NSTRANDS, config.strands, sizeof (struct strand)));
	DBGINIT(c_printf("%d ldc(s) @ 0x%x (0x%x)\n", MAX_HV_LDC_CHANNELS,
	    config.hv_ldcs, sizeof (struct ldc_endpoint)));
	DBGINIT(c_printf("%d sp_ldc(s) @ 0x%x (0x%x)\n", MAX_SP_LDC_CHANNELS,
	    config.sp_ldcs, sizeof (struct sp_ldc_endpoint)));
#ifdef CONFIG_PCIE
	DBGINIT(c_printf("%d pcie_bus(ses) @ 0x%x (0x%x)\n", NUM_PCIE_BUSSES,
	    config.pcie_busses, sizeof (struct pcie_device)));
#endif
#ifdef STANDALONE_NET_DEVICES
	DBGINIT(c_printf("%d network_device(s) @ 0x%x (0x%x)\n",
	    NUM_NETWORK_DEVICES, config.network_devices,
	    sizeof (struct network_device)));
#endif

	init_hv_internals();

#ifndef SIMULATION
	/*
	 * Download the hypervisor and guest MDs.
	 */
	c_bootload();
#endif

#if defined(CONFIG_PCIE) || defined(CONFIG_FIRE)
	init_pcie_buses();
#endif

	/*
	 * This configuration is done based on the MD contents
	 */
	preparse_hvmd((bin_md_t *)config.parse_hvmd);

	/*
	 * For error handling mark the strands we started as
	 * being the active strands.
	 * Even an idle (no vcpu) strand could/should be able
	 * to handle errors and interrupts if necessary.
	 */
	config.strand_active = config.strand_startset;
	DBGINIT(c_printf(
	    "Available strand mask = 0x%x\n", config.strand_startset));
	DBGINIT(c_printf("\tintrtgt = 0x%x\n", config.intrtgt));

	config_basics();
#ifdef CONFIG_SVC
	config_svcchans();
#endif

	/*
	 * Initial HV LDC config needs to happen before
	 * config_guests, so the console is properly set up.
	 */
	config_hv_ldcs();

#ifdef CONFIG_FPGA
	config_sp_ldcs();
#endif

#if 1	/* FIXME: All this to be removed - init config should be by reconfig */

	config_guests();
	/* Fake up a config of the memory blocks */
	/* see op_reconfig */
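	/*
	 * Each resource type below follows the same three-step pattern,
	 * run directly against the pre-parsed hypervisor MD: res_X_prep()
	 * is called first, res_X_parse() walks and validates the MD nodes,
	 * and res_X_commit(RESF_Config) makes the result live.  A parse
	 * failure for memory, console, pcie, mau or cwq aborts the
	 * hypervisor; missing network devices are tolerated.
	 */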
	do {
		bin_md_t *mdp;
		hvctl_res_error_t fail_code;
		md_element_t *failnodep;
		int fail_res_id;
		extern void res_memory_prep();
		extern void res_memory_commit(int flag);
		extern hvctl_status_t res_memory_parse(bin_md_t *mdp,
		    hvctl_res_error_t *fail_codep,
		    md_element_t **failnodepp, int *fail_res_idp);
		extern void res_console_prep();
		extern void res_console_commit(int flag);
		extern hvctl_status_t res_console_parse(bin_md_t *mdp,
		    hvctl_res_error_t *fail_codep,
		    md_element_t **failnodepp, int *fail_res_idp);
#ifdef CONFIG_PCIE
		extern void res_pcie_bus_prep();
		extern void res_pcie_bus_commit(int flag);
		extern hvctl_status_t res_pcie_bus_parse(bin_md_t *mdp,
		    hvctl_res_error_t *fail_codep,
		    md_element_t **failnodepp, int *fail_res_idp);
#endif
#ifdef CONFIG_CRYPTO
		extern void res_mau_prep();
		extern void res_mau_commit(int flag);
		extern hvctl_status_t res_mau_parse(bin_md_t *mdp,
		    hvctl_res_error_t *fail_codep,
		    md_element_t **failnodepp, int *fail_res_idp);
		extern void res_cwq_prep();
		extern void res_cwq_commit(int flag);
		extern hvctl_status_t res_cwq_parse(bin_md_t *mdp,
		    hvctl_res_error_t *fail_codep,
		    md_element_t **failnodepp, int *fail_res_idp);
#endif
#ifdef STANDALONE_NET_DEVICES
		extern void res_network_device_prep();
		extern void res_network_device_commit(int flag);
		extern hvctl_status_t res_network_device_parse(bin_md_t *mdp,
		    hvctl_res_error_t *fail_codep,
		    md_element_t **failnodepp, int *fail_res_idp);
#endif

		mdp = (bin_md_t *)config.parse_hvmd;

		res_memory_prep();
		if (res_memory_parse(mdp, &fail_code, &failnodep, &fail_res_id)
		    != HVctl_st_ok) {
			DBGINIT(c_printf("Memory configure failed\n"));
			c_hvabort();
		}
		res_memory_commit(RESF_Config);
		res_console_prep();
		if (res_console_parse(mdp, &fail_code, &failnodep, &fail_res_id)
		    != HVctl_st_ok) {
			DBGINIT(c_printf("Console configure failed\n"));
			c_hvabort();
		}
		res_console_commit(RESF_Config);
#ifdef CONFIG_PCIE
		res_pcie_bus_prep();
		if (res_pcie_bus_parse(mdp, &fail_code, &failnodep,
		    &fail_res_id)
		    != HVctl_st_ok) {
			DBGINIT(c_printf("pcie configure failed\n"));
			c_hvabort();
		}
		res_pcie_bus_commit(RESF_Config);
#endif
#ifdef STANDALONE_NET_DEVICES
		/*
		 * Note that we will allow a system to be configured
		 * without the network devices if they are not present
		 * in the MD as these are not required for correct
		 * operation.
		 */
		DBGINIT(c_printf("Configuring network devices\r\n"));
		res_network_device_prep();
		if (res_network_device_parse(mdp, &fail_code, &failnodep,
		    &fail_res_id) == HVctl_st_ok) {
			res_network_device_commit(RESF_Config);
			DBGINIT(c_printf("network device(s) configured OK\n"));
		} else {
			DBGINIT(c_printf(
			    "network device(s) configuration failed\n"));
		}
#endif
		config_vcpus();	/* do after guests */
#ifdef CONFIG_CRYPTO
		res_mau_prep();
		if (res_mau_parse(mdp, &fail_code, &failnodep,
		    &fail_res_id)
		    != HVctl_st_ok) {
			DBGINIT(c_printf("mau configure failed\n"));
			c_hvabort();
		}
		res_mau_commit(RESF_Config);
		res_cwq_prep();
		if (res_cwq_parse(mdp, &fail_code, &failnodep,
		    &fail_res_id)
		    != HVctl_st_ok) {
			DBGINIT(c_printf("cwq configure failed\n"));
			c_hvabort();
		}
		res_cwq_commit(RESF_Config);
#endif
	} while (0);
#else

	/*
	 * for the moment need to preserve the parse_hvmd
	 * beyond the reconfig - for setup_svc
	 */
	pres_hvmd = config.parse_hvmd;
	fake_reconfig(config.parse_hvmd);
	config.parse_hvmd = pres_hvmd;
#endif

#ifdef CONFIG_PCIE
	config_platform_pcie();
#endif

	accept_hvmd();

	/*
	 * Last step - make sure the configured guests get the boot
	 * cpus scheduled.
	 */
	kickoff_guests();

	DBGINIT(c_printf("c_start() done\n"));
}


/*
 * Initialise the basic internal data structures
 * HV uses before they are fully assigned.
 */
void
init_hv_internals()
{
	int i;

	DBGINIT(c_printf("\nInitialising internals\n"));

	DBGINIT(c_printf("WARNING TODO: \n"));
	DBGINIT(c_printf("\tSAVE_UE_GLOBALS may try to use the vcpu "
	    "scratchpad reg before it is initialized\n"));

	for (i = 0; i < NSTRANDS; i++)
		init_strand(i);

	for (i = 0; i < NGUESTS; i++)
		init_guest(i);

	init_mblocks();

	for (i = 0; i < NVCPUS; i++)
		init_vcpu(i);

	init_plat_hook();

	init_consoles();

	init_dummytsb();

	reloc_resource_info();

	reloc_hvmd_names();

	/* relocate vdev ops tables */
	reloc_plat_devops();

	/* relocate device instances */
	config.devinstancesp = devinstances;
	reloc_devinstances();
}

/*
 * Initialise the basic strand data structure
 */
void
init_strand(int i)
{
	strand_t *sp;
	int j;

	sp = config.strands;
	sp = &(sp[i]);

	sp->configp = &config;
	sp->id = i;

	/*
	 * DO NOT scrub the structure or the strand_stack because
	 * we're already using it!
	 */
	sp->current_slot = 0;
	for (j = 0; j < NUM_SCHED_SLOTS; j++) {
		sp->slot[j].action = SLOT_ACTION_NOP;
		sp->slot[j].arg = 0;
	}
	sp->err_seq_no = 0;
	sp->io_prot = 0;
	sp->io_error = 0;
}

/*
 * Used to relocate the dev ops of each virtual device
 */
void
reloc_devopsvec(devopsvec_t *devopsp)
{
	void **ptr;
	int i, limit;

	ptr = (void**)devopsp;
	limit = sizeof (*devopsp) / sizeof (*ptr);

	for (i = 0; i < limit; i++) {
		ptr[i] = reloc_ptr(ptr[i]);
	}
}


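/*
 * Walk the device instance table and relocate each instance's cookie and
 * ops pointers via reloc_ptr(), which subtracts the relocation offset
 * config.reloc - presumably mapping their link-time (PROM image)
 * addresses to the corresponding RAM locations.
 */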
void
reloc_devinstances()
{
	devinst_t *dp;
	int i;

	dp = config.devinstancesp;

	for (i = 0; i < NDEV_INSTS; i++) {
		dp[i].cookie = reloc_ptr(dp[i].cookie);
		dp[i].ops = reloc_ptr(dp[i].ops);
	}
}


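/*
 * Translate a pointer by subtracting the relocation offset config.reloc;
 * NULL passes through unchanged.  Given the debug output in c_start()
 * (PROM address = RAM address + config.reloc), this maps an address in
 * the PROM image to its equivalent location in RAM.
 */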
void *
reloc_ptr(void *ptr)
{
	return (ptr == NULL ? NULL :
	    (void*)(((uint64_t)ptr) - config.reloc));
}



/*
 * For a full power on, no one is sending the relevant strands the schedule
 * mondo to bring the boot cpus of each guest on line, so to get things
 * moving we put them there right from the get-go.
 */
void
kickoff_guests()
{
	int i;
	guest_t *gp;

	gp = config.guests;

	for (i = 0; i < NGUESTS; i++) {
		if (gp->state != GUEST_STATE_UNCONFIGURED) {
			fake_hvm_guest_start(i);
		}
		gp++;
	}
}

/*
 * Attempt to shut down a guest. Handles the cases where a guest exits,
 * requests a reset, or is stopped. Performs all the steps necessary
 * to put the guest in a state where it can be restarted. If the reason
 * for exiting is a reset, the guest is restarted before the strand
 * returns to go look for work.
 *
 * The assumption is that the guest state has already been set to one
 * of the transitional states (exiting or resetting). This prevents any
 * further attempts to change the state of the guest or any of its vcpus
 * while it is being shut down.
 */
hvctl_status_t
c_guest_exit(guest_t *guestp, int reason)
{
	int i;
	strand_t *mystrandp;
	vcpu_t **vcpulistp;
	ldc_endpoint_t *hvctlep;
#ifdef CONFIG_PCIE
	pcie_device_t *pciep;
#endif
#ifdef STANDALONE_NET_DEVICES
	network_device_t *netp;
#endif

	ASSERT((reason == GUEST_EXIT_MACH_EXIT) ||
	    (reason == GUEST_EXIT_MACH_SIR));

	ASSERT((guestp->state == GUEST_STATE_EXITING) ||
	    (guestp->state == GUEST_STATE_RESETTING));

	DBGINIT(c_printf("guest_exit: reason=0x%x, state=0x%x\n",
	    reason, guestp->state));

	mystrandp = c_mystrand();
	vcpulistp = &(guestp->vcpus[0]);

	/*
	 * Stop all vcpus in the guest, including the local strand.
	 */
	for (i = 0; i < NVCPUS; i++) {
		hvm_t hvxcmsg;
		vcpu_t *vp;

		vp = vcpulistp[i];
		if (vp == NULL)
			continue;

		ASSERT(vp->guest == guestp);

		/* for the local strand, just stop it immediately */
		if (vp->strand == mystrandp) {
			ASSERT((vp->status == CPU_STATE_RUNNING) ||
			    (vp->status == CPU_STATE_STOPPED) ||
			    (vp->status == CPU_STATE_SUSPENDED));

			DBGINIT(c_printf("stopping local vCPU 0x%x...\n",
			    vp->vid));

			c_desched_n_stop_vcpu(vp);
			continue;
		}

		/*
		 * Wait for the vcpu to finish any state transition it
		 * may have started before the exit was initiated.
		 */
		while ((vp->status == CPU_STATE_STARTING) ||
		    (vp->status == CPU_STATE_STOPPING))
			/* LINTED */
			/* do nothing */;

		if (vp->status == CPU_STATE_ERROR)
			continue;

		ASSERT(vp->status == CPU_STATE_RUNNING ||
		    vp->status == CPU_STATE_STOPPED ||
		    vp->status == CPU_STATE_SUSPENDED);

		hvxcmsg.cmd = HXCMD_STOP_VCPU;
		hvxcmsg.args.sched.vcpup = (uint64_t)vp;
		c_hvmondo_send(vp->strand, &hvxcmsg);

		DBGINIT(c_printf("stop sent to vCPU 0x%x...\n", vp->vid));

		DBGINIT(c_printf("waiting for vCPU 0x%x id=0x%x, st=0x%x...\n",
		    vp, vp->vid, vp->status));

		/* wait for the vcpu to stop */
		while ((vp->status != CPU_STATE_STOPPED) &&
		    (vp->status != CPU_STATE_ERROR))
			/* LINTED */
			/* do nothing */;

		if (vp->status == CPU_STATE_STOPPED)
			DBGINIT(c_printf("vCPU 0x%x has stopped\n", vp->vid));
		if (vp->status == CPU_STATE_ERROR)
			DBGINIT(c_printf("vCPU 0x%x is in error\n", vp->vid));
	}

	DBGINIT(c_printf("all vCPUs have stopped\n"));

	/*
	 * Remove all the channels this guest is currently using.
	 */
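	/*
	 * For every live, non-private endpoint that has a configured
	 * receive queue, the loop below tears the queues down, notifies
	 * the peer that the link has gone away (an interrupt to the peer
	 * guest's vcpu for guest-to-guest channels, or a state-change
	 * interrupt to the SP for SP channels), and then resets the
	 * endpoint.
	 */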
	for (i = 0; i < guestp->ldc_max_channel_idx; i++) {
		if ((guestp->ldc_endpoint[i].is_live == false) ||
		    (guestp->ldc_endpoint[i].is_private))
			continue;

		guestp->ldc_endpoint[i].tx_qsize = 0;
		/* skip channel if it is not configured */
		if (guestp->ldc_endpoint[i].rx_qsize == 0)
			continue;
		guestp->ldc_endpoint[i].rx_qsize = 0;

		switch (guestp->ldc_endpoint[i].target_type) {

		case LDC_GUEST_ENDPOINT:
		{
			/* send interrupt notify */
			ldc_endpoint_t *my_endpt, *t_endpt;
			vcpu_t *t_vcpup;
			uint64_t t_ldcid;

			my_endpt = &guestp->ldc_endpoint[i];
			t_ldcid = my_endpt->target_channel;
			t_endpt =
			    &(my_endpt->target_guest->ldc_endpoint[t_ldcid]);
			t_vcpup = (vcpu_t *)t_endpt->rx_mapreg.pcpup;

			/* FIXME: revoke imported/exported LDC memory */


			if (t_vcpup != NULL) {
				DBGINIT(c_printf("disable ldc 0x%x -> notify "
				    "guest 0x%x, ldc 0x%x, cpu 0x%x ..\n",
				    i, my_endpt->target_guest, t_ldcid,
				    t_vcpup));

				c_ldc_cpu_notify(t_endpt, t_vcpup);
			}
			break;
		}
		case LDC_SP_ENDPOINT:
		{
#if defined(CONFIG_PCIE) && !defined(DEBUG_LEGION)

			/* send interrupt notify */
			ldc_endpoint_t *my_endpt;
			struct sp_ldc_endpoint *sp_endpt;
			struct sram_ldc_qd *rx_qdp;
			uint64_t t_ldcid;

			my_endpt = &guestp->ldc_endpoint[i];
			t_ldcid = my_endpt->target_channel;
			sp_endpt =
			    &(((struct sp_ldc_endpoint *)
			    config.sp_ldcs)[t_ldcid]);

			rx_qdp = (struct sram_ldc_qd *)sp_endpt->rx_qd_pa;
			rx_qdp->state = 0;		/* link is down */
			rx_qdp->state_updated = 1;	/* indicate link reset */
			rx_qdp->state_notify = 1;	/* notify SP */

			DBGINIT(c_printf("disable ldc 0x%x -> notify "
			    "SP ldc 0x%x ..\n", i, t_ldcid));

			c_ldc_send_sp_intr(sp_endpt, SP_LDC_STATE_CHG);
#endif
			break;
		}
		case LDC_HV_ENDPOINT:
			break;
		}

		/* reset the endpoint */
		reset_ldc_endpoint(&guestp->ldc_endpoint[i]);
	}

	DBGINIT(c_printf("all LDCs have been reset\n"));


#ifdef CONFIG_PCIE
	/* reset attached devices like the PCI busses */
	pciep = config.pcie_busses;
	reset_platform_pcie_busses(guestp, pciep);
#endif

#ifdef STANDALONE_NET_DEVICES
	netp = config.network_devices;
	reset_platform_network_devices(guestp, netp);
#endif

	/* FIXME: cleanup service channels */

	/*
	 * At this point - unless we're going to restart the guest -
	 * we can mark the stop operation as complete.
	 */

	if (reason != GUEST_EXIT_MACH_SIR) {
		spinlock_enter(&guestp->state_lock);
		guestp->state = GUEST_STATE_STOPPED;
		spinlock_exit(&guestp->state_lock);
	}

	/*
	 * Do any pending delayed reconfiguration.
	 */
	spinlock_enter(&config.del_reconf_lock);
	if (config.del_reconf_gid == guestp->guestid) {
		DBGINIT(c_printf("performing delayed reconfig: guestid=0x%x\n",
		    guestp->guestid));
		commit_reconfig();
		config.del_reconf_gid = INVALID_GID;
	}
	spinlock_exit(&config.del_reconf_lock);

	/*
	 * Send a msg to the hvctl channel .. unless it
	 * is attached to the guest we just stopped ;-)
	 */
	hvctlep = config.hv_ldcs;
	hvctlep = &hvctlep[config.hvctl_ldc];

	if (hvctlep->target_guest != guestp) {
		DBGINIT(c_printf("sending async message on hvctl channel\n"));
		guest_state_notify(guestp);
	}

	/*
	 * If this was a stop or exit request
	 * we're done at this point.
	 */

	if (reason != GUEST_EXIT_MACH_SIR)
		return (HVctl_st_ok);

	ASSERT(guestp->state == GUEST_STATE_RESETTING);

	/*
	 * This is an SIR, so we have to restart a CPU from the
	 * guest. This is after a delayed reconfig, so the vcpu
	 * list in the guest is correct.
	 */
	(void) guest_ignition(guestp);
	return (HVctl_st_ok);
}

/*
 * Kicks off a guest. Returns true on success and false
 * if no boot cpus are to be found.
 */
bool_t
guest_ignition(guest_t *guestp)
{
	int i;
	vcpu_t *vcpup;
	strand_t *mystrandp;

	/*
	 * Caller should already have set the guest state to
	 * indicate that this is the start of a new guest.
	 */
	ASSERT(guestp->state == GUEST_STATE_RESETTING);

	/*
	 * look for the first valid cpu in the guest
	 */
	for (i = 0; i < NVCPUS; i++) {
		vcpup = guestp->vcpus[i];
		if (vcpup != NULL && vcpup->status != CPU_STATE_ERROR) {
			ASSERT(vcpup->status == CPU_STATE_STOPPED);
			goto found_bootcpu;
		}
	}
	return (false);

found_bootcpu:

	ASSERT(vcpup->guest == guestp);
	ASSERT(vcpup->status == CPU_STATE_STOPPED);

	DBGINIT(c_printf("Starting guest 0x%x - using vcpu_id 0x%x "
	    "(vid 0x%x)\n", guestp->guestid, vcpup->res_id, vcpup->vid));
	DBGINIT(c_printf("Strand active=0x%x,idle=0x%x\n",
	    config.strand_active, config.strand_idle));

	/*
	 * We deliver this using a simple HVXCALL message to schedule
	 * the appropriate vcpu.
	 */
	/*
	 * FIXME: replacing the hvxcall mbox with a queue means
	 * we can skip this check and simply send an x-call msg
	 * to schedule the vcpu regardless of whether it is our
	 * strand or not.
	 */

	mystrandp = c_mystrand();

	if (vcpup->strand == mystrandp) {
		int slot;

		slot = vcpup->strand_slot;
		mystrandp->slot[slot].action = SLOT_ACTION_RUN_VCPU;
		mystrandp->slot[slot].arg = (uint64_t)vcpup;
	} else {
		hvm_t hvxcmsg;
		/*
		 * Send the start message to the strand owning the
		 * boot cpu to schedule it.
		 *
		 * NOTE: There is a subtle race in here that will
		 * go away with hvctl transmit queues, but currently
		 * doesn't show because of the time it takes to scrub the
		 * guest and copy in its prom image.
		 *
		 * With the one HVCTL mailbox, once we start the boot cpu
		 * it can grab the HVCTL mailbox for the state change
		 * message, blocking it before we can send the command
		 * reply.  We busy-wait here to send the command reply,
		 * but the mbox never unblocks because it is this strand
		 * executing the domain manager at the guest level.
		 *
		 * This blocking can't happen with an hvctl transmit queue ..
		 */
		hvxcmsg.cmd = HXCMD_SCHED_VCPU;
		hvxcmsg.args.sched.vcpup = (uint64_t)vcpup;

		c_hvmondo_send(vcpup->strand, &hvxcmsg);
	}

	return (true);
}

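/*
 * Point config.dummytsbp at the statically allocated (and suitably
 * aligned) dummy TSB, and invalidate every entry: tag set to all ones
 * (an invalid tag), data set to zero.
 */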
void
init_dummytsb()
{
	typedef struct {
		uint64_t tag;
		uint64_t data;
	} tsb_entry_t;
	extern tsb_entry_t dummytsb[DUMMYTSB_ENTRIES];
	tsb_entry_t *tsbep;
	int i;

	DBGINIT(c_printf("init_dummytsb()\n"));

	/*
	 * Dummy tsb has to be carefully aligned so can't put it
	 * in the config struct directly.
	 */
	tsbep = &dummytsb[0];
	config.dummytsbp = tsbep;

	for (i = 0; i < DUMMYTSB_ENTRIES; i++) {
		tsbep[i].tag = (uint64_t)-1;	/* Invalid tag */
		tsbep[i].data = (uint64_t)0;
	}
}