Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / huron / src / res_cwq.c
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: res_cwq.c
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do not alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49#pragma ident "@(#)res_cwq.c 1.2 07/06/07 SMI"
50
51#include <stdarg.h>
52
53#include <sys/htypes.h>
54#include <hypervisor.h>
55#include <hprivregs.h>
56#include <traps.h>
57#include <mmu.h>
58#include <sun4v/asi.h>
59#include <vdev_intr.h>
60#include <vdev_ops.h>
61#include <ncs.h>
62#include <config.h>
63#include <cyclic.h>
64#include <vcpu.h>
65#include <strand.h>
66#include <guest.h>
67#include <memory.h>
68#include <support.h>
69#include <md.h>
70#include <abort.h>
71#include <proto.h>
72
73#ifdef CONFIG_CRYPTO
74
75static void res_cwq_commit_config(cwq_t *cwqp);
76static void res_cwq_commit_unconfig(cwq_t *cwqp);
77static void res_cwq_commit_modify(cwq_t *cwqp);
78
79static hvctl_status_t res_cwq_parse_1(bin_md_t *mdp, md_element_t *cwqnodep,
80 hvctl_res_error_t *fail_codep, int *fail_res_idp);
81
82static void setup_a_cwq(vcpu_t *vcpup, cwq_t *cwqp, uint64_t ino);
83extern bool_t strand_in_vcpu_list(uint64_t strand_id, vcpu_t *vcpu_list,
84 uint64_t *found_idx);
85static void unconfig_strand_from_cwq(cwq_t *cwqp, uint64_t strand_num);
86static void init_cwq(cwq_t *cwqp);
87static vcpu_t *cwq_to_vcpu(cwq_t *cwqp, int strand_id);
88
89/*
90 * Initialise N2 CWQ units
91 */
92void
93init_cwq_crypto_units()
94{
95 cwq_t *cwqp;
96 int i, j;
97
98 config.config_m.cwqs = &cwqs[0];
99 cwqp = (cwq_t *)config.config_m.cwqs;
100
101 for (i = 0; i < NCWQS; i++) {
102 cwqp[i].handle = 0LL;
103 cwqp[i].res_id = i;
104 cwqp[i].ino = 0LL;
105 cwqp[i].cpuset = 0LL;
106 cwqp[i].guest = NULL;
107 for (j = 0; j < NSTRANDS_PER_CORE; j++) {
108 cwqp[i].cpu_active[j] = 0;
109 }
110 cwqp[i].state = CWQ_STATE_UNCONFIGURED;
111 }
112}
113
114/*
115 * cwq support functions.
116 */
117void
118res_cwq_prep()
119{
120 cwq_t *cwqp;
121 int i;
122
123 cwqp = config.config_m.cwqs;
124
125 for (i = 0; i < NCWQS; i++, cwqp++) {
126 cwqp->pip.res.flags = (cwqp->state == CWQ_STATE_UNCONFIGURED) ?
127 RESF_Noop : RESF_Unconfig;
128 cwqp->pip.cpuset = 0;
129 }
130}
131
132hvctl_status_t
133res_cwq_parse(bin_md_t *mdp, hvctl_res_error_t *fail_codep,
134 md_element_t **failnodepp, int *fail_res_idp)
135{
136 md_element_t *mdep;
137 uint64_t arc_token;
138 uint64_t node_token;
139 md_element_t *cwqnodep;
140
141 mdp = (bin_md_t *)config.parse_hvmd;
142
143 mdep = md_find_node(mdp, NULL, MDNAME(cwqs));
144 if (mdep == NULL) {
145 DBG_CWQ(c_printf("Missing cwqs node in HVMD\n"));
146 *failnodepp = NULL;
147 *fail_res_idp = 0;
148 return (HVctl_st_badmd);
149 }
150
151 arc_token = MDARC(MDNAME(fwd));
152 node_token = MDNODE(MDNAME(cwq));
153
154 while (NULL != (mdep = md_find_node_by_arc(mdp, mdep, arc_token,
155 node_token, &cwqnodep))) {
156 hvctl_status_t status;
157 status = res_cwq_parse_1(mdp, cwqnodep, fail_codep,
158 fail_res_idp);
159 if (status != HVctl_st_ok) {
160 *failnodepp = cwqnodep;
161 return (status);
162 }
163 }
164 return (HVctl_st_ok);
165}
166
167hvctl_status_t
168res_cwq_parse_1(bin_md_t *mdp, md_element_t *cwqnodep,
169 hvctl_res_error_t *fail_codep, int *fail_res_idp)
170{
171 uint64_t strand_id, thread_id, cwq_id, gid, ino;
172 cwq_t *cwqp = NULL;
173 md_element_t *guestnodep, *cpunodep, *mdep;
174
175 DBG_CWQ(md_dump_node(mdp, cwqnodep));
176
177#if 0 /* { FIXME: we still index by PID of CWQ, not reource_id */
178 if (!md_node_get_val(mdp, cwqnodep, MDNAME(resource_id), &cwq_id)) {
179 DBG_CWQ(c_printf("Missing resource_id in cwq node\n"));
180 *fail_codep = HVctl_e_cwq_missing_id;
181 goto fail;
182 }
183 if (cwq_id >= NCWQS) {
184 DBG_CWQ(c_printf("Invalid resource_id in cwq node\n"));
185 *fail_codep = HVctl_e_cwq_invalid_id;
186 goto fail;
187 }
188#endif /* } */
189
190 mdep = cwqnodep;
191 while (NULL != (mdep = md_find_node_by_arc(mdp, mdep,
192 MDARC(MDNAME(back)), MDNODE(MDNAME(cpu)), &cpunodep))) {
193
194 if (!md_node_get_val(mdp, cpunodep, MDNAME(pid), &strand_id)) {
195 DBG_CWQ(c_printf("Missing PID in cpu node\n"));
196 *fail_codep = HVctl_e_cwq_missing_strandid;
197 goto fail;
198 }
199
200 if (strand_id >= NSTRANDS) {
201 DBG_CWQ(c_printf("Invalid PID in cpu node\n"));
202 *fail_codep = HVctl_e_cwq_invalid_strandid;
203 goto fail;
204 }
205
206 if (cwqp == NULL) {
207 cwq_id = strand_id >> STRANDID_2_COREID_SHIFT;
208 /* ASSERT(cwq_id < NCWQS); */
209 cwqp = config.config_m.cwqs;
210 cwqp = &(cwqp[cwq_id]);
211 cwqp->pip.cpuset = 0;
212 *fail_res_idp = cwq_id;
213 DBG_CWQ(c_printf("res_cwq_parse_1(0x%x)\n", cwq_id));
214 }
215
216 thread_id = strand_id & NSTRANDS_PER_CORE_MASK;
217
218 cwqp->pip.cpuset |= (1 << thread_id);
219
220 if (NULL == md_find_node_by_arc(mdp, cpunodep,
221 MDARC(MDNAME(back)), MDNODE(MDNAME(guest)),
222 &guestnodep)) {
223 DBG_CWQ(c_printf("Missing back arc to guest node in "
224 "cpu node\n"));
225 *fail_codep = HVctl_e_cwq_missing_guest;
226 goto fail;
227 }
228
229 if (!md_node_get_val(mdp, guestnodep, MDNAME(resource_id),
230 &gid)) {
231 DBG_CWQ(c_printf(
232 "Missing resource_id in guest node\n"));
233 *fail_codep = HVctl_e_guest_missing_id;
234 goto fail;
235 }
236 if (gid >= NGUESTS) {
237 DBG_CWQ(c_printf(
238 "Invalid resource_id in guest node\n"));
239 *fail_codep = HVctl_e_guest_invalid_id;
240 goto fail;
241 }
242 /* FIXME: check that all cpus belong to same guest */
243 cwqp->pip.guestid = gid;
244 }
245
246 /* Get ino value for this cwq */
247 if (!md_node_get_val(mdp, cwqnodep, MDNAME(ino), &ino)) {
248 DBG_CWQ(c_printf("WARNING: Missing ino in cwq node\n"));
249 *fail_codep = HVctl_e_cwq_missing_ino;
250 goto fail;
251 }
252
253 DBG_CWQ(c_printf("Virtual cwq 0x%x in guest 0x%x ino 0x%x\n",
254 cwq_id, gid, ino));
255
256
257 /*
258 * Now determine the delta - if relevent...
259 */
260 cwqp->pip.pid = cwq_id;
261 cwqp->pip.ino = ino;
262
263 /*
264 * We can configure an unconfigured CWQ.
265 * Cannot (yet) support the dynamic re-binding of
266 * a configured / running cwq, except to modify the
267 * set of vcpus bound to it, which is handled as part of unconfig
268 * or config.
269 */
270 DBG_CWQ(c_printf("\t\tCurrent cwq status = 0x%x\n", cwqp->state));
271
272 if (cwqp->state == CWQ_STATE_UNCONFIGURED) {
273 DBG_CWQ(c_printf("\t\tElected to config cwq\n"));
274 cwqp->pip.res.flags = RESF_Config;
275 } else {
276 if (cwqp->pid == cwqp->pip.pid &&
277 cwqp->guest->guestid == gid &&
278 cwqp->ino == ino &&
279 cwqp->cpuset != cwqp->pip.cpuset) {
280 DBG_CWQ(c_printf("\t\tElected to modify cwq\n"));
281 cwqp->pip.res.flags = RESF_Modify;
282 } else if (cwqp->pid == cwqp->pip.pid &&
283 cwqp->guest->guestid == gid &&
284 cwqp->ino == ino) {
285 DBG_CWQ(c_printf("\t\tElected to ignore cwq\n"));
286 cwqp->pip.res.flags = RESF_Noop;
287 } else {
288 DBG_CWQ(c_printf("\t\tFailed MD update - no "
289 "rebind live\n"));
290 *fail_codep = HVctl_e_cwq_rebind_na;
291 goto fail;
292 }
293 }
294
295 return (HVctl_st_ok);
296fail:;
297 return (HVctl_st_badmd);
298}
299
300hvctl_status_t
301res_cwq_postparse(hvctl_res_error_t *res_error, int *fail_res_id)
302{
303 return (HVctl_st_ok);
304}
305
306void
307res_cwq_commit(int flag)
308{
309 cwq_t *cwqp;
310 int i;
311
312 cwqp = config.config_m.cwqs;
313
314 for (i = 0; i < NCWQS; i++, cwqp++) {
315 /* if not this ops turn move on */
316 DBG_CWQ(c_printf("res_cwq_commit: cwqid 0x%x : state 0x%x : "
317 "flags 0x%x - opflag 0x%x\n",
318 cwqp->pid, cwqp->state, cwqp->pip.res.flags, flag));
319
320 if (cwqp->pip.res.flags != flag)
321 continue;
322
323 switch (cwqp->pip.res.flags) {
324 case RESF_Noop:
325 DBG_CWQ(c_printf("cwq 0x%x : noop\n", i));
326 break;
327 case RESF_Unconfig:
328 DBG_CWQ(c_printf("cwq 0x%x : unconfig\n", i));
329 res_cwq_commit_unconfig(cwqp);
330 break;
331 case RESF_Config:
332 DBG_CWQ(c_printf("cwq 0x%x : config\n", i));
333 res_cwq_commit_config(cwqp);
334 break;
335 case RESF_Rebind:
336 DBG_CWQ(c_printf("cwq 0x%x : rebind\n", i));
337 ASSERT(0); /* not supported */
338 break;
339 case RESF_Modify:
340 DBG_CWQ(c_printf("cwq 0x%x : modify\n", i));
341 res_cwq_commit_modify(cwqp);
342 break;
343 default:
344 ASSERT(0);
345 }
346 cwqp->pip.res.flags = RESF_Noop; /* cleanup */
347 }
348}
349
/*
 * Commit a pending Config operation: bind the cwq to its guest and to
 * every vcpu running on a strand in the pending cpuset, then configure
 * the guest's virtual interrupt (vino) for the unit.
 *
 * Note: this assigns bindings only; it does not schedule the cwq.
 * Aborts the hypervisor (c_hvabort) if a strand in the cpuset has no
 * backing vcpu.
 */
static void
res_cwq_commit_config(cwq_t *cwqp)
{
	guest_t *guestp;
	vcpu_t *cpup;
	uint64_t strand_num, vcpu_num;
	int i;

	DBG_CWQ(c_printf("res_cwq_commit_config\n"));

	/*
	 * Assign the cwq its bound vcpu.
	 * Note: this does not schedule the cwq.
	 */
	cwqp->pid = cwqp->pip.pid;

	/* Record this cwq in the owning guest's cwq table, keyed by pid. */
	guestp = config.guests;
	guestp = &(guestp[cwqp->pip.guestid]);
	ASSERT(guestp->guestid == cwqp->pip.guestid);
	ASSERT(guestp->cwqs[cwqp->pid] == NULL);
	guestp->cwqs[cwqp->pid] = cwqp;

	/*
	 * Initialise the remainder of the cwq struct. Need to do this
	 * once for each cpu bound to this cwq.
	 */

	/*
	 * Loop through the cpus attached to this cwq
	 * FIXME: make independent of cpu arch
	 */
	/* First strand of the core owning this cwq (pid == core id). */
	strand_num = cwqp->pid << STRANDID_2_COREID_SHIFT;
	for (i = 0; i < NSTRANDS_PER_CORE; ++i, ++strand_num) {
		/* Skip cpus not being bound to this cwq */
		if ((cwqp->pip.cpuset & (1 << i)) == 0) {
			DBG_CWQ(c_printf("Skipping thread id %d for cwq %d "
			    "(pip.cpuset 0x%x)\n",
			    i, cwqp->pid, cwqp->pip.cpuset));
			continue;
		}
		cpup = config.vcpus;

		/* Convert strand to vid */
		if (strand_in_vcpu_list(strand_num, cpup, &vcpu_num)) {
			cpup = &(cpup[vcpu_num]);
		} else {
			/* Bound strand with no vcpu: fatal config error. */
			DBG_CWQ(c_printf(
			    "strand 0x%x not found!\n", strand_num));
			c_hvabort();
		}

		cpup->cwqp = cwqp;

		DBG_CWQ(c_printf("\tBinding cwq (pid = 0x%x) to vcpu 0x%x on "
		    "strand 0x%x in guest 0x%x\n",
		    cwqp->pid, vcpu_num, strand_num, cwqp->pip.guestid));

		/*
		 * setup_a_cwq() also sets cwqp->guest (from the vcpu),
		 * which the vino config call below relies on.
		 * NOTE(review): the vino is configured once per bound
		 * strand — presumably idempotent; confirm.
		 */
		setup_a_cwq(cpup, cwqp, cwqp->pip.ino);
		config_a_guest_device_vino(cwqp->guest, cwqp->pip.ino,
		    DEVOPS_VDEV);
	}
}
412
413static void
414setup_a_cwq(vcpu_t *vcpup, cwq_t *cwqp, uint64_t ino)
415{
416 extern void c_setup_cwq(vcpu_t *, uint64_t, config_t *);
417
418 c_setup_cwq(vcpup, ino, &config);
419 cwqp->guest = vcpup->guest;
420}
421
/*
 * Commit a pending Unconfig operation: for every strand currently in the
 * cwq's cpuset, tear down the guest's vino for the unit and unbind the
 * strand.  When the last strand is removed, unconfig_strand_from_cwq()
 * resets the cwq to CWQ_STATE_UNCONFIGURED.
 *
 * Only legal on a configured cwq (RUNNING or ERROR) that has an owning
 * guest.  Aborts the hypervisor if a bound strand has no backing vcpu.
 */
static void
res_cwq_commit_unconfig(cwq_t *cwqp)
{
	vcpu_t *cpup;
	uint64_t strand_num, vcpu_num;
	int i;

	ASSERT(cwqp->state == CWQ_STATE_RUNNING ||
	    cwqp->state == CWQ_STATE_ERROR);

	ASSERT(cwqp->guest != NULL);

	/* First strand of the core owning this cwq (pid == core id). */
	strand_num = cwqp->pid << STRANDID_2_COREID_SHIFT;
	for (i = 0; i < NSTRANDS_PER_CORE; ++i, ++strand_num) {
		/* Skip cpus not bound to this cwq */
		if ((cwqp->cpuset & (1 << i)) == 0) {
			DBG_CWQ(c_printf(
			    "Skipping thread id %d (strand 0x%x) for "
			    "cwq %d (cpuset 0x%x)\n",
			    i, strand_num, cwqp->pid, cwqp->cpuset));
			continue;
		}

		cpup = config.vcpus;

		/* Convert strand to vid */
		DBG_CWQ(c_printf(
		    "\tUnconfig cwq (pid = 0x%x) from strand 0x%x in "
		    "guest 0x%x\n", cwqp->pid, strand_num,
		    cwqp->guest->guestid));

		if (strand_in_vcpu_list(strand_num, cpup, &vcpu_num)) {
			cpup = &(cpup[vcpu_num]);
			DBG_CWQ(c_printf("\tstrand 0x%x is vcpu 0x%x\n",
			    strand_num, vcpu_num));
		} else {
			/* Bound strand with no vcpu: fatal config error. */
			DBG_CWQ(c_printf(
			    "strand 0x%x not found!\n", strand_num));
			c_hvabort();
		}

		/*
		 * Tear down the vino before unbinding: the last call to
		 * unconfig_strand_from_cwq() clears cwqp->guest, which
		 * the vino call needs.
		 * NOTE(review): the vino is unconfigured once per bound
		 * strand — presumably idempotent; confirm.
		 */
		unconfig_a_guest_device_vino(cwqp->guest, cwqp->ino,
		    DEVOPS_VDEV);
		unconfig_strand_from_cwq(cwqp, strand_num);
	}
}
468
469static void
470unconfig_strand_from_cwq(cwq_t *cwqp, uint64_t strand_num)
471{
472 guest_t *guestp;
473 int thread_id;
474
475 guestp = cwqp->guest;
476
477 /*
478 * Check if already unconfigured!
479 */
480 ASSERT(guestp->cwqs[cwqp->pid] != NULL);
481 ASSERT(cwqp->state != CWQ_STATE_UNCONFIGURED);
482
483 thread_id = strand_num & NSTRANDS_PER_CORE_MASK;
484
485 /*
486 * Force the cpu_active entry to 0.
487 * It is possible to come through the unconfig sequence
488 * without having gone through stop_cwq. However, we
489 * assured that when we come into unconfig_cwq that the
490 * respective cpu is stopped.
491 */
492 cwqp->cpu_active[thread_id] = 0;
493
494 /*
495 * Remove cpu from CWQ's cpuset and if this
496 * is the last one, then clear the queue structure.
497 */
498 DBG_CWQ(c_printf(
499"\tunconfig_strand_from_cwq: cwq %d thread %d (strand %d) guest %d\n",
500 cwqp->pid, thread_id, strand_num, guestp->guestid));
501
502 cwqp->cpuset &= ~(1 << thread_id);
503
504 DBG_CWQ(c_printf("\tnew cpuset: %d\n", cwqp->cpuset));
505
506 if (cwqp->cpuset == 0) {
507 init_cwq(cwqp);
508 cwqp->state = CWQ_STATE_UNCONFIGURED;
509 cwqp->guest->cwqs[cwqp->pid] = NULL;
510 cwqp->guest = NULL;
511 }
512}
513
514static void
515init_cwq(cwq_t *cwqp)
516{
517 cwqp->queue.cq_dr_base_ra = 0;
518 cwqp->queue.cq_base = 0;
519 cwqp->queue.cq_last = 0;
520 cwqp->queue.cq_head = 0;
521 cwqp->queue.cq_head_marker = 0;
522 cwqp->queue.cq_tail = 0;
523 cwqp->queue.cq_nentries = 0;
524 cwqp->queue.cq_busy = 0;
525}
526
527/*
528 * The only allowed modification on a CWQ is the list of vcpus which
529 * are bound to it.
530 */
531void
532res_cwq_commit_modify(cwq_t *cwqp)
533{
534 uint64_t strand_id, thread_id;
535
536 ASSERT(cwqp->state == CWQ_STATE_RUNNING ||
537 cwqp->state == CWQ_STATE_ERROR);
538 ASSERT(cwqp->guest != NULL);
539
540 /*
541 * Compare old & new cpusets, configuring or unconfiguring
542 * cpu->cwq bindings as appropriate.
543 */
544 /*
545 * We can't determine which cpu->cwq bindings to unconfigure
546 * by walking the available vcpus, as they've already been
547 * unconfigured, so we find them by comparing the old & new
548 * cpuset mask values.
549 */
550 strand_id = cwqp->pid << STRANDID_2_COREID_SHIFT;
551 for (thread_id = 0; thread_id < NSTRANDS_PER_CORE;
552 ++thread_id, ++strand_id) {
553 uint64_t mask = 1LL << thread_id;
554
555 if ((cwqp->cpuset & mask) == (cwqp->pip.cpuset & mask)) {
556 DBG_CWQ(c_printf(
557 "\tIgnoring cwq (pid = 0x%x) on strand "
558 "0x%x in guest 0x%x\n",
559 cwqp->pid, strand_id, cwqp->pip.guestid));
560 continue;
561 }
562 /* Configure? */
563 if ((cwqp->pip.cpuset & mask) != 0) {
564 vcpu_t *cpup;
565
566 cpup = cwq_to_vcpu(cwqp, strand_id);
567 ASSERT(cpup != NULL);
568 cpup->cwqp = cwqp;
569
570 DBG_CWQ(c_printf("\tBinding cwq (pid = 0x%x) to strand "
571 "0x%x in guest 0x%x\n",
572 cwqp->pid, strand_id, cwqp->pip.guestid));
573
574 setup_a_cwq(cpup, cwqp, cwqp->pip.ino);
575 } else {
576 /* Unconfigure */
577 DBG_CWQ(c_printf("\tUnbinding cwq %d from strand 0x%x "
578 "in guest 0x%x\n",
579 cwqp->pid, strand_id, cwqp->guest->guestid));
580 unconfig_strand_from_cwq(cwqp, strand_id);
581 }
582 }
583}
584
585static vcpu_t *
586cwq_to_vcpu(cwq_t *cwqp, int strand_id)
587{
588 vcpu_t *cpup;
589 int i;
590
591 /*
592 * Walk all vcpus bound to guest which owns cwq, and find
593 * one with strand_id passed in.
594 */
595 for (i = 0; i < NVCPUS; ++i) {
596 cpup = cwqp->guest->vcpus[i];
597
598 /* Is vcpu mapped to guest? */
599 if (cpup == NULL)
600 continue;
601
602 ASSERT(cpup->strand != NULL);
603 if (cpup->strand->id == strand_id) {
604 DBG_CWQ(c_printf(
605 "\tcwq_to_vcpu: cwq %d strand %d is vcpu %d\n",
606 cwqp->pid, strand_id, cpup->vid));
607 return (cpup);
608 }
609 }
610
611 return (NULL);
612}
613
614#endif /* CONFIG_CRYPTO */