Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / common / src / res_memory.c
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: res_memory.c
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49#pragma ident "@(#)res_memory.c 1.5 07/06/07 SMI"
50
51#include <stdarg.h>
52
53#include <sys/htypes.h>
54#include <traps.h>
55#include <cache.h>
56#include <mmu.h>
57#include <sun4v/asi.h>
58#include <vdev_intr.h>
59#include <ncs.h>
60#include <cyclic.h>
61#include <support.h>
62#include <strand.h>
63#include <vcpu.h>
64#include <guest.h>
65#include <memory.h>
66#include <pcie.h>
67#include <vdev_ops.h>
68#include <fpga.h>
69#include <ldc.h>
70#include <config.h>
71#include <offsets.h>
72#include <hvctl.h>
73#include <md.h>
74#include <abort.h>
75#include <hypervisor.h>
76#include <proto.h>
77#include <debug.h>
78
79/*
80 * (re)-configuration code to handle HV memory resources
81 *
82 * We could use the resource identifier in each mblock of the
83 * Hypervisor MD to simply identify memory blocks for (re/un)config
84 * however for the moment - until code cleanup - we still treat memory
85 * blocks as components of the guest structure for the purpose of
86 * addition and removal.
87 */
88
89static void res_memory_commit_config(mblock_t *mbp);
90static void res_memory_commit_unconfig(mblock_t *mbp);
91static void res_memory_commit_modify(mblock_t *mbp);
92
93
94
95void
96init_ra2pa_segment(ra2pa_segment_t *rsp)
97{
98 rsp->limit = 0LL;
99 rsp->base = -1LL;
100 rsp->offset = -1LL;
101 rsp->flags = INVALID_SEGMENT;
102}
103
104void
105assign_ra2pa_segments(guest_t *guestp, uint64_t real_base,
106 uint64_t size, uint64_t ra2pa_offset, uint8_t flags)
107{
108 uint64_t idx;
109 uint64_t eidx;
110 uint64_t limit;
111
112 DBG(c_printf("\t\tG 0x%x : [0x%x + 0x%x] -> 0x%x : slots",
113 guestp->guestid, real_base, size, real_base + ra2pa_offset));
114
115 limit = real_base+size;
116
117 idx = real_base >> RA2PA_SHIFT;
118 eidx = (limit-1) >> RA2PA_SHIFT;
119
120 ASSERT(eidx < NUM_RA2PA_SEGMENTS);
121
122 while (idx <= eidx) {
123 ra2pa_segment_t *sp;
124
125 DBG(c_printf(" 0x%x", idx));
126
127 sp = &(guestp->ra2pa_segment[idx]);
128
129 sp->base = real_base;
130 sp->limit = limit;
131 sp->offset = ra2pa_offset;
132 sp->flags = flags;
133 idx++;
134 }
135
136 DBG(c_printf("\n"));
137}
138
139
140
141void
142clear_ra2pa_segments(guest_t *guestp, uint64_t real_base,
143 uint64_t size)
144{
145 uint64_t idx;
146 uint64_t eidx;
147 uint64_t limit;
148
149 DBG(c_printf("\t\tG 0x%x : [0x%x + 0x%x] -> XX : slots",
150 guestp->guestid, real_base, size));
151
152 limit = real_base+size;
153
154 idx = real_base >> RA2PA_SHIFT;
155 eidx = (limit-1) >> RA2PA_SHIFT;
156
157 ASSERT(eidx < NUM_RA2PA_SEGMENTS);
158
159 while (idx <= eidx) {
160 ra2pa_segment_t *sp;
161
162 DBG(c_printf(" 0x%x", idx));
163
164 sp = &(guestp->ra2pa_segment[idx]);
165 init_ra2pa_segment(sp);
166 idx++;
167 }
168
169 DBG(c_printf("\n"));
170}
171
172
173void
174init_mblocks()
175{
176 mblock_t *mbp;
177 int i;
178
179 mbp = config.mblocks;
180
181 for (i = 0; i < NMBLOCKS; i++, mbp++) {
182 mbp->state = MBLOCK_STATE_UNCONFIGURED;
183 }
184}
185
186
187 /*
188 * This goes through a global pool of memory blocks
189 * allocated by Zeus.
190 */
191void
192res_memory_prep()
193{
194 mblock_t *mbp;
195 int i;
196
197 mbp = config.mblocks;
198
199 for (i = 0; i < NMBLOCKS; i++, mbp++) {
200 mbp->pip.res.flags = mbp->state == MBLOCK_STATE_UNCONFIGURED ?
201 RESF_Noop : RESF_Unconfig;
202 }
203}
204
205
/*
 * Parse every "mblock" node reachable from the "memory" node of the
 * pending hypervisor MD.  For each mblock resource id, the requested
 * (membase, memsize, realbase, guestid) tuple is recorded in
 * config.mblocks[id].pip together with a pending operation flag:
 * RESF_Config for a newly appearing block, RESF_Modify when any
 * parameter of a configured block changed, RESF_Noop when unchanged.
 * res_memory_prep() has already defaulted configured blocks to
 * RESF_Unconfig, so blocks absent from the MD are torn down at commit.
 *
 * Returns HVctl_st_ok on success.  On failure returns HVctl_st_badmd
 * with *fail_codep / *fail_res_idp describing the offending node.
 *
 * NOTE(review): the 'mdp' argument is immediately overwritten with
 * config.parse_hvmd, so the caller-supplied value is ignored.
 */
hvctl_status_t
res_memory_parse(bin_md_t *mdp, hvctl_res_error_t *fail_codep,
	md_element_t **failnodepp, int *fail_res_idp)
{
	md_element_t *mdep;
	uint64_t arc_token;
	uint64_t node_token;
	md_element_t *mbnodep;
	mblock_t *mbp;
	int i;

	mdp = (bin_md_t *)config.parse_hvmd;

	mdep = md_find_node(mdp, NULL, MDNAME(memory));
	if (mdep == NULL) {
		/*
		 * NOTE(review): message says "cpus" but this is the memory
		 * node — looks like copy/paste from the cpu parser.  Also
		 * *fail_codep is not set on this path, unlike every other
		 * failure below — confirm callers tolerate that.
		 */
		DBG(c_printf("Missing cpus node in HVMD\n"));
		*failnodepp = NULL;
		*fail_res_idp = 0;
		goto fail;
	}

	arc_token = MDARC(MDNAME(fwd));
	node_token = MDNODE(MDNAME(mblock));

	/* Walk each mblock node hanging off the memory node via fwd arcs. */
	while (NULL != (mdep = md_find_node_by_arc(mdp, mdep, arc_token,
	    node_token, &mbnodep))) {

		uint64_t guestid;
		uint64_t res_id;
		uint64_t membase;
		uint64_t memsize;
		uint64_t realbase;
		md_element_t *guestnodep;

		if (!md_node_get_val(mdp, mbnodep,
		    MDNAME(resource_id), &res_id)) {
			DBG(c_printf("Missing resource_id in mblock node\n"));
			*fail_codep = HVctl_e_mblock_missing_id;
			*fail_res_idp = 0;
			goto fail;
		}
		if (res_id >= NMBLOCKS) {
			DBG(c_printf("Invalid resource_id in mblock node\n"));
			*fail_codep = HVctl_e_mblock_invalid_id;
			*fail_res_idp = 0;
			goto fail;
		}

		/* From here on, failures are attributed to this mblock. */
		*fail_res_idp = res_id;

		DBG(c_printf("res_memory_parse(0x%x)\n", res_id));

		if (!md_node_get_val(mdp, mbnodep, MDNAME(membase), &membase)) {
			DBG(c_printf("Missing membase in mblock node\n"));
			*fail_codep = HVctl_e_mblock_missing_membase;
			goto fail;
		}
		if (!md_node_get_val(mdp, mbnodep, MDNAME(memsize), &memsize)) {
			DBG(c_printf("Missing memsize in mblock node\n"));
			*fail_codep = HVctl_e_mblock_missing_memsize;
			goto fail;
		}
		/* FIXME: test legit PA range(s) */
		/* Rejects zero-size blocks and 64-bit wrap of base+size. */
		if ((membase + memsize) <= membase) {
			DBG(c_printf("Invalid physical address range in "
			    "mblock node\n"));
			*fail_codep = HVctl_e_mblock_invalid_parange;
			goto fail;
		}
		if (!md_node_get_val(mdp, mbnodep,
		    MDNAME(realbase), &realbase)) {
			DBG(c_printf("Missing realbase in mblock node\n"));
			*fail_codep = HVctl_e_mblock_missing_realbase;
			goto fail;
		}
		/* FIXME: test legit range(s) */
		if ((realbase + memsize) <= realbase) {
			DBG(c_printf("Invalid physical address range in "
			    "mblock node\n"));
			*fail_codep = HVctl_e_mblock_invalid_rarange;
			goto fail;
		}

		/* Which guest is this mblock assigned to? */

		if (NULL == md_find_node_by_arc(mdp, mbnodep,
		    MDARC(MDNAME(back)), MDNODE(MDNAME(guest)), &guestnodep)) {
			DBG(c_printf("Missing back arc to guest node in "
			    "mblock node\n"));
			*fail_codep = HVctl_e_mblock_missing_guest;
			goto fail;
		}

		if (!md_node_get_val(mdp, guestnodep, MDNAME(resource_id),
		    &guestid)) {
			DBG(c_printf("Missing gid in guest node\n"));
			*fail_codep = HVctl_e_guest_missing_id;
			goto fail;
		}

		/*
		 * NOTE: This is probably redundant given that we
		 * have likely parsed the guest nodes once already
		 */
		if (guestid >= NGUESTS) {
			DBG(c_printf("Invalid gid in guest node\n"));
			*fail_codep = HVctl_e_guest_invalid_id;
			goto fail;
		}


		/*
		 * Now determine if any changes are relevant, and stash the
		 * parsed parameters as this mblock's pending state.
		 */

		mbp = config.mblocks;
		mbp = &(mbp[res_id]);

		mbp->pip.membase = membase;
		mbp->pip.memsize = memsize;
		mbp->pip.realbase = realbase;
		mbp->pip.guestid = guestid;

		if (mbp->state == MBLOCK_STATE_UNCONFIGURED) {
			DBG(c_printf("\t\tElected to config mblock 0x%x\n",
			    res_id));
			mbp->pip.res.flags = RESF_Config;
		} else {
			/* an mblock cannot be rebound between guests */
			if (mbp->guestid != guestid) {
				DBG(c_printf("Rebinding mblocks not "
				    "allowed\n"));
				*fail_codep = HVctl_e_mblock_rebind_na;
				goto fail;
			}

			if (mbp->membase == membase &&
			    mbp->memsize == memsize &&
			    mbp->realbase == realbase &&
			    mbp->guestid == guestid) {
				mbp->pip.res.flags = RESF_Noop;
			} else {
				mbp->pip.res.flags = RESF_Modify;
			}
		}
	}


	/*
	 * As a final check in the parse stage:
	 * Only allow a config/unconfig or rebind if guest is !active
	 * LDOMS20: Future hypervisors supporting a dynamic memory
	 * reconfigure will remove the check below, and must in-line
	 * force a TLB flush for each of the vcpus who's mblocks are
	 * unconfigured.
	 */

	mbp = config.mblocks;

	for (i = 0; i < NMBLOCKS; i++, mbp++) {
		guest_t *gp;

		if (mbp->pip.res.flags == RESF_Noop) continue;

		gp = config.guests;

		if (mbp->state == MBLOCK_STATE_CONFIGURED) {
			gp = &(gp[mbp->guestid]);
		} else {
			gp = &(gp[mbp->pip.guestid]);
		}
		/*
		 * NOTE(review): 'gp' is computed but never used — the
		 * guest-not-active check described in the comment above
		 * appears to be unimplemented here; confirm intent.
		 */
	}

	return (HVctl_st_ok);
fail:;
	return (HVctl_st_badmd);
}
383
384hvctl_status_t
385res_memory_postparse(hvctl_res_error_t *res_error, int *fail_res_id)
386{
387 return (HVctl_st_ok);
388}
389
390void
391res_memory_commit(int flag)
392{
393 mblock_t *mbp;
394 int i;
395
396 mbp = config.mblocks;
397
398 for (i = 0; i < NMBLOCKS; i++, mbp++) {
399 /* if not this ops turn move on */
400 if (mbp->pip.res.flags != flag) continue;
401
402 switch (mbp->pip.res.flags) {
403 case RESF_Noop:
404 DBG(c_printf("mblock 0x%x : noop\n", i));
405 break;
406 case RESF_Unconfig:
407 res_memory_commit_unconfig(mbp);
408 break;
409 case RESF_Config:
410 res_memory_commit_config(mbp);
411 break;
412 case RESF_Rebind:
413 DBG(c_printf("guest 0x%x : rebind\n", i));
414 ASSERT(0); /* not supported */
415 break;
416 case RESF_Modify:
417 res_memory_commit_modify(mbp);
418 break;
419 default:
420 ASSERT(0);
421 }
422
423 mbp->pip.res.flags = RESF_Noop; /* cleanup */
424 }
425}
426
427
428
429
430void
431res_memory_commit_config(mblock_t *mbp)
432{
433 guest_t *gp;
434
435 ASSERT(mbp->state == MBLOCK_STATE_UNCONFIGURED);
436
437 mbp->realbase = mbp->pip.realbase;
438 mbp->membase = mbp->pip.membase;
439 mbp->memsize = mbp->pip.memsize;
440 mbp->guestid = mbp->pip.guestid;
441
442 gp = config.guests;
443 gp = &(gp[mbp->guestid]);
444
445 assign_ra2pa_segments(gp, mbp->realbase, mbp->memsize,
446 mbp->membase - mbp->realbase, MEM_SEGMENT);
447
448 mbp->state = MBLOCK_STATE_CONFIGURED;
449}
450
451
452void
453res_memory_commit_unconfig(mblock_t *mbp)
454{
455 guest_t *gp;
456
457 ASSERT(mbp->state == MBLOCK_STATE_CONFIGURED);
458
459 gp = config.guests;
460 gp = &(gp[mbp->guestid]);
461
462 clear_ra2pa_segments(gp, mbp->realbase, mbp->memsize);
463
464 mbp->state = MBLOCK_STATE_UNCONFIGURED;
465}
466
467
468
 /*
 * Modify is somewhat tricky, as it involves updating all the
 * memory segments to account for what may have been movement
 * shrinkage, or growth in the given mblock.
 * Since this is constrained to be done while the assigned guest
 * is not alive, we implement this simply as a call to unconfig
 * followed by a call to config.
 * LDOMS20: This interface has to be enhanced to force a TLB flush
 * for all the active vcpus on the affected guest to remove
 * any stale TLB and permanent mappings. The perm mapping table
 * also needs scouring for stale entries.
 */
/*
 * Apply a pending modification to a configured mblock by tearing down
 * the old RA->PA segments and re-installing from mbp->pip (the unconfig
 * uses the live fields, the config then promotes the pending ones).
 * Order matters: unconfig must precede config.
 */
void
res_memory_commit_modify(mblock_t *mbp)
{
	res_memory_commit_unconfig(mbp);
	res_memory_commit_config(mbp);
}