Commit | Line | Data |
---|---|---|
920dae64 AT |
1 | /* |
2 | * ========== Copyright Header Begin ========================================== | |
3 | * | |
4 | * OpenSPARC T2 Processor File: mem.c | |
5 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. | |
6 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES. | |
7 | * | |
8 | * The above named program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public | |
10 | * License version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * The above named program is distributed in the hope that it will be | |
13 | * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public | |
18 | * License along with this work; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. | |
20 | * | |
21 | * ========== Copyright Header End ============================================ | |
22 | */ | |
23 | /* | |
24 | * Copyright 2007 Sun Microsystems, Inc. All rights reserved. | |
25 | * Use is subject to license terms. | |
26 | */ | |
27 | #pragma ident "@(#)mem.c 1.24 07/04/16 SMI" | |
28 | ||
/*
 * Complete the parsing of a generic memory device.
 *
 * Pick out the relevant info and allocate the
 * simcpu_t structure in the config_cpu that
 * gets handed to us.
 */
36 | ||
37 | #include <stdio.h> | |
38 | #include <stdlib.h> | |
39 | #include <unistd.h> | |
40 | #include <fcntl.h> | |
41 | #include <sys/types.h> | |
42 | #include <sys/stat.h> | |
43 | #include <sys/mman.h> | |
44 | #include <errno.h> | |
45 | #include <fcntl.h> | |
46 | #if VDISK_SUPPORT /* { */ | |
47 | #include <sys/dkio.h> | |
48 | #include <sys/dklabel.h> | |
49 | #include <sys/vtoc.h> | |
50 | #endif /* VDISK_SUPPORT } */ | |
51 | #include <strings.h> | |
52 | ||
53 | #include "basics.h" | |
54 | #include "allocate.h" | |
55 | #include "lexer.h" | |
56 | #include "simcore.h" | |
57 | #include "config.h" | |
58 | #include "dumpinfo.h" | |
59 | #include "strutil.h" | |
60 | #include "fatal.h" | |
61 | #include "options.h" | |
62 | ||
63 | ||
64 | /* | |
65 | * static only to avoid name clashes with other | |
66 | * modules ... in reality these are exported | |
67 | * via the cpu_type_t struct pointers | |
68 | */ | |
69 | static void mem_parse(config_dev_t *); | |
70 | static void mem_init(config_dev_t *); | |
71 | static void mem_dump(config_dev_t *); | |
72 | static tpaddr_t mem_cacheable(config_addr_t *, dev_access_t type, | |
73 | tpaddr_t off, uint8_t **cbp); | |
74 | static bool_t mem_cpu_access(simcpu_t *, config_addr_t *, tpaddr_t off, | |
75 | maccess_t op, uint64_t *regp); | |
76 | ||
/*
 * Device operations vector for the generic "memory" device type.
 * Registered with the config machinery; parse/init/dump run at
 * configuration time, the last two at simulated-access time.
 */
dev_type_t dev_type_memory = {
	"memory",
	mem_parse,	/* parse conf-file directives into a mem_dev_t */
	mem_init,	/* mmap the backing store once parsing is done */
	mem_dump,	/* configuration dump (currently a no-op) */
	mem_cacheable,	/* hand out direct pointers for cacheable access */
	mem_cpu_access, /* shouldnt be called if we're a RAM */
	DEV_MAGIC
};
86 | ||
/* Forward declaration: one record of manually-supplied memory content. */
typedef struct MEM_CONTENT mem_content_t;

/*
 * Per-"load" directive record: one file (or virtual-disk slice) that
 * mem_init() will mmap/load into the memory segment.
 */
typedef struct {
	char *fnamep;			/* file to load or mmap */
	uint64_t fileextent;	/* how much of file to load in */
	uint64_t fileextenta;	/* page aligned fileextent */
	uint64_t fileoffset;	/* offset in file to start loading */
	uint64_t memoffset;	/* offset in memory of where to load */
#if VDISK_SUPPORT /* { */
	int disk_slice;		/* 0-1,3-7 (2 resvd for entire disk) */
#endif /* VDISK_SUPPORT } */
	bool_t is_rom;		/* mmap disk with MAP_PRIVATE flag */
	bool_t is_shared;	/* mmap disk with MAP_SHARED flag */
} mem_file_t;


/*
 * State for one memory device.  Allocated by mem_parse() and grown
 * with Xrealloc() as load directives are parsed - the trailing
 * fileblock[] array holds one mem_file_t per load directive
 * (old-style one-element-array idiom, not a flexible array member).
 */
typedef struct {
	bool_t is_rom;		/* mmap file with MAP_PRIVATE flag */
	bool_t is_shared;	/* mmap file with MAP_SHARED flag */
#if VDISK_SUPPORT /* { */
	bool_t is_disk;		/* used to identify virtual disk */
#endif /* VDISK_SUPPORT } */
	uint8_t *datap;		/* base of anonymous mapping (mem_init) */
	uint64_t size;		/* size of memory segment */
	mem_content_t *contentp; /* Any content to manually add */
	int nfileblocks;	/* no of files to load into segment */
	mem_file_t fileblock[1]; /* fileblock struct per file */
} mem_dev_t;



/*
 * One chunk of literal content from a "content" directive; kept on a
 * sorted, non-overlapping list (see mem_insert()) and copied into the
 * mapped segment - then freed - by mem_init().
 */
struct MEM_CONTENT {
	uint64_t offset;	/* destination offset within the segment */
	uint64_t size;		/* number of bytes in datap */
	mem_content_t *nextp;	/* next record, ascending offset order */
	uint8_t *datap;		/* big-endian byte image to copy in */
};
124 | ||
125 | ||
126 | ||
127 | #if VDISK_SUPPORT /* { */ | |
128 | /* | |
129 | * local functions for manipulating disk labels | |
130 | */ | |
131 | static void create_label(mem_dev_t *mdp); | |
132 | static void dump_label(mem_dev_t *mdp); | |
133 | static short get_checksum(struct dk_label *dkl, int mode); | |
134 | #endif /* VDISK_SUPPORT } */ | |
135 | ||
136 | static void mem_parse_content(mem_dev_t *mdp); | |
137 | ||
138 | #define MAXNAMELEN 256 | |
139 | ||
140 | ||
141 | /* | |
142 | * Complete the creation and parsing of the memory directives | |
143 | * | |
144 | * Format for parsing internal memory data is: | |
145 | * | |
146 | * [rom | shared | virtual_disk] | |
147 | * | |
148 | * if a 'rom | shared' directive is found, we parse as follows: | |
149 | * | |
150 | * load [[+]addr] "filename" [ file_offset [ extent_to_load ] ] | |
151 | * | |
152 | * - ROM can only be specified once, we can have as many | |
153 | * load directives as we like ! | |
154 | * | |
155 | * if a 'virtual_disk' directive is found, we parse as follows: | |
156 | * load s0 [rom|shared] "filename" | |
157 | * load s1 [rom|shared] "filename" | |
158 | * | |
159 | * - We will figure out the size of the entire disk by opening each | |
160 | * file. It's important that the first disk contains a valid | |
161 | * VTOC at block 0. We will update the mmapped copy of this VTOC | |
162 | * with the virtual partition information of all disks in the | |
163 | * virtual disk. | |
164 | * | |
165 | * - You cannot specify a disk to load into s2 as that is reserved | |
166 | * to describe the entire disk. | |
167 | * - You can load disks into s0 (boot disk), s1 (swap), | |
168 | * s3,s4,s5,s6,s7 (user specified mount points). | |
169 | * | |
170 | * - Each disk will be mmapped with the permissions specified in | |
171 | * the load directive. | |
172 | */ | |
173 | ||
174 | void | |
175 | mem_parse(config_dev_t *config_devp) | |
176 | { | |
177 | lexer_tok_t tok; | |
178 | mem_dev_t *mdp; | |
179 | uint64_t memory_segment_size; /* initial value of mdp->size */ | |
180 | long pgsize; | |
181 | ||
182 | pgsize = getpagesize(); | |
183 | ||
184 | /* | |
185 | * Allocate the memory device and all that stuff | |
186 | */ | |
187 | mdp = Xcalloc(1, mem_dev_t); | |
188 | ||
189 | mdp->is_rom = false; | |
190 | mdp->is_shared = false; | |
191 | #if VDISK_SUPPORT /* { */ | |
192 | mdp->is_disk = false; | |
193 | #endif /* VDISK_SUPPORT } */ | |
194 | mdp->datap = (void*)0; | |
195 | mdp->contentp = (mem_content_t *)0; | |
196 | mdp->size = config_devp->addrp->range; | |
197 | mdp->nfileblocks = 0; | |
198 | ||
199 | /* CSTYLED */ | |
200 | DBG( PRINTF(("mem_parse: parsing device %d\n", config_devp->device_id)); ); | |
201 | ||
202 | tok = lex_get_token(); | |
203 | switch (tok) { | |
204 | case T_S_Colon: | |
205 | goto finished; /* nothing more to parse */ | |
206 | case T_L_Brace: | |
207 | break; | |
208 | default: | |
209 | lex_fatal("expecting either ; or { when parsing memory device"); | |
210 | } | |
211 | ||
212 | ||
213 | /* | |
214 | * Start the parsing loop | |
215 | */ | |
216 | do { | |
217 | char *fnamep; | |
218 | uint64_t startoffset = 0LL; | |
219 | uint64_t foffset = 0LL; | |
220 | uint64_t flen = 0LL; | |
221 | struct stat sb; | |
222 | int idx; | |
223 | #if VDISK_SUPPORT /* { */ | |
224 | int disk_slice = -1; | |
225 | #endif /* VDISK_SUPPORT } */ | |
226 | bool_t is_shared, is_rom; | |
227 | ||
228 | tok = lex_get_token(); | |
229 | ||
230 | if (tok == T_R_Brace) break; | |
231 | ||
232 | if (tok != T_Token) { | |
233 | fail:; | |
234 | lex_fatal("expected load, rom, shared or virtual_disk " | |
235 | "directive parsing memory device"); | |
236 | } | |
237 | ||
238 | if (streq(lex.strp, "rom")) { | |
239 | if (mdp->is_rom) lex_fatal("rom already specified"); | |
240 | mdp->is_rom = true; | |
241 | lex_get(T_S_Colon); | |
242 | continue; | |
243 | } | |
244 | ||
245 | if (streq(lex.strp, "shared")) { | |
246 | if (mdp->is_shared) | |
247 | lex_fatal("shared already specified"); | |
248 | mdp->is_shared = true; | |
249 | lex_get(T_S_Colon); | |
250 | continue; | |
251 | } | |
252 | ||
253 | if (streq(lex.strp, "virtual_disk")) { | |
254 | #if VDISK_SUPPORT /* { */ | |
255 | if (mdp->is_disk) | |
256 | lex_fatal("virtual_disk already specified"); | |
257 | mdp->is_disk = true; | |
258 | ||
259 | /* | |
260 | * For disks, we don't need the size as specified in the | |
261 | * conf file. We'll calculate the real size once we've | |
262 | * found the size of all disks loaded. | |
263 | * | |
264 | * Howvever, we need to ensure that the memory segment | |
265 | * used for mmapping the disks does not overlap with | |
266 | * any other memory segment in use. Legion has already | |
267 | * done this check when parsing the memory directive. | |
268 | * We have gotten here if we have not overlapped. The | |
269 | * concern now is that when we parse the disk directives | |
270 | * we may extend beyond the memory segment specified by | |
271 | * the memory directive. So as a check, we save the size | |
272 | * here and run a check at the end to ensure that the | |
273 | * total size of all disks is within the bounds of the | |
274 | * size of the memory segment. | |
275 | */ | |
276 | memory_segment_size = mdp->size; | |
277 | ||
278 | mdp->size = 0; /* We'll calculate this as we parse */ | |
279 | ||
280 | lex_get(T_S_Colon); | |
281 | continue; | |
282 | #else /* VDISK_SUPPORT } { */ | |
283 | lex_fatal("virtual_disk not supported on this " | |
284 | "platform"); | |
285 | #endif /* VDISK_SUPPORT } */ | |
286 | } | |
287 | ||
288 | if (streq(lex.strp, "content")) { | |
289 | mem_parse_content(mdp); | |
290 | continue; | |
291 | } | |
292 | ||
293 | if (!streq(lex.strp, "load")) goto fail; | |
294 | ||
295 | tok = lex_get_token(); | |
296 | ||
297 | #if VDISK_SUPPORT /* { */ | |
298 | /* | |
299 | * Parse disk directive as follows: | |
300 | * | |
301 | * s(n) rom|shared "disk.name" | |
302 | * | |
303 | * where: | |
304 | * - s(n) is the slice number (cannot be s2) | |
305 | * - rom or shared to specify how to mmap this disk | |
306 | * - "disk.name" you can probably guess ... | |
307 | * | |
308 | * NOTE: The VTOC of s0 will be overwritten in memory | |
309 | * to with the correct label for all partitions | |
310 | * that make up this virtual disk. | |
311 | * s2 of the VTOC will contain the entire size | |
312 | * of all mmapped disks. We also need a valid | |
313 | * disk label checksum so OBP can open this disk. | |
314 | */ | |
315 | ||
316 | if (mdp->is_disk) { | |
317 | ||
318 | /* | |
319 | * figure out which slice number this disk is | |
320 | */ | |
321 | if (streq(lex.strp, "s0")) | |
322 | disk_slice = 0; | |
323 | else if (streq(lex.strp, "s1")) | |
324 | disk_slice = 1; | |
325 | else if (streq(lex.strp, "s2")) | |
326 | lex_fatal("Cannot load a disk as slice 2."); | |
327 | else if (streq(lex.strp, "s3")) | |
328 | disk_slice = 3; | |
329 | else if (streq(lex.strp, "s4")) | |
330 | disk_slice = 4; | |
331 | else if (streq(lex.strp, "s5")) | |
332 | disk_slice = 5; | |
333 | else if (streq(lex.strp, "s6")) | |
334 | disk_slice = 6; | |
335 | else if (streq(lex.strp, "s7")) | |
336 | disk_slice = 7; | |
337 | else | |
338 | lex_fatal("expected a slice number of format " | |
339 | "s0-7 (but not s2)"); | |
340 | ||
341 | tok = lex_get_token(); | |
342 | ||
343 | /* | |
344 | * parse the mmap permissions for this disk | |
345 | * (rom or shared) | |
346 | */ | |
347 | is_rom = is_shared = false; | |
348 | ||
349 | if (streq(lex.strp, "rom")) | |
350 | is_rom = true; | |
351 | else if (streq(lex.strp, "shared")) | |
352 | is_shared = true; | |
353 | else | |
354 | lex_fatal("expected either rom or shared"); | |
355 | ||
356 | /* | |
357 | * parse the filename | |
358 | */ | |
359 | lex_get(T_String); | |
360 | fnamep = Xstrdup(lex.strp); | |
361 | ||
362 | tok = lex_get_token(); | |
363 | if (tok == T_S_Colon) | |
364 | goto load_file; | |
365 | else | |
366 | lex_fatal("expected ; after filename"); | |
367 | ||
368 | } | |
369 | #endif /* VDISK_SUPPORT } */ | |
370 | ||
371 | /* | |
372 | * Get the load offset, ie. the offset from the base | |
373 | * address to load this file. For disk devices, we | |
374 | * don't need this as we use the size of the previous | |
375 | * disk to work out where to load the next disk. | |
376 | */ | |
377 | switch (tok) { | |
378 | case T_Plus: | |
379 | lex_get(T_Number); | |
380 | startoffset = lex.val; | |
381 | break; | |
382 | case T_Number: | |
383 | if (lex.val < config_devp->addrp->baseaddr || | |
384 | lex.val >= config_devp->addrp->topaddr) | |
385 | lex_fatal("specified load address is outside " | |
386 | "the range of the memory device"); | |
387 | ||
388 | startoffset = lex.val - config_devp->addrp->baseaddr; | |
389 | break; | |
390 | case T_String: | |
391 | startoffset = 0x0LL; | |
392 | goto got_string; | |
393 | default: | |
394 | lex_fatal("Expected either a start address / offset " | |
395 | "or filename for memory device load directive"); | |
396 | } | |
397 | ||
398 | lex_get(T_String); | |
399 | ||
400 | got_string:; | |
401 | fnamep = Xstrdup(lex.strp); | |
402 | ||
403 | tok = lex_get_token(); | |
404 | if (tok == T_S_Colon) | |
405 | goto load_file; | |
406 | if (tok != T_Number) | |
407 | lex_fatal("Expected ; or file offset for memory device " | |
408 | "load directive"); | |
409 | if (lex.val < 0LL) | |
410 | lex_fatal("load file offset must be >0 for load directive"); | |
411 | ||
412 | foffset = lex.val; | |
413 | ||
414 | tok = lex_get_token(); | |
415 | if (tok == T_S_Colon) | |
416 | goto load_file; | |
417 | if (tok != T_Number) | |
418 | lex_fatal("Expected ; or load length for memory device " | |
419 | "load directive"); | |
420 | if (lex.val <= 0LL) | |
421 | lex_fatal("load length must be >=0 for load directive"); | |
422 | ||
423 | flen = lex.val; | |
424 | ||
425 | lex_get(T_S_Colon); | |
426 | ||
427 | load_file: | |
428 | if (stat(fnamep, &sb) < 0) | |
429 | lex_fatal("error opening load file %s", fnamep); | |
430 | ||
431 | if (flen == 0LL) | |
432 | flen = sb.st_size - foffset; | |
433 | ||
434 | #if VDISK_SUPPORT /* { */ | |
435 | /* | |
436 | * Set the load address of this disk slice (startoffset) | |
437 | * to be right after the address that the previous | |
438 | * slice loaded at (mdp->size) | |
439 | * | |
440 | * Increase the size of the disk by flen (rounded up | |
441 | * so it's page aligned.) The next slice (if any) | |
442 | * will be loaded at mdp->size. | |
443 | */ | |
444 | if (mdp->is_disk) { | |
445 | startoffset = mdp->size; | |
446 | mdp->size += sim_roundup(flen, pgsize); | |
447 | ||
448 | /* | |
449 | * sanity check to see if we've blown passed the | |
450 | * the memory segment as specified in the conf | |
451 | * file. | |
452 | */ | |
453 | if (mdp->size > memory_segment_size) { | |
454 | printf("\n Memory segment overflow. " | |
455 | "[0x%llx > 0x%llx]", | |
456 | mdp->size, memory_segment_size); | |
457 | lex_fatal("memory segment for disks has " | |
458 | "extended beyond that specified in the " | |
459 | "conf file"); | |
460 | } | |
461 | } | |
462 | #endif /* VDISK_SUPPORT } */ | |
463 | ||
464 | if (sb.st_size < (foffset + flen)) | |
465 | lex_fatal("load file %s is smaller than the specified " | |
466 | "load range", fnamep); | |
467 | ||
468 | if ((startoffset + flen) > config_devp->addrp->range) | |
469 | lex_fatal("load file %s is larger than the memory device", | |
470 | fnamep); | |
471 | ||
472 | /* OK have parsed file info - add it to load block */ | |
473 | ||
474 | idx = mdp->nfileblocks++; | |
475 | /* CSTYLED */ | |
476 | mdp = Xrealloc(mdp, Sizeof(mem_dev_t) + mdp->nfileblocks * Sizeof(mem_file_t)); | |
477 | ||
478 | mdp->fileblock[idx].fnamep = fnamep; | |
479 | /* how much of file to load in */ | |
480 | mdp->fileblock[idx].fileextent = flen; | |
481 | mdp->fileblock[idx].fileextenta = sim_roundup(flen, pgsize); | |
482 | /* offset in file to start load */ | |
483 | mdp->fileblock[idx].fileoffset = foffset; | |
484 | /* offset in mem to start load */ | |
485 | mdp->fileblock[idx].memoffset = startoffset; | |
486 | /* MMAP with flag MAP_SHARED */ | |
487 | mdp->fileblock[idx].is_shared = is_shared; | |
488 | /* MMAP with flag MAP_PRIVATE */ | |
489 | mdp->fileblock[idx].is_rom = is_rom; | |
490 | #if VDISK_SUPPORT /* { */ | |
491 | /* disk partition number */ | |
492 | mdp->fileblock[idx].disk_slice = disk_slice; | |
493 | ||
494 | if (mdp->is_disk) { | |
495 | /* CSTYLED */ | |
496 | DBG( PRINTF(("\nPartition Info:::\nname: %s \nslice=%d " | |
497 | "\nbase=0x%llx " | |
498 | "\nstartoffset = %llx \nflen = %llx \nfoffset = %llx " | |
499 | "\nmdp->size = 0x%llx\nfilextent = 0x%llx " | |
500 | "fileextenta = 0x%llx " | |
501 | "\nmemoffset = 0x%llx\n", mdp->fileblock[idx].fnamep, | |
502 | mdp->fileblock[idx].disk_slice, | |
503 | config_devp->addrp->baseaddr, | |
504 | startoffset, flen, foffset, mdp->size, | |
505 | mdp->fileblock[idx].fileextent, | |
506 | mdp->fileblock[idx].fileextenta, | |
507 | /* CSTYLED */ | |
508 | mdp->fileblock[idx].memoffset)); ); | |
509 | } | |
510 | #endif /* VDISK_SUPPORT } */ | |
511 | ||
512 | } while (1); | |
513 | ||
514 | finished:; | |
515 | config_devp->devp = mdp; | |
516 | } | |
517 | ||
518 | ||
519 | ||
520 | ||
521 | static int | |
522 | mem_insert(mem_dev_t *mdp, uint64_t offset, uint64_t size, uint8_t *datap) | |
523 | { | |
524 | mem_content_t *tp, **tpp; | |
525 | ||
526 | /* CSTYLED */ | |
527 | DBG( PRINTF(("mem_insert:offset: 0x%llx size: 0x%llx\n", offset, size)); ); | |
528 | ||
529 | /* walk the list looking for insertion point */ | |
530 | for (tpp = &mdp->contentp; (tp = *tpp) != NULL; tpp = &tp->nextp) { | |
531 | if (offset < tp->offset) { | |
532 | if ((offset + size) > tp->offset) { | |
533 | return (-1); /* overlap */ | |
534 | } else { | |
535 | break; | |
536 | } | |
537 | } | |
538 | if ((tp->offset + tp->size) > offset) { | |
539 | return (-1); /* overlap */ | |
540 | } | |
541 | } | |
542 | ||
543 | /* found the location, add the mem object */ | |
544 | tp = Xcalloc(1, mem_content_t); | |
545 | tp->offset = offset; | |
546 | tp->size = size; | |
547 | tp->datap = datap; | |
548 | tp->nextp = *tpp; | |
549 | *tpp = tp; | |
550 | ||
551 | return (0); | |
552 | } | |
553 | ||
554 | /* | |
555 | * Handle the content directive | |
556 | */ | |
557 | #define CONTENT_STEP 256 | |
558 | ||
/*
 * Parse a "content" directive body:
 *
 *	content + offset word_size { value value ... } ;
 *
 * Accumulates the values into a big-endian byte buffer and queues it
 * on the device's content list via mem_insert(); mem_init() copies it
 * into the mapped segment later.  All errors are fatal via lex_fatal.
 */
void
mem_parse_content(mem_dev_t *mdp)
{
	uint64_t offset;	/* destination offset within the segment */
	uint64_t size;		/* NOTE(review): unused local */
	uint64_t mask;		/* legal-value mask for chosen word size */
	int word_size;		/* bits while parsing, then bytes (>>3) */
	lexer_tok_t tok;
	int space;		/* bytes currently allocated in datap */
	uint8_t *datap;		/* growing big-endian content buffer */
	int idx;		/* bytes filled so far */

	/* "+ offset" - offset is relative to the device base */
	lex_get(T_Plus);
	lex_get(T_Number);
	offset = lex.val;
	if (offset > mdp->size) lex_fatal("Offset is outside memory range");

	/* word size in bits; selects the per-value range mask */
	lex_get(T_Number);
	word_size = (int)lex.val;

	switch (word_size) {
	case 8:
		mask = 0xffull;
		break;
	case 16:
		mask = 0xffffull;
		break;
	case 32:
		mask = 0xffffffffull;
		break;
	case 64:
		mask = 0xffffffffffffffffull;
		break;
	default:
		lex_fatal("Word size must be 8, 16, 32 or 64 bits");
	}
	word_size >>= 3;	/* bits -> bytes from here on */

	lex_get(T_L_Brace);

	idx = 0;
	space = 0;
	datap = NULL;
	while ((tok = lex_get_token()) != T_R_Brace) {
		uint8_t *p;	/* NOTE(review): unused local */

		if (tok != T_Number) lex_fatal("Unexpected content");
		if ((lex.val & ~mask) != 0LL) lex_fatal("Content out of range");

		/*
		 * NOTE(review): the >= also rejects content that ends
		 * exactly at mdp->size - presumably a deliberately
		 * conservative bound; confirm before relaxing.
		 */
		if (offset+idx+word_size >= mdp->size)
			lex_fatal("Content overflows memory range");

		/* grow the buffer in CONTENT_STEP chunks as needed */
		if (space <= (idx+word_size)) {
			space += CONTENT_STEP;
			datap = Xrealloc(datap, space);
		}

		/* ensure it's written big-endian */
		switch (word_size) {
		case 8:			/* 64-bit: high 4 bytes first ... */
			datap[idx] = (lex.val >> 56);
			datap[idx+1] = (lex.val >> 48);
			datap[idx+2] = (lex.val >> 40);
			datap[idx+3] = (lex.val >> 32);
			idx += 4;
			/* FALLTHROUGH */
		case 4:			/* ... then the 32-bit half ... */
			datap[idx] = (lex.val >> 24);
			datap[idx+1] = (lex.val >> 16);
			idx += 2;
			/* FALLTHROUGH */
		case 2:			/* ... then the 16-bit half ... */
			datap[idx] = (lex.val >> 8);
			idx++;
			/* FALLTHROUGH */
		case 1:			/* ... and finally the low byte */
			datap[idx] = lex.val;
			idx++;
		}
	}
	if (idx == 0) lex_fatal("content directive must have some content");

	/* queue the buffer; ownership of datap passes to the list */
	if (mem_insert(mdp, offset, idx, datap) < 0) {
		lex_fatal("Memory content overlap detected ");
	}
}
642 | ||
643 | ||
644 | ||
645 | ||
646 | ||
647 | ||
648 | /* | |
649 | * Initialise the mem after parsing is complete | |
650 | * If no files have been requested, then we force a | |
651 | * /dev/zero mapping ... | |
652 | * ... this is why the additional files start at | |
653 | * fileblock index 1. | |
654 | */ | |
655 | ||
656 | void | |
657 | mem_init(config_dev_t *config_devp) | |
658 | { | |
659 | long pgsize; | |
660 | mem_dev_t *mdp; | |
661 | uint8_t *datap; | |
662 | int idx; | |
663 | mem_content_t *cp, *next_cp; | |
664 | ||
665 | mdp = (mem_dev_t *)config_devp->devp; | |
666 | pgsize = getpagesize(); | |
667 | ||
668 | /* | |
669 | * Perform a check to see that the requested loaded files are | |
670 | * a) correctly aligned, and | |
671 | * b) whether we need a /dev/zero mapping to back sections not | |
672 | * covered by other mmapped files (beginning, middle, | |
673 | * end), or because we need to "load" files in that are | |
674 | * not correctly aligned. | |
675 | */ | |
676 | ||
677 | ||
678 | /* | |
679 | * For the moment we create a MAP_ANON block of the entire | |
680 | * ram/rom then map over than any additional data files we need. | |
681 | * Force a segv if we ever try and write to a "ROM". | |
682 | */ | |
683 | ||
684 | /* CSTYLED */ | |
685 | DBG( PRINTF(("memory mapping : 0x%llx\n", sim_roundup(mdp->size, pgsize))); ); | |
686 | ||
687 | datap = mdp->datap = (void*)mmap(NULL, sim_roundup(mdp->size, pgsize), | |
688 | PROT_READ | (mdp->is_rom ? 0 : PROT_WRITE), | |
689 | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0); | |
690 | if (MAP_FAILED == datap) fatal("Initial mmap of anon memory failed"); | |
691 | ||
692 | /* | |
693 | * Now we either map or load in all the remaining files that the | |
694 | * config file may have specified. | |
695 | */ | |
696 | ||
697 | for (idx = 0; idx < mdp->nfileblocks; idx++) { | |
698 | int fd; | |
699 | uint8_t *mapp; | |
700 | bool_t is_shared = mdp->is_shared; | |
701 | ||
702 | #if VDISK_SUPPORT /* { */ | |
703 | /* | |
704 | * Figure out which 'is_shared' to use, if we have a | |
705 | * disk then each partition can have it's own flags | |
706 | * for mmap. | |
707 | */ | |
708 | if (mdp->is_disk) | |
709 | is_shared = mdp->fileblock[idx].is_shared; | |
710 | #endif /* VDISK_SUPPORT } */ | |
711 | ||
712 | do { | |
713 | fd = open(mdp->fileblock[idx].fnamep, | |
714 | is_shared ? O_RDWR : O_RDONLY); | |
715 | } while (fd < 0 && EAGAIN == errno); | |
716 | ||
717 | if (fd < 0) | |
718 | fatal("Failed opening file %s", | |
719 | mdp->fileblock[idx].fnamep); | |
720 | ||
721 | /* | |
722 | * Check that the fileoffset (offset in file to start | |
723 | * loading) is page alligned | |
724 | */ | |
725 | if ((mdp->fileblock[idx].fileoffset % pgsize) != 0) | |
726 | fatal("Offset 0x%llx in file %s is not aligned " | |
727 | "with 0x%llx pagesize", | |
728 | mdp->fileblock[idx].fileoffset, | |
729 | mdp->fileblock[idx].fnamep, | |
730 | pgsize); | |
731 | ||
732 | #if 0 /* { */ | |
733 | /* Allow non-rounded out contents */ | |
734 | if ((mdp->fileblock[idx].fileextent % pgsize) != 0) | |
735 | fatal("Loaded extent 0x%llx for file %s is not " | |
736 | "aligned with 0x%llx pagesize", | |
737 | mdp->fileblock[idx].fileextent, | |
738 | mdp->fileblock[idx].fnamep, | |
739 | pgsize); | |
740 | #endif /* } */ | |
741 | ||
742 | /* | |
743 | * Check that the memoffset (offset in memory to start | |
744 | * loading) is page alligned | |
745 | */ | |
746 | if ((mdp->fileblock[idx].memoffset % pgsize) != 0) | |
747 | fatal("Offset 0x%llx into RAM for file %s is not " | |
748 | "aligned with 0x%llx pagesize", | |
749 | mdp->fileblock[idx].memoffset, | |
750 | mdp->fileblock[idx].fnamep, | |
751 | pgsize); | |
752 | ||
753 | ASSERT(NULL != datap || (NULL == datap && | |
754 | 0LL == mdp->fileblock[idx].memoffset)); | |
755 | ||
756 | /* CSTYLED */ | |
757 | DBG( PRINTF(("mapping %s (%s): 0x%llx -> 0x%llx (0x%llx)\n", | |
758 | mdp->fileblock[idx].fnamep, | |
759 | (is_shared ? "SHARED" : "PRIVATE"), | |
760 | config_devp->addrp->baseaddr + | |
761 | mdp->fileblock[idx].memoffset, | |
762 | config_devp->addrp->baseaddr + | |
763 | mdp->fileblock[idx].memoffset + | |
764 | mdp->fileblock[idx].fileextent, | |
765 | /* CSTYLED */ | |
766 | mdp->fileblock[idx].fileextent)); ); | |
767 | ||
768 | /* | |
769 | * mmap the file into memory with the appropriate flags. | |
770 | */ | |
771 | mapp = (void*)mmap((void*)(datap + | |
772 | mdp->fileblock[idx].memoffset), | |
773 | mdp->fileblock[idx].fileextent, | |
774 | PROT_READ | PROT_WRITE, | |
775 | /* CSTYLED */ | |
776 | (is_shared ? MAP_SHARED : MAP_PRIVATE) | MAP_FIXED | MAP_NORESERVE, | |
777 | fd, mdp->fileblock[idx].fileoffset); | |
778 | ||
779 | if (((uint8_t *)MAP_FAILED) == mapp) | |
780 | fatal("Failed mapping file %s", | |
781 | mdp->fileblock[idx].fnamep); | |
782 | ||
783 | ASSERT((datap + mdp->fileblock[idx].memoffset) == mapp); | |
784 | } | |
785 | #if VDISK_SUPPORT /* { */ | |
786 | /* | |
787 | * Now we have mmaped in all files, we need to create a valid | |
788 | * VTOC for the root disk (ie, the first file loaded) based on | |
789 | * the size of the other disks. The VTOC also needs to have | |
790 | * correct values for s2 (the entire disk) so that someone can | |
791 | * open the label on s0 and figure out the size of the entire | |
792 | * disk and the layout of each partition. | |
793 | */ | |
794 | if (mdp->is_disk) | |
795 | create_label(mdp); | |
796 | #endif /* VDISK_SUPPORT } */ | |
797 | ||
798 | ||
799 | /* | |
800 | * Finally apply content | |
801 | */ | |
802 | ||
803 | for (cp = mdp->contentp; cp != NULL; cp = next_cp) { | |
804 | next_cp = cp->nextp; | |
805 | ||
806 | memmove(mdp->datap+cp->offset, cp->datap, cp->size); | |
807 | Xfree(cp->datap); | |
808 | Xfree(cp); | |
809 | } | |
810 | ||
811 | /* CSTYLED */ | |
812 | SANITY( mdp->contentp = NULL; ); | |
813 | } | |
814 | ||
815 | ||
816 | ||
817 | ||
818 | ||
819 | ||
820 | /* | |
821 | * Memory configuration dump | |
822 | */ | |
823 | ||
void
mem_dump(config_dev_t *config_devp)
{
	/*
	 * Intentionally empty: plain memory devices have nothing
	 * useful to report, but the dev_type_t operations vector
	 * requires a dump entry point.
	 */
}
828 | ||
829 | ||
830 | /* | |
831 | * Returns the extent of the linear cacheable block | |
832 | * starting at offset, and a pointer to the state block | |
833 | * | |
834 | * Note: if the device is a ROM, attempts to write | |
835 | * i.e. DA_Store as type must be failed - | |
836 | * the function is supposed to return 0. | |
837 | */ | |
838 | ||
839 | tpaddr_t | |
840 | mem_cacheable(config_addr_t *config_addrp, dev_access_t type, tpaddr_t offset, | |
841 | uint8_t **blockp) | |
842 | { | |
843 | mem_dev_t *mdp; | |
844 | ||
845 | mdp = config_addrp->config_devp->devp; | |
846 | ||
847 | /* fail store attempts to a ROM */ | |
848 | if (mdp->is_rom && (type & DA_Store)) | |
849 | return ((tpaddr_t)0); | |
850 | ||
851 | #if VDISK_SUPPORT /* { */ | |
852 | /* cannot execute from a disk */ | |
853 | if (mdp->is_disk && (type & DA_Instn)) | |
854 | return ((tpaddr_t)0); | |
855 | #endif /* VDISK_SUPPORT } */ | |
856 | ||
857 | if ((offset < 0) || (offset >= config_addrp->range)) { | |
858 | /* CSTYLED */ | |
859 | SANITY(*blockp = NULL;); | |
860 | return (NULL); | |
861 | } | |
862 | ||
863 | *blockp = mdp->datap + offset; | |
864 | ||
865 | return (config_addrp->range - offset); | |
866 | } | |
867 | ||
868 | ||
869 | ||
870 | /* | |
871 | * Should only get invoked if this is a ROM. | |
872 | * Indicate the store failed - return false | |
873 | * ROMs are read only. | |
874 | */ | |
875 | ||
bool_t
mem_cpu_access(simcpu_t *sp, config_addr_t *cap, tpaddr_t offset, maccess_t op,
    uint64_t *regp)
{
	mem_dev_t *mdp;

	mdp = cap->config_devp->devp;

	/* only a ROM should ever route an access through here */
	ASSERT(mdp->is_rom);

	/*
	 * NOTE(review): the message says "rom_cpu_store" - presumably
	 * the legacy name of this handler; left as-is because it is a
	 * runtime diagnostic string.
	 */
	EXEC_WARNING(("rom_cpu_store: attempted store to ROM @ pc 0x%llx "
	    "of %d bytes at offset 0x%llx",
	    sp->pc, 1<<(op & MA_Size_Mask), offset));
	/* the store always fails: ROMs are read only */
	return (false);
}
891 | ||
892 | #if VDISK_SUPPORT /* { */ | |
893 | void | |
894 | dump_label(mem_dev_t *mdp) | |
895 | { | |
896 | struct dk_label *dk_label; | |
897 | int idx = 0; | |
898 | uint64_t nsect, nhead, nblocks, cyl_size; | |
899 | uint64_t ncyl, start_cyl, end_cyl, size; | |
900 | int i, tag, flag; | |
901 | char tag_str[15], flag_str[15]; | |
902 | ||
903 | /* | |
904 | * read disk label | |
905 | */ | |
906 | dk_label = (struct dk_label *)mdp->datap; | |
907 | ||
908 | /* | |
909 | * If no magic, give up early | |
910 | */ | |
911 | ||
912 | if (dk_label->dkl_magic != DKL_MAGIC) { | |
913 | PRINTF(("No valid disk label - giving up\n")); | |
914 | return; | |
915 | } | |
916 | ||
917 | nsect = dk_label->dkl_nsect; | |
918 | nhead = dk_label->dkl_nhead; | |
919 | ||
920 | cyl_size = nsect * nhead; | |
921 | ||
922 | if (dk_label->dkl_magic != DKL_MAGIC) { | |
923 | PRINTF(("Bad disk label\n")); | |
924 | cyl_size = 1; | |
925 | } | |
926 | ||
927 | /* CSTYLED */ | |
928 | DBG( PRINTF(("[%4s] [%10s] [%4s] [%10s] - [%10s] [%15s] [%10s] [%10s]\n", | |
929 | "part", "Tag", "Flag", "Start-Cyl", "End-Cyl", "Size", "#Blocks", | |
930 | /* CSTYLED */ | |
931 | "#Cylinders")); ); | |
932 | ||
933 | for (i = 0; i < NDKMAP; i++) { | |
934 | uint64_t nblocks; | |
935 | ||
936 | nblocks = (uint64_t)dk_label->dkl_map[i].dkl_nblk; | |
937 | size = nblocks * 512; | |
938 | /* Check to avoid a fault if label is malformed */ | |
939 | if (cyl_size != 0) { | |
940 | ncyl = nblocks / cyl_size; | |
941 | } else { | |
942 | ncyl = nblocks; /* fake */ | |
943 | } | |
944 | start_cyl = dk_label->dkl_map[i].dkl_cylno; | |
945 | ||
946 | tag = dk_label->dkl_vtoc.v_part[i].p_tag; | |
947 | flag = dk_label->dkl_vtoc.v_part[i].p_flag; | |
948 | ||
949 | if (dk_label->dkl_map[i].dkl_nblk == 0) | |
950 | end_cyl = 0; | |
951 | else | |
952 | end_cyl = (dk_label->dkl_map[i].dkl_cylno + ncyl) -1; | |
953 | ||
954 | switch (tag) { | |
955 | case 0x00: | |
956 | strcpy(tag_str, "unassigned"); break; | |
957 | case 0x01: | |
958 | strcpy(tag_str, "boot"); break; | |
959 | case 0x02: | |
960 | strcpy(tag_str, "root"); break; | |
961 | case 0x03: | |
962 | strcpy(tag_str, "swap"); break; | |
963 | case 0x04: | |
964 | strcpy(tag_str, "usr"); break; | |
965 | case 0x05: | |
966 | strcpy(tag_str, "backup"); break; | |
967 | default: | |
968 | strcpy(tag_str, "UNKNOWN"); break; | |
969 | } | |
970 | ||
971 | switch (flag) { | |
972 | case 0x0: | |
973 | strcpy(flag_str, "wm"); break; | |
974 | case V_UNMNT: | |
975 | strcpy(flag_str, "wu"); break; | |
976 | case V_RONLY: | |
977 | strcpy(flag_str, "rm"); break; | |
978 | default: | |
979 | strcpy(flag_str, "unknown"); break; | |
980 | } | |
981 | /* CSTYLED */ | |
982 | DBG( PRINTF(("[%4d] [%10s] [ %2s ] [%10llu] - [%10llu] [%15llu] [%10llu] [%10llu] %lluMB\n", | |
983 | i, tag_str, flag_str, start_cyl, end_cyl, size, nblocks, | |
984 | /* CSTYLED */ | |
985 | ncyl, size/1024/1024)); ); | |
986 | ||
987 | ||
988 | } | |
989 | /* CSTYLED */ | |
990 | DBG( PRINTF(("nparts = [0x%x]\n\n", dk_label->dkl_vtoc.v_nparts)); ); | |
991 | } | |
992 | ||
993 | void | |
994 | create_label(mem_dev_t *mdp) | |
995 | { | |
996 | struct dk_label *dk_label; | |
997 | int idx = 0; | |
998 | uint64_t nsect, nhead, nblocks, nblocksa, cyl_size, sblock; | |
999 | ||
1000 | uint64_t ncyl, start_cyl, end_cyl, size; | |
1001 | int i, tag, flag; | |
1002 | char tag_str[MAXNAMELEN], flag_str[MAXNAMELEN]; | |
1003 | uint64_t total_nblocks = 0; | |
1004 | ||
1005 | /* | |
1006 | * Dump the existing label | |
1007 | */ | |
1008 | /* CSTYLED */ | |
1009 | DBG( PRINTF(("\nReading label from disk\n")); ); | |
1010 | dump_label(mdp); | |
1011 | ||
1012 | dk_label = (struct dk_label *)mdp->datap; | |
1013 | ||
1014 | /* | |
1015 | * wipe the current label | |
1016 | */ | |
1017 | for (idx = 0; idx < NDKMAP; idx++) { | |
1018 | dk_label->dkl_map[idx].dkl_nblk = 0x0; | |
1019 | dk_label->dkl_map[idx].dkl_cylno = 0x0; | |
1020 | dk_label->dkl_vtoc.v_part[idx].p_tag = 0x0; | |
1021 | dk_label->dkl_vtoc.v_part[idx].p_flag = V_UNMNT; | |
1022 | } | |
1023 | dk_label->dkl_magic = DKL_MAGIC; | |
1024 | ||
1025 | /* | |
1026 | * read disk label from s0 and modify it | |
1027 | * | |
1028 | * fake sectors and head = 1 to avoid rounding errors | |
1029 | * when calculating cyl_no. Rounding errors make it | |
1030 | * almost impossible to make the cyl_no align with the | |
1031 | * memoffset we mmaped the file into. So we just make | |
1032 | * start cyl == start block | |
1033 | */ | |
1034 | dk_label->dkl_nsect = 1; | |
1035 | dk_label->dkl_nhead = 1; | |
1036 | nsect = dk_label->dkl_nsect; | |
1037 | nhead = dk_label->dkl_nhead; | |
1038 | cyl_size = nsect * nhead; | |
1039 | ||
1040 | /* | |
1041 | * Update the label at slice 0 with this info | |
1042 | */ | |
1043 | /* CSTYLED */ | |
1044 | DBG( PRINTF(("Creating new lable as follows:\n")); ); | |
1045 | for (idx = 0; idx < mdp->nfileblocks; idx++) { | |
1046 | int slice; | |
1047 | static uint64_t next_cyl = 0; | |
1048 | ||
1049 | slice = mdp->fileblock[idx].disk_slice; | |
1050 | ||
1051 | /* | |
1052 | * Use the real value for fileextent to calculate the | |
1053 | * number of blocks for this partition. | |
1054 | * | |
1055 | * Our next partition needs to be loaded on a page boundry | |
1056 | * so we use the page aligned value for fileextent (fileextenta) | |
1057 | * to calculate the number of aligned blocks (nblocksa) | |
1058 | * Use this to count the number of blocks needed to pad | |
1059 | * the partition so the next slice can be loaded on a | |
1060 | * page boundry. | |
1061 | */ | |
1062 | nblocks = mdp->fileblock[idx].fileextent / 512; | |
1063 | nblocksa = mdp->fileblock[idx].fileextenta / 512; | |
1064 | total_nblocks += nblocksa; | |
1065 | ||
1066 | /* | |
1067 | * calculate start cyl based on the memoffset | |
1068 | * we used to map this file. | |
1069 | * memoffest is already page aligned. | |
1070 | */ | |
1071 | sblock = mdp->fileblock[idx].memoffset / 512; | |
1072 | ||
1073 | ncyl = (float)sblock / (float)cyl_size; | |
1074 | ||
1075 | /* Set the number of blocks in this slice */ | |
1076 | dk_label->dkl_map[slice].dkl_nblk = nblocks; | |
1077 | ||
1078 | /* Set the starting cylinder */ | |
1079 | dk_label->dkl_map[slice].dkl_cylno = ncyl; | |
1080 | ||
1081 | /* CSTYLED */ | |
1082 | DBG( PRINTF(("slice=[%d] name=[%s] size=[%llu][0x%llx] nblocks=[%llu] nblocksa=[%llu] ncyl=[%llu] nsect=[%llu] nhead=[%llu]\n", | |
1083 | mdp->fileblock[idx].disk_slice, | |
1084 | mdp->fileblock[idx].fnamep, | |
1085 | mdp->fileblock[idx].fileextent, | |
1086 | mdp->fileblock[idx].fileextent, | |
1087 | /* CSTYLED */ | |
1088 | nblocks, nblocksa, ncyl, nsect, nhead)); ); | |
1089 | ||
1090 | /* Set the p_tag, p_flag for each slice */ | |
1091 | if (slice == 1) { /* SWAP */ | |
1092 | dk_label->dkl_vtoc.v_part[slice].p_tag = 0x3; | |
1093 | dk_label->dkl_vtoc.v_part[slice].p_flag = V_UNMNT; | |
1094 | } else { /* all else set to ROOT */ | |
1095 | dk_label->dkl_vtoc.v_part[slice].p_tag = 0x2; | |
1096 | dk_label->dkl_vtoc.v_part[slice].p_flag = 0x0; | |
1097 | } | |
1098 | ||
1099 | } | |
1100 | ||
1101 | /* | |
1102 | * create a valid VTOC entry for slice 2 which reflects the total | |
1103 | * size of the virtual disk (with all partitions). | |
1104 | */ | |
1105 | dk_label->dkl_vtoc.v_part[2].p_tag = 0x5; /* backup */ | |
1106 | dk_label->dkl_vtoc.v_part[2].p_flag = V_UNMNT; /* wu */ | |
1107 | dk_label->dkl_map[2].dkl_cylno = 0x0; /* always start at 0 */ | |
1108 | dk_label->dkl_map[2].dkl_nblk = total_nblocks; /* entire disk */ | |
1109 | ||
1110 | dk_label->dkl_magic = DKL_MAGIC; | |
1111 | ||
1112 | /* CSTYLED */ | |
1113 | DBG( PRINTF(("New Checksum = [0x%hx]\n", get_checksum(dk_label, CK_MAKESUM))); ); | |
1114 | /* | |
1115 | * Update checksum on disk | |
1116 | */ | |
1117 | dk_label->dkl_cksum = get_checksum(dk_label, CK_MAKESUM); | |
1118 | ||
1119 | /* CSTYLED */ | |
1120 | DBG( PRINTF(("Dumping new label to be written to disk\n")); ); | |
1121 | dump_label(mdp); | |
1122 | } | |
1123 | ||
1124 | ||
1125 | /* | |
1126 | * Construct checksum for the new disk label | |
1127 | */ | |
1128 | short | |
1129 | get_checksum(struct dk_label *dk_label, int mode) | |
1130 | { | |
1131 | short sum, *sp; | |
1132 | int i; | |
1133 | ||
1134 | sum = 0; | |
1135 | sp = (short *)dk_label; | |
1136 | i = sizeof (*dk_label) / sizeof (*sp); | |
1137 | ||
1138 | /* | |
1139 | * If we are generating a checksum, don't include the checksum | |
1140 | * in the rolling xor. | |
1141 | */ | |
1142 | if (mode == CK_MAKESUM) | |
1143 | i -= 1; | |
1144 | ||
1145 | /* | |
1146 | * Take the xor of all the half-words in the label. | |
1147 | */ | |
1148 | while (i--) { | |
1149 | sum ^= *sp++; | |
1150 | } | |
1151 | ||
1152 | return (sum); | |
1153 | } | |
1154 | #endif /* VDISK_SUPPORT } */ |