Commit | Line | Data |
---|---|---|
15637ed4 RG |
1 | /* |
2 | * Copyright (c) 1991 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * This code is derived from software contributed to Berkeley by | |
6 | * The Mach Operating System project at Carnegie-Mellon University. | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | |
11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. All advertising materials mentioning features or use of this software | |
17 | * must display the following acknowledgement: | |
18 | * This product includes software developed by the University of | |
19 | * California, Berkeley and its contributors. | |
20 | * 4. Neither the name of the University nor the names of its contributors | |
21 | * may be used to endorse or promote products derived from this software | |
22 | * without specific prior written permission. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
25 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
28 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
29 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
30 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
31 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
32 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
33 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
34 | * SUCH DAMAGE. | |
35 | * | |
1284e777 | 36 | * from: @(#)vm_kern.c 7.4 (Berkeley) 5/7/91 |
bbc3f849 | 37 | * $Id: vm_kern.c,v 1.3 1993/10/16 16:20:30 rgrimes Exp $ |
1284e777 RG |
38 | */ |
39 | ||
40 | /* | |
15637ed4 RG |
41 | * Copyright (c) 1987, 1990 Carnegie-Mellon University. |
42 | * All rights reserved. | |
43 | * | |
44 | * Authors: Avadis Tevanian, Jr., Michael Wayne Young | |
45 | * | |
46 | * Permission to use, copy, modify and distribute this software and | |
47 | * its documentation is hereby granted, provided that both the copyright | |
48 | * notice and this permission notice appear in all copies of the | |
49 | * software, derivative works or modified versions, and any portions | |
50 | * thereof, and that both notices appear in supporting documentation. | |
51 | * | |
52 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
53 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND | |
54 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
55 | * | |
56 | * Carnegie Mellon requests users of this software to return to | |
57 | * | |
58 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
59 | * School of Computer Science | |
60 | * Carnegie Mellon University | |
61 | * Pittsburgh PA 15213-3890 | |
62 | * | |
63 | * any improvements or extensions that they make and grant Carnegie the | |
64 | * rights to redistribute these changes. | |
65 | * | |
15637ed4 RG |
66 | */ |
67 | ||
68 | /* | |
69 | * Kernel memory management. | |
70 | */ | |
71 | ||
72 | #include "param.h" | |
50665e44 | 73 | #include "syslog.h" |
15637ed4 RG |
74 | |
75 | #include "vm.h" | |
76 | #include "vm_page.h" | |
77 | #include "vm_pageout.h" | |
78 | #include "vm_kern.h" | |
79 | ||
bbc3f849 GW |
/*
 * Kernel virtual-address maps.  kernel_map is created in kmem_init()
 * below; kmem_map and mb_map are the only maps kmem_malloc() accepts.
 * The submaps are presumably carved out of kernel_map via
 * kmem_suballoc() during startup elsewhere -- TODO confirm against
 * machine-dependent init code.
 */
vm_map_t kernel_map;	/* the kernel's entire virtual address space */
vm_map_t mb_map;	/* submap used by kmem_malloc() (mbufs?) -- verify */
vm_map_t kmem_map;	/* submap backing the kernel malloc arena */
vm_map_t phys_map;	/* set up/used elsewhere; semantics not visible here */
vm_map_t buffer_map;	/* set up/used elsewhere; semantics not visible here */
85 | ||
15637ed4 RG |
86 | /* |
87 | * kmem_alloc_pageable: | |
88 | * | |
89 | * Allocate pageable memory to the kernel's address map. | |
90 | * map must be "kernel_map" below. | |
91 | */ | |
92 | ||
93 | vm_offset_t kmem_alloc_pageable(map, size) | |
94 | vm_map_t map; | |
95 | register vm_size_t size; | |
96 | { | |
97 | vm_offset_t addr; | |
98 | register int result; | |
99 | ||
100 | #if 0 | |
101 | if (map != kernel_map) | |
102 | panic("kmem_alloc_pageable: not called with kernel_map"); | |
103 | #endif 0 | |
104 | ||
105 | size = round_page(size); | |
106 | ||
107 | addr = vm_map_min(map); | |
108 | result = vm_map_find(map, NULL, (vm_offset_t) 0, | |
109 | &addr, size, TRUE); | |
110 | if (result != KERN_SUCCESS) { | |
111 | return(0); | |
112 | } | |
113 | ||
114 | return(addr); | |
115 | } | |
116 | ||
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down, zero-filled memory in the kernel's address
 *	map or a submap.  May block waiting for free pages (VM_WAIT),
 *	so this must not be used at interrupt level -- that is what
 *	kmem_malloc() below is for.  Returns the kernel virtual address
 *	of the region, or 0 if no address space was available.
 */
vm_offset_t kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;
	register vm_offset_t offset;
	extern vm_object_t kernel_object;
	vm_offset_t i;

	size = round_page(size);

	/*
	 *	Use the kernel object for wired-down kernel pages.
	 *	Assume that no region of the kernel object is
	 *	referenced more than once.
	 */

	/* First-fit search for address space from the bottom of the map. */
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
				&addr, size, TRUE);
	if (result != KERN_SUCCESS) {
		return(0);
	}

	/*
	 *	Since we didn't know where the new region would
	 *	start, we couldn't supply the correct offset into
	 *	the kernel object.  Re-allocate that address
	 *	region with the correct offset.
	 */

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);

	vm_map_lock(map);
	vm_map_delete(map, addr, addr + size);
	vm_map_insert(map, kernel_object, offset, addr, addr + size);
	vm_map_unlock(map);

	/*
	 *	Guarantee that there are pages already in this object
	 *	before calling vm_map_pageable.  This is to prevent the
	 *	following scenario:
	 *
	 *		1) Threads have swapped out, so that there is a
	 *		   pager for the kernel_object.
	 *		2) The kmsg zone is empty, and so we are kmem_allocing
	 *		   a new page for it.
	 *		3) vm_map_pageable calls vm_fault; there is no page,
	 *		   but there is a pager, so we call
	 *		   pager_data_request.  But the kmsg zone is empty,
	 *		   so we must kmem_alloc.
	 *		4) goto 1
	 *		5) Even if the kmsg zone is not empty: when we get
	 *		   the data back from the pager, it will be (very
	 *		   stale) non-zero data.  kmem_alloc is defined to
	 *		   return zero-filled memory.
	 *
	 *	We're intentionally not activating the pages we allocate
	 *	to prevent a race with page-out.  vm_map_pageable will wire
	 *	the pages.
	 */

	vm_object_lock(kernel_object);
	for (i = 0 ; i < size; i+= PAGE_SIZE) {
		vm_page_t	mem;

		/*
		 * If no page is free, drop the object lock while we
		 * sleep (VM_WAIT) so the pageout daemon can make
		 * progress, then retake it and retry.
		 */
		while ((mem = vm_page_alloc(kernel_object, offset+i)) == NULL) {
			vm_object_unlock(kernel_object);
			VM_WAIT;
			vm_object_lock(kernel_object);
		}
		vm_page_zero_fill(mem);
		mem->busy = FALSE;
	}
	vm_object_unlock(kernel_object);

	/*
	 *	And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	/*
	 *	Try to coalesce the map
	 */

	vm_map_simplify(map, addr);

	return(addr);
}
213 | ||
214 | /* | |
215 | * kmem_free: | |
216 | * | |
217 | * Release a region of kernel virtual memory allocated | |
218 | * with kmem_alloc, and return the physical pages | |
219 | * associated with that region. | |
220 | */ | |
221 | void kmem_free(map, addr, size) | |
222 | vm_map_t map; | |
223 | register vm_offset_t addr; | |
224 | vm_size_t size; | |
225 | { | |
226 | (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size)); | |
227 | vm_map_simplify(map, addr); | |
228 | } | |
229 | ||
230 | /* | |
231 | * kmem_suballoc: | |
232 | * | |
233 | * Allocates a map to manage a subrange | |
234 | * of the kernel virtual address space. | |
235 | * | |
236 | * Arguments are as follows: | |
237 | * | |
238 | * parent Map to take range from | |
239 | * size Size of range to find | |
240 | * min, max Returned endpoints of map | |
241 | * pageable Can the region be paged | |
242 | */ | |
243 | vm_map_t kmem_suballoc(parent, min, max, size, pageable) | |
244 | register vm_map_t parent; | |
245 | vm_offset_t *min, *max; | |
246 | register vm_size_t size; | |
247 | boolean_t pageable; | |
248 | { | |
249 | register int ret; | |
250 | vm_map_t result; | |
251 | ||
252 | size = round_page(size); | |
253 | ||
254 | *min = (vm_offset_t) vm_map_min(parent); | |
255 | ret = vm_map_find(parent, NULL, (vm_offset_t) 0, | |
256 | min, size, TRUE); | |
257 | if (ret != KERN_SUCCESS) { | |
258 | printf("kmem_suballoc: bad status return of %d.\n", ret); | |
259 | panic("kmem_suballoc"); | |
260 | } | |
261 | *max = *min + size; | |
262 | pmap_reference(vm_map_pmap(parent)); | |
263 | result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable); | |
264 | if (result == NULL) | |
265 | panic("kmem_suballoc: cannot create submap"); | |
266 | if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS) | |
267 | panic("kmem_suballoc: unable to change range to submap"); | |
268 | return(result); | |
269 | } | |
270 | ||
271 | /* | |
272 | * vm_move: | |
273 | * | |
274 | * Move memory from source to destination map, possibly deallocating | |
275 | * the source map reference to the memory. | |
276 | * | |
277 | * Parameters are as follows: | |
278 | * | |
279 | * src_map Source address map | |
280 | * src_addr Address within source map | |
281 | * dst_map Destination address map | |
282 | * num_bytes Amount of data (in bytes) to copy/move | |
283 | * src_dealloc Should source be removed after copy? | |
284 | * | |
285 | * Assumes the src and dst maps are not already locked. | |
286 | * | |
287 | * Returns new destination address or 0 (if a failure occurs). | |
288 | */ | |
289 | vm_offset_t vm_move(src_map,src_addr,dst_map,num_bytes,src_dealloc) | |
290 | vm_map_t src_map; | |
291 | register vm_offset_t src_addr; | |
292 | register vm_map_t dst_map; | |
293 | vm_offset_t num_bytes; | |
294 | boolean_t src_dealloc; | |
295 | { | |
296 | register vm_offset_t src_start; /* Beginning of region */ | |
297 | register vm_size_t src_size; /* Size of rounded region */ | |
298 | vm_offset_t dst_start; /* destination address */ | |
299 | register int result; | |
300 | ||
301 | /* | |
302 | * Page-align the source region | |
303 | */ | |
304 | ||
305 | src_start = trunc_page(src_addr); | |
306 | src_size = round_page(src_addr + num_bytes) - src_start; | |
307 | ||
308 | /* | |
309 | * If there's no destination, we can be at most deallocating | |
310 | * the source range. | |
311 | */ | |
312 | if (dst_map == NULL) { | |
313 | if (src_dealloc) | |
314 | if (vm_deallocate(src_map, src_start, src_size) | |
315 | != KERN_SUCCESS) { | |
316 | printf("vm_move: deallocate of source"); | |
317 | printf(" failed, dealloc_only clause\n"); | |
318 | } | |
319 | return(0); | |
320 | } | |
321 | ||
322 | /* | |
323 | * Allocate a place to put the copy | |
324 | */ | |
325 | ||
326 | dst_start = (vm_offset_t) 0; | |
327 | if ((result = vm_allocate(dst_map, &dst_start, src_size, TRUE)) | |
328 | == KERN_SUCCESS) { | |
329 | /* | |
330 | * Perform the copy, asking for deallocation if desired | |
331 | */ | |
332 | result = vm_map_copy(dst_map, src_map, dst_start, src_size, | |
333 | src_start, FALSE, src_dealloc); | |
334 | } | |
335 | ||
336 | /* | |
337 | * Return the destination address corresponding to | |
338 | * the source address given (rather than the front | |
339 | * of the newly-allocated page). | |
340 | */ | |
341 | ||
342 | if (result == KERN_SUCCESS) | |
343 | return(dst_start + (src_addr - src_start)); | |
344 | return(0); | |
345 | } | |
346 | ||
347 | /* | |
348 | * Allocate wired-down memory in the kernel's address map for the higher | |
349 | * level kernel memory allocator (kern/kern_malloc.c). We cannot use | |
350 | * kmem_alloc() because we may need to allocate memory at interrupt | |
351 | * level where we cannot block (canwait == FALSE). | |
352 | * | |
353 | * This routine has its own private kernel submap (kmem_map) and object | |
354 | * (kmem_object). This, combined with the fact that only malloc uses | |
355 | * this routine, ensures that we will never block in map or object waits. | |
356 | * | |
357 | * Note that this still only works in a uni-processor environment and | |
358 | * when called at splhigh(). | |
359 | * | |
360 | * We don't worry about expanding the map (adding entries) since entries | |
361 | * for wired maps are statically allocated. | |
362 | */ | |
363 | vm_offset_t | |
364 | kmem_malloc(map, size, canwait) | |
365 | register vm_map_t map; | |
366 | register vm_size_t size; | |
367 | boolean_t canwait; | |
368 | { | |
369 | register vm_offset_t offset, i; | |
370 | vm_map_entry_t entry; | |
371 | vm_offset_t addr; | |
372 | vm_page_t m; | |
373 | extern vm_object_t kmem_object; | |
374 | ||
375 | if (map != kmem_map && map != mb_map) | |
376 | panic("kern_malloc_alloc: map != {kmem,mb}_map"); | |
377 | ||
378 | size = round_page(size); | |
379 | addr = vm_map_min(map); | |
380 | ||
381 | if (vm_map_find(map, NULL, (vm_offset_t)0, | |
382 | &addr, size, TRUE) != KERN_SUCCESS) { | |
383 | if (!canwait) { | |
384 | if (map == kmem_map) | |
385 | panic("kmem_malloc: kmem_map too small"); | |
386 | else if (map == mb_map) | |
50665e44 DG |
387 | log(LOG_WARNING, |
388 | "kmem_malloc: mb_map too small (can't wait)\n"); | |
15637ed4 RG |
389 | } |
390 | return 0; | |
391 | } | |
392 | ||
393 | /* | |
394 | * Since we didn't know where the new region would start, | |
395 | * we couldn't supply the correct offset into the kmem object. | |
396 | * Re-allocate that address region with the correct offset. | |
397 | */ | |
398 | offset = addr - vm_map_min(kmem_map); | |
399 | vm_object_reference(kmem_object); | |
400 | ||
401 | vm_map_lock(map); | |
402 | vm_map_delete(map, addr, addr + size); | |
403 | vm_map_insert(map, kmem_object, offset, addr, addr + size); | |
404 | ||
405 | /* | |
406 | * If we can wait, just mark the range as wired | |
407 | * (will fault pages as necessary). | |
408 | */ | |
409 | if (canwait) { | |
410 | vm_map_unlock(map); | |
411 | (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, | |
412 | FALSE); | |
413 | vm_map_simplify(map, addr); | |
414 | return(addr); | |
415 | } | |
416 | ||
417 | /* | |
418 | * If we cannot wait then we must allocate all memory up front, | |
419 | * pulling it off the active queue to prevent pageout. | |
420 | */ | |
421 | vm_object_lock(kmem_object); | |
422 | for (i = 0; i < size; i += PAGE_SIZE) { | |
423 | m = vm_page_alloc(kmem_object, offset + i); | |
424 | ||
425 | /* | |
426 | * Ran out of space, free everything up and return. | |
427 | * Don't need to lock page queues here as we know | |
428 | * that the pages we got aren't on any queues. | |
429 | */ | |
430 | if (m == NULL) { | |
431 | while (i != 0) { | |
432 | i -= PAGE_SIZE; | |
433 | m = vm_page_lookup(kmem_object, offset + i); | |
434 | vm_page_free(m); | |
435 | } | |
436 | vm_object_unlock(kmem_object); | |
437 | vm_map_delete(map, addr, addr + size); | |
438 | vm_map_unlock(map); | |
439 | return(0); | |
440 | } | |
441 | #if 0 | |
442 | vm_page_zero_fill(m); | |
443 | #endif | |
444 | m->busy = FALSE; | |
445 | } | |
446 | vm_object_unlock(kmem_object); | |
447 | ||
448 | /* | |
449 | * Mark map entry as non-pageable. | |
450 | * Assert: vm_map_insert() will never be able to extend the previous | |
451 | * entry so there will be a new entry exactly corresponding to this | |
452 | * address range and it will have wired_count == 0. | |
453 | */ | |
454 | if (!vm_map_lookup_entry(map, addr, &entry) || | |
455 | entry->start != addr || entry->end != addr + size || | |
456 | entry->wired_count) | |
457 | panic("kmem_malloc: entry not found or misaligned"); | |
458 | entry->wired_count++; | |
459 | ||
460 | /* | |
461 | * Loop thru pages, entering them in the pmap. | |
462 | * (We cannot add them to the wired count without | |
463 | * wrapping the vm_page_queue_lock in splimp...) | |
464 | */ | |
465 | for (i = 0; i < size; i += PAGE_SIZE) { | |
466 | vm_object_lock(kmem_object); | |
467 | m = vm_page_lookup(kmem_object, offset + i); | |
468 | vm_object_unlock(kmem_object); | |
469 | pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m), | |
470 | VM_PROT_DEFAULT, TRUE); | |
471 | } | |
472 | vm_map_unlock(map); | |
473 | ||
474 | vm_map_simplify(map, addr); | |
475 | return(addr); | |
476 | } | |
477 | ||
/*
 *	kmem_alloc_wait
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *	Returns 0 only when the request can never fit (it is larger than
 *	the whole submap); otherwise blocks until space appears.
 */
vm_offset_t kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);

	do {
		/*
		 *	To make this work for more than one map,
		 *	use the map's lock to lock out sleepers/wakers.
		 *	Unfortunately, vm_map_find also grabs the map lock,
		 *	hence the recursive-lock dance around it.
		 */
		vm_map_lock(map);
		lock_set_recursive(&map->lock);

		addr = vm_map_min(map);
		result = vm_map_find(map, NULL, (vm_offset_t) 0,
				&addr, size, TRUE);

		lock_clear_recursive(&map->lock);
		if (result != KERN_SUCCESS) {

			/*
			 * A request bigger than the entire submap can
			 * never succeed; fail rather than sleep forever.
			 */
			if ( (vm_map_max(map) - vm_map_min(map)) < size ) {
				vm_map_unlock(map);
				return(0);
			}

			/*
			 * Sleep on the map address while still holding
			 * the lock; kmem_free_wakeup() issues the
			 * matching thread_wakeup((int)map).
			 */
			assert_wait((int)map, TRUE);
			vm_map_unlock(map);
			thread_wakeup(&vm_pages_needed); /* XXX */
			thread_block();
		}
		else {
			vm_map_unlock(map);
		}

	} while (result != KERN_SUCCESS);

	return(addr);
}
528 | ||
/*
 *	kmem_alloc_wired_wait
 *
 *	Allocates nonpageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory in the
 *	submap.  Identical to kmem_alloc_wait() except that vm_map_find()
 *	is called with FALSE (non-pageable entry).  Returns 0 only when
 *	the request can never fit in the submap.
 */
vm_offset_t kmem_alloc_wired_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);

	do {
		/*
		 *	To make this work for more than one map,
		 *	use the map's lock to lock out sleepers/wakers.
		 *	Unfortunately, vm_map_find also grabs the map lock,
		 *	hence the recursive-lock dance around it.
		 */
		vm_map_lock(map);
		lock_set_recursive(&map->lock);

		addr = vm_map_min(map);
		result = vm_map_find(map, NULL, (vm_offset_t) 0,
				&addr, size, FALSE);

		lock_clear_recursive(&map->lock);
		if (result != KERN_SUCCESS) {

			/*
			 * A request bigger than the entire submap can
			 * never succeed; fail rather than sleep forever.
			 */
			if ( (vm_map_max(map) - vm_map_min(map)) < size ) {
				vm_map_unlock(map);
				return(0);
			}

			/*
			 * Sleep on the map address while still holding
			 * the lock; kmem_free_wakeup() issues the
			 * matching thread_wakeup((int)map).
			 */
			assert_wait((int)map, TRUE);
			vm_map_unlock(map);
			thread_wakeup(&vm_pages_needed); /* XXX */
			thread_block();
		}
		else {
			vm_map_unlock(map);
		}

	} while (result != KERN_SUCCESS);

	return(addr);
}
579 | ||
/*
 *	kmem_free_wakeup
 *
 *	Returns memory to a submap of the kernel, and wakes up any threads
 *	waiting for memory in that map (kmem_alloc_wait and
 *	kmem_alloc_wired_wait sleep on the map address).
 */
void kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	/*
	 * The wakeup is issued while still holding the map lock,
	 * pairing with the assert_wait done under the same lock in
	 * the kmem_alloc_*wait routines.
	 */
	thread_wakeup((int)map);
	vm_map_unlock(map);
	vm_map_simplify(map, addr);
}
597 | ||
598 | /* | |
599 | * kmem_init: | |
600 | * | |
601 | * Initialize the kernel's virtual memory map, taking | |
602 | * into account all memory allocated up to this time. | |
603 | */ | |
604 | void kmem_init(start, end) | |
605 | vm_offset_t start; | |
606 | vm_offset_t end; | |
607 | { | |
608 | vm_offset_t addr; | |
609 | extern vm_map_t kernel_map; | |
610 | ||
611 | addr = VM_MIN_KERNEL_ADDRESS; | |
612 | kernel_map = vm_map_create(pmap_kernel(), addr, end, FALSE); | |
613 | (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0, | |
614 | &addr, (start - VM_MIN_KERNEL_ADDRESS), | |
615 | FALSE); | |
616 | } |