/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	7.4 (Berkeley) 5/7/91
 *	$Id$
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include "param.h"
#include "syslog.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_pageout.h"
#include "vm_kern.h"

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	map must be "kernel_map" below.
 */

vm_offset_t kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

#if 0
	if (map != kernel_map)
		panic("kmem_alloc_pageable: not called with kernel_map");
#endif /* 0 */

	size = round_page(size);

	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			&addr, size, TRUE);
	if (result != KERN_SUCCESS) {
		return(0);
	}

	return(addr);
}
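
/*
 * Example usage (editorial sketch, not part of the original source;
 * the size and error handling here are hypothetical):
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, 16 * PAGE_SIZE);
 *	if (va == 0)
 *		panic("out of kernel virtual space");
 *
 * The returned range is backed lazily: pages are faulted in on first
 * touch rather than being wired up front.
 */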

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;
	register vm_offset_t offset;
	extern vm_object_t kernel_object;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.
	 * Assume that no region of the kernel object is
	 * referenced more than once.
	 */

	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			&addr, size, TRUE);
	if (result != KERN_SUCCESS) {
		return(0);
	}

	/*
	 * Since we didn't know where the new region would
	 * start, we couldn't supply the correct offset into
	 * the kernel object.  Re-allocate that address
	 * region with the correct offset.
	 */

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);

	vm_map_lock(map);
	vm_map_delete(map, addr, addr + size);
	vm_map_insert(map, kernel_object, offset, addr, addr + size);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object
	 * before calling vm_map_pageable.  This is to prevent the
	 * following scenario:
	 *
	 *	1) Threads have swapped out, so that there is a
	 *	   pager for the kernel_object.
	 *	2) The kmsg zone is empty, and so we are kmem_allocing
	 *	   a new page for it.
	 *	3) vm_map_pageable calls vm_fault; there is no page,
	 *	   but there is a pager, so we call
	 *	   pager_data_request.  But the kmsg zone is empty,
	 *	   so we must kmem_alloc.
	 *	4) goto 1
	 *	5) Even if the kmsg zone is not empty: when we get
	 *	   the data back from the pager, it will be (very
	 *	   stale) non-zero data.  kmem_alloc is defined to
	 *	   return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate
	 * to prevent a race with page-out.  vm_map_pageable will wire
	 * the pages.
	 */

	vm_object_lock(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		while ((mem = vm_page_alloc(kernel_object, offset + i)) == NULL) {
			vm_object_unlock(kernel_object);
			VM_WAIT;
			vm_object_lock(kernel_object);
		}
		vm_page_zero_fill(mem);
		mem->busy = FALSE;
	}
	vm_object_unlock(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	/*
	 * Try to coalesce the map
	 */

	vm_map_simplify(map, addr);

	return(addr);
}
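
/*
 * Example usage (editorial sketch; BUFPAGES is a hypothetical size):
 * kmem_alloc returns wired, zero-filled memory and is paired with
 * kmem_free for release:
 *
 *	vm_offset_t buf;
 *
 *	buf = kmem_alloc(kernel_map, BUFPAGES * PAGE_SIZE);
 *	if (buf == 0)
 *		return(0);
 *	...
 *	kmem_free(kernel_map, buf, BUFPAGES * PAGE_SIZE);
 *
 * kmem_alloc rounds the size itself, but the matching kmem_free must
 * cover the same rounded range.
 */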

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 */
void kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
	vm_map_simplify(map, addr);
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *	pageable	Can the region be paged
 */
vm_map_t kmem_suballoc(parent, min, max, size, pageable)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
	boolean_t pageable;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			min, size, TRUE);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return(result);
}
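
/*
 * Example usage (editorial sketch; foo_map and FOO_MAP_SIZE are
 * hypothetical): boot-time code carves a private submap out of
 * kernel_map, in the style of the kmem_map and mb_map setup:
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t foo_map;
 *
 *	foo_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *			FOO_MAP_SIZE, FALSE);
 *
 * On return, [minaddr, maxaddr) is reserved in kernel_map and managed
 * by foo_map.  kmem_suballoc panics rather than returning on failure,
 * which is acceptable only during initialization.
 */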

/*
 *	vm_move:
 *
 *	Move memory from source to destination map, possibly deallocating
 *	the source map reference to the memory.
 *
 *	Parameters are as follows:
 *
 *	src_map		Source address map
 *	src_addr	Address within source map
 *	dst_map		Destination address map
 *	num_bytes	Amount of data (in bytes) to copy/move
 *	src_dealloc	Should source be removed after copy?
 *
 *	Assumes the src and dst maps are not already locked.
 *
 *	Returns new destination address or 0 (if a failure occurs).
 */
vm_offset_t vm_move(src_map, src_addr, dst_map, num_bytes, src_dealloc)
	vm_map_t src_map;
	register vm_offset_t src_addr;
	register vm_map_t dst_map;
	vm_offset_t num_bytes;
	boolean_t src_dealloc;
{
	register vm_offset_t src_start;	/* Beginning of region */
	register vm_size_t src_size;	/* Size of rounded region */
	vm_offset_t dst_start;		/* destination address */
	register int result;

	/*
	 * Page-align the source region
	 */

	src_start = trunc_page(src_addr);
	src_size = round_page(src_addr + num_bytes) - src_start;

	/*
	 * If there's no destination, we can be at most deallocating
	 * the source range.
	 */
	if (dst_map == NULL) {
		if (src_dealloc)
			if (vm_deallocate(src_map, src_start, src_size)
					!= KERN_SUCCESS) {
				printf("vm_move: deallocate of source");
				printf(" failed, dealloc_only clause\n");
			}
		return(0);
	}

	/*
	 * Allocate a place to put the copy
	 */

	dst_start = (vm_offset_t) 0;
	if ((result = vm_allocate(dst_map, &dst_start, src_size, TRUE))
			== KERN_SUCCESS) {
		/*
		 * Perform the copy, asking for deallocation if desired
		 */
		result = vm_map_copy(dst_map, src_map, dst_start, src_size,
				src_start, FALSE, src_dealloc);
	}

	/*
	 * Return the destination address corresponding to
	 * the source address given (rather than the front
	 * of the newly-allocated page).
	 */

	if (result == KERN_SUCCESS)
		return(dst_start + (src_addr - src_start));
	return(0);
}
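
/*
 * Example usage (editorial sketch; task_map, src_addr, and len are
 * hypothetical): copy a range into the kernel map and drop the source
 * mapping in one call:
 *
 *	vm_offset_t dst;
 *
 *	dst = vm_move(task_map, src_addr, kernel_map, len, TRUE);
 *	if (dst == 0)
 *		...the copy failed...
 *
 * Note that dst points at the byte corresponding to src_addr, not at
 * the start of the page-aligned region that was actually allocated.
 */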

/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, canwait)
	register vm_map_t map;
	register vm_size_t size;
	boolean_t canwait;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	extern vm_object_t kmem_object;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	if (vm_map_find(map, NULL, (vm_offset_t)0,
			&addr, size, TRUE) != KERN_SUCCESS) {
		if (!canwait) {
			if (map == kmem_map)
				panic("kmem_malloc: kmem_map too small");
			else if (map == mb_map)
				log(LOG_WARNING,
				    "kmem_malloc: mb_map too small (can't wait)\n");
		}
		return(0);
	}

	/*
	 * Since we didn't know where the new region would start,
	 * we couldn't supply the correct offset into the kmem object.
	 * Re-allocate that address region with the correct offset.
	 */
	offset = addr - vm_map_min(kmem_map);
	vm_object_reference(kmem_object);

	vm_map_lock(map);
	vm_map_delete(map, addr, addr + size);
	vm_map_insert(map, kmem_object, offset, addr, addr + size);

	/*
	 * If we can wait, just mark the range as wired
	 * (will fault pages as necessary).
	 */
	if (canwait) {
		vm_map_unlock(map);
		(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
			FALSE);
		vm_map_simplify(map, addr);
		return(addr);
	}

	/*
	 * If we cannot wait then we must allocate all memory up front,
	 * pulling it off the active queue to prevent pageout.
	 */
	vm_object_lock(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc(kmem_object, offset + i);

		/*
		 * Ran out of space, free everything up and return.
		 * Don't need to lock page queues here as we know
		 * that the pages we got aren't on any queues.
		 */
		if (m == NULL) {
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object, offset + i);
				vm_page_free(m);
			}
			vm_object_unlock(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return(0);
		}
#if 0
		vm_page_zero_fill(m);
#endif
		m->busy = FALSE;
	}
	vm_object_unlock(kmem_object);

	/*
	 * Mark map entry as non-pageable.
	 * Assert: vm_map_insert() will never be able to extend the previous
	 * entry so there will be a new entry exactly corresponding to this
	 * address range and it will have wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	/*
	 * Loop thru pages, entering them in the pmap.
	 * (We cannot add them to the wired count without
	 * wrapping the vm_page_queue_lock in splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_object_lock(kmem_object);
		m = vm_page_lookup(kmem_object, offset + i);
		vm_object_unlock(kmem_object);
		pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m),
			VM_PROT_DEFAULT, TRUE);
	}
	vm_map_unlock(map);

	vm_map_simplify(map, addr);
	return(addr);
}
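
/*
 * Example usage (editorial sketch): the higher-level allocator in
 * kern/kern_malloc.c calls this routine roughly as follows, where
 * allocsize and flags are hypothetical stand-ins for its internals:
 *
 *	va = kmem_malloc(kmem_map, (vm_size_t)round_page(allocsize),
 *			!(flags & M_NOWAIT));
 *
 * With canwait == TRUE, a return of 0 means the submap's address
 * space was exhausted; with canwait == FALSE it can also mean no
 * free pages were available.
 */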

/*
 *	kmem_alloc_wait
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 */
vm_offset_t kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);

	do {
		/*
		 * To make this work for more than one map,
		 * use the map's lock to lock out sleepers/wakers.
		 * Unfortunately, vm_map_find also grabs the map lock.
		 */
		vm_map_lock(map);
		lock_set_recursive(&map->lock);

		addr = vm_map_min(map);
		result = vm_map_find(map, NULL, (vm_offset_t) 0,
				&addr, size, TRUE);

		lock_clear_recursive(&map->lock);
		if (result != KERN_SUCCESS) {
			if ((vm_map_max(map) - vm_map_min(map)) < size) {
				vm_map_unlock(map);
				return(0);
			}

			assert_wait((int)map, TRUE);
			vm_map_unlock(map);
			thread_wakeup(&vm_pages_needed);	/* XXX */
			thread_block();
		} else {
			vm_map_unlock(map);
		}
	} while (result != KERN_SUCCESS);

	return(addr);
}
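
/*
 * Example usage (editorial sketch; foo_map is a hypothetical submap
 * created with kmem_suballoc): allocations block until space frees up,
 * so they must be paired with kmem_free_wakeup (below) rather than
 * plain kmem_free, or sleepers will never be awakened:
 *
 *	addr = kmem_alloc_wait(foo_map, size);
 *	if (addr == 0)
 *		...the request exceeds the submap's total size...
 *	...
 *	kmem_free_wakeup(foo_map, addr, size);
 */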

/*
 *	kmem_alloc_wired_wait
 *
 *	Allocates nonpageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory in the
 *	submap.
 */
vm_offset_t kmem_alloc_wired_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);

	do {
		/*
		 * To make this work for more than one map,
		 * use the map's lock to lock out sleepers/wakers.
		 * Unfortunately, vm_map_find also grabs the map lock.
		 */
		vm_map_lock(map);
		lock_set_recursive(&map->lock);

		addr = vm_map_min(map);
		result = vm_map_find(map, NULL, (vm_offset_t) 0,
				&addr, size, FALSE);

		lock_clear_recursive(&map->lock);
		if (result != KERN_SUCCESS) {
			if ((vm_map_max(map) - vm_map_min(map)) < size) {
				vm_map_unlock(map);
				return(0);
			}

			assert_wait((int)map, TRUE);
			vm_map_unlock(map);
			thread_wakeup(&vm_pages_needed);	/* XXX */
			thread_block();
		} else {
			vm_map_unlock(map);
		}
	} while (result != KERN_SUCCESS);

	return(addr);
}

/*
 *	kmem_free_wakeup
 *
 *	Returns memory to a submap of the kernel, and wakes up any threads
 *	waiting for memory in that map.
 */
void kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	thread_wakeup((int)map);
	vm_map_unlock(map);
	vm_map_simplify(map, addr);
}

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void kmem_init(start, end)
	vm_offset_t start;
	vm_offset_t end;
{
	vm_offset_t addr;
	extern vm_map_t kernel_map;

	addr = VM_MIN_KERNEL_ADDRESS;
	kernel_map = vm_map_create(pmap_kernel(), addr, end, FALSE);
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
			&addr, (start - VM_MIN_KERNEL_ADDRESS),
			FALSE);
}
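
/*
 * Example usage (editorial sketch): machine-dependent startup code
 * calls kmem_init once, after the pmap layer has decided how much
 * kernel virtual space early bootstrap allocations consumed.  The
 * names virtual_avail and virtual_end follow the usual pmap
 * convention but are assumptions here:
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * Everything between VM_MIN_KERNEL_ADDRESS and start is entered into
 * kernel_map as a permanent region so that later allocations cannot
 * collide with it.
 */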