Commit | Line | Data |
---|---|---|
175f072e | 1 | /* |
ad0f93d2 KB | 2 | * Copyright (c) 1991, 1993 |
3 | * The Regents of the University of California. All rights reserved. | |
175f072e KM | 4 | * |
5 | * This code is derived from software contributed to Berkeley by | |
6 | * The Mach Operating System project at Carnegie-Mellon University. | |
7 | * | |
0e24ad83 | 8 | * %sccs.include.redist.c% |
175f072e | 9 | * |
5a11d901 | 10 | * @(#)vm_map.c 8.3 (Berkeley) %G% |
0e24ad83 KM | 11 | * |
12 | * | |
13 | * Copyright (c) 1987, 1990 Carnegie-Mellon University. | |
14 | * All rights reserved. | |
15 | * | |
16 | * Authors: Avadis Tevanian, Jr., Michael Wayne Young | |
17 | * | |
18 | * Permission to use, copy, modify and distribute this software and | |
19 | * its documentation is hereby granted, provided that both the copyright | |
20 | * notice and this permission notice appear in all copies of the | |
21 | * software, derivative works or modified versions, and any portions | |
22 | * thereof, and that both notices appear in supporting documentation. | |
23 | * | |
24 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
25 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND | |
26 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
27 | * | |
28 | * Carnegie Mellon requests users of this software to return to | |
29 | * | |
30 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
31 | * School of Computer Science | |
32 | * Carnegie Mellon University | |
33 | * Pittsburgh PA 15213-3890 | |
34 | * | |
35 | * any improvements or extensions that they make and grant Carnegie the | |
36 | * rights to redistribute these changes. | |
175f072e KM | 37 | */ |
38 | ||
39 | /* | |
40 | * Virtual memory mapping module. | |
41 | */ | |
42 | ||
73506ff8 KB | 43 | #include <sys/param.h> |
44 | #include <sys/systm.h> | |
45 | #include <sys/malloc.h> | |
46 | ||
47 | #include <vm/vm.h> | |
48 | #include <vm/vm_page.h> | |
49 | #include <vm/vm_object.h> | |
175f072e KM | 50 | |
51 | /* | |
52 | * Virtual memory maps provide for the mapping, protection, | |
53 | * and sharing of virtual memory objects. In addition, | |
54 | * this module provides for an efficient virtual copy of | |
55 | * memory from one map to another. | |
56 | * | |
57 | * Synchronization is required prior to most operations. | |
58 | * | |
59 | * Maps consist of an ordered doubly-linked list of simple | |
60 | * entries; a single hint is used to speed up lookups. | |
61 | * | |
62 | * In order to properly represent the sharing of virtual | |
63 | * memory regions among maps, the map structure is bi-level. | |
64 | * Top-level ("address") maps refer to regions of sharable | |
65 | * virtual memory. These regions are implemented as | |
66 | * ("sharing") maps, which then refer to the actual virtual | |
67 | * memory objects. When two address maps "share" memory, | |
68 | * their top-level maps both have references to the same | |
69 | * sharing map. When memory is virtual-copied from one | |
70 | * address map to another, the references in the sharing | |
71 | * maps are actually copied -- no copying occurs at the | |
72 | * virtual memory object level. | |
73 | * | |
74 | * Since portions of maps are specified by start/end addresses, | |
75 | * which may not align with existing map entries, all | |
76 | * routines merely "clip" entries to these start/end values. | |
77 | * [That is, an entry is split into two, bordering at a | |
78 | * start or end value.] Note that these clippings may not | |
79 | * always be necessary (as the two resulting entries are then | |
80 | * not changed); however, the clipping is done for convenience. | |
81 | * No attempt is currently made to "glue back together" two | |
82 | * abutting entries. | |
83 | * | |
84 | * As mentioned above, virtual copy operations are performed | |
85 | * by copying VM object references from one sharing map to | |
86 | * another, and then marking both regions as copy-on-write. | |
87 | * It is important to note that only one writeable reference | |
88 | * to a VM object region exists in any map -- this means that | |
89 | * shadow object creation can be delayed until a write operation | |
90 | * occurs. | |
91 | */ | |
92 | ||
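For orientation, here is a minimal sketch of the layout the comment above describes. It is illustrative only: the authoritative declarations live in vm/vm_map.h, and the names below (`example_map`, `example_entry`) are invented for exposition.

```c
/*
 * Illustrative sketch only -- not the declarations from <vm/vm_map.h>.
 */
struct example_entry {
	struct example_entry	*prev;		/* ordered doubly-linked list */
	struct example_entry	*next;
	vm_offset_t		start;		/* start of covered range */
	vm_offset_t		end;		/* end of covered range */
};

struct example_map {
	struct example_entry	header;		/* list sentinel: next == prev
						   == &header when empty */
	struct example_entry	*hint;		/* last entry found; speeds up
						   lookups near recent ones */
	int			nentries;	/* number of real entries */
};
```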
93 | /* | |
5d7b9ad3 | 94 | * vm_map_startup: |
175f072e KM | 95 | * |
96 | * Initialize the vm_map module. Must be called before | |
97 | * any other vm_map routines. | |
98 | * | |
99 | * Map and entry structures are allocated from the general | |
100 | * purpose memory pool with some exceptions: | |
101 | * | |
102 | * - The kernel map and kmem submap are allocated statically. | |
103 | * - Kernel map entries are allocated out of a static pool. | |
104 | * | |
105 | * These restrictions are necessary since malloc() uses the | |
106 | * maps and requires map entries. | |
107 | */ | |
108 | ||
109 | vm_offset_t kentry_data; | |
110 | vm_size_t kentry_data_size; | |
111 | vm_map_entry_t kentry_free; | |
112 | vm_map_t kmap_free; | |
113 | ||
73506ff8 KB | 114 | static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t)); |
115 | static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t)); | |
116 | ||
5d7b9ad3 | 117 | void vm_map_startup() |
175f072e KM | 118 | { |
119 | register int i; | |
120 | register vm_map_entry_t mep; | |
121 | vm_map_t mp; | |
122 | ||
123 | /* | |
124 | * Static map structures for allocation before initialization of | |
125 | * kernel map or kmem map. vm_map_create knows how to deal with them. | |
126 | */ | |
127 | kmap_free = mp = (vm_map_t) kentry_data; | |
128 | i = MAX_KMAP; | |
129 | while (--i > 0) { | |
130 | mp->header.next = (vm_map_entry_t) (mp + 1); | |
131 | mp++; | |
132 | } | |
5d7b9ad3 | 133 | mp++->header.next = NULL; |
175f072e KM | 134 | |
135 | /* | |
136 | * Form a free list of statically allocated kernel map entries | |
137 | * with the rest. | |
138 | */ | |
139 | kentry_free = mep = (vm_map_entry_t) mp; | |
140 | i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep; | |
141 | while (--i > 0) { | |
142 | mep->next = mep + 1; | |
143 | mep++; | |
144 | } | |
5d7b9ad3 MK | 145 | mep->next = NULL; |
146 | } | |
147 | ||
148 | /* | |
149 | * Allocate a vmspace structure, including a vm_map and pmap, | |
150 | * and initialize those structures. The refcnt is set to 1. | |
151 | * The remaining fields must be initialized by the caller. | |
152 | */ | |
153 | struct vmspace * | |
154 | vmspace_alloc(min, max, pageable) | |
155 | vm_offset_t min, max; | |
156 | int pageable; | |
157 | { | |
158 | register struct vmspace *vm; | |
159 | ||
160 | MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK); | |
161 | bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm); | |
162 | vm_map_init(&vm->vm_map, min, max, pageable); | |
163 | pmap_pinit(&vm->vm_pmap); | |
164 | vm->vm_map.pmap = &vm->vm_pmap; /* XXX */ | |
165 | vm->vm_refcnt = 1; | |
166 | return (vm); | |
167 | } | |
168 | ||
169 | void | |
170 | vmspace_free(vm) | |
171 | register struct vmspace *vm; | |
172 | { | |
173 | ||
174 | if (--vm->vm_refcnt == 0) { | |
175 | /* | |
176 | * Lock the map, to wait out all other references to it. | |
177 | * Delete all of the mappings and pages they hold, | |
178 | * then call the pmap module to reclaim anything left. | |
179 | */ | |
180 | vm_map_lock(&vm->vm_map); | |
181 | (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, | |
182 | vm->vm_map.max_offset); | |
183 | pmap_release(&vm->vm_pmap); | |
184 | FREE(vm, M_VMMAP); | |
185 | } | |
175f072e KM | 186 | } |
187 | ||
188 | /* | |
189 | * vm_map_create: | |
190 | * | |
191 | * Creates and returns a new, empty VM map that | |
192 | * uses the given physical map structure and has | |
193 | * the given lower and upper address bounds. | |
194 | */ | |
195 | vm_map_t vm_map_create(pmap, min, max, pageable) | |
196 | pmap_t pmap; | |
197 | vm_offset_t min, max; | |
198 | boolean_t pageable; | |
199 | { | |
200 | register vm_map_t result; | |
67e8af50 | 201 | extern vm_map_t kmem_map; |
175f072e | 202 | |
5d7b9ad3 | 203 | if (kmem_map == NULL) { |
175f072e KM | 204 | result = kmap_free; |
5d7b9ad3 MK | 205 | if (result == NULL) |
206 | panic("vm_map_create: out of maps"); | |
175f072e KM | 207 | kmap_free = (vm_map_t) result->header.next; |
175f072e KM | 208 | } else |
209 | MALLOC(result, vm_map_t, sizeof(struct vm_map), | |
210 | M_VMMAP, M_WAITOK); | |
211 | ||
5d7b9ad3 | 212 | vm_map_init(result, min, max, pageable); |
175f072e | 213 | result->pmap = pmap; |
175f072e KM | 214 | return(result); |
215 | } | |
216 | ||
5d7b9ad3 MK | 217 | /* |
218 | * Initialize an existing vm_map structure | |
219 | * such as that in the vmspace structure. | |
220 | * The pmap is set elsewhere. | |
221 | */ | |
222 | void | |
223 | vm_map_init(map, min, max, pageable) | |
224 | register struct vm_map *map; | |
225 | vm_offset_t min, max; | |
226 | boolean_t pageable; | |
227 | { | |
228 | map->header.next = map->header.prev = &map->header; | |
229 | map->nentries = 0; | |
230 | map->size = 0; | |
231 | map->ref_count = 1; | |
232 | map->is_main_map = TRUE; | |
233 | map->min_offset = min; | |
234 | map->max_offset = max; | |
235 | map->entries_pageable = pageable; | |
236 | map->first_free = &map->header; | |
237 | map->hint = &map->header; | |
238 | map->timestamp = 0; | |
239 | lock_init(&map->lock, TRUE); | |
240 | simple_lock_init(&map->ref_lock); | |
241 | simple_lock_init(&map->hint_lock); | |
242 | } | |
243 | ||
175f072e KM | 244 | /* |
245 | * vm_map_entry_create: [ internal use only ] | |
246 | * | |
247 | * Allocates a VM map entry for insertion. | |
248 | * No entry fields are filled in. | |
249 | */ | |
250 | vm_map_entry_t vm_map_entry_create(map) | |
251 | vm_map_t map; | |
252 | { | |
253 | vm_map_entry_t entry; | |
5a11d901 | 254 | #ifdef DEBUG |
931302a1 | 255 | extern vm_map_t kernel_map, kmem_map, mb_map, pager_map; |
5a11d901 | 256 | boolean_t isspecial; |
175f072e | 257 | |
5a11d901 MH | 258 | isspecial = (map == kernel_map || map == kmem_map || |
259 | map == mb_map || map == pager_map); | |
260 | if (isspecial && map->entries_pageable || | |
261 | !isspecial && !map->entries_pageable) | |
262 | panic("vm_map_entry_create: bogus map"); | |
263 | #endif | |
264 | if (map->entries_pageable) { | |
175f072e KM | 265 | MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry), |
266 | M_VMMAPENT, M_WAITOK); | |
5a11d901 MH | 267 | } else { |
268 | if (entry = kentry_free) | |
269 | kentry_free = kentry_free->next; | |
270 | } | |
5d7b9ad3 | 271 | if (entry == NULL) |
175f072e KM | 272 | panic("vm_map_entry_create: out of map entries"); |
273 | ||
274 | return(entry); | |
275 | } | |
276 | ||
277 | /* | |
278 | * vm_map_entry_dispose: [ internal use only ] | |
279 | * | |
280 | * Inverse of vm_map_entry_create. | |
281 | */ | |
282 | void vm_map_entry_dispose(map, entry) | |
283 | vm_map_t map; | |
284 | vm_map_entry_t entry; | |
285 | { | |
5a11d901 | 286 | #ifdef DEBUG |
931302a1 | 287 | extern vm_map_t kernel_map, kmem_map, mb_map, pager_map; |
5a11d901 | 288 | boolean_t isspecial; |
175f072e | 289 | |
5a11d901 MH | 290 | isspecial = (map == kernel_map || map == kmem_map || |
291 | map == mb_map || map == pager_map); | |
292 | if (isspecial && map->entries_pageable || | |
293 | !isspecial && !map->entries_pageable) | |
294 | panic("vm_map_entry_dispose: bogus map"); | |
295 | #endif | |
296 | if (map->entries_pageable) { | |
297 | FREE(entry, M_VMMAPENT); | |
298 | } else { | |
175f072e KM | 299 | entry->next = kentry_free; |
300 | kentry_free = entry; | |
5a11d901 | 301 | } |
175f072e KM | 302 | } |
303 | ||
304 | /* | |
305 | * vm_map_entry_{un,}link: | |
306 | * | |
307 | * Insert/remove entries from maps. | |
308 | */ | |
309 | #define vm_map_entry_link(map, after_where, entry) \ | |
310 | { \ | |
311 | (map)->nentries++; \ | |
312 | (entry)->prev = (after_where); \ | |
313 | (entry)->next = (after_where)->next; \ | |
314 | (entry)->prev->next = (entry); \ | |
315 | (entry)->next->prev = (entry); \ | |
316 | } | |
317 | #define vm_map_entry_unlink(map, entry) \ | |
318 | { \ | |
319 | (map)->nentries--; \ | |
320 | (entry)->next->prev = (entry)->prev; \ | |
321 | (entry)->prev->next = (entry)->next; \ | |
322 | } | |
323 | ||
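Because the entry list is circular and `&map->header` serves as a sentinel, neither macro needs a NULL check or an empty-list special case. The standalone, user-level sketch below (hypothetical names, not kernel code) exercises the same pointer manipulation:

```c
#include <assert.h>

struct node { struct node *prev, *next; };

/* Same pointer dance as vm_map_entry_link. */
static void
link_after(after, n)
	struct node *after, *n;
{
	n->prev = after;
	n->next = after->next;
	n->prev->next = n;
	n->next->prev = n;
}

/* And as vm_map_entry_unlink. */
static void
unlink_node(n)
	struct node *n;
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
}

int
main()
{
	struct node head, a;

	head.prev = head.next = &head;	/* empty: sentinel points at itself */
	link_after(&head, &a);
	assert(head.next == &a && a.next == &head);
	unlink_node(&a);
	assert(head.next == &head && head.prev == &head);
	return (0);
}
```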
324 | /* | |
325 | * vm_map_reference: | |
326 | * | |
327 | * Creates another valid reference to the given map. | |
328 | * | |
329 | */ | |
330 | void vm_map_reference(map) | |
331 | register vm_map_t map; | |
332 | { | |
5d7b9ad3 | 333 | if (map == NULL) |
175f072e KM | 334 | return; |
335 | ||
336 | simple_lock(&map->ref_lock); | |
337 | map->ref_count++; | |
338 | simple_unlock(&map->ref_lock); | |
339 | } | |
340 | ||
341 | /* | |
342 | * vm_map_deallocate: | |
343 | * | |
344 | * Removes a reference from the specified map, | |
345 | * destroying it if no references remain. | |
346 | * The map should not be locked. | |
347 | */ | |
348 | void vm_map_deallocate(map) | |
349 | register vm_map_t map; | |
350 | { | |
351 | register int c; | |
352 | ||
5d7b9ad3 | 353 | if (map == NULL) |
175f072e KM | 354 | return; |
355 | ||
356 | simple_lock(&map->ref_lock); | |
357 | c = --map->ref_count; | |
358 | simple_unlock(&map->ref_lock); | |
359 | ||
360 | if (c > 0) { | |
361 | return; | |
362 | } | |
363 | ||
364 | /* | |
365 | * Lock the map, to wait out all other references | |
366 | * to it. | |
367 | */ | |
368 | ||
369 | vm_map_lock(map); | |
370 | ||
371 | (void) vm_map_delete(map, map->min_offset, map->max_offset); | |
372 | ||
373 | pmap_destroy(map->pmap); | |
374 | ||
375 | FREE(map, M_VMMAP); | |
376 | } | |
377 | ||
378 | /* | |
5a11d901 | 379 | * vm_map_insert: |
175f072e KM | 380 | * |
381 | * Inserts the given whole VM object into the target | |
382 | * map at the specified address range. The object's | |
383 | * size should match that of the address range. | |
384 | * | |
385 | * Requires that the map be locked, and leaves it so. | |
386 | */ | |
73506ff8 | 387 | int |
175f072e KM | 388 | vm_map_insert(map, object, offset, start, end) |
389 | vm_map_t map; | |
390 | vm_object_t object; | |
391 | vm_offset_t offset; | |
392 | vm_offset_t start; | |
393 | vm_offset_t end; | |
394 | { | |
395 | register vm_map_entry_t new_entry; | |
396 | register vm_map_entry_t prev_entry; | |
397 | vm_map_entry_t temp_entry; | |
398 | ||
399 | /* | |
400 | * Check that the start and end points are not bogus. | |
401 | */ | |
402 | ||
403 | if ((start < map->min_offset) || (end > map->max_offset) || | |
404 | (start >= end)) | |
405 | return(KERN_INVALID_ADDRESS); | |
406 | ||
407 | /* | |
408 | * Find the entry prior to the proposed | |
409 | * starting address; if it's part of an | |
410 | * existing entry, this range is bogus. | |
411 | */ | |
412 | ||
413 | if (vm_map_lookup_entry(map, start, &temp_entry)) | |
414 | return(KERN_NO_SPACE); | |
415 | ||
416 | prev_entry = temp_entry; | |
417 | ||
418 | /* | |
419 | * Assert that the next entry doesn't overlap the | |
420 | * end point. | |
421 | */ | |
422 | ||
423 | if ((prev_entry->next != &map->header) && | |
424 | (prev_entry->next->start < end)) | |
425 | return(KERN_NO_SPACE); | |
426 | ||
427 | /* | |
428 | * See if we can avoid creating a new entry by | |
429 | * extending one of our neighbors. | |
430 | */ | |
431 | ||
5d7b9ad3 | 432 | if (object == NULL) { |
175f072e KM | 433 | if ((prev_entry != &map->header) && |
434 | (prev_entry->end == start) && | |
435 | (map->is_main_map) && | |
436 | (prev_entry->is_a_map == FALSE) && | |
437 | (prev_entry->is_sub_map == FALSE) && | |
438 | (prev_entry->inheritance == VM_INHERIT_DEFAULT) && | |
439 | (prev_entry->protection == VM_PROT_DEFAULT) && | |
440 | (prev_entry->max_protection == VM_PROT_DEFAULT) && | |
441 | (prev_entry->wired_count == 0)) { | |
442 | ||
443 | if (vm_object_coalesce(prev_entry->object.vm_object, | |
5d7b9ad3 | 444 | NULL, |
175f072e KM | 445 | prev_entry->offset, |
446 | (vm_offset_t) 0, | |
447 | (vm_size_t)(prev_entry->end | |
448 | - prev_entry->start), | |
449 | (vm_size_t)(end - prev_entry->end))) { | |
450 | /* | |
451 | * Coalesced the two objects - can extend | |
452 | * the previous map entry to include the | |
453 | * new range. | |
454 | */ | |
455 | map->size += (end - prev_entry->end); | |
456 | prev_entry->end = end; | |
457 | return(KERN_SUCCESS); | |
458 | } | |
459 | } | |
460 | } | |
461 | ||
462 | /* | |
463 | * Create a new entry | |
464 | */ | |
465 | ||
466 | new_entry = vm_map_entry_create(map); | |
467 | new_entry->start = start; | |
468 | new_entry->end = end; | |
469 | ||
470 | new_entry->is_a_map = FALSE; | |
471 | new_entry->is_sub_map = FALSE; | |
472 | new_entry->object.vm_object = object; | |
473 | new_entry->offset = offset; | |
474 | ||
475 | new_entry->copy_on_write = FALSE; | |
476 | new_entry->needs_copy = FALSE; | |
477 | ||
478 | if (map->is_main_map) { | |
479 | new_entry->inheritance = VM_INHERIT_DEFAULT; | |
480 | new_entry->protection = VM_PROT_DEFAULT; | |
481 | new_entry->max_protection = VM_PROT_DEFAULT; | |
482 | new_entry->wired_count = 0; | |
483 | } | |
484 | ||
485 | /* | |
486 | * Insert the new entry into the list | |
487 | */ | |
488 | ||
489 | vm_map_entry_link(map, prev_entry, new_entry); | |
490 | map->size += new_entry->end - new_entry->start; | |
491 | ||
492 | /* | |
493 | * Update the free space hint | |
494 | */ | |
495 | ||
496 | if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start)) | |
497 | map->first_free = new_entry; | |
498 | ||
499 | return(KERN_SUCCESS); | |
500 | } | |
501 | ||
502 | /* | |
503 | * SAVE_HINT: | |
504 | * | |
505 | * Saves the specified entry as the hint for | |
506 | * future lookups. Performs necessary interlocks. | |
507 | */ | |
508 | #define SAVE_HINT(map,value) \ | |
509 | simple_lock(&(map)->hint_lock); \ | |
510 | (map)->hint = (value); \ | |
511 | simple_unlock(&(map)->hint_lock); | |
512 | ||
513 | /* | |
514 | * vm_map_lookup_entry: [ internal use only ] | |
515 | * | |
516 | * Finds the map entry containing (or | |
517 | * immediately preceding) the specified address | |
518 | * in the given map; the entry is returned | |
519 | * in the "entry" parameter. The boolean | |
520 | * result indicates whether the address is | |
521 | * actually contained in the map. | |
522 | */ | |
523 | boolean_t vm_map_lookup_entry(map, address, entry) | |
524 | register vm_map_t map; | |
525 | register vm_offset_t address; | |
526 | vm_map_entry_t *entry; /* OUT */ | |
527 | { | |
528 | register vm_map_entry_t cur; | |
529 | register vm_map_entry_t last; | |
530 | ||
531 | /* | |
532 | * Start looking either from the head of the | |
533 | * list, or from the hint. | |
534 | */ | |
535 | ||
536 | simple_lock(&map->hint_lock); | |
537 | cur = map->hint; | |
538 | simple_unlock(&map->hint_lock); | |
539 | ||
540 | if (cur == &map->header) | |
541 | cur = cur->next; | |
542 | ||
543 | if (address >= cur->start) { | |
544 | /* | |
545 | * Go from hint to end of list. | |
546 | * | |
547 | * But first, make a quick check to see if | |
548 | * we are already looking at the entry we | |
549 | * want (which is usually the case). | |
550 | * Note also that we don't need to save the hint | |
551 | * here... it is the same hint (unless we are | |
552 | * at the header, in which case the hint didn't | |
553 | * buy us anything anyway). | |
554 | */ | |
555 | last = &map->header; | |
556 | if ((cur != last) && (cur->end > address)) { | |
557 | *entry = cur; | |
558 | return(TRUE); | |
559 | } | |
560 | } | |
561 | else { | |
562 | /* | |
563 | * Go from start to hint, *inclusively* | |
564 | */ | |
565 | last = cur->next; | |
566 | cur = map->header.next; | |
567 | } | |
568 | ||
569 | /* | |
570 | * Search linearly | |
571 | */ | |
572 | ||
573 | while (cur != last) { | |
574 | if (cur->end > address) { | |
575 | if (address >= cur->start) { | |
576 | /* | |
577 | * Save this lookup for future | |
578 | * hints, and return | |
579 | */ | |
580 | ||
581 | *entry = cur; | |
582 | SAVE_HINT(map, cur); | |
583 | return(TRUE); | |
584 | } | |
585 | break; | |
586 | } | |
587 | cur = cur->next; | |
588 | } | |
589 | *entry = cur->prev; | |
590 | SAVE_HINT(map, *entry); | |
591 | return(FALSE); | |
592 | } | |
593 | ||
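Callers combine the boolean result with the OUT parameter in a recurring lookup-then-clip idiom; vm_map_submap, vm_map_protect, and vm_map_inherit below all follow it. Here is a hedged sketch of that idiom as a hypothetical helper (not a routine in this file, and it uses the vm_map_clip_start macro defined further down; the map must already be locked):

```c
/* Hypothetical helper illustrating the lookup-then-clip idiom. */
static vm_map_entry_t
first_entry_from(map, start)
	vm_map_t map;
	vm_offset_t start;
{
	vm_map_entry_t entry;

	if (vm_map_lookup_entry(map, start, &entry)) {
		/*
		 * start lies inside an existing entry: split it so
		 * that entry->start == start.
		 */
		vm_map_clip_start(map, entry, start);
	} else {
		/* start falls in a hole: begin at the successor. */
		entry = entry->next;
	}
	return(entry);
}
```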
fc8007a4 CT | 594 | /* |
595 | * Find sufficient space for `length' bytes in the given map, starting at | |
596 | * `start'. The map must be locked. Returns 0 on success, 1 on no space. | |
597 | */ | |
598 | int | |
599 | vm_map_findspace(map, start, length, addr) | |
600 | register vm_map_t map; | |
601 | register vm_offset_t start; | |
602 | vm_size_t length; | |
603 | vm_offset_t *addr; | |
604 | { | |
605 | register vm_map_entry_t entry, next; | |
606 | register vm_offset_t end; | |
607 | ||
608 | if (start < map->min_offset) | |
609 | start = map->min_offset; | |
610 | if (start > map->max_offset) | |
611 | return (1); | |
612 | ||
613 | /* | |
614 | * Look for the first possible address; if there's already | |
615 | * something at this address, we have to start after it. | |
616 | */ | |
617 | if (start == map->min_offset) { | |
618 | if ((entry = map->first_free) != &map->header) | |
619 | start = entry->end; | |
620 | } else { | |
621 | vm_map_entry_t tmp; | |
622 | if (vm_map_lookup_entry(map, start, &tmp)) | |
623 | start = tmp->end; | |
624 | entry = tmp; | |
625 | } | |
626 | ||
627 | /* | |
628 | * Look through the rest of the map, trying to fit a new region in | |
629 | * the gap between existing regions, or after the very last region. | |
630 | */ | |
631 | for (;; start = (entry = next)->end) { | |
632 | /* | |
633 | * Find the end of the proposed new region. Be sure we didn't | |
634 | * go beyond the end of the map, or wrap around the address; | |
635 | * if so, we lose. Otherwise, if this is the last entry, or | |
636 | * if the proposed new region fits before the next entry, we | |
637 | * win. | |
638 | */ | |
639 | end = start + length; | |
640 | if (end > map->max_offset || end < start) | |
641 | return (1); | |
642 | next = entry->next; | |
643 | if (next == &map->header || next->start >= end) | |
644 | break; | |
645 | } | |
646 | SAVE_HINT(map, entry); | |
647 | *addr = start; | |
648 | return (0); | |
649 | } | |
650 | ||
175f072e KM | 651 | /* |
652 | * vm_map_find finds an unallocated region in the target address | |
653 | * map with the given length. The search is defined to be | |
654 | * first-fit from the specified address; the region found is | |
655 | * returned in the same parameter. | |
656 | * | |
657 | */ | |
73506ff8 | 658 | int |
175f072e KM | 659 | vm_map_find(map, object, offset, addr, length, find_space) |
660 | vm_map_t map; | |
661 | vm_object_t object; | |
662 | vm_offset_t offset; | |
663 | vm_offset_t *addr; /* IN/OUT */ | |
664 | vm_size_t length; | |
665 | boolean_t find_space; | |
666 | { | |
175f072e | 667 | register vm_offset_t start; |
175f072e KM | 668 | int result; |
669 | ||
670 | start = *addr; | |
175f072e | 671 | vm_map_lock(map); |
175f072e | 672 | if (find_space) { |
fc8007a4 | 673 | if (vm_map_findspace(map, start, length, addr)) { |
175f072e KM | 674 | vm_map_unlock(map); |
675 | return (KERN_NO_SPACE); | |
676 | } | |
fc8007a4 | 677 | start = *addr; |
175f072e | 678 | } |
175f072e | 679 | result = vm_map_insert(map, object, offset, start, start + length); |
175f072e | 680 | vm_map_unlock(map); |
fc8007a4 | 681 | return (result); |
175f072e KM | 682 | } |
683 | ||
684 | /* | |
685 | * vm_map_simplify_entry: [ internal use only ] | |
686 | * | |
687 | * Simplify the given map entry by: | |
688 | * removing extra sharing maps | |
689 | * [XXX maybe later] merging with a neighbor | |
690 | */ | |
691 | void vm_map_simplify_entry(map, entry) | |
692 | vm_map_t map; | |
693 | vm_map_entry_t entry; | |
694 | { | |
695 | #ifdef lint | |
696 | map++; | |
1524bcb8 | 697 | #endif |
175f072e KM | 698 | |
699 | /* | |
700 | * If this entry corresponds to a sharing map, then | |
701 | * see if we can remove the level of indirection. | |
702 | * If it's not a sharing map, then it points to | |
703 | * a VM object, so see if we can merge with either | |
704 | * of our neighbors. | |
705 | */ | |
706 | ||
707 | if (entry->is_sub_map) | |
708 | return; | |
709 | if (entry->is_a_map) { | |
710 | #if 0 | |
711 | vm_map_t my_share_map; | |
712 | int count; | |
713 | ||
714 | my_share_map = entry->object.share_map; | |
715 | simple_lock(&my_share_map->ref_lock); | |
716 | count = my_share_map->ref_count; | |
717 | simple_unlock(&my_share_map->ref_lock); | |
718 | ||
719 | if (count == 1) { | |
720 | /* Can move the region from | |
721 | * entry->start to entry->end (+ entry->offset) | |
722 | * in my_share_map into place of entry. | |
723 | * Later. | |
724 | */ | |
725 | } | |
1524bcb8 | 726 | #endif |
175f072e KM | 727 | } |
728 | else { | |
729 | /* | |
730 | * Try to merge with our neighbors. | |
731 | * | |
732 | * Conditions for merge are: | |
733 | * | |
734 | * 1. entries are adjacent. | |
735 | * 2. both entries point to objects | |
736 | * with null pagers. | |
737 | * | |
738 | * If a merge is possible, we replace the two | |
739 | * entries with a single entry, then merge | |
740 | * the two objects into a single object. | |
741 | * | |
742 | * Now, all that is left to do is write the | |
743 | * code! | |
744 | */ | |
745 | } | |
746 | } | |
747 | ||
748 | /* | |
749 | * vm_map_clip_start: [ internal use only ] | |
750 | * | |
751 | * Asserts that the given entry begins at or after | |
752 | * the specified address; if necessary, | |
753 | * it splits the entry into two. | |
754 | */ | |
755 | #define vm_map_clip_start(map, entry, startaddr) \ | |
756 | { \ | |
757 | if (startaddr > entry->start) \ | |
758 | _vm_map_clip_start(map, entry, startaddr); \ | |
759 | } | |
760 | ||
761 | /* | |
762 | * This routine is called only when it is known that | |
763 | * the entry must be split. | |
764 | */ | |
73506ff8 | 765 | static void _vm_map_clip_start(map, entry, start) |
175f072e KM | 766 | register vm_map_t map; |
767 | register vm_map_entry_t entry; | |
768 | register vm_offset_t start; | |
769 | { | |
770 | register vm_map_entry_t new_entry; | |
771 | ||
772 | /* | |
773 | * See if we can simplify this entry first | |
774 | */ | |
775 | ||
776 | vm_map_simplify_entry(map, entry); | |
777 | ||
778 | /* | |
779 | * Split off the front portion -- | |
780 | * note that we must insert the new | |
781 | * entry BEFORE this one, so that | |
782 | * this entry has the specified starting | |
783 | * address. | |
784 | */ | |
785 | ||
786 | new_entry = vm_map_entry_create(map); | |
787 | *new_entry = *entry; | |
788 | ||
789 | new_entry->end = start; | |
790 | entry->offset += (start - entry->start); | |
791 | entry->start = start; | |
792 | ||
793 | vm_map_entry_link(map, entry->prev, new_entry); | |
794 | ||
795 | if (entry->is_a_map || entry->is_sub_map) | |
796 | vm_map_reference(new_entry->object.share_map); | |
797 | else | |
798 | vm_object_reference(new_entry->object.vm_object); | |
799 | } | |
800 | ||
801 | /* | |
802 | * vm_map_clip_end: [ internal use only ] | |
803 | * | |
804 | * Asserts that the given entry ends at or before | |
805 | * the specified address; if necessary, | |
806 | * it splits the entry into two. | |
807 | */ | |
808 | ||
175f072e KM | 809 | #define vm_map_clip_end(map, entry, endaddr) \ |
810 | { \ | |
811 | if (endaddr < entry->end) \ | |
812 | _vm_map_clip_end(map, entry, endaddr); \ | |
813 | } | |
814 | ||
815 | /* | |
816 | * This routine is called only when it is known that | |
817 | * the entry must be split. | |
818 | */ | |
73506ff8 | 819 | static void _vm_map_clip_end(map, entry, end) |
175f072e KM | 820 | register vm_map_t map; |
821 | register vm_map_entry_t entry; | |
822 | register vm_offset_t end; | |
823 | { | |
824 | register vm_map_entry_t new_entry; | |
825 | ||
826 | /* | |
827 | * Create a new entry and insert it | |
828 | * AFTER the specified entry | |
829 | */ | |
830 | ||
831 | new_entry = vm_map_entry_create(map); | |
832 | *new_entry = *entry; | |
833 | ||
834 | new_entry->start = entry->end = end; | |
835 | new_entry->offset += (end - entry->start); | |
836 | ||
837 | vm_map_entry_link(map, entry, new_entry); | |
838 | ||
839 | if (entry->is_a_map || entry->is_sub_map) | |
840 | vm_map_reference(new_entry->object.share_map); | |
841 | else | |
842 | vm_object_reference(new_entry->object.vm_object); | |
843 | } | |
844 | ||
845 | /* | |
846 | * VM_MAP_RANGE_CHECK: [ internal use only ] | |
847 | * | |
848 | * Asserts that the starting and ending region | |
849 | * addresses fall within the valid range of the map. | |
850 | */ | |
851 | #define VM_MAP_RANGE_CHECK(map, start, end) \ | |
852 | { \ | |
853 | if (start < vm_map_min(map)) \ | |
854 | start = vm_map_min(map); \ | |
855 | if (end > vm_map_max(map)) \ | |
856 | end = vm_map_max(map); \ | |
857 | if (start > end) \ | |
858 | start = end; \ | |
859 | } | |
860 | ||
861 | /* | |
862 | * vm_map_submap: [ kernel use only ] | |
863 | * | |
864 | * Mark the given range as handled by a subordinate map. | |
865 | * | |
866 | * This range must have been created with vm_map_find, | |
867 | * and no other operations may have been performed on this | |
868 | * range prior to calling vm_map_submap. | |
869 | * | |
870 | * Only a limited number of operations can be performed | |
871 | * within this range after calling vm_map_submap: | |
872 | * vm_fault | |
873 | * [Don't try vm_map_copy!] | |
874 | * | |
875 | * To remove a submapping, one must first remove the | |
876 | * range from the superior map, and then destroy the | |
877 | * submap (if desired). [Better yet, don't try it.] | |
878 | */ | |
73506ff8 | 879 | int |
175f072e KM | 880 | vm_map_submap(map, start, end, submap) |
881 | register vm_map_t map; | |
882 | register vm_offset_t start; | |
883 | register vm_offset_t end; | |
884 | vm_map_t submap; | |
885 | { | |
886 | vm_map_entry_t entry; | |
887 | register int result = KERN_INVALID_ARGUMENT; | |
888 | ||
889 | vm_map_lock(map); | |
890 | ||
891 | VM_MAP_RANGE_CHECK(map, start, end); | |
892 | ||
893 | if (vm_map_lookup_entry(map, start, &entry)) { | |
894 | vm_map_clip_start(map, entry, start); | |
895 | } | |
896 | else | |
897 | entry = entry->next; | |
898 | ||
899 | vm_map_clip_end(map, entry, end); | |
900 | ||
901 | if ((entry->start == start) && (entry->end == end) && | |
902 | (!entry->is_a_map) && | |
5d7b9ad3 | 903 | (entry->object.vm_object == NULL) && |
175f072e KM | 904 | (!entry->copy_on_write)) { |
905 | entry->is_a_map = FALSE; | |
906 | entry->is_sub_map = TRUE; | |
907 | vm_map_reference(entry->object.sub_map = submap); | |
908 | result = KERN_SUCCESS; | |
909 | } | |
910 | vm_map_unlock(map); | |
911 | ||
912 | return(result); | |
913 | } | |
914 | ||
915 | /* | |
916 | * vm_map_protect: | |
917 | * | |
918 | * Sets the protection of the specified address | |
919 | * region in the target map. If "set_max" is | |
920 | * specified, the maximum protection is to be set; | |
921 | * otherwise, only the current protection is affected. | |
922 | */ | |
73506ff8 | 923 | int |
175f072e KM | 924 | vm_map_protect(map, start, end, new_prot, set_max) |
925 | register vm_map_t map; | |
926 | register vm_offset_t start; | |
927 | register vm_offset_t end; | |
928 | register vm_prot_t new_prot; | |
929 | register boolean_t set_max; | |
930 | { | |
931 | register vm_map_entry_t current; | |
932 | vm_map_entry_t entry; | |
933 | ||
934 | vm_map_lock(map); | |
935 | ||
936 | VM_MAP_RANGE_CHECK(map, start, end); | |
937 | ||
938 | if (vm_map_lookup_entry(map, start, &entry)) { | |
939 | vm_map_clip_start(map, entry, start); | |
940 | } | |
941 | else | |
942 | entry = entry->next; | |
943 | ||
944 | /* | |
945 | * Make a first pass to check for protection | |
946 | * violations. | |
947 | */ | |
948 | ||
949 | current = entry; | |
950 | while ((current != &map->header) && (current->start < end)) { | |
951 | if (current->is_sub_map) { |
952 | vm_map_unlock(map); return(KERN_INVALID_ARGUMENT); } |
953 | if ((new_prot & current->max_protection) != new_prot) { | |
954 | vm_map_unlock(map); | |
955 | return(KERN_PROTECTION_FAILURE); | |
956 | } | |
957 | ||
958 | current = current->next; | |
959 | } | |
960 | ||
961 | /* | |
962 | * Go back and fix up protections. | |
963 | * [Note that clipping is not necessary the second time.] | |
964 | */ | |
965 | ||
966 | current = entry; | |
967 | ||
968 | while ((current != &map->header) && (current->start < end)) { | |
969 | vm_prot_t old_prot; | |
970 | ||
971 | vm_map_clip_end(map, current, end); | |
972 | ||
973 | old_prot = current->protection; | |
974 | if (set_max) | |
975 | current->protection = | |
976 | (current->max_protection = new_prot) & | |
977 | old_prot; | |
978 | else | |
979 | current->protection = new_prot; | |
980 | ||
981 | /* | |
982 | * Update physical map if necessary. | |
983 | * Worry about copy-on-write here -- CHECK THIS XXX | |
984 | */ | |
985 | ||
986 | if (current->protection != old_prot) { | |
987 | ||
988 | #define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \ | |
989 | VM_PROT_ALL) | |
990 | #define max(a,b) ((a) > (b) ? (a) : (b)) | |
991 | ||
992 | if (current->is_a_map) { | |
993 | vm_map_entry_t share_entry; | |
994 | vm_offset_t share_end; | |
995 | ||
996 | vm_map_lock(current->object.share_map); | |
997 | (void) vm_map_lookup_entry( | |
998 | current->object.share_map, | |
999 | current->offset, | |
1000 | &share_entry); | |
1001 | share_end = current->offset + | |
1002 | (current->end - current->start); | |
1003 | while ((share_entry != | |
1004 | &current->object.share_map->header) && | |
1005 | (share_entry->start < share_end)) { | |
1006 | ||
1007 | pmap_protect(map->pmap, | |
1008 | (max(share_entry->start, | |
1009 | current->offset) - | |
1010 | current->offset + | |
1011 | current->start), | |
1012 | min(share_entry->end, | |
1013 | share_end) - | |
1014 | current->offset + | |
1015 | current->start, | |
1016 | current->protection & | |
1017 | MASK(share_entry)); | |
1018 | ||
1019 | share_entry = share_entry->next; | |
1020 | } | |
1021 | vm_map_unlock(current->object.share_map); | |
1022 | } | |
1023 | else | |
1024 | pmap_protect(map->pmap, current->start, | |
1025 | current->end, | |
1026 | current->protection & MASK(current)); | |
1027 | #undef max | |
1028 | #undef MASK | |
1029 | } | |
1030 | current = current->next; | |
1031 | } | |
1032 | ||
1033 | vm_map_unlock(map); | |
1034 | return(KERN_SUCCESS); | |
1035 | } | |
1036 | ||
1037 | /* | |
1038 | * vm_map_inherit: | |
1039 | * | |
1040 | * Sets the inheritance of the specified address | |
1041 | * range in the target map. Inheritance | |
1042 | * affects how the map will be shared with | |
1043 | * child maps at the time of vm_map_fork. | |
1044 | */ | |
73506ff8 | 1045 | int |
175f072e KM | 1046 | vm_map_inherit(map, start, end, new_inheritance) |
1047 | register vm_map_t map; | |
1048 | register vm_offset_t start; | |
1049 | register vm_offset_t end; | |
1050 | register vm_inherit_t new_inheritance; | |
1051 | { | |
1052 | register vm_map_entry_t entry; | |
1053 | vm_map_entry_t temp_entry; | |
1054 | ||
1055 | switch (new_inheritance) { | |
1056 | case VM_INHERIT_NONE: | |
1057 | case VM_INHERIT_COPY: | |
1058 | case VM_INHERIT_SHARE: | |
1059 | break; | |
1060 | default: | |
1061 | return(KERN_INVALID_ARGUMENT); | |
1062 | } | |
1063 | ||
1064 | vm_map_lock(map); | |
1065 | ||
1066 | VM_MAP_RANGE_CHECK(map, start, end); | |
1067 | ||
1068 | if (vm_map_lookup_entry(map, start, &temp_entry)) { | |
1069 | entry = temp_entry; | |
1070 | vm_map_clip_start(map, entry, start); | |
1071 | } | |
1072 | else | |
1073 | entry = temp_entry->next; | |
1074 | ||
1075 | while ((entry != &map->header) && (entry->start < end)) { | |
1076 | vm_map_clip_end(map, entry, end); | |
1077 | ||
1078 | entry->inheritance = new_inheritance; | |
1079 | ||
1080 | entry = entry->next; | |
1081 | } | |
1082 | ||
1083 | vm_map_unlock(map); | |
1084 | return(KERN_SUCCESS); | |
1085 | } | |
1086 | ||
1087 | /* | |
1088 | * vm_map_pageable: | |
1089 | * | |
1090 | * Sets the pageability of the specified address | |
1091 | * range in the target map. Regions specified | |
1092 | * as not pageable require locked-down physical | |
1093 | * memory and physical page maps. | |
1094 | * | |
1095 | * The map must not be locked, but a reference | |
1096 | * must remain to the map throughout the call. | |
1097 | */ | |
73506ff8 | 1098 | int |
175f072e KM | 1099 | vm_map_pageable(map, start, end, new_pageable) |
1100 | register vm_map_t map; | |
1101 | register vm_offset_t start; | |
1102 | register vm_offset_t end; | |
1103 | register boolean_t new_pageable; | |
1104 | { | |
1105 | register vm_map_entry_t entry; | |
b5246c21 | 1106 | vm_map_entry_t start_entry; |
d4f41bb2 MH | 1107 | register vm_offset_t failed; |
1108 | int rv; | |
175f072e KM | 1109 | |
1110 | vm_map_lock(map); | |
1111 | ||
1112 | VM_MAP_RANGE_CHECK(map, start, end); | |
1113 | ||
1114 | /* | |
1115 | * Only one pageability change may take place at one | |
1116 | * time, since vm_fault assumes it will be called | |
1117 | * only once for each wiring/unwiring. Therefore, we | |
1118 | * have to make sure we're actually changing the pageability | |
1119 | * for the entire region. We do so before making any changes. | |
1120 | */ | |
1121 | ||
b5246c21 MH | 1122 | if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { |
1123 | vm_map_unlock(map); | |
1124 | return(KERN_INVALID_ADDRESS); | |
175f072e | 1125 | } |
b5246c21 | 1126 | entry = start_entry; |
175f072e KM | 1127 | |
1128 | /* | |
1129 | * Actions are rather different for wiring and unwiring, | |
1130 | * so we have two separate cases. | |
1131 | */ | |
1132 | ||
1133 | if (new_pageable) { | |
1134 | ||
b5246c21 MH | 1135 | vm_map_clip_start(map, entry, start); |
1136 | ||
175f072e KM | 1137 | /* |
1138 | * Unwiring. First ensure that the range to be | |
b5246c21 MH | 1139 | * unwired is really wired down and that there |
1140 | * are no holes. | |
175f072e KM | 1141 | */ |
1142 | while ((entry != &map->header) && (entry->start < end)) { | |
1143 | ||
b5246c21 MH | 1144 | if (entry->wired_count == 0 || |
1145 | (entry->end < end && | |
1146 | (entry->next == &map->header || | |
1147 | entry->next->start > entry->end))) { | |
175f072e KM | 1148 | vm_map_unlock(map); |
1149 | return(KERN_INVALID_ARGUMENT); | |
1150 | } | |
1151 | entry = entry->next; | |
1152 | } | |
1153 | ||
1154 | /* | |
1155 | * Now decrement the wiring count for each region. | |
1156 | * If a region becomes completely unwired, | |
1157 | * unwire its physical pages and mappings. | |
1158 | */ | |
1159 | lock_set_recursive(&map->lock); | |
1160 | ||
b5246c21 | 1161 | entry = start_entry; |
175f072e KM | 1162 | while ((entry != &map->header) && (entry->start < end)) { |
1163 | vm_map_clip_end(map, entry, end); | |
1164 | ||
1165 | entry->wired_count--; | |
1166 | if (entry->wired_count == 0) | |
1167 | vm_fault_unwire(map, entry->start, entry->end); | |
1168 | ||
1169 | entry = entry->next; | |
1170 | } | |
1171 | lock_clear_recursive(&map->lock); | |
1172 | } | |
1173 | ||
1174 | else { | |
1175 | /* | |
1176 | * Wiring. We must do this in two passes: | |
1177 | * | |
b5246c21 MH | 1178 | * 1. Holding the write lock, we create any shadow |
1179 | * or zero-fill objects that need to be created. | |
1180 | * Then we clip each map entry to the region to be | |
1181 | * wired and increment its wiring count. We | |
1182 | * create objects before clipping the map entries | |
1183 | * to avoid object proliferation. | |
175f072e KM | 1184 | * |
1185 | * 2. We downgrade to a read lock, and call | |
1186 | * vm_fault_wire to fault in the pages for any | |
1187 | * newly wired area (wired_count is 1). | |
1188 | * | |
1189 | * Downgrading to a read lock for vm_fault_wire avoids | |
1190 | * a possible deadlock with another thread that may have | |
1191 | * faulted on one of the pages to be wired (it would mark | |
1192 | * the page busy, blocking us, then in turn block on the | |
1193 | * map lock that we hold). Because of problems in the | |
1194 | * recursive lock package, we cannot upgrade to a write | |
1195 | * lock in vm_map_lookup. Thus, any actions that require | |
1196 | * the write lock must be done beforehand. Because we | |
1197 | * keep the read lock on the map, the copy-on-write status | |
1198 | * of the entries we modify here cannot change. | |
1199 | */ | |
1200 | ||
1201 | /* | |
1202 | * Pass 1. | |
1203 | */ | |
175f072e | 1204 | while ((entry != &map->header) && (entry->start < end)) { |
b5246c21 | 1205 | if (entry->wired_count == 0) { |
175f072e KM | 1206 | |
1207 | /* | |
1208 | * Perform actions of vm_map_lookup that need | |
1209 | * the write lock on the map: create a shadow | |
1210 | * object for a copy-on-write region, or an | |
1211 | * object for a zero-fill region. | |
1212 | * | |
1213 | * We don't have to do this for entries that | |
1214 | * point to sharing maps, because we won't hold | |
1215 | * the lock on the sharing map. | |
1216 | */ | |
1217 | if (!entry->is_a_map) { | |
1218 | if (entry->needs_copy && | |
1219 | ((entry->protection & VM_PROT_WRITE) != 0)) { | |
1220 | ||
1221 | vm_object_shadow(&entry->object.vm_object, | |
1222 | &entry->offset, | |
1223 | (vm_size_t)(entry->end | |
1224 | - entry->start)); | |
1225 | entry->needs_copy = FALSE; | |
1226 | } | |
5d7b9ad3 | 1227 | else if (entry->object.vm_object == NULL) { |
175f072e KM | 1228 | entry->object.vm_object = |
1229 | vm_object_allocate((vm_size_t)(entry->end | |
1230 | - entry->start)); | |
1231 | entry->offset = (vm_offset_t)0; | |
1232 | } | |
1233 | } | |
1234 | } | |
b5246c21 MH | 1235 | vm_map_clip_start(map, entry, start); |
1236 | vm_map_clip_end(map, entry, end); | |
1237 | entry->wired_count++; | |
175f072e | 1238 | |
b5246c21 MH | 1239 | /* |
1240 | * Check for holes | |
1241 | */ | |
1242 | if (entry->end < end && | |
1243 | (entry->next == &map->header || | |
1244 | entry->next->start > entry->end)) { | |
1245 | /* | |
1246 | * Found one. Object creation actions | |
1247 | * do not need to be undone, but the | |
1248 | * wired counts need to be restored. | |
1249 | */ | |
1250 | while (entry != &map->header && entry->end > start) { | |
1251 | entry->wired_count--; | |
1252 | entry = entry->prev; | |
1253 | } | |
1254 | vm_map_unlock(map); | |
1255 | return(KERN_INVALID_ARGUMENT); | |
1256 | } | |
175f072e KM | 1257 | entry = entry->next; |
1258 | } | |
1259 | ||
1260 | /* | |
1261 | * Pass 2. | |
1262 | */ | |
1263 | ||
1264 | /* | |
1265 | * HACK HACK HACK HACK | |
1266 | * | |
1267 | * If we are wiring in the kernel map or a submap of it, | |
1268 | * unlock the map to avoid deadlocks. We trust that the | |
1269 | * kernel threads are well-behaved, and therefore will | |
1270 | * not do anything destructive to this region of the map | |
1271 | * while we have it unlocked. We cannot trust user threads | |
1272 | * to do the same. | |
1273 | * | |
1274 | * HACK HACK HACK HACK | |
1275 | */ | |
1276 | if (vm_map_pmap(map) == kernel_pmap) { | |
1277 | vm_map_unlock(map); /* trust me ... */ | |
1278 | } | |
1279 | else { | |
1280 | lock_set_recursive(&map->lock); | |
1281 | lock_write_to_read(&map->lock); | |
1282 | } | |
1283 | ||
d4f41bb2 | 1284 | rv = 0; |
b5246c21 | 1285 | entry = start_entry; |
175f072e | 1286 | while (entry != &map->header && entry->start < end) { |
d4f41bb2 MH | 1287 | /* |
1288 | * If vm_fault_wire fails for any page we need to | |
1289 | * undo what has been done. We decrement the wiring | |
1290 | * count for those pages which have not yet been | |
1291 | * wired (now) and unwire those that have (later). | |
1292 | * | |
1293 | * XXX this violates the locking protocol on the map, | |
1294 | * needs to be fixed. | |
1295 | */ | |
1296 | if (rv) | |
1297 | entry->wired_count--; | |
1298 | else if (entry->wired_count == 1) { | |
1299 | rv = vm_fault_wire(map, entry->start, entry->end); | |
1300 | if (rv) { | |
1301 | failed = entry->start; | |
1302 | entry->wired_count--; | |
1303 | } | |
175f072e KM | 1304 | } |
1305 | entry = entry->next; | |
1306 | } | |
1307 | ||
1308 | if (vm_map_pmap(map) == kernel_pmap) { | |
1309 | vm_map_lock(map); | |
1310 | } | |
1311 | else { | |
1312 | lock_clear_recursive(&map->lock); | |
1313 | } | |
d4f41bb2 MH | 1314 | if (rv) { |
1315 | vm_map_unlock(map); | |
1316 | (void) vm_map_pageable(map, start, failed, TRUE); | |
1317 | return(rv); | |
1318 | } | |
175f072e KM | 1319 | } |
1320 | ||
1321 | vm_map_unlock(map); | |
1322 | ||
1323 | return(KERN_SUCCESS); | |
1324 | } | |
1325 | ||
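As a hedged usage sketch, a hypothetical caller that wires a range down, works on it, and then restores pageability might look like this (FALSE requests wiring, TRUE makes the range pageable again):

```c
/* Hypothetical caller of vm_map_pageable; not a routine in this file. */
static int
wire_range(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	int rv;

	rv = vm_map_pageable(map, start, end, FALSE);	/* wire down */
	if (rv != KERN_SUCCESS)
		return(rv);
	/* ... pages in [start, end) are now resident and wired ... */
	return(vm_map_pageable(map, start, end, TRUE));	/* unwire */
}
```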
5a11d901 MH | 1326 | /* |
1327 | * vm_map_clean | |
1328 | * | |
1329 | * Push any dirty cached pages in the address range to their pager. | |
1330 | * If syncio is TRUE, dirty pages are written synchronously. | |
1331 | * If invalidate is TRUE, any cached pages are freed as well. | |
1332 | * | |
1333 | * Returns an error if any part of the specified range is not mapped. | |
1334 | */ | |
1335 | int | |
1336 | vm_map_clean(map, start, end, syncio, invalidate) | |
1337 | vm_map_t map; | |
1338 | vm_offset_t start; | |
1339 | vm_offset_t end; | |
1340 | boolean_t syncio; | |
1341 | boolean_t invalidate; | |
1342 | { | |
1343 | register vm_map_entry_t current; | |
1344 | vm_map_entry_t entry; | |
1345 | vm_size_t size; | |
1346 | vm_object_t object; | |
1347 | vm_offset_t offset; | |
1348 | ||
1349 | vm_map_lock_read(map); | |
1350 | VM_MAP_RANGE_CHECK(map, start, end); | |
1351 | if (!vm_map_lookup_entry(map, start, &entry)) { | |
1352 | vm_map_unlock_read(map); | |
1353 | return(KERN_INVALID_ADDRESS); | |
1354 | } | |
1355 | ||
1356 | /* | |
1357 | * Make a first pass to check for holes. | |
1358 | */ | |
1359 | for (current = entry; current->start < end; current = current->next) { | |
1360 | if (current->is_sub_map) { | |
1361 | vm_map_unlock_read(map); | |
1362 | return(KERN_INVALID_ARGUMENT); | |
1363 | } | |
1364 | if (end > current->end && | |
1365 | (current->next == &map->header || | |
1366 | current->end != current->next->start)) { | |
1367 | vm_map_unlock_read(map); | |
1368 | return(KERN_INVALID_ADDRESS); | |
1369 | } | |
1370 | } | |
1371 | ||
1372 | /* | |
1373 | * Make a second pass, cleaning/uncaching pages from the indicated | |
1374 | * objects as we go. | |
1375 | */ | |
1376 | for (current = entry; current->start < end; current = current->next) { | |
1377 | offset = current->offset + (start - current->start); | |
1378 | size = (end <= current->end ? end : current->end) - start; | |
1379 | if (current->is_a_map) { | |
1380 | register vm_map_t smap; | |
1381 | vm_map_entry_t tentry; | |
1382 | vm_size_t tsize; | |
1383 | ||
1384 | smap = current->object.share_map; | |
1385 | vm_map_lock_read(smap); | |
1386 | (void) vm_map_lookup_entry(smap, offset, &tentry); | |
1387 | tsize = tentry->end - offset; | |
1388 | if (tsize < size) | |
1389 | size = tsize; | |
1390 | object = tentry->object.vm_object; | |
1391 | offset = tentry->offset + (offset - tentry->start); | |
1392 | vm_object_lock(object); | |
1393 | vm_map_unlock_read(smap); | |
1394 | } else { | |
1395 | object = current->object.vm_object; | |
1396 | vm_object_lock(object); | |
1397 | } | |
1398 | /* | |
1399 | * Flush pages if writing is allowed. | |
1400 | * XXX should we continue on an error? | |
1401 | */ | |
1402 | if ((current->protection & VM_PROT_WRITE) && | |
1403 | !vm_object_page_clean(object, offset, offset+size, | |
1404 | syncio, FALSE)) { | |
1405 | vm_object_unlock(object); | |
1406 | vm_map_unlock_read(map); | |
1407 | return(KERN_FAILURE); | |
1408 | } | |
1409 | if (invalidate) | |
1410 | vm_object_page_remove(object, offset, offset+size); | |
1411 | vm_object_unlock(object); | |
1412 | start += size; | |
1413 | } | |
1414 | ||
1415 | vm_map_unlock_read(map); | |
1416 | return(KERN_SUCCESS); | |
1417 | } | |
1418 | ||
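An msync(2)-style consumer of vm_map_clean might look like the hypothetical wrapper below; trunc_page and round_page are assumed to be the usual page-rounding macros, and the real system-call plumbing lives elsewhere:

```c
/* Hypothetical wrapper around vm_map_clean; assumes trunc_page/round_page. */
static int
flush_range(map, addr, len)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t len;
{
	return(vm_map_clean(map, trunc_page(addr), round_page(addr + len),
	    TRUE,	/* syncio: wait for the writes to complete */
	    FALSE));	/* invalidate: leave the pages cached */
}
```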
175f072e KM | 1419 | /* |
1420 | * vm_map_entry_unwire: [ internal use only ] | |
1421 | * | |
1422 | * Make the region specified by this entry pageable. | |
1423 | * | |
1424 | * The map in question should be locked. | |
1425 | * [This is the reason for this routine's existence.] | |
1426 | */ | |
1427 | void vm_map_entry_unwire(map, entry) | |
1428 | vm_map_t map; | |
1429 | register vm_map_entry_t entry; | |
1430 | { | |
1431 | vm_fault_unwire(map, entry->start, entry->end); | |
1432 | entry->wired_count = 0; | |
1433 | } | |
1434 | ||
1435 | /* | |
1436 | * vm_map_entry_delete: [ internal use only ] | |
1437 | * | |
1438 | * Deallocate the given entry from the target map. | |
1439 | */ | |
1440 | void vm_map_entry_delete(map, entry) | |
1441 | register vm_map_t map; | |
1442 | register vm_map_entry_t entry; | |
1443 | { | |
1444 | if (entry->wired_count != 0) | |
1445 | vm_map_entry_unwire(map, entry); | |
1446 | ||
1447 | vm_map_entry_unlink(map, entry); | |
1448 | map->size -= entry->end - entry->start; | |
1449 | ||
1450 | if (entry->is_a_map || entry->is_sub_map) | |
1451 | vm_map_deallocate(entry->object.share_map); | |
1452 | else | |
1453 | vm_object_deallocate(entry->object.vm_object); | |
1454 | ||
1455 | vm_map_entry_dispose(map, entry); | |
1456 | } | |
1457 | ||
1458 | /* | |
1459 | * vm_map_delete: [ internal use only ] | |
1460 | * | |
1461 | * Deallocates the given address range from the target | |
1462 | * map. | |
1463 | * | |
1464 | * When called with a sharing map, removes pages from | |
1465 | * that region from all physical maps. | |
1466 | */ | |
73506ff8 | 1467 | int |
175f072e KM | 1468 | vm_map_delete(map, start, end) |
1469 | register vm_map_t map; | |
1470 | vm_offset_t start; | |
1471 | register vm_offset_t end; | |
1472 | { | |
1473 | register vm_map_entry_t entry; | |
1474 | vm_map_entry_t first_entry; | |
1475 | ||
1476 | /* | |
1477 | * Find the start of the region, and clip it | |
1478 | */ | |
1479 | ||
1480 | if (!vm_map_lookup_entry(map, start, &first_entry)) | |
1481 | entry = first_entry->next; | |
1482 | else { | |
1483 | entry = first_entry; | |
1484 | vm_map_clip_start(map, entry, start); | |
1485 | ||
1486 | /* | |
1487 | * Fix the lookup hint now, rather than each | |
1488 | * time through the loop. | |
1489 | */ | |
1490 | ||
1491 | SAVE_HINT(map, entry->prev); | |
1492 | } | |
1493 | ||
1494 | /* | |
1495 | * Save the free space hint | |
1496 | */ | |
1497 | ||
1498 | if (map->first_free->start >= start) | |
1499 | map->first_free = entry->prev; | |
1500 | ||
1501 | /* | |
1502 | * Step through all entries in this region | |
1503 | */ | |
1504 | ||
1505 | while ((entry != &map->header) && (entry->start < end)) { | |
1506 | vm_map_entry_t next; | |
1507 | register vm_offset_t s, e; | |
1508 | register vm_object_t object; | |
1509 | ||
1510 | vm_map_clip_end(map, entry, end); | |
1511 | ||
1512 | next = entry->next; | |
1513 | s = entry->start; | |
1514 | e = entry->end; | |
1515 | ||
1516 | /* | |
1517 | * Unwire before removing addresses from the pmap; | |
1518 | * otherwise, unwiring will put the entries back in | |
1519 | * the pmap. | |
1520 | */ | |
1521 | ||
1522 | object = entry->object.vm_object; | |
1523 | if (entry->wired_count != 0) | |
1524 | vm_map_entry_unwire(map, entry); | |
1525 | ||
1526 | /* | |
1527 | * If this is a sharing map, we must remove | |
1528 | * *all* references to this data, since we can't | |
1529 | * find all of the physical maps which are sharing | |
1530 | * it. | |
1531 | */ | |
1532 | ||
1533 | if (object == kernel_object || object == kmem_object) | |
1534 | vm_object_page_remove(object, entry->offset, | |
1535 | entry->offset + (e - s)); | |
1536 | else if (!map->is_main_map) | |
1537 | vm_object_pmap_remove(object, | |
1538 | entry->offset, | |
1539 | entry->offset + (e - s)); | |
1540 | else | |
1541 | pmap_remove(map->pmap, s, e); | |
1542 | ||
1543 | /* | |
1544 | * Delete the entry (which may delete the object) | |
1545 | * only after removing all pmap entries pointing | |
1546 | * to its pages. (Otherwise, its page frames may | |
1547 | * be reallocated, and any modify bits will be | |
1548 | * set in the wrong object!) | |
1549 | */ | |
1550 | ||
1551 | vm_map_entry_delete(map, entry); | |
1552 | entry = next; | |
1553 | } | |
1554 | return(KERN_SUCCESS); | |
1555 | } | |
1556 | ||
1557 | /* | |
1558 | * vm_map_remove: | |
1559 | * | |
1560 | * Remove the given address range from the target map. | |
1561 | * This is the exported form of vm_map_delete. | |
1562 | */ | |
73506ff8 | 1563 | int |
175f072e KM | 1564 | vm_map_remove(map, start, end) |
1565 | register vm_map_t map; | |
1566 | register vm_offset_t start; | |
1567 | register vm_offset_t end; | |
1568 | { | |
1569 | register int result; | |
1570 | ||
1571 | vm_map_lock(map); | |
1572 | VM_MAP_RANGE_CHECK(map, start, end); | |
1573 | result = vm_map_delete(map, start, end); | |
1574 | vm_map_unlock(map); | |
1575 | ||
1576 | return(result); | |
1577 | } | |
1578 | ||
1579 | /* | |
1580 | * vm_map_check_protection: | |
1581 | * | |
1582 | * Assert that the target map allows the specified | |
1583 | * privilege on the entire address region given. | |
1584 | * The entire region must be allocated. | |
1585 | */ | |
1586 | boolean_t vm_map_check_protection(map, start, end, protection) | |
1587 | register vm_map_t map; | |
1588 | register vm_offset_t start; | |
1589 | register vm_offset_t end; | |
1590 | register vm_prot_t protection; | |
1591 | { | |
1592 | register vm_map_entry_t entry; | |
1593 | vm_map_entry_t tmp_entry; | |
1594 | ||
1595 | if (!vm_map_lookup_entry(map, start, &tmp_entry)) { | |
1596 | return(FALSE); | |
1597 | } | |
1598 | ||
1599 | entry = tmp_entry; | |
1600 | ||
1601 | while (start < end) { | |
1602 | if (entry == &map->header) { | |
1603 | return(FALSE); | |
1604 | } | |
1605 | ||
1606 | /* | |
1607 | * No holes allowed! | |
1608 | */ | |
1609 | ||
1610 | if (start < entry->start) { | |
1611 | return(FALSE); | |
1612 | } | |
1613 | ||
1614 | /* | |
1615 | * Check protection associated with entry. | |
1616 | */ | |
1617 | ||
1618 | if ((entry->protection & protection) != protection) { | |
1619 | return(FALSE); | |
1620 | } | |
1621 | ||
1622 | /* go to next entry */ | |
1623 | ||
1624 | start = entry->end; | |
1625 | entry = entry->next; | |
1626 | } | |
1627 | return(TRUE); | |
1628 | } | |
1629 | ||
1630 | /* | |
1631 | * vm_map_copy_entry: | |
1632 | * | |
1633 | * Copies the contents of the source entry to the destination | |
1634 | * entry. The entries *must* be aligned properly. | |
1635 | */ | |
1636 | void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry) | |
1637 | vm_map_t src_map, dst_map; | |
1638 | register vm_map_entry_t src_entry, dst_entry; | |
1639 | { | |
1640 | vm_object_t temp_object; | |
1641 | ||
1642 | if (src_entry->is_sub_map || dst_entry->is_sub_map) | |
1643 | return; | |
1644 | ||
5d7b9ad3 | 1645 | if (dst_entry->object.vm_object != NULL && |
224765a4 | 1646 | (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0) |
175f072e KM | 1647 | printf("vm_map_copy_entry: copying over permanent data!\n"); |
1648 | ||
1649 | /* | |
1650 | * If our destination map was wired down, | |
1651 | * unwire it now. | |
1652 | */ | |
1653 | ||
1654 | if (dst_entry->wired_count != 0) | |
1655 | vm_map_entry_unwire(dst_map, dst_entry); | |
1656 | ||
1657 | /* | |
1658 | * If we're dealing with a sharing map, we | |
1659 | * must remove the destination pages from | |
1660 | * all maps (since we cannot know which maps | |
1661 | * this sharing map belongs in). | |
1662 | */ | |
1663 | ||
1664 | if (dst_map->is_main_map) | |
1665 | pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end); | |
1666 | else | |
1667 | vm_object_pmap_remove(dst_entry->object.vm_object, | |
1668 | dst_entry->offset, | |
1669 | dst_entry->offset + | |
1670 | (dst_entry->end - dst_entry->start)); | |
1671 | ||
1672 | if (src_entry->wired_count == 0) { | |
1673 | ||
1674 | boolean_t src_needs_copy; | |
1675 | ||
1676 | /* | |
1677 | * If the source entry is marked needs_copy, | |
1678 | * it is already write-protected. | |
1679 | */ | |
1680 | if (!src_entry->needs_copy) { | |
1681 | ||
1682 | boolean_t su; | |
1683 | ||
1684 | /* | |
1685 | * If the source entry has only one mapping, | |
1686 | * we can just protect the virtual address | |
1687 | * range. | |
1688 | */ | |
1689 | if (!(su = src_map->is_main_map)) { | |
1690 | simple_lock(&src_map->ref_lock); | |
1691 | su = (src_map->ref_count == 1); | |
1692 | simple_unlock(&src_map->ref_lock); | |
1693 | } | |
1694 | ||
1695 | if (su) { | |
1696 | pmap_protect(src_map->pmap, | |
1697 | src_entry->start, | |
1698 | src_entry->end, | |
1699 | src_entry->protection & ~VM_PROT_WRITE); | |
1700 | } | |
1701 | else { | |
1702 | vm_object_pmap_copy(src_entry->object.vm_object, | |
1703 | src_entry->offset, | |
1704 | src_entry->offset + (src_entry->end | |
1705 | -src_entry->start)); | |
1706 | } | |
1707 | } | |
1708 | ||
1709 | /* | |
1710 | * Make a copy of the object. | |
1711 | */ | |
1712 | temp_object = dst_entry->object.vm_object; | |
1713 | vm_object_copy(src_entry->object.vm_object, | |
1714 | src_entry->offset, | |
1715 | (vm_size_t)(src_entry->end - | |
1716 | src_entry->start), | |
1717 | &dst_entry->object.vm_object, | |
1718 | &dst_entry->offset, | |
1719 | &src_needs_copy); | |
1720 | /* | |
1721 | * If we didn't get a copy-object now, mark the | |
1722 | * source map entry so that a shadow will be created | |
1723 | * to hold its changed pages. | |
1724 | */ | |
1725 | if (src_needs_copy) | |
1726 | src_entry->needs_copy = TRUE; | |
1727 | ||
1728 | /* | |
1729 | * The destination always needs to have a shadow | |
1730 | * created. | |
1731 | */ | |
1732 | dst_entry->needs_copy = TRUE; | |
1733 | ||
1734 | /* | |
1735 | * Mark the entries copy-on-write, so that write-enabling | |
1736 | * the entry won't make copy-on-write pages writable. | |
1737 | */ | |
1738 | src_entry->copy_on_write = TRUE; | |
1739 | dst_entry->copy_on_write = TRUE; | |
1740 | /* | |
1741 | * Get rid of the old object. | |
1742 | */ | |
1743 | vm_object_deallocate(temp_object); | |
1744 | ||
1745 | pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, | |
1746 | dst_entry->end - dst_entry->start, src_entry->start); | |
1747 | } | |
1748 | else { | |
1749 | /* | |
1750 | * Of course, wired-down pages can't be set copy-on-write. | |
1751 | * Cause wired pages to be copied into the new | |
1752 | * map by simulating faults (the new pages are | |
1753 | * pageable). | |
1754 | */ | |
1755 | vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); | |
1756 | } | |
1757 | } | |
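
Stripped of the pmap and locking details, the unwired branch above is a bookkeeping exercise: write-protect the source, let both entries reference one object, and mark both `needs_copy`/`copy_on_write` so the first write fault allocates a shadow. A toy sketch of just that bookkeeping, assuming the symmetric outcome where `vm_object_copy` reports `src_needs_copy == TRUE` (every type below is an illustrative stand-in, not the kernel's):

```c
#include <stdlib.h>

/* Illustrative stand-ins -- the real types live in vm/vm_map.h. */
struct object { int ref_count; };

struct entry {
	struct object *object;
	int needs_copy;		/* defer the real copy to the first write */
	int copy_on_write;	/* write-enabling must not expose pages   */
	int write_protected;	/* stands in for pmap_protect(~WRITE)     */
};

/*
 * The unwired branch of vm_map_copy_entry, reduced to its flag
 * discipline: both entries end up referencing the same object
 * read-only, and both promise to shadow on write.
 */
static void
cow_copy(struct entry *src, struct entry *dst)
{
	/* "Get rid of the old object." */
	if (dst->object != NULL && --dst->object->ref_count == 0)
		free(dst->object);

	src->write_protected = 1;	/* no more silent writes to src  */
	dst->object = src->object;
	dst->object->ref_count++;	/* one reference per entry       */

	src->needs_copy = dst->needs_copy = 1;
	src->copy_on_write = dst->copy_on_write = 1;
}

int
main(void)
{
	struct object *o = malloc(sizeof(*o));
	struct entry src = { o, 0, 0, 0 }, dst = { NULL, 0, 0, 0 };

	o->ref_count = 1;
	cow_copy(&src, &dst);
	/* o->ref_count == 2: shared read-only until a write fault */
	free(o);
	return (0);
}
```
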
1758 | ||
1759 | /* | |
1760 | * vm_map_copy: | |
1761 | * | |
1762 | * Perform a virtual memory copy from the source | |
1763 | * address map/range to the destination map/range. | |
1764 | * | |
1765 | * If src_destroy or dst_alloc is requested, | |
1766 | * the source and destination regions should be | |
1767 | * disjoint, not only in the top-level map, but | |
1768 | * in the sharing maps as well. [The best way | |
1769 | * to guarantee this is to use a new intermediate | |
1770 | * map to make copies. This also reduces map | |
1771 | * fragmentation.] | |
1772 | */ | |
73506ff8 | 1773 | int |
175f072e KM | 1774 | vm_map_copy(dst_map, src_map, |
1775 | dst_addr, len, src_addr, | |
1776 | dst_alloc, src_destroy) | |
1777 | vm_map_t dst_map; | |
1778 | vm_map_t src_map; | |
1779 | vm_offset_t dst_addr; | |
1780 | vm_size_t len; | |
1781 | vm_offset_t src_addr; | |
1782 | boolean_t dst_alloc; | |
1783 | boolean_t src_destroy; | |
1784 | { | |
1785 | register | |
1786 | vm_map_entry_t src_entry; | |
1787 | register | |
1788 | vm_map_entry_t dst_entry; | |
1789 | vm_map_entry_t tmp_entry; | |
1790 | vm_offset_t src_start; | |
1791 | vm_offset_t src_end; | |
1792 | vm_offset_t dst_start; | |
1793 | vm_offset_t dst_end; | |
1794 | vm_offset_t src_clip; | |
1795 | vm_offset_t dst_clip; | |
1796 | int result; | |
1797 | boolean_t old_src_destroy; | |
1798 | ||
1799 | /* | |
1800 | * XXX While we figure out why src_destroy screws up, | |
1801 | * we'll do it by explicitly vm_map_delete'ing at the end. | |
1802 | */ | |
1803 | ||
1804 | old_src_destroy = src_destroy; | |
1805 | src_destroy = FALSE; | |
1806 | ||
1807 | /* | |
1808 | * Compute start and end of region in both maps | |
1809 | */ | |
1810 | ||
1811 | src_start = src_addr; | |
1812 | src_end = src_start + len; | |
1813 | dst_start = dst_addr; | |
1814 | dst_end = dst_start + len; | |
1815 | ||
1816 | /* | |
1817 | * Check that the region can exist in both source | |
1818 | * and destination. | |
1819 | */ | |
1820 | ||
1821 | if ((dst_end < dst_start) || (src_end < src_start)) | |
1822 | return(KERN_NO_SPACE); | |
1823 | ||
1824 | /* | |
1825 | * Lock the maps in question -- we avoid deadlock | |
1826 | * by ordering lock acquisition by map value | |
1827 | */ | |
1828 | ||
1829 | if (src_map == dst_map) { | |
1830 | vm_map_lock(src_map); | |
1831 | } | |
1832 | else if ((int) src_map < (int) dst_map) { | |
1833 | vm_map_lock(src_map); | |
1834 | vm_map_lock(dst_map); | |
1835 | } else { | |
1836 | vm_map_lock(dst_map); | |
1837 | vm_map_lock(src_map); | |
1838 | } | |
1839 | ||
1840 | result = KERN_SUCCESS; | |
1841 | ||
1842 | /* | |
1843 | * Check protections... source must be completely readable and | |
1844 | * destination must be completely writable. [Note that if we're | |
1845 | * allocating the destination region, we don't have to worry | |
1846 | * about protection, but instead about whether the region | |
1847 | * exists.] | |
1848 | */ | |
1849 | ||
1850 | if (src_map->is_main_map && dst_map->is_main_map) { | |
1851 | if (!vm_map_check_protection(src_map, src_start, src_end, | |
1852 | VM_PROT_READ)) { | |
1853 | result = KERN_PROTECTION_FAILURE; | |
1854 | goto Return; | |
1855 | } | |
1856 | ||
1857 | if (dst_alloc) { | |
1858 | /* XXX Consider making this a vm_map_find instead */ | |
5d7b9ad3 | 1859 | if ((result = vm_map_insert(dst_map, NULL, |
175f072e KM | 1860 | (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS) |
1861 | goto Return; | |
1862 | } | |
1863 | else if (!vm_map_check_protection(dst_map, dst_start, dst_end, | |
1864 | VM_PROT_WRITE)) { | |
1865 | result = KERN_PROTECTION_FAILURE; | |
1866 | goto Return; | |
1867 | } | |
1868 | } | |
1869 | ||
1870 | /* | |
1871 | * Find the start entries and clip. | |
1872 | * | |
1873 | * Note that checking protection asserts that the | |
1874 | * lookup cannot fail. | |
1875 | * | |
1876 | * Also note that we wait to do the second lookup | |
1877 | * until we have done the first clip, as the clip | |
1878 | * may affect which entry we get! | |
1879 | */ | |
1880 | ||
1881 | (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); | |
1882 | src_entry = tmp_entry; | |
1883 | vm_map_clip_start(src_map, src_entry, src_start); | |
1884 | ||
1885 | (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry); | |
1886 | dst_entry = tmp_entry; | |
1887 | vm_map_clip_start(dst_map, dst_entry, dst_start); | |
1888 | ||
1889 | /* | |
1890 | * If both source and destination entries are the same, | |
1891 | * retry the first lookup, as it may have changed. | |
1892 | */ | |
1893 | ||
1894 | if (src_entry == dst_entry) { | |
1895 | (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); | |
1896 | src_entry = tmp_entry; | |
1897 | } | |
1898 | ||
1899 | /* | |
1900 | * If source and destination entries are still the same, | |
1901 | * a null copy is being performed. | |
1902 | */ | |
1903 | ||
1904 | if (src_entry == dst_entry) | |
1905 | goto Return; | |
1906 | ||
1907 | /* | |
1908 | * Go through entries until we get to the end of the | |
1909 | * region. | |
1910 | */ | |
1911 | ||
1912 | while (src_start < src_end) { | |
1913 | /* | |
1914 | * Clip the entries to the endpoint of the entire region. | |
1915 | */ | |
1916 | ||
1917 | vm_map_clip_end(src_map, src_entry, src_end); | |
1918 | vm_map_clip_end(dst_map, dst_entry, dst_end); | |
1919 | ||
1920 | /* | |
1921 | * Clip each entry to the endpoint of the other entry. | |
1922 | */ | |
1923 | ||
1924 | src_clip = src_entry->start + (dst_entry->end - dst_entry->start); | |
1925 | vm_map_clip_end(src_map, src_entry, src_clip); | |
1926 | ||
1927 | dst_clip = dst_entry->start + (src_entry->end - src_entry->start); | |
1928 | vm_map_clip_end(dst_map, dst_entry, dst_clip); | |
1929 | ||
1930 | /* | |
1931 | * Both entries now match in size and relative endpoints. | |
1932 | * | |
1933 | * If both entries refer to a VM object, we can | |
1934 | * deal with them now. | |
1935 | */ | |
1936 | ||
1937 | if (!src_entry->is_a_map && !dst_entry->is_a_map) { | |
1938 | vm_map_copy_entry(src_map, dst_map, src_entry, | |
1939 | dst_entry); | |
1940 | } | |
1941 | else { | |
1942 | register vm_map_t new_dst_map; | |
1943 | vm_offset_t new_dst_start; | |
1944 | vm_size_t new_size; | |
1945 | vm_map_t new_src_map; | |
1946 | vm_offset_t new_src_start; | |
1947 | ||
1948 | /* | |
1949 | * We have to follow at least one sharing map. | |
1950 | */ | |
1951 | ||
1952 | new_size = (dst_entry->end - dst_entry->start); | |
1953 | ||
1954 | if (src_entry->is_a_map) { | |
1955 | new_src_map = src_entry->object.share_map; | |
1956 | new_src_start = src_entry->offset; | |
1957 | } | |
1958 | else { | |
1959 | new_src_map = src_map; | |
1960 | new_src_start = src_entry->start; | |
1961 | lock_set_recursive(&src_map->lock); | |
1962 | } | |
1963 | ||
1964 | if (dst_entry->is_a_map) { | |
1965 | vm_offset_t new_dst_end; | |
1966 | ||
1967 | new_dst_map = dst_entry->object.share_map; | |
1968 | new_dst_start = dst_entry->offset; | |
1969 | ||
1970 | /* | |
1971 | * Since the destination sharing entries | |
1972 | * will be merely deallocated, we can | |
1973 | * do that now, and replace the region | |
1974 | * with a null object. [This prevents | |
1975 | * splitting the source map to match | |
1976 | * the form of the destination map.] | |
1977 | * Note that we can only do so if the | |
1978 | * source and destination do not overlap. | |
1979 | */ | |
1980 | ||
1981 | new_dst_end = new_dst_start + new_size; | |
1982 | ||
1983 | if (new_dst_map != new_src_map) { | |
1984 | vm_map_lock(new_dst_map); | |
1985 | (void) vm_map_delete(new_dst_map, | |
1986 | new_dst_start, | |
1987 | new_dst_end); | |
1988 | (void) vm_map_insert(new_dst_map, | |
5d7b9ad3 | 1989 | NULL, |
175f072e KM | 1990 | (vm_offset_t) 0, |
1991 | new_dst_start, | |
1992 | new_dst_end); | |
1993 | vm_map_unlock(new_dst_map); | |
1994 | } | |
1995 | } | |
1996 | else { | |
1997 | new_dst_map = dst_map; | |
1998 | new_dst_start = dst_entry->start; | |
1999 | lock_set_recursive(&dst_map->lock); | |
2000 | } | |
2001 | ||
2002 | /* | |
2003 | * Recursively copy the sharing map. | |
2004 | */ | |
2005 | ||
2006 | (void) vm_map_copy(new_dst_map, new_src_map, | |
2007 | new_dst_start, new_size, new_src_start, | |
2008 | FALSE, FALSE); | |
2009 | ||
2010 | if (dst_map == new_dst_map) | |
2011 | lock_clear_recursive(&dst_map->lock); | |
2012 | if (src_map == new_src_map) | |
2013 | lock_clear_recursive(&src_map->lock); | |
2014 | } | |
2015 | ||
2016 | /* | |
2017 | * Update variables for next pass through the loop. | |
2018 | */ | |
2019 | ||
2020 | src_start = src_entry->end; | |
2021 | src_entry = src_entry->next; | |
2022 | dst_start = dst_entry->end; | |
2023 | dst_entry = dst_entry->next; | |
2024 | ||
2025 | /* | |
2026 | * If the source is to be destroyed, here is the | |
2027 | * place to do it. | |
2028 | */ | |
2029 | ||
2030 | if (src_destroy && src_map->is_main_map && | |
2031 | dst_map->is_main_map) | |
2032 | vm_map_entry_delete(src_map, src_entry->prev); | |
2033 | } | |
2034 | ||
2035 | /* | |
2036 | * Update the physical maps as appropriate | |
2037 | */ | |
2038 | ||
2039 | if (src_map->is_main_map && dst_map->is_main_map) { | |
2040 | if (src_destroy) | |
2041 | pmap_remove(src_map->pmap, src_addr, src_addr + len); | |
2042 | } | |
2043 | ||
2044 | /* | |
2045 | * Unlock the maps | |
2046 | */ | |
2047 | ||
2048 | Return: ; | |
2049 | ||
2050 | if (old_src_destroy) | |
2051 | vm_map_delete(src_map, src_addr, src_addr + len); | |
2052 | ||
2053 | vm_map_unlock(src_map); | |
2054 | if (src_map != dst_map) | |
2055 | vm_map_unlock(dst_map); | |
2056 | ||
2057 | return(result); | |
2058 | } | |
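
The deadlock-avoidance idiom at the top of `vm_map_copy` (a single lock when the maps coincide, otherwise locking in ascending address order) is worth isolating: every thread acquires any pair of locks in the same global order, so no thread can hold one while waiting on the other. A user-space sketch of the same discipline with pthreads (`lock_pair`/`unlock_pair` are hypothetical helpers, not kernel functions):

```c
#include <pthread.h>
#include <stdint.h>

/*
 * Acquire two mutexes without deadlock by always locking the
 * lower-addressed one first -- the discipline vm_map_copy applies
 * to src_map/dst_map.  Handles the aliased (same-map) case too.
 */
static void
lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);		/* one lock, taken once */
	} else if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void
unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

int
main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	lock_pair(&m1, &m2);
	unlock_pair(&m1, &m2);
	return (0);
}
```

Note that the original compares the pointers through `(int)` casts, which was adequate on the 32-bit machines of the era; the sketch uses `uintptr_t` for the same comparison.
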
2059 | ||
2060 | /* | |
5d7b9ad3 MK | 2061 | * vmspace_fork: |
2062 | * Create a new process vmspace structure and vm_map | |
2063 | * based on those of an existing process. The new map | |
2064 | * is based on the old map, according to the inheritance | |
2065 | * values on the regions in that map. | |
175f072e | 2066 | * |
5d7b9ad3 | 2067 | * The source map must not be locked. |
175f072e | 2068 | */ |
5d7b9ad3 MK | 2069 | struct vmspace * |
2070 | vmspace_fork(vm1) | |
2071 | register struct vmspace *vm1; | |
175f072e | 2072 | { |
5d7b9ad3 MK | 2073 | register struct vmspace *vm2; |
2074 | vm_map_t old_map = &vm1->vm_map; | |
175f072e KM | 2075 | vm_map_t new_map; |
2076 | vm_map_entry_t old_entry; | |
2077 | vm_map_entry_t new_entry; | |
2078 | pmap_t new_pmap; | |
2079 | ||
2080 | vm_map_lock(old_map); | |
2081 | ||
5d7b9ad3 MK | 2082 | vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, |
2083 | old_map->entries_pageable); | |
2084 | bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, | |
2085 | (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); | |
2086 | new_pmap = &vm2->vm_pmap; /* XXX */ | |
2087 | new_map = &vm2->vm_map; /* XXX */ | |
175f072e KM | 2088 | |
2089 | old_entry = old_map->header.next; | |
2090 | ||
2091 | while (old_entry != &old_map->header) { | |
2092 | if (old_entry->is_sub_map) | |
2093 | panic("vm_map_fork: encountered a submap"); | |
2094 | ||
2095 | switch (old_entry->inheritance) { | |
2096 | case VM_INHERIT_NONE: | |
2097 | break; | |
2098 | ||
2099 | case VM_INHERIT_SHARE: | |
2100 | /* | |
2101 | * If we don't already have a sharing map: | |
2102 | */ | |
2103 | ||
2104 | if (!old_entry->is_a_map) { | |
2105 | vm_map_t new_share_map; | |
2106 | vm_map_entry_t new_share_entry; | |
2107 | ||
2108 | /* | |
2109 | * Create a new sharing map | |
2110 | */ | |
2111 | ||
5d7b9ad3 | 2112 | new_share_map = vm_map_create(NULL, |
175f072e KM | 2113 | old_entry->start, |
2114 | old_entry->end, | |
2115 | TRUE); | |
2116 | new_share_map->is_main_map = FALSE; | |
2117 | ||
2118 | /* | |
2119 | * Create the only sharing entry from the | |
2120 | * old task map entry. | |
2121 | */ | |
2122 | ||
2123 | new_share_entry = | |
2124 | vm_map_entry_create(new_share_map); | |
2125 | *new_share_entry = *old_entry; | |
b5246c21 | 2126 | new_share_entry->wired_count = 0; |
175f072e KM | 2127 | |
2128 | /* | |
2129 | * Insert the entry into the new sharing | |
2130 | * map | |
2131 | */ | |
2132 | ||
2133 | vm_map_entry_link(new_share_map, | |
2134 | new_share_map->header.prev, | |
2135 | new_share_entry); | |
2136 | ||
2137 | /* | |
2138 | * Fix up the task map entry to refer | |
2139 | * to the sharing map now. | |
2140 | */ | |
2141 | ||
2142 | old_entry->is_a_map = TRUE; | |
2143 | old_entry->object.share_map = new_share_map; | |
2144 | old_entry->offset = old_entry->start; | |
2145 | } | |
2146 | ||
2147 | /* | |
2148 | * Clone the entry, referencing the sharing map. | |
2149 | */ | |
2150 | ||
2151 | new_entry = vm_map_entry_create(new_map); | |
2152 | *new_entry = *old_entry; | |
b5246c21 | 2153 | new_entry->wired_count = 0; |
175f072e KM | 2154 | vm_map_reference(new_entry->object.share_map); |
2155 | ||
2156 | /* | |
2157 | * Insert the entry into the new map -- we | |
2158 | * know we're inserting at the end of the new | |
2159 | * map. | |
2160 | */ | |
2161 | ||
2162 | vm_map_entry_link(new_map, new_map->header.prev, | |
2163 | new_entry); | |
2164 | ||
2165 | /* | |
2166 | * Update the physical map | |
2167 | */ | |
2168 | ||
2169 | pmap_copy(new_map->pmap, old_map->pmap, | |
2170 | new_entry->start, | |
2171 | (old_entry->end - old_entry->start), | |
2172 | old_entry->start); | |
2173 | break; | |
2174 | ||
2175 | case VM_INHERIT_COPY: | |
2176 | /* | |
2177 | * Clone the entry and link into the map. | |
2178 | */ | |
2179 | ||
2180 | new_entry = vm_map_entry_create(new_map); | |
2181 | *new_entry = *old_entry; | |
2182 | new_entry->wired_count = 0; | |
5d7b9ad3 | 2183 | new_entry->object.vm_object = NULL; |
175f072e KM | 2184 | new_entry->is_a_map = FALSE; |
2185 | vm_map_entry_link(new_map, new_map->header.prev, | |
2186 | new_entry); | |
2187 | if (old_entry->is_a_map) { | |
2188 | int check; | |
2189 | ||
2190 | check = vm_map_copy(new_map, | |
2191 | old_entry->object.share_map, | |
2192 | new_entry->start, | |
2193 | (vm_size_t)(new_entry->end - | |
2194 | new_entry->start), | |
2195 | old_entry->offset, | |
2196 | FALSE, FALSE); | |
2197 | if (check != KERN_SUCCESS) | |
2198 | printf("vm_map_fork: copy in share_map region failed\n"); | |
2199 | } | |
2200 | else { | |
2201 | vm_map_copy_entry(old_map, new_map, old_entry, | |
2202 | new_entry); | |
2203 | } | |
2204 | break; | |
2205 | } | |
2206 | old_entry = old_entry->next; | |
2207 | } | |
2208 | ||
2209 | new_map->size = old_map->size; | |
2210 | vm_map_unlock(old_map); | |
2211 | ||
5d7b9ad3 | 2212 | return(vm2); |
175f072e KM | 2213 | } |
2214 | ||
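
The switch in `vmspace_fork` reduces to a three-way policy per entry: skip, alias through a sharing map, or clone copy-on-write. A sketch of that decision table (the enum and strings below are illustrative, not the kernel's; cf. the `VM_INHERIT_*` constants used above):

```c
#include <stdio.h>

enum inherit { INH_NONE, INH_SHARE, INH_COPY };	/* cf. VM_INHERIT_* */

static const char *
fork_action(enum inherit i)
{
	switch (i) {
	case INH_NONE:
		return "skip entry: child address range stays unmapped";
	case INH_SHARE:
		return "push entry into a sharing map; both maps reference it";
	case INH_COPY:
		return "clone entry copy-on-write via vm_map_copy_entry";
	}
	return "unknown";
}

int
main(void)
{
	printf("%s\n", fork_action(INH_SHARE));
	return (0);
}
```
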
2215 | /* | |
2216 | * vm_map_lookup: | |
2217 | * | |
2218 | * Finds the VM object, offset, and | |
2219 | * protection for a given virtual address in the | |
2220 | * specified map, assuming a page fault of the | |
2221 | * type specified. | |
2222 | * | |
2223 | * Leaves the map in question locked for read; return | |
2224 | * values are guaranteed until a vm_map_lookup_done | |
2225 | * call is performed. Note that the map argument | |
2226 | * is in/out; the returned map must be used in | |
2227 | * the call to vm_map_lookup_done. | |
2228 | * | |
2229 | * A handle (out_entry) is returned for use in | |
2230 | * vm_map_lookup_done, to make that fast. | |
2231 | * | |
2232 | * If a lookup is requested with "write protection" | |
2233 | * specified, the map may be changed to perform virtual | |
2234 | * copying operations, although the data referenced will | |
2235 | * remain the same. | |
2236 | */ | |
73506ff8 | 2237 | int |
175f072e KM | 2238 | vm_map_lookup(var_map, vaddr, fault_type, out_entry, |
2239 | object, offset, out_prot, wired, single_use) | |
2240 | vm_map_t *var_map; /* IN/OUT */ | |
2241 | register vm_offset_t vaddr; | |
2242 | register vm_prot_t fault_type; | |
2243 | ||
2244 | vm_map_entry_t *out_entry; /* OUT */ | |
2245 | vm_object_t *object; /* OUT */ | |
2246 | vm_offset_t *offset; /* OUT */ | |
2247 | vm_prot_t *out_prot; /* OUT */ | |
2248 | boolean_t *wired; /* OUT */ | |
2249 | boolean_t *single_use; /* OUT */ | |
2250 | { | |
2251 | vm_map_t share_map; | |
2252 | vm_offset_t share_offset; | |
2253 | register vm_map_entry_t entry; | |
2254 | register vm_map_t map = *var_map; | |
2255 | register vm_prot_t prot; | |
2256 | register boolean_t su; | |
2257 | ||
2258 | RetryLookup: ; | |
2259 | ||
2260 | /* | |
2261 | * Lookup the faulting address. | |
2262 | */ | |
2263 | ||
2264 | vm_map_lock_read(map); | |
2265 | ||
2266 | #define RETURN(why) \ | |
2267 | { \ | |
2268 | vm_map_unlock_read(map); \ | |
2269 | return(why); \ | |
2270 | } | |
2271 | ||
2272 | /* | |
2273 | * If the map has an interesting hint, try it before calling | |
2274 | * full blown lookup routine. | |
2275 | */ | |
2276 | ||
2277 | simple_lock(&map->hint_lock); | |
2278 | entry = map->hint; | |
2279 | simple_unlock(&map->hint_lock); | |
2280 | ||
2281 | *out_entry = entry; | |
2282 | ||
2283 | if ((entry == &map->header) || | |
2284 | (vaddr < entry->start) || (vaddr >= entry->end)) { | |
2285 | vm_map_entry_t tmp_entry; | |
2286 | ||
2287 | /* | |
2288 | * Entry was either not a valid hint, or the vaddr | |
2289 | * was not contained in the entry, so do a full lookup. | |
2290 | */ | |
2291 | if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) | |
2292 | RETURN(KERN_INVALID_ADDRESS); | |
2293 | ||
2294 | entry = tmp_entry; | |
2295 | *out_entry = entry; | |
2296 | } | |
2297 | ||
2298 | /* | |
2299 | * Handle submaps. | |
2300 | */ | |
2301 | ||
2302 | if (entry->is_sub_map) { | |
2303 | vm_map_t old_map = map; | |
2304 | ||
2305 | *var_map = map = entry->object.sub_map; | |
2306 | vm_map_unlock_read(old_map); | |
2307 | goto RetryLookup; | |
2308 | } | |
2309 | ||
2310 | /* | |
2311 | * Check whether this task is allowed to have | |
2312 | * this page. | |
2313 | */ | |
2314 | ||
2315 | prot = entry->protection; | |
2316 | if ((fault_type & (prot)) != fault_type) | |
2317 | RETURN(KERN_PROTECTION_FAILURE); | |
2318 | ||
2319 | /* | |
2320 | * If this page is not pageable, we have to get | |
2321 | * it for all possible accesses. | |
2322 | */ | |
2323 | ||
2324 | if (*wired = (entry->wired_count != 0)) | |
2325 | prot = fault_type = entry->protection; | |
2326 | ||
2327 | /* | |
2328 | * If we don't already have a VM object, track | |
2329 | * it down. | |
2330 | */ | |
2331 | ||
2332 | if (su = !entry->is_a_map) { | |
2333 | share_map = map; | |
2334 | share_offset = vaddr; | |
2335 | } | |
2336 | else { | |
2337 | vm_map_entry_t share_entry; | |
2338 | ||
2339 | /* | |
2340 | * Compute the sharing map, and offset into it. | |
2341 | */ | |
2342 | ||
2343 | share_map = entry->object.share_map; | |
2344 | share_offset = (vaddr - entry->start) + entry->offset; | |
2345 | ||
2346 | /* | |
2347 | * Look for the backing store object and offset | |
2348 | */ | |
2349 | ||
2350 | vm_map_lock_read(share_map); | |
2351 | ||
2352 | if (!vm_map_lookup_entry(share_map, share_offset, | |
2353 | &share_entry)) { | |
2354 | vm_map_unlock_read(share_map); | |
2355 | RETURN(KERN_INVALID_ADDRESS); | |
2356 | } | |
2357 | entry = share_entry; | |
2358 | } | |
2359 | ||
2360 | /* | |
2361 | * If the entry was copy-on-write, we either shadow the object now or demote the permitted access. | |
2362 | */ | |
2363 | ||
2364 | if (entry->needs_copy) { | |
2365 | /* | |
2366 | * If we want to write the page, we may as well | |
2367 | * handle that now since we've got the sharing | |
2368 | * map locked. | |
2369 | * | |
2370 | * If we don't need to write the page, we just | |
2371 | * demote the permissions allowed. | |
2372 | */ | |
2373 | ||
2374 | if (fault_type & VM_PROT_WRITE) { | |
2375 | /* | |
2376 | * Make a new object, and place it in the | |
2377 | * object chain. Note that no new references | |
2378 | * have appeared -- one just moved from the | |
2379 | * share map to the new object. | |
2380 | */ | |
2381 | ||
2382 | if (lock_read_to_write(&share_map->lock)) { | |
2383 | if (share_map != map) | |
2384 | vm_map_unlock_read(map); | |
2385 | goto RetryLookup; | |
2386 | } | |
2387 | ||
2388 | vm_object_shadow( | |
2389 | &entry->object.vm_object, | |
2390 | &entry->offset, | |
2391 | (vm_size_t) (entry->end - entry->start)); | |
2392 | ||
2393 | entry->needs_copy = FALSE; | |
2394 | ||
2395 | lock_write_to_read(&share_map->lock); | |
2396 | } | |
2397 | else { | |
2398 | /* | |
2399 | * We're attempting to read a copy-on-write | |
2400 | * page -- don't allow writes. | |
2401 | */ | |
2402 | ||
2403 | prot &= (~VM_PROT_WRITE); | |
2404 | } | |
2405 | } | |
2406 | ||
2407 | /* | |
2408 | * Create an object if necessary. | |
2409 | */ | |
5d7b9ad3 | 2410 | if (entry->object.vm_object == NULL) { |
175f072e KM | 2411 | |
2412 | if (lock_read_to_write(&share_map->lock)) { | |
2413 | if (share_map != map) | |
2414 | vm_map_unlock_read(map); | |
2415 | goto RetryLookup; | |
2416 | } | |
2417 | ||
2418 | entry->object.vm_object = vm_object_allocate( | |
2419 | (vm_size_t)(entry->end - entry->start)); | |
2420 | entry->offset = 0; | |
2421 | lock_write_to_read(&share_map->lock); | |
2422 | } | |
2423 | ||
2424 | /* | |
2425 | * Return the object/offset from this entry. If the entry | |
2426 | * was copy-on-write or empty, it has been fixed up. | |
2427 | */ | |
2428 | ||
2429 | *offset = (share_offset - entry->start) + entry->offset; | |
2430 | *object = entry->object.vm_object; | |
2431 | ||
2432 | /* | |
2433 | * Return whether this is the only map sharing this data. | |
2434 | */ | |
2435 | ||
2436 | if (!su) { | |
2437 | simple_lock(&share_map->ref_lock); | |
2438 | su = (share_map->ref_count == 1); | |
2439 | simple_unlock(&share_map->ref_lock); | |
2440 | } | |
2441 | ||
2442 | *out_prot = prot; | |
2443 | *single_use = su; | |
2444 | ||
2445 | return(KERN_SUCCESS); | |
2446 | ||
2447 | #undef RETURN | |
2448 | } | |
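
The in/out contract is easiest to see from the caller's side. Below is a trimmed sketch of how a fault handler would drive `vm_map_lookup` and `vm_map_lookup_done`, using only the signature declared above; `fault_sketch` is hypothetical, elides the actual page resolution, and compiles only against the kernel headers:

```c
/*
 * Sketch of a vm_map_lookup caller.  Not the real vm_fault --
 * just the locking protocol the comments above describe.
 */
int
fault_sketch(map, va, fault_type)
	vm_map_t map;
	vm_offset_t va;
	vm_prot_t fault_type;
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired, single_use;
	int result;

	result = vm_map_lookup(&map, va, fault_type, &entry,
	    &object, &offset, &prot, &wired, &single_use);
	if (result != KERN_SUCCESS)
		return(result);

	/*
	 * ... resolve the page from (object, offset) under the read
	 * lock.  Note that `map` may now name a submap, so only the
	 * returned map and entry may be passed to vm_map_lookup_done.
	 */

	vm_map_lookup_done(map, entry);		/* drops the read lock(s) */
	return(KERN_SUCCESS);
}
```
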
2449 | ||
2450 | /* | |
2451 | * vm_map_lookup_done: | |
2452 | * | |
2453 | * Releases locks acquired by a vm_map_lookup | |
2454 | * (according to the handle returned by that lookup). | |
2455 | */ | |
2456 | ||
2457 | void vm_map_lookup_done(map, entry) | |
2458 | register vm_map_t map; | |
2459 | vm_map_entry_t entry; | |
2460 | { | |
2461 | /* | |
2462 | * If this entry references a map, unlock it first. | |
2463 | */ | |
2464 | ||
2465 | if (entry->is_a_map) | |
2466 | vm_map_unlock_read(entry->object.share_map); | |
2467 | ||
2468 | /* | |
2469 | * Unlock the main-level map | |
2470 | */ | |
2471 | ||
2472 | vm_map_unlock_read(map); | |
2473 | } | |
2474 | ||
2475 | /* | |
2476 | * Routine: vm_map_simplify | |
2477 | * Purpose: | |
2478 | * Attempt to simplify the map representation in | |
2479 | * the vicinity of the given starting address. | |
2480 | * Note: | |
2481 | * This routine is intended primarily to keep the | |
2482 | * kernel maps more compact -- they generally don't | |
2483 | * benefit from the "expand a map entry" technology | |
2484 | * at allocation time because the adjacent entry | |
2485 | * is often wired down. | |
2486 | */ | |
2487 | void vm_map_simplify(map, start) | |
2488 | vm_map_t map; | |
2489 | vm_offset_t start; | |
2490 | { | |
2491 | vm_map_entry_t this_entry; | |
2492 | vm_map_entry_t prev_entry; | |
2493 | ||
2494 | vm_map_lock(map); | |
2495 | if ( | |
2496 | (vm_map_lookup_entry(map, start, &this_entry)) && | |
2497 | ((prev_entry = this_entry->prev) != &map->header) && | |
2498 | ||
2499 | (prev_entry->end == start) && | |
2500 | (map->is_main_map) && | |
2501 | ||
2502 | (prev_entry->is_a_map == FALSE) && | |
2503 | (prev_entry->is_sub_map == FALSE) && | |
2504 | ||
2505 | (this_entry->is_a_map == FALSE) && | |
2506 | (this_entry->is_sub_map == FALSE) && | |
2507 | ||
2508 | (prev_entry->inheritance == this_entry->inheritance) && | |
2509 | (prev_entry->protection == this_entry->protection) && | |
2510 | (prev_entry->max_protection == this_entry->max_protection) && | |
2511 | (prev_entry->wired_count == this_entry->wired_count) && | |
2512 | ||
2513 | (prev_entry->copy_on_write == this_entry->copy_on_write) && | |
2514 | (prev_entry->needs_copy == this_entry->needs_copy) && | |
2515 | ||
2516 | (prev_entry->object.vm_object == this_entry->object.vm_object) && | |
2517 | ((prev_entry->offset + (prev_entry->end - prev_entry->start)) | |
2518 | == this_entry->offset) | |
2519 | ) { | |
2520 | if (map->first_free == this_entry) | |
2521 | map->first_free = prev_entry; | |
2522 | ||
2523 | SAVE_HINT(map, prev_entry); | |
2524 | vm_map_entry_unlink(map, this_entry); | |
2525 | prev_entry->end = this_entry->end; | |
2526 | vm_object_deallocate(this_entry->object.vm_object); | |
2527 | vm_map_entry_dispose(map, this_entry); | |
2528 | } | |
2529 | vm_map_unlock(map); | |
2530 | } | |
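
The merge test above is long but mechanical: the two entries must abut, agree on every attribute, and map consecutive offsets of the same object. A toy list-coalescing version of the same test (the `seg_t` fields are stand-ins for the entry fields compared above, with the attribute set collapsed to one `prot` word):

```c
#include <stdlib.h>

/* Illustrative stand-in for a map entry. */
typedef struct seg {
	unsigned long start, end;	/* [start, end)                  */
	unsigned prot;			/* all attributes, collapsed     */
	void *object;
	unsigned long offset;		/* offset of `start` in object   */
	struct seg *prev, *next;
} seg_t;

/*
 * Merge `this` into `prev` when the ranges abut, the attributes
 * match, and the object offsets are consecutive -- the same
 * conditions vm_map_simplify checks before unlinking an entry.
 */
static void
try_merge(seg_t *this)
{
	seg_t *prev = this->prev;

	if (prev == NULL ||
	    prev->end != this->start ||			/* must abut       */
	    prev->prot != this->prot ||			/* same attributes */
	    prev->object != this->object ||
	    prev->offset + (prev->end - prev->start) != this->offset)
		return;					/* not mergeable   */

	prev->end = this->end;				/* absorb the span */
	prev->next = this->next;
	if (this->next != NULL)
		this->next->prev = prev;
	free(this);
}

int
main(void)
{
	seg_t *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	a->start = 0x1000; a->end = 0x2000; a->offset = 0;
	b->start = 0x2000; b->end = 0x3000; b->offset = 0x1000;
	a->next = b; b->prev = a;

	try_merge(b);		/* a now spans [0x1000, 0x3000) */
	free(a);
	return (0);
}
```
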
2531 | ||
2532 | /* | |
2533 | * vm_map_print: [ debug ] | |
2534 | */ | |
2535 | void vm_map_print(map, full) | |
2536 | register vm_map_t map; | |
2537 | boolean_t full; | |
2538 | { | |
2539 | register vm_map_entry_t entry; | |
2540 | extern int indent; | |
2541 | ||
2542 | iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n", | |
2543 | (map->is_main_map ? "Task" : "Share"), | |
2544 | (int) map, (int) (map->pmap), map->ref_count, map->nentries, | |
2545 | map->timestamp); | |
2546 | ||
2547 | if (!full && indent) | |
2548 | return; | |
2549 | ||
2550 | indent += 2; | |
2551 | for (entry = map->header.next; entry != &map->header; | |
2552 | entry = entry->next) { | |
2553 | iprintf("map entry 0x%x: start=0x%x, end=0x%x, ", | |
2554 | (int) entry, (int) entry->start, (int) entry->end); | |
2555 | if (map->is_main_map) { | |
2556 | static char *inheritance_name[4] = | |
2557 | { "share", "copy", "none", "donate_copy"}; | |
2558 | printf("prot=%x/%x/%s, ", | |
2559 | entry->protection, | |
2560 | entry->max_protection, | |
2561 | inheritance_name[entry->inheritance]); | |
2562 | if (entry->wired_count != 0) | |
2563 | printf("wired, "); | |
2564 | } | |
2565 | ||
2566 | if (entry->is_a_map || entry->is_sub_map) { | |
2567 | printf("share=0x%x, offset=0x%x\n", | |
2568 | (int) entry->object.share_map, | |
2569 | (int) entry->offset); | |
2570 | if ((entry->prev == &map->header) || | |
2571 | (!entry->prev->is_a_map) || | |
2572 | (entry->prev->object.share_map != | |
2573 | entry->object.share_map)) { | |
2574 | indent += 2; | |
2575 | vm_map_print(entry->object.share_map, full); | |
2576 | indent -= 2; | |
2577 | } | |
2578 | ||
2579 | } | |
2580 | else { | |
2581 | printf("object=0x%x, offset=0x%x", | |
2582 | (int) entry->object.vm_object, | |
2583 | (int) entry->offset); | |
2584 | if (entry->copy_on_write) | |
2585 | printf(", copy (%s)", | |
2586 | entry->needs_copy ? "needed" : "done"); | |
2587 | printf("\n"); | |
2588 | ||
2589 | if ((entry->prev == &map->header) || | |
2590 | (entry->prev->is_a_map) || | |
2591 | (entry->prev->object.vm_object != | |
2592 | entry->object.vm_object)) { | |
2593 | indent += 2; | |
2594 | vm_object_print(entry->object.vm_object, full); | |
2595 | indent -= 2; | |
2596 | } | |
2597 | } | |
2598 | } | |
2599 | indent -= 2; | |
2600 | } |