UNMATCHED SPL() CALL IN VM SYSTEM
[unix-history] usr/src/sys.386bsd/vm/vm_map.c
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_map.c 7.3 (Berkeley) 4/21/91
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 */
64
65/*
66 * Virtual memory mapping module.
67 */
68
69#include "param.h"
70#include "malloc.h"
71#include "vm.h"
72#include "vm_page.h"
73#include "vm_object.h"
74
75/*
76 * Virtual memory maps provide for the mapping, protection,
77 * and sharing of virtual memory objects. In addition,
78 * this module provides for an efficient virtual copy of
79 * memory from one map to another.
80 *
81 * Synchronization is required prior to most operations.
82 *
83 * Maps consist of an ordered doubly-linked list of simple
84 * entries; a single hint is used to speed up lookups.
85 *
86 * In order to properly represent the sharing of virtual
87 * memory regions among maps, the map structure is bi-level.
88 * Top-level ("address") maps refer to regions of sharable
89 * virtual memory. These regions are implemented as
90 * ("sharing") maps, which then refer to the actual virtual
91 * memory objects. When two address maps "share" memory,
92 * their top-level maps both have references to the same
93 * sharing map. When memory is virtual-copied from one
94 * address map to another, the references in the sharing
95 * maps are actually copied -- no copying occurs at the
96 * virtual memory object level.
97 *
98 * Since portions of maps are specified by start/end addresses,
99 * which may not align with existing map entries, all
100 * routines merely "clip" entries to these start/end values.
101 * [That is, an entry is split into two, bordering at a
102 * start or end value.] Note that these clippings may not
103 * always be necessary (as the two resulting entries are then
104 * not changed); however, the clipping is done for convenience.
105 * No attempt is currently made to "glue back together" two
106 * abutting entries.
107 *
108 * As mentioned above, virtual copy operations are performed
109 * by copying VM object references from one sharing map to
110 * another, and then marking both regions as copy-on-write.
111 * It is important to note that only one writeable reference
112 * to a VM object region exists in any map -- this means that
113 * shadow object creation can be delayed until a write operation
114 * occurs.
115 */
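/*
 * For example, resolving a faulting address through the two map levels
 * described above proceeds roughly as vm_map_lookup() does later in
 * this file (a sketch only; locking and error handling are omitted,
 * and the local names are illustrative):
 *
 *	vm_map_entry_t	entry;
 *
 *	(void) vm_map_lookup_entry(map, vaddr, &entry);
 *	if (entry->is_a_map) {
 *		share_map = entry->object.share_map;
 *		share_offset = (vaddr - entry->start) + entry->offset;
 *		(void) vm_map_lookup_entry(share_map, share_offset, &entry);
 *	}
 *
 * At this point entry->object.vm_object and entry->offset name the
 * backing VM object and the offset of the faulting page within it.
 */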
116
117/*
118 * vm_map_startup:
119 *
120 * Initialize the vm_map module. Must be called before
121 * any other vm_map routines.
122 *
123 * Map and entry structures are allocated from the general
124 * purpose memory pool with some exceptions:
125 *
126 * - The kernel map and kmem submap are allocated statically.
127 * - Kernel map entries are allocated out of a static pool.
128 *
129 * These restrictions are necessary since malloc() uses the
130 * maps and requires map entries.
131 */
132
133vm_offset_t kentry_data;
134vm_size_t kentry_data_size;
135vm_map_entry_t kentry_free;
136vm_map_t kmap_free;
137
138void vm_map_startup()
139{
140 register int i;
141 register vm_map_entry_t mep;
142 vm_map_t mp;
143
144 /*
145 * Static map structures for allocation before initialization of
146 * kernel map or kmem map. vm_map_create knows how to deal with them.
147 */
148 kmap_free = mp = (vm_map_t) kentry_data;
149 i = MAX_KMAP;
150 while (--i > 0) {
151 mp->header.next = (vm_map_entry_t) (mp + 1);
152 mp++;
153 }
154 mp++->header.next = NULL;
155
156 /*
157 * Form a free list of statically allocated kernel map entries
158 * with the rest.
159 */
160 kentry_free = mep = (vm_map_entry_t) mp;
161 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
162 while (--i > 0) {
163 mep->next = mep + 1;
164 mep++;
165 }
166 mep->next = NULL;
167}
168
169/*
170 * Allocate a vmspace structure, including a vm_map and pmap,
171 * and initialize those structures. The refcnt is set to 1.
172 * The remaining fields must be initialized by the caller.
173 */
174struct vmspace *
175vmspace_alloc(min, max, pageable)
176 vm_offset_t min, max;
177 int pageable;
178{
179 register struct vmspace *vm;
180
181 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
182 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
183 vm_map_init(&vm->vm_map, min, max, pageable);
184 pmap_pinit(&vm->vm_pmap);
185 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
186 vm->vm_refcnt = 1;
187 return (vm);
188}
189
190void
191vmspace_free(vm)
192 register struct vmspace *vm;
193{
194
195 if (--vm->vm_refcnt == 0) {
196 /*
197 * Lock the map, to wait out all other references to it.
198 * Delete all of the mappings and pages they hold,
199 * then call the pmap module to reclaim anything left.
200 */
201 vm_map_lock(&vm->vm_map);
202 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
203 vm->vm_map.max_offset);
204 pmap_release(&vm->vm_pmap);
205 FREE(vm, M_VMMAP);
206 }
207}
208
209/*
210 * vm_map_create:
211 *
212 * Creates and returns a new empty VM map with
213 * the given physical map structure, and having
214 * the given lower and upper address bounds.
215 */
216vm_map_t vm_map_create(pmap, min, max, pageable)
217 pmap_t pmap;
218 vm_offset_t min, max;
219 boolean_t pageable;
220{
221 register vm_map_t result;
222 extern vm_map_t kernel_map, kmem_map;
223
224 if (kmem_map == NULL) {
225 result = kmap_free;
226 kmap_free = (vm_map_t) result->header.next;
227 if (result == NULL)
228 panic("vm_map_create: out of maps");
229 } else
230 MALLOC(result, vm_map_t, sizeof(struct vm_map),
231 M_VMMAP, M_WAITOK);
232
233 vm_map_init(result, min, max, pageable);
234 result->pmap = pmap;
235 return(result);
236}
237
238/*
239 * Initialize an existing vm_map structure
240 * such as that in the vmspace structure.
241 * The pmap is set elsewhere.
242 */
243void
244vm_map_init(map, min, max, pageable)
245 register struct vm_map *map;
246 vm_offset_t min, max;
247 boolean_t pageable;
248{
249 map->header.next = map->header.prev = &map->header;
250 map->nentries = 0;
251 map->size = 0;
252 map->ref_count = 1;
253 map->is_main_map = TRUE;
254 map->min_offset = min;
255 map->max_offset = max;
256 map->entries_pageable = pageable;
257 map->first_free = &map->header;
258 map->hint = &map->header;
259 map->timestamp = 0;
260 lock_init(&map->lock, TRUE);
261 simple_lock_init(&map->ref_lock);
262 simple_lock_init(&map->hint_lock);
263}
264
265/*
266 * vm_map_entry_create: [ internal use only ]
267 *
268 * Allocates a VM map entry for insertion.
269 * No entry fields are filled in.
270 */
271vm_map_entry_t vm_map_entry_create(map)
272 vm_map_t map;
273{
274 vm_map_entry_t entry;
275 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map;
276
277 if (map == kernel_map || map == kmem_map || map == mb_map
278 || map == buffer_map) {
279		if ((entry = kentry_free) != NULL)
280 kentry_free = kentry_free->next;
281 } else
282 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
283 M_VMMAPENT, M_WAITOK);
284 if (entry == NULL)
285 panic("vm_map_entry_create: out of map entries");
286
287 return(entry);
288}
289
290/*
291 * vm_map_entry_dispose: [ internal use only ]
292 *
293 * Inverse of vm_map_entry_create.
294 */
295void vm_map_entry_dispose(map, entry)
296 vm_map_t map;
297 vm_map_entry_t entry;
298{
299 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map;
300
301 if (map == kernel_map || map == kmem_map || map == mb_map
302 || map == buffer_map) {
303 entry->next = kentry_free;
304 kentry_free = entry;
305 } else
306 FREE(entry, M_VMMAPENT);
307}
308
309/*
310 * vm_map_entry_{un,}link:
311 *
312 * Insert/remove entries from maps.
313 */
314#define vm_map_entry_link(map, after_where, entry) \
315 { \
316 (map)->nentries++; \
317 (entry)->prev = (after_where); \
318 (entry)->next = (after_where)->next; \
319 (entry)->prev->next = (entry); \
320 (entry)->next->prev = (entry); \
321 }
322#define vm_map_entry_unlink(map, entry) \
323 { \
324 (map)->nentries--; \
325 (entry)->next->prev = (entry)->prev; \
326 (entry)->prev->next = (entry)->next; \
327 }
328
329/*
330 * vm_map_reference:
331 *
332 * Creates another valid reference to the given map.
333 *
334 */
335void vm_map_reference(map)
336 register vm_map_t map;
337{
338 if (map == NULL)
339 return;
340
341 simple_lock(&map->ref_lock);
342 map->ref_count++;
343 simple_unlock(&map->ref_lock);
344}
345
346/*
347 * vm_map_deallocate:
348 *
349 * Removes a reference from the specified map,
350 * destroying it if no references remain.
351 * The map should not be locked.
352 */
353void vm_map_deallocate(map)
354 register vm_map_t map;
355{
356 register int c;
357
358 if (map == NULL)
359 return;
360
361 simple_lock(&map->ref_lock);
362 c = --map->ref_count;
363 simple_unlock(&map->ref_lock);
364
365 if (c > 0) {
366 return;
367 }
368
369 /*
370 * Lock the map, to wait out all other references
371 * to it.
372 */
373
374 vm_map_lock(map);
375
376 (void) vm_map_delete(map, map->min_offset, map->max_offset);
377
378 pmap_destroy(map->pmap);
379
380 FREE(map, M_VMMAP);
381}
382
383/*
384 * vm_map_insert: [ internal use only ]
385 *
386 * Inserts the given whole VM object into the target
387 * map at the specified address range. The object's
388 * size should match that of the address range.
389 *
390 * Requires that the map be locked, and leaves it so.
391 */
392vm_map_insert(map, object, offset, start, end)
393 vm_map_t map;
394 vm_object_t object;
395 vm_offset_t offset;
396 vm_offset_t start;
397 vm_offset_t end;
398{
399 register vm_map_entry_t new_entry;
400 register vm_map_entry_t prev_entry;
401 vm_map_entry_t temp_entry;
402
403 /*
404 * Check that the start and end points are not bogus.
405 */
406
407 if ((start < map->min_offset) || (end > map->max_offset) ||
408 (start >= end))
409 return(KERN_INVALID_ADDRESS);
410
411 /*
412 * Find the entry prior to the proposed
413 * starting address; if it's part of an
414 * existing entry, this range is bogus.
415 */
416
417 if (vm_map_lookup_entry(map, start, &temp_entry))
418 return(KERN_NO_SPACE);
419
420 prev_entry = temp_entry;
421
422 /*
423 * Assert that the next entry doesn't overlap the
424 * end point.
425 */
426
427 if ((prev_entry->next != &map->header) &&
428 (prev_entry->next->start < end))
429 return(KERN_NO_SPACE);
430
431 /*
432 * See if we can avoid creating a new entry by
433 * extending one of our neighbors.
434 */
435
436 if (object == NULL) {
437 if ((prev_entry != &map->header) &&
438 (prev_entry->end == start) &&
439 (map->is_main_map) &&
440 (prev_entry->is_a_map == FALSE) &&
441 (prev_entry->is_sub_map == FALSE) &&
442 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
443 (prev_entry->protection == VM_PROT_DEFAULT) &&
444 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
445 (prev_entry->wired_count == 0)) {
446
447 if (vm_object_coalesce(prev_entry->object.vm_object,
448 NULL,
449 prev_entry->offset,
450 (vm_offset_t) 0,
451 (vm_size_t)(prev_entry->end
452 - prev_entry->start),
453 (vm_size_t)(end - prev_entry->end))) {
454 /*
455 * Coalesced the two objects - can extend
456 * the previous map entry to include the
457 * new range.
458 */
459 map->size += (end - prev_entry->end);
460 prev_entry->end = end;
461 return(KERN_SUCCESS);
462 }
463 }
464 }
465
466 /*
467 * Create a new entry
468 */
469
470 new_entry = vm_map_entry_create(map);
471 new_entry->start = start;
472 new_entry->end = end;
473
474 new_entry->is_a_map = FALSE;
475 new_entry->is_sub_map = FALSE;
476 new_entry->object.vm_object = object;
477 new_entry->offset = offset;
478
479 new_entry->copy_on_write = FALSE;
480 new_entry->needs_copy = FALSE;
481
482 if (map->is_main_map) {
483 new_entry->inheritance = VM_INHERIT_DEFAULT;
484 new_entry->protection = VM_PROT_DEFAULT;
485 new_entry->max_protection = VM_PROT_DEFAULT;
486 new_entry->wired_count = 0;
487 }
488
489 /*
490 * Insert the new entry into the list
491 */
492
493 vm_map_entry_link(map, prev_entry, new_entry);
494 map->size += new_entry->end - new_entry->start;
495
496 /*
497 * Update the free space hint
498 */
499
500 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
501 map->first_free = new_entry;
502
503 return(KERN_SUCCESS);
504}
505
506/*
507 * SAVE_HINT:
508 *
509 * Saves the specified entry as the hint for
510 * future lookups. Performs necessary interlocks.
511 */
512#define SAVE_HINT(map,value) \
513 simple_lock(&(map)->hint_lock); \
514 (map)->hint = (value); \
515 simple_unlock(&(map)->hint_lock);
516
517/*
518 * vm_map_lookup_entry: [ internal use only ]
519 *
520 * Finds the map entry containing (or
521 * immediately preceding) the specified address
522 * in the given map; the entry is returned
523 * in the "entry" parameter. The boolean
524 * result indicates whether the address is
525 * actually contained in the map.
526 */
527boolean_t vm_map_lookup_entry(map, address, entry)
528 register vm_map_t map;
529 register vm_offset_t address;
530 vm_map_entry_t *entry; /* OUT */
531{
532 register vm_map_entry_t cur;
533 register vm_map_entry_t last;
534
535 /*
536 * Start looking either from the head of the
537 * list, or from the hint.
538 */
539
540 simple_lock(&map->hint_lock);
541 cur = map->hint;
542 simple_unlock(&map->hint_lock);
543
544 if (cur == &map->header)
545 cur = cur->next;
546
547 if (address >= cur->start) {
548 /*
549 * Go from hint to end of list.
550 *
551 * But first, make a quick check to see if
552 * we are already looking at the entry we
553 * want (which is usually the case).
554 * Note also that we don't need to save the hint
555 * here... it is the same hint (unless we are
556 * at the header, in which case the hint didn't
557 * buy us anything anyway).
558 */
559 last = &map->header;
560 if ((cur != last) && (cur->end > address)) {
561 *entry = cur;
562 return(TRUE);
563 }
564 }
565 else {
566 /*
567 * Go from start to hint, *inclusively*
568 */
569 last = cur->next;
570 cur = map->header.next;
571 }
572
573 /*
574 * Search linearly
575 */
576
577 while (cur != last) {
578 if (cur->end > address) {
579 if (address >= cur->start) {
580 /*
581 * Save this lookup for future
582 * hints, and return
583 */
584
585 *entry = cur;
586 SAVE_HINT(map, cur);
587 return(TRUE);
588 }
589 break;
590 }
591 cur = cur->next;
592 }
593 *entry = cur->prev;
594 SAVE_HINT(map, *entry);
595 return(FALSE);
596}
597
598/*
599 * vm_map_find finds an unallocated region in the target address
600 * map with the given length. The search is defined to be
601 * first-fit from the specified address; the region found is
602 * returned in the same parameter.
603 *
604 */
605vm_map_find(map, object, offset, addr, length, find_space)
606 vm_map_t map;
607 vm_object_t object;
608 vm_offset_t offset;
609 vm_offset_t *addr; /* IN/OUT */
610 vm_size_t length;
611 boolean_t find_space;
612{
613 register vm_map_entry_t entry;
614 register vm_offset_t start;
615 register vm_offset_t end;
616 int result;
617
618 start = *addr;
619
620 vm_map_lock(map);
621
622 if (find_space) {
623 /*
624 * Calculate the first possible address.
625 */
626
627 if (start < map->min_offset)
628 start = map->min_offset;
629 if (start > map->max_offset) {
630 vm_map_unlock(map);
631 return (KERN_NO_SPACE);
632 }
633
634 /*
635 * Look for the first possible address;
636 * if there's already something at this
637 * address, we have to start after it.
638 */
639
640 if (start == map->min_offset) {
641 if ((entry = map->first_free) != &map->header)
642 start = entry->end;
643 } else {
644 vm_map_entry_t tmp_entry;
645 if (vm_map_lookup_entry(map, start, &tmp_entry))
646 start = tmp_entry->end;
647 entry = tmp_entry;
648 }
649
650 /*
651 * In any case, the "entry" always precedes
652 * the proposed new region throughout the
653 * loop:
654 */
655
656 while (TRUE) {
657 register vm_map_entry_t next;
658
659 /*
660 * Find the end of the proposed new region.
661 * Be sure we didn't go beyond the end, or
662 * wrap around the address.
663 */
664
665 end = start + length;
666
667 if ((end > map->max_offset) || (end < start)) {
668 vm_map_unlock(map);
669 return (KERN_NO_SPACE);
670 }
671
672 /*
673 * If there are no more entries, we must win.
674 */
675
676 next = entry->next;
677 if (next == &map->header)
678 break;
679
680 /*
681 * If there is another entry, it must be
682 * after the end of the potential new region.
683 */
684
685 if (next->start >= end)
686 break;
687
688 /*
689 * Didn't fit -- move to the next entry.
690 */
691
692 entry = next;
693 start = entry->end;
694 }
695 *addr = start;
696
697 SAVE_HINT(map, entry);
698 }
699
700 result = vm_map_insert(map, object, offset, start, start + length);
701
702 vm_map_unlock(map);
703 return(result);
704}
705
706/*
707 * vm_map_simplify_entry: [ internal use only ]
708 *
709 * Simplify the given map entry by:
710 * removing extra sharing maps
711 * [XXX maybe later] merging with a neighbor
712 */
713void vm_map_simplify_entry(map, entry)
714 vm_map_t map;
715 vm_map_entry_t entry;
716{
717#ifdef lint
718 map++;
719#endif /* lint */
720
721 /*
722 * If this entry corresponds to a sharing map, then
723 * see if we can remove the level of indirection.
724 * If it's not a sharing map, then it points to
725 * a VM object, so see if we can merge with either
726 * of our neighbors.
727 */
728
729 if (entry->is_sub_map)
730 return;
731 if (entry->is_a_map) {
732#if 0
733 vm_map_t my_share_map;
734 int count;
735
736 my_share_map = entry->object.share_map;
737 simple_lock(&my_share_map->ref_lock);
738 count = my_share_map->ref_count;
739 simple_unlock(&my_share_map->ref_lock);
740
741 if (count == 1) {
742 /* Can move the region from
743 * entry->start to entry->end (+ entry->offset)
744 * in my_share_map into place of entry.
745 * Later.
746 */
747 }
748#endif /* 0 */
749 }
750 else {
751 /*
752 * Try to merge with our neighbors.
753 *
754 * Conditions for merge are:
755 *
756 * 1. entries are adjacent.
757 * 2. both entries point to objects
758 * with null pagers.
759 *
760 * If a merge is possible, we replace the two
761 * entries with a single entry, then merge
762 * the two objects into a single object.
763 *
764 * Now, all that is left to do is write the
765 * code!
766 */
767 }
768}
769
770/*
771 * vm_map_clip_start: [ internal use only ]
772 *
773 * Asserts that the given entry begins at or after
774 * the specified address; if necessary,
775 * it splits the entry into two.
776 */
777#define vm_map_clip_start(map, entry, startaddr) \
778{ \
779 if (startaddr > entry->start) \
780 _vm_map_clip_start(map, entry, startaddr); \
781}
782
783/*
784 * This routine is called only when it is known that
785 * the entry must be split.
786 */
787void _vm_map_clip_start(map, entry, start)
788 register vm_map_t map;
789 register vm_map_entry_t entry;
790 register vm_offset_t start;
791{
792 register vm_map_entry_t new_entry;
793
794 /*
795 * See if we can simplify this entry first
796 */
797
798 vm_map_simplify_entry(map, entry);
799
800 /*
801 * Split off the front portion --
802 * note that we must insert the new
803 * entry BEFORE this one, so that
804 * this entry has the specified starting
805 * address.
806 */
807
808 new_entry = vm_map_entry_create(map);
809 *new_entry = *entry;
810
811 new_entry->end = start;
812 entry->offset += (start - entry->start);
813 entry->start = start;
814
815 vm_map_entry_link(map, entry->prev, new_entry);
816
817 if (entry->is_a_map || entry->is_sub_map)
818 vm_map_reference(new_entry->object.share_map);
819 else
820 vm_object_reference(new_entry->object.vm_object);
821}
822
823/*
824 * vm_map_clip_end: [ internal use only ]
825 *
826 * Asserts that the given entry ends at or before
827 * the specified address; if necessary,
828 * it splits the entry into two.
829 */
830
831void _vm_map_clip_end();
832#define vm_map_clip_end(map, entry, endaddr) \
833{ \
834 if (endaddr < entry->end) \
835 _vm_map_clip_end(map, entry, endaddr); \
836}
837
838/*
839 * This routine is called only when it is known that
840 * the entry must be split.
841 */
842void _vm_map_clip_end(map, entry, end)
843 register vm_map_t map;
844 register vm_map_entry_t entry;
845 register vm_offset_t end;
846{
847 register vm_map_entry_t new_entry;
848
849 /*
850 * Create a new entry and insert it
851 * AFTER the specified entry
852 */
853
854 new_entry = vm_map_entry_create(map);
855 *new_entry = *entry;
856
857 new_entry->start = entry->end = end;
858 new_entry->offset += (end - entry->start);
859
860 vm_map_entry_link(map, entry, new_entry);
861
862 if (entry->is_a_map || entry->is_sub_map)
863 vm_map_reference(new_entry->object.share_map);
864 else
865 vm_object_reference(new_entry->object.vm_object);
866}
867
868/*
869 * VM_MAP_RANGE_CHECK: [ internal use only ]
870 *
871 * Asserts that the starting and ending region
872 * addresses fall within the valid range of the map.
873 */
874#define VM_MAP_RANGE_CHECK(map, start, end) \
875 { \
876 if (start < vm_map_min(map)) \
877 start = vm_map_min(map); \
878 if (end > vm_map_max(map)) \
879 end = vm_map_max(map); \
880 if (start > end) \
881 start = end; \
882 }
883
884/*
885 * vm_map_submap: [ kernel use only ]
886 *
887 * Mark the given range as handled by a subordinate map.
888 *
889 * This range must have been created with vm_map_find,
890 * and no other operations may have been performed on this
891 * range prior to calling vm_map_submap.
892 *
893 * Only a limited number of operations can be performed
894 * within this range after calling vm_map_submap:
895 * vm_fault
896 * [Don't try vm_map_copy!]
897 *
898 * To remove a submapping, one must first remove the
899 * range from the superior map, and then destroy the
900 * submap (if desired). [Better yet, don't try it.]
901 */
902vm_map_submap(map, start, end, submap)
903 register vm_map_t map;
904 register vm_offset_t start;
905 register vm_offset_t end;
906 vm_map_t submap;
907{
908 vm_map_entry_t entry;
909 register int result = KERN_INVALID_ARGUMENT;
910
911 vm_map_lock(map);
912
913 VM_MAP_RANGE_CHECK(map, start, end);
914
915 if (vm_map_lookup_entry(map, start, &entry)) {
916 vm_map_clip_start(map, entry, start);
917 }
918 else
919 entry = entry->next;
920
921 vm_map_clip_end(map, entry, end);
922
923 if ((entry->start == start) && (entry->end == end) &&
924 (!entry->is_a_map) &&
925 (entry->object.vm_object == NULL) &&
926 (!entry->copy_on_write)) {
927 entry->is_a_map = FALSE;
928 entry->is_sub_map = TRUE;
929 vm_map_reference(entry->object.sub_map = submap);
930 result = KERN_SUCCESS;
931 }
932 vm_map_unlock(map);
933
934 return(result);
935}
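
/*
 * For example, a kernel submap is typically set up roughly as follows
 * (a sketch only; "addr", "size" and "submap" are illustrative names,
 * and real callers check the KERN_* return codes):
 *
 *	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
 *			&addr, size, TRUE);
 *	submap = vm_map_create(vm_map_pmap(kernel_map),
 *			addr, addr + size, TRUE);
 *	(void) vm_map_submap(kernel_map, addr, addr + size, submap);
 */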
936
937/*
938 * vm_map_protect:
939 *
940 * Sets the protection of the specified address
941 * region in the target map. If "set_max" is
942 * specified, the maximum protection is to be set;
943 * otherwise, only the current protection is affected.
944 */
945vm_map_protect(map, start, end, new_prot, set_max)
946 register vm_map_t map;
947 register vm_offset_t start;
948 register vm_offset_t end;
949 register vm_prot_t new_prot;
950 register boolean_t set_max;
951{
952 register vm_map_entry_t current;
953 vm_map_entry_t entry;
954
955 vm_map_lock(map);
956
957 VM_MAP_RANGE_CHECK(map, start, end);
958
959 if (vm_map_lookup_entry(map, start, &entry)) {
960 vm_map_clip_start(map, entry, start);
961 }
962 else
963 entry = entry->next;
964
965 /*
966 * Make a first pass to check for protection
967 * violations.
968 */
969
970 current = entry;
971 while ((current != &map->header) && (current->start < end)) {
972		if (current->is_sub_map) {
973			vm_map_unlock(map); return(KERN_INVALID_ARGUMENT); }
974 if ((new_prot & current->max_protection) != new_prot) {
975 vm_map_unlock(map);
976 return(KERN_PROTECTION_FAILURE);
977 }
978
979 current = current->next;
980 }
981
982 /*
983 * Go back and fix up protections.
984 * [Note that clipping is not necessary the second time.]
985 */
986
987 current = entry;
988
989 while ((current != &map->header) && (current->start < end)) {
990 vm_prot_t old_prot;
991
992 vm_map_clip_end(map, current, end);
993
994 old_prot = current->protection;
995 if (set_max)
996 current->protection =
997 (current->max_protection = new_prot) &
998 old_prot;
999 else
1000 current->protection = new_prot;
1001
1002 /*
1003 * Update physical map if necessary.
1004 * Worry about copy-on-write here -- CHECK THIS XXX
1005 */
1006
1007 if (current->protection != old_prot) {
1008
1009#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1010 VM_PROT_ALL)
1011#define max(a,b) ((a) > (b) ? (a) : (b))
1012
1013 if (current->is_a_map) {
1014 vm_map_entry_t share_entry;
1015 vm_offset_t share_end;
1016
1017 vm_map_lock(current->object.share_map);
1018 (void) vm_map_lookup_entry(
1019 current->object.share_map,
1020 current->offset,
1021 &share_entry);
1022 share_end = current->offset +
1023 (current->end - current->start);
1024 while ((share_entry !=
1025 &current->object.share_map->header) &&
1026 (share_entry->start < share_end)) {
1027
1028 pmap_protect(map->pmap,
1029 (max(share_entry->start,
1030 current->offset) -
1031 current->offset +
1032 current->start),
1033 min(share_entry->end,
1034 share_end) -
1035 current->offset +
1036 current->start,
1037 current->protection &
1038 MASK(share_entry));
1039
1040 share_entry = share_entry->next;
1041 }
1042 vm_map_unlock(current->object.share_map);
1043 }
1044 else
1045 pmap_protect(map->pmap, current->start,
1046 current->end,
1047			current->protection & MASK(current));
1048#undef max
1049#undef MASK
1050 }
1051 current = current->next;
1052 }
1053
1054 vm_map_unlock(map);
1055 return(KERN_SUCCESS);
1056}
1057
1058/*
1059 * vm_map_inherit:
1060 *
1061 * Sets the inheritance of the specified address
1062 * range in the target map. Inheritance
1063 * affects how the map will be shared with
1064 * child maps at the time of vm_map_fork.
1065 */
1066vm_map_inherit(map, start, end, new_inheritance)
1067 register vm_map_t map;
1068 register vm_offset_t start;
1069 register vm_offset_t end;
1070 register vm_inherit_t new_inheritance;
1071{
1072 register vm_map_entry_t entry;
1073 vm_map_entry_t temp_entry;
1074
1075 switch (new_inheritance) {
1076 case VM_INHERIT_NONE:
1077 case VM_INHERIT_COPY:
1078 case VM_INHERIT_SHARE:
1079 break;
1080 default:
1081 return(KERN_INVALID_ARGUMENT);
1082 }
1083
1084 vm_map_lock(map);
1085
1086 VM_MAP_RANGE_CHECK(map, start, end);
1087
1088 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1089 entry = temp_entry;
1090 vm_map_clip_start(map, entry, start);
1091 }
1092 else
1093 entry = temp_entry->next;
1094
1095 while ((entry != &map->header) && (entry->start < end)) {
1096 vm_map_clip_end(map, entry, end);
1097
1098 entry->inheritance = new_inheritance;
1099
1100 entry = entry->next;
1101 }
1102
1103 vm_map_unlock(map);
1104 return(KERN_SUCCESS);
1105}
1106
1107/*
1108 * vm_map_pageable:
1109 *
1110 * Sets the pageability of the specified address
1111 * range in the target map. Regions specified
1112 * as not pageable require locked-down physical
1113 * memory and physical page maps.
1114 *
1115 * The map must not be locked, but a reference
1116 * must remain to the map throughout the call.
1117 */
1118vm_map_pageable(map, start, end, new_pageable)
1119 register vm_map_t map;
1120 register vm_offset_t start;
1121 register vm_offset_t end;
1122 register boolean_t new_pageable;
1123{
1124 register vm_map_entry_t entry;
1125 vm_map_entry_t temp_entry;
1126
1127 vm_map_lock(map);
1128
1129 VM_MAP_RANGE_CHECK(map, start, end);
1130
1131 /*
1132 * Only one pageability change may take place at one
1133 * time, since vm_fault assumes it will be called
1134 * only once for each wiring/unwiring. Therefore, we
1135 * have to make sure we're actually changing the pageability
1136 * for the entire region. We do so before making any changes.
1137 */
1138
1139 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1140 entry = temp_entry;
1141 vm_map_clip_start(map, entry, start);
1142 }
1143 else
1144 entry = temp_entry->next;
1145 temp_entry = entry;
1146
1147 /*
1148 * Actions are rather different for wiring and unwiring,
1149 * so we have two separate cases.
1150 */
1151
1152 if (new_pageable) {
1153
1154 /*
1155 * Unwiring. First ensure that the range to be
1156 * unwired is really wired down.
1157 */
1158 while ((entry != &map->header) && (entry->start < end)) {
1159
1160 if (entry->wired_count == 0) {
1161 vm_map_unlock(map);
1162 return(KERN_INVALID_ARGUMENT);
1163 }
1164 entry = entry->next;
1165 }
1166
1167 /*
1168 * Now decrement the wiring count for each region.
1169 * If a region becomes completely unwired,
1170 * unwire its physical pages and mappings.
1171 */
1172 lock_set_recursive(&map->lock);
1173
1174 entry = temp_entry;
1175 while ((entry != &map->header) && (entry->start < end)) {
1176 vm_map_clip_end(map, entry, end);
1177
1178 entry->wired_count--;
1179 if (entry->wired_count == 0)
1180 vm_fault_unwire(map, entry->start, entry->end);
1181
1182 entry = entry->next;
1183 }
1184 lock_clear_recursive(&map->lock);
1185 }
1186
1187 else {
1188 /*
1189 * Wiring. We must do this in two passes:
1190 *
1191 * 1. Holding the write lock, we increment the
1192 * wiring count. For any area that is not already
1193 * wired, we create any shadow objects that need
1194 * to be created.
1195 *
1196 * 2. We downgrade to a read lock, and call
1197 * vm_fault_wire to fault in the pages for any
1198 * newly wired area (wired_count is 1).
1199 *
1200 * Downgrading to a read lock for vm_fault_wire avoids
1201 * a possible deadlock with another thread that may have
1202 * faulted on one of the pages to be wired (it would mark
1203 * the page busy, blocking us, then in turn block on the
1204 * map lock that we hold). Because of problems in the
1205 * recursive lock package, we cannot upgrade to a write
1206 * lock in vm_map_lookup. Thus, any actions that require
1207 * the write lock must be done beforehand. Because we
1208 * keep the read lock on the map, the copy-on-write status
1209 * of the entries we modify here cannot change.
1210 */
1211
1212 /*
1213 * Pass 1.
1214 */
1215 entry = temp_entry;
1216 while ((entry != &map->header) && (entry->start < end)) {
1217 vm_map_clip_end(map, entry, end);
1218
1219 entry->wired_count++;
1220 if (entry->wired_count == 1) {
1221
1222 /*
1223 * Perform actions of vm_map_lookup that need
1224 * the write lock on the map: create a shadow
1225 * object for a copy-on-write region, or an
1226 * object for a zero-fill region.
1227 *
1228 * We don't have to do this for entries that
1229 * point to sharing maps, because we won't hold
1230 * the lock on the sharing map.
1231 */
1232 if (!entry->is_a_map) {
1233 if (entry->needs_copy &&
1234 ((entry->protection & VM_PROT_WRITE) != 0)) {
1235
1236 vm_object_shadow(&entry->object.vm_object,
1237 &entry->offset,
1238 (vm_size_t)(entry->end
1239 - entry->start));
1240 entry->needs_copy = FALSE;
1241 }
1242 else if (entry->object.vm_object == NULL) {
1243 entry->object.vm_object =
1244 vm_object_allocate((vm_size_t)(entry->end
1245 - entry->start));
1246 entry->offset = (vm_offset_t)0;
1247 }
1248 }
1249 }
1250
1251 entry = entry->next;
1252 }
1253
1254 /*
1255 * Pass 2.
1256 */
1257
1258 /*
1259 * HACK HACK HACK HACK
1260 *
1261 * If we are wiring in the kernel map or a submap of it,
1262 * unlock the map to avoid deadlocks. We trust that the
1263 * kernel threads are well-behaved, and therefore will
1264 * not do anything destructive to this region of the map
1265 * while we have it unlocked. We cannot trust user threads
1266 * to do the same.
1267 *
1268 * HACK HACK HACK HACK
1269 */
1270 if (vm_map_pmap(map) == kernel_pmap) {
1271 vm_map_unlock(map); /* trust me ... */
1272 }
1273 else {
1274 lock_set_recursive(&map->lock);
1275 lock_write_to_read(&map->lock);
1276 }
1277
1278 entry = temp_entry;
1279 while (entry != &map->header && entry->start < end) {
1280 if (entry->wired_count == 1) {
1281 vm_fault_wire(map, entry->start, entry->end);
1282 }
1283 entry = entry->next;
1284 }
1285
1286 if (vm_map_pmap(map) == kernel_pmap) {
1287 vm_map_lock(map);
1288 }
1289 else {
1290 lock_clear_recursive(&map->lock);
1291 }
1292 }
1293
1294 vm_map_unlock(map);
1295
1296 return(KERN_SUCCESS);
1297}
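
/*
 * Callers wire a region down by passing FALSE for new_pageable and make
 * it pageable again by passing TRUE; for example (a sketch only, with
 * page-aligned "start" and "end" assumed):
 *
 *	(void) vm_map_pageable(map, start, end, FALSE);		wire down
 *	(void) vm_map_pageable(map, start, end, TRUE);		unwire
 */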
1298
1299/*
1300 * vm_map_entry_unwire: [ internal use only ]
1301 *
1302 * Make the region specified by this entry pageable.
1303 *
1304 * The map in question should be locked.
1305 * [This is the reason for this routine's existence.]
1306 */
1307void vm_map_entry_unwire(map, entry)
1308 vm_map_t map;
1309 register vm_map_entry_t entry;
1310{
1311 vm_fault_unwire(map, entry->start, entry->end);
1312 entry->wired_count = 0;
1313}
1314
1315/*
1316 * vm_map_entry_delete: [ internal use only ]
1317 *
1318 * Deallocate the given entry from the target map.
1319 */
1320void vm_map_entry_delete(map, entry)
1321 register vm_map_t map;
1322 register vm_map_entry_t entry;
1323{
1324 if (entry->wired_count != 0)
1325 vm_map_entry_unwire(map, entry);
1326
1327 vm_map_entry_unlink(map, entry);
1328 map->size -= entry->end - entry->start;
1329
1330 if (entry->is_a_map || entry->is_sub_map)
1331 vm_map_deallocate(entry->object.share_map);
1332 else
1333 vm_object_deallocate(entry->object.vm_object);
1334
1335 vm_map_entry_dispose(map, entry);
1336}
1337
1338/*
1339 * vm_map_delete: [ internal use only ]
1340 *
1341 * Deallocates the given address range from the target
1342 * map.
1343 *
1344 * When called with a sharing map, removes pages from
1345 * that region from all physical maps.
1346 */
1347vm_map_delete(map, start, end)
1348 register vm_map_t map;
1349 vm_offset_t start;
1350 register vm_offset_t end;
1351{
1352 register vm_map_entry_t entry;
1353 vm_map_entry_t first_entry;
1354
1355 /*
1356 * Find the start of the region, and clip it
1357 */
1358
1359 if (!vm_map_lookup_entry(map, start, &first_entry))
1360 entry = first_entry->next;
1361 else {
1362 entry = first_entry;
1363 vm_map_clip_start(map, entry, start);
1364
1365 /*
1366 * Fix the lookup hint now, rather than each
1367		 * time through the loop.
1368 */
1369
1370 SAVE_HINT(map, entry->prev);
1371 }
1372
1373 /*
1374 * Save the free space hint
1375 */
1376
1377 if (map->first_free->start >= start)
1378 map->first_free = entry->prev;
1379
1380 /*
1381 * Step through all entries in this region
1382 */
1383
1384 while ((entry != &map->header) && (entry->start < end)) {
1385 vm_map_entry_t next;
1386 register vm_offset_t s, e;
1387 register vm_object_t object;
1388
1389 vm_map_clip_end(map, entry, end);
1390
1391 next = entry->next;
1392 s = entry->start;
1393 e = entry->end;
1394
1395 /*
1396 * Unwire before removing addresses from the pmap;
1397 * otherwise, unwiring will put the entries back in
1398 * the pmap.
1399 */
1400
1401 object = entry->object.vm_object;
1402 if (entry->wired_count != 0)
1403 vm_map_entry_unwire(map, entry);
1404
1405 /*
1406 * If this is a sharing map, we must remove
1407 * *all* references to this data, since we can't
1408 * find all of the physical maps which are sharing
1409 * it.
1410 */
1411
1412 if (object == kernel_object || object == kmem_object)
1413 vm_object_page_remove(object, entry->offset,
1414 entry->offset + (e - s));
1415 else if (!map->is_main_map)
1416 vm_object_pmap_remove(object,
1417 entry->offset,
1418 entry->offset + (e - s));
1419 else
1420 pmap_remove(map->pmap, s, e);
1421
1422 /*
1423 * Delete the entry (which may delete the object)
1424 * only after removing all pmap entries pointing
1425 * to its pages. (Otherwise, its page frames may
1426 * be reallocated, and any modify bits will be
1427 * set in the wrong object!)
1428 */
1429
1430 vm_map_entry_delete(map, entry);
1431 entry = next;
1432 }
1433 return(KERN_SUCCESS);
1434}
1435
1436/*
1437 * vm_map_remove:
1438 *
1439 * Remove the given address range from the target map.
1440 * This is the exported form of vm_map_delete.
1441 */
1442vm_map_remove(map, start, end)
1443 register vm_map_t map;
1444 register vm_offset_t start;
1445 register vm_offset_t end;
1446{
1447 register int result;
1448
1449 vm_map_lock(map);
1450 VM_MAP_RANGE_CHECK(map, start, end);
1451 result = vm_map_delete(map, start, end);
1452 vm_map_unlock(map);
1453
1454 return(result);
1455}
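
/*
 * vm_map_remove() is the usual counterpart of vm_map_find(); for
 * example, a region obtained with
 *
 *	(void) vm_map_find(map, NULL, (vm_offset_t) 0, &addr, size, TRUE);
 *
 * is given back later with
 *
 *	(void) vm_map_remove(map, addr, addr + size);
 *
 * (a sketch only; real callers check the KERN_* return codes).
 */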
1456
1457/*
1458 * vm_map_check_protection:
1459 *
1460 * Assert that the target map allows the specified
1461 * privilege on the entire address region given.
1462 * The entire region must be allocated.
1463 */
1464boolean_t vm_map_check_protection(map, start, end, protection)
1465 register vm_map_t map;
1466 register vm_offset_t start;
1467 register vm_offset_t end;
1468 register vm_prot_t protection;
1469{
1470 register vm_map_entry_t entry;
1471 vm_map_entry_t tmp_entry;
1472
1473 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1474 return(FALSE);
1475 }
1476
1477 entry = tmp_entry;
1478
1479 while (start < end) {
1480 if (entry == &map->header) {
1481 return(FALSE);
1482 }
1483
1484 /*
1485 * No holes allowed!
1486 */
1487
1488 if (start < entry->start) {
1489 return(FALSE);
1490 }
1491
1492 /*
1493 * Check protection associated with entry.
1494 */
1495
1496 if ((entry->protection & protection) != protection) {
1497 return(FALSE);
1498 }
1499
1500 /* go to next entry */
1501
1502 start = entry->end;
1503 entry = entry->next;
1504 }
1505 return(TRUE);
1506}
1507
1508/*
1509 * vm_map_copy_entry:
1510 *
1511 * Copies the contents of the source entry to the destination
1512 * entry. The entries *must* be aligned properly.
1513 */
1514void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1515 vm_map_t src_map, dst_map;
1516 register vm_map_entry_t src_entry, dst_entry;
1517{
1518 vm_object_t temp_object;
1519
1520 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1521 return;
1522
1523 if (dst_entry->object.vm_object != NULL &&
1524 !dst_entry->object.vm_object->internal)
1525 printf("vm_map_copy_entry: copying over permanent data!\n");
1526
1527 /*
1528 * If our destination map was wired down,
1529 * unwire it now.
1530 */
1531
1532 if (dst_entry->wired_count != 0)
1533 vm_map_entry_unwire(dst_map, dst_entry);
1534
1535 /*
1536 * If we're dealing with a sharing map, we
1537 * must remove the destination pages from
1538 * all maps (since we cannot know which maps
1539 * this sharing map belongs in).
1540 */
1541
1542 if (dst_map->is_main_map)
1543 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1544 else
1545 vm_object_pmap_remove(dst_entry->object.vm_object,
1546 dst_entry->offset,
1547 dst_entry->offset +
1548 (dst_entry->end - dst_entry->start));
1549
1550 if (src_entry->wired_count == 0) {
1551
1552 boolean_t src_needs_copy;
1553
1554 /*
1555 * If the source entry is marked needs_copy,
1556 * it is already write-protected.
1557 */
1558 if (!src_entry->needs_copy) {
1559
1560 boolean_t su;
1561
1562 /*
1563 * If the source entry has only one mapping,
1564 * we can just protect the virtual address
1565 * range.
1566 */
1567 if (!(su = src_map->is_main_map)) {
1568 simple_lock(&src_map->ref_lock);
1569 su = (src_map->ref_count == 1);
1570 simple_unlock(&src_map->ref_lock);
1571 }
1572
1573 if (su) {
1574 pmap_protect(src_map->pmap,
1575 src_entry->start,
1576 src_entry->end,
1577 src_entry->protection & ~VM_PROT_WRITE);
1578 }
1579 else {
1580 vm_object_pmap_copy(src_entry->object.vm_object,
1581 src_entry->offset,
1582 src_entry->offset + (src_entry->end
1583 -src_entry->start));
1584 }
1585 }
1586
1587 /*
1588 * Make a copy of the object.
1589 */
1590 temp_object = dst_entry->object.vm_object;
1591 vm_object_copy(src_entry->object.vm_object,
1592 src_entry->offset,
1593 (vm_size_t)(src_entry->end -
1594 src_entry->start),
1595 &dst_entry->object.vm_object,
1596 &dst_entry->offset,
1597 &src_needs_copy);
1598 /*
1599 * If we didn't get a copy-object now, mark the
1600 * source map entry so that a shadow will be created
1601 * to hold its changed pages.
1602 */
1603 if (src_needs_copy)
1604 src_entry->needs_copy = TRUE;
1605
1606 /*
1607 * The destination always needs to have a shadow
1608 * created.
1609 */
1610 dst_entry->needs_copy = TRUE;
1611
1612 /*
1613 * Mark the entries copy-on-write, so that write-enabling
1614 * the entry won't make copy-on-write pages writable.
1615 */
1616 src_entry->copy_on_write = TRUE;
1617 dst_entry->copy_on_write = TRUE;
1618 /*
1619 * Get rid of the old object.
1620 */
1621 vm_object_deallocate(temp_object);
1622
1623 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1624 dst_entry->end - dst_entry->start, src_entry->start);
1625 }
1626 else {
1627 /*
1628 * Of course, wired down pages can't be set copy-on-write.
1629 * Cause wired pages to be copied into the new
1630 * map by simulating faults (the new pages are
1631 * pageable)
1632 */
1633 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1634 }
1635}
1636
1637/*
1638 * vm_map_copy:
1639 *
1640 * Perform a virtual memory copy from the source
1641 * address map/range to the destination map/range.
1642 *
1643 * If src_destroy or dst_alloc is requested,
1644 * the source and destination regions should be
1645 * disjoint, not only in the top-level map, but
1646 * in the sharing maps as well. [The best way
1647 * to guarantee this is to use a new intermediate
1648 * map to make copies. This also reduces map
1649 * fragmentation.]
1650 */
1651vm_map_copy(dst_map, src_map,
1652 dst_addr, len, src_addr,
1653 dst_alloc, src_destroy)
1654 vm_map_t dst_map;
1655 vm_map_t src_map;
1656 vm_offset_t dst_addr;
1657 vm_size_t len;
1658 vm_offset_t src_addr;
1659 boolean_t dst_alloc;
1660 boolean_t src_destroy;
1661{
1662 register
1663 vm_map_entry_t src_entry;
1664 register
1665 vm_map_entry_t dst_entry;
1666 vm_map_entry_t tmp_entry;
1667 vm_offset_t src_start;
1668 vm_offset_t src_end;
1669 vm_offset_t dst_start;
1670 vm_offset_t dst_end;
1671 vm_offset_t src_clip;
1672 vm_offset_t dst_clip;
1673 int result;
1674 boolean_t old_src_destroy;
1675
1676 /*
1677 * XXX While we figure out why src_destroy screws up,
1678 * we'll do it by explicitly vm_map_delete'ing at the end.
1679 */
1680
1681 old_src_destroy = src_destroy;
1682 src_destroy = FALSE;
1683
1684 /*
1685 * Compute start and end of region in both maps
1686 */
1687
1688 src_start = src_addr;
1689 src_end = src_start + len;
1690 dst_start = dst_addr;
1691 dst_end = dst_start + len;
1692
1693 /*
1694 * Check that the region can exist in both source
1695 * and destination.
1696 */
1697
1698 if ((dst_end < dst_start) || (src_end < src_start))
1699 return(KERN_NO_SPACE);
1700
1701 /*
1702 * Lock the maps in question -- we avoid deadlock
1703 * by ordering lock acquisition by map value
1704 */
1705
1706 if (src_map == dst_map) {
1707 vm_map_lock(src_map);
1708 }
1709 else if ((int) src_map < (int) dst_map) {
1710 vm_map_lock(src_map);
1711 vm_map_lock(dst_map);
1712 } else {
1713 vm_map_lock(dst_map);
1714 vm_map_lock(src_map);
1715 }
1716
1717 result = KERN_SUCCESS;
1718
1719 /*
1720 * Check protections... source must be completely readable and
1721 * destination must be completely writable. [Note that if we're
1722 * allocating the destination region, we don't have to worry
1723 * about protection, but instead about whether the region
1724 * exists.]
1725 */
1726
1727 if (src_map->is_main_map && dst_map->is_main_map) {
1728 if (!vm_map_check_protection(src_map, src_start, src_end,
1729 VM_PROT_READ)) {
1730 result = KERN_PROTECTION_FAILURE;
1731 goto Return;
1732 }
1733
1734 if (dst_alloc) {
1735 /* XXX Consider making this a vm_map_find instead */
1736 if ((result = vm_map_insert(dst_map, NULL,
1737 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1738 goto Return;
1739 }
1740 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1741 VM_PROT_WRITE)) {
1742 result = KERN_PROTECTION_FAILURE;
1743 goto Return;
1744 }
1745 }
1746
1747 /*
1748 * Find the start entries and clip.
1749 *
1750 * Note that checking protection asserts that the
1751 * lookup cannot fail.
1752 *
1753 * Also note that we wait to do the second lookup
1754 * until we have done the first clip, as the clip
1755 * may affect which entry we get!
1756 */
1757
1758 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1759 src_entry = tmp_entry;
1760 vm_map_clip_start(src_map, src_entry, src_start);
1761
1762 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1763 dst_entry = tmp_entry;
1764 vm_map_clip_start(dst_map, dst_entry, dst_start);
1765
1766 /*
1767 * If both source and destination entries are the same,
1768 * retry the first lookup, as it may have changed.
1769 */
1770
1771 if (src_entry == dst_entry) {
1772 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1773 src_entry = tmp_entry;
1774 }
1775
1776 /*
1777 * If source and destination entries are still the same,
1778 * a null copy is being performed.
1779 */
1780
1781 if (src_entry == dst_entry)
1782 goto Return;
1783
1784 /*
1785 * Go through entries until we get to the end of the
1786 * region.
1787 */
1788
1789 while (src_start < src_end) {
1790 /*
1791 * Clip the entries to the endpoint of the entire region.
1792 */
1793
1794 vm_map_clip_end(src_map, src_entry, src_end);
1795 vm_map_clip_end(dst_map, dst_entry, dst_end);
1796
1797 /*
1798 * Clip each entry to the endpoint of the other entry.
1799 */
1800
1801 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1802 vm_map_clip_end(src_map, src_entry, src_clip);
1803
1804 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1805 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1806
1807 /*
1808 * Both entries now match in size and relative endpoints.
1809 *
1810 * If both entries refer to a VM object, we can
1811 * deal with them now.
1812 */
1813
1814 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1815 vm_map_copy_entry(src_map, dst_map, src_entry,
1816 dst_entry);
1817 }
1818 else {
1819 register vm_map_t new_dst_map;
1820 vm_offset_t new_dst_start;
1821 vm_size_t new_size;
1822 vm_map_t new_src_map;
1823 vm_offset_t new_src_start;
1824
1825 /*
1826 * We have to follow at least one sharing map.
1827 */
1828
1829 new_size = (dst_entry->end - dst_entry->start);
1830
1831 if (src_entry->is_a_map) {
1832 new_src_map = src_entry->object.share_map;
1833 new_src_start = src_entry->offset;
1834 }
1835 else {
1836 new_src_map = src_map;
1837 new_src_start = src_entry->start;
1838 lock_set_recursive(&src_map->lock);
1839 }
1840
1841 if (dst_entry->is_a_map) {
1842 vm_offset_t new_dst_end;
1843
1844 new_dst_map = dst_entry->object.share_map;
1845 new_dst_start = dst_entry->offset;
1846
1847 /*
1848 * Since the destination sharing entries
1849 * will be merely deallocated, we can
1850 * do that now, and replace the region
1851 * with a null object. [This prevents
1852 * splitting the source map to match
1853 * the form of the destination map.]
1854 * Note that we can only do so if the
1855 * source and destination do not overlap.
1856 */
1857
1858 new_dst_end = new_dst_start + new_size;
1859
1860 if (new_dst_map != new_src_map) {
1861 vm_map_lock(new_dst_map);
1862 (void) vm_map_delete(new_dst_map,
1863 new_dst_start,
1864 new_dst_end);
1865 (void) vm_map_insert(new_dst_map,
1866 NULL,
1867 (vm_offset_t) 0,
1868 new_dst_start,
1869 new_dst_end);
1870 vm_map_unlock(new_dst_map);
1871 }
1872 }
1873 else {
1874 new_dst_map = dst_map;
1875 new_dst_start = dst_entry->start;
1876 lock_set_recursive(&dst_map->lock);
1877 }
1878
1879 /*
1880 * Recursively copy the sharing map.
1881 */
1882
1883 (void) vm_map_copy(new_dst_map, new_src_map,
1884 new_dst_start, new_size, new_src_start,
1885 FALSE, FALSE);
1886
1887 if (dst_map == new_dst_map)
1888 lock_clear_recursive(&dst_map->lock);
1889 if (src_map == new_src_map)
1890 lock_clear_recursive(&src_map->lock);
1891 }
1892
1893 /*
1894 * Update variables for next pass through the loop.
1895 */
1896
1897 src_start = src_entry->end;
1898 src_entry = src_entry->next;
1899 dst_start = dst_entry->end;
1900 dst_entry = dst_entry->next;
1901
1902 /*
1903 * If the source is to be destroyed, here is the
1904 * place to do it.
1905 */
1906
1907 if (src_destroy && src_map->is_main_map &&
1908 dst_map->is_main_map)
1909 vm_map_entry_delete(src_map, src_entry->prev);
1910 }
1911
1912 /*
1913 * Update the physical maps as appropriate
1914 */
1915
1916 if (src_map->is_main_map && dst_map->is_main_map) {
1917 if (src_destroy)
1918 pmap_remove(src_map->pmap, src_addr, src_addr + len);
1919 }
1920
1921 /*
1922 * Unlock the maps
1923 */
1924
1925 Return: ;
1926
1927 if (old_src_destroy)
1928 vm_map_delete(src_map, src_addr, src_addr + len);
1929
1930 vm_map_unlock(src_map);
1931 if (src_map != dst_map)
1932 vm_map_unlock(dst_map);
1933
1934 return(result);
1935}
1936
1937/*
1938 * vmspace_fork:
1939 * Create a new process vmspace structure and vm_map
1940 * based on those of an existing process. The new map
1941 * is based on the old map, according to the inheritance
1942 * values on the regions in that map.
1943 *
1944 * The source map must not be locked.
1945 */
1946struct vmspace *
1947vmspace_fork(vm1)
1948 register struct vmspace *vm1;
1949{
1950 register struct vmspace *vm2;
1951 vm_map_t old_map = &vm1->vm_map;
1952 vm_map_t new_map;
1953 vm_map_entry_t old_entry;
1954 vm_map_entry_t new_entry;
1955 pmap_t new_pmap;
1956
1957 vm_map_lock(old_map);
1958
1959 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1960 old_map->entries_pageable);
1961 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1962 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1963 new_pmap = &vm2->vm_pmap; /* XXX */
1964 new_map = &vm2->vm_map; /* XXX */
1965
1966 old_entry = old_map->header.next;
1967
1968 while (old_entry != &old_map->header) {
1969 if (old_entry->is_sub_map)
1970 panic("vm_map_fork: encountered a submap");
1971
1972 switch (old_entry->inheritance) {
1973 case VM_INHERIT_NONE:
1974 break;
1975
1976 case VM_INHERIT_SHARE:
1977 /*
1978 * If we don't already have a sharing map:
1979 */
1980
1981 if (!old_entry->is_a_map) {
1982 vm_map_t new_share_map;
1983 vm_map_entry_t new_share_entry;
1984
1985 /*
1986 * Create a new sharing map
1987 */
1988
1989 new_share_map = vm_map_create(NULL,
1990 old_entry->start,
1991 old_entry->end,
1992 TRUE);
1993 new_share_map->is_main_map = FALSE;
1994
1995 /*
1996 * Create the only sharing entry from the
1997 * old task map entry.
1998 */
1999
2000 new_share_entry =
2001 vm_map_entry_create(new_share_map);
2002 *new_share_entry = *old_entry;
2003
2004 /*
2005 * Insert the entry into the new sharing
2006 * map
2007 */
2008
2009 vm_map_entry_link(new_share_map,
2010 new_share_map->header.prev,
2011 new_share_entry);
2012
2013 /*
2014 * Fix up the task map entry to refer
2015 * to the sharing map now.
2016 */
2017
2018 old_entry->is_a_map = TRUE;
2019 old_entry->object.share_map = new_share_map;
2020 old_entry->offset = old_entry->start;
2021 }
2022
2023 /*
2024 * Clone the entry, referencing the sharing map.
2025 */
2026
2027 new_entry = vm_map_entry_create(new_map);
2028 *new_entry = *old_entry;
2029 vm_map_reference(new_entry->object.share_map);
2030
2031 /*
2032 * Insert the entry into the new map -- we
2033 * know we're inserting at the end of the new
2034 * map.
2035 */
2036
2037 vm_map_entry_link(new_map, new_map->header.prev,
2038 new_entry);
2039
2040 /*
2041 * Update the physical map
2042 */
2043
2044 pmap_copy(new_map->pmap, old_map->pmap,
2045 new_entry->start,
2046 (old_entry->end - old_entry->start),
2047 old_entry->start);
2048 break;
2049
2050 case VM_INHERIT_COPY:
2051 /*
2052 * Clone the entry and link into the map.
2053 */
2054
2055 new_entry = vm_map_entry_create(new_map);
2056 *new_entry = *old_entry;
2057 new_entry->wired_count = 0;
2058 new_entry->object.vm_object = NULL;
2059 new_entry->is_a_map = FALSE;
2060 vm_map_entry_link(new_map, new_map->header.prev,
2061 new_entry);
2062 if (old_entry->is_a_map) {
2063 int check;
2064
2065 check = vm_map_copy(new_map,
2066 old_entry->object.share_map,
2067 new_entry->start,
2068 (vm_size_t)(new_entry->end -
2069 new_entry->start),
2070 old_entry->offset,
2071 FALSE, FALSE);
2072 if (check != KERN_SUCCESS)
2073 printf("vm_map_fork: copy in share_map region failed\n");
2074 }
2075 else {
2076 vm_map_copy_entry(old_map, new_map, old_entry,
2077 new_entry);
2078 }
2079 break;
2080 }
2081 old_entry = old_entry->next;
2082 }
2083
2084 new_map->size = old_map->size;
2085 vm_map_unlock(old_map);
2086
2087 return(vm2);
2088}
2089
2090/*
2091 * vm_map_lookup:
2092 *
2093 * Finds the VM object, offset, and
2094 * protection for a given virtual address in the
2095 * specified map, assuming a page fault of the
2096 * type specified.
2097 *
2098 * Leaves the map in question locked for read; return
2099 * values are guaranteed until a vm_map_lookup_done
2100 * call is performed. Note that the map argument
2101 * is in/out; the returned map must be used in
2102 * the call to vm_map_lookup_done.
2103 *
2104 * A handle (out_entry) is returned for use in
2105 * vm_map_lookup_done, to make that fast.
2106 *
2107 * If a lookup is requested with "write protection"
2108 * specified, the map may be changed to perform virtual
2109 * copying operations, although the data referenced will
2110 * remain the same.
2111 */
2112vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2113 object, offset, out_prot, wired, single_use)
2114 vm_map_t *var_map; /* IN/OUT */
2115 register vm_offset_t vaddr;
2116 register vm_prot_t fault_type;
2117
2118 vm_map_entry_t *out_entry; /* OUT */
2119 vm_object_t *object; /* OUT */
2120 vm_offset_t *offset; /* OUT */
2121 vm_prot_t *out_prot; /* OUT */
2122 boolean_t *wired; /* OUT */
2123 boolean_t *single_use; /* OUT */
2124{
2125 vm_map_t share_map;
2126 vm_offset_t share_offset;
2127 register vm_map_entry_t entry;
2128 register vm_map_t map = *var_map;
2129 register vm_prot_t prot;
2130 register boolean_t su;
2131
2132 RetryLookup: ;
2133
2134 /*
2135 * Lookup the faulting address.
2136 */
2137
2138 vm_map_lock_read(map);
2139
2140#define RETURN(why) \
2141 { \
2142 vm_map_unlock_read(map); \
2143 return(why); \
2144 }
2145
2146 /*
2147 * If the map has an interesting hint, try it before calling
2148	 * the full-blown lookup routine.
2149 */
2150
2151 simple_lock(&map->hint_lock);
2152 entry = map->hint;
2153 simple_unlock(&map->hint_lock);
2154
2155 *out_entry = entry;
2156
2157 if ((entry == &map->header) ||
2158 (vaddr < entry->start) || (vaddr >= entry->end)) {
2159 vm_map_entry_t tmp_entry;
2160
2161 /*
2162 * Entry was either not a valid hint, or the vaddr
2163 * was not contained in the entry, so do a full lookup.
2164 */
2165 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2166 RETURN(KERN_INVALID_ADDRESS);
2167
2168 entry = tmp_entry;
2169 *out_entry = entry;
2170 }
2171
2172 /*
2173 * Handle submaps.
2174 */
2175
2176 if (entry->is_sub_map) {
2177 vm_map_t old_map = map;
2178
2179 *var_map = map = entry->object.sub_map;
2180 vm_map_unlock_read(old_map);
2181 goto RetryLookup;
2182 }
2183
2184 /*
2185 * Check whether this task is allowed to have
2186 * this page.
2187 */
2188
2189 prot = entry->protection;
2190 if ((fault_type & (prot)) != fault_type)
2191 RETURN(KERN_PROTECTION_FAILURE);
2192
2193 /*
2194 * If this page is not pageable, we have to get
2195 * it for all possible accesses.
2196 */
2197
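	/* note: "=" (assignment) is intentional -- *wired records whether the entry is wired */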
2198 if (*wired = (entry->wired_count != 0))
2199 prot = fault_type = entry->protection;
2200
2201 /*
2202 * If we don't already have a VM object, track
2203 * it down.
2204 */
2205
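	/*
	 * Note the intentional assignment: "su" (single use) starts out
	 * TRUE when the entry maps its object directly rather than
	 * through a sharing map.
	 */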
2206 if (su = !entry->is_a_map) {
2207 share_map = map;
2208 share_offset = vaddr;
2209 }
2210 else {
2211 vm_map_entry_t share_entry;
2212
2213 /*
2214 * Compute the sharing map, and offset into it.
2215 */
2216
2217 share_map = entry->object.share_map;
2218 share_offset = (vaddr - entry->start) + entry->offset;
2219
2220 /*
2221 * Look for the backing store object and offset
2222 */
2223
2224 vm_map_lock_read(share_map);
2225
2226 if (!vm_map_lookup_entry(share_map, share_offset,
2227 &share_entry)) {
2228 vm_map_unlock_read(share_map);
2229 RETURN(KERN_INVALID_ADDRESS);
2230 }
2231 entry = share_entry;
2232 }
2233
2234 /*
2235	 * If the entry was copy-on-write, we either shadow the object now (for a write fault) or take away write permission (for a read fault).
2236 */
2237
2238 if (entry->needs_copy) {
2239 /*
2240 * If we want to write the page, we may as well
2241 * handle that now since we've got the sharing
2242 * map locked.
2243 *
2244 * If we don't need to write the page, we just
2245 * demote the permissions allowed.
2246 */
2247
2248 if (fault_type & VM_PROT_WRITE) {
2249 /*
2250 * Make a new object, and place it in the
2251 * object chain. Note that no new references
2252 * have appeared -- one just moved from the
2253 * share map to the new object.
2254 */
2255
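			/*
			 * A nonzero return means the read-to-write upgrade
			 * failed and the share map's read lock was released,
			 * so the whole lookup must be restarted.
			 */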
2256 if (lock_read_to_write(&share_map->lock)) {
2257 if (share_map != map)
2258 vm_map_unlock_read(map);
2259 goto RetryLookup;
2260 }
2261
2262 vm_object_shadow(
2263 &entry->object.vm_object,
2264 &entry->offset,
2265 (vm_size_t) (entry->end - entry->start));
2266
2267 entry->needs_copy = FALSE;
2268
2269 lock_write_to_read(&share_map->lock);
2270 }
2271 else {
2272 /*
2273 * We're attempting to read a copy-on-write
2274 * page -- don't allow writes.
2275 */
2276
2277 prot &= (~VM_PROT_WRITE);
2278 }
2279 }
2280
2281 /*
2282 * Create an object if necessary.
2283 */
2284 if (entry->object.vm_object == NULL) {
2285
2286 if (lock_read_to_write(&share_map->lock)) {
2287 if (share_map != map)
2288 vm_map_unlock_read(map);
2289 goto RetryLookup;
2290 }
2291
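		/*
		 * No pager is attached to the new object, so the region
		 * becomes anonymous, zero-fill memory.
		 */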
2292 entry->object.vm_object = vm_object_allocate(
2293 (vm_size_t)(entry->end - entry->start));
2294 entry->offset = 0;
2295 lock_write_to_read(&share_map->lock);
2296 }
2297
2298 /*
2299 * Return the object/offset from this entry. If the entry
2300 * was copy-on-write or empty, it has been fixed up.
2301 */
2302
2303 *offset = (share_offset - entry->start) + entry->offset;
2304 *object = entry->object.vm_object;
2305
2306 /*
2307 * Return whether this is the only map sharing this data.
2308 */
2309
2310 if (!su) {
2311 simple_lock(&share_map->ref_lock);
2312 su = (share_map->ref_count == 1);
2313 simple_unlock(&share_map->ref_lock);
2314 }
2315
2316 *out_prot = prot;
2317 *single_use = su;
2318
2319 return(KERN_SUCCESS);
2320
2321#undef RETURN
2322}
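
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the calling protocol described above.  The map is passed by reference
 * because the lookup may descend into a submap; whatever map comes back
 * is the one that must be handed to vm_map_lookup_done() to release the
 * read lock(s).  The function name "example_resolve" is hypothetical;
 * vm_fault() is the principal real client of this interface.
 */
#ifdef notdef
int example_resolve(map, va, fault_type)
	vm_map_t	map;
	vm_offset_t	va;
	vm_prot_t	fault_type;
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_prot_t	prot;
	boolean_t	wired, su;
	int		result;

	result = vm_map_lookup(&map, va, fault_type, &entry,
			&object, &offset, &prot, &wired, &su);
	if (result != KERN_SUCCESS)
		return(result);

	/*
	 * ... use object/offset here; they remain valid while the
	 * (possibly different) map returned above stays read-locked ...
	 */

	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
#endif /* notdef */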
2323
2324/*
2325 * vm_map_lookup_done:
2326 *
2327 * Releases locks acquired by a vm_map_lookup
2328 * (according to the handle returned by that lookup).
2329 */
2330
2331void vm_map_lookup_done(map, entry)
2332 register vm_map_t map;
2333 vm_map_entry_t entry;
2334{
2335 /*
2336 * If this entry references a map, unlock it first.
2337 */
2338
2339 if (entry->is_a_map)
2340 vm_map_unlock_read(entry->object.share_map);
2341
2342 /*
2343 * Unlock the main-level map
2344 */
2345
2346 vm_map_unlock_read(map);
2347}
2348
2349/*
2350 * Routine: vm_map_simplify
2351 * Purpose:
2352 * Attempt to simplify the map representation in
2353 * the vicinity of the given starting address.
2354 * Note:
2355 * This routine is intended primarily to keep the
2356 * kernel maps more compact -- they generally don't
2357	 * benefit from the "expand a map entry" optimization
2358 * at allocation time because the adjacent entry
2359 * is often wired down.
2360 */
2361void vm_map_simplify(map, start)
2362 vm_map_t map;
2363 vm_offset_t start;
2364{
2365 vm_map_entry_t this_entry;
2366 vm_map_entry_t prev_entry;
2367
2368 vm_map_lock(map);
2369 if (
2370 (vm_map_lookup_entry(map, start, &this_entry)) &&
2371 ((prev_entry = this_entry->prev) != &map->header) &&
2372
2373 (prev_entry->end == start) &&
2374 (map->is_main_map) &&
2375
2376 (prev_entry->is_a_map == FALSE) &&
2377 (prev_entry->is_sub_map == FALSE) &&
2378
2379 (this_entry->is_a_map == FALSE) &&
2380 (this_entry->is_sub_map == FALSE) &&
2381
2382 (prev_entry->inheritance == this_entry->inheritance) &&
2383 (prev_entry->protection == this_entry->protection) &&
2384 (prev_entry->max_protection == this_entry->max_protection) &&
2385 (prev_entry->wired_count == this_entry->wired_count) &&
2386
2387 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2388 (prev_entry->needs_copy == this_entry->needs_copy) &&
2389
2390 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2391 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2392 == this_entry->offset)
2393 ) {
2394 if (map->first_free == this_entry)
2395 map->first_free = prev_entry;
2396
2397 SAVE_HINT(map, prev_entry);
2398 vm_map_entry_unlink(map, this_entry);
2399 prev_entry->end = this_entry->end;
2400 vm_object_deallocate(this_entry->object.vm_object);
2401 vm_map_entry_dispose(map, this_entry);
2402 }
2403 vm_map_unlock(map);
2404}
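
/*
 * Illustrative note (editor's addition): vm_map_simplify() coalesces at
 * most two entries -- the one containing "start" and its immediate
 * predecessor -- and only when both are plain (non-map) entries backed
 * by the same object at contiguous offsets with identical attributes,
 * e.g.
 *
 *	[A,B) obj=X off=0    [B,C) obj=X off=B-A
 *
 * collapse into the single entry [A,C) obj=X off=0.
 */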
2405
2406/*
2407 * vm_map_print: [ debug ]
2408 */
2409void vm_map_print(map, full)
2410 register vm_map_t map;
2411 boolean_t full;
2412{
2413 register vm_map_entry_t entry;
2414 extern int indent;
2415
2416 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2417 (map->is_main_map ? "Task" : "Share"),
2418 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2419 map->timestamp);
2420
2421 if (!full && indent)
2422 return;
2423
2424 indent += 2;
2425 for (entry = map->header.next; entry != &map->header;
2426 entry = entry->next) {
2427 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2428 (int) entry, (int) entry->start, (int) entry->end);
2429 if (map->is_main_map) {
2430 static char *inheritance_name[4] =
2431 { "share", "copy", "none", "donate_copy"};
2432 printf("prot=%x/%x/%s, ",
2433 entry->protection,
2434 entry->max_protection,
2435 inheritance_name[entry->inheritance]);
2436 if (entry->wired_count != 0)
2437 printf("wired, ");
2438 }
2439
2440 if (entry->is_a_map || entry->is_sub_map) {
2441 printf("share=0x%x, offset=0x%x\n",
2442 (int) entry->object.share_map,
2443 (int) entry->offset);
2444 if ((entry->prev == &map->header) ||
2445 (!entry->prev->is_a_map) ||
2446 (entry->prev->object.share_map !=
2447 entry->object.share_map)) {
2448 indent += 2;
2449 vm_map_print(entry->object.share_map, full);
2450 indent -= 2;
2451 }
2452
2453 }
2454 else {
2455 printf("object=0x%x, offset=0x%x",
2456 (int) entry->object.vm_object,
2457 (int) entry->offset);
2458 if (entry->copy_on_write)
2459 printf(", copy (%s)",
2460 entry->needs_copy ? "needed" : "done");
2461 printf("\n");
2462
2463 if ((entry->prev == &map->header) ||
2464 (entry->prev->is_a_map) ||
2465 (entry->prev->object.vm_object !=
2466 entry->object.vm_object)) {
2467 indent += 2;
2468 vm_object_print(entry->object.vm_object, full);
2469 indent -= 2;
2470 }
2471 }
2472 }
2473 indent -= 2;
2474}