[unix-history] /usr/src/sys/vm/vm_map.c  (most recent change: new include format; lint; function prototypes)
 1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
 8 * %sccs.include.redist.c%
 9 *
 10 * @(#)vm_map.c 7.6 (Berkeley) %G%
11 *
12 *
13 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14 * All rights reserved.
15 *
16 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17 *
18 * Permission to use, copy, modify and distribute this software and
19 * its documentation is hereby granted, provided that both the copyright
20 * notice and this permission notice appear in all copies of the
21 * software, derivative works or modified versions, and any portions
22 * thereof, and that both notices appear in supporting documentation.
23 *
24 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27 *
28 * Carnegie Mellon requests users of this software to return to
29 *
30 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
31 * School of Computer Science
32 * Carnegie Mellon University
33 * Pittsburgh PA 15213-3890
34 *
35 * any improvements or extensions that they make and grant Carnegie the
36 * rights to redistribute these changes.
37 */
38
39/*
40 * Virtual memory mapping module.
41 */
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/malloc.h>
46
47#include <vm/vm.h>
48#include <vm/vm_page.h>
49#include <vm/vm_object.h>
50
51/*
52 * Virtual memory maps provide for the mapping, protection,
53 * and sharing of virtual memory objects. In addition,
54 * this module provides for an efficient virtual copy of
55 * memory from one map to another.
56 *
57 * Synchronization is required prior to most operations.
58 *
59 * Maps consist of an ordered doubly-linked list of simple
60 * entries; a single hint is used to speed up lookups.
61 *
62 * In order to properly represent the sharing of virtual
63 * memory regions among maps, the map structure is bi-level.
64 * Top-level ("address") maps refer to regions of sharable
65 * virtual memory. These regions are implemented as
66 * ("sharing") maps, which then refer to the actual virtual
67 * memory objects. When two address maps "share" memory,
68 * their top-level maps both have references to the same
69 * sharing map. When memory is virtual-copied from one
70 * address map to another, the references in the sharing
71 * maps are actually copied -- no copying occurs at the
72 * virtual memory object level.
73 *
 74 * Since portions of maps are specified by start/end addresses,
75 * which may not align with existing map entries, all
76 * routines merely "clip" entries to these start/end values.
77 * [That is, an entry is split into two, bordering at a
78 * start or end value.] Note that these clippings may not
79 * always be necessary (as the two resulting entries are then
80 * not changed); however, the clipping is done for convenience.
81 * No attempt is currently made to "glue back together" two
82 * abutting entries.
83 *
84 * As mentioned above, virtual copy operations are performed
85 * by copying VM object references from one sharing map to
86 * another, and then marking both regions as copy-on-write.
87 * It is important to note that only one writeable reference
88 * to a VM object region exists in any map -- this means that
89 * shadow object creation can be delayed until a write operation
90 * occurs.
91 */
92
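
/*
 * Illustrative sketch, not part of the original source: how the
 * structures described above are typically walked.  Entries hang off
 * the map header in an address-sorted, doubly-linked list, so a range
 * query is a linear scan that stops at the header sentinel.  The helper
 * name is made up, and the caller is assumed to hold the map lock.
 */
#ifdef notdef
static int
example_count_entries(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	register vm_map_entry_t entry;
	register int n = 0;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		if (entry->end <= start)
			continue;		/* entirely below the range */
		if (entry->start >= end)
			break;			/* list is sorted; done */
		n++;				/* overlaps [start, end) */
	}
	return (n);
}
#endif /* notdef */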
93/*
 94 * vm_map_startup:
95 *
96 * Initialize the vm_map module. Must be called before
97 * any other vm_map routines.
98 *
99 * Map and entry structures are allocated from the general
100 * purpose memory pool with some exceptions:
101 *
102 * - The kernel map and kmem submap are allocated statically.
103 * - Kernel map entries are allocated out of a static pool.
104 *
105 * These restrictions are necessary since malloc() uses the
106 * maps and requires map entries.
107 */
108
109vm_offset_t kentry_data;
110vm_size_t kentry_data_size;
111vm_map_entry_t kentry_free;
112vm_map_t kmap_free;
113
114static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
115static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
116
 117void vm_map_startup()
118{
119 register int i;
120 register vm_map_entry_t mep;
121 vm_map_t mp;
122
123 /*
124 * Static map structures for allocation before initialization of
125 * kernel map or kmem map. vm_map_create knows how to deal with them.
126 */
127 kmap_free = mp = (vm_map_t) kentry_data;
128 i = MAX_KMAP;
129 while (--i > 0) {
130 mp->header.next = (vm_map_entry_t) (mp + 1);
131 mp++;
132 }
 133 mp++->header.next = NULL;
134
135 /*
136 * Form a free list of statically allocated kernel map entries
137 * with the rest.
138 */
139 kentry_free = mep = (vm_map_entry_t) mp;
140 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
141 while (--i > 0) {
142 mep->next = mep + 1;
143 mep++;
144 }
145 mep->next = NULL;
146}
147
148/*
149 * Allocate a vmspace structure, including a vm_map and pmap,
150 * and initialize those structures. The refcnt is set to 1.
151 * The remaining fields must be initialized by the caller.
152 */
153struct vmspace *
154vmspace_alloc(min, max, pageable)
155 vm_offset_t min, max;
156 int pageable;
157{
158 register struct vmspace *vm;
159
160 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
161 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
162 vm_map_init(&vm->vm_map, min, max, pageable);
163 pmap_pinit(&vm->vm_pmap);
164 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
165 vm->vm_refcnt = 1;
166 return (vm);
167}
168
169void
170vmspace_free(vm)
171 register struct vmspace *vm;
172{
173
174 if (--vm->vm_refcnt == 0) {
175 /*
176 * Lock the map, to wait out all other references to it.
177 * Delete all of the mappings and pages they hold,
178 * then call the pmap module to reclaim anything left.
179 */
180 vm_map_lock(&vm->vm_map);
181 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
182 vm->vm_map.max_offset);
183 pmap_release(&vm->vm_pmap);
184 FREE(vm, M_VMMAP);
185 }
186}
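
/*
 * Hypothetical usage sketch, not part of the original source: creating
 * and releasing an address space with the two routines above.  The
 * bounds are whatever the caller's architecture dictates, and the
 * fields above vm_startcopy are left for the caller to fill in, as
 * noted.  The helper name is made up.
 */
#ifdef notdef
static struct vmspace *
example_new_vmspace(min, max)
	vm_offset_t min, max;
{
	struct vmspace *vm;

	vm = vmspace_alloc(min, max, TRUE);	/* refcnt starts at 1 */
	/* ... initialize the remaining fields, use vm->vm_map ... */
	return (vm);		/* later: vmspace_free(vm) drops the ref */
}
#endif /* notdef */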
187
188/*
189 * vm_map_create:
190 *
191 * Creates and returns a new empty VM map with
192 * the given physical map structure, and having
193 * the given lower and upper address bounds.
194 */
195vm_map_t vm_map_create(pmap, min, max, pageable)
196 pmap_t pmap;
197 vm_offset_t min, max;
198 boolean_t pageable;
199{
200 register vm_map_t result;
201 extern vm_map_t kernel_map, kmem_map;
202
 203 if (kmem_map == NULL) {
204 result = kmap_free;
205 kmap_free = (vm_map_t) result->header.next;
206 if (result == NULL)
207 panic("vm_map_create: out of maps");
208 } else
209 MALLOC(result, vm_map_t, sizeof(struct vm_map),
210 M_VMMAP, M_WAITOK);
211
 212 vm_map_init(result, min, max, pageable);
 213 result->pmap = pmap;
214 return(result);
215}
216
217/*
218 * Initialize an existing vm_map structure
219 * such as that in the vmspace structure.
220 * The pmap is set elsewhere.
221 */
222void
223vm_map_init(map, min, max, pageable)
224 register struct vm_map *map;
225 vm_offset_t min, max;
226 boolean_t pageable;
227{
228 map->header.next = map->header.prev = &map->header;
229 map->nentries = 0;
230 map->size = 0;
231 map->ref_count = 1;
232 map->is_main_map = TRUE;
233 map->min_offset = min;
234 map->max_offset = max;
235 map->entries_pageable = pageable;
236 map->first_free = &map->header;
237 map->hint = &map->header;
238 map->timestamp = 0;
239 lock_init(&map->lock, TRUE);
240 simple_lock_init(&map->ref_lock);
241 simple_lock_init(&map->hint_lock);
242}
243
244/*
245 * vm_map_entry_create: [ internal use only ]
246 *
247 * Allocates a VM map entry for insertion.
 248 * No entry fields are filled in.
249 */
250vm_map_entry_t vm_map_entry_create(map)
251 vm_map_t map;
252{
253 vm_map_entry_t entry;
254 extern vm_map_t kernel_map, kmem_map, mb_map;
255
256 if (map == kernel_map || map == kmem_map || map == mb_map) {
257 if (entry = kentry_free)
258 kentry_free = kentry_free->next;
259 } else
260 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
261 M_VMMAPENT, M_WAITOK);
 262 if (entry == NULL)
263 panic("vm_map_entry_create: out of map entries");
264
265 return(entry);
266}
267
268/*
269 * vm_map_entry_dispose: [ internal use only ]
270 *
271 * Inverse of vm_map_entry_create.
272 */
273void vm_map_entry_dispose(map, entry)
274 vm_map_t map;
275 vm_map_entry_t entry;
276{
277 extern vm_map_t kernel_map, kmem_map, mb_map;
278
279 if (map == kernel_map || map == kmem_map || map == mb_map) {
280 entry->next = kentry_free;
281 kentry_free = entry;
282 } else
283 FREE(entry, M_VMMAPENT);
284}
285
286/*
287 * vm_map_entry_{un,}link:
288 *
289 * Insert/remove entries from maps.
290 */
291#define vm_map_entry_link(map, after_where, entry) \
292 { \
293 (map)->nentries++; \
294 (entry)->prev = (after_where); \
295 (entry)->next = (after_where)->next; \
296 (entry)->prev->next = (entry); \
297 (entry)->next->prev = (entry); \
298 }
299#define vm_map_entry_unlink(map, entry) \
300 { \
301 (map)->nentries--; \
302 (entry)->next->prev = (entry)->prev; \
303 (entry)->prev->next = (entry)->next; \
304 }
305
306/*
307 * vm_map_reference:
308 *
309 * Creates another valid reference to the given map.
310 *
311 */
312void vm_map_reference(map)
313 register vm_map_t map;
314{
 315 if (map == NULL)
316 return;
317
318 simple_lock(&map->ref_lock);
319 map->ref_count++;
320 simple_unlock(&map->ref_lock);
321}
322
323/*
324 * vm_map_deallocate:
325 *
326 * Removes a reference from the specified map,
327 * destroying it if no references remain.
328 * The map should not be locked.
329 */
330void vm_map_deallocate(map)
331 register vm_map_t map;
332{
333 register int c;
334
 335 if (map == NULL)
336 return;
337
338 simple_lock(&map->ref_lock);
339 c = --map->ref_count;
340 simple_unlock(&map->ref_lock);
341
342 if (c > 0) {
343 return;
344 }
345
346 /*
347 * Lock the map, to wait out all other references
348 * to it.
349 */
350
351 vm_map_lock(map);
352
353 (void) vm_map_delete(map, map->min_offset, map->max_offset);
354
355 pmap_destroy(map->pmap);
356
357 FREE(map, M_VMMAP);
358}
359
360/*
361 * vm_map_insert: [ internal use only ]
362 *
363 * Inserts the given whole VM object into the target
364 * map at the specified address range. The object's
365 * size should match that of the address range.
366 *
367 * Requires that the map be locked, and leaves it so.
368 */
 369int
370vm_map_insert(map, object, offset, start, end)
371 vm_map_t map;
372 vm_object_t object;
373 vm_offset_t offset;
374 vm_offset_t start;
375 vm_offset_t end;
376{
377 register vm_map_entry_t new_entry;
378 register vm_map_entry_t prev_entry;
379 vm_map_entry_t temp_entry;
380
381 /*
382 * Check that the start and end points are not bogus.
383 */
384
385 if ((start < map->min_offset) || (end > map->max_offset) ||
386 (start >= end))
387 return(KERN_INVALID_ADDRESS);
388
389 /*
390 * Find the entry prior to the proposed
391 * starting address; if it's part of an
392 * existing entry, this range is bogus.
393 */
394
395 if (vm_map_lookup_entry(map, start, &temp_entry))
396 return(KERN_NO_SPACE);
397
398 prev_entry = temp_entry;
399
400 /*
401 * Assert that the next entry doesn't overlap the
402 * end point.
403 */
404
405 if ((prev_entry->next != &map->header) &&
406 (prev_entry->next->start < end))
407 return(KERN_NO_SPACE);
408
409 /*
410 * See if we can avoid creating a new entry by
411 * extending one of our neighbors.
412 */
413
 414 if (object == NULL) {
415 if ((prev_entry != &map->header) &&
416 (prev_entry->end == start) &&
417 (map->is_main_map) &&
418 (prev_entry->is_a_map == FALSE) &&
419 (prev_entry->is_sub_map == FALSE) &&
420 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
421 (prev_entry->protection == VM_PROT_DEFAULT) &&
422 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
423 (prev_entry->wired_count == 0)) {
424
425 if (vm_object_coalesce(prev_entry->object.vm_object,
 426 NULL,
427 prev_entry->offset,
428 (vm_offset_t) 0,
429 (vm_size_t)(prev_entry->end
430 - prev_entry->start),
431 (vm_size_t)(end - prev_entry->end))) {
432 /*
433 * Coalesced the two objects - can extend
434 * the previous map entry to include the
435 * new range.
436 */
437 map->size += (end - prev_entry->end);
438 prev_entry->end = end;
439 return(KERN_SUCCESS);
440 }
441 }
442 }
443
444 /*
445 * Create a new entry
446 */
447
448 new_entry = vm_map_entry_create(map);
449 new_entry->start = start;
450 new_entry->end = end;
451
452 new_entry->is_a_map = FALSE;
453 new_entry->is_sub_map = FALSE;
454 new_entry->object.vm_object = object;
455 new_entry->offset = offset;
456
457 new_entry->copy_on_write = FALSE;
458 new_entry->needs_copy = FALSE;
459
460 if (map->is_main_map) {
461 new_entry->inheritance = VM_INHERIT_DEFAULT;
462 new_entry->protection = VM_PROT_DEFAULT;
463 new_entry->max_protection = VM_PROT_DEFAULT;
464 new_entry->wired_count = 0;
465 }
466
467 /*
468 * Insert the new entry into the list
469 */
470
471 vm_map_entry_link(map, prev_entry, new_entry);
472 map->size += new_entry->end - new_entry->start;
473
474 /*
475 * Update the free space hint
476 */
477
478 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
479 map->first_free = new_entry;
480
481 return(KERN_SUCCESS);
482}
483
484/*
485 * SAVE_HINT:
486 *
487 * Saves the specified entry as the hint for
488 * future lookups. Performs necessary interlocks.
489 */
490#define SAVE_HINT(map,value) \
491 simple_lock(&(map)->hint_lock); \
492 (map)->hint = (value); \
493 simple_unlock(&(map)->hint_lock);
494
495/*
496 * vm_map_lookup_entry: [ internal use only ]
497 *
498 * Finds the map entry containing (or
499 * immediately preceding) the specified address
500 * in the given map; the entry is returned
501 * in the "entry" parameter. The boolean
502 * result indicates whether the address is
503 * actually contained in the map.
504 */
505boolean_t vm_map_lookup_entry(map, address, entry)
506 register vm_map_t map;
507 register vm_offset_t address;
508 vm_map_entry_t *entry; /* OUT */
509{
510 register vm_map_entry_t cur;
511 register vm_map_entry_t last;
512
513 /*
514 * Start looking either from the head of the
515 * list, or from the hint.
516 */
517
518 simple_lock(&map->hint_lock);
519 cur = map->hint;
520 simple_unlock(&map->hint_lock);
521
522 if (cur == &map->header)
523 cur = cur->next;
524
525 if (address >= cur->start) {
526 /*
527 * Go from hint to end of list.
528 *
529 * But first, make a quick check to see if
530 * we are already looking at the entry we
531 * want (which is usually the case).
532 * Note also that we don't need to save the hint
533 * here... it is the same hint (unless we are
534 * at the header, in which case the hint didn't
535 * buy us anything anyway).
536 */
537 last = &map->header;
538 if ((cur != last) && (cur->end > address)) {
539 *entry = cur;
540 return(TRUE);
541 }
542 }
543 else {
544 /*
545 * Go from start to hint, *inclusively*
546 */
547 last = cur->next;
548 cur = map->header.next;
549 }
550
551 /*
552 * Search linearly
553 */
554
555 while (cur != last) {
556 if (cur->end > address) {
557 if (address >= cur->start) {
558 /*
559 * Save this lookup for future
560 * hints, and return
561 */
562
563 *entry = cur;
564 SAVE_HINT(map, cur);
565 return(TRUE);
566 }
567 break;
568 }
569 cur = cur->next;
570 }
571 *entry = cur->prev;
572 SAVE_HINT(map, *entry);
573 return(FALSE);
574}
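
/*
 * Hypothetical usage sketch, not part of the original source: how
 * callers interpret vm_map_lookup_entry()'s result.  TRUE means the
 * address lies inside the returned entry; FALSE means it falls in a
 * hole, and the returned entry is the one immediately preceding the
 * address (possibly the header).  The map must be locked by the
 * caller; the helper name is made up.
 */
#ifdef notdef
static vm_map_entry_t
example_entry_for(map, addr)
	vm_map_t map;
	vm_offset_t addr;
{
	vm_map_entry_t entry;

	if (vm_map_lookup_entry(map, addr, &entry))
		return (entry);		/* addr is mapped by this entry */
	return (NULL);			/* addr is in a hole */
}
#endif /* notdef */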
575
576/*
577 * Find sufficient space for `length' bytes in the given map, starting at
578 * `start'. The map must be locked. Returns 0 on success, 1 on no space.
579 */
580int
581vm_map_findspace(map, start, length, addr)
582 register vm_map_t map;
583 register vm_offset_t start;
584 vm_size_t length;
585 vm_offset_t *addr;
586{
587 register vm_map_entry_t entry, next;
588 register vm_offset_t end;
589
590 if (start < map->min_offset)
591 start = map->min_offset;
592 if (start > map->max_offset)
593 return (1);
594
595 /*
596 * Look for the first possible address; if there's already
597 * something at this address, we have to start after it.
598 */
599 if (start == map->min_offset) {
600 if ((entry = map->first_free) != &map->header)
601 start = entry->end;
602 } else {
603 vm_map_entry_t tmp;
604 if (vm_map_lookup_entry(map, start, &tmp))
605 start = tmp->end;
606 entry = tmp;
607 }
608
609 /*
610 * Look through the rest of the map, trying to fit a new region in
611 * the gap between existing regions, or after the very last region.
612 */
613 for (;; start = (entry = next)->end) {
614 /*
615 * Find the end of the proposed new region. Be sure we didn't
616 * go beyond the end of the map, or wrap around the address;
617 * if so, we lose. Otherwise, if this is the last entry, or
618 * if the proposed new region fits before the next entry, we
619 * win.
620 */
621 end = start + length;
622 if (end > map->max_offset || end < start)
623 return (1);
624 next = entry->next;
625 if (next == &map->header || next->start >= end)
626 break;
627 }
628 SAVE_HINT(map, entry);
629 *addr = start;
630 return (0);
631}
632
633/*
634 * vm_map_find finds an unallocated region in the target address
635 * map with the given length. The search is defined to be
636 * first-fit from the specified address; the region found is
637 * returned in the same parameter.
638 *
639 */
 640int
641vm_map_find(map, object, offset, addr, length, find_space)
642 vm_map_t map;
643 vm_object_t object;
644 vm_offset_t offset;
645 vm_offset_t *addr; /* IN/OUT */
646 vm_size_t length;
647 boolean_t find_space;
648{
 649 register vm_offset_t start;
650 int result;
651
652 start = *addr;
 653 vm_map_lock(map);
 654 if (find_space) {
 655 if (vm_map_findspace(map, start, length, addr)) {
656 vm_map_unlock(map);
657 return (KERN_NO_SPACE);
658 }
 659 start = *addr;
 660 }
 661 result = vm_map_insert(map, object, offset, start, start + length);
 662 vm_map_unlock(map);
 663 return (result);
664}
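
/*
 * Hypothetical usage sketch, not part of the original source: a typical
 * anonymous allocation, letting vm_map_find() pick the address
 * (find_space = TRUE) starting from the low end of the map.  Passing a
 * null object defers object creation until the first fault; the size is
 * assumed page-aligned and the helper name is made up.
 */
#ifdef notdef
static vm_offset_t
example_alloc_anon(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	addr = vm_map_min(map);
	if (vm_map_find(map, NULL, (vm_offset_t) 0, &addr, size, TRUE) !=
	    KERN_SUCCESS)
		return (0);
	return (addr);
}
#endif /* notdef */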
665
666/*
667 * vm_map_simplify_entry: [ internal use only ]
668 *
669 * Simplify the given map entry by:
670 * removing extra sharing maps
671 * [XXX maybe later] merging with a neighbor
672 */
673void vm_map_simplify_entry(map, entry)
674 vm_map_t map;
675 vm_map_entry_t entry;
676{
677#ifdef lint
678 map++;
 679#endif /* lint */
680
681 /*
682 * If this entry corresponds to a sharing map, then
683 * see if we can remove the level of indirection.
684 * If it's not a sharing map, then it points to
685 * a VM object, so see if we can merge with either
686 * of our neighbors.
687 */
688
689 if (entry->is_sub_map)
690 return;
691 if (entry->is_a_map) {
692#if 0
693 vm_map_t my_share_map;
694 int count;
695
696 my_share_map = entry->object.share_map;
697 simple_lock(&my_share_map->ref_lock);
698 count = my_share_map->ref_count;
699 simple_unlock(&my_share_map->ref_lock);
700
701 if (count == 1) {
702 /* Can move the region from
703 * entry->start to entry->end (+ entry->offset)
704 * in my_share_map into place of entry.
705 * Later.
706 */
707 }
 708#endif /* 0 */
709 }
710 else {
711 /*
712 * Try to merge with our neighbors.
713 *
714 * Conditions for merge are:
715 *
716 * 1. entries are adjacent.
717 * 2. both entries point to objects
718 * with null pagers.
719 *
720 * If a merge is possible, we replace the two
721 * entries with a single entry, then merge
722 * the two objects into a single object.
723 *
724 * Now, all that is left to do is write the
725 * code!
726 */
727 }
728}
729
730/*
731 * vm_map_clip_start: [ internal use only ]
732 *
733 * Asserts that the given entry begins at or after
734 * the specified address; if necessary,
735 * it splits the entry into two.
736 */
737#define vm_map_clip_start(map, entry, startaddr) \
738{ \
739 if (startaddr > entry->start) \
740 _vm_map_clip_start(map, entry, startaddr); \
741}
742
743/*
744 * This routine is called only when it is known that
745 * the entry must be split.
746 */
 747static void _vm_map_clip_start(map, entry, start)
748 register vm_map_t map;
749 register vm_map_entry_t entry;
750 register vm_offset_t start;
751{
752 register vm_map_entry_t new_entry;
753
754 /*
755 * See if we can simplify this entry first
756 */
757
758 vm_map_simplify_entry(map, entry);
759
760 /*
761 * Split off the front portion --
762 * note that we must insert the new
763 * entry BEFORE this one, so that
764 * this entry has the specified starting
765 * address.
766 */
767
768 new_entry = vm_map_entry_create(map);
769 *new_entry = *entry;
770
771 new_entry->end = start;
772 entry->offset += (start - entry->start);
773 entry->start = start;
774
775 vm_map_entry_link(map, entry->prev, new_entry);
776
777 if (entry->is_a_map || entry->is_sub_map)
778 vm_map_reference(new_entry->object.share_map);
779 else
780 vm_object_reference(new_entry->object.vm_object);
781}
782
783/*
784 * vm_map_clip_end: [ internal use only ]
785 *
786 * Asserts that the given entry ends at or before
787 * the specified address; if necessary,
788 * it splits the entry into two.
789 */
790
791#define vm_map_clip_end(map, entry, endaddr) \
792{ \
793 if (endaddr < entry->end) \
794 _vm_map_clip_end(map, entry, endaddr); \
795}
796
797/*
798 * This routine is called only when it is known that
799 * the entry must be split.
800 */
 801static void _vm_map_clip_end(map, entry, end)
802 register vm_map_t map;
803 register vm_map_entry_t entry;
804 register vm_offset_t end;
805{
806 register vm_map_entry_t new_entry;
807
808 /*
809 * Create a new entry and insert it
810 * AFTER the specified entry
811 */
812
813 new_entry = vm_map_entry_create(map);
814 *new_entry = *entry;
815
816 new_entry->start = entry->end = end;
817 new_entry->offset += (end - entry->start);
818
819 vm_map_entry_link(map, entry, new_entry);
820
821 if (entry->is_a_map || entry->is_sub_map)
822 vm_map_reference(new_entry->object.share_map);
823 else
824 vm_object_reference(new_entry->object.vm_object);
825}
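
/*
 * Illustrative sketch, not part of the original source: the effect of
 * the two clipping operations above on a single entry.  The addresses
 * are made up; assume the entry initially spans [0x1000, 0x4000) with
 * offset 0 into its backing object, and that the map is locked.
 */
#ifdef notdef
static void
example_clip(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	/* split off the front: a new entry covers [0x1000, 0x2000) */
	vm_map_clip_start(map, entry, (vm_offset_t) 0x2000);
	/* entry is now [0x2000, 0x4000) with offset advanced by 0x1000 */

	/* split off the tail: a new entry covers [0x3000, 0x4000) */
	vm_map_clip_end(map, entry, (vm_offset_t) 0x3000);
	/* entry is now exactly [0x2000, 0x3000) */
}
#endif /* notdef */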
826
827/*
828 * VM_MAP_RANGE_CHECK: [ internal use only ]
829 *
830 * Asserts that the starting and ending region
831 * addresses fall within the valid range of the map.
832 */
833#define VM_MAP_RANGE_CHECK(map, start, end) \
834 { \
835 if (start < vm_map_min(map)) \
836 start = vm_map_min(map); \
837 if (end > vm_map_max(map)) \
838 end = vm_map_max(map); \
839 if (start > end) \
840 start = end; \
841 }
842
843/*
844 * vm_map_submap: [ kernel use only ]
845 *
846 * Mark the given range as handled by a subordinate map.
847 *
848 * This range must have been created with vm_map_find,
849 * and no other operations may have been performed on this
850 * range prior to calling vm_map_submap.
851 *
852 * Only a limited number of operations can be performed
 853 * within this range after calling vm_map_submap:
854 * vm_fault
855 * [Don't try vm_map_copy!]
856 *
857 * To remove a submapping, one must first remove the
858 * range from the superior map, and then destroy the
859 * submap (if desired). [Better yet, don't try it.]
860 */
 861int
862vm_map_submap(map, start, end, submap)
863 register vm_map_t map;
864 register vm_offset_t start;
865 register vm_offset_t end;
866 vm_map_t submap;
867{
868 vm_map_entry_t entry;
869 register int result = KERN_INVALID_ARGUMENT;
870
871 vm_map_lock(map);
872
873 VM_MAP_RANGE_CHECK(map, start, end);
874
875 if (vm_map_lookup_entry(map, start, &entry)) {
876 vm_map_clip_start(map, entry, start);
877 }
878 else
879 entry = entry->next;
880
881 vm_map_clip_end(map, entry, end);
882
883 if ((entry->start == start) && (entry->end == end) &&
884 (!entry->is_a_map) &&
 885 (entry->object.vm_object == NULL) &&
886 (!entry->copy_on_write)) {
887 entry->is_a_map = FALSE;
888 entry->is_sub_map = TRUE;
889 vm_map_reference(entry->object.sub_map = submap);
890 result = KERN_SUCCESS;
891 }
892 vm_map_unlock(map);
893
894 return(result);
895}
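
/*
 * Hypothetical sketch, not part of the original source: the protocol
 * described above for installing a submap.  A range is first reserved
 * in the parent with vm_map_find() (no backing object, no other
 * operations), then handed to a subordinate map covering the same
 * addresses.  The helper name is made up.
 */
#ifdef notdef
static vm_map_t
example_make_submap(parent, size)
	vm_map_t parent;
	vm_size_t size;
{
	vm_map_t sub;
	vm_offset_t addr;

	addr = vm_map_min(parent);
	if (vm_map_find(parent, NULL, (vm_offset_t) 0, &addr, size, TRUE) !=
	    KERN_SUCCESS)
		return (NULL);
	sub = vm_map_create(vm_map_pmap(parent), addr, addr + size, TRUE);
	(void) vm_map_submap(parent, addr, addr + size, sub);
	return (sub);
}
#endif /* notdef */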
896
897/*
898 * vm_map_protect:
899 *
900 * Sets the protection of the specified address
901 * region in the target map. If "set_max" is
902 * specified, the maximum protection is to be set;
903 * otherwise, only the current protection is affected.
904 */
 905int
906vm_map_protect(map, start, end, new_prot, set_max)
907 register vm_map_t map;
908 register vm_offset_t start;
909 register vm_offset_t end;
910 register vm_prot_t new_prot;
911 register boolean_t set_max;
912{
913 register vm_map_entry_t current;
914 vm_map_entry_t entry;
915
916 vm_map_lock(map);
917
918 VM_MAP_RANGE_CHECK(map, start, end);
919
920 if (vm_map_lookup_entry(map, start, &entry)) {
921 vm_map_clip_start(map, entry, start);
922 }
923 else
924 entry = entry->next;
925
926 /*
927 * Make a first pass to check for protection
928 * violations.
929 */
930
931 current = entry;
932 while ((current != &map->header) && (current->start < end)) {
933 if (current->is_sub_map)
934 return(KERN_INVALID_ARGUMENT);
935 if ((new_prot & current->max_protection) != new_prot) {
936 vm_map_unlock(map);
937 return(KERN_PROTECTION_FAILURE);
938 }
939
940 current = current->next;
941 }
942
943 /*
944 * Go back and fix up protections.
945 * [Note that clipping is not necessary the second time.]
946 */
947
948 current = entry;
949
950 while ((current != &map->header) && (current->start < end)) {
951 vm_prot_t old_prot;
952
953 vm_map_clip_end(map, current, end);
954
955 old_prot = current->protection;
956 if (set_max)
957 current->protection =
958 (current->max_protection = new_prot) &
959 old_prot;
960 else
961 current->protection = new_prot;
962
963 /*
964 * Update physical map if necessary.
965 * Worry about copy-on-write here -- CHECK THIS XXX
966 */
967
968 if (current->protection != old_prot) {
969
970#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
971 VM_PROT_ALL)
972#define max(a,b) ((a) > (b) ? (a) : (b))
973
974 if (current->is_a_map) {
975 vm_map_entry_t share_entry;
976 vm_offset_t share_end;
977
978 vm_map_lock(current->object.share_map);
979 (void) vm_map_lookup_entry(
980 current->object.share_map,
981 current->offset,
982 &share_entry);
983 share_end = current->offset +
984 (current->end - current->start);
985 while ((share_entry !=
986 &current->object.share_map->header) &&
987 (share_entry->start < share_end)) {
988
989 pmap_protect(map->pmap,
990 (max(share_entry->start,
991 current->offset) -
992 current->offset +
993 current->start),
994 min(share_entry->end,
995 share_end) -
996 current->offset +
997 current->start,
998 current->protection &
999 MASK(share_entry));
1000
1001 share_entry = share_entry->next;
1002 }
1003 vm_map_unlock(current->object.share_map);
1004 }
1005 else
1006 pmap_protect(map->pmap, current->start,
1007 current->end,
1008 current->protection & MASK(entry));
1009#undef max
1010#undef MASK
1011 }
1012 current = current->next;
1013 }
1014
1015 vm_map_unlock(map);
1016 return(KERN_SUCCESS);
1017}
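
/*
 * Hypothetical usage sketch, not part of the original source: making a
 * region read-only.  With set_max FALSE only the current protection
 * changes; the maximum protection is untouched, so write access could
 * later be restored with another vm_map_protect() call.  The helper
 * name is made up.
 */
#ifdef notdef
static int
example_make_readonly(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	return (vm_map_protect(map, start, end, VM_PROT_READ, FALSE));
}
#endif /* notdef */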
1018
1019/*
1020 * vm_map_inherit:
1021 *
1022 * Sets the inheritance of the specified address
1023 * range in the target map. Inheritance
1024 * affects how the map will be shared with
1025 * child maps at the time of vm_map_fork.
1026 */
 1027int
1028vm_map_inherit(map, start, end, new_inheritance)
1029 register vm_map_t map;
1030 register vm_offset_t start;
1031 register vm_offset_t end;
1032 register vm_inherit_t new_inheritance;
1033{
1034 register vm_map_entry_t entry;
1035 vm_map_entry_t temp_entry;
1036
1037 switch (new_inheritance) {
1038 case VM_INHERIT_NONE:
1039 case VM_INHERIT_COPY:
1040 case VM_INHERIT_SHARE:
1041 break;
1042 default:
1043 return(KERN_INVALID_ARGUMENT);
1044 }
1045
1046 vm_map_lock(map);
1047
1048 VM_MAP_RANGE_CHECK(map, start, end);
1049
1050 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1051 entry = temp_entry;
1052 vm_map_clip_start(map, entry, start);
1053 }
1054 else
1055 entry = temp_entry->next;
1056
1057 while ((entry != &map->header) && (entry->start < end)) {
1058 vm_map_clip_end(map, entry, end);
1059
1060 entry->inheritance = new_inheritance;
1061
1062 entry = entry->next;
1063 }
1064
1065 vm_map_unlock(map);
1066 return(KERN_SUCCESS);
1067}
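
/*
 * Hypothetical usage sketch, not part of the original source: marking a
 * region so that a child created by vmspace_fork() (below) shares it
 * with the parent instead of receiving a copy-on-write copy.  The
 * helper name is made up.
 */
#ifdef notdef
static int
example_share_with_children(vm, start, end)
	struct vmspace *vm;
	vm_offset_t start, end;
{
	return (vm_map_inherit(&vm->vm_map, start, end, VM_INHERIT_SHARE));
}
#endif /* notdef */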
1068
1069/*
1070 * vm_map_pageable:
1071 *
1072 * Sets the pageability of the specified address
1073 * range in the target map. Regions specified
1074 * as not pageable require locked-down physical
1075 * memory and physical page maps.
1076 *
1077 * The map must not be locked, but a reference
1078 * must remain to the map throughout the call.
1079 */
 1080int
1081vm_map_pageable(map, start, end, new_pageable)
1082 register vm_map_t map;
1083 register vm_offset_t start;
1084 register vm_offset_t end;
1085 register boolean_t new_pageable;
1086{
1087 register vm_map_entry_t entry;
1088 vm_map_entry_t temp_entry;
1089
1090 vm_map_lock(map);
1091
1092 VM_MAP_RANGE_CHECK(map, start, end);
1093
1094 /*
1095 * Only one pageability change may take place at one
1096 * time, since vm_fault assumes it will be called
1097 * only once for each wiring/unwiring. Therefore, we
1098 * have to make sure we're actually changing the pageability
1099 * for the entire region. We do so before making any changes.
1100 */
1101
1102 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1103 entry = temp_entry;
1104 vm_map_clip_start(map, entry, start);
1105 }
1106 else
1107 entry = temp_entry->next;
1108 temp_entry = entry;
1109
1110 /*
1111 * Actions are rather different for wiring and unwiring,
1112 * so we have two separate cases.
1113 */
1114
1115 if (new_pageable) {
1116
1117 /*
1118 * Unwiring. First ensure that the range to be
1119 * unwired is really wired down.
1120 */
1121 while ((entry != &map->header) && (entry->start < end)) {
1122
1123 if (entry->wired_count == 0) {
1124 vm_map_unlock(map);
1125 return(KERN_INVALID_ARGUMENT);
1126 }
1127 entry = entry->next;
1128 }
1129
1130 /*
1131 * Now decrement the wiring count for each region.
1132 * If a region becomes completely unwired,
1133 * unwire its physical pages and mappings.
1134 */
1135 lock_set_recursive(&map->lock);
1136
1137 entry = temp_entry;
1138 while ((entry != &map->header) && (entry->start < end)) {
1139 vm_map_clip_end(map, entry, end);
1140
1141 entry->wired_count--;
1142 if (entry->wired_count == 0)
1143 vm_fault_unwire(map, entry->start, entry->end);
1144
1145 entry = entry->next;
1146 }
1147 lock_clear_recursive(&map->lock);
1148 }
1149
1150 else {
1151 /*
1152 * Wiring. We must do this in two passes:
1153 *
1154 * 1. Holding the write lock, we increment the
1155 * wiring count. For any area that is not already
1156 * wired, we create any shadow objects that need
1157 * to be created.
1158 *
1159 * 2. We downgrade to a read lock, and call
1160 * vm_fault_wire to fault in the pages for any
1161 * newly wired area (wired_count is 1).
1162 *
1163 * Downgrading to a read lock for vm_fault_wire avoids
1164 * a possible deadlock with another thread that may have
1165 * faulted on one of the pages to be wired (it would mark
1166 * the page busy, blocking us, then in turn block on the
1167 * map lock that we hold). Because of problems in the
1168 * recursive lock package, we cannot upgrade to a write
1169 * lock in vm_map_lookup. Thus, any actions that require
1170 * the write lock must be done beforehand. Because we
1171 * keep the read lock on the map, the copy-on-write status
1172 * of the entries we modify here cannot change.
1173 */
1174
1175 /*
1176 * Pass 1.
1177 */
1178 entry = temp_entry;
1179 while ((entry != &map->header) && (entry->start < end)) {
1180 vm_map_clip_end(map, entry, end);
1181
1182 entry->wired_count++;
1183 if (entry->wired_count == 1) {
1184
1185 /*
1186 * Perform actions of vm_map_lookup that need
1187 * the write lock on the map: create a shadow
1188 * object for a copy-on-write region, or an
1189 * object for a zero-fill region.
1190 *
1191 * We don't have to do this for entries that
1192 * point to sharing maps, because we won't hold
1193 * the lock on the sharing map.
1194 */
1195 if (!entry->is_a_map) {
1196 if (entry->needs_copy &&
1197 ((entry->protection & VM_PROT_WRITE) != 0)) {
1198
1199 vm_object_shadow(&entry->object.vm_object,
1200 &entry->offset,
1201 (vm_size_t)(entry->end
1202 - entry->start));
1203 entry->needs_copy = FALSE;
1204 }
 1205 else if (entry->object.vm_object == NULL) {
1206 entry->object.vm_object =
1207 vm_object_allocate((vm_size_t)(entry->end
1208 - entry->start));
1209 entry->offset = (vm_offset_t)0;
1210 }
1211 }
1212 }
1213
1214 entry = entry->next;
1215 }
1216
1217 /*
1218 * Pass 2.
1219 */
1220
1221 /*
1222 * HACK HACK HACK HACK
1223 *
1224 * If we are wiring in the kernel map or a submap of it,
1225 * unlock the map to avoid deadlocks. We trust that the
1226 * kernel threads are well-behaved, and therefore will
1227 * not do anything destructive to this region of the map
1228 * while we have it unlocked. We cannot trust user threads
1229 * to do the same.
1230 *
1231 * HACK HACK HACK HACK
1232 */
1233 if (vm_map_pmap(map) == kernel_pmap) {
1234 vm_map_unlock(map); /* trust me ... */
1235 }
1236 else {
1237 lock_set_recursive(&map->lock);
1238 lock_write_to_read(&map->lock);
1239 }
1240
1241 entry = temp_entry;
1242 while (entry != &map->header && entry->start < end) {
1243 if (entry->wired_count == 1) {
1244 vm_fault_wire(map, entry->start, entry->end);
1245 }
1246 entry = entry->next;
1247 }
1248
1249 if (vm_map_pmap(map) == kernel_pmap) {
1250 vm_map_lock(map);
1251 }
1252 else {
1253 lock_clear_recursive(&map->lock);
1254 }
1255 }
1256
1257 vm_map_unlock(map);
1258
1259 return(KERN_SUCCESS);
1260}
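
/*
 * Hypothetical usage sketch, not part of the original source: wiring a
 * range for the duration of an operation that cannot tolerate page
 * faults, then unwiring it.  new_pageable is FALSE to wire and TRUE to
 * unwire; the map must be unlocked, as noted above.  The helper name is
 * made up.
 */
#ifdef notdef
static int
example_wire_range(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	int rv;

	rv = vm_map_pageable(map, start, end, FALSE);	/* wire down */
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... operate on the wired pages ... */
	return (vm_map_pageable(map, start, end, TRUE));	/* unwire */
}
#endif /* notdef */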
1261
1262/*
1263 * vm_map_entry_unwire: [ internal use only ]
1264 *
1265 * Make the region specified by this entry pageable.
1266 *
1267 * The map in question should be locked.
1268 * [This is the reason for this routine's existence.]
1269 */
1270void vm_map_entry_unwire(map, entry)
1271 vm_map_t map;
1272 register vm_map_entry_t entry;
1273{
1274 vm_fault_unwire(map, entry->start, entry->end);
1275 entry->wired_count = 0;
1276}
1277
1278/*
1279 * vm_map_entry_delete: [ internal use only ]
1280 *
1281 * Deallocate the given entry from the target map.
1282 */
1283void vm_map_entry_delete(map, entry)
1284 register vm_map_t map;
1285 register vm_map_entry_t entry;
1286{
1287 if (entry->wired_count != 0)
1288 vm_map_entry_unwire(map, entry);
1289
1290 vm_map_entry_unlink(map, entry);
1291 map->size -= entry->end - entry->start;
1292
1293 if (entry->is_a_map || entry->is_sub_map)
1294 vm_map_deallocate(entry->object.share_map);
1295 else
1296 vm_object_deallocate(entry->object.vm_object);
1297
1298 vm_map_entry_dispose(map, entry);
1299}
1300
1301/*
1302 * vm_map_delete: [ internal use only ]
1303 *
1304 * Deallocates the given address range from the target
1305 * map.
1306 *
1307 * When called with a sharing map, removes pages from
1308 * that region from all physical maps.
1309 */
 1310int
1311vm_map_delete(map, start, end)
1312 register vm_map_t map;
1313 vm_offset_t start;
1314 register vm_offset_t end;
1315{
1316 register vm_map_entry_t entry;
1317 vm_map_entry_t first_entry;
1318
1319 /*
1320 * Find the start of the region, and clip it
1321 */
1322
1323 if (!vm_map_lookup_entry(map, start, &first_entry))
1324 entry = first_entry->next;
1325 else {
1326 entry = first_entry;
1327 vm_map_clip_start(map, entry, start);
1328
1329 /*
1330 * Fix the lookup hint now, rather than each
 1331 * time through the loop.
1332 */
1333
1334 SAVE_HINT(map, entry->prev);
1335 }
1336
1337 /*
1338 * Save the free space hint
1339 */
1340
1341 if (map->first_free->start >= start)
1342 map->first_free = entry->prev;
1343
1344 /*
1345 * Step through all entries in this region
1346 */
1347
1348 while ((entry != &map->header) && (entry->start < end)) {
1349 vm_map_entry_t next;
1350 register vm_offset_t s, e;
1351 register vm_object_t object;
1352
1353 vm_map_clip_end(map, entry, end);
1354
1355 next = entry->next;
1356 s = entry->start;
1357 e = entry->end;
1358
1359 /*
1360 * Unwire before removing addresses from the pmap;
1361 * otherwise, unwiring will put the entries back in
1362 * the pmap.
1363 */
1364
1365 object = entry->object.vm_object;
1366 if (entry->wired_count != 0)
1367 vm_map_entry_unwire(map, entry);
1368
1369 /*
1370 * If this is a sharing map, we must remove
1371 * *all* references to this data, since we can't
1372 * find all of the physical maps which are sharing
1373 * it.
1374 */
1375
1376 if (object == kernel_object || object == kmem_object)
1377 vm_object_page_remove(object, entry->offset,
1378 entry->offset + (e - s));
1379 else if (!map->is_main_map)
1380 vm_object_pmap_remove(object,
1381 entry->offset,
1382 entry->offset + (e - s));
1383 else
1384 pmap_remove(map->pmap, s, e);
1385
1386 /*
1387 * Delete the entry (which may delete the object)
1388 * only after removing all pmap entries pointing
1389 * to its pages. (Otherwise, its page frames may
1390 * be reallocated, and any modify bits will be
1391 * set in the wrong object!)
1392 */
1393
1394 vm_map_entry_delete(map, entry);
1395 entry = next;
1396 }
1397 return(KERN_SUCCESS);
1398}
1399
1400/*
1401 * vm_map_remove:
1402 *
1403 * Remove the given address range from the target map.
1404 * This is the exported form of vm_map_delete.
1405 */
 1406int
1407vm_map_remove(map, start, end)
1408 register vm_map_t map;
1409 register vm_offset_t start;
1410 register vm_offset_t end;
1411{
1412 register int result;
1413
1414 vm_map_lock(map);
1415 VM_MAP_RANGE_CHECK(map, start, end);
1416 result = vm_map_delete(map, start, end);
1417 vm_map_unlock(map);
1418
1419 return(result);
1420}
1421
1422/*
1423 * vm_map_check_protection:
1424 *
1425 * Assert that the target map allows the specified
1426 * privilege on the entire address region given.
1427 * The entire region must be allocated.
1428 */
1429boolean_t vm_map_check_protection(map, start, end, protection)
1430 register vm_map_t map;
1431 register vm_offset_t start;
1432 register vm_offset_t end;
1433 register vm_prot_t protection;
1434{
1435 register vm_map_entry_t entry;
1436 vm_map_entry_t tmp_entry;
1437
1438 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1439 return(FALSE);
1440 }
1441
1442 entry = tmp_entry;
1443
1444 while (start < end) {
1445 if (entry == &map->header) {
1446 return(FALSE);
1447 }
1448
1449 /*
1450 * No holes allowed!
1451 */
1452
1453 if (start < entry->start) {
1454 return(FALSE);
1455 }
1456
1457 /*
1458 * Check protection associated with entry.
1459 */
1460
1461 if ((entry->protection & protection) != protection) {
1462 return(FALSE);
1463 }
1464
1465 /* go to next entry */
1466
1467 start = entry->end;
1468 entry = entry->next;
1469 }
1470 return(TRUE);
1471}
1472
1473/*
1474 * vm_map_copy_entry:
1475 *
1476 * Copies the contents of the source entry to the destination
1477 * entry. The entries *must* be aligned properly.
1478 */
1479void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1480 vm_map_t src_map, dst_map;
1481 register vm_map_entry_t src_entry, dst_entry;
1482{
1483 vm_object_t temp_object;
1484
1485 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1486 return;
1487
 1488 if (dst_entry->object.vm_object != NULL &&
 1489 (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
1490 printf("vm_map_copy_entry: copying over permanent data!\n");
1491
1492 /*
1493 * If our destination map was wired down,
1494 * unwire it now.
1495 */
1496
1497 if (dst_entry->wired_count != 0)
1498 vm_map_entry_unwire(dst_map, dst_entry);
1499
1500 /*
1501 * If we're dealing with a sharing map, we
1502 * must remove the destination pages from
1503 * all maps (since we cannot know which maps
1504 * this sharing map belongs in).
1505 */
1506
1507 if (dst_map->is_main_map)
1508 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1509 else
1510 vm_object_pmap_remove(dst_entry->object.vm_object,
1511 dst_entry->offset,
1512 dst_entry->offset +
1513 (dst_entry->end - dst_entry->start));
1514
1515 if (src_entry->wired_count == 0) {
1516
1517 boolean_t src_needs_copy;
1518
1519 /*
1520 * If the source entry is marked needs_copy,
1521 * it is already write-protected.
1522 */
1523 if (!src_entry->needs_copy) {
1524
1525 boolean_t su;
1526
1527 /*
1528 * If the source entry has only one mapping,
1529 * we can just protect the virtual address
1530 * range.
1531 */
1532 if (!(su = src_map->is_main_map)) {
1533 simple_lock(&src_map->ref_lock);
1534 su = (src_map->ref_count == 1);
1535 simple_unlock(&src_map->ref_lock);
1536 }
1537
1538 if (su) {
1539 pmap_protect(src_map->pmap,
1540 src_entry->start,
1541 src_entry->end,
1542 src_entry->protection & ~VM_PROT_WRITE);
1543 }
1544 else {
1545 vm_object_pmap_copy(src_entry->object.vm_object,
1546 src_entry->offset,
1547 src_entry->offset + (src_entry->end
1548 -src_entry->start));
1549 }
1550 }
1551
1552 /*
1553 * Make a copy of the object.
1554 */
1555 temp_object = dst_entry->object.vm_object;
1556 vm_object_copy(src_entry->object.vm_object,
1557 src_entry->offset,
1558 (vm_size_t)(src_entry->end -
1559 src_entry->start),
1560 &dst_entry->object.vm_object,
1561 &dst_entry->offset,
1562 &src_needs_copy);
1563 /*
1564 * If we didn't get a copy-object now, mark the
1565 * source map entry so that a shadow will be created
1566 * to hold its changed pages.
1567 */
1568 if (src_needs_copy)
1569 src_entry->needs_copy = TRUE;
1570
1571 /*
1572 * The destination always needs to have a shadow
1573 * created.
1574 */
1575 dst_entry->needs_copy = TRUE;
1576
1577 /*
1578 * Mark the entries copy-on-write, so that write-enabling
1579 * the entry won't make copy-on-write pages writable.
1580 */
1581 src_entry->copy_on_write = TRUE;
1582 dst_entry->copy_on_write = TRUE;
1583 /*
1584 * Get rid of the old object.
1585 */
1586 vm_object_deallocate(temp_object);
1587
1588 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1589 dst_entry->end - dst_entry->start, src_entry->start);
1590 }
1591 else {
1592 /*
1593 * Of course, wired down pages can't be set copy-on-write.
1594 * Cause wired pages to be copied into the new
1595 * map by simulating faults (the new pages are
1596 * pageable)
1597 */
1598 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1599 }
1600}
1601
1602/*
1603 * vm_map_copy:
1604 *
1605 * Perform a virtual memory copy from the source
1606 * address map/range to the destination map/range.
1607 *
1608 * If src_destroy or dst_alloc is requested,
1609 * the source and destination regions should be
1610 * disjoint, not only in the top-level map, but
1611 * in the sharing maps as well. [The best way
1612 * to guarantee this is to use a new intermediate
1613 * map to make copies. This also reduces map
1614 * fragmentation.]
1615 */
 1616int
1617vm_map_copy(dst_map, src_map,
1618 dst_addr, len, src_addr,
1619 dst_alloc, src_destroy)
1620 vm_map_t dst_map;
1621 vm_map_t src_map;
1622 vm_offset_t dst_addr;
1623 vm_size_t len;
1624 vm_offset_t src_addr;
1625 boolean_t dst_alloc;
1626 boolean_t src_destroy;
1627{
1628 register
1629 vm_map_entry_t src_entry;
1630 register
1631 vm_map_entry_t dst_entry;
1632 vm_map_entry_t tmp_entry;
1633 vm_offset_t src_start;
1634 vm_offset_t src_end;
1635 vm_offset_t dst_start;
1636 vm_offset_t dst_end;
1637 vm_offset_t src_clip;
1638 vm_offset_t dst_clip;
1639 int result;
1640 boolean_t old_src_destroy;
1641
1642 /*
1643 * XXX While we figure out why src_destroy screws up,
1644 * we'll do it by explicitly vm_map_delete'ing at the end.
1645 */
1646
1647 old_src_destroy = src_destroy;
1648 src_destroy = FALSE;
1649
1650 /*
1651 * Compute start and end of region in both maps
1652 */
1653
1654 src_start = src_addr;
1655 src_end = src_start + len;
1656 dst_start = dst_addr;
1657 dst_end = dst_start + len;
1658
1659 /*
1660 * Check that the region can exist in both source
1661 * and destination.
1662 */
1663
1664 if ((dst_end < dst_start) || (src_end < src_start))
1665 return(KERN_NO_SPACE);
1666
1667 /*
1668 * Lock the maps in question -- we avoid deadlock
1669 * by ordering lock acquisition by map value
1670 */
1671
1672 if (src_map == dst_map) {
1673 vm_map_lock(src_map);
1674 }
1675 else if ((int) src_map < (int) dst_map) {
1676 vm_map_lock(src_map);
1677 vm_map_lock(dst_map);
1678 } else {
1679 vm_map_lock(dst_map);
1680 vm_map_lock(src_map);
1681 }
1682
1683 result = KERN_SUCCESS;
1684
1685 /*
1686 * Check protections... source must be completely readable and
1687 * destination must be completely writable. [Note that if we're
1688 * allocating the destination region, we don't have to worry
1689 * about protection, but instead about whether the region
1690 * exists.]
1691 */
1692
1693 if (src_map->is_main_map && dst_map->is_main_map) {
1694 if (!vm_map_check_protection(src_map, src_start, src_end,
1695 VM_PROT_READ)) {
1696 result = KERN_PROTECTION_FAILURE;
1697 goto Return;
1698 }
1699
1700 if (dst_alloc) {
1701 /* XXX Consider making this a vm_map_find instead */
 1702 if ((result = vm_map_insert(dst_map, NULL,
1703 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1704 goto Return;
1705 }
1706 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1707 VM_PROT_WRITE)) {
1708 result = KERN_PROTECTION_FAILURE;
1709 goto Return;
1710 }
1711 }
1712
1713 /*
1714 * Find the start entries and clip.
1715 *
1716 * Note that checking protection asserts that the
1717 * lookup cannot fail.
1718 *
1719 * Also note that we wait to do the second lookup
1720 * until we have done the first clip, as the clip
1721 * may affect which entry we get!
1722 */
1723
1724 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1725 src_entry = tmp_entry;
1726 vm_map_clip_start(src_map, src_entry, src_start);
1727
1728 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1729 dst_entry = tmp_entry;
1730 vm_map_clip_start(dst_map, dst_entry, dst_start);
1731
1732 /*
1733 * If both source and destination entries are the same,
1734 * retry the first lookup, as it may have changed.
1735 */
1736
1737 if (src_entry == dst_entry) {
1738 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1739 src_entry = tmp_entry;
1740 }
1741
1742 /*
1743 * If source and destination entries are still the same,
1744 * a null copy is being performed.
1745 */
1746
1747 if (src_entry == dst_entry)
1748 goto Return;
1749
1750 /*
1751 * Go through entries until we get to the end of the
1752 * region.
1753 */
1754
1755 while (src_start < src_end) {
1756 /*
1757 * Clip the entries to the endpoint of the entire region.
1758 */
1759
1760 vm_map_clip_end(src_map, src_entry, src_end);
1761 vm_map_clip_end(dst_map, dst_entry, dst_end);
1762
1763 /*
1764 * Clip each entry to the endpoint of the other entry.
1765 */
1766
1767 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1768 vm_map_clip_end(src_map, src_entry, src_clip);
1769
1770 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1771 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1772
1773 /*
1774 * Both entries now match in size and relative endpoints.
1775 *
1776 * If both entries refer to a VM object, we can
1777 * deal with them now.
1778 */
1779
1780 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1781 vm_map_copy_entry(src_map, dst_map, src_entry,
1782 dst_entry);
1783 }
1784 else {
1785 register vm_map_t new_dst_map;
1786 vm_offset_t new_dst_start;
1787 vm_size_t new_size;
1788 vm_map_t new_src_map;
1789 vm_offset_t new_src_start;
1790
1791 /*
1792 * We have to follow at least one sharing map.
1793 */
1794
1795 new_size = (dst_entry->end - dst_entry->start);
1796
1797 if (src_entry->is_a_map) {
1798 new_src_map = src_entry->object.share_map;
1799 new_src_start = src_entry->offset;
1800 }
1801 else {
1802 new_src_map = src_map;
1803 new_src_start = src_entry->start;
1804 lock_set_recursive(&src_map->lock);
1805 }
1806
1807 if (dst_entry->is_a_map) {
1808 vm_offset_t new_dst_end;
1809
1810 new_dst_map = dst_entry->object.share_map;
1811 new_dst_start = dst_entry->offset;
1812
1813 /*
1814 * Since the destination sharing entries
1815 * will be merely deallocated, we can
1816 * do that now, and replace the region
1817 * with a null object. [This prevents
1818 * splitting the source map to match
1819 * the form of the destination map.]
1820 * Note that we can only do so if the
1821 * source and destination do not overlap.
1822 */
1823
1824 new_dst_end = new_dst_start + new_size;
1825
1826 if (new_dst_map != new_src_map) {
1827 vm_map_lock(new_dst_map);
1828 (void) vm_map_delete(new_dst_map,
1829 new_dst_start,
1830 new_dst_end);
1831 (void) vm_map_insert(new_dst_map,
 1832 NULL,
1833 (vm_offset_t) 0,
1834 new_dst_start,
1835 new_dst_end);
1836 vm_map_unlock(new_dst_map);
1837 }
1838 }
1839 else {
1840 new_dst_map = dst_map;
1841 new_dst_start = dst_entry->start;
1842 lock_set_recursive(&dst_map->lock);
1843 }
1844
1845 /*
1846 * Recursively copy the sharing map.
1847 */
1848
1849 (void) vm_map_copy(new_dst_map, new_src_map,
1850 new_dst_start, new_size, new_src_start,
1851 FALSE, FALSE);
1852
1853 if (dst_map == new_dst_map)
1854 lock_clear_recursive(&dst_map->lock);
1855 if (src_map == new_src_map)
1856 lock_clear_recursive(&src_map->lock);
1857 }
1858
1859 /*
1860 * Update variables for next pass through the loop.
1861 */
1862
1863 src_start = src_entry->end;
1864 src_entry = src_entry->next;
1865 dst_start = dst_entry->end;
1866 dst_entry = dst_entry->next;
1867
1868 /*
1869 * If the source is to be destroyed, here is the
1870 * place to do it.
1871 */
1872
1873 if (src_destroy && src_map->is_main_map &&
1874 dst_map->is_main_map)
1875 vm_map_entry_delete(src_map, src_entry->prev);
1876 }
1877
1878 /*
1879 * Update the physical maps as appropriate
1880 */
1881
1882 if (src_map->is_main_map && dst_map->is_main_map) {
1883 if (src_destroy)
1884 pmap_remove(src_map->pmap, src_addr, src_addr + len);
1885 }
1886
1887 /*
1888 * Unlock the maps
1889 */
1890
1891 Return: ;
1892
1893 if (old_src_destroy)
1894 vm_map_delete(src_map, src_addr, src_addr + len);
1895
1896 vm_map_unlock(src_map);
1897 if (src_map != dst_map)
1898 vm_map_unlock(dst_map);
1899
1900 return(result);
1901}
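
/*
 * Hypothetical usage sketch, not part of the original source: a plain
 * virtual copy between two existing regions of the same length, with
 * neither destination allocation nor source destruction requested.
 * Both regions must already exist and pass the protection checks made
 * above.  The helper name is made up.
 */
#ifdef notdef
static int
example_virtual_copy(dst_map, dst_addr, src_map, src_addr, len)
	vm_map_t dst_map, src_map;
	vm_offset_t dst_addr, src_addr;
	vm_size_t len;
{
	return (vm_map_copy(dst_map, src_map, dst_addr, len, src_addr,
	    FALSE, FALSE));
}
#endif /* notdef */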
1902
1903/*
1904 * vmspace_fork:
1905 * Create a new process vmspace structure and vm_map
1906 * based on those of an existing process. The new map
1907 * is based on the old map, according to the inheritance
1908 * values on the regions in that map.
 1909 *
 1910 * The source map must not be locked.
 1911 */
1912struct vmspace *
1913vmspace_fork(vm1)
1914 register struct vmspace *vm1;
 1915{
1916 register struct vmspace *vm2;
1917 vm_map_t old_map = &vm1->vm_map;
1918 vm_map_t new_map;
1919 vm_map_entry_t old_entry;
1920 vm_map_entry_t new_entry;
1921 pmap_t new_pmap;
1922
1923 vm_map_lock(old_map);
1924
1925 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1926 old_map->entries_pageable);
1927 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1928 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1929 new_pmap = &vm2->vm_pmap; /* XXX */
1930 new_map = &vm2->vm_map; /* XXX */
1931
1932 old_entry = old_map->header.next;
1933
1934 while (old_entry != &old_map->header) {
1935 if (old_entry->is_sub_map)
1936 panic("vm_map_fork: encountered a submap");
1937
1938 switch (old_entry->inheritance) {
1939 case VM_INHERIT_NONE:
1940 break;
1941
1942 case VM_INHERIT_SHARE:
1943 /*
1944 * If we don't already have a sharing map:
1945 */
1946
1947 if (!old_entry->is_a_map) {
1948 vm_map_t new_share_map;
1949 vm_map_entry_t new_share_entry;
1950
1951 /*
1952 * Create a new sharing map
1953 */
1954
 1955 new_share_map = vm_map_create(NULL,
1956 old_entry->start,
1957 old_entry->end,
1958 TRUE);
1959 new_share_map->is_main_map = FALSE;
1960
1961 /*
1962 * Create the only sharing entry from the
1963 * old task map entry.
1964 */
1965
1966 new_share_entry =
1967 vm_map_entry_create(new_share_map);
1968 *new_share_entry = *old_entry;
1969
1970 /*
1971 * Insert the entry into the new sharing
1972 * map
1973 */
1974
1975 vm_map_entry_link(new_share_map,
1976 new_share_map->header.prev,
1977 new_share_entry);
1978
1979 /*
1980 * Fix up the task map entry to refer
1981 * to the sharing map now.
1982 */
1983
1984 old_entry->is_a_map = TRUE;
1985 old_entry->object.share_map = new_share_map;
1986 old_entry->offset = old_entry->start;
1987 }
1988
1989 /*
1990 * Clone the entry, referencing the sharing map.
1991 */
1992
1993 new_entry = vm_map_entry_create(new_map);
1994 *new_entry = *old_entry;
1995 vm_map_reference(new_entry->object.share_map);
1996
1997 /*
1998 * Insert the entry into the new map -- we
1999 * know we're inserting at the end of the new
2000 * map.
2001 */
2002
2003 vm_map_entry_link(new_map, new_map->header.prev,
2004 new_entry);
2005
2006 /*
2007 * Update the physical map
2008 */
2009
2010 pmap_copy(new_map->pmap, old_map->pmap,
2011 new_entry->start,
2012 (old_entry->end - old_entry->start),
2013 old_entry->start);
2014 break;
2015
2016 case VM_INHERIT_COPY:
2017 /*
2018 * Clone the entry and link into the map.
2019 */
2020
2021 new_entry = vm_map_entry_create(new_map);
2022 *new_entry = *old_entry;
2023 new_entry->wired_count = 0;
 2024 new_entry->object.vm_object = NULL;
2025 new_entry->is_a_map = FALSE;
2026 vm_map_entry_link(new_map, new_map->header.prev,
2027 new_entry);
2028 if (old_entry->is_a_map) {
2029 int check;
2030
2031 check = vm_map_copy(new_map,
2032 old_entry->object.share_map,
2033 new_entry->start,
2034 (vm_size_t)(new_entry->end -
2035 new_entry->start),
2036 old_entry->offset,
2037 FALSE, FALSE);
2038 if (check != KERN_SUCCESS)
2039 printf("vm_map_fork: copy in share_map region failed\n");
2040 }
2041 else {
2042 vm_map_copy_entry(old_map, new_map, old_entry,
2043 new_entry);
2044 }
2045 break;
2046 }
2047 old_entry = old_entry->next;
2048 }
2049
2050 new_map->size = old_map->size;
2051 vm_map_unlock(old_map);
2052
 2053 return(vm2);
2054}
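
/*
 * Hypothetical usage sketch, not part of the original source: the
 * fork-time pairing of the inheritance attribute with vmspace_fork().
 * Regions previously marked VM_INHERIT_SHARE end up referencing a
 * common sharing map in both parent and child; everything else is
 * duplicated copy-on-write.  The parent's map must be unlocked; the
 * helper name is made up.
 */
#ifdef notdef
static struct vmspace *
example_fork_vmspace(parent)
	struct vmspace *parent;
{
	struct vmspace *child;

	child = vmspace_fork(parent);
	/* child->vm_map now mirrors parent per the inheritance values */
	return (child);
}
#endif /* notdef */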
2055
2056/*
2057 * vm_map_lookup:
2058 *
2059 * Finds the VM object, offset, and
2060 * protection for a given virtual address in the
2061 * specified map, assuming a page fault of the
2062 * type specified.
2063 *
2064 * Leaves the map in question locked for read; return
2065 * values are guaranteed until a vm_map_lookup_done
2066 * call is performed. Note that the map argument
2067 * is in/out; the returned map must be used in
2068 * the call to vm_map_lookup_done.
2069 *
2070 * A handle (out_entry) is returned for use in
2071 * vm_map_lookup_done, to make that fast.
2072 *
2073 * If a lookup is requested with "write protection"
2074 * specified, the map may be changed to perform virtual
2075 * copying operations, although the data referenced will
2076 * remain the same.
2077 */
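
/*
 * Hypothetical usage sketch, not part of the original source: the shape
 * of a fault-handler call to vm_map_lookup() as described above.  The
 * map comes back read-locked; once the object/offset pair has been
 * used, the caller releases it with the vm_map_lookup_done(map, entry)
 * call mentioned above, which appears later in this file.  The helper
 * name is made up.
 */
#ifdef notdef
static int
example_fault_lookup(map, va, object, offset)
	vm_map_t map;
	vm_offset_t va;
	vm_object_t *object;
	vm_offset_t *offset;
{
	vm_map_entry_t entry;
	vm_prot_t prot;
	boolean_t wired, su;
	int rv;

	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, object, offset,
	    &prot, &wired, &su);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... resolve the fault against *object at *offset ... */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif /* notdef */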
 2078int
2079vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2080 object, offset, out_prot, wired, single_use)
2081 vm_map_t *var_map; /* IN/OUT */
2082 register vm_offset_t vaddr;
2083 register vm_prot_t fault_type;
2084
2085 vm_map_entry_t *out_entry; /* OUT */
2086 vm_object_t *object; /* OUT */
2087 vm_offset_t *offset; /* OUT */
2088 vm_prot_t *out_prot; /* OUT */
2089 boolean_t *wired; /* OUT */
2090 boolean_t *single_use; /* OUT */
2091{
2092 vm_map_t share_map;
2093 vm_offset_t share_offset;
2094 register vm_map_entry_t entry;
2095 register vm_map_t map = *var_map;
2096 register vm_prot_t prot;
2097 register boolean_t su;
2098
2099 RetryLookup: ;
2100
2101 /*
2102 * Lookup the faulting address.
2103 */
2104
2105 vm_map_lock_read(map);
2106
2107#define RETURN(why) \
2108 { \
2109 vm_map_unlock_read(map); \
2110 return(why); \
2111 }
2112
2113 /*
2114 * If the map has an interesting hint, try it before calling
2115 * full blown lookup routine.
2116 */
2117
2118 simple_lock(&map->hint_lock);
2119 entry = map->hint;
2120 simple_unlock(&map->hint_lock);
2121
2122 *out_entry = entry;
2123
2124 if ((entry == &map->header) ||
2125 (vaddr < entry->start) || (vaddr >= entry->end)) {
2126 vm_map_entry_t tmp_entry;
2127
2128 /*
2129 * Entry was either not a valid hint, or the vaddr
2130 * was not contained in the entry, so do a full lookup.
2131 */
2132 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2133 RETURN(KERN_INVALID_ADDRESS);
2134
2135 entry = tmp_entry;
2136 *out_entry = entry;
2137 }
2138
2139 /*
2140 * Handle submaps.
2141 */
2142
2143 if (entry->is_sub_map) {
2144 vm_map_t old_map = map;
2145
2146 *var_map = map = entry->object.sub_map;
2147 vm_map_unlock_read(old_map);
2148 goto RetryLookup;
2149 }
2150
2151 /*
2152 * Check whether this task is allowed to have
2153 * this page.
2154 */
2155
2156 prot = entry->protection;
2157 if ((fault_type & (prot)) != fault_type)
2158 RETURN(KERN_PROTECTION_FAILURE);
2159
2160 /*
2161 * If this page is not pageable, we have to get
2162 * it for all possible accesses.
2163 */
2164
2165 if (*wired = (entry->wired_count != 0))
2166 prot = fault_type = entry->protection;
2167
2168 /*
2169 * If we don't already have a VM object, track
2170 * it down.
2171 */
2172
2173 if (su = !entry->is_a_map) {
2174 share_map = map;
2175 share_offset = vaddr;
2176 }
2177 else {
2178 vm_map_entry_t share_entry;
2179
2180 /*
2181 * Compute the sharing map, and offset into it.
2182 */
2183
2184 share_map = entry->object.share_map;
2185 share_offset = (vaddr - entry->start) + entry->offset;
2186
2187 /*
2188 * Look for the backing store object and offset
2189 */
2190
2191 vm_map_lock_read(share_map);
2192
2193 if (!vm_map_lookup_entry(share_map, share_offset,
2194 &share_entry)) {
2195 vm_map_unlock_read(share_map);
2196 RETURN(KERN_INVALID_ADDRESS);
2197 }
2198 entry = share_entry;
2199 }
2200
2201 /*
2202	 * If the entry was copy-on-write, either do the copy now or demote the access allowed.
2203 */
2204
2205 if (entry->needs_copy) {
2206 /*
2207 * If we want to write the page, we may as well
2208 * handle that now since we've got the sharing
2209 * map locked.
2210 *
2211 * If we don't need to write the page, we just
2212 * demote the permissions allowed.
2213 */
2214
2215 if (fault_type & VM_PROT_WRITE) {
2216 /*
2217 * Make a new object, and place it in the
2218 * object chain. Note that no new references
2219 * have appeared -- one just moved from the
2220 * share map to the new object.
2221 */
2222
2223 if (lock_read_to_write(&share_map->lock)) {
2224 if (share_map != map)
2225 vm_map_unlock_read(map);
2226 goto RetryLookup;
2227 }
2228
2229 vm_object_shadow(
2230 &entry->object.vm_object,
2231 &entry->offset,
2232 (vm_size_t) (entry->end - entry->start));
2233
2234 entry->needs_copy = FALSE;
2235
2236 lock_write_to_read(&share_map->lock);
2237 }
2238 else {
2239 /*
2240 * We're attempting to read a copy-on-write
2241 * page -- don't allow writes.
2242 */
2243
2244 prot &= (~VM_PROT_WRITE);
2245 }
2246 }
2247
2248 /*
2249 * Create an object if necessary.
2250 */
5d7b9ad3 2251 if (entry->object.vm_object == NULL) {
2252
2253 if (lock_read_to_write(&share_map->lock)) {
2254 if (share_map != map)
2255 vm_map_unlock_read(map);
2256 goto RetryLookup;
2257 }
2258
2259 entry->object.vm_object = vm_object_allocate(
2260 (vm_size_t)(entry->end - entry->start));
2261 entry->offset = 0;
2262 lock_write_to_read(&share_map->lock);
2263 }
2264
2265 /*
2266 * Return the object/offset from this entry. If the entry
2267 * was copy-on-write or empty, it has been fixed up.
2268 */
2269
2270 *offset = (share_offset - entry->start) + entry->offset;
2271 *object = entry->object.vm_object;
2272
2273 /*
2274 * Return whether this is the only map sharing this data.
2275 */
2276
2277 if (!su) {
2278 simple_lock(&share_map->ref_lock);
2279 su = (share_map->ref_count == 1);
2280 simple_unlock(&share_map->ref_lock);
2281 }
2282
2283 *out_prot = prot;
2284 *single_use = su;
2285
2286 return(KERN_SUCCESS);
2287
2288#undef RETURN
2289}
2290
2291/*
2292 * vm_map_lookup_done:
2293 *
2294 * Releases locks acquired by a vm_map_lookup
2295 * (according to the handle returned by that lookup).
2296 */
2297
2298void vm_map_lookup_done(map, entry)
2299 register vm_map_t map;
2300 vm_map_entry_t entry;
2301{
2302 /*
2303 * If this entry references a map, unlock it first.
2304 */
2305
2306 if (entry->is_a_map)
2307 vm_map_unlock_read(entry->object.share_map);
2308
2309 /*
2310 * Unlock the main-level map
2311 */
2312
2313 vm_map_unlock_read(map);
2314}
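/*
 * A minimal sketch of the lookup contract described above, assuming a
 * hypothetical caller-supplied map and fault address.  Note that the map
 * argument is in/out: the map written back by vm_map_lookup() (possibly a
 * submap) is the one that must be passed to vm_map_lookup_done().  This
 * example is illustrative only and is not part of the original source.
 */
#if 0
static int
vm_map_lookup_example(lookup_map, vaddr)
	vm_map_t lookup_map;		/* hypothetical source map */
	vm_offset_t vaddr;		/* hypothetical fault address */
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired, single_use;
	int result;

	result = vm_map_lookup(&lookup_map, vaddr, VM_PROT_READ, &entry,
	    &object, &offset, &prot, &wired, &single_use);
	if (result == KERN_SUCCESS) {
		/*
		 * object/offset/prot remain valid until the locks taken
		 * by the lookup are released below.
		 */
		vm_map_lookup_done(lookup_map, entry);
	}
	return(result);
}
#endif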
2315
2316/*
2317 * Routine: vm_map_simplify
2318 * Purpose:
2319 * Attempt to simplify the map representation in
2320 * the vicinity of the given starting address.
2321 * Note:
2322 * This routine is intended primarily to keep the
2323 * kernel maps more compact -- they generally don't
2324 * benefit from the "expand a map entry" technology
2325 * at allocation time because the adjacent entry
2326 * is often wired down.
2327 */
2328void vm_map_simplify(map, start)
2329 vm_map_t map;
2330 vm_offset_t start;
2331{
2332 vm_map_entry_t this_entry;
2333 vm_map_entry_t prev_entry;
2334
2335 vm_map_lock(map);
2336 if (
2337 (vm_map_lookup_entry(map, start, &this_entry)) &&
2338 ((prev_entry = this_entry->prev) != &map->header) &&
2339
2340 (prev_entry->end == start) &&
2341 (map->is_main_map) &&
2342
2343 (prev_entry->is_a_map == FALSE) &&
2344 (prev_entry->is_sub_map == FALSE) &&
2345
2346 (this_entry->is_a_map == FALSE) &&
2347 (this_entry->is_sub_map == FALSE) &&
2348
2349 (prev_entry->inheritance == this_entry->inheritance) &&
2350 (prev_entry->protection == this_entry->protection) &&
2351 (prev_entry->max_protection == this_entry->max_protection) &&
2352 (prev_entry->wired_count == this_entry->wired_count) &&
2353
2354 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2355 (prev_entry->needs_copy == this_entry->needs_copy) &&
2356
2357 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2358 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2359 == this_entry->offset)
2360 ) {
2361 if (map->first_free == this_entry)
2362 map->first_free = prev_entry;
2363
2364 SAVE_HINT(map, prev_entry);
2365 vm_map_entry_unlink(map, this_entry);
2366 prev_entry->end = this_entry->end;
2367 vm_object_deallocate(this_entry->object.vm_object);
2368 vm_map_entry_dispose(map, this_entry);
2369 }
2370 vm_map_unlock(map);
2371}
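/*
 * A small illustration of the merge performed above, with hypothetical
 * addresses: two compatible neighbours backed by the same object at
 * contiguous offsets collapse into a single entry, e.g. a call such as
 * vm_map_simplify(kernel_map, (vm_offset_t)0x3000) would turn
 *
 *	[0x1000,0x3000) offset 0x0	[0x3000,0x5000) offset 0x2000
 * into
 *	[0x1000,0x5000) offset 0x0
 *
 * provided the protections, inheritance, wiring and backing object match.
 */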
2372
2373/*
2374 * vm_map_print: [ debug ]
2375 */
2376void vm_map_print(map, full)
2377 register vm_map_t map;
2378 boolean_t full;
2379{
2380 register vm_map_entry_t entry;
2381 extern int indent;
2382
2383 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2384 (map->is_main_map ? "Task" : "Share"),
2385 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2386 map->timestamp);
2387
2388 if (!full && indent)
2389 return;
2390
2391 indent += 2;
2392 for (entry = map->header.next; entry != &map->header;
2393 entry = entry->next) {
2394 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2395 (int) entry, (int) entry->start, (int) entry->end);
2396 if (map->is_main_map) {
2397 static char *inheritance_name[4] =
2398 { "share", "copy", "none", "donate_copy"};
2399 printf("prot=%x/%x/%s, ",
2400 entry->protection,
2401 entry->max_protection,
2402 inheritance_name[entry->inheritance]);
2403 if (entry->wired_count != 0)
2404 printf("wired, ");
2405 }
2406
2407 if (entry->is_a_map || entry->is_sub_map) {
2408 printf("share=0x%x, offset=0x%x\n",
2409 (int) entry->object.share_map,
2410 (int) entry->offset);
2411 if ((entry->prev == &map->header) ||
2412 (!entry->prev->is_a_map) ||
2413 (entry->prev->object.share_map !=
2414 entry->object.share_map)) {
2415 indent += 2;
2416 vm_map_print(entry->object.share_map, full);
2417 indent -= 2;
2418 }
2419
2420 }
2421 else {
2422 printf("object=0x%x, offset=0x%x",
2423 (int) entry->object.vm_object,
2424 (int) entry->offset);
2425 if (entry->copy_on_write)
2426 printf(", copy (%s)",
2427 entry->needs_copy ? "needed" : "done");
2428 printf("\n");
2429
2430 if ((entry->prev == &map->header) ||
2431 (entry->prev->is_a_map) ||
2432 (entry->prev->object.vm_object !=
2433 entry->object.vm_object)) {
2434 indent += 2;
2435 vm_object_print(entry->object.vm_object, full);
2436 indent -= 2;
2437 }
2438 }
2439 }
2440 indent -= 2;
2441}
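/*
 * Given the format strings above, a dump of a small task map might look
 * roughly like the following (all addresses and counts are hypothetical):
 *
 *	Task map 0x80123400: pmap=0x80123480,ref=1,nentries=2,version=7
 *	  map entry 0x80234500: start=0x1000, end=0x3000, prot=3/7/copy, object=0x80345600, offset=0x0
 *	  map entry 0x80234580: start=0x3000, end=0x5000, prot=1/5/copy, wired, object=0x80345680, offset=0x0, copy (needed)
 */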