1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_map.c 7.3 (Berkeley) 4/21/91
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
65 * -------------------- ----- ----------------------
66 * CURRENT PATCH LEVEL: 1 00137
67 * -------------------- ----- ----------------------
68 *
69 * 08 Apr 93 Yuval Yarom Several VM system fixes
70 */
71
72/*
73 * Virtual memory mapping module.
74 */
75
76#include "param.h"
77#include "malloc.h"
78#include "vm.h"
79#include "vm_page.h"
80#include "vm_object.h"
81
82/*
83 * Virtual memory maps provide for the mapping, protection,
84 * and sharing of virtual memory objects. In addition,
85 * this module provides for an efficient virtual copy of
86 * memory from one map to another.
87 *
88 * Synchronization is required prior to most operations.
89 *
90 * Maps consist of an ordered doubly-linked list of simple
91 * entries; a single hint is used to speed up lookups.
92 *
93 * In order to properly represent the sharing of virtual
94 * memory regions among maps, the map structure is bi-level.
95 * Top-level ("address") maps refer to regions of sharable
96 * virtual memory. These regions are implemented as
97 * ("sharing") maps, which then refer to the actual virtual
98 * memory objects. When two address maps "share" memory,
99 * their top-level maps both have references to the same
100 * sharing map. When memory is virtual-copied from one
101 * address map to another, the references in the sharing
102 * maps are actually copied -- no copying occurs at the
103 * virtual memory object level.
104 *
 105 * Since portions of maps are specified by start/end addresses,
106 * which may not align with existing map entries, all
107 * routines merely "clip" entries to these start/end values.
108 * [That is, an entry is split into two, bordering at a
109 * start or end value.] Note that these clippings may not
110 * always be necessary (as the two resulting entries are then
111 * not changed); however, the clipping is done for convenience.
112 * No attempt is currently made to "glue back together" two
113 * abutting entries.
114 *
115 * As mentioned above, virtual copy operations are performed
116 * by copying VM object references from one sharing map to
117 * another, and then marking both regions as copy-on-write.
118 * It is important to note that only one writeable reference
119 * to a VM object region exists in any map -- this means that
120 * shadow object creation can be delayed until a write operation
121 * occurs.
122 */
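/*
 * [Editor's note: the following sketch is an illustrative addition and is
 * not part of the original source.]  A hypothetical in-kernel caller would
 * drive the operations described above roughly as follows: reserve a
 * region with vm_map_find, narrow its protection, and later remove it.
 * The map pointer, "addr" and "size" values are assumptions made purely
 * for the example (size is just an arbitrary page-aligned length).
 *
 *	vm_offset_t addr = 0;
 *	vm_size_t size = 4 * 4096;
 *
 *	if (vm_map_find(map, NULL, (vm_offset_t) 0, &addr, size, TRUE)
 *	    == KERN_SUCCESS) {
 *		(void) vm_map_protect(map, addr, addr + size,
 *		    VM_PROT_READ, FALSE);
 *		(void) vm_map_remove(map, addr, addr + size);
 *	}
 */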
123
124/*
125 * vm_map_startup:
126 *
127 * Initialize the vm_map module. Must be called before
128 * any other vm_map routines.
129 *
130 * Map and entry structures are allocated from the general
131 * purpose memory pool with some exceptions:
132 *
133 * - The kernel map and kmem submap are allocated statically.
134 * - Kernel map entries are allocated out of a static pool.
135 *
136 * These restrictions are necessary since malloc() uses the
137 * maps and requires map entries.
138 */
139
140vm_offset_t kentry_data;
141vm_size_t kentry_data_size;
142vm_map_entry_t kentry_free;
143vm_map_t kmap_free;
144
145void vm_map_startup()
146{
147 register int i;
148 register vm_map_entry_t mep;
149 vm_map_t mp;
150
151 /*
152 * Static map structures for allocation before initialization of
153 * kernel map or kmem map. vm_map_create knows how to deal with them.
154 */
155 kmap_free = mp = (vm_map_t) kentry_data;
156 i = MAX_KMAP;
157 while (--i > 0) {
158 mp->header.next = (vm_map_entry_t) (mp + 1);
159 mp++;
160 }
161 mp++->header.next = NULL;
162
163 /*
164 * Form a free list of statically allocated kernel map entries
165 * with the rest.
166 */
167 kentry_free = mep = (vm_map_entry_t) mp;
168 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
169 while (--i > 0) {
170 mep->next = mep + 1;
171 mep++;
172 }
173 mep->next = NULL;
174}
175
176/*
177 * Allocate a vmspace structure, including a vm_map and pmap,
178 * and initialize those structures. The refcnt is set to 1.
179 * The remaining fields must be initialized by the caller.
180 */
181struct vmspace *
182vmspace_alloc(min, max, pageable)
183 vm_offset_t min, max;
184 int pageable;
185{
186 register struct vmspace *vm;
187
188 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
189 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
190 vm_map_init(&vm->vm_map, min, max, pageable);
191 pmap_pinit(&vm->vm_pmap);
192 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
193 vm->vm_refcnt = 1;
194 return (vm);
195}
196
197void
198vmspace_free(vm)
199 register struct vmspace *vm;
200{
201
202 if (--vm->vm_refcnt == 0) {
203 /*
204 * Lock the map, to wait out all other references to it.
205 * Delete all of the mappings and pages they hold,
206 * then call the pmap module to reclaim anything left.
207 */
208 vm_map_lock(&vm->vm_map);
209 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
210 vm->vm_map.max_offset);
211 pmap_release(&vm->vm_pmap);
212 FREE(vm, M_VMMAP);
213 }
214}
215
216/*
217 * vm_map_create:
218 *
219 * Creates and returns a new empty VM map with
220 * the given physical map structure, and having
221 * the given lower and upper address bounds.
222 */
223vm_map_t vm_map_create(pmap, min, max, pageable)
224 pmap_t pmap;
225 vm_offset_t min, max;
226 boolean_t pageable;
227{
228 register vm_map_t result;
229 extern vm_map_t kernel_map, kmem_map;
230
231 if (kmem_map == NULL) {
232 result = kmap_free;
233 kmap_free = (vm_map_t) result->header.next;
234 if (result == NULL)
235 panic("vm_map_create: out of maps");
236 } else
237 MALLOC(result, vm_map_t, sizeof(struct vm_map),
238 M_VMMAP, M_WAITOK);
239
240 vm_map_init(result, min, max, pageable);
241 result->pmap = pmap;
242 return(result);
243}
244
245/*
246 * Initialize an existing vm_map structure
247 * such as that in the vmspace structure.
248 * The pmap is set elsewhere.
249 */
250void
251vm_map_init(map, min, max, pageable)
252 register struct vm_map *map;
253 vm_offset_t min, max;
254 boolean_t pageable;
255{
256 map->header.next = map->header.prev = &map->header;
257 map->nentries = 0;
258 map->size = 0;
259 map->ref_count = 1;
260 map->is_main_map = TRUE;
261 map->min_offset = min;
262 map->max_offset = max;
263 map->entries_pageable = pageable;
264 map->first_free = &map->header;
265 map->hint = &map->header;
266 map->timestamp = 0;
267 lock_init(&map->lock, TRUE);
268 simple_lock_init(&map->ref_lock);
269 simple_lock_init(&map->hint_lock);
270}
271
272/*
273 * vm_map_entry_create: [ internal use only ]
274 *
275 * Allocates a VM map entry for insertion.
 276 * No entry fields are filled in.
277 */
278vm_map_entry_t vm_map_entry_create(map)
279 vm_map_t map;
280{
281 vm_map_entry_t entry;
282 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
283
284 if (map == kernel_map || map == kmem_map || map == mb_map
285 || map == buffer_map || map == pager_map) {
286 if (entry = kentry_free)
287 kentry_free = kentry_free->next;
288 } else
289 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
290 M_VMMAPENT, M_WAITOK);
291 if (entry == NULL)
292 panic("vm_map_entry_create: out of map entries");
293
294 return(entry);
295}
296
297/*
298 * vm_map_entry_dispose: [ internal use only ]
299 *
300 * Inverse of vm_map_entry_create.
301 */
302void vm_map_entry_dispose(map, entry)
303 vm_map_t map;
304 vm_map_entry_t entry;
305{
306 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
307
308 if (map == kernel_map || map == kmem_map || map == mb_map
309 || map == buffer_map || map == pager_map) {
310 entry->next = kentry_free;
311 kentry_free = entry;
312 } else
313 FREE(entry, M_VMMAPENT);
314}
315
316/*
317 * vm_map_entry_{un,}link:
318 *
319 * Insert/remove entries from maps.
320 */
321#define vm_map_entry_link(map, after_where, entry) \
322 { \
323 (map)->nentries++; \
324 (entry)->prev = (after_where); \
325 (entry)->next = (after_where)->next; \
326 (entry)->prev->next = (entry); \
327 (entry)->next->prev = (entry); \
328 }
329#define vm_map_entry_unlink(map, entry) \
330 { \
331 (map)->nentries--; \
332 (entry)->next->prev = (entry)->prev; \
333 (entry)->prev->next = (entry)->next; \
334 }
335
336/*
337 * vm_map_reference:
338 *
339 * Creates another valid reference to the given map.
340 *
341 */
342void vm_map_reference(map)
343 register vm_map_t map;
344{
345 if (map == NULL)
346 return;
347
348 simple_lock(&map->ref_lock);
349 map->ref_count++;
350 simple_unlock(&map->ref_lock);
351}
352
353/*
354 * vm_map_deallocate:
355 *
356 * Removes a reference from the specified map,
357 * destroying it if no references remain.
358 * The map should not be locked.
359 */
360void vm_map_deallocate(map)
361 register vm_map_t map;
362{
363 register int c;
364
365 if (map == NULL)
366 return;
367
368 simple_lock(&map->ref_lock);
369 c = --map->ref_count;
370 simple_unlock(&map->ref_lock);
371
372 if (c > 0) {
373 return;
374 }
375
376 /*
377 * Lock the map, to wait out all other references
378 * to it.
379 */
380
381 vm_map_lock(map);
382
383 (void) vm_map_delete(map, map->min_offset, map->max_offset);
384
385 pmap_destroy(map->pmap);
386
387 FREE(map, M_VMMAP);
388}
389
390/*
391 * vm_map_insert: [ internal use only ]
392 *
393 * Inserts the given whole VM object into the target
394 * map at the specified address range. The object's
395 * size should match that of the address range.
396 *
397 * Requires that the map be locked, and leaves it so.
398 */
399vm_map_insert(map, object, offset, start, end)
400 vm_map_t map;
401 vm_object_t object;
402 vm_offset_t offset;
403 vm_offset_t start;
404 vm_offset_t end;
405{
406 register vm_map_entry_t new_entry;
407 register vm_map_entry_t prev_entry;
408 vm_map_entry_t temp_entry;
409
410 /*
411 * Check that the start and end points are not bogus.
412 */
413
414 if ((start < map->min_offset) || (end > map->max_offset) ||
415 (start >= end))
416 return(KERN_INVALID_ADDRESS);
417
418 /*
419 * Find the entry prior to the proposed
420 * starting address; if it's part of an
421 * existing entry, this range is bogus.
422 */
423
424 if (vm_map_lookup_entry(map, start, &temp_entry))
425 return(KERN_NO_SPACE);
426
427 prev_entry = temp_entry;
428
429 /*
430 * Assert that the next entry doesn't overlap the
431 * end point.
432 */
433
434 if ((prev_entry->next != &map->header) &&
435 (prev_entry->next->start < end))
436 return(KERN_NO_SPACE);
437
438 /*
439 * See if we can avoid creating a new entry by
440 * extending one of our neighbors.
441 */
442
443 if (object == NULL) {
444 if ((prev_entry != &map->header) &&
445 (prev_entry->end == start) &&
446 (map->is_main_map) &&
447 (prev_entry->is_a_map == FALSE) &&
448 (prev_entry->is_sub_map == FALSE) &&
449 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
450 (prev_entry->protection == VM_PROT_DEFAULT) &&
451 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
452 (prev_entry->wired_count == 0)) {
453
454 if (vm_object_coalesce(prev_entry->object.vm_object,
455 NULL,
456 prev_entry->offset,
457 (vm_offset_t) 0,
458 (vm_size_t)(prev_entry->end
459 - prev_entry->start),
460 (vm_size_t)(end - prev_entry->end))) {
461 /*
462 * Coalesced the two objects - can extend
463 * the previous map entry to include the
464 * new range.
465 */
466 map->size += (end - prev_entry->end);
467 prev_entry->end = end;
468 return(KERN_SUCCESS);
469 }
470 }
471 }
472
473 /*
474 * Create a new entry
475 */
476
477 new_entry = vm_map_entry_create(map);
478 new_entry->start = start;
479 new_entry->end = end;
480
481 new_entry->is_a_map = FALSE;
482 new_entry->is_sub_map = FALSE;
483 new_entry->object.vm_object = object;
484 new_entry->offset = offset;
485
486 new_entry->copy_on_write = FALSE;
487 new_entry->needs_copy = FALSE;
488
489 if (map->is_main_map) {
490 new_entry->inheritance = VM_INHERIT_DEFAULT;
491 new_entry->protection = VM_PROT_DEFAULT;
492 new_entry->max_protection = VM_PROT_DEFAULT;
493 new_entry->wired_count = 0;
494 }
495
496 /*
497 * Insert the new entry into the list
498 */
499
500 vm_map_entry_link(map, prev_entry, new_entry);
501 map->size += new_entry->end - new_entry->start;
502
503 /*
504 * Update the free space hint
505 */
506
507 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
508 map->first_free = new_entry;
509
510 return(KERN_SUCCESS);
511}
512
513/*
514 * SAVE_HINT:
515 *
516 * Saves the specified entry as the hint for
517 * future lookups. Performs necessary interlocks.
518 */
519#define SAVE_HINT(map,value) \
520 simple_lock(&(map)->hint_lock); \
521 (map)->hint = (value); \
522 simple_unlock(&(map)->hint_lock);
523
524/*
525 * vm_map_lookup_entry: [ internal use only ]
526 *
527 * Finds the map entry containing (or
528 * immediately preceding) the specified address
529 * in the given map; the entry is returned
530 * in the "entry" parameter. The boolean
531 * result indicates whether the address is
532 * actually contained in the map.
533 */
534boolean_t vm_map_lookup_entry(map, address, entry)
535 register vm_map_t map;
536 register vm_offset_t address;
537 vm_map_entry_t *entry; /* OUT */
538{
539 register vm_map_entry_t cur;
540 register vm_map_entry_t last;
541
542 /*
543 * Start looking either from the head of the
544 * list, or from the hint.
545 */
546
547 simple_lock(&map->hint_lock);
548 cur = map->hint;
549 simple_unlock(&map->hint_lock);
550
551 if (cur == &map->header)
552 cur = cur->next;
553
554 if (address >= cur->start) {
555 /*
556 * Go from hint to end of list.
557 *
558 * But first, make a quick check to see if
559 * we are already looking at the entry we
560 * want (which is usually the case).
561 * Note also that we don't need to save the hint
562 * here... it is the same hint (unless we are
563 * at the header, in which case the hint didn't
564 * buy us anything anyway).
565 */
566 last = &map->header;
567 if ((cur != last) && (cur->end > address)) {
568 *entry = cur;
569 return(TRUE);
570 }
571 }
572 else {
573 /*
574 * Go from start to hint, *inclusively*
575 */
576 last = cur->next;
577 cur = map->header.next;
578 }
579
580 /*
581 * Search linearly
582 */
583
584 while (cur != last) {
585 if (cur->end > address) {
586 if (address >= cur->start) {
587 /*
588 * Save this lookup for future
589 * hints, and return
590 */
591
592 *entry = cur;
593 SAVE_HINT(map, cur);
594 return(TRUE);
595 }
596 break;
597 }
598 cur = cur->next;
599 }
600 *entry = cur->prev;
601 SAVE_HINT(map, *entry);
602 return(FALSE);
603}
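/*
 * [Editor's note: illustrative sketch, not part of the original source.]
 * The usual caller idiom for the boolean result above is the
 * lookup-then-clip pattern used throughout this file: if the address is
 * contained in an entry, clip that entry at the address; otherwise the
 * returned entry merely precedes the range, so step to its successor.
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 */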
604
605/*
606 * vm_map_find finds an unallocated region in the target address
607 * map with the given length. The search is defined to be
608 * first-fit from the specified address; the region found is
609 * returned in the same parameter.
610 *
611 */
612vm_map_find(map, object, offset, addr, length, find_space)
613 vm_map_t map;
614 vm_object_t object;
615 vm_offset_t offset;
616 vm_offset_t *addr; /* IN/OUT */
617 vm_size_t length;
618 boolean_t find_space;
619{
620 register vm_map_entry_t entry;
621 register vm_offset_t start;
622 register vm_offset_t end;
623 int result;
624
625 start = *addr;
626
627 vm_map_lock(map);
628
629 if (find_space) {
630 /*
631 * Calculate the first possible address.
632 */
633
634 if (start < map->min_offset)
635 start = map->min_offset;
636 if (start > map->max_offset) {
637 vm_map_unlock(map);
638 return (KERN_NO_SPACE);
639 }
640
641 /*
642 * Look for the first possible address;
643 * if there's already something at this
644 * address, we have to start after it.
645 */
646
647 if (start == map->min_offset) {
648 if ((entry = map->first_free) != &map->header)
649 start = entry->end;
650 } else {
651 vm_map_entry_t tmp_entry;
652 if (vm_map_lookup_entry(map, start, &tmp_entry))
653 start = tmp_entry->end;
654 entry = tmp_entry;
655 }
656
657 /*
658 * In any case, the "entry" always precedes
659 * the proposed new region throughout the
660 * loop:
661 */
662
663 while (TRUE) {
664 register vm_map_entry_t next;
665
666 /*
667 * Find the end of the proposed new region.
668 * Be sure we didn't go beyond the end, or
669 * wrap around the address.
670 */
671
672 end = start + length;
673
674 if ((end > map->max_offset) || (end < start)) {
675 vm_map_unlock(map);
676 return (KERN_NO_SPACE);
677 }
678
679 /*
680 * If there are no more entries, we must win.
681 */
682
683 next = entry->next;
684 if (next == &map->header)
685 break;
686
687 /*
688 * If there is another entry, it must be
689 * after the end of the potential new region.
690 */
691
692 if (next->start >= end)
693 break;
694
695 /*
696 * Didn't fit -- move to the next entry.
697 */
698
699 entry = next;
700 start = entry->end;
701 }
702 *addr = start;
703
704 SAVE_HINT(map, entry);
705 }
706
707 result = vm_map_insert(map, object, offset, start, start + length);
708
709 vm_map_unlock(map);
710 return(result);
711}
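/*
 * [Editor's note: illustrative sketch, not part of the original source.]
 * The find_space flag selects between the two behaviours above: FALSE
 * takes the caller's address literally and simply attempts the insert
 * there, while TRUE treats it as a first-fit starting hint and returns
 * the address actually chosen through the IN/OUT parameter.  The names
 * "object", "off", "hint_addr" and "len" are assumptions for the example.
 *
 *	vm_offset_t where = hint_addr;
 *
 *	if (vm_map_find(map, object, off, &where, len, TRUE) == KERN_SUCCESS) {
 *		... the region now occupies [where, where + len) ...
 *	}
 */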
712
713/*
714 * vm_map_simplify_entry: [ internal use only ]
715 *
716 * Simplify the given map entry by:
717 * removing extra sharing maps
718 * [XXX maybe later] merging with a neighbor
719 */
720void vm_map_simplify_entry(map, entry)
721 vm_map_t map;
722 vm_map_entry_t entry;
723{
724#ifdef lint
725 map++;
726#endif lint
727
728 /*
729 * If this entry corresponds to a sharing map, then
730 * see if we can remove the level of indirection.
731 * If it's not a sharing map, then it points to
732 * a VM object, so see if we can merge with either
733 * of our neighbors.
734 */
735
736 if (entry->is_sub_map)
737 return;
738 if (entry->is_a_map) {
739#if 0
740 vm_map_t my_share_map;
741 int count;
742
743 my_share_map = entry->object.share_map;
744 simple_lock(&my_share_map->ref_lock);
745 count = my_share_map->ref_count;
746 simple_unlock(&my_share_map->ref_lock);
747
748 if (count == 1) {
749 /* Can move the region from
750 * entry->start to entry->end (+ entry->offset)
751 * in my_share_map into place of entry.
752 * Later.
753 */
754 }
755#endif 0
756 }
757 else {
758 /*
759 * Try to merge with our neighbors.
760 *
761 * Conditions for merge are:
762 *
763 * 1. entries are adjacent.
764 * 2. both entries point to objects
765 * with null pagers.
766 *
767 * If a merge is possible, we replace the two
768 * entries with a single entry, then merge
769 * the two objects into a single object.
770 *
771 * Now, all that is left to do is write the
772 * code!
773 */
774 }
775}
776
777/*
778 * vm_map_clip_start: [ internal use only ]
779 *
780 * Asserts that the given entry begins at or after
781 * the specified address; if necessary,
782 * it splits the entry into two.
783 */
784#define vm_map_clip_start(map, entry, startaddr) \
785{ \
786 if (startaddr > entry->start) \
787 _vm_map_clip_start(map, entry, startaddr); \
788}
789
790/*
791 * This routine is called only when it is known that
792 * the entry must be split.
793 */
794void _vm_map_clip_start(map, entry, start)
795 register vm_map_t map;
796 register vm_map_entry_t entry;
797 register vm_offset_t start;
798{
799 register vm_map_entry_t new_entry;
800
801 /*
802 * See if we can simplify this entry first
803 */
804
805 vm_map_simplify_entry(map, entry);
806
807 /*
808 * Split off the front portion --
809 * note that we must insert the new
810 * entry BEFORE this one, so that
811 * this entry has the specified starting
812 * address.
813 */
814
815 new_entry = vm_map_entry_create(map);
816 *new_entry = *entry;
817
818 new_entry->end = start;
819 entry->offset += (start - entry->start);
820 entry->start = start;
821
822 vm_map_entry_link(map, entry->prev, new_entry);
823
824 if (entry->is_a_map || entry->is_sub_map)
825 vm_map_reference(new_entry->object.share_map);
826 else
827 vm_object_reference(new_entry->object.vm_object);
828}
829
830/*
831 * vm_map_clip_end: [ internal use only ]
832 *
833 * Asserts that the given entry ends at or before
834 * the specified address; if necessary,
835 * it splits the entry into two.
836 */
837
838void _vm_map_clip_end();
839#define vm_map_clip_end(map, entry, endaddr) \
840{ \
841 if (endaddr < entry->end) \
842 _vm_map_clip_end(map, entry, endaddr); \
843}
844
845/*
846 * This routine is called only when it is known that
847 * the entry must be split.
848 */
849void _vm_map_clip_end(map, entry, end)
850 register vm_map_t map;
851 register vm_map_entry_t entry;
852 register vm_offset_t end;
853{
854 register vm_map_entry_t new_entry;
855
856 /*
857 * Create a new entry and insert it
858 * AFTER the specified entry
859 */
860
861 new_entry = vm_map_entry_create(map);
862 *new_entry = *entry;
863
864 new_entry->start = entry->end = end;
865 new_entry->offset += (end - entry->start);
866
867 vm_map_entry_link(map, entry, new_entry);
868
869 if (entry->is_a_map || entry->is_sub_map)
870 vm_map_reference(new_entry->object.share_map);
871 else
872 vm_object_reference(new_entry->object.vm_object);
873}
874
875/*
876 * VM_MAP_RANGE_CHECK: [ internal use only ]
877 *
878 * Asserts that the starting and ending region
879 * addresses fall within the valid range of the map.
880 */
881#define VM_MAP_RANGE_CHECK(map, start, end) \
882 { \
883 if (start < vm_map_min(map)) \
884 start = vm_map_min(map); \
885 if (end > vm_map_max(map)) \
886 end = vm_map_max(map); \
887 if (start > end) \
888 start = end; \
889 }
890
891/*
892 * vm_map_submap: [ kernel use only ]
893 *
894 * Mark the given range as handled by a subordinate map.
895 *
896 * This range must have been created with vm_map_find,
897 * and no other operations may have been performed on this
898 * range prior to calling vm_map_submap.
899 *
900 * Only a limited number of operations can be performed
 901 * within this range after calling vm_map_submap:
902 * vm_fault
903 * [Don't try vm_map_copy!]
904 *
905 * To remove a submapping, one must first remove the
906 * range from the superior map, and then destroy the
907 * submap (if desired). [Better yet, don't try it.]
908 */
909vm_map_submap(map, start, end, submap)
910 register vm_map_t map;
911 register vm_offset_t start;
912 register vm_offset_t end;
913 vm_map_t submap;
914{
915 vm_map_entry_t entry;
916 register int result = KERN_INVALID_ARGUMENT;
917
918 vm_map_lock(map);
919
920 VM_MAP_RANGE_CHECK(map, start, end);
921
922 if (vm_map_lookup_entry(map, start, &entry)) {
923 vm_map_clip_start(map, entry, start);
924 }
925 else
926 entry = entry->next;
927
928 vm_map_clip_end(map, entry, end);
929
930 if ((entry->start == start) && (entry->end == end) &&
931 (!entry->is_a_map) &&
932 (entry->object.vm_object == NULL) &&
933 (!entry->copy_on_write)) {
934 entry->is_a_map = FALSE;
935 entry->is_sub_map = TRUE;
936 vm_map_reference(entry->object.sub_map = submap);
937 result = KERN_SUCCESS;
938 }
939 vm_map_unlock(map);
940
941 return(result);
942}
943
944/*
945 * vm_map_protect:
946 *
947 * Sets the protection of the specified address
948 * region in the target map. If "set_max" is
949 * specified, the maximum protection is to be set;
950 * otherwise, only the current protection is affected.
951 */
952vm_map_protect(map, start, end, new_prot, set_max)
953 register vm_map_t map;
954 register vm_offset_t start;
955 register vm_offset_t end;
956 register vm_prot_t new_prot;
957 register boolean_t set_max;
958{
959 register vm_map_entry_t current;
960 vm_map_entry_t entry;
961
962 vm_map_lock(map);
963
964 VM_MAP_RANGE_CHECK(map, start, end);
965
966 if (vm_map_lookup_entry(map, start, &entry)) {
967 vm_map_clip_start(map, entry, start);
968 }
969 else
970 entry = entry->next;
971
972 /*
973 * Make a first pass to check for protection
974 * violations.
975 */
976
977 current = entry;
978 while ((current != &map->header) && (current->start < end)) {
 979 if (current->is_sub_map) {
 980 vm_map_unlock(map);
 return(KERN_INVALID_ARGUMENT);
 }
981 if ((new_prot & current->max_protection) != new_prot) {
982 vm_map_unlock(map);
983 return(KERN_PROTECTION_FAILURE);
984 }
985
986 current = current->next;
987 }
988
989 /*
990 * Go back and fix up protections.
991 * [Note that clipping is not necessary the second time.]
992 */
993
994 current = entry;
995
996 while ((current != &map->header) && (current->start < end)) {
997 vm_prot_t old_prot;
998
999 vm_map_clip_end(map, current, end);
1000
1001 old_prot = current->protection;
1002 if (set_max)
1003 current->protection =
1004 (current->max_protection = new_prot) &
1005 old_prot;
1006 else
1007 current->protection = new_prot;
1008
1009 /*
1010 * Update physical map if necessary.
1011 * Worry about copy-on-write here -- CHECK THIS XXX
1012 */
1013
1014 if (current->protection != old_prot) {
1015
1016#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1017 VM_PROT_ALL)
1018#define max(a,b) ((a) > (b) ? (a) : (b))
1019
1020 if (current->is_a_map) {
1021 vm_map_entry_t share_entry;
1022 vm_offset_t share_end;
1023
1024 vm_map_lock(current->object.share_map);
1025 (void) vm_map_lookup_entry(
1026 current->object.share_map,
1027 current->offset,
1028 &share_entry);
1029 share_end = current->offset +
1030 (current->end - current->start);
1031 while ((share_entry !=
1032 &current->object.share_map->header) &&
1033 (share_entry->start < share_end)) {
1034
1035 pmap_protect(map->pmap,
1036 (max(share_entry->start,
1037 current->offset) -
1038 current->offset +
1039 current->start),
1040 min(share_entry->end,
1041 share_end) -
1042 current->offset +
1043 current->start,
1044 current->protection &
1045 MASK(share_entry));
1046
1047 share_entry = share_entry->next;
1048 }
1049 vm_map_unlock(current->object.share_map);
1050 }
1051 else
1052 pmap_protect(map->pmap, current->start,
1053 current->end,
1054 current->protection & MASK(entry));
1055#undef max
1056#undef MASK
1057 }
1058 current = current->next;
1059 }
1060
1061 vm_map_unlock(map);
1062 return(KERN_SUCCESS);
1063}
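/*
 * [Editor's note: illustrative sketch, not part of the original source.]
 * The set_max flag distinguishes a revocable change from a permanent
 * clamp.  Lowering only the current protection can be undone later;
 * lowering the maximum protection cannot, since the first pass above
 * rejects any new protection not covered by max_protection.
 *
 *	(void) vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *		... a later call can still restore write access ...
 *	(void) vm_map_protect(map, start, end, VM_PROT_READ, TRUE);
 *		... a later request including VM_PROT_WRITE now fails
 *		    with KERN_PROTECTION_FAILURE ...
 */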
1064
1065/*
1066 * vm_map_inherit:
1067 *
1068 * Sets the inheritance of the specified address
1069 * range in the target map. Inheritance
1070 * affects how the map will be shared with
1071 * child maps at the time of vm_map_fork.
1072 */
1073vm_map_inherit(map, start, end, new_inheritance)
1074 register vm_map_t map;
1075 register vm_offset_t start;
1076 register vm_offset_t end;
1077 register vm_inherit_t new_inheritance;
1078{
1079 register vm_map_entry_t entry;
1080 vm_map_entry_t temp_entry;
1081
1082 switch (new_inheritance) {
1083 case VM_INHERIT_NONE:
1084 case VM_INHERIT_COPY:
1085 case VM_INHERIT_SHARE:
1086 break;
1087 default:
1088 return(KERN_INVALID_ARGUMENT);
1089 }
1090
1091 vm_map_lock(map);
1092
1093 VM_MAP_RANGE_CHECK(map, start, end);
1094
1095 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1096 entry = temp_entry;
1097 vm_map_clip_start(map, entry, start);
1098 }
1099 else
1100 entry = temp_entry->next;
1101
1102 while ((entry != &map->header) && (entry->start < end)) {
1103 vm_map_clip_end(map, entry, end);
1104
1105 entry->inheritance = new_inheritance;
1106
1107 entry = entry->next;
1108 }
1109
1110 vm_map_unlock(map);
1111 return(KERN_SUCCESS);
1112}
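/*
 * [Editor's note: illustrative sketch, not part of the original source.]
 * Inheritance only takes effect at vmspace_fork time (below).  For
 * example, marking a range VM_INHERIT_SHARE before forking causes parent
 * and child to reference the same sharing map for that range, rather
 * than receiving copy-on-write copies.  The range values are assumptions
 * made for the example.
 *
 *	(void) vm_map_inherit(&vm1->vm_map, start, end, VM_INHERIT_SHARE);
 *	vm2 = vmspace_fork(vm1);
 */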
1113
1114/*
1115 * vm_map_pageable:
1116 *
1117 * Sets the pageability of the specified address
1118 * range in the target map. Regions specified
1119 * as not pageable require locked-down physical
1120 * memory and physical page maps.
1121 *
1122 * The map must not be locked, but a reference
1123 * must remain to the map throughout the call.
1124 */
1125vm_map_pageable(map, start, end, new_pageable)
1126 register vm_map_t map;
1127 register vm_offset_t start;
1128 register vm_offset_t end;
1129 register boolean_t new_pageable;
1130{
1131 register vm_map_entry_t entry;
1132 vm_map_entry_t temp_entry;
1133
1134 vm_map_lock(map);
1135
1136 VM_MAP_RANGE_CHECK(map, start, end);
1137
1138 /*
1139 * Only one pageability change may take place at one
1140 * time, since vm_fault assumes it will be called
1141 * only once for each wiring/unwiring. Therefore, we
1142 * have to make sure we're actually changing the pageability
1143 * for the entire region. We do so before making any changes.
1144 */
1145
1146 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1147 entry = temp_entry;
1148 vm_map_clip_start(map, entry, start);
1149 }
1150 else
1151 entry = temp_entry->next;
1152 temp_entry = entry;
1153
1154 /*
1155 * Actions are rather different for wiring and unwiring,
1156 * so we have two separate cases.
1157 */
1158
1159 if (new_pageable) {
1160
1161 /*
1162 * Unwiring. First ensure that the range to be
1163 * unwired is really wired down.
1164 */
1165 while ((entry != &map->header) && (entry->start < end)) {
1166
1167 if (entry->wired_count == 0) {
1168 vm_map_unlock(map);
1169 return(KERN_INVALID_ARGUMENT);
1170 }
1171 entry = entry->next;
1172 }
1173
1174 /*
1175 * Now decrement the wiring count for each region.
1176 * If a region becomes completely unwired,
1177 * unwire its physical pages and mappings.
1178 */
1179 lock_set_recursive(&map->lock);
1180
1181 entry = temp_entry;
1182 while ((entry != &map->header) && (entry->start < end)) {
1183 vm_map_clip_end(map, entry, end);
1184
1185 entry->wired_count--;
1186 if (entry->wired_count == 0)
1187 vm_fault_unwire(map, entry->start, entry->end);
1188
1189 entry = entry->next;
1190 }
1191 lock_clear_recursive(&map->lock);
1192 }
1193
1194 else {
1195 /*
1196 * Wiring. We must do this in two passes:
1197 *
1198 * 1. Holding the write lock, we increment the
1199 * wiring count. For any area that is not already
1200 * wired, we create any shadow objects that need
1201 * to be created.
1202 *
1203 * 2. We downgrade to a read lock, and call
1204 * vm_fault_wire to fault in the pages for any
1205 * newly wired area (wired_count is 1).
1206 *
1207 * Downgrading to a read lock for vm_fault_wire avoids
1208 * a possible deadlock with another thread that may have
1209 * faulted on one of the pages to be wired (it would mark
1210 * the page busy, blocking us, then in turn block on the
1211 * map lock that we hold). Because of problems in the
1212 * recursive lock package, we cannot upgrade to a write
1213 * lock in vm_map_lookup. Thus, any actions that require
1214 * the write lock must be done beforehand. Because we
1215 * keep the read lock on the map, the copy-on-write status
1216 * of the entries we modify here cannot change.
1217 */
1218
1219 /*
1220 * Pass 1.
1221 */
1222 entry = temp_entry;
1223 while ((entry != &map->header) && (entry->start < end)) {
1224 vm_map_clip_end(map, entry, end);
1225
1226 entry->wired_count++;
1227 if (entry->wired_count == 1) {
1228
1229 /*
1230 * Perform actions of vm_map_lookup that need
1231 * the write lock on the map: create a shadow
1232 * object for a copy-on-write region, or an
1233 * object for a zero-fill region.
1234 *
1235 * We don't have to do this for entries that
1236 * point to sharing maps, because we won't hold
1237 * the lock on the sharing map.
1238 */
1239 if (!entry->is_a_map) {
1240 if (entry->needs_copy &&
1241 ((entry->protection & VM_PROT_WRITE) != 0)) {
1242
1243 vm_object_shadow(&entry->object.vm_object,
1244 &entry->offset,
1245 (vm_size_t)(entry->end
1246 - entry->start));
1247 entry->needs_copy = FALSE;
1248 }
1249 else if (entry->object.vm_object == NULL) {
1250 entry->object.vm_object =
1251 vm_object_allocate((vm_size_t)(entry->end
1252 - entry->start));
1253 entry->offset = (vm_offset_t)0;
1254 }
1255 }
1256 }
1257
1258 entry = entry->next;
1259 }
1260
1261 /*
1262 * Pass 2.
1263 */
1264
1265 /*
1266 * HACK HACK HACK HACK
1267 *
1268 * If we are wiring in the kernel map or a submap of it,
1269 * unlock the map to avoid deadlocks. We trust that the
1270 * kernel threads are well-behaved, and therefore will
1271 * not do anything destructive to this region of the map
1272 * while we have it unlocked. We cannot trust user threads
1273 * to do the same.
1274 *
1275 * HACK HACK HACK HACK
1276 */
1277 if (vm_map_pmap(map) == kernel_pmap) {
1278 vm_map_unlock(map); /* trust me ... */
1279 }
1280 else {
1281 lock_set_recursive(&map->lock);
1282 lock_write_to_read(&map->lock);
1283 }
1284
1285 entry = temp_entry;
1286 while (entry != &map->header && entry->start < end) {
1287 if (entry->wired_count == 1) {
1288 vm_fault_wire(map, entry->start, entry->end);
1289 }
1290 entry = entry->next;
1291 }
1292
1293 if (vm_map_pmap(map) == kernel_pmap) {
1294 vm_map_lock(map);
1295 }
1296 else {
1297 lock_clear_recursive(&map->lock);
1298 }
1299 }
1300
1301 vm_map_unlock(map);
1302
1303 return(KERN_SUCCESS);
1304}
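/*
 * [Editor's note: illustrative sketch, not part of the original source.]
 * Note the sense of the flag: new_pageable == FALSE wires the range
 * (faulting its pages in and locking them down), new_pageable == TRUE
 * unwires it again.  A caller needing a temporarily non-pageable region
 * would bracket its use as follows.
 *
 *	if (vm_map_pageable(map, start, end, FALSE) == KERN_SUCCESS) {
 *		... the pages of [start, end) stay resident here ...
 *		(void) vm_map_pageable(map, start, end, TRUE);
 *	}
 */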
1305
1306/*
1307 * vm_map_entry_unwire: [ internal use only ]
1308 *
1309 * Make the region specified by this entry pageable.
1310 *
1311 * The map in question should be locked.
1312 * [This is the reason for this routine's existence.]
1313 */
1314void vm_map_entry_unwire(map, entry)
1315 vm_map_t map;
1316 register vm_map_entry_t entry;
1317{
1318 vm_fault_unwire(map, entry->start, entry->end);
1319 entry->wired_count = 0;
1320}
1321
1322/*
1323 * vm_map_entry_delete: [ internal use only ]
1324 *
1325 * Deallocate the given entry from the target map.
1326 */
1327void vm_map_entry_delete(map, entry)
1328 register vm_map_t map;
1329 register vm_map_entry_t entry;
1330{
1331 if (entry->wired_count != 0)
1332 vm_map_entry_unwire(map, entry);
1333
1334 vm_map_entry_unlink(map, entry);
1335 map->size -= entry->end - entry->start;
1336
1337 if (entry->is_a_map || entry->is_sub_map)
1338 vm_map_deallocate(entry->object.share_map);
1339 else
1340 vm_object_deallocate(entry->object.vm_object);
1341
1342 vm_map_entry_dispose(map, entry);
1343}
1344
1345/*
1346 * vm_map_delete: [ internal use only ]
1347 *
1348 * Deallocates the given address range from the target
1349 * map.
1350 *
1351 * When called with a sharing map, removes pages from
1352 * that region from all physical maps.
1353 */
1354vm_map_delete(map, start, end)
1355 register vm_map_t map;
1356 vm_offset_t start;
1357 register vm_offset_t end;
1358{
1359 register vm_map_entry_t entry;
1360 vm_map_entry_t first_entry;
1361
1362 /*
1363 * Find the start of the region, and clip it
1364 */
1365
1366 if (!vm_map_lookup_entry(map, start, &first_entry))
1367 entry = first_entry->next;
1368 else {
1369 entry = first_entry;
1370 vm_map_clip_start(map, entry, start);
1371
1372 /*
1373 * Fix the lookup hint now, rather than each
 1374 * time through the loop.
1375 */
1376
1377 SAVE_HINT(map, entry->prev);
1378 }
1379
1380 /*
1381 * Save the free space hint
1382 */
1383
1384 if (map->first_free->start >= start)
1385 map->first_free = entry->prev;
1386
1387 /*
1388 * Step through all entries in this region
1389 */
1390
1391 while ((entry != &map->header) && (entry->start < end)) {
1392 vm_map_entry_t next;
1393 register vm_offset_t s, e;
1394 register vm_object_t object;
1395
1396 vm_map_clip_end(map, entry, end);
1397
1398 next = entry->next;
1399 s = entry->start;
1400 e = entry->end;
1401
1402 /*
1403 * Unwire before removing addresses from the pmap;
1404 * otherwise, unwiring will put the entries back in
1405 * the pmap.
1406 */
1407
1408 object = entry->object.vm_object;
1409 if (entry->wired_count != 0)
1410 vm_map_entry_unwire(map, entry);
1411
1412 /*
1413 * If this is a sharing map, we must remove
1414 * *all* references to this data, since we can't
1415 * find all of the physical maps which are sharing
1416 * it.
1417 */
1418
1419 if (object == kernel_object || object == kmem_object)
1420 vm_object_page_remove(object, entry->offset,
1421 entry->offset + (e - s));
1422 else if (!map->is_main_map)
1423 vm_object_pmap_remove(object,
1424 entry->offset,
1425 entry->offset + (e - s));
1426 else
1427 pmap_remove(map->pmap, s, e);
1428
1429 /*
1430 * Delete the entry (which may delete the object)
1431 * only after removing all pmap entries pointing
1432 * to its pages. (Otherwise, its page frames may
1433 * be reallocated, and any modify bits will be
1434 * set in the wrong object!)
1435 */
1436
1437 vm_map_entry_delete(map, entry);
1438 entry = next;
1439 }
1440 return(KERN_SUCCESS);
1441}
1442
1443/*
1444 * vm_map_remove:
1445 *
1446 * Remove the given address range from the target map.
1447 * This is the exported form of vm_map_delete.
1448 */
1449vm_map_remove(map, start, end)
1450 register vm_map_t map;
1451 register vm_offset_t start;
1452 register vm_offset_t end;
1453{
1454 register int result;
1455
1456 vm_map_lock(map);
1457 VM_MAP_RANGE_CHECK(map, start, end);
1458 result = vm_map_delete(map, start, end);
1459 vm_map_unlock(map);
1460
1461 return(result);
1462}
1463
1464/*
1465 * vm_map_check_protection:
1466 *
1467 * Assert that the target map allows the specified
1468 * privilege on the entire address region given.
1469 * The entire region must be allocated.
1470 */
1471boolean_t vm_map_check_protection(map, start, end, protection)
1472 register vm_map_t map;
1473 register vm_offset_t start;
1474 register vm_offset_t end;
1475 register vm_prot_t protection;
1476{
1477 register vm_map_entry_t entry;
1478 vm_map_entry_t tmp_entry;
1479
1480 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1481 return(FALSE);
1482 }
1483
1484 entry = tmp_entry;
1485
1486 while (start < end) {
1487 if (entry == &map->header) {
1488 return(FALSE);
1489 }
1490
1491 /*
1492 * No holes allowed!
1493 */
1494
1495 if (start < entry->start) {
1496 return(FALSE);
1497 }
1498
1499 /*
1500 * Check protection associated with entry.
1501 */
1502
1503 if ((entry->protection & protection) != protection) {
1504 return(FALSE);
1505 }
1506
1507 /* go to next entry */
1508
1509 start = entry->end;
1510 entry = entry->next;
1511 }
1512 return(TRUE);
1513}
1514
1515/*
1516 * vm_map_copy_entry:
1517 *
1518 * Copies the contents of the source entry to the destination
1519 * entry. The entries *must* be aligned properly.
1520 */
1521void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1522 vm_map_t src_map, dst_map;
1523 register vm_map_entry_t src_entry, dst_entry;
1524{
1525 vm_object_t temp_object;
1526
1527 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1528 return;
1529
1530 if (dst_entry->object.vm_object != NULL &&
1531 !dst_entry->object.vm_object->internal)
1532 printf("vm_map_copy_entry: copying over permanent data!\n");
1533
1534 /*
1535 * If our destination map was wired down,
1536 * unwire it now.
1537 */
1538
1539 if (dst_entry->wired_count != 0)
1540 vm_map_entry_unwire(dst_map, dst_entry);
1541
1542 /*
1543 * If we're dealing with a sharing map, we
1544 * must remove the destination pages from
1545 * all maps (since we cannot know which maps
1546 * this sharing map belongs in).
1547 */
1548
1549 if (dst_map->is_main_map)
1550 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1551 else
1552 vm_object_pmap_remove(dst_entry->object.vm_object,
1553 dst_entry->offset,
1554 dst_entry->offset +
1555 (dst_entry->end - dst_entry->start));
1556
1557 if (src_entry->wired_count == 0) {
1558
1559 boolean_t src_needs_copy;
1560
1561 /*
1562 * If the source entry is marked needs_copy,
1563 * it is already write-protected.
1564 */
1565 if (!src_entry->needs_copy) {
1566
1567 boolean_t su;
1568
1569 /*
1570 * If the source entry has only one mapping,
1571 * we can just protect the virtual address
1572 * range.
1573 */
1574 if (!(su = src_map->is_main_map)) {
1575 simple_lock(&src_map->ref_lock);
1576 su = (src_map->ref_count == 1);
1577 simple_unlock(&src_map->ref_lock);
1578 }
1579
1580 if (su) {
1581 pmap_protect(src_map->pmap,
1582 src_entry->start,
1583 src_entry->end,
1584 src_entry->protection & ~VM_PROT_WRITE);
1585 }
1586 else {
1587 vm_object_pmap_copy(src_entry->object.vm_object,
1588 src_entry->offset,
1589 src_entry->offset + (src_entry->end
1590 -src_entry->start));
1591 }
1592 }
1593
1594 /*
1595 * Make a copy of the object.
1596 */
1597 temp_object = dst_entry->object.vm_object;
1598 vm_object_copy(src_entry->object.vm_object,
1599 src_entry->offset,
1600 (vm_size_t)(src_entry->end -
1601 src_entry->start),
1602 &dst_entry->object.vm_object,
1603 &dst_entry->offset,
1604 &src_needs_copy);
1605 /*
1606 * If we didn't get a copy-object now, mark the
1607 * source map entry so that a shadow will be created
1608 * to hold its changed pages.
1609 */
1610 if (src_needs_copy)
1611 src_entry->needs_copy = TRUE;
1612
1613 /*
1614 * The destination always needs to have a shadow
1615 * created.
1616 */
1617 dst_entry->needs_copy = TRUE;
1618
1619 /*
1620 * Mark the entries copy-on-write, so that write-enabling
1621 * the entry won't make copy-on-write pages writable.
1622 */
1623 src_entry->copy_on_write = TRUE;
1624 dst_entry->copy_on_write = TRUE;
1625 /*
1626 * Get rid of the old object.
1627 */
1628 vm_object_deallocate(temp_object);
1629
1630 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1631 dst_entry->end - dst_entry->start, src_entry->start);
1632 }
1633 else {
1634 /*
1635 * Of course, wired down pages can't be set copy-on-write.
1636 * Cause wired pages to be copied into the new
1637 * map by simulating faults (the new pages are
1638 * pageable)
1639 */
1640 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1641 }
1642}
1643
1644/*
1645 * vm_map_copy:
1646 *
1647 * Perform a virtual memory copy from the source
1648 * address map/range to the destination map/range.
1649 *
1650 * If src_destroy or dst_alloc is requested,
1651 * the source and destination regions should be
1652 * disjoint, not only in the top-level map, but
1653 * in the sharing maps as well. [The best way
1654 * to guarantee this is to use a new intermediate
1655 * map to make copies. This also reduces map
1656 * fragmentation.]
1657 */
1658vm_map_copy(dst_map, src_map,
1659 dst_addr, len, src_addr,
1660 dst_alloc, src_destroy)
1661 vm_map_t dst_map;
1662 vm_map_t src_map;
1663 vm_offset_t dst_addr;
1664 vm_size_t len;
1665 vm_offset_t src_addr;
1666 boolean_t dst_alloc;
1667 boolean_t src_destroy;
1668{
1669 register
1670 vm_map_entry_t src_entry;
1671 register
1672 vm_map_entry_t dst_entry;
1673 vm_map_entry_t tmp_entry;
1674 vm_offset_t src_start;
1675 vm_offset_t src_end;
1676 vm_offset_t dst_start;
1677 vm_offset_t dst_end;
1678 vm_offset_t src_clip;
1679 vm_offset_t dst_clip;
1680 int result;
1681 boolean_t old_src_destroy;
1682
1683 /*
1684 * XXX While we figure out why src_destroy screws up,
1685 * we'll do it by explicitly vm_map_delete'ing at the end.
1686 */
1687
1688 old_src_destroy = src_destroy;
1689 src_destroy = FALSE;
1690
1691 /*
1692 * Compute start and end of region in both maps
1693 */
1694
1695 src_start = src_addr;
1696 src_end = src_start + len;
1697 dst_start = dst_addr;
1698 dst_end = dst_start + len;
1699
1700 /*
1701 * Check that the region can exist in both source
1702 * and destination.
1703 */
1704
1705 if ((dst_end < dst_start) || (src_end < src_start))
1706 return(KERN_NO_SPACE);
1707
1708 /*
1709 * Lock the maps in question -- we avoid deadlock
1710 * by ordering lock acquisition by map value
1711 */
1712
1713 if (src_map == dst_map) {
1714 vm_map_lock(src_map);
1715 }
1716 else if ((int) src_map < (int) dst_map) {
1717 vm_map_lock(src_map);
1718 vm_map_lock(dst_map);
1719 } else {
1720 vm_map_lock(dst_map);
1721 vm_map_lock(src_map);
1722 }
1723
1724 result = KERN_SUCCESS;
1725
1726 /*
1727 * Check protections... source must be completely readable and
1728 * destination must be completely writable. [Note that if we're
1729 * allocating the destination region, we don't have to worry
1730 * about protection, but instead about whether the region
1731 * exists.]
1732 */
1733
1734 if (src_map->is_main_map && dst_map->is_main_map) {
1735 if (!vm_map_check_protection(src_map, src_start, src_end,
1736 VM_PROT_READ)) {
1737 result = KERN_PROTECTION_FAILURE;
1738 goto Return;
1739 }
1740
1741 if (dst_alloc) {
1742 /* XXX Consider making this a vm_map_find instead */
1743 if ((result = vm_map_insert(dst_map, NULL,
1744 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1745 goto Return;
1746 }
1747 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1748 VM_PROT_WRITE)) {
1749 result = KERN_PROTECTION_FAILURE;
1750 goto Return;
1751 }
1752 }
1753
1754 /*
1755 * Find the start entries and clip.
1756 *
1757 * Note that checking protection asserts that the
1758 * lookup cannot fail.
1759 *
1760 * Also note that we wait to do the second lookup
1761 * until we have done the first clip, as the clip
1762 * may affect which entry we get!
1763 */
1764
1765 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1766 src_entry = tmp_entry;
1767 vm_map_clip_start(src_map, src_entry, src_start);
1768
1769 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1770 dst_entry = tmp_entry;
1771 vm_map_clip_start(dst_map, dst_entry, dst_start);
1772
1773 /*
1774 * If both source and destination entries are the same,
1775 * retry the first lookup, as it may have changed.
1776 */
1777
1778 if (src_entry == dst_entry) {
1779 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1780 src_entry = tmp_entry;
1781 }
1782
1783 /*
1784 * If source and destination entries are still the same,
1785 * a null copy is being performed.
1786 */
1787
1788 if (src_entry == dst_entry)
1789 goto Return;
1790
1791 /*
1792 * Go through entries until we get to the end of the
1793 * region.
1794 */
1795
1796 while (src_start < src_end) {
1797 /*
1798 * Clip the entries to the endpoint of the entire region.
1799 */
1800
1801 vm_map_clip_end(src_map, src_entry, src_end);
1802 vm_map_clip_end(dst_map, dst_entry, dst_end);
1803
1804 /*
1805 * Clip each entry to the endpoint of the other entry.
1806 */
1807
1808 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1809 vm_map_clip_end(src_map, src_entry, src_clip);
1810
1811 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1812 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1813
1814 /*
1815 * Both entries now match in size and relative endpoints.
1816 *
1817 * If both entries refer to a VM object, we can
1818 * deal with them now.
1819 */
1820
1821 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1822 vm_map_copy_entry(src_map, dst_map, src_entry,
1823 dst_entry);
1824 }
1825 else {
1826 register vm_map_t new_dst_map;
1827 vm_offset_t new_dst_start;
1828 vm_size_t new_size;
1829 vm_map_t new_src_map;
1830 vm_offset_t new_src_start;
1831
1832 /*
1833 * We have to follow at least one sharing map.
1834 */
1835
1836 new_size = (dst_entry->end - dst_entry->start);
1837
1838 if (src_entry->is_a_map) {
1839 new_src_map = src_entry->object.share_map;
1840 new_src_start = src_entry->offset;
1841 }
1842 else {
1843 new_src_map = src_map;
1844 new_src_start = src_entry->start;
1845 lock_set_recursive(&src_map->lock);
1846 }
1847
1848 if (dst_entry->is_a_map) {
1849 vm_offset_t new_dst_end;
1850
1851 new_dst_map = dst_entry->object.share_map;
1852 new_dst_start = dst_entry->offset;
1853
1854 /*
1855 * Since the destination sharing entries
1856 * will be merely deallocated, we can
1857 * do that now, and replace the region
1858 * with a null object. [This prevents
1859 * splitting the source map to match
1860 * the form of the destination map.]
1861 * Note that we can only do so if the
1862 * source and destination do not overlap.
1863 */
1864
1865 new_dst_end = new_dst_start + new_size;
1866
1867 if (new_dst_map != new_src_map) {
1868 vm_map_lock(new_dst_map);
1869 (void) vm_map_delete(new_dst_map,
1870 new_dst_start,
1871 new_dst_end);
1872 (void) vm_map_insert(new_dst_map,
1873 NULL,
1874 (vm_offset_t) 0,
1875 new_dst_start,
1876 new_dst_end);
1877 vm_map_unlock(new_dst_map);
1878 }
1879 }
1880 else {
1881 new_dst_map = dst_map;
1882 new_dst_start = dst_entry->start;
1883 lock_set_recursive(&dst_map->lock);
1884 }
1885
1886 /*
1887 * Recursively copy the sharing map.
1888 */
1889
1890 (void) vm_map_copy(new_dst_map, new_src_map,
1891 new_dst_start, new_size, new_src_start,
1892 FALSE, FALSE);
1893
1894 if (dst_map == new_dst_map)
1895 lock_clear_recursive(&dst_map->lock);
1896 if (src_map == new_src_map)
1897 lock_clear_recursive(&src_map->lock);
1898 }
1899
1900 /*
1901 * Update variables for next pass through the loop.
1902 */
1903
1904 src_start = src_entry->end;
1905 src_entry = src_entry->next;
1906 dst_start = dst_entry->end;
1907 dst_entry = dst_entry->next;
1908
1909 /*
1910 * If the source is to be destroyed, here is the
1911 * place to do it.
1912 */
1913
1914 if (src_destroy && src_map->is_main_map &&
1915 dst_map->is_main_map)
1916 vm_map_entry_delete(src_map, src_entry->prev);
1917 }
1918
1919 /*
1920 * Update the physical maps as appropriate
1921 */
1922
1923 if (src_map->is_main_map && dst_map->is_main_map) {
1924 if (src_destroy)
1925 pmap_remove(src_map->pmap, src_addr, src_addr + len);
1926 }
1927
1928 /*
1929 * Unlock the maps
1930 */
1931
1932 Return: ;
1933
1934 if (old_src_destroy)
1935 vm_map_delete(src_map, src_addr, src_addr + len);
1936
1937 vm_map_unlock(src_map);
1938 if (src_map != dst_map)
1939 vm_map_unlock(dst_map);
1940
1941 return(result);
1942}
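/*
 * [Editor's note: illustrative sketch, not part of the original source.]
 * A typical use is copying len bytes of virtual memory between two maps
 * without touching the underlying pages, letting the call allocate the
 * destination range (dst_alloc = TRUE) and leaving the source mapped
 * (src_destroy = FALSE).  The map pointers and addresses are assumptions
 * made for the example.
 *
 *	result = vm_map_copy(dst_map, src_map, dst_addr, len, src_addr,
 *	    TRUE, FALSE);
 */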
1943
1944/*
1945 * vmspace_fork:
1946 * Create a new process vmspace structure and vm_map
1947 * based on those of an existing process. The new map
1948 * is based on the old map, according to the inheritance
1949 * values on the regions in that map.
1950 *
1951 * The source map must not be locked.
1952 */
1953struct vmspace *
1954vmspace_fork(vm1)
1955 register struct vmspace *vm1;
1956{
1957 register struct vmspace *vm2;
1958 vm_map_t old_map = &vm1->vm_map;
1959 vm_map_t new_map;
1960 vm_map_entry_t old_entry;
1961 vm_map_entry_t new_entry;
1962 pmap_t new_pmap;
1963
1964 vm_map_lock(old_map);
1965
1966 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1967 old_map->entries_pageable);
1968 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1969 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1970 new_pmap = &vm2->vm_pmap; /* XXX */
1971 new_map = &vm2->vm_map; /* XXX */
1972
1973 old_entry = old_map->header.next;
1974
1975 while (old_entry != &old_map->header) {
1976 if (old_entry->is_sub_map)
1977 panic("vm_map_fork: encountered a submap");
1978
1979 switch (old_entry->inheritance) {
1980 case VM_INHERIT_NONE:
1981 break;
1982
1983 case VM_INHERIT_SHARE:
1984 /*
1985 * If we don't already have a sharing map:
1986 */
1987
1988 if (!old_entry->is_a_map) {
1989 vm_map_t new_share_map;
1990 vm_map_entry_t new_share_entry;
1991
1992 /*
1993 * Create a new sharing map
1994 */
1995
1996 new_share_map = vm_map_create(NULL,
1997 old_entry->start,
1998 old_entry->end,
1999 TRUE);
2000 new_share_map->is_main_map = FALSE;
2001
2002 /*
2003 * Create the only sharing entry from the
2004 * old task map entry.
2005 */
2006
2007 new_share_entry =
2008 vm_map_entry_create(new_share_map);
2009 *new_share_entry = *old_entry;
2010
2011 /*
2012 * Insert the entry into the new sharing
2013 * map
2014 */
2015
2016 vm_map_entry_link(new_share_map,
2017 new_share_map->header.prev,
2018 new_share_entry);
2019
2020 /*
2021 * Fix up the task map entry to refer
2022 * to the sharing map now.
2023 */
2024
2025 old_entry->is_a_map = TRUE;
2026 old_entry->object.share_map = new_share_map;
2027 old_entry->offset = old_entry->start;
2028 }
2029
2030 /*
2031 * Clone the entry, referencing the sharing map.
2032 */
2033
2034 new_entry = vm_map_entry_create(new_map);
2035 *new_entry = *old_entry;
2036 vm_map_reference(new_entry->object.share_map);
2037
2038 /*
2039 * Insert the entry into the new map -- we
2040 * know we're inserting at the end of the new
2041 * map.
2042 */
2043
2044 vm_map_entry_link(new_map, new_map->header.prev,
2045 new_entry);
2046
2047 /*
2048 * Update the physical map
2049 */
2050
2051 pmap_copy(new_map->pmap, old_map->pmap,
2052 new_entry->start,
2053 (old_entry->end - old_entry->start),
2054 old_entry->start);
2055 break;
2056
2057 case VM_INHERIT_COPY:
2058 /*
2059 * Clone the entry and link into the map.
2060 */
2061
2062 new_entry = vm_map_entry_create(new_map);
2063 *new_entry = *old_entry;
2064 new_entry->wired_count = 0;
2065 new_entry->object.vm_object = NULL;
2066 new_entry->is_a_map = FALSE;
2067 vm_map_entry_link(new_map, new_map->header.prev,
2068 new_entry);
2069 if (old_entry->is_a_map) {
2070 int check;
2071
2072 check = vm_map_copy(new_map,
2073 old_entry->object.share_map,
2074 new_entry->start,
2075 (vm_size_t)(new_entry->end -
2076 new_entry->start),
2077 old_entry->offset,
2078 FALSE, FALSE);
2079 if (check != KERN_SUCCESS)
2080 printf("vm_map_fork: copy in share_map region failed\n");
2081 }
2082 else {
2083 vm_map_copy_entry(old_map, new_map, old_entry,
2084 new_entry);
2085 }
2086 break;
2087 }
2088 old_entry = old_entry->next;
2089 }
2090
2091 new_map->size = old_map->size;
2092 vm_map_unlock(old_map);
2093
2094 return(vm2);
2095}
2096
2097/*
2098 * vm_map_lookup:
2099 *
2100 * Finds the VM object, offset, and
2101 * protection for a given virtual address in the
2102 * specified map, assuming a page fault of the
2103 * type specified.
2104 *
2105 * Leaves the map in question locked for read; return
2106 * values are guaranteed until a vm_map_lookup_done
2107 * call is performed. Note that the map argument
2108 * is in/out; the returned map must be used in
2109 * the call to vm_map_lookup_done.
2110 *
2111 * A handle (out_entry) is returned for use in
2112 * vm_map_lookup_done, to make that fast.
2113 *
2114 * If a lookup is requested with "write protection"
2115 * specified, the map may be changed to perform virtual
2116 * copying operations, although the data referenced will
2117 * remain the same.
2118 */
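/*
 * [Editor's note: illustrative sketch, not part of the original source.]
 * The intended calling pattern, e.g. from a page-fault handler, pairs
 * every successful lookup with vm_map_lookup_done on the (possibly
 * different) map handed back through the IN/OUT map argument.  The
 * variable names below are assumptions for the example.
 *
 *	vm_map_t map = original_map;
 *
 *	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *	    &object, &offset, &prot, &wired, &su);
 *	if (result == KERN_SUCCESS) {
 *		... consult object and offset, e.g. to locate the page ...
 *		vm_map_lookup_done(map, entry);
 *	}
 */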
2119vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2120 object, offset, out_prot, wired, single_use)
2121 vm_map_t *var_map; /* IN/OUT */
2122 register vm_offset_t vaddr;
2123 register vm_prot_t fault_type;
2124
2125 vm_map_entry_t *out_entry; /* OUT */
2126 vm_object_t *object; /* OUT */
2127 vm_offset_t *offset; /* OUT */
2128 vm_prot_t *out_prot; /* OUT */
2129 boolean_t *wired; /* OUT */
2130 boolean_t *single_use; /* OUT */
2131{
2132 vm_map_t share_map;
2133 vm_offset_t share_offset;
2134 register vm_map_entry_t entry;
2135 register vm_map_t map = *var_map;
2136 register vm_prot_t prot;
2137 register boolean_t su;
2138
2139 RetryLookup: ;
2140
2141 /*
2142 * Lookup the faulting address.
2143 */
2144
2145 vm_map_lock_read(map);
2146
2147#define RETURN(why) \
2148 { \
2149 vm_map_unlock_read(map); \
2150 return(why); \
2151 }
2152
2153 /*
2154 * If the map has an interesting hint, try it before calling
2155 * full blown lookup routine.
2156 */
2157
2158 simple_lock(&map->hint_lock);
2159 entry = map->hint;
2160 simple_unlock(&map->hint_lock);
2161
2162 *out_entry = entry;
2163
2164 if ((entry == &map->header) ||
2165 (vaddr < entry->start) || (vaddr >= entry->end)) {
2166 vm_map_entry_t tmp_entry;
2167
2168 /*
2169 * Entry was either not a valid hint, or the vaddr
2170 * was not contained in the entry, so do a full lookup.
2171 */
2172 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2173 RETURN(KERN_INVALID_ADDRESS);
2174
2175 entry = tmp_entry;
2176 *out_entry = entry;
2177 }
2178
2179 /*
2180 * Handle submaps.
2181 */
2182
2183 if (entry->is_sub_map) {
2184 vm_map_t old_map = map;
2185
2186 *var_map = map = entry->object.sub_map;
2187 vm_map_unlock_read(old_map);
2188 goto RetryLookup;
2189 }
2190
2191 /*
2192 * Check whether this task is allowed to have
2193 * this page.
2194 */
2195
2196 prot = entry->protection;
2197 if ((fault_type & (prot)) != fault_type)
2198 RETURN(KERN_PROTECTION_FAILURE);
2199
2200 /*
2201 * If this page is not pageable, we have to get
2202 * it for all possible accesses.
2203 */
2204
2205 if (*wired = (entry->wired_count != 0))
2206 prot = fault_type = entry->protection;
2207
2208 /*
2209 * If we don't already have a VM object, track
2210 * it down.
2211 */
2212
2213 if (su = !entry->is_a_map) {
2214 share_map = map;
2215 share_offset = vaddr;
2216 }
2217 else {
2218 vm_map_entry_t share_entry;
2219
2220 /*
2221 * Compute the sharing map, and offset into it.
2222 */
2223
2224 share_map = entry->object.share_map;
2225 share_offset = (vaddr - entry->start) + entry->offset;
2226
2227 /*
2228 * Look for the backing store object and offset
2229 */
2230
2231 vm_map_lock_read(share_map);
2232
2233 if (!vm_map_lookup_entry(share_map, share_offset,
2234 &share_entry)) {
2235 vm_map_unlock_read(share_map);
2236 RETURN(KERN_INVALID_ADDRESS);
2237 }
2238 entry = share_entry;
2239 }
2240
2241 /*
2242 * If the entry was copy-on-write, we either resolve the copy now or demote the access allowed.
2243 */
2244
2245 if (entry->needs_copy) {
2246 /*
2247 * If we want to write the page, we may as well
2248 * handle that now since we've got the sharing
2249 * map locked.
2250 *
2251 * If we don't need to write the page, we just
2252 * demote the permissions allowed.
2253 */
2254
2255 if (fault_type & VM_PROT_WRITE) {
2256 /*
2257 * Make a new object, and place it in the
2258 * object chain. Note that no new references
2259 * have appeared -- one just moved from the
2260 * share map to the new object.
2261 */
2262
2263 if (lock_read_to_write(&share_map->lock)) {
2264 if (share_map != map)
2265 vm_map_unlock_read(map);
2266 goto RetryLookup;
2267 }
2268
2269 vm_object_shadow(
2270 &entry->object.vm_object,
2271 &entry->offset,
2272 (vm_size_t) (entry->end - entry->start));
2273
2274 entry->needs_copy = FALSE;
2275
2276 lock_write_to_read(&share_map->lock);
2277 }
2278 else {
2279 /*
2280 * We're attempting to read a copy-on-write
2281 * page -- don't allow writes.
2282 */
2283
2284 prot &= (~VM_PROT_WRITE);
2285 }
2286 }
2287
2288 /*
2289 * Create an object if necessary.
2290 */
2291 if (entry->object.vm_object == NULL) {
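		/*
		 * Entries set up without a backing object (anonymous,
		 * zero-fill memory) are given one here, lazily, the
		 * first time they are faulted on.
		 */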
2292
2293 if (lock_read_to_write(&share_map->lock)) {
2294 if (share_map != map)
2295 vm_map_unlock_read(map);
2296 goto RetryLookup;
2297 }
2298
2299 entry->object.vm_object = vm_object_allocate(
2300 (vm_size_t)(entry->end - entry->start));
2301 entry->offset = 0;
2302 lock_write_to_read(&share_map->lock);
2303 }
2304
2305 /*
2306 * Return the object/offset from this entry. If the entry
2307 * was copy-on-write or empty, it has been fixed up.
2308 */
2309
2310 *offset = (share_offset - entry->start) + entry->offset;
2311 *object = entry->object.vm_object;
2312
2313 /*
2314 * Return whether this is the only map sharing this data.
2315 */
2316
2317 if (!su) {
2318 simple_lock(&share_map->ref_lock);
2319 su = (share_map->ref_count == 1);
2320 simple_unlock(&share_map->ref_lock);
2321 }
2322
2323 *out_prot = prot;
2324 *single_use = su;
2325
2326 return(KERN_SUCCESS);
2327
2328#undef RETURN
2329}
2330
2331/*
2332 * vm_map_lookup_done:
2333 *
2334 * Releases locks acquired by a vm_map_lookup
2335 * (according to the handle returned by that lookup).
2336 */
2337
2338void vm_map_lookup_done(map, entry)
2339 register vm_map_t map;
2340 vm_map_entry_t entry;
2341{
2342 /*
2343 * If this entry references a map, unlock it first.
2344 */
2345
2346 if (entry->is_a_map)
2347 vm_map_unlock_read(entry->object.share_map);
2348
2349 /*
2350 * Unlock the main-level map
2351 */
2352
2353 vm_map_unlock_read(map);
2354}
2355
2356/*
2357 * Routine: vm_map_simplify
2358 * Purpose:
2359 * Attempt to simplify the map representation in
2360 * the vicinity of the given starting address.
2361 * Note:
2362 * This routine is intended primarily to keep the
2363 * kernel maps more compact -- they generally don't
2364 * benefit from the "expand a map entry" technology
2365 * at allocation time because the adjacent entry
2366 * is often wired down.
2367 */
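/*
 * Worked example (addresses made up for illustration): two adjacent
 * entries in a main map,
 *
 *	[0xfe200000, 0xfe204000)  object X, offset 0
 *	[0xfe204000, 0xfe208000)  object X, offset 0x4000
 *
 * that also agree in protection, inheritance, wiring and the copy
 * flags collapse into the single entry
 *
 *	[0xfe200000, 0xfe208000)  object X, offset 0
 *
 * and the second entry's reference to X is released.
 */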
2368void vm_map_simplify(map, start)
2369 vm_map_t map;
2370 vm_offset_t start;
2371{
2372 vm_map_entry_t this_entry;
2373 vm_map_entry_t prev_entry;
2374
2375 vm_map_lock(map);
2376 if (
2377 (vm_map_lookup_entry(map, start, &this_entry)) &&
2378 ((prev_entry = this_entry->prev) != &map->header) &&
2379
2380 (prev_entry->end == start) &&
2381 (map->is_main_map) &&
2382
2383 (prev_entry->is_a_map == FALSE) &&
2384 (prev_entry->is_sub_map == FALSE) &&
2385
2386 (this_entry->is_a_map == FALSE) &&
2387 (this_entry->is_sub_map == FALSE) &&
2388
2389 (prev_entry->inheritance == this_entry->inheritance) &&
2390 (prev_entry->protection == this_entry->protection) &&
2391 (prev_entry->max_protection == this_entry->max_protection) &&
2392 (prev_entry->wired_count == this_entry->wired_count) &&
2393
2394 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2395 (prev_entry->needs_copy == this_entry->needs_copy) &&
2396
2397 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2398 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2399 == this_entry->offset)
2400 ) {
2401 if (map->first_free == this_entry)
2402 map->first_free = prev_entry;
2403
2404 SAVE_HINT(map, prev_entry);
2405 vm_map_entry_unlink(map, this_entry);
2406 prev_entry->end = this_entry->end;
2407 vm_object_deallocate(this_entry->object.vm_object);
2408 vm_map_entry_dispose(map, this_entry);
2409 }
2410 vm_map_unlock(map);
2411}
2412
2413/*
2414 * vm_map_print: [ debug ]
2415 */
2416void vm_map_print(map, full)
2417 register vm_map_t map;
2418 boolean_t full;
2419{
2420 register vm_map_entry_t entry;
2421 extern int indent;
2422
2423 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2424 (map->is_main_map ? "Task" : "Share"),
2425 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2426 map->timestamp);
2427
2428 if (!full && indent)
2429 return;
2430
2431 indent += 2;
2432 for (entry = map->header.next; entry != &map->header;
2433 entry = entry->next) {
2434 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2435 (int) entry, (int) entry->start, (int) entry->end);
2436 if (map->is_main_map) {
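			/* indexed by the VM_INHERIT_* codes */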
2437 static char *inheritance_name[4] =
2438 { "share", "copy", "none", "donate_copy"};
2439 printf("prot=%x/%x/%s, ",
2440 entry->protection,
2441 entry->max_protection,
2442 inheritance_name[entry->inheritance]);
2443 if (entry->wired_count != 0)
2444 printf("wired, ");
2445 }
2446
2447 if (entry->is_a_map || entry->is_sub_map) {
2448 printf("share=0x%x, offset=0x%x\n",
2449 (int) entry->object.share_map,
2450 (int) entry->offset);
2451 if ((entry->prev == &map->header) ||
2452 (!entry->prev->is_a_map) ||
2453 (entry->prev->object.share_map !=
2454 entry->object.share_map)) {
2455 indent += 2;
2456 vm_map_print(entry->object.share_map, full);
2457 indent -= 2;
2458 }
2459
2460 }
2461 else {
2462 printf("object=0x%x, offset=0x%x",
2463 (int) entry->object.vm_object,
2464 (int) entry->offset);
2465 if (entry->copy_on_write)
2466 printf(", copy (%s)",
2467 entry->needs_copy ? "needed" : "done");
2468 printf("\n");
2469
2470 if ((entry->prev == &map->header) ||
2471 (entry->prev->is_a_map) ||
2472 (entry->prev->object.vm_object !=
2473 entry->object.vm_object)) {
2474 indent += 2;
2475 vm_object_print(entry->object.vm_object, full);
2476 indent -= 2;
2477 }
2478 }
2479 }
2480 indent -= 2;
2481}