[unix-history] / sys / vm / vm_map.c
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_map.c 7.3 (Berkeley) 4/21/91
37 * $Id$
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 * Virtual memory mapping module.
69 */
70
 71#include "ddb.h"
 72#include "param.h"
 73#include "systm.h"
 74#include "malloc.h"
75#include "vm.h"
76#include "vm_page.h"
77#include "vm_object.h"
78
79/*
80 * Virtual memory maps provide for the mapping, protection,
81 * and sharing of virtual memory objects. In addition,
82 * this module provides for an efficient virtual copy of
83 * memory from one map to another.
84 *
85 * Synchronization is required prior to most operations.
86 *
87 * Maps consist of an ordered doubly-linked list of simple
88 * entries; a single hint is used to speed up lookups.
89 *
90 * In order to properly represent the sharing of virtual
91 * memory regions among maps, the map structure is bi-level.
92 * Top-level ("address") maps refer to regions of sharable
93 * virtual memory. These regions are implemented as
94 * ("sharing") maps, which then refer to the actual virtual
95 * memory objects. When two address maps "share" memory,
96 * their top-level maps both have references to the same
97 * sharing map. When memory is virtual-copied from one
98 * address map to another, the references in the sharing
99 * maps are actually copied -- no copying occurs at the
100 * virtual memory object level.
101 *
 102 * Since portions of maps are specified by start/end addresses,
103 * which may not align with existing map entries, all
104 * routines merely "clip" entries to these start/end values.
105 * [That is, an entry is split into two, bordering at a
106 * start or end value.] Note that these clippings may not
107 * always be necessary (as the two resulting entries are then
108 * not changed); however, the clipping is done for convenience.
109 * No attempt is currently made to "glue back together" two
110 * abutting entries.
111 *
112 * As mentioned above, virtual copy operations are performed
113 * by copying VM object references from one sharing map to
114 * another, and then marking both regions as copy-on-write.
115 * It is important to note that only one writeable reference
116 * to a VM object region exists in any map -- this means that
117 * shadow object creation can be delayed until a write operation
118 * occurs.
119 */
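/*
 * Illustrative example of the clipping rule above (the addresses are made
 * up): if an entry covers [0x1000, 0x4000) and an operation targets
 * [0x2000, 0x3000), vm_map_clip_start splits off [0x1000, 0x2000) and
 * vm_map_clip_end splits off [0x3000, 0x4000), leaving a [0x2000, 0x3000)
 * entry for the operation to modify.  The two leftover pieces are not
 * glued back together afterwards.
 */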
120
121/*
122 * vm_map_startup:
123 *
124 * Initialize the vm_map module. Must be called before
125 * any other vm_map routines.
126 *
127 * Map and entry structures are allocated from the general
128 * purpose memory pool with some exceptions:
129 *
130 * - The kernel map and kmem submap are allocated statically.
131 * - Kernel map entries are allocated out of a static pool.
132 *
133 * These restrictions are necessary since malloc() uses the
134 * maps and requires map entries.
135 */
136
137vm_offset_t kentry_data;
138vm_size_t kentry_data_size;
139vm_map_entry_t kentry_free;
140vm_map_t kmap_free;
141
142void vm_map_startup()
143{
144 register int i;
145 register vm_map_entry_t mep;
146 vm_map_t mp;
147
148 /*
149 * Static map structures for allocation before initialization of
150 * kernel map or kmem map. vm_map_create knows how to deal with them.
151 */
152 kmap_free = mp = (vm_map_t) kentry_data;
153 i = MAX_KMAP;
154 while (--i > 0) {
155 mp->header.next = (vm_map_entry_t) (mp + 1);
156 mp++;
157 }
158 mp++->header.next = NULL;
159
160 /*
161 * Form a free list of statically allocated kernel map entries
162 * with the rest.
163 */
164 kentry_free = mep = (vm_map_entry_t) mp;
165 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
166 while (--i > 0) {
167 mep->next = mep + 1;
168 mep++;
169 }
170 mep->next = NULL;
171}
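/*
 * A sketch of the free lists built above (descriptive note; MAX_KMAP is a
 * compile-time constant from the VM headers):
 *
 *	kentry_data:  [ MAX_KMAP struct vm_map ][ remainder carved into
 *	                struct vm_map_entry's ]
 *
 * kmap_free heads the first list and kentry_free the second.  Each
 * "while (--i > 0)" loop links i-1 forward pointers; the statement after it
 * terminates the last element with NULL, and the post-increment in the first
 * case leaves mp pointing just past the map area, where the entry pool
 * begins.
 */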
172
173/*
174 * Allocate a vmspace structure, including a vm_map and pmap,
175 * and initialize those structures. The refcnt is set to 1.
176 * The remaining fields must be initialized by the caller.
177 */
178struct vmspace *
179vmspace_alloc(min, max, pageable)
180 vm_offset_t min, max;
181 int pageable;
182{
183 register struct vmspace *vm;
184
185 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
186 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
187 vm_map_init(&vm->vm_map, min, max, pageable);
188 pmap_pinit(&vm->vm_pmap);
189 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
190 vm->vm_refcnt = 1;
191 return (vm);
192}
193
194void
195vmspace_free(vm)
196 register struct vmspace *vm;
197{
198
199 if (--vm->vm_refcnt == 0) {
200 /*
201 * Lock the map, to wait out all other references to it.
202 * Delete all of the mappings and pages they hold,
203 * then call the pmap module to reclaim anything left.
204 */
205 vm_map_lock(&vm->vm_map);
206 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
207 vm->vm_map.max_offset);
208 pmap_release(&vm->vm_pmap);
209 FREE(vm, M_VMMAP);
210 }
211}
212
213/*
214 * vm_map_create:
215 *
216 * Creates and returns a new empty VM map with
217 * the given physical map structure, and having
218 * the given lower and upper address bounds.
219 */
220vm_map_t vm_map_create(pmap, min, max, pageable)
221 pmap_t pmap;
222 vm_offset_t min, max;
223 boolean_t pageable;
224{
225 register vm_map_t result;
226 extern vm_map_t kernel_map, kmem_map;
227
228 if (kmem_map == NULL) {
229 result = kmap_free;
230 kmap_free = (vm_map_t) result->header.next;
231 if (result == NULL)
232 panic("vm_map_create: out of maps");
233 } else
234 MALLOC(result, vm_map_t, sizeof(struct vm_map),
235 M_VMMAP, M_WAITOK);
236
237 vm_map_init(result, min, max, pageable);
238 result->pmap = pmap;
239 return(result);
240}
241
242/*
243 * Initialize an existing vm_map structure
244 * such as that in the vmspace structure.
245 * The pmap is set elsewhere.
246 */
247void
248vm_map_init(map, min, max, pageable)
249 register struct vm_map *map;
250 vm_offset_t min, max;
251 boolean_t pageable;
252{
253 map->header.next = map->header.prev = &map->header;
254 map->nentries = 0;
255 map->size = 0;
256 map->ref_count = 1;
257 map->is_main_map = TRUE;
258 map->min_offset = min;
259 map->max_offset = max;
260 map->entries_pageable = pageable;
261 map->first_free = &map->header;
262 map->hint = &map->header;
263 map->timestamp = 0;
264 lock_init(&map->lock, TRUE);
265 simple_lock_init(&map->ref_lock);
266 simple_lock_init(&map->hint_lock);
267}
268
269/*
270 * vm_map_entry_create: [ internal use only ]
271 *
272 * Allocates a VM map entry for insertion.
273 * No entry fields are filled in. This routine is
274 */
275vm_map_entry_t vm_map_entry_create(map)
276 vm_map_t map;
277{
278 vm_map_entry_t entry;
279 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
280
281 if (map == kernel_map || map == kmem_map || map == mb_map
282 || map == buffer_map || map == pager_map) {
283 if (entry = kentry_free)
284 kentry_free = kentry_free->next;
285 } else
286 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
287 M_VMMAPENT, M_WAITOK);
288 if (entry == NULL)
289 panic("vm_map_entry_create: out of map entries");
290
291 return(entry);
292}
293
294/*
295 * vm_map_entry_dispose: [ internal use only ]
296 *
297 * Inverse of vm_map_entry_create.
298 */
299void vm_map_entry_dispose(map, entry)
300 vm_map_t map;
301 vm_map_entry_t entry;
302{
303 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
304
305 if (map == kernel_map || map == kmem_map || map == mb_map
306 || map == buffer_map || map == pager_map) {
307 entry->next = kentry_free;
308 kentry_free = entry;
309 } else
310 FREE(entry, M_VMMAPENT);
311}
312
313/*
314 * vm_map_entry_{un,}link:
315 *
316 * Insert/remove entries from maps.
317 */
318#define vm_map_entry_link(map, after_where, entry) \
319 { \
320 (map)->nentries++; \
321 (entry)->prev = (after_where); \
322 (entry)->next = (after_where)->next; \
323 (entry)->prev->next = (entry); \
324 (entry)->next->prev = (entry); \
325 }
326#define vm_map_entry_unlink(map, entry) \
327 { \
328 (map)->nentries--; \
329 (entry)->next->prev = (entry)->prev; \
330 (entry)->prev->next = (entry)->next; \
331 }
332
333/*
334 * vm_map_reference:
335 *
336 * Creates another valid reference to the given map.
337 *
338 */
339void vm_map_reference(map)
340 register vm_map_t map;
341{
342 if (map == NULL)
343 return;
344
345 simple_lock(&map->ref_lock);
346 map->ref_count++;
347 simple_unlock(&map->ref_lock);
348}
349
350/*
351 * vm_map_deallocate:
352 *
353 * Removes a reference from the specified map,
354 * destroying it if no references remain.
355 * The map should not be locked.
356 */
357void vm_map_deallocate(map)
358 register vm_map_t map;
359{
360 register int c;
361
362 if (map == NULL)
363 return;
364
365 simple_lock(&map->ref_lock);
366 c = --map->ref_count;
367 simple_unlock(&map->ref_lock);
368
369 if (c > 0) {
370 return;
371 }
372
373 /*
374 * Lock the map, to wait out all other references
375 * to it.
376 */
377
378 vm_map_lock(map);
379
380 (void) vm_map_delete(map, map->min_offset, map->max_offset);
381
382 pmap_destroy(map->pmap);
383
384 FREE(map, M_VMMAP);
385}
386
387/*
388 * vm_map_insert: [ internal use only ]
389 *
390 * Inserts the given whole VM object into the target
391 * map at the specified address range. The object's
392 * size should match that of the address range.
393 *
394 * Requires that the map be locked, and leaves it so.
395 */
396vm_map_insert(map, object, offset, start, end)
397 vm_map_t map;
398 vm_object_t object;
399 vm_offset_t offset;
400 vm_offset_t start;
401 vm_offset_t end;
402{
403 register vm_map_entry_t new_entry;
404 register vm_map_entry_t prev_entry;
405 vm_map_entry_t temp_entry;
406
407 /*
408 * Check that the start and end points are not bogus.
409 */
410
411 if ((start < map->min_offset) || (end > map->max_offset) ||
412 (start >= end))
413 return(KERN_INVALID_ADDRESS);
414
415 /*
416 * Find the entry prior to the proposed
417 * starting address; if it's part of an
418 * existing entry, this range is bogus.
419 */
420
421 if (vm_map_lookup_entry(map, start, &temp_entry))
422 return(KERN_NO_SPACE);
423
424 prev_entry = temp_entry;
425
426 /*
427 * Assert that the next entry doesn't overlap the
428 * end point.
429 */
430
431 if ((prev_entry->next != &map->header) &&
432 (prev_entry->next->start < end))
433 return(KERN_NO_SPACE);
434
435 /*
436 * See if we can avoid creating a new entry by
437 * extending one of our neighbors.
438 */
439
440 if (object == NULL) {
441 if ((prev_entry != &map->header) &&
442 (prev_entry->end == start) &&
443 (map->is_main_map) &&
444 (prev_entry->is_a_map == FALSE) &&
445 (prev_entry->is_sub_map == FALSE) &&
446 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
447 (prev_entry->protection == VM_PROT_DEFAULT) &&
448 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
449 (prev_entry->wired_count == 0)) {
450
451 if (vm_object_coalesce(prev_entry->object.vm_object,
452 NULL,
453 prev_entry->offset,
454 (vm_offset_t) 0,
455 (vm_size_t)(prev_entry->end
456 - prev_entry->start),
457 (vm_size_t)(end - prev_entry->end))) {
458 /*
459 * Coalesced the two objects - can extend
460 * the previous map entry to include the
461 * new range.
462 */
463 map->size += (end - prev_entry->end);
464 prev_entry->end = end;
465 return(KERN_SUCCESS);
466 }
467 }
468 }
469
470 /*
471 * Create a new entry
472 */
473
474 new_entry = vm_map_entry_create(map);
475 new_entry->start = start;
476 new_entry->end = end;
477
478 new_entry->is_a_map = FALSE;
479 new_entry->is_sub_map = FALSE;
480 new_entry->object.vm_object = object;
481 new_entry->offset = offset;
482
483 new_entry->copy_on_write = FALSE;
484 new_entry->needs_copy = FALSE;
485
486 if (map->is_main_map) {
487 new_entry->inheritance = VM_INHERIT_DEFAULT;
488 new_entry->protection = VM_PROT_DEFAULT;
489 new_entry->max_protection = VM_PROT_DEFAULT;
490 new_entry->wired_count = 0;
491 }
492
493 /*
494 * Insert the new entry into the list
495 */
496
497 vm_map_entry_link(map, prev_entry, new_entry);
498 map->size += new_entry->end - new_entry->start;
499
500 /*
501 * Update the free space hint
502 */
503
504 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
505 map->first_free = new_entry;
506
507 return(KERN_SUCCESS);
508}
509
510/*
511 * SAVE_HINT:
512 *
513 * Saves the specified entry as the hint for
514 * future lookups. Performs necessary interlocks.
515 */
516#define SAVE_HINT(map,value) \
517 simple_lock(&(map)->hint_lock); \
518 (map)->hint = (value); \
519 simple_unlock(&(map)->hint_lock);
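/*
 * Note that SAVE_HINT expands to three separate statements, so it is only
 * safe when used as a standalone statement inside a braced block, which is
 * how every use in this file is written.
 */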
520
521/*
522 * vm_map_lookup_entry: [ internal use only ]
523 *
524 * Finds the map entry containing (or
525 * immediately preceding) the specified address
526 * in the given map; the entry is returned
527 * in the "entry" parameter. The boolean
528 * result indicates whether the address is
529 * actually contained in the map.
530 */
531boolean_t vm_map_lookup_entry(map, address, entry)
532 register vm_map_t map;
533 register vm_offset_t address;
534 vm_map_entry_t *entry; /* OUT */
535{
536 register vm_map_entry_t cur;
537 register vm_map_entry_t last;
538
539 /*
540 * Start looking either from the head of the
541 * list, or from the hint.
542 */
543
544 simple_lock(&map->hint_lock);
545 cur = map->hint;
546 simple_unlock(&map->hint_lock);
547
548 if (cur == &map->header)
549 cur = cur->next;
550
551 if (address >= cur->start) {
552 /*
553 * Go from hint to end of list.
554 *
555 * But first, make a quick check to see if
556 * we are already looking at the entry we
557 * want (which is usually the case).
558 * Note also that we don't need to save the hint
559 * here... it is the same hint (unless we are
560 * at the header, in which case the hint didn't
561 * buy us anything anyway).
562 */
563 last = &map->header;
564 if ((cur != last) && (cur->end > address)) {
565 *entry = cur;
566 return(TRUE);
567 }
568 }
569 else {
570 /*
571 * Go from start to hint, *inclusively*
572 */
573 last = cur->next;
574 cur = map->header.next;
575 }
576
577 /*
578 * Search linearly
579 */
580
581 while (cur != last) {
582 if (cur->end > address) {
583 if (address >= cur->start) {
584 /*
585 * Save this lookup for future
586 * hints, and return
587 */
588
589 *entry = cur;
590 SAVE_HINT(map, cur);
591 return(TRUE);
592 }
593 break;
594 }
595 cur = cur->next;
596 }
597 *entry = cur->prev;
598 SAVE_HINT(map, *entry);
599 return(FALSE);
600}
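/*
 * Typical call pattern (a sketch mirroring callers such as vm_map_insert
 * above):
 *
 *	vm_map_entry_t tmp_entry;
 *
 *	if (vm_map_lookup_entry(map, start, &tmp_entry))
 *		...start lies inside *tmp_entry, which may then be clipped...
 *	else
 *		...tmp_entry is the entry just before start (possibly
 *		   &map->header) and is the insertion point for new entries...
 */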
601
602/*
603 * vm_map_find finds an unallocated region in the target address
604 * map with the given length. The search is defined to be
605 * first-fit from the specified address; the region found is
606 * returned in the same parameter.
607 *
608 */
609vm_map_find(map, object, offset, addr, length, find_space)
610 vm_map_t map;
611 vm_object_t object;
612 vm_offset_t offset;
613 vm_offset_t *addr; /* IN/OUT */
614 vm_size_t length;
615 boolean_t find_space;
616{
617 register vm_map_entry_t entry;
618 register vm_offset_t start;
619 register vm_offset_t end;
620 int result;
621
622 start = *addr;
623
624 vm_map_lock(map);
625
626 if (find_space) {
627 /*
628 * Calculate the first possible address.
629 */
630
631 if (start < map->min_offset)
632 start = map->min_offset;
633 if (start > map->max_offset) {
634 vm_map_unlock(map);
635 return (KERN_NO_SPACE);
636 }
637
638 /*
639 * Look for the first possible address;
640 * if there's already something at this
641 * address, we have to start after it.
642 */
643
644 if (start == map->min_offset) {
645 if ((entry = map->first_free) != &map->header)
646 start = entry->end;
647 } else {
648 vm_map_entry_t tmp_entry;
649 if (vm_map_lookup_entry(map, start, &tmp_entry))
650 start = tmp_entry->end;
651 entry = tmp_entry;
652 }
653
654 /*
655 * In any case, the "entry" always precedes
656 * the proposed new region throughout the
657 * loop:
658 */
659
660 while (TRUE) {
661 register vm_map_entry_t next;
662
663 /*
664 * Find the end of the proposed new region.
665 * Be sure we didn't go beyond the end, or
666 * wrap around the address.
667 */
668
669 end = start + length;
670
671 if ((end > map->max_offset) || (end < start)) {
672 vm_map_unlock(map);
673 return (KERN_NO_SPACE);
674 }
675
676 /*
677 * If there are no more entries, we must win.
678 */
679
680 next = entry->next;
681 if (next == &map->header)
682 break;
683
684 /*
685 * If there is another entry, it must be
686 * after the end of the potential new region.
687 */
688
689 if (next->start >= end)
690 break;
691
692 /*
693 * Didn't fit -- move to the next entry.
694 */
695
696 entry = next;
697 start = entry->end;
698 }
699 *addr = start;
700
701 SAVE_HINT(map, entry);
702 }
703
704 result = vm_map_insert(map, object, offset, start, start + length);
705
706 vm_map_unlock(map);
707 return(result);
708}
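/*
 * Usage sketch (hypothetical caller): ask for length bytes of first-fit
 * space and receive the chosen address back through "addr":
 *
 *	vm_offset_t addr;
 *
 *	addr = vm_map_min(map);
 *	if (vm_map_find(map, NULL, (vm_offset_t) 0, &addr, length, TRUE)
 *	    != KERN_SUCCESS)
 *		...no room...
 *
 * Passing find_space as FALSE instead makes vm_map_find try to insert at
 * exactly *addr, failing if that range is not free.
 */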
709
710/*
711 * vm_map_simplify_entry: [ internal use only ]
712 *
713 * Simplify the given map entry by:
714 * removing extra sharing maps
715 * [XXX maybe later] merging with a neighbor
716 */
717void vm_map_simplify_entry(map, entry)
718 vm_map_t map;
719 vm_map_entry_t entry;
720{
721#ifdef lint
722 map++;
 723#endif /* lint */
724
725 /*
726 * If this entry corresponds to a sharing map, then
727 * see if we can remove the level of indirection.
728 * If it's not a sharing map, then it points to
729 * a VM object, so see if we can merge with either
730 * of our neighbors.
731 */
732
733 if (entry->is_sub_map)
734 return;
735 if (entry->is_a_map) {
736#if 0
737 vm_map_t my_share_map;
738 int count;
739
740 my_share_map = entry->object.share_map;
741 simple_lock(&my_share_map->ref_lock);
742 count = my_share_map->ref_count;
743 simple_unlock(&my_share_map->ref_lock);
744
745 if (count == 1) {
746 /* Can move the region from
747 * entry->start to entry->end (+ entry->offset)
748 * in my_share_map into place of entry.
749 * Later.
750 */
751 }
 752#endif /* 0 */
753 }
754 else {
755 /*
756 * Try to merge with our neighbors.
757 *
758 * Conditions for merge are:
759 *
760 * 1. entries are adjacent.
761 * 2. both entries point to objects
762 * with null pagers.
763 *
764 * If a merge is possible, we replace the two
765 * entries with a single entry, then merge
766 * the two objects into a single object.
767 *
768 * Now, all that is left to do is write the
769 * code!
770 */
771 }
772}
773
774/*
775 * vm_map_clip_start: [ internal use only ]
776 *
777 * Asserts that the given entry begins at or after
778 * the specified address; if necessary,
779 * it splits the entry into two.
780 */
781#define vm_map_clip_start(map, entry, startaddr) \
782{ \
783 if (startaddr > entry->start) \
784 _vm_map_clip_start(map, entry, startaddr); \
785}
786
787/*
788 * This routine is called only when it is known that
789 * the entry must be split.
790 */
791void _vm_map_clip_start(map, entry, start)
792 register vm_map_t map;
793 register vm_map_entry_t entry;
794 register vm_offset_t start;
795{
796 register vm_map_entry_t new_entry;
797
798 /*
799 * See if we can simplify this entry first
800 */
801
802 vm_map_simplify_entry(map, entry);
803
804 /*
805 * Split off the front portion --
806 * note that we must insert the new
807 * entry BEFORE this one, so that
808 * this entry has the specified starting
809 * address.
810 */
811
812 new_entry = vm_map_entry_create(map);
813 *new_entry = *entry;
814
815 new_entry->end = start;
816 entry->offset += (start - entry->start);
817 entry->start = start;
818
819 vm_map_entry_link(map, entry->prev, new_entry);
820
821 if (entry->is_a_map || entry->is_sub_map)
822 vm_map_reference(new_entry->object.share_map);
823 else
824 vm_object_reference(new_entry->object.vm_object);
825}
826
827/*
828 * vm_map_clip_end: [ internal use only ]
829 *
830 * Asserts that the given entry ends at or before
831 * the specified address; if necessary,
832 * it splits the entry into two.
833 */
834
835void _vm_map_clip_end();
836#define vm_map_clip_end(map, entry, endaddr) \
837{ \
838 if (endaddr < entry->end) \
839 _vm_map_clip_end(map, entry, endaddr); \
840}
841
842/*
843 * This routine is called only when it is known that
844 * the entry must be split.
845 */
846void _vm_map_clip_end(map, entry, end)
847 register vm_map_t map;
848 register vm_map_entry_t entry;
849 register vm_offset_t end;
850{
851 register vm_map_entry_t new_entry;
852
853 /*
854 * Create a new entry and insert it
855 * AFTER the specified entry
856 */
857
858 new_entry = vm_map_entry_create(map);
859 *new_entry = *entry;
860
861 new_entry->start = entry->end = end;
862 new_entry->offset += (end - entry->start);
863
864 vm_map_entry_link(map, entry, new_entry);
865
866 if (entry->is_a_map || entry->is_sub_map)
867 vm_map_reference(new_entry->object.share_map);
868 else
869 vm_object_reference(new_entry->object.vm_object);
870}
871
872/*
873 * VM_MAP_RANGE_CHECK: [ internal use only ]
874 *
875 * Asserts that the starting and ending region
876 * addresses fall within the valid range of the map.
877 */
878#define VM_MAP_RANGE_CHECK(map, start, end) \
879 { \
880 if (start < vm_map_min(map)) \
881 start = vm_map_min(map); \
882 if (end > vm_map_max(map)) \
883 end = vm_map_max(map); \
884 if (start > end) \
885 start = end; \
886 }
887
888/*
889 * vm_map_submap: [ kernel use only ]
890 *
891 * Mark the given range as handled by a subordinate map.
892 *
893 * This range must have been created with vm_map_find,
894 * and no other operations may have been performed on this
895 * range prior to calling vm_map_submap.
896 *
897 * Only a limited number of operations can be performed
 898 * within this range after calling vm_map_submap:
899 * vm_fault
900 * [Don't try vm_map_copy!]
901 *
902 * To remove a submapping, one must first remove the
903 * range from the superior map, and then destroy the
904 * submap (if desired). [Better yet, don't try it.]
905 */
906vm_map_submap(map, start, end, submap)
907 register vm_map_t map;
908 register vm_offset_t start;
909 register vm_offset_t end;
910 vm_map_t submap;
911{
912 vm_map_entry_t entry;
913 register int result = KERN_INVALID_ARGUMENT;
914
915 vm_map_lock(map);
916
917 VM_MAP_RANGE_CHECK(map, start, end);
918
919 if (vm_map_lookup_entry(map, start, &entry)) {
920 vm_map_clip_start(map, entry, start);
921 }
922 else
923 entry = entry->next;
924
925 vm_map_clip_end(map, entry, end);
926
927 if ((entry->start == start) && (entry->end == end) &&
928 (!entry->is_a_map) &&
929 (entry->object.vm_object == NULL) &&
930 (!entry->copy_on_write)) {
931 entry->is_a_map = FALSE;
932 entry->is_sub_map = TRUE;
933 vm_map_reference(entry->object.sub_map = submap);
934 result = KERN_SUCCESS;
935 }
936 vm_map_unlock(map);
937
938 return(result);
939}
940
941/*
942 * vm_map_protect:
943 *
944 * Sets the protection of the specified address
945 * region in the target map. If "set_max" is
946 * specified, the maximum protection is to be set;
947 * otherwise, only the current protection is affected.
948 */
949vm_map_protect(map, start, end, new_prot, set_max)
950 register vm_map_t map;
951 register vm_offset_t start;
952 register vm_offset_t end;
953 register vm_prot_t new_prot;
954 register boolean_t set_max;
955{
956 register vm_map_entry_t current;
957 vm_map_entry_t entry;
958
959 vm_map_lock(map);
960
961 VM_MAP_RANGE_CHECK(map, start, end);
962
963 if (vm_map_lookup_entry(map, start, &entry)) {
964 vm_map_clip_start(map, entry, start);
965 }
966 else
967 entry = entry->next;
968
969 /*
970 * Make a first pass to check for protection
971 * violations.
972 */
973
974 current = entry;
975 while ((current != &map->header) && (current->start < end)) {
 976 if (current->is_sub_map) {
 977 vm_map_unlock(map); return(KERN_INVALID_ARGUMENT); }
978 if ((new_prot & current->max_protection) != new_prot) {
979 vm_map_unlock(map);
980 return(KERN_PROTECTION_FAILURE);
981 }
982
983 current = current->next;
984 }
985
986 /*
987 * Go back and fix up protections.
988 * [Note that clipping is not necessary the second time.]
989 */
990
991 current = entry;
992
993 while ((current != &map->header) && (current->start < end)) {
994 vm_prot_t old_prot;
995
996 vm_map_clip_end(map, current, end);
997
998 old_prot = current->protection;
999 if (set_max)
1000 current->protection =
1001 (current->max_protection = new_prot) &
1002 old_prot;
1003 else
1004 current->protection = new_prot;
1005
1006 /*
1007 * Update physical map if necessary.
1008 * Worry about copy-on-write here -- CHECK THIS XXX
1009 */
1010
1011 if (current->protection != old_prot) {
1012
1013#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1014 VM_PROT_ALL)
1015#define max(a,b) ((a) > (b) ? (a) : (b))
1016
1017 if (current->is_a_map) {
1018 vm_map_entry_t share_entry;
1019 vm_offset_t share_end;
1020
1021 vm_map_lock(current->object.share_map);
1022 (void) vm_map_lookup_entry(
1023 current->object.share_map,
1024 current->offset,
1025 &share_entry);
1026 share_end = current->offset +
1027 (current->end - current->start);
1028 while ((share_entry !=
1029 &current->object.share_map->header) &&
1030 (share_entry->start < share_end)) {
1031
1032 pmap_protect(map->pmap,
1033 (max(share_entry->start,
1034 current->offset) -
1035 current->offset +
1036 current->start),
1037 min(share_entry->end,
1038 share_end) -
1039 current->offset +
1040 current->start,
1041 current->protection &
1042 MASK(share_entry));
1043
1044 share_entry = share_entry->next;
1045 }
1046 vm_map_unlock(current->object.share_map);
1047 }
1048 else
1049 pmap_protect(map->pmap, current->start,
1050 current->end,
 1051 current->protection & MASK(current));
1052#undef max
1053#undef MASK
1054 }
1055 current = current->next;
1056 }
1057
1058 vm_map_unlock(map);
1059 return(KERN_SUCCESS);
1060}
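/*
 * Usage sketch (hypothetical caller): make a range read-only without
 * touching its maximum protection:
 *
 *	(void) vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * With set_max TRUE the same call sets max_protection to new_prot and
 * reduces the current protection to (new_prot & old protection), as coded
 * above.
 */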
1061
1062/*
1063 * vm_map_inherit:
1064 *
1065 * Sets the inheritance of the specified address
1066 * range in the target map. Inheritance
1067 * affects how the map will be shared with
1068 * child maps at the time of vm_map_fork.
1069 */
1070vm_map_inherit(map, start, end, new_inheritance)
1071 register vm_map_t map;
1072 register vm_offset_t start;
1073 register vm_offset_t end;
1074 register vm_inherit_t new_inheritance;
1075{
1076 register vm_map_entry_t entry;
1077 vm_map_entry_t temp_entry;
1078
1079 switch (new_inheritance) {
1080 case VM_INHERIT_NONE:
1081 case VM_INHERIT_COPY:
1082 case VM_INHERIT_SHARE:
1083 break;
1084 default:
1085 return(KERN_INVALID_ARGUMENT);
1086 }
1087
1088 vm_map_lock(map);
1089
1090 VM_MAP_RANGE_CHECK(map, start, end);
1091
1092 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1093 entry = temp_entry;
1094 vm_map_clip_start(map, entry, start);
1095 }
1096 else
1097 entry = temp_entry->next;
1098
1099 while ((entry != &map->header) && (entry->start < end)) {
1100 vm_map_clip_end(map, entry, end);
1101
1102 entry->inheritance = new_inheritance;
1103
1104 entry = entry->next;
1105 }
1106
1107 vm_map_unlock(map);
1108 return(KERN_SUCCESS);
1109}
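/*
 * The three inheritance values accepted above are acted on in vmspace_fork
 * below: VM_INHERIT_NONE leaves a hole in the child, VM_INHERIT_SHARE routes
 * the region through a sharing map so parent and child see the same pages,
 * and VM_INHERIT_COPY gives the child a copy-on-write copy.
 */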
1110
1111/*
1112 * vm_map_pageable:
1113 *
1114 * Sets the pageability of the specified address
1115 * range in the target map. Regions specified
1116 * as not pageable require locked-down physical
1117 * memory and physical page maps.
1118 *
1119 * The map must not be locked, but a reference
1120 * must remain to the map throughout the call.
1121 */
1122vm_map_pageable(map, start, end, new_pageable)
1123 register vm_map_t map;
1124 register vm_offset_t start;
1125 register vm_offset_t end;
1126 register boolean_t new_pageable;
1127{
1128 register vm_map_entry_t entry;
1129 vm_map_entry_t temp_entry;
1130
1131 vm_map_lock(map);
1132
1133 VM_MAP_RANGE_CHECK(map, start, end);
1134
1135 /*
1136 * Only one pageability change may take place at one
1137 * time, since vm_fault assumes it will be called
1138 * only once for each wiring/unwiring. Therefore, we
1139 * have to make sure we're actually changing the pageability
1140 * for the entire region. We do so before making any changes.
1141 */
1142
1143 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1144 entry = temp_entry;
1145 vm_map_clip_start(map, entry, start);
1146 }
1147 else
1148 entry = temp_entry->next;
1149 temp_entry = entry;
1150
1151 /*
1152 * Actions are rather different for wiring and unwiring,
1153 * so we have two separate cases.
1154 */
1155
1156 if (new_pageable) {
1157
1158 /*
1159 * Unwiring. First ensure that the range to be
1160 * unwired is really wired down.
1161 */
1162 while ((entry != &map->header) && (entry->start < end)) {
1163
1164 if (entry->wired_count == 0) {
1165 vm_map_unlock(map);
1166 return(KERN_INVALID_ARGUMENT);
1167 }
1168 entry = entry->next;
1169 }
1170
1171 /*
1172 * Now decrement the wiring count for each region.
1173 * If a region becomes completely unwired,
1174 * unwire its physical pages and mappings.
1175 */
1176 lock_set_recursive(&map->lock);
1177
1178 entry = temp_entry;
1179 while ((entry != &map->header) && (entry->start < end)) {
1180 vm_map_clip_end(map, entry, end);
1181
1182 entry->wired_count--;
1183 if (entry->wired_count == 0)
1184 vm_fault_unwire(map, entry->start, entry->end);
1185
1186 entry = entry->next;
1187 }
1188 lock_clear_recursive(&map->lock);
1189 }
1190
1191 else {
1192 /*
1193 * Wiring. We must do this in two passes:
1194 *
1195 * 1. Holding the write lock, we increment the
1196 * wiring count. For any area that is not already
1197 * wired, we create any shadow objects that need
1198 * to be created.
1199 *
1200 * 2. We downgrade to a read lock, and call
1201 * vm_fault_wire to fault in the pages for any
1202 * newly wired area (wired_count is 1).
1203 *
1204 * Downgrading to a read lock for vm_fault_wire avoids
1205 * a possible deadlock with another thread that may have
1206 * faulted on one of the pages to be wired (it would mark
1207 * the page busy, blocking us, then in turn block on the
1208 * map lock that we hold). Because of problems in the
1209 * recursive lock package, we cannot upgrade to a write
1210 * lock in vm_map_lookup. Thus, any actions that require
1211 * the write lock must be done beforehand. Because we
1212 * keep the read lock on the map, the copy-on-write status
1213 * of the entries we modify here cannot change.
1214 */
1215
1216 /*
1217 * Pass 1.
1218 */
1219 entry = temp_entry;
1220 while ((entry != &map->header) && (entry->start < end)) {
1221 vm_map_clip_end(map, entry, end);
1222
1223 entry->wired_count++;
1224 if (entry->wired_count == 1) {
1225
1226 /*
1227 * Perform actions of vm_map_lookup that need
1228 * the write lock on the map: create a shadow
1229 * object for a copy-on-write region, or an
1230 * object for a zero-fill region.
1231 *
1232 * We don't have to do this for entries that
1233 * point to sharing maps, because we won't hold
1234 * the lock on the sharing map.
1235 */
1236 if (!entry->is_a_map) {
1237 if (entry->needs_copy &&
1238 ((entry->protection & VM_PROT_WRITE) != 0)) {
1239
1240 vm_object_shadow(&entry->object.vm_object,
1241 &entry->offset,
1242 (vm_size_t)(entry->end
1243 - entry->start));
1244 entry->needs_copy = FALSE;
1245 }
1246 else if (entry->object.vm_object == NULL) {
1247 entry->object.vm_object =
1248 vm_object_allocate((vm_size_t)(entry->end
1249 - entry->start));
1250 entry->offset = (vm_offset_t)0;
1251 }
1252 }
1253 }
1254
1255 entry = entry->next;
1256 }
1257
1258 /*
1259 * Pass 2.
1260 */
1261
1262 /*
1263 * HACK HACK HACK HACK
1264 *
1265 * If we are wiring in the kernel map or a submap of it,
1266 * unlock the map to avoid deadlocks. We trust that the
1267 * kernel threads are well-behaved, and therefore will
1268 * not do anything destructive to this region of the map
1269 * while we have it unlocked. We cannot trust user threads
1270 * to do the same.
1271 *
1272 * HACK HACK HACK HACK
1273 */
1274 if (vm_map_pmap(map) == kernel_pmap) {
1275 vm_map_unlock(map); /* trust me ... */
1276 }
1277 else {
1278 lock_set_recursive(&map->lock);
1279 lock_write_to_read(&map->lock);
1280 }
1281
1282 entry = temp_entry;
1283 while (entry != &map->header && entry->start < end) {
1284 if (entry->wired_count == 1) {
1285 vm_fault_wire(map, entry->start, entry->end);
1286 }
1287 entry = entry->next;
1288 }
1289
1290 if (vm_map_pmap(map) == kernel_pmap) {
1291 vm_map_lock(map);
1292 }
1293 else {
1294 lock_clear_recursive(&map->lock);
1295 }
1296 }
1297
1298 vm_map_unlock(map);
1299
1300 return(KERN_SUCCESS);
1301}
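/*
 * Usage sketch (hypothetical caller; trunc_page/round_page are the usual VM
 * rounding macros, assumed here).  Note the sense of the flag:
 * new_pageable == FALSE means "wire the range down".
 *
 *	(void) vm_map_pageable(map, trunc_page(va), round_page(va + size),
 *	    FALSE);
 *	... do the I/O against the wired pages ...
 *	(void) vm_map_pageable(map, trunc_page(va), round_page(va + size),
 *	    TRUE);
 *
 * The unwiring call returns KERN_INVALID_ARGUMENT if any part of the range
 * is not currently wired.
 */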
1302
1303/*
1304 * vm_map_entry_unwire: [ internal use only ]
1305 *
1306 * Make the region specified by this entry pageable.
1307 *
1308 * The map in question should be locked.
1309 * [This is the reason for this routine's existence.]
1310 */
1311void vm_map_entry_unwire(map, entry)
1312 vm_map_t map;
1313 register vm_map_entry_t entry;
1314{
1315 vm_fault_unwire(map, entry->start, entry->end);
1316 entry->wired_count = 0;
1317}
1318
1319/*
1320 * vm_map_entry_delete: [ internal use only ]
1321 *
1322 * Deallocate the given entry from the target map.
1323 */
1324void vm_map_entry_delete(map, entry)
1325 register vm_map_t map;
1326 register vm_map_entry_t entry;
1327{
1328 if (entry->wired_count != 0)
1329 vm_map_entry_unwire(map, entry);
1330
1331 vm_map_entry_unlink(map, entry);
1332 map->size -= entry->end - entry->start;
1333
1334 if (entry->is_a_map || entry->is_sub_map)
1335 vm_map_deallocate(entry->object.share_map);
1336 else
1337 vm_object_deallocate(entry->object.vm_object);
1338
1339 vm_map_entry_dispose(map, entry);
1340}
1341
1342/*
1343 * vm_map_delete: [ internal use only ]
1344 *
1345 * Deallocates the given address range from the target
1346 * map.
1347 *
1348 * When called with a sharing map, removes pages from
1349 * that region from all physical maps.
1350 */
1351vm_map_delete(map, start, end)
1352 register vm_map_t map;
1353 vm_offset_t start;
1354 register vm_offset_t end;
1355{
1356 register vm_map_entry_t entry;
1357 vm_map_entry_t first_entry;
1358
1359 /*
1360 * Find the start of the region, and clip it
1361 */
1362
1363 if (!vm_map_lookup_entry(map, start, &first_entry))
1364 entry = first_entry->next;
1365 else {
1366 entry = first_entry;
1367 vm_map_clip_start(map, entry, start);
1368
1369 /*
1370 * Fix the lookup hint now, rather than each
1371 * time though the loop.
1372 */
1373
1374 SAVE_HINT(map, entry->prev);
1375 }
1376
1377 /*
1378 * Save the free space hint
1379 */
1380
1381 if (map->first_free->start >= start)
1382 map->first_free = entry->prev;
1383
1384 /*
1385 * Step through all entries in this region
1386 */
1387
1388 while ((entry != &map->header) && (entry->start < end)) {
1389 vm_map_entry_t next;
1390 register vm_offset_t s, e;
1391 register vm_object_t object;
1392
1393 vm_map_clip_end(map, entry, end);
1394
1395 next = entry->next;
1396 s = entry->start;
1397 e = entry->end;
1398
1399 /*
1400 * Unwire before removing addresses from the pmap;
1401 * otherwise, unwiring will put the entries back in
1402 * the pmap.
1403 */
1404
1405 object = entry->object.vm_object;
1406 if (entry->wired_count != 0)
1407 vm_map_entry_unwire(map, entry);
1408
1409 /*
1410 * If this is a sharing map, we must remove
1411 * *all* references to this data, since we can't
1412 * find all of the physical maps which are sharing
1413 * it.
1414 */
1415
1416 if (object == kernel_object || object == kmem_object)
1417 vm_object_page_remove(object, entry->offset,
1418 entry->offset + (e - s));
1419 else if (!map->is_main_map)
1420 vm_object_pmap_remove(object,
1421 entry->offset,
1422 entry->offset + (e - s));
1423 else
1424 pmap_remove(map->pmap, s, e);
1425
1426 /*
1427 * Delete the entry (which may delete the object)
1428 * only after removing all pmap entries pointing
1429 * to its pages. (Otherwise, its page frames may
1430 * be reallocated, and any modify bits will be
1431 * set in the wrong object!)
1432 */
1433
1434 vm_map_entry_delete(map, entry);
1435 entry = next;
1436 }
1437 return(KERN_SUCCESS);
1438}
1439
1440/*
1441 * vm_map_remove:
1442 *
1443 * Remove the given address range from the target map.
1444 * This is the exported form of vm_map_delete.
1445 */
1446vm_map_remove(map, start, end)
1447 register vm_map_t map;
1448 register vm_offset_t start;
1449 register vm_offset_t end;
1450{
1451 register int result;
1452
1453 vm_map_lock(map);
1454 VM_MAP_RANGE_CHECK(map, start, end);
1455 result = vm_map_delete(map, start, end);
1456 vm_map_unlock(map);
1457
1458 return(result);
1459}
1460
1461/*
1462 * vm_map_check_protection:
1463 *
1464 * Assert that the target map allows the specified
1465 * privilege on the entire address region given.
1466 * The entire region must be allocated.
1467 */
1468boolean_t vm_map_check_protection(map, start, end, protection)
1469 register vm_map_t map;
1470 register vm_offset_t start;
1471 register vm_offset_t end;
1472 register vm_prot_t protection;
1473{
1474 register vm_map_entry_t entry;
1475 vm_map_entry_t tmp_entry;
1476
1477 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1478 return(FALSE);
1479 }
1480
1481 entry = tmp_entry;
1482
1483 while (start < end) {
1484 if (entry == &map->header) {
1485 return(FALSE);
1486 }
1487
1488 /*
1489 * No holes allowed!
1490 */
1491
1492 if (start < entry->start) {
1493 return(FALSE);
1494 }
1495
1496 /*
1497 * Check protection associated with entry.
1498 */
1499
1500 if ((entry->protection & protection) != protection) {
1501 return(FALSE);
1502 }
1503
1504 /* go to next entry */
1505
1506 start = entry->end;
1507 entry = entry->next;
1508 }
1509 return(TRUE);
1510}
1511
1512/*
1513 * vm_map_copy_entry:
1514 *
1515 * Copies the contents of the source entry to the destination
1516 * entry. The entries *must* be aligned properly.
1517 */
1518void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1519 vm_map_t src_map, dst_map;
1520 register vm_map_entry_t src_entry, dst_entry;
1521{
1522 vm_object_t temp_object;
1523
1524 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1525 return;
1526
1527 if (dst_entry->object.vm_object != NULL &&
1528 !dst_entry->object.vm_object->internal)
1529 printf("vm_map_copy_entry: copying over permanent data!\n");
1530
1531 /*
1532 * If our destination map was wired down,
1533 * unwire it now.
1534 */
1535
1536 if (dst_entry->wired_count != 0)
1537 vm_map_entry_unwire(dst_map, dst_entry);
1538
1539 /*
1540 * If we're dealing with a sharing map, we
1541 * must remove the destination pages from
1542 * all maps (since we cannot know which maps
1543 * this sharing map belongs in).
1544 */
1545
1546 if (dst_map->is_main_map)
1547 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1548 else
1549 vm_object_pmap_remove(dst_entry->object.vm_object,
1550 dst_entry->offset,
1551 dst_entry->offset +
1552 (dst_entry->end - dst_entry->start));
1553
1554 if (src_entry->wired_count == 0) {
1555
1556 boolean_t src_needs_copy;
1557
1558 /*
1559 * If the source entry is marked needs_copy,
1560 * it is already write-protected.
1561 */
1562 if (!src_entry->needs_copy) {
1563
1564 boolean_t su;
1565
1566 /*
1567 * If the source entry has only one mapping,
1568 * we can just protect the virtual address
1569 * range.
1570 */
1571 if (!(su = src_map->is_main_map)) {
1572 simple_lock(&src_map->ref_lock);
1573 su = (src_map->ref_count == 1);
1574 simple_unlock(&src_map->ref_lock);
1575 }
1576
1577 if (su) {
1578 pmap_protect(src_map->pmap,
1579 src_entry->start,
1580 src_entry->end,
1581 src_entry->protection & ~VM_PROT_WRITE);
1582 }
1583 else {
1584 vm_object_pmap_copy(src_entry->object.vm_object,
1585 src_entry->offset,
1586 src_entry->offset + (src_entry->end
1587 -src_entry->start));
1588 }
1589 }
1590
1591 /*
1592 * Make a copy of the object.
1593 */
1594 temp_object = dst_entry->object.vm_object;
1595 vm_object_copy(src_entry->object.vm_object,
1596 src_entry->offset,
1597 (vm_size_t)(src_entry->end -
1598 src_entry->start),
1599 &dst_entry->object.vm_object,
1600 &dst_entry->offset,
1601 &src_needs_copy);
1602 /*
1603 * If we didn't get a copy-object now, mark the
1604 * source map entry so that a shadow will be created
1605 * to hold its changed pages.
1606 */
1607 if (src_needs_copy)
1608 src_entry->needs_copy = TRUE;
1609
1610 /*
1611 * The destination always needs to have a shadow
1612 * created.
1613 */
1614 dst_entry->needs_copy = TRUE;
1615
1616 /*
1617 * Mark the entries copy-on-write, so that write-enabling
1618 * the entry won't make copy-on-write pages writable.
1619 */
1620 src_entry->copy_on_write = TRUE;
1621 dst_entry->copy_on_write = TRUE;
1622 /*
1623 * Get rid of the old object.
1624 */
1625 vm_object_deallocate(temp_object);
1626
1627 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1628 dst_entry->end - dst_entry->start, src_entry->start);
1629 }
1630 else {
1631 /*
1632 * Of course, wired down pages can't be set copy-on-write.
1633 * Cause wired pages to be copied into the new
1634 * map by simulating faults (the new pages are
1635 * pageable)
1636 */
1637 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1638 }
1639}
1640
1641/*
1642 * vm_map_copy:
1643 *
1644 * Perform a virtual memory copy from the source
1645 * address map/range to the destination map/range.
1646 *
1647 * If src_destroy or dst_alloc is requested,
1648 * the source and destination regions should be
1649 * disjoint, not only in the top-level map, but
1650 * in the sharing maps as well. [The best way
1651 * to guarantee this is to use a new intermediate
1652 * map to make copies. This also reduces map
1653 * fragmentation.]
1654 */
1655vm_map_copy(dst_map, src_map,
1656 dst_addr, len, src_addr,
1657 dst_alloc, src_destroy)
1658 vm_map_t dst_map;
1659 vm_map_t src_map;
1660 vm_offset_t dst_addr;
1661 vm_size_t len;
1662 vm_offset_t src_addr;
1663 boolean_t dst_alloc;
1664 boolean_t src_destroy;
1665{
1666 register
1667 vm_map_entry_t src_entry;
1668 register
1669 vm_map_entry_t dst_entry;
1670 vm_map_entry_t tmp_entry;
1671 vm_offset_t src_start;
1672 vm_offset_t src_end;
1673 vm_offset_t dst_start;
1674 vm_offset_t dst_end;
1675 vm_offset_t src_clip;
1676 vm_offset_t dst_clip;
1677 int result;
1678 boolean_t old_src_destroy;
1679
1680 /*
1681 * XXX While we figure out why src_destroy screws up,
1682 * we'll do it by explicitly vm_map_delete'ing at the end.
1683 */
1684
1685 old_src_destroy = src_destroy;
1686 src_destroy = FALSE;
1687
1688 /*
1689 * Compute start and end of region in both maps
1690 */
1691
1692 src_start = src_addr;
1693 src_end = src_start + len;
1694 dst_start = dst_addr;
1695 dst_end = dst_start + len;
1696
1697 /*
1698 * Check that the region can exist in both source
1699 * and destination.
1700 */
1701
1702 if ((dst_end < dst_start) || (src_end < src_start))
1703 return(KERN_NO_SPACE);
1704
1705 /*
1706 * Lock the maps in question -- we avoid deadlock
1707 * by ordering lock acquisition by map value
1708 */
1709
1710 if (src_map == dst_map) {
1711 vm_map_lock(src_map);
1712 }
1713 else if ((int) src_map < (int) dst_map) {
1714 vm_map_lock(src_map);
1715 vm_map_lock(dst_map);
1716 } else {
1717 vm_map_lock(dst_map);
1718 vm_map_lock(src_map);
1719 }
1720
1721 result = KERN_SUCCESS;
1722
1723 /*
1724 * Check protections... source must be completely readable and
1725 * destination must be completely writable. [Note that if we're
1726 * allocating the destination region, we don't have to worry
1727 * about protection, but instead about whether the region
1728 * exists.]
1729 */
1730
1731 if (src_map->is_main_map && dst_map->is_main_map) {
1732 if (!vm_map_check_protection(src_map, src_start, src_end,
1733 VM_PROT_READ)) {
1734 result = KERN_PROTECTION_FAILURE;
1735 goto Return;
1736 }
1737
1738 if (dst_alloc) {
1739 /* XXX Consider making this a vm_map_find instead */
1740 if ((result = vm_map_insert(dst_map, NULL,
1741 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1742 goto Return;
1743 }
1744 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1745 VM_PROT_WRITE)) {
1746 result = KERN_PROTECTION_FAILURE;
1747 goto Return;
1748 }
1749 }
1750
1751 /*
1752 * Find the start entries and clip.
1753 *
1754 * Note that checking protection asserts that the
1755 * lookup cannot fail.
1756 *
1757 * Also note that we wait to do the second lookup
1758 * until we have done the first clip, as the clip
1759 * may affect which entry we get!
1760 */
1761
1762 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1763 src_entry = tmp_entry;
1764 vm_map_clip_start(src_map, src_entry, src_start);
1765
1766 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1767 dst_entry = tmp_entry;
1768 vm_map_clip_start(dst_map, dst_entry, dst_start);
1769
1770 /*
1771 * If both source and destination entries are the same,
1772 * retry the first lookup, as it may have changed.
1773 */
1774
1775 if (src_entry == dst_entry) {
1776 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1777 src_entry = tmp_entry;
1778 }
1779
1780 /*
1781 * If source and destination entries are still the same,
1782 * a null copy is being performed.
1783 */
1784
1785 if (src_entry == dst_entry)
1786 goto Return;
1787
1788 /*
1789 * Go through entries until we get to the end of the
1790 * region.
1791 */
1792
1793 while (src_start < src_end) {
1794 /*
1795 * Clip the entries to the endpoint of the entire region.
1796 */
1797
1798 vm_map_clip_end(src_map, src_entry, src_end);
1799 vm_map_clip_end(dst_map, dst_entry, dst_end);
1800
1801 /*
1802 * Clip each entry to the endpoint of the other entry.
1803 */
1804
1805 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1806 vm_map_clip_end(src_map, src_entry, src_clip);
1807
1808 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1809 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1810
1811 /*
1812 * Both entries now match in size and relative endpoints.
1813 *
1814 * If both entries refer to a VM object, we can
1815 * deal with them now.
1816 */
1817
1818 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1819 vm_map_copy_entry(src_map, dst_map, src_entry,
1820 dst_entry);
1821 }
1822 else {
1823 register vm_map_t new_dst_map;
1824 vm_offset_t new_dst_start;
1825 vm_size_t new_size;
1826 vm_map_t new_src_map;
1827 vm_offset_t new_src_start;
1828
1829 /*
1830 * We have to follow at least one sharing map.
1831 */
1832
1833 new_size = (dst_entry->end - dst_entry->start);
1834
1835 if (src_entry->is_a_map) {
1836 new_src_map = src_entry->object.share_map;
1837 new_src_start = src_entry->offset;
1838 }
1839 else {
1840 new_src_map = src_map;
1841 new_src_start = src_entry->start;
1842 lock_set_recursive(&src_map->lock);
1843 }
1844
1845 if (dst_entry->is_a_map) {
1846 vm_offset_t new_dst_end;
1847
1848 new_dst_map = dst_entry->object.share_map;
1849 new_dst_start = dst_entry->offset;
1850
1851 /*
1852 * Since the destination sharing entries
1853 * will be merely deallocated, we can
1854 * do that now, and replace the region
1855 * with a null object. [This prevents
1856 * splitting the source map to match
1857 * the form of the destination map.]
1858 * Note that we can only do so if the
1859 * source and destination do not overlap.
1860 */
1861
1862 new_dst_end = new_dst_start + new_size;
1863
1864 if (new_dst_map != new_src_map) {
1865 vm_map_lock(new_dst_map);
1866 (void) vm_map_delete(new_dst_map,
1867 new_dst_start,
1868 new_dst_end);
1869 (void) vm_map_insert(new_dst_map,
1870 NULL,
1871 (vm_offset_t) 0,
1872 new_dst_start,
1873 new_dst_end);
1874 vm_map_unlock(new_dst_map);
1875 }
1876 }
1877 else {
1878 new_dst_map = dst_map;
1879 new_dst_start = dst_entry->start;
1880 lock_set_recursive(&dst_map->lock);
1881 }
1882
1883 /*
1884 * Recursively copy the sharing map.
1885 */
1886
1887 (void) vm_map_copy(new_dst_map, new_src_map,
1888 new_dst_start, new_size, new_src_start,
1889 FALSE, FALSE);
1890
1891 if (dst_map == new_dst_map)
1892 lock_clear_recursive(&dst_map->lock);
1893 if (src_map == new_src_map)
1894 lock_clear_recursive(&src_map->lock);
1895 }
1896
1897 /*
1898 * Update variables for next pass through the loop.
1899 */
1900
1901 src_start = src_entry->end;
1902 src_entry = src_entry->next;
1903 dst_start = dst_entry->end;
1904 dst_entry = dst_entry->next;
1905
1906 /*
1907 * If the source is to be destroyed, here is the
1908 * place to do it.
1909 */
1910
1911 if (src_destroy && src_map->is_main_map &&
1912 dst_map->is_main_map)
1913 vm_map_entry_delete(src_map, src_entry->prev);
1914 }
1915
1916 /*
1917 * Update the physical maps as appropriate
1918 */
1919
1920 if (src_map->is_main_map && dst_map->is_main_map) {
1921 if (src_destroy)
1922 pmap_remove(src_map->pmap, src_addr, src_addr + len);
1923 }
1924
1925 /*
1926 * Unlock the maps
1927 */
1928
1929 Return: ;
1930
1931 if (old_src_destroy)
1932 vm_map_delete(src_map, src_addr, src_addr + len);
1933
1934 vm_map_unlock(src_map);
1935 if (src_map != dst_map)
1936 vm_map_unlock(dst_map);
1937
1938 return(result);
1939}
1940
1941/*
1942 * vmspace_fork:
1943 * Create a new process vmspace structure and vm_map
1944 * based on those of an existing process. The new map
1945 * is based on the old map, according to the inheritance
1946 * values on the regions in that map.
1947 *
1948 * The source map must not be locked.
1949 */
1950struct vmspace *
1951vmspace_fork(vm1)
1952 register struct vmspace *vm1;
1953{
1954 register struct vmspace *vm2;
1955 vm_map_t old_map = &vm1->vm_map;
1956 vm_map_t new_map;
1957 vm_map_entry_t old_entry;
1958 vm_map_entry_t new_entry;
1959 pmap_t new_pmap;
1960
1961 vm_map_lock(old_map);
1962
1963 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1964 old_map->entries_pageable);
1965 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1966 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1967 new_pmap = &vm2->vm_pmap; /* XXX */
1968 new_map = &vm2->vm_map; /* XXX */
1969
1970 old_entry = old_map->header.next;
1971
1972 while (old_entry != &old_map->header) {
1973 if (old_entry->is_sub_map)
1974 panic("vm_map_fork: encountered a submap");
1975
1976 switch (old_entry->inheritance) {
1977 case VM_INHERIT_NONE:
1978 break;
1979
1980 case VM_INHERIT_SHARE:
1981 /*
1982 * If we don't already have a sharing map:
1983 */
1984
1985 if (!old_entry->is_a_map) {
1986 vm_map_t new_share_map;
1987 vm_map_entry_t new_share_entry;
1988
1989 /*
1990 * Create a new sharing map
1991 */
1992
1993 new_share_map = vm_map_create(NULL,
1994 old_entry->start,
1995 old_entry->end,
1996 TRUE);
1997 new_share_map->is_main_map = FALSE;
1998
1999 /*
2000 * Create the only sharing entry from the
2001 * old task map entry.
2002 */
2003
2004 new_share_entry =
2005 vm_map_entry_create(new_share_map);
2006 *new_share_entry = *old_entry;
2007
2008 /*
2009 * Insert the entry into the new sharing
2010 * map
2011 */
2012
2013 vm_map_entry_link(new_share_map,
2014 new_share_map->header.prev,
2015 new_share_entry);
2016
2017 /*
2018 * Fix up the task map entry to refer
2019 * to the sharing map now.
2020 */
2021
2022 old_entry->is_a_map = TRUE;
2023 old_entry->object.share_map = new_share_map;
2024 old_entry->offset = old_entry->start;
2025 }
2026
2027 /*
2028 * Clone the entry, referencing the sharing map.
2029 */
2030
2031 new_entry = vm_map_entry_create(new_map);
2032 *new_entry = *old_entry;
2033 vm_map_reference(new_entry->object.share_map);
2034
2035 /*
2036 * Insert the entry into the new map -- we
2037 * know we're inserting at the end of the new
2038 * map.
2039 */
2040
2041 vm_map_entry_link(new_map, new_map->header.prev,
2042 new_entry);
2043
2044 /*
2045 * Update the physical map
2046 */
2047
2048 pmap_copy(new_map->pmap, old_map->pmap,
2049 new_entry->start,
2050 (old_entry->end - old_entry->start),
2051 old_entry->start);
2052 break;
2053
2054 case VM_INHERIT_COPY:
2055 /*
2056 * Clone the entry and link into the map.
2057 */
2058
2059 new_entry = vm_map_entry_create(new_map);
2060 *new_entry = *old_entry;
2061 new_entry->wired_count = 0;
2062 new_entry->object.vm_object = NULL;
2063 new_entry->is_a_map = FALSE;
2064 vm_map_entry_link(new_map, new_map->header.prev,
2065 new_entry);
2066 if (old_entry->is_a_map) {
2067 int check;
2068
2069 check = vm_map_copy(new_map,
2070 old_entry->object.share_map,
2071 new_entry->start,
2072 (vm_size_t)(new_entry->end -
2073 new_entry->start),
2074 old_entry->offset,
2075 FALSE, FALSE);
2076 if (check != KERN_SUCCESS)
2077 printf("vm_map_fork: copy in share_map region failed\n");
2078 }
2079 else {
2080 vm_map_copy_entry(old_map, new_map, old_entry,
2081 new_entry);
2082 }
2083 break;
2084 }
2085 old_entry = old_entry->next;
2086 }
2087
2088 new_map->size = old_map->size;
2089 vm_map_unlock(old_map);
2090
2091 return(vm2);
2092}
2093
2094/*
2095 * vm_map_lookup:
2096 *
2097 * Finds the VM object, offset, and
2098 * protection for a given virtual address in the
2099 * specified map, assuming a page fault of the
2100 * type specified.
2101 *
2102 * Leaves the map in question locked for read; return
2103 * values are guaranteed until a vm_map_lookup_done
2104 * call is performed. Note that the map argument
2105 * is in/out; the returned map must be used in
2106 * the call to vm_map_lookup_done.
2107 *
2108 * A handle (out_entry) is returned for use in
2109 * vm_map_lookup_done, to make that fast.
2110 *
2111 * If a lookup is requested with "write protection"
2112 * specified, the map may be changed to perform virtual
2113 * copying operations, although the data referenced will
2114 * remain the same.
2115 */
2116vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2117 object, offset, out_prot, wired, single_use)
2118 vm_map_t *var_map; /* IN/OUT */
2119 register vm_offset_t vaddr;
2120 register vm_prot_t fault_type;
2121
2122 vm_map_entry_t *out_entry; /* OUT */
2123 vm_object_t *object; /* OUT */
2124 vm_offset_t *offset; /* OUT */
2125 vm_prot_t *out_prot; /* OUT */
2126 boolean_t *wired; /* OUT */
2127 boolean_t *single_use; /* OUT */
2128{
2129 vm_map_t share_map;
2130 vm_offset_t share_offset;
2131 register vm_map_entry_t entry;
2132 register vm_map_t map = *var_map;
2133 register vm_prot_t prot;
2134 register boolean_t su;
2135
2136 RetryLookup: ;
2137
2138 /*
2139 * Lookup the faulting address.
2140 */
2141
2142 vm_map_lock_read(map);
2143
2144#define RETURN(why) \
2145 { \
2146 vm_map_unlock_read(map); \
2147 return(why); \
2148 }
2149
2150 /*
2151	 *	If the map has an interesting hint, try it before calling
2152	 *	the full-blown lookup routine.
2153 */
2154
2155 simple_lock(&map->hint_lock);
2156 entry = map->hint;
2157 simple_unlock(&map->hint_lock);
2158
2159 *out_entry = entry;
2160
2161 if ((entry == &map->header) ||
2162 (vaddr < entry->start) || (vaddr >= entry->end)) {
2163 vm_map_entry_t tmp_entry;
2164
2165 /*
2166 * Entry was either not a valid hint, or the vaddr
2167 * was not contained in the entry, so do a full lookup.
2168 */
2169 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2170 RETURN(KERN_INVALID_ADDRESS);
2171
2172 entry = tmp_entry;
2173 *out_entry = entry;
2174 }
2175
2176 /*
2177 * Handle submaps.
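	 *	(A submap occupies the same address range in its parent,
	 *	so the lookup simply restarts, with the same address, in
	 *	the map the entry names -- e.g. kmem_map within kernel_map.)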
2178 */
2179
2180 if (entry->is_sub_map) {
2181 vm_map_t old_map = map;
2182
2183 *var_map = map = entry->object.sub_map;
2184 vm_map_unlock_read(old_map);
2185 goto RetryLookup;
2186 }
2187
2188 /*
2189 * Check whether this task is allowed to have
2190 * this page.
2191 */
2192
2193 prot = entry->protection;
2194 if ((fault_type & (prot)) != fault_type)
2195 RETURN(KERN_PROTECTION_FAILURE);
2196
2197 /*
2198 * If this page is not pageable, we have to get
2199 * it for all possible accesses.
2200 */
2201
2202 if (*wired = (entry->wired_count != 0))
2203 prot = fault_type = entry->protection;
2204
2205 /*
2206 * If we don't already have a VM object, track
2207 * it down.
2208 */
2209
2210 if (su = !entry->is_a_map) {
2211 share_map = map;
2212 share_offset = vaddr;
2213 }
2214 else {
2215 vm_map_entry_t share_entry;
2216
2217 /*
2218 * Compute the sharing map, and offset into it.
2219 */
2220
2221 share_map = entry->object.share_map;
2222 share_offset = (vaddr - entry->start) + entry->offset;
2223
2224 /*
2225 * Look for the backing store object and offset
2226 */
2227
2228 vm_map_lock_read(share_map);
2229
2230 if (!vm_map_lookup_entry(share_map, share_offset,
2231 &share_entry)) {
2232 vm_map_unlock_read(share_map);
2233 RETURN(KERN_INVALID_ADDRESS);
2234 }
2235 entry = share_entry;
2236 }
2237
2238	/*
2239	 *	If the entry was copy-on-write, we either copy it now or just demote the access allowed.
2240	 */
2241
2242 if (entry->needs_copy) {
2243 /*
2244 * If we want to write the page, we may as well
2245 * handle that now since we've got the sharing
2246 * map locked.
2247 *
2248 * If we don't need to write the page, we just
2249 * demote the permissions allowed.
2250 */
2251
2252 if (fault_type & VM_PROT_WRITE) {
2253 /*
2254 * Make a new object, and place it in the
2255 * object chain. Note that no new references
2256 * have appeared -- one just moved from the
2257 * share map to the new object.
2258 */
2259
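			/*
			 *	lock_read_to_write() returns nonzero if the
			 *	upgrade cannot be granted; in that case the
			 *	read lock has been dropped as well, so the
			 *	whole lookup must be retried.
			 */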
2260 if (lock_read_to_write(&share_map->lock)) {
2261 if (share_map != map)
2262 vm_map_unlock_read(map);
2263 goto RetryLookup;
2264 }
2265
2266 vm_object_shadow(
2267 &entry->object.vm_object,
2268 &entry->offset,
2269 (vm_size_t) (entry->end - entry->start));
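			/*
			 *	The new shadow object starts out empty and
			 *	uses the old object as its backing store:
			 *	pages written from now on live in the shadow,
			 *	while untouched pages are still found in the
			 *	object underneath.
			 */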
2270
2271 entry->needs_copy = FALSE;
2272
2273 lock_write_to_read(&share_map->lock);
2274 }
2275 else {
2276 /*
2277 * We're attempting to read a copy-on-write
2278 * page -- don't allow writes.
2279 */
2280
2281 prot &= (~VM_PROT_WRITE);
2282 }
2283 }
2284
2285 /*
2286 * Create an object if necessary.
2287 */
2288 if (entry->object.vm_object == NULL) {
2289
2290 if (lock_read_to_write(&share_map->lock)) {
2291 if (share_map != map)
2292 vm_map_unlock_read(map);
2293 goto RetryLookup;
2294 }
2295
2296 entry->object.vm_object = vm_object_allocate(
2297 (vm_size_t)(entry->end - entry->start));
2298 entry->offset = 0;
2299 lock_write_to_read(&share_map->lock);
2300 }
2301
2302 /*
2303 * Return the object/offset from this entry. If the entry
2304 * was copy-on-write or empty, it has been fixed up.
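	 *	(For example -- numbers hypothetical -- a fault at
	 *	share_offset 0x5000 in an entry covering [0x4000, 0x8000)
	 *	whose entry->offset is 0x2000 yields object offset 0x3000.)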
2305 */
2306
2307 *offset = (share_offset - entry->start) + entry->offset;
2308 *object = entry->object.vm_object;
2309
2310 /*
2311 * Return whether this is the only map sharing this data.
2312 */
2313
2314 if (!su) {
2315 simple_lock(&share_map->ref_lock);
2316 su = (share_map->ref_count == 1);
2317 simple_unlock(&share_map->ref_lock);
2318 }
2319
2320 *out_prot = prot;
2321 *single_use = su;
2322
2323 return(KERN_SUCCESS);
2324
2325#undef RETURN
2326}
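
/*
 *	Illustrative sketch, not part of the original source: a minimal
 *	caller of the lookup interface above.  The function and variable
 *	names here are hypothetical; vm_fault() is the real client.
 */
#if 0
static int
example_fault(map, va, fault_type)
	vm_map_t map;
	vm_offset_t va;
	vm_prot_t fault_type;
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired, su;
	int result;

	result = vm_map_lookup(&map, va, fault_type, &entry,
			&object, &offset, &prot, &wired, &su);
	if (result != KERN_SUCCESS)
		return(result);

	/*
	 *	... find or page in the page at (object, offset) and
	 *	enter it in the pmap with protection "prot" ...
	 */

	vm_map_lookup_done(map, entry);		/* "map" may have changed */
	return(KERN_SUCCESS);
}
#endif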
2327
2328/*
2329 * vm_map_lookup_done:
2330 *
2331 * Releases locks acquired by a vm_map_lookup
2332 * (according to the handle returned by that lookup).
2333 */
2334
2335void vm_map_lookup_done(map, entry)
2336 register vm_map_t map;
2337 vm_map_entry_t entry;
2338{
2339 /*
2340 * If this entry references a map, unlock it first.
2341 */
2342
2343 if (entry->is_a_map)
2344 vm_map_unlock_read(entry->object.share_map);
2345
2346 /*
2347 * Unlock the main-level map
2348 */
2349
2350 vm_map_unlock_read(map);
2351}
2352
2353/*
2354 * Routine: vm_map_simplify
2355 * Purpose:
2356 * Attempt to simplify the map representation in
2357 * the vicinity of the given starting address.
2358 * Note:
2359 * This routine is intended primarily to keep the
2360 * kernel maps more compact -- they generally don't
2361 * benefit from the "expand a map entry" technology
2362 * at allocation time because the adjacent entry
2363 * is often wired down.
2364 */
2365void vm_map_simplify(map, start)
2366 vm_map_t map;
2367 vm_offset_t start;
2368{
2369 vm_map_entry_t this_entry;
2370 vm_map_entry_t prev_entry;
2371
2372 vm_map_lock(map);
2373 if (
2374 (vm_map_lookup_entry(map, start, &this_entry)) &&
2375 ((prev_entry = this_entry->prev) != &map->header) &&
2376
2377 (prev_entry->end == start) &&
2378 (map->is_main_map) &&
2379
2380 (prev_entry->is_a_map == FALSE) &&
2381 (prev_entry->is_sub_map == FALSE) &&
2382
2383 (this_entry->is_a_map == FALSE) &&
2384 (this_entry->is_sub_map == FALSE) &&
2385
2386 (prev_entry->inheritance == this_entry->inheritance) &&
2387 (prev_entry->protection == this_entry->protection) &&
2388 (prev_entry->max_protection == this_entry->max_protection) &&
2389 (prev_entry->wired_count == this_entry->wired_count) &&
2390
2391 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2392 (prev_entry->needs_copy == this_entry->needs_copy) &&
2393
2394 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2395 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2396 == this_entry->offset)
2397 ) {
2398 if (map->first_free == this_entry)
2399 map->first_free = prev_entry;
2400
2401 SAVE_HINT(map, prev_entry);
2402 vm_map_entry_unlink(map, this_entry);
2403 prev_entry->end = this_entry->end;
2404 vm_object_deallocate(this_entry->object.vm_object);
2405 vm_map_entry_dispose(map, this_entry);
2406 }
2407 vm_map_unlock(map);
2408}
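
/*
 *	Illustrative sketch, not part of the original source (addresses
 *	hypothetical): given two adjacent kernel-map entries
 *
 *		[0xf0100000, 0xf0102000)  object X, offset 0x0000
 *		[0xf0102000, 0xf0104000)  object X, offset 0x2000
 *
 *	a call such as vm_map_simplify(kernel_map, 0xf0102000) merges them
 *	into the single entry [0xf0100000, 0xf0104000), provided every
 *	attribute check above (protection, inheritance, wiring, ...) also
 *	holds.
 */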
2409
2410#if defined(DEBUG) || (NDDB > 0)
2411/*
2412 * vm_map_print: [ debug ]
2413 */
2414void vm_map_print(map, full)
2415 register vm_map_t map;
2416 boolean_t full;
2417{
2418 register vm_map_entry_t entry;
2419 extern int indent;
2420
2421 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2422 (map->is_main_map ? "Task" : "Share"),
2423 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2424 map->timestamp);
2425
2426 if (!full && indent)
2427 return;
2428
2429 indent += 2;
2430 for (entry = map->header.next; entry != &map->header;
2431 entry = entry->next) {
2432 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2433 (int) entry, (int) entry->start, (int) entry->end);
2434 if (map->is_main_map) {
2435 static char *inheritance_name[4] =
2436 { "share", "copy", "none", "donate_copy"};
2437 printf("prot=%x/%x/%s, ",
2438 entry->protection,
2439 entry->max_protection,
2440 inheritance_name[entry->inheritance]);
2441 if (entry->wired_count != 0)
2442 printf("wired, ");
2443 }
2444
2445 if (entry->is_a_map || entry->is_sub_map) {
2446 printf("share=0x%x, offset=0x%x\n",
2447 (int) entry->object.share_map,
2448 (int) entry->offset);
2449 if ((entry->prev == &map->header) ||
2450 (!entry->prev->is_a_map) ||
2451 (entry->prev->object.share_map !=
2452 entry->object.share_map)) {
2453 indent += 2;
2454 vm_map_print(entry->object.share_map, full);
2455 indent -= 2;
2456 }
2457
2458 }
2459 else {
2460 printf("object=0x%x, offset=0x%x",
2461 (int) entry->object.vm_object,
2462 (int) entry->offset);
2463 if (entry->copy_on_write)
2464 printf(", copy (%s)",
2465 entry->needs_copy ? "needed" : "done");
2466 printf("\n");
2467
2468 if ((entry->prev == &map->header) ||
2469 (entry->prev->is_a_map) ||
2470 (entry->prev->object.vm_object !=
2471 entry->object.vm_object)) {
2472 indent += 2;
2473 vm_object_print(entry->object.vm_object, full);
2474 indent -= 2;
2475 }
2476 }
2477 }
2478 indent -= 2;
2479}
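
/*
 *	Illustrative sketch, not part of the original source (all values
 *	made up): the routine above produces output shaped roughly like
 *
 *	Task map 0xf07d3e00: pmap=0xf07d3f54,ref=1,nentries=2,version=3
 *	  map entry 0xf0811a80: start=0x1000, end=0x21000, prot=5/7/copy, object=0xf081f200, offset=0x0
 *	  map entry 0xf0811ac0: start=0x21000, end=0x23000, prot=3/7/copy, wired, object=0xf081f2c0, offset=0x0
 */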
2480#endif /* defined(DEBUG) || (NDDB > 0) */