1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_map.c 7.3 (Berkeley) 4/21/91
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
65 * -------------------- ----- ----------------------
66 * CURRENT PATCH LEVEL: 1 00137
67 * -------------------- ----- ----------------------
68 *
69 * 08 Apr 93 Yuval Yarom Several VM system fixes
70 */
71
72/*
73 * Virtual memory mapping module.
74 */
75
76#include "param.h"
77#include "malloc.h"
78#include "vm.h"
79#include "vm_page.h"
80#include "vm_object.h"
81
82/*
83 * Virtual memory maps provide for the mapping, protection,
84 * and sharing of virtual memory objects. In addition,
85 * this module provides for an efficient virtual copy of
86 * memory from one map to another.
87 *
88 * Synchronization is required prior to most operations.
89 *
90 * Maps consist of an ordered doubly-linked list of simple
91 * entries; a single hint is used to speed up lookups.
92 *
93 * In order to properly represent the sharing of virtual
94 * memory regions among maps, the map structure is bi-level.
95 * Top-level ("address") maps refer to regions of sharable
96 * virtual memory. These regions are implemented as
97 * ("sharing") maps, which then refer to the actual virtual
98 * memory objects. When two address maps "share" memory,
99 * their top-level maps both have references to the same
100 * sharing map. When memory is virtual-copied from one
101 * address map to another, the references in the sharing
102 * maps are actually copied -- no copying occurs at the
103 * virtual memory object level.
104 *
105 * Since portions of maps are specified by start/end addresses,
106 * which may not align with existing map entries, all
107 * routines merely "clip" entries to these start/end values.
108 * [That is, an entry is split into two, bordering at a
109 * start or end value.] Note that these clippings may not
110 * always be necessary (as the two resulting entries are then
111 * not changed); however, the clipping is done for convenience.
112 * No attempt is currently made to "glue back together" two
113 * abutting entries.
114 *
115 * As mentioned above, virtual copy operations are performed
116 * by copying VM object references from one sharing map to
117 * another, and then marking both regions as copy-on-write.
118 * It is important to note that only one writeable reference
119 * to a VM object region exists in any map -- this means that
120 * shadow object creation can be delayed until a write operation
121 * occurs.
122 */
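/*
 * Illustrative sketch of the two-level structure described above:
 * two address maps sharing one region both point at the same
 * sharing map, and only the sharing map references the VM object.
 *
 *	address map A entry ---+
 *	                       +---> sharing map entry ---> vm_object
 *	address map B entry ---+
 *
 * A virtual copy duplicates the sharing-map references and marks
 * both regions copy-on-write; the object itself is not copied.
 */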
123
124/*
125 * vm_map_startup:
126 *
127 * Initialize the vm_map module. Must be called before
128 * any other vm_map routines.
129 *
130 * Map and entry structures are allocated from the general
131 * purpose memory pool with some exceptions:
132 *
133 * - The kernel map and kmem submap are allocated statically.
134 * - Kernel map entries are allocated out of a static pool.
135 *
136 * These restrictions are necessary since malloc() uses the
137 * maps and requires map entries.
138 */
139
140vm_offset_t kentry_data;
141vm_size_t kentry_data_size;
142vm_map_entry_t kentry_free;
143vm_map_t kmap_free;
144
145void vm_map_startup()
146{
147 register int i;
148 register vm_map_entry_t mep;
149 vm_map_t mp;
150
151 /*
152 * Static map structures for allocation before initialization of
153 * kernel map or kmem map. vm_map_create knows how to deal with them.
154 */
155 kmap_free = mp = (vm_map_t) kentry_data;
156 i = MAX_KMAP;
157 while (--i > 0) {
158 mp->header.next = (vm_map_entry_t) (mp + 1);
159 mp++;
160 }
161 mp++->header.next = NULL;
162
163 /*
164 * Form a free list of statically allocated kernel map entries
165 * with the rest.
166 */
167 kentry_free = mep = (vm_map_entry_t) mp;
168 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
169 while (--i > 0) {
170 mep->next = mep + 1;
171 mep++;
172 }
173 mep->next = NULL;
174}
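/*
 * Layout of the kentry_data area carved up above (a sketch): the
 * first MAX_KMAP * sizeof(struct vm_map) bytes become the static
 * free list of maps (kmap_free); the remainder is chopped into
 * struct vm_map_entry pieces and threaded onto kentry_free.  Both
 * pools exist only so the kernel's own maps can be built before
 * malloc() is usable.
 */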
175
176/*
177 * Allocate a vmspace structure, including a vm_map and pmap,
178 * and initialize those structures. The refcnt is set to 1.
179 * The remaining fields must be initialized by the caller.
180 */
181struct vmspace *
182vmspace_alloc(min, max, pageable)
183 vm_offset_t min, max;
184 int pageable;
185{
186 register struct vmspace *vm;
187
188 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
189 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
190 vm_map_init(&vm->vm_map, min, max, pageable);
191 pmap_pinit(&vm->vm_pmap);
192 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
193 vm->vm_refcnt = 1;
194 return (vm);
195}
196
197void
198vmspace_free(vm)
199 register struct vmspace *vm;
200{
201
202 if (--vm->vm_refcnt == 0) {
203 /*
204 * Lock the map, to wait out all other references to it.
205 * Delete all of the mappings and pages they hold,
206 * then call the pmap module to reclaim anything left.
207 */
208 vm_map_lock(&vm->vm_map);
209 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
210 vm->vm_map.max_offset);
211 pmap_release(&vm->vm_pmap);
212 FREE(vm, M_VMMAP);
213 }
214}
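/*
 * Rough usage sketch (VM_MIN_ADDRESS and VM_MAX_ADDRESS are assumed
 * here to be the machine-dependent user address bounds from the
 * vm_param headers):
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAX_ADDRESS, TRUE);
 *	... use vm->vm_map and vm->vm_pmap ...
 *	vmspace_free(vm);
 *
 * Additional holders should bump vm_refcnt; only the final
 * vmspace_free() tears down the map and releases the pmap.
 */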
215
216/*
217 * vm_map_create:
218 *
219 * Creates and returns a new empty VM map with
220 * the given physical map structure, and having
221 * the given lower and upper address bounds.
222 */
223vm_map_t vm_map_create(pmap, min, max, pageable)
224 pmap_t pmap;
225 vm_offset_t min, max;
226 boolean_t pageable;
227{
228 register vm_map_t result;
229 extern vm_map_t kernel_map, kmem_map;
230
231 if (kmem_map == NULL) {
232 result = kmap_free;
233 if (result == NULL)
234 panic("vm_map_create: out of maps");
235 kmap_free = (vm_map_t) result->header.next;
236 } else
237 MALLOC(result, vm_map_t, sizeof(struct vm_map),
238 M_VMMAP, M_WAITOK);
239
240 vm_map_init(result, min, max, pageable);
241 result->pmap = pmap;
242 return(result);
243}
244
245/*
246 * Initialize an existing vm_map structure
247 * such as that in the vmspace structure.
248 * The pmap is set elsewhere.
249 */
250void
251vm_map_init(map, min, max, pageable)
252 register struct vm_map *map;
253 vm_offset_t min, max;
254 boolean_t pageable;
255{
256 map->header.next = map->header.prev = &map->header;
257 map->nentries = 0;
258 map->size = 0;
259 map->ref_count = 1;
260 map->is_main_map = TRUE;
261 map->min_offset = min;
262 map->max_offset = max;
263 map->entries_pageable = pageable;
264 map->first_free = &map->header;
265 map->hint = &map->header;
266 map->timestamp = 0;
267 lock_init(&map->lock, TRUE);
268 simple_lock_init(&map->ref_lock);
269 simple_lock_init(&map->hint_lock);
270}
271
272/*
273 * vm_map_entry_create: [ internal use only ]
274 *
275 * Allocates a VM map entry for insertion.
276 * No entry fields are filled in.
277 */
278vm_map_entry_t vm_map_entry_create(map)
279 vm_map_t map;
280{
281 vm_map_entry_t entry;
282 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
283
284 if (map == kernel_map || map == kmem_map || map == mb_map
285 || map == buffer_map || map == pager_map) {
286 if (entry = kentry_free)
287 kentry_free = kentry_free->next;
288 } else
289 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
290 M_VMMAPENT, M_WAITOK);
291 if (entry == NULL)
292 panic("vm_map_entry_create: out of map entries");
293
294 return(entry);
295}
296
297/*
298 * vm_map_entry_dispose: [ internal use only ]
299 *
300 * Inverse of vm_map_entry_create.
301 */
302void vm_map_entry_dispose(map, entry)
303 vm_map_t map;
304 vm_map_entry_t entry;
305{
306 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
307
308 if (map == kernel_map || map == kmem_map || map == mb_map
309 || map == buffer_map || map == pager_map) {
310 entry->next = kentry_free;
311 kentry_free = entry;
312 } else
313 FREE(entry, M_VMMAPENT);
314}
315
316/*
317 * vm_map_entry_{un,}link:
318 *
319 * Insert/remove entries from maps.
320 */
321#define vm_map_entry_link(map, after_where, entry) \
322 { \
323 (map)->nentries++; \
324 (entry)->prev = (after_where); \
325 (entry)->next = (after_where)->next; \
326 (entry)->prev->next = (entry); \
327 (entry)->next->prev = (entry); \
328 }
329#define vm_map_entry_unlink(map, entry) \
330 { \
331 (map)->nentries--; \
332 (entry)->next->prev = (entry)->prev; \
333 (entry)->prev->next = (entry)->next; \
334 }
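/*
 * Both macros are plain doubly-linked-list splices around the
 * &map->header sentinel; for example, vm_map_entry_link(map,
 * &map->header, entry) puts "entry" at the front of the list and
 * bumps map->nentries.  Neither macro touches map->size or the
 * lookup hint -- callers update those themselves.
 */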
335
336/*
337 * vm_map_reference:
338 *
339 * Creates another valid reference to the given map.
340 *
341 */
342void vm_map_reference(map)
343 register vm_map_t map;
344{
345 if (map == NULL)
346 return;
347
348 simple_lock(&map->ref_lock);
349 map->ref_count++;
350 simple_unlock(&map->ref_lock);
351}
352
353/*
354 * vm_map_deallocate:
355 *
356 * Removes a reference from the specified map,
357 * destroying it if no references remain.
358 * The map should not be locked.
359 */
360void vm_map_deallocate(map)
361 register vm_map_t map;
362{
363 register int c;
364
365 if (map == NULL)
366 return;
367
368 simple_lock(&map->ref_lock);
369 c = --map->ref_count;
370 simple_unlock(&map->ref_lock);
371
372 if (c > 0) {
373 return;
374 }
375
376 /*
377 * Lock the map, to wait out all other references
378 * to it.
379 */
380
381 vm_map_lock(map);
382
383 (void) vm_map_delete(map, map->min_offset, map->max_offset);
384
385 pmap_destroy(map->pmap);
386
387 FREE(map, M_VMMAP);
388}
389
390/*
391 * vm_map_insert: [ internal use only ]
392 *
393 * Inserts the given whole VM object into the target
394 * map at the specified address range. The object's
395 * size should match that of the address range.
396 *
397 * Requires that the map be locked, and leaves it so.
398 */
399vm_map_insert(map, object, offset, start, end)
400 vm_map_t map;
401 vm_object_t object;
402 vm_offset_t offset;
403 vm_offset_t start;
404 vm_offset_t end;
405{
406 register vm_map_entry_t new_entry;
407 register vm_map_entry_t prev_entry;
408 vm_map_entry_t temp_entry;
409
410 /*
411 * Check that the start and end points are not bogus.
412 */
413
414 if ((start < map->min_offset) || (end > map->max_offset) ||
415 (start >= end))
416 return(KERN_INVALID_ADDRESS);
417
418 /*
419 * Find the entry prior to the proposed
420 * starting address; if it's part of an
421 * existing entry, this range is bogus.
422 */
423
424 if (vm_map_lookup_entry(map, start, &temp_entry))
425 return(KERN_NO_SPACE);
426
427 prev_entry = temp_entry;
428
429 /*
430 * Assert that the next entry doesn't overlap the
431 * end point.
432 */
433
434 if ((prev_entry->next != &map->header) &&
435 (prev_entry->next->start < end))
436 return(KERN_NO_SPACE);
437
438 /*
439 * See if we can avoid creating a new entry by
440 * extending one of our neighbors.
441 */
442
443 if (object == NULL) {
444 if ((prev_entry != &map->header) &&
445 (prev_entry->end == start) &&
446 (map->is_main_map) &&
447 (prev_entry->is_a_map == FALSE) &&
448 (prev_entry->is_sub_map == FALSE) &&
449 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
450 (prev_entry->protection == VM_PROT_DEFAULT) &&
451 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
452 (prev_entry->wired_count == 0)) {
453
454 if (vm_object_coalesce(prev_entry->object.vm_object,
455 NULL,
456 prev_entry->offset,
457 (vm_offset_t) 0,
458 (vm_size_t)(prev_entry->end
459 - prev_entry->start),
460 (vm_size_t)(end - prev_entry->end))) {
461 /*
462 * Coalesced the two objects - can extend
463 * the previous map entry to include the
464 * new range.
465 */
466 map->size += (end - prev_entry->end);
467 prev_entry->end = end;
468 return(KERN_SUCCESS);
469 }
470 }
471 }
472
473 /*
474 * Create a new entry
475 */
476
477 new_entry = vm_map_entry_create(map);
478 new_entry->start = start;
479 new_entry->end = end;
480
481 new_entry->is_a_map = FALSE;
482 new_entry->is_sub_map = FALSE;
483 new_entry->object.vm_object = object;
484 new_entry->offset = offset;
485
486 new_entry->copy_on_write = FALSE;
487 new_entry->needs_copy = FALSE;
488
489 if (map->is_main_map) {
490 new_entry->inheritance = VM_INHERIT_DEFAULT;
491 new_entry->protection = VM_PROT_DEFAULT;
492 new_entry->max_protection = VM_PROT_DEFAULT;
493 new_entry->wired_count = 0;
494 }
495
496 /*
497 * Insert the new entry into the list
498 */
499
500 vm_map_entry_link(map, prev_entry, new_entry);
501 map->size += new_entry->end - new_entry->start;
502
503 /*
504 * Update the free space hint
505 */
506
507 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
508 map->first_free = new_entry;
509
510 return(KERN_SUCCESS);
511}
512
513/*
514 * SAVE_HINT:
515 *
516 * Saves the specified entry as the hint for
517 * future lookups. Performs necessary interlocks.
518 */
519#define SAVE_HINT(map,value) \
520 simple_lock(&(map)->hint_lock); \
521 (map)->hint = (value); \
522 simple_unlock(&(map)->hint_lock);
523
524/*
525 * vm_map_lookup_entry: [ internal use only ]
526 *
527 * Finds the map entry containing (or
528 * immediately preceding) the specified address
529 * in the given map; the entry is returned
530 * in the "entry" parameter. The boolean
531 * result indicates whether the address is
532 * actually contained in the map.
533 */
534boolean_t vm_map_lookup_entry(map, address, entry)
535 register vm_map_t map;
536 register vm_offset_t address;
537 vm_map_entry_t *entry; /* OUT */
538{
539 register vm_map_entry_t cur;
540 register vm_map_entry_t last;
541
542 /*
543 * Start looking either from the head of the
544 * list, or from the hint.
545 */
546
547 simple_lock(&map->hint_lock);
548 cur = map->hint;
549 simple_unlock(&map->hint_lock);
550
551 if (cur == &map->header)
552 cur = cur->next;
553
554 if (address >= cur->start) {
555 /*
556 * Go from hint to end of list.
557 *
558 * But first, make a quick check to see if
559 * we are already looking at the entry we
560 * want (which is usually the case).
561 * Note also that we don't need to save the hint
562 * here... it is the same hint (unless we are
563 * at the header, in which case the hint didn't
564 * buy us anything anyway).
565 */
566 last = &map->header;
567 if ((cur != last) && (cur->end > address)) {
568 *entry = cur;
569 return(TRUE);
570 }
571 }
572 else {
573 /*
574 * Go from start to hint, *inclusively*
575 */
576 last = cur->next;
577 cur = map->header.next;
578 }
579
580 /*
581 * Search linearly
582 */
583
584 while (cur != last) {
585 if (cur->end > address) {
586 if (address >= cur->start) {
587 /*
588 * Save this lookup for future
589 * hints, and return
590 */
591
592 *entry = cur;
593 SAVE_HINT(map, cur);
594 return(TRUE);
595 }
596 break;
597 }
598 cur = cur->next;
599 }
600 *entry = cur->prev;
601 SAVE_HINT(map, *entry);
602 return(FALSE);
603}
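/*
 * Summary of the lookup protocol implemented above: if the hint
 * already covers "address" the entry is returned without a scan;
 * otherwise a linear walk is done and the hint is updated.  On
 * failure the routine still returns, in *entry, the entry that
 * precedes the address (possibly &map->header), which is exactly
 * what vm_map_insert and the clipping macros expect.
 */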
604
605/*
606 * vm_map_find finds an unallocated region in the target address
607 * map with the given length. The search is defined to be
608 * first-fit from the specified address; the region found is
609 * returned in the same parameter.
610 *
611 */
612vm_map_find(map, object, offset, addr, length, find_space)
613 vm_map_t map;
614 vm_object_t object;
615 vm_offset_t offset;
616 vm_offset_t *addr; /* IN/OUT */
617 vm_size_t length;
618 boolean_t find_space;
619{
620 register vm_map_entry_t entry;
621 register vm_offset_t start;
622 register vm_offset_t end;
623 int result;
624
625 start = *addr;
626
627 vm_map_lock(map);
628
629 if (find_space) {
630 /*
631 * Calculate the first possible address.
632 */
633
634 if (start < map->min_offset)
635 start = map->min_offset;
636 if (start > map->max_offset) {
637 vm_map_unlock(map);
638 return (KERN_NO_SPACE);
639 }
640
641 /*
642 * Look for the first possible address;
643 * if there's already something at this
644 * address, we have to start after it.
645 */
646
647 if (start == map->min_offset) {
648 if ((entry = map->first_free) != &map->header)
649 start = entry->end;
650 } else {
651 vm_map_entry_t tmp_entry;
652 if (vm_map_lookup_entry(map, start, &tmp_entry))
653 start = tmp_entry->end;
654 entry = tmp_entry;
655 }
656
657 /*
658 * In any case, the "entry" always precedes
659 * the proposed new region throughout the
660 * loop:
661 */
662
663 while (TRUE) {
664 register vm_map_entry_t next;
665
666 /*
667 * Find the end of the proposed new region.
668 * Be sure we didn't go beyond the end, or
669 * wrap around the address.
670 */
671
672 end = start + length;
673
674 if ((end > map->max_offset) || (end < start)) {
675 vm_map_unlock(map);
676 return (KERN_NO_SPACE);
677 }
678
679 /*
680 * If there are no more entries, we must win.
681 */
682
683 next = entry->next;
684 if (next == &map->header)
685 break;
686
687 /*
688 * If there is another entry, it must be
689 * after the end of the potential new region.
690 */
691
692 if (next->start >= end)
693 break;
694
695 /*
696 * Didn't fit -- move to the next entry.
697 */
698
699 entry = next;
700 start = entry->end;
701 }
702 *addr = start;
703
704 SAVE_HINT(map, entry);
705 }
706
707 result = vm_map_insert(map, object, offset, start, start + length);
708
709 vm_map_unlock(map);
710 return(result);
711}
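/*
 * Typical call, sketched (the caller supplies a lower bound and the
 * first-fit search chooses the final address; "size" is whatever
 * amount the caller needs):
 *
 *	vm_offset_t addr;
 *	int rv;
 *
 *	addr = vm_map_min(map);
 *	rv = vm_map_find(map, NULL, (vm_offset_t) 0, &addr, size, TRUE);
 *	if (rv != KERN_SUCCESS)
 *		... no room in the map ...
 *
 * On success *addr holds the start of the newly inserted range
 * [addr, addr + size).  With find_space == FALSE the given address
 * is used as-is and vm_map_insert does the range checking.
 */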
712
713/*
714 * vm_map_simplify_entry: [ internal use only ]
715 *
716 * Simplify the given map entry by:
717 * removing extra sharing maps
718 * [XXX maybe later] merging with a neighbor
719 */
720void vm_map_simplify_entry(map, entry)
721 vm_map_t map;
722 vm_map_entry_t entry;
723{
724#ifdef lint
725 map++;
726#endif lint
727
728 /*
729 * If this entry corresponds to a sharing map, then
730 * see if we can remove the level of indirection.
731 * If it's not a sharing map, then it points to
732 * a VM object, so see if we can merge with either
733 * of our neighbors.
734 */
735
736 if (entry->is_sub_map)
737 return;
738 if (entry->is_a_map) {
739#if 0
740 vm_map_t my_share_map;
741 int count;
742
743 my_share_map = entry->object.share_map;
744 simple_lock(&my_share_map->ref_lock);
745 count = my_share_map->ref_count;
746 simple_unlock(&my_share_map->ref_lock);
747
748 if (count == 1) {
749 /* Can move the region from
750 * entry->start to entry->end (+ entry->offset)
751 * in my_share_map into place of entry.
752 * Later.
753 */
754 }
755#endif 0
756 }
757 else {
758 /*
759 * Try to merge with our neighbors.
760 *
761 * Conditions for merge are:
762 *
763 * 1. entries are adjacent.
764 * 2. both entries point to objects
765 * with null pagers.
766 *
767 * If a merge is possible, we replace the two
768 * entries with a single entry, then merge
769 * the two objects into a single object.
770 *
771 * Now, all that is left to do is write the
772 * code!
773 */
774 }
775}
776
777/*
778 * vm_map_clip_start: [ internal use only ]
779 *
780 * Asserts that the given entry begins at or after
781 * the specified address; if necessary,
782 * it splits the entry into two.
783 */
784#define vm_map_clip_start(map, entry, startaddr) \
785{ \
786 if (startaddr > entry->start) \
787 _vm_map_clip_start(map, entry, startaddr); \
788}
789
790/*
791 * This routine is called only when it is known that
792 * the entry must be split.
793 */
794void _vm_map_clip_start(map, entry, start)
795 register vm_map_t map;
796 register vm_map_entry_t entry;
797 register vm_offset_t start;
798{
799 register vm_map_entry_t new_entry;
800
801 /*
802 * See if we can simplify this entry first
803 */
804
805 vm_map_simplify_entry(map, entry);
806
807 /*
808 * Split off the front portion --
809 * note that we must insert the new
810 * entry BEFORE this one, so that
811 * this entry has the specified starting
812 * address.
813 */
814
815 new_entry = vm_map_entry_create(map);
816 *new_entry = *entry;
817
818 new_entry->end = start;
819 entry->offset += (start - entry->start);
820 entry->start = start;
821
822 vm_map_entry_link(map, entry->prev, new_entry);
823
824 if (entry->is_a_map || entry->is_sub_map)
825 vm_map_reference(new_entry->object.share_map);
826 else
827 vm_object_reference(new_entry->object.vm_object);
828}
829
830/*
831 * vm_map_clip_end: [ internal use only ]
832 *
833 * Asserts that the given entry ends at or before
834 * the specified address; if necessary,
835 * it splits the entry into two.
836 */
837
838void _vm_map_clip_end();
839#define vm_map_clip_end(map, entry, endaddr) \
840{ \
841 if (endaddr < entry->end) \
842 _vm_map_clip_end(map, entry, endaddr); \
843}
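/*
 * Clipping example: if an entry covers [A, A + 2 * PAGE_SIZE) and a
 * request starts at A + PAGE_SIZE, vm_map_clip_start splits it into
 * [A, A + PAGE_SIZE) and [A + PAGE_SIZE, A + 2 * PAGE_SIZE), taking
 * an extra reference on the underlying object or sharing map for the
 * new half.  vm_map_clip_end is the mirror image at the high end.
 */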
844
845/*
846 * This routine is called only when it is known that
847 * the entry must be split.
848 */
849void _vm_map_clip_end(map, entry, end)
850 register vm_map_t map;
851 register vm_map_entry_t entry;
852 register vm_offset_t end;
853{
854 register vm_map_entry_t new_entry;
855
856 /*
857 * Create a new entry and insert it
858 * AFTER the specified entry
859 */
860
861 new_entry = vm_map_entry_create(map);
862 *new_entry = *entry;
863
864 new_entry->start = entry->end = end;
865 new_entry->offset += (end - entry->start);
866
867 vm_map_entry_link(map, entry, new_entry);
868
869 if (entry->is_a_map || entry->is_sub_map)
870 vm_map_reference(new_entry->object.share_map);
871 else
872 vm_object_reference(new_entry->object.vm_object);
873}
874
875/*
876 * VM_MAP_RANGE_CHECK: [ internal use only ]
877 *
878 * Asserts that the starting and ending region
879 * addresses fall within the valid range of the map.
880 */
881#define VM_MAP_RANGE_CHECK(map, start, end) \
882 { \
883 if (start < vm_map_min(map)) \
884 start = vm_map_min(map); \
885 if (end > vm_map_max(map)) \
886 end = vm_map_max(map); \
887 if (start > end) \
888 start = end; \
889 }
890
891/*
892 * vm_map_submap: [ kernel use only ]
893 *
894 * Mark the given range as handled by a subordinate map.
895 *
896 * This range must have been created with vm_map_find,
897 * and no other operations may have been performed on this
898 * range prior to calling vm_map_submap.
899 *
900 * Only a limited number of operations can be performed
901 * within this range after calling vm_map_submap:
902 * vm_fault
903 * [Don't try vm_map_copy!]
904 *
905 * To remove a submapping, one must first remove the
906 * range from the superior map, and then destroy the
907 * submap (if desired). [Better yet, don't try it.]
908 */
909vm_map_submap(map, start, end, submap)
910 register vm_map_t map;
911 register vm_offset_t start;
912 register vm_offset_t end;
913 vm_map_t submap;
914{
915 vm_map_entry_t entry;
916 register int result = KERN_INVALID_ARGUMENT;
917
918 vm_map_lock(map);
919
920 VM_MAP_RANGE_CHECK(map, start, end);
921
922 if (vm_map_lookup_entry(map, start, &entry)) {
923 vm_map_clip_start(map, entry, start);
924 }
925 else
926 entry = entry->next;
927
928 vm_map_clip_end(map, entry, end);
929
930 if ((entry->start == start) && (entry->end == end) &&
931 (!entry->is_a_map) &&
932 (entry->object.vm_object == NULL) &&
933 (!entry->copy_on_write)) {
934 entry->is_a_map = FALSE;
935 entry->is_sub_map = TRUE;
936 vm_map_reference(entry->object.sub_map = submap);
937 result = KERN_SUCCESS;
938 }
939 vm_map_unlock(map);
940
941 return(result);
942}
943
944/*
945 * vm_map_protect:
946 *
947 * Sets the protection of the specified address
948 * region in the target map. If "set_max" is
949 * specified, the maximum protection is to be set;
950 * otherwise, only the current protection is affected.
951 */
952vm_map_protect(map, start, end, new_prot, set_max)
953 register vm_map_t map;
954 register vm_offset_t start;
955 register vm_offset_t end;
956 register vm_prot_t new_prot;
957 register boolean_t set_max;
958{
959 register vm_map_entry_t current;
960 vm_map_entry_t entry;
961
962 vm_map_lock(map);
963
964 VM_MAP_RANGE_CHECK(map, start, end);
965
966 if (vm_map_lookup_entry(map, start, &entry)) {
967 vm_map_clip_start(map, entry, start);
968 }
969 else
970 entry = entry->next;
971
972 /*
973 * Make a first pass to check for protection
974 * violations.
975 */
976
977 current = entry;
978 while ((current != &map->header) && (current->start < end)) {
979 if (current->is_sub_map) {
 vm_map_unlock(map);
980 return(KERN_INVALID_ARGUMENT);
 }
981 if ((new_prot & current->max_protection) != new_prot) {
982 vm_map_unlock(map);
983 return(KERN_PROTECTION_FAILURE);
984 }
985
986 current = current->next;
987 }
988
989 /*
990 * Go back and fix up protections.
991 * [Note that clipping is not necessary the second time.]
992 */
993
994 current = entry;
995
996 while ((current != &map->header) && (current->start < end)) {
997 vm_prot_t old_prot;
998
999 vm_map_clip_end(map, current, end);
1000
1001 old_prot = current->protection;
1002 if (set_max)
1003 current->protection =
1004 (current->max_protection = new_prot) &
1005 old_prot;
1006 else
1007 current->protection = new_prot;
1008
1009 /*
1010 * Update physical map if necessary.
1011 * Worry about copy-on-write here -- CHECK THIS XXX
1012 */
1013
1014 if (current->protection != old_prot) {
1015
1016#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1017 VM_PROT_ALL)
1018#define max(a,b) ((a) > (b) ? (a) : (b))
1019
1020 if (current->is_a_map) {
1021 vm_map_entry_t share_entry;
1022 vm_offset_t share_end;
1023
1024 vm_map_lock(current->object.share_map);
1025 (void) vm_map_lookup_entry(
1026 current->object.share_map,
1027 current->offset,
1028 &share_entry);
1029 share_end = current->offset +
1030 (current->end - current->start);
1031 while ((share_entry !=
1032 &current->object.share_map->header) &&
1033 (share_entry->start < share_end)) {
1034
1035 pmap_protect(map->pmap,
1036 (max(share_entry->start,
1037 current->offset) -
1038 current->offset +
1039 current->start),
1040 min(share_entry->end,
1041 share_end) -
1042 current->offset +
1043 current->start,
1044 current->protection &
1045 MASK(share_entry));
1046
1047 share_entry = share_entry->next;
1048 }
1049 vm_map_unlock(current->object.share_map);
1050 }
1051 else
1052 pmap_protect(map->pmap, current->start,
1053 current->end,
1054 current->protection & MASK(current));
1055#undef max
1056#undef MASK
1057 }
1058 current = current->next;
1059 }
1060
1061 vm_map_unlock(map);
1062 return(KERN_SUCCESS);
1063}
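/*
 * Sketch of a typical call: make a range read-only without lowering
 * its maximum protection, so writing can later be re-enabled:
 *
 *	(void) vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * Passing set_max == TRUE instead lowers max_protection as well, and
 * the current protection is clipped to the new maximum.
 */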
1064
1065/*
1066 * vm_map_inherit:
1067 *
1068 * Sets the inheritance of the specified address
1069 * range in the target map. Inheritance
1070 * affects how the map will be shared with
1071 * child maps at the time of vm_map_fork.
1072 */
1073vm_map_inherit(map, start, end, new_inheritance)
1074 register vm_map_t map;
1075 register vm_offset_t start;
1076 register vm_offset_t end;
1077 register vm_inherit_t new_inheritance;
1078{
1079 register vm_map_entry_t entry;
1080 vm_map_entry_t temp_entry;
1081
1082 switch (new_inheritance) {
1083 case VM_INHERIT_NONE:
1084 case VM_INHERIT_COPY:
1085 case VM_INHERIT_SHARE:
1086 break;
1087 default:
1088 return(KERN_INVALID_ARGUMENT);
1089 }
1090
1091 vm_map_lock(map);
1092
1093 VM_MAP_RANGE_CHECK(map, start, end);
1094
1095 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1096 entry = temp_entry;
1097 vm_map_clip_start(map, entry, start);
1098 }
1099 else
1100 entry = temp_entry->next;
1101
1102 while ((entry != &map->header) && (entry->start < end)) {
1103 vm_map_clip_end(map, entry, end);
1104
1105 entry->inheritance = new_inheritance;
1106
1107 entry = entry->next;
1108 }
1109
1110 vm_map_unlock(map);
1111 return(KERN_SUCCESS);
1112}
1113
1114/*
1115 * vm_map_pageable:
1116 *
1117 * Sets the pageability of the specified address
1118 * range in the target map. Regions specified
1119 * as not pageable require locked-down physical
1120 * memory and physical page maps.
1121 *
1122 * The map must not be locked, but a reference
1123 * must remain to the map throughout the call.
1124 */
1125vm_map_pageable(map, start, end, new_pageable)
1126 register vm_map_t map;
1127 register vm_offset_t start;
1128 register vm_offset_t end;
1129 register boolean_t new_pageable;
1130{
1131 register vm_map_entry_t entry;
1132 vm_map_entry_t temp_entry;
1133 register vm_offset_t failed;
1134 int rv;
1135
1136 vm_map_lock(map);
1137
1138 VM_MAP_RANGE_CHECK(map, start, end);
1139
1140 /*
1141 * Only one pageability change may take place at one
1142 * time, since vm_fault assumes it will be called
1143 * only once for each wiring/unwiring. Therefore, we
1144 * have to make sure we're actually changing the pageability
1145 * for the entire region. We do so before making any changes.
1146 */
1147
1148 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1149 entry = temp_entry;
1150 vm_map_clip_start(map, entry, start);
1151 }
1152 else
1153 entry = temp_entry->next;
1154 temp_entry = entry;
1155
1156 /*
1157 * Actions are rather different for wiring and unwiring,
1158 * so we have two separate cases.
1159 */
1160
1161 if (new_pageable) {
1162
1163 /*
1164 * Unwiring. First ensure that the range to be
1165 * unwired is really wired down.
1166 */
1167 while ((entry != &map->header) && (entry->start < end)) {
1168
1169 if (entry->wired_count == 0) {
1170 vm_map_unlock(map);
1171 return(KERN_INVALID_ARGUMENT);
1172 }
1173 entry = entry->next;
1174 }
1175
1176 /*
1177 * Now decrement the wiring count for each region.
1178 * If a region becomes completely unwired,
1179 * unwire its physical pages and mappings.
1180 */
1181 lock_set_recursive(&map->lock);
1182
1183 entry = temp_entry;
1184 while ((entry != &map->header) && (entry->start < end)) {
1185 vm_map_clip_end(map, entry, end);
1186
1187 entry->wired_count--;
1188 if (entry->wired_count == 0)
1189 vm_fault_unwire(map, entry->start, entry->end);
1190
1191 entry = entry->next;
1192 }
1193 lock_clear_recursive(&map->lock);
1194 }
1195
1196 else {
1197 /*
1198 * Wiring. We must do this in two passes:
1199 *
1200 * 1. Holding the write lock, we increment the
1201 * wiring count. For any area that is not already
1202 * wired, we create any shadow objects that need
1203 * to be created.
1204 *
1205 * 2. We downgrade to a read lock, and call
1206 * vm_fault_wire to fault in the pages for any
1207 * newly wired area (wired_count is 1).
1208 *
1209 * Downgrading to a read lock for vm_fault_wire avoids
1210 * a possible deadlock with another thread that may have
1211 * faulted on one of the pages to be wired (it would mark
1212 * the page busy, blocking us, then in turn block on the
1213 * map lock that we hold). Because of problems in the
1214 * recursive lock package, we cannot upgrade to a write
1215 * lock in vm_map_lookup. Thus, any actions that require
1216 * the write lock must be done beforehand. Because we
1217 * keep the read lock on the map, the copy-on-write status
1218 * of the entries we modify here cannot change.
1219 */
1220
1221 /*
1222 * Pass 1.
1223 */
1224 entry = temp_entry;
1225 while ((entry != &map->header) && (entry->start < end)) {
1226 vm_map_clip_end(map, entry, end);
1227
1228 entry->wired_count++;
1229 if (entry->wired_count == 1) {
1230
1231 /*
1232 * Perform actions of vm_map_lookup that need
1233 * the write lock on the map: create a shadow
1234 * object for a copy-on-write region, or an
1235 * object for a zero-fill region.
1236 *
1237 * We don't have to do this for entries that
1238 * point to sharing maps, because we won't hold
1239 * the lock on the sharing map.
1240 */
1241 if (!entry->is_a_map) {
1242 if (entry->needs_copy &&
1243 ((entry->protection & VM_PROT_WRITE) != 0)) {
1244
1245 vm_object_shadow(&entry->object.vm_object,
1246 &entry->offset,
1247 (vm_size_t)(entry->end
1248 - entry->start));
1249 entry->needs_copy = FALSE;
1250 }
1251 else if (entry->object.vm_object == NULL) {
1252 entry->object.vm_object =
1253 vm_object_allocate((vm_size_t)(entry->end
1254 - entry->start));
1255 entry->offset = (vm_offset_t)0;
1256 }
1257 }
1258 }
1259
1260 entry = entry->next;
1261 }
1262
1263 /*
1264 * Pass 2.
1265 */
1266
1267 /*
1268 * HACK HACK HACK HACK
1269 *
1270 * If we are wiring in the kernel map or a submap of it,
1271 * unlock the map to avoid deadlocks. We trust that the
1272 * kernel threads are well-behaved, and therefore will
1273 * not do anything destructive to this region of the map
1274 * while we have it unlocked. We cannot trust user threads
1275 * to do the same.
1276 *
1277 * HACK HACK HACK HACK
1278 */
1279 if (vm_map_pmap(map) == kernel_pmap) {
1280 vm_map_unlock(map); /* trust me ... */
1281 }
1282 else {
1283 lock_set_recursive(&map->lock);
1284 lock_write_to_read(&map->lock);
1285 }
1286
1287 rv = 0;
1288 entry = temp_entry;
1289 while (entry != &map->header && entry->start < end) {
1290 /*
1291 * If vm_fault_wire fails for any page we need to
1292 * undo what has been done. We decrement the wiring
1293 * count for those pages which have not yet been
1294 * wired (now) and unwire those that have (later).
1295 *
1296 * XXX this violates the locking protocol on the map,
1297 * needs to be fixed.
1298 */
1299 if (rv)
1300 entry->wired_count--;
1301 else if (entry->wired_count == 1) {
1302 rv = vm_fault_wire(map, entry->start, entry->end);
1303 if (rv) {
1304 failed = entry->start;
1305 entry->wired_count--;
1306 }
1307 }
1308 entry = entry->next;
1309 }
1310
1311 if (vm_map_pmap(map) == kernel_pmap) {
1312 vm_map_lock(map);
1313 }
1314 else {
1315 lock_clear_recursive(&map->lock);
1316 }
1317 if (rv) {
1318 vm_map_unlock(map);
1319 (void) vm_map_pageable(map, start, failed, TRUE);
1320 return(rv);
1321 }
1322 }
1323
1324 vm_map_unlock(map);
1325
1326 return(KERN_SUCCESS);
1327}
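/*
 * Sketch: wiring and later unwiring a range (new_pageable == FALSE
 * wires, TRUE unwires):
 *
 *	(void) vm_map_pageable(map, addr, addr + size, FALSE);
 *	... the range is now backed by resident, wired pages ...
 *	(void) vm_map_pageable(map, addr, addr + size, TRUE);
 *
 * A partially failed wiring is rolled back above by unwiring
 * [start, failed) before returning the error.
 */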
1328
1329/*
1330 * vm_map_entry_unwire: [ internal use only ]
1331 *
1332 * Make the region specified by this entry pageable.
1333 *
1334 * The map in question should be locked.
1335 * [This is the reason for this routine's existence.]
1336 */
1337void vm_map_entry_unwire(map, entry)
1338 vm_map_t map;
1339 register vm_map_entry_t entry;
1340{
1341 vm_fault_unwire(map, entry->start, entry->end);
1342 entry->wired_count = 0;
1343}
1344
1345/*
1346 * vm_map_entry_delete: [ internal use only ]
1347 *
1348 * Deallocate the given entry from the target map.
1349 */
1350void vm_map_entry_delete(map, entry)
1351 register vm_map_t map;
1352 register vm_map_entry_t entry;
1353{
1354 if (entry->wired_count != 0)
1355 vm_map_entry_unwire(map, entry);
1356
1357 vm_map_entry_unlink(map, entry);
1358 map->size -= entry->end - entry->start;
1359
1360 if (entry->is_a_map || entry->is_sub_map)
1361 vm_map_deallocate(entry->object.share_map);
1362 else
1363 vm_object_deallocate(entry->object.vm_object);
1364
1365 vm_map_entry_dispose(map, entry);
1366}
1367
1368/*
1369 * vm_map_delete: [ internal use only ]
1370 *
1371 * Deallocates the given address range from the target
1372 * map.
1373 *
1374 * When called with a sharing map, removes pages from
1375 * that region from all physical maps.
1376 */
1377vm_map_delete(map, start, end)
1378 register vm_map_t map;
1379 vm_offset_t start;
1380 register vm_offset_t end;
1381{
1382 register vm_map_entry_t entry;
1383 vm_map_entry_t first_entry;
1384
1385 /*
1386 * Find the start of the region, and clip it
1387 */
1388
1389 if (!vm_map_lookup_entry(map, start, &first_entry))
1390 entry = first_entry->next;
1391 else {
1392 entry = first_entry;
1393 vm_map_clip_start(map, entry, start);
1394
1395 /*
1396 * Fix the lookup hint now, rather than each
1397 * time through the loop.
1398 */
1399
1400 SAVE_HINT(map, entry->prev);
1401 }
1402
1403 /*
1404 * Save the free space hint
1405 */
1406
1407 if (map->first_free->start >= start)
1408 map->first_free = entry->prev;
1409
1410 /*
1411 * Step through all entries in this region
1412 */
1413
1414 while ((entry != &map->header) && (entry->start < end)) {
1415 vm_map_entry_t next;
1416 register vm_offset_t s, e;
1417 register vm_object_t object;
1418
1419 vm_map_clip_end(map, entry, end);
1420
1421 next = entry->next;
1422 s = entry->start;
1423 e = entry->end;
1424
1425 /*
1426 * Unwire before removing addresses from the pmap;
1427 * otherwise, unwiring will put the entries back in
1428 * the pmap.
1429 */
1430
1431 object = entry->object.vm_object;
1432 if (entry->wired_count != 0)
1433 vm_map_entry_unwire(map, entry);
1434
1435 /*
1436 * If this is a sharing map, we must remove
1437 * *all* references to this data, since we can't
1438 * find all of the physical maps which are sharing
1439 * it.
1440 */
1441
1442 if (object == kernel_object || object == kmem_object)
1443 vm_object_page_remove(object, entry->offset,
1444 entry->offset + (e - s));
1445 else if (!map->is_main_map)
1446 vm_object_pmap_remove(object,
1447 entry->offset,
1448 entry->offset + (e - s));
1449 else
1450 pmap_remove(map->pmap, s, e);
1451
1452 /*
1453 * Delete the entry (which may delete the object)
1454 * only after removing all pmap entries pointing
1455 * to its pages. (Otherwise, its page frames may
1456 * be reallocated, and any modify bits will be
1457 * set in the wrong object!)
1458 */
1459
1460 vm_map_entry_delete(map, entry);
1461 entry = next;
1462 }
1463 return(KERN_SUCCESS);
1464}
1465
1466/*
1467 * vm_map_remove:
1468 *
1469 * Remove the given address range from the target map.
1470 * This is the exported form of vm_map_delete.
1471 */
1472vm_map_remove(map, start, end)
1473 register vm_map_t map;
1474 register vm_offset_t start;
1475 register vm_offset_t end;
1476{
1477 register int result;
1478
1479 vm_map_lock(map);
1480 VM_MAP_RANGE_CHECK(map, start, end);
1481 result = vm_map_delete(map, start, end);
1482 vm_map_unlock(map);
1483
1484 return(result);
1485}
1486
1487/*
1488 * vm_map_check_protection:
1489 *
1490 * Assert that the target map allows the specified
1491 * privilege on the entire address region given.
1492 * The entire region must be allocated.
1493 */
1494boolean_t vm_map_check_protection(map, start, end, protection)
1495 register vm_map_t map;
1496 register vm_offset_t start;
1497 register vm_offset_t end;
1498 register vm_prot_t protection;
1499{
1500 register vm_map_entry_t entry;
1501 vm_map_entry_t tmp_entry;
1502
1503 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1504 return(FALSE);
1505 }
1506
1507 entry = tmp_entry;
1508
1509 while (start < end) {
1510 if (entry == &map->header) {
1511 return(FALSE);
1512 }
1513
1514 /*
1515 * No holes allowed!
1516 */
1517
1518 if (start < entry->start) {
1519 return(FALSE);
1520 }
1521
1522 /*
1523 * Check protection associated with entry.
1524 */
1525
1526 if ((entry->protection & protection) != protection) {
1527 return(FALSE);
1528 }
1529
1530 /* go to next entry */
1531
1532 start = entry->end;
1533 entry = entry->next;
1534 }
1535 return(TRUE);
1536}
1537
1538/*
1539 * vm_map_copy_entry:
1540 *
1541 * Copies the contents of the source entry to the destination
1542 * entry. The entries *must* be aligned properly.
1543 */
1544void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1545 vm_map_t src_map, dst_map;
1546 register vm_map_entry_t src_entry, dst_entry;
1547{
1548 vm_object_t temp_object;
1549
1550 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1551 return;
1552
1553 if (dst_entry->object.vm_object != NULL &&
1554 !dst_entry->object.vm_object->internal)
1555 printf("vm_map_copy_entry: copying over permanent data!\n");
1556
1557 /*
1558 * If our destination map was wired down,
1559 * unwire it now.
1560 */
1561
1562 if (dst_entry->wired_count != 0)
1563 vm_map_entry_unwire(dst_map, dst_entry);
1564
1565 /*
1566 * If we're dealing with a sharing map, we
1567 * must remove the destination pages from
1568 * all maps (since we cannot know which maps
1569 * this sharing map belongs in).
1570 */
1571
1572 if (dst_map->is_main_map)
1573 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1574 else
1575 vm_object_pmap_remove(dst_entry->object.vm_object,
1576 dst_entry->offset,
1577 dst_entry->offset +
1578 (dst_entry->end - dst_entry->start));
1579
1580 if (src_entry->wired_count == 0) {
1581
1582 boolean_t src_needs_copy;
1583
1584 /*
1585 * If the source entry is marked needs_copy,
1586 * it is already write-protected.
1587 */
1588 if (!src_entry->needs_copy) {
1589
1590 boolean_t su;
1591
1592 /*
1593 * If the source entry has only one mapping,
1594 * we can just protect the virtual address
1595 * range.
1596 */
1597 if (!(su = src_map->is_main_map)) {
1598 simple_lock(&src_map->ref_lock);
1599 su = (src_map->ref_count == 1);
1600 simple_unlock(&src_map->ref_lock);
1601 }
1602
1603 if (su) {
1604 pmap_protect(src_map->pmap,
1605 src_entry->start,
1606 src_entry->end,
1607 src_entry->protection & ~VM_PROT_WRITE);
1608 }
1609 else {
1610 vm_object_pmap_copy(src_entry->object.vm_object,
1611 src_entry->offset,
1612 src_entry->offset + (src_entry->end
1613 -src_entry->start));
1614 }
1615 }
1616
1617 /*
1618 * Make a copy of the object.
1619 */
1620 temp_object = dst_entry->object.vm_object;
1621 vm_object_copy(src_entry->object.vm_object,
1622 src_entry->offset,
1623 (vm_size_t)(src_entry->end -
1624 src_entry->start),
1625 &dst_entry->object.vm_object,
1626 &dst_entry->offset,
1627 &src_needs_copy);
1628 /*
1629 * If we didn't get a copy-object now, mark the
1630 * source map entry so that a shadow will be created
1631 * to hold its changed pages.
1632 */
1633 if (src_needs_copy)
1634 src_entry->needs_copy = TRUE;
1635
1636 /*
1637 * The destination always needs to have a shadow
1638 * created.
1639 */
1640 dst_entry->needs_copy = TRUE;
1641
1642 /*
1643 * Mark the entries copy-on-write, so that write-enabling
1644 * the entry won't make copy-on-write pages writable.
1645 */
1646 src_entry->copy_on_write = TRUE;
1647 dst_entry->copy_on_write = TRUE;
1648 /*
1649 * Get rid of the old object.
1650 */
1651 vm_object_deallocate(temp_object);
1652
1653 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1654 dst_entry->end - dst_entry->start, src_entry->start);
1655 }
1656 else {
1657 /*
1658 * Of course, wired down pages can't be set copy-on-write.
1659 * Cause wired pages to be copied into the new
1660 * map by simulating faults (the new pages are
1661 * pageable)
1662 */
1663 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1664 }
1665}
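/*
 * In short: for unwired source entries the copy is symbolic -- the
 * source is write-protected, the object reference is copied, both
 * entries are marked copy-on-write, and the destination is marked
 * needs_copy, so shadow objects are created lazily at the first
 * write fault.  Wired source entries cannot be write-protected, so
 * vm_fault_copy_entry copies the pages immediately instead.
 */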
1666
1667/*
1668 * vm_map_copy:
1669 *
1670 * Perform a virtual memory copy from the source
1671 * address map/range to the destination map/range.
1672 *
1673 * If src_destroy or dst_alloc is requested,
1674 * the source and destination regions should be
1675 * disjoint, not only in the top-level map, but
1676 * in the sharing maps as well. [The best way
1677 * to guarantee this is to use a new intermediate
1678 * map to make copies. This also reduces map
1679 * fragmentation.]
1680 */
1681vm_map_copy(dst_map, src_map,
1682 dst_addr, len, src_addr,
1683 dst_alloc, src_destroy)
1684 vm_map_t dst_map;
1685 vm_map_t src_map;
1686 vm_offset_t dst_addr;
1687 vm_size_t len;
1688 vm_offset_t src_addr;
1689 boolean_t dst_alloc;
1690 boolean_t src_destroy;
1691{
1692 register
1693 vm_map_entry_t src_entry;
1694 register
1695 vm_map_entry_t dst_entry;
1696 vm_map_entry_t tmp_entry;
1697 vm_offset_t src_start;
1698 vm_offset_t src_end;
1699 vm_offset_t dst_start;
1700 vm_offset_t dst_end;
1701 vm_offset_t src_clip;
1702 vm_offset_t dst_clip;
1703 int result;
1704 boolean_t old_src_destroy;
1705
1706 /*
1707 * XXX While we figure out why src_destroy screws up,
1708 * we'll do it by explicitly vm_map_delete'ing at the end.
1709 */
1710
1711 old_src_destroy = src_destroy;
1712 src_destroy = FALSE;
1713
1714 /*
1715 * Compute start and end of region in both maps
1716 */
1717
1718 src_start = src_addr;
1719 src_end = src_start + len;
1720 dst_start = dst_addr;
1721 dst_end = dst_start + len;
1722
1723 /*
1724 * Check that the region can exist in both source
1725 * and destination.
1726 */
1727
1728 if ((dst_end < dst_start) || (src_end < src_start))
1729 return(KERN_NO_SPACE);
1730
1731 /*
1732 * Lock the maps in question -- we avoid deadlock
1733 * by ordering lock acquisition by map value
1734 */
1735
1736 if (src_map == dst_map) {
1737 vm_map_lock(src_map);
1738 }
1739 else if ((int) src_map < (int) dst_map) {
1740 vm_map_lock(src_map);
1741 vm_map_lock(dst_map);
1742 } else {
1743 vm_map_lock(dst_map);
1744 vm_map_lock(src_map);
1745 }
1746
1747 result = KERN_SUCCESS;
1748
1749 /*
1750 * Check protections... source must be completely readable and
1751 * destination must be completely writable. [Note that if we're
1752 * allocating the destination region, we don't have to worry
1753 * about protection, but instead about whether the region
1754 * exists.]
1755 */
1756
1757 if (src_map->is_main_map && dst_map->is_main_map) {
1758 if (!vm_map_check_protection(src_map, src_start, src_end,
1759 VM_PROT_READ)) {
1760 result = KERN_PROTECTION_FAILURE;
1761 goto Return;
1762 }
1763
1764 if (dst_alloc) {
1765 /* XXX Consider making this a vm_map_find instead */
1766 if ((result = vm_map_insert(dst_map, NULL,
1767 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1768 goto Return;
1769 }
1770 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1771 VM_PROT_WRITE)) {
1772 result = KERN_PROTECTION_FAILURE;
1773 goto Return;
1774 }
1775 }
1776
1777 /*
1778 * Find the start entries and clip.
1779 *
1780 * Note that checking protection asserts that the
1781 * lookup cannot fail.
1782 *
1783 * Also note that we wait to do the second lookup
1784 * until we have done the first clip, as the clip
1785 * may affect which entry we get!
1786 */
1787
1788 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1789 src_entry = tmp_entry;
1790 vm_map_clip_start(src_map, src_entry, src_start);
1791
1792 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1793 dst_entry = tmp_entry;
1794 vm_map_clip_start(dst_map, dst_entry, dst_start);
1795
1796 /*
1797 * If both source and destination entries are the same,
1798 * retry the first lookup, as it may have changed.
1799 */
1800
1801 if (src_entry == dst_entry) {
1802 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1803 src_entry = tmp_entry;
1804 }
1805
1806 /*
1807 * If source and destination entries are still the same,
1808 * a null copy is being performed.
1809 */
1810
1811 if (src_entry == dst_entry)
1812 goto Return;
1813
1814 /*
1815 * Go through entries until we get to the end of the
1816 * region.
1817 */
1818
1819 while (src_start < src_end) {
1820 /*
1821 * Clip the entries to the endpoint of the entire region.
1822 */
1823
1824 vm_map_clip_end(src_map, src_entry, src_end);
1825 vm_map_clip_end(dst_map, dst_entry, dst_end);
1826
1827 /*
1828 * Clip each entry to the endpoint of the other entry.
1829 */
1830
1831 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1832 vm_map_clip_end(src_map, src_entry, src_clip);
1833
1834 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1835 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1836
1837 /*
1838 * Both entries now match in size and relative endpoints.
1839 *
1840 * If both entries refer to a VM object, we can
1841 * deal with them now.
1842 */
1843
1844 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1845 vm_map_copy_entry(src_map, dst_map, src_entry,
1846 dst_entry);
1847 }
1848 else {
1849 register vm_map_t new_dst_map;
1850 vm_offset_t new_dst_start;
1851 vm_size_t new_size;
1852 vm_map_t new_src_map;
1853 vm_offset_t new_src_start;
1854
1855 /*
1856 * We have to follow at least one sharing map.
1857 */
1858
1859 new_size = (dst_entry->end - dst_entry->start);
1860
1861 if (src_entry->is_a_map) {
1862 new_src_map = src_entry->object.share_map;
1863 new_src_start = src_entry->offset;
1864 }
1865 else {
1866 new_src_map = src_map;
1867 new_src_start = src_entry->start;
1868 lock_set_recursive(&src_map->lock);
1869 }
1870
1871 if (dst_entry->is_a_map) {
1872 vm_offset_t new_dst_end;
1873
1874 new_dst_map = dst_entry->object.share_map;
1875 new_dst_start = dst_entry->offset;
1876
1877 /*
1878 * Since the destination sharing entries
1879 * will be merely deallocated, we can
1880 * do that now, and replace the region
1881 * with a null object. [This prevents
1882 * splitting the source map to match
1883 * the form of the destination map.]
1884 * Note that we can only do so if the
1885 * source and destination do not overlap.
1886 */
1887
1888 new_dst_end = new_dst_start + new_size;
1889
1890 if (new_dst_map != new_src_map) {
1891 vm_map_lock(new_dst_map);
1892 (void) vm_map_delete(new_dst_map,
1893 new_dst_start,
1894 new_dst_end);
1895 (void) vm_map_insert(new_dst_map,
1896 NULL,
1897 (vm_offset_t) 0,
1898 new_dst_start,
1899 new_dst_end);
1900 vm_map_unlock(new_dst_map);
1901 }
1902 }
1903 else {
1904 new_dst_map = dst_map;
1905 new_dst_start = dst_entry->start;
1906 lock_set_recursive(&dst_map->lock);
1907 }
1908
1909 /*
1910 * Recursively copy the sharing map.
1911 */
1912
1913 (void) vm_map_copy(new_dst_map, new_src_map,
1914 new_dst_start, new_size, new_src_start,
1915 FALSE, FALSE);
1916
1917 if (dst_map == new_dst_map)
1918 lock_clear_recursive(&dst_map->lock);
1919 if (src_map == new_src_map)
1920 lock_clear_recursive(&src_map->lock);
1921 }
1922
1923 /*
1924 * Update variables for next pass through the loop.
1925 */
1926
1927 src_start = src_entry->end;
1928 src_entry = src_entry->next;
1929 dst_start = dst_entry->end;
1930 dst_entry = dst_entry->next;
1931
1932 /*
1933 * If the source is to be destroyed, here is the
1934 * place to do it.
1935 */
1936
1937 if (src_destroy && src_map->is_main_map &&
1938 dst_map->is_main_map)
1939 vm_map_entry_delete(src_map, src_entry->prev);
1940 }
1941
1942 /*
1943 * Update the physical maps as appropriate
1944 */
1945
1946 if (src_map->is_main_map && dst_map->is_main_map) {
1947 if (src_destroy)
1948 pmap_remove(src_map->pmap, src_addr, src_addr + len);
1949 }
1950
1951 /*
1952 * Unlock the maps
1953 */
1954
1955 Return: ;
1956
1957 if (old_src_destroy)
1958 vm_map_delete(src_map, src_addr, src_addr + len);
1959
1960 vm_map_unlock(src_map);
1961 if (src_map != dst_map)
1962 vm_map_unlock(dst_map);
1963
1964 return(result);
1965}
1966
1967/*
1968 * vmspace_fork:
1969 * Create a new process vmspace structure and vm_map
1970 * based on those of an existing process. The new map
1971 * is based on the old map, according to the inheritance
1972 * values on the regions in that map.
1973 *
1974 * The source map must not be locked.
1975 */
1976struct vmspace *
1977vmspace_fork(vm1)
1978 register struct vmspace *vm1;
1979{
1980 register struct vmspace *vm2;
1981 vm_map_t old_map = &vm1->vm_map;
1982 vm_map_t new_map;
1983 vm_map_entry_t old_entry;
1984 vm_map_entry_t new_entry;
1985 pmap_t new_pmap;
1986
1987 vm_map_lock(old_map);
1988
1989 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1990 old_map->entries_pageable);
1991 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1992 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1993 new_pmap = &vm2->vm_pmap; /* XXX */
1994 new_map = &vm2->vm_map; /* XXX */
1995
1996 old_entry = old_map->header.next;
1997
1998 while (old_entry != &old_map->header) {
1999 if (old_entry->is_sub_map)
2000 panic("vm_map_fork: encountered a submap");
2001
2002 switch (old_entry->inheritance) {
2003 case VM_INHERIT_NONE:
2004 break;
2005
2006 case VM_INHERIT_SHARE:
2007 /*
2008 * If we don't already have a sharing map:
2009 */
2010
2011 if (!old_entry->is_a_map) {
2012 vm_map_t new_share_map;
2013 vm_map_entry_t new_share_entry;
2014
2015 /*
2016 * Create a new sharing map
2017 */
2018
2019 new_share_map = vm_map_create(NULL,
2020 old_entry->start,
2021 old_entry->end,
2022 TRUE);
2023 new_share_map->is_main_map = FALSE;
2024
2025 /*
2026 * Create the only sharing entry from the
2027 * old task map entry.
2028 */
2029
2030 new_share_entry =
2031 vm_map_entry_create(new_share_map);
2032 *new_share_entry = *old_entry;
2033
2034 /*
2035 * Insert the entry into the new sharing
2036 * map
2037 */
2038
2039 vm_map_entry_link(new_share_map,
2040 new_share_map->header.prev,
2041 new_share_entry);
2042
2043 /*
2044 * Fix up the task map entry to refer
2045 * to the sharing map now.
2046 */
2047
2048 old_entry->is_a_map = TRUE;
2049 old_entry->object.share_map = new_share_map;
2050 old_entry->offset = old_entry->start;
2051 }
2052
2053 /*
2054 * Clone the entry, referencing the sharing map.
2055 */
2056
2057 new_entry = vm_map_entry_create(new_map);
2058 *new_entry = *old_entry;
2059 vm_map_reference(new_entry->object.share_map);
2060
2061 /*
2062 * Insert the entry into the new map -- we
2063 * know we're inserting at the end of the new
2064 * map.
2065 */
2066
2067 vm_map_entry_link(new_map, new_map->header.prev,
2068 new_entry);
2069
2070 /*
2071 * Update the physical map
2072 */
2073
2074 pmap_copy(new_map->pmap, old_map->pmap,
2075 new_entry->start,
2076 (old_entry->end - old_entry->start),
2077 old_entry->start);
2078 break;
2079
2080 case VM_INHERIT_COPY:
2081 /*
2082 * Clone the entry and link into the map.
2083 */
2084
2085 new_entry = vm_map_entry_create(new_map);
2086 *new_entry = *old_entry;
2087 new_entry->wired_count = 0;
2088 new_entry->object.vm_object = NULL;
2089 new_entry->is_a_map = FALSE;
2090 vm_map_entry_link(new_map, new_map->header.prev,
2091 new_entry);
2092 if (old_entry->is_a_map) {
2093 int check;
2094
2095 check = vm_map_copy(new_map,
2096 old_entry->object.share_map,
2097 new_entry->start,
2098 (vm_size_t)(new_entry->end -
2099 new_entry->start),
2100 old_entry->offset,
2101 FALSE, FALSE);
2102 if (check != KERN_SUCCESS)
2103 printf("vm_map_fork: copy in share_map region failed\n");
2104 }
2105 else {
2106 vm_map_copy_entry(old_map, new_map, old_entry,
2107 new_entry);
2108 }
2109 break;
2110 }
2111 old_entry = old_entry->next;
2112 }
2113
2114 new_map->size = old_map->size;
2115 vm_map_unlock(old_map);
2116
2117 return(vm2);
2118}
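/*
 * Inheritance summary for the loop above: VM_INHERIT_NONE entries
 * are simply skipped, VM_INHERIT_SHARE entries are pushed into a
 * sharing map (created on demand) that both parent and child
 * reference, and VM_INHERIT_COPY entries are copied copy-on-write
 * via vm_map_copy_entry (or vm_map_copy for share-map regions).
 */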
2119
2120/*
2121 * vm_map_lookup:
2122 *
2123 * Finds the VM object, offset, and
2124 * protection for a given virtual address in the
2125 * specified map, assuming a page fault of the
2126 * type specified.
2127 *
2128 * Leaves the map in question locked for read; return
2129 * values are guaranteed until a vm_map_lookup_done
2130 * call is performed. Note that the map argument
2131 * is in/out; the returned map must be used in
2132 * the call to vm_map_lookup_done.
2133 *
2134 * A handle (out_entry) is returned for use in
2135 * vm_map_lookup_done, to make that fast.
2136 *
2137 * If a lookup is requested with "write protection"
2138 * specified, the map may be changed to perform virtual
2139 * copying operations, although the data referenced will
2140 * remain the same.
2141 */
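/*
 * A minimal sketch of the calling convention described above, as a
 * fault handler might use it (the variable names are illustrative):
 *
 *	vm_map_t	map = original_map;
 *	vm_map_entry_t	entry;
 *	vm_object_t	object;
 *	vm_offset_t	offset;
 *	vm_prot_t	prot;
 *	boolean_t	wired, su;
 *	int		result;
 *
 *	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *			&object, &offset, &prot, &wired, &su);
 *	if (result != KERN_SUCCESS)
 *		return(result);
 *	... resolve the fault using object/offset ...
 *	vm_map_lookup_done(map, entry);
 *
 * Note that the lookup may change "map" (e.g. when it descends into a
 * submap), so the possibly-updated value must be the one handed to
 * vm_map_lookup_done.
 */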
2142vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2143 object, offset, out_prot, wired, single_use)
2144 vm_map_t *var_map; /* IN/OUT */
2145 register vm_offset_t vaddr;
2146 register vm_prot_t fault_type;
2147
2148 vm_map_entry_t *out_entry; /* OUT */
2149 vm_object_t *object; /* OUT */
2150 vm_offset_t *offset; /* OUT */
2151 vm_prot_t *out_prot; /* OUT */
2152 boolean_t *wired; /* OUT */
2153 boolean_t *single_use; /* OUT */
2154{
2155 vm_map_t share_map;
2156 vm_offset_t share_offset;
2157 register vm_map_entry_t entry;
2158 register vm_map_t map = *var_map;
2159 register vm_prot_t prot;
2160 register boolean_t su;
2161
2162 RetryLookup: ;
2163
2164 /*
2165 * Lookup the faulting address.
2166 */
2167
2168 vm_map_lock_read(map);
2169
2170#define RETURN(why) \
2171 { \
2172 vm_map_unlock_read(map); \
2173 return(why); \
2174 }
2175
2176 /*
2177 * If the map has an interesting hint, try it before calling
2178 * the full-blown lookup routine.
2179 */
2180
2181 simple_lock(&map->hint_lock);
2182 entry = map->hint;
2183 simple_unlock(&map->hint_lock);
2184
2185 *out_entry = entry;
2186
2187 if ((entry == &map->header) ||
2188 (vaddr < entry->start) || (vaddr >= entry->end)) {
2189 vm_map_entry_t tmp_entry;
2190
2191 /*
2192 * Entry was either not a valid hint, or the vaddr
2193 * was not contained in the entry, so do a full lookup.
2194 */
2195 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2196 RETURN(KERN_INVALID_ADDRESS);
2197
2198 entry = tmp_entry;
2199 *out_entry = entry;
2200 }
2201
2202 /*
2203 * Handle submaps.
2204 */
2205
2206 if (entry->is_sub_map) {
2207 vm_map_t old_map = map;
2208
2209 *var_map = map = entry->object.sub_map;
2210 vm_map_unlock_read(old_map);
2211 goto RetryLookup;
2212 }
2213
2214 /*
2215 * Check whether this task is allowed to have
2216 * this page.
2217 */
2218
2219 prot = entry->protection;
2220 if ((fault_type & (prot)) != fault_type)
2221 RETURN(KERN_PROTECTION_FAILURE);
2222
2223 /*
2224 * If this page is not pageable, we have to get
2225 * it for all possible accesses.
2226 */
2227
2228 if (*wired = (entry->wired_count != 0))
2229 prot = fault_type = entry->protection;
2230
2231 /*
2232 * If we don't already have a VM object, track
2233 * it down.
2234 */
2235
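 /*
 * "su" (single use) starts out TRUE when the entry maps its
 * object directly; if a sharing map is involved, it is
 * recomputed below from the sharing map's reference count.
 */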
2236 if (su = !entry->is_a_map) {
2237 share_map = map;
2238 share_offset = vaddr;
2239 }
2240 else {
2241 vm_map_entry_t share_entry;
2242
2243 /*
2244 * Compute the sharing map, and offset into it.
2245 */
2246
2247 share_map = entry->object.share_map;
2248 share_offset = (vaddr - entry->start) + entry->offset;
2249
2250 /*
2251 * Look for the backing store object and offset
2252 */
2253
2254 vm_map_lock_read(share_map);
2255
2256 if (!vm_map_lookup_entry(share_map, share_offset,
2257 &share_entry)) {
2258 vm_map_unlock_read(share_map);
2259 RETURN(KERN_INVALID_ADDRESS);
2260 }
2261 entry = share_entry;
2262 }
2263
2264 /*
2265 * If the entry was copy-on-write, we either push a shadow object in front of it now (write fault) or demote the permitted access (read fault).
2266 */
2267
2268 if (entry->needs_copy) {
2269 /*
2270 * If we want to write the page, we may as well
2271 * handle that now since we've got the sharing
2272 * map locked.
2273 *
2274 * If we don't need to write the page, we just
2275 * demote the permissions allowed.
2276 */
2277
2278 if (fault_type & VM_PROT_WRITE) {
2279 /*
2280 * Make a new object, and place it in the
2281 * object chain. Note that no new references
2282 * have appeared -- one just moved from the
2283 * share map to the new object.
2284 */
2285
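 /*
 * If the read-to-write upgrade fails, the sharing map's
 * lock has been let go in the attempt; everything gathered
 * so far is stale, so drop the top-level lock as well and
 * retry the lookup from the beginning.
 */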
2286 if (lock_read_to_write(&share_map->lock)) {
2287 if (share_map != map)
2288 vm_map_unlock_read(map);
2289 goto RetryLookup;
2290 }
2291
2292 vm_object_shadow(
2293 &entry->object.vm_object,
2294 &entry->offset,
2295 (vm_size_t) (entry->end - entry->start));
2296
2297 entry->needs_copy = FALSE;
2298
2299 lock_write_to_read(&share_map->lock);
2300 }
2301 else {
2302 /*
2303 * We're attempting to read a copy-on-write
2304 * page -- don't allow writes.
2305 */
2306
2307 prot &= (~VM_PROT_WRITE);
2308 }
2309 }
2310
2311 /*
2312 * Create an object if necessary.
2313 */
2314 if (entry->object.vm_object == NULL) {
2315
2316 if (lock_read_to_write(&share_map->lock)) {
2317 if (share_map != map)
2318 vm_map_unlock_read(map);
2319 goto RetryLookup;
2320 }
2321
2322 entry->object.vm_object = vm_object_allocate(
2323 (vm_size_t)(entry->end - entry->start));
2324 entry->offset = 0;
2325 lock_write_to_read(&share_map->lock);
2326 }
2327
2328 /*
2329 * Return the object/offset from this entry. If the entry
2330 * was copy-on-write or empty, it has been fixed up.
2331 */
2332
2333 *offset = (share_offset - entry->start) + entry->offset;
2334 *object = entry->object.vm_object;
2335
2336 /*
2337 * Return whether this is the only map sharing this data.
2338 */
2339
2340 if (!su) {
2341 simple_lock(&share_map->ref_lock);
2342 su = (share_map->ref_count == 1);
2343 simple_unlock(&share_map->ref_lock);
2344 }
2345
2346 *out_prot = prot;
2347 *single_use = su;
2348
2349 return(KERN_SUCCESS);
2350
2351#undef RETURN
2352}
2353
2354/*
2355 * vm_map_lookup_done:
2356 *
2357 * Releases locks acquired by a vm_map_lookup
2358 * (according to the handle returned by that lookup).
2359 */
2360
2361void vm_map_lookup_done(map, entry)
2362 register vm_map_t map;
2363 vm_map_entry_t entry;
2364{
2365 /*
2366 * If this entry references a map, unlock it first.
2367 */
2368
2369 if (entry->is_a_map)
2370 vm_map_unlock_read(entry->object.share_map);
2371
2372 /*
2373 * Unlock the main-level map
2374 */
2375
2376 vm_map_unlock_read(map);
2377}
2378
2379/*
2380 * Routine: vm_map_simplify
2381 * Purpose:
2382 * Attempt to simplify the map representation in
2383 * the vicinity of the given starting address.
2384 * Note:
2385 * This routine is intended primarily to keep the
2386 * kernel maps more compact -- they generally don't
2387 * benefit from the "expand a map entry" technology
2388 * at allocation time because the adjacent entry
2389 * is often wired down.
2390 */
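/*
 * For example, two neighboring entries such as
 *
 *	[0xfe000000, 0xfe004000)  object X, offset 0
 *	[0xfe004000, 0xfe008000)  object X, offset 0x4000
 *
 * with identical protection, inheritance, wiring and copy attributes
 * collapse into the single entry [0xfe000000, 0xfe008000) covering
 * object X at offset 0 (the addresses are purely illustrative).
 */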
2391void vm_map_simplify(map, start)
2392 vm_map_t map;
2393 vm_offset_t start;
2394{
2395 vm_map_entry_t this_entry;
2396 vm_map_entry_t prev_entry;
2397
2398 vm_map_lock(map);
2399 if (
2400 (vm_map_lookup_entry(map, start, &this_entry)) &&
2401 ((prev_entry = this_entry->prev) != &map->header) &&
2402
2403 (prev_entry->end == start) &&
2404 (map->is_main_map) &&
2405
2406 (prev_entry->is_a_map == FALSE) &&
2407 (prev_entry->is_sub_map == FALSE) &&
2408
2409 (this_entry->is_a_map == FALSE) &&
2410 (this_entry->is_sub_map == FALSE) &&
2411
2412 (prev_entry->inheritance == this_entry->inheritance) &&
2413 (prev_entry->protection == this_entry->protection) &&
2414 (prev_entry->max_protection == this_entry->max_protection) &&
2415 (prev_entry->wired_count == this_entry->wired_count) &&
2416
2417 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2418 (prev_entry->needs_copy == this_entry->needs_copy) &&
2419
2420 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2421 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2422 == this_entry->offset)
2423 ) {
2424 if (map->first_free == this_entry)
2425 map->first_free = prev_entry;
2426
2427 SAVE_HINT(map, prev_entry);
2428 vm_map_entry_unlink(map, this_entry);
2429 prev_entry->end = this_entry->end;
2430 vm_object_deallocate(this_entry->object.vm_object);
2431 vm_map_entry_dispose(map, this_entry);
2432 }
2433 vm_map_unlock(map);
2434}
2435
2436/*
2437 * vm_map_print: [ debug ]
2438 */
2439void vm_map_print(map, full)
2440 register vm_map_t map;
2441 boolean_t full;
2442{
2443 register vm_map_entry_t entry;
2444 extern int indent;
2445
2446 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2447 (map->is_main_map ? "Task" : "Share"),
2448 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2449 map->timestamp);
2450
2451 if (!full && indent)
2452 return;
2453
2454 indent += 2;
2455 for (entry = map->header.next; entry != &map->header;
2456 entry = entry->next) {
2457 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2458 (int) entry, (int) entry->start, (int) entry->end);
2459 if (map->is_main_map) {
2460 static char *inheritance_name[4] =
2461 { "share", "copy", "none", "donate_copy"};
2462 printf("prot=%x/%x/%s, ",
2463 entry->protection,
2464 entry->max_protection,
2465 inheritance_name[entry->inheritance]);
2466 if (entry->wired_count != 0)
2467 printf("wired, ");
2468 }
2469
2470 if (entry->is_a_map || entry->is_sub_map) {
2471 printf("share=0x%x, offset=0x%x\n",
2472 (int) entry->object.share_map,
2473 (int) entry->offset);
2474 if ((entry->prev == &map->header) ||
2475 (!entry->prev->is_a_map) ||
2476 (entry->prev->object.share_map !=
2477 entry->object.share_map)) {
2478 indent += 2;
2479 vm_map_print(entry->object.share_map, full);
2480 indent -= 2;
2481 }
2482
2483 }
2484 else {
2485 printf("object=0x%x, offset=0x%x",
2486 (int) entry->object.vm_object,
2487 (int) entry->offset);
2488 if (entry->copy_on_write)
2489 printf(", copy (%s)",
2490 entry->needs_copy ? "needed" : "done");
2491 printf("\n");
2492
2493 if ((entry->prev == &map->header) ||
2494 (entry->prev->is_a_map) ||
2495 (entry->prev->object.vm_object !=
2496 entry->object.vm_object)) {
2497 indent += 2;
2498 vm_object_print(entry->object.vm_object, full);
2499 indent -= 2;
2500 }
2501 }
2502 }
2503 indent -= 2;
2504}