1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_map.c 7.3 (Berkeley) 4/21/91
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
65 * -------------------- ----- ----------------------
66 * CURRENT PATCH LEVEL: 1 00137
67 * -------------------- ----- ----------------------
68 *
69 * 08 Apr 93 Yuval Yarom Several VM system fixes
70 */
71
72/*
73 * Virtual memory mapping module.
74 */
75
76#include "param.h"
 77#include "systm.h"
78#include "malloc.h"
79#include "vm.h"
80#include "vm_page.h"
81#include "vm_object.h"
82
83/*
84 * Virtual memory maps provide for the mapping, protection,
85 * and sharing of virtual memory objects. In addition,
86 * this module provides for an efficient virtual copy of
87 * memory from one map to another.
88 *
89 * Synchronization is required prior to most operations.
90 *
91 * Maps consist of an ordered doubly-linked list of simple
92 * entries; a single hint is used to speed up lookups.
93 *
94 * In order to properly represent the sharing of virtual
95 * memory regions among maps, the map structure is bi-level.
96 * Top-level ("address") maps refer to regions of sharable
97 * virtual memory. These regions are implemented as
98 * ("sharing") maps, which then refer to the actual virtual
99 * memory objects. When two address maps "share" memory,
100 * their top-level maps both have references to the same
101 * sharing map. When memory is virtual-copied from one
102 * address map to another, the references in the sharing
103 * maps are actually copied -- no copying occurs at the
104 * virtual memory object level.
105 *
 106 * Since portions of maps are specified by start/end addresses,
107 * which may not align with existing map entries, all
108 * routines merely "clip" entries to these start/end values.
109 * [That is, an entry is split into two, bordering at a
110 * start or end value.] Note that these clippings may not
111 * always be necessary (as the two resulting entries are then
112 * not changed); however, the clipping is done for convenience.
113 * No attempt is currently made to "glue back together" two
114 * abutting entries.
115 *
116 * As mentioned above, virtual copy operations are performed
117 * by copying VM object references from one sharing map to
118 * another, and then marking both regions as copy-on-write.
119 * It is important to note that only one writeable reference
120 * to a VM object region exists in any map -- this means that
121 * shadow object creation can be delayed until a write operation
122 * occurs.
123 */
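
/*
 * The following is an illustrative usage sketch only, kept under #if 0 so
 * that it is never compiled.  It shows how a client of this module might
 * create a map, let the first-fit search pick an address for a zero-fill
 * region, restrict its protection, and tear the region down again.  The
 * function name and its arguments are placeholders, not part of this
 * module; the size is assumed to be page-aligned.
 */
#if 0
static void
vm_map_usage_sketch(pmap, minaddr, maxaddr, size)
	pmap_t pmap;
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;
{
	vm_map_t map;
	vm_offset_t addr;

	/* A pageable map covering [minaddr, maxaddr). */
	map = vm_map_create(pmap, minaddr, maxaddr, TRUE);

	/* Let vm_map_find pick a place for an object-less (zero-fill) region. */
	addr = minaddr;
	if (vm_map_find(map, NULL, (vm_offset_t) 0, &addr, size, TRUE)
	    != KERN_SUCCESS)
		panic("vm_map_usage_sketch: no space");

	/* Restrict the current (not the maximum) protection to read-only. */
	(void) vm_map_protect(map, addr, addr + size, VM_PROT_READ, FALSE);

	/* Remove the region and drop the reference to the map. */
	(void) vm_map_remove(map, addr, addr + size);
	vm_map_deallocate(map);
}
#endif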
124
125/*
126 * vm_map_startup:
127 *
128 * Initialize the vm_map module. Must be called before
129 * any other vm_map routines.
130 *
131 * Map and entry structures are allocated from the general
132 * purpose memory pool with some exceptions:
133 *
134 * - The kernel map and kmem submap are allocated statically.
135 * - Kernel map entries are allocated out of a static pool.
136 *
137 * These restrictions are necessary since malloc() uses the
138 * maps and requires map entries.
139 */
140
141vm_offset_t kentry_data;
142vm_size_t kentry_data_size;
143vm_map_entry_t kentry_free;
144vm_map_t kmap_free;
145
146void vm_map_startup()
147{
148 register int i;
149 register vm_map_entry_t mep;
150 vm_map_t mp;
151
152 /*
153 * Static map structures for allocation before initialization of
154 * kernel map or kmem map. vm_map_create knows how to deal with them.
155 */
156 kmap_free = mp = (vm_map_t) kentry_data;
157 i = MAX_KMAP;
158 while (--i > 0) {
159 mp->header.next = (vm_map_entry_t) (mp + 1);
160 mp++;
161 }
162 mp++->header.next = NULL;
163
164 /*
165 * Form a free list of statically allocated kernel map entries
166 * with the rest.
167 */
168 kentry_free = mep = (vm_map_entry_t) mp;
169 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
170 while (--i > 0) {
171 mep->next = mep + 1;
172 mep++;
173 }
174 mep->next = NULL;
175}
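
/*
 * Worked example of the carving done above (the MAX_KMAP value used here
 * is illustrative): if MAX_KMAP were 10, the first 10 * sizeof(struct
 * vm_map) bytes of kentry_data would form the static map pool headed by
 * kmap_free, and the remaining (kentry_data_size - 10 * sizeof(struct
 * vm_map)) / sizeof(struct vm_map_entry) entries would form the static
 * kernel map entry pool headed by kentry_free.
 */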
176
177/*
178 * Allocate a vmspace structure, including a vm_map and pmap,
179 * and initialize those structures. The refcnt is set to 1.
180 * The remaining fields must be initialized by the caller.
181 */
182struct vmspace *
183vmspace_alloc(min, max, pageable)
184 vm_offset_t min, max;
185 int pageable;
186{
187 register struct vmspace *vm;
188
189 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
190 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
191 vm_map_init(&vm->vm_map, min, max, pageable);
192 pmap_pinit(&vm->vm_pmap);
193 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
194 vm->vm_refcnt = 1;
195 return (vm);
196}
197
198void
199vmspace_free(vm)
200 register struct vmspace *vm;
201{
202
203 if (--vm->vm_refcnt == 0) {
204 /*
205 * Lock the map, to wait out all other references to it.
206 * Delete all of the mappings and pages they hold,
207 * then call the pmap module to reclaim anything left.
208 */
209 vm_map_lock(&vm->vm_map);
210 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
211 vm->vm_map.max_offset);
212 pmap_release(&vm->vm_pmap);
213 FREE(vm, M_VMMAP);
214 }
215}
216
217/*
218 * vm_map_create:
219 *
220 * Creates and returns a new empty VM map with
221 * the given physical map structure, and having
222 * the given lower and upper address bounds.
223 */
224vm_map_t vm_map_create(pmap, min, max, pageable)
225 pmap_t pmap;
226 vm_offset_t min, max;
227 boolean_t pageable;
228{
229 register vm_map_t result;
230 extern vm_map_t kernel_map, kmem_map;
231
232 if (kmem_map == NULL) {
 233 result = kmap_free;
 234 if (result == NULL)
 235 panic("vm_map_create: out of maps");
 236 kmap_free = (vm_map_t) result->header.next;
237 } else
238 MALLOC(result, vm_map_t, sizeof(struct vm_map),
239 M_VMMAP, M_WAITOK);
240
241 vm_map_init(result, min, max, pageable);
242 result->pmap = pmap;
243 return(result);
244}
245
246/*
247 * Initialize an existing vm_map structure
248 * such as that in the vmspace structure.
249 * The pmap is set elsewhere.
250 */
251void
252vm_map_init(map, min, max, pageable)
253 register struct vm_map *map;
254 vm_offset_t min, max;
255 boolean_t pageable;
256{
257 map->header.next = map->header.prev = &map->header;
258 map->nentries = 0;
259 map->size = 0;
260 map->ref_count = 1;
261 map->is_main_map = TRUE;
262 map->min_offset = min;
263 map->max_offset = max;
264 map->entries_pageable = pageable;
265 map->first_free = &map->header;
266 map->hint = &map->header;
267 map->timestamp = 0;
268 lock_init(&map->lock, TRUE);
269 simple_lock_init(&map->ref_lock);
270 simple_lock_init(&map->hint_lock);
271}
272
273/*
274 * vm_map_entry_create: [ internal use only ]
275 *
276 * Allocates a VM map entry for insertion.
 277 * No entry fields are filled in.
 278 */
279vm_map_entry_t vm_map_entry_create(map)
280 vm_map_t map;
281{
282 vm_map_entry_t entry;
283 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
284
285 if (map == kernel_map || map == kmem_map || map == mb_map
286 || map == buffer_map || map == pager_map) {
287 if (entry = kentry_free)
288 kentry_free = kentry_free->next;
289 } else
290 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
291 M_VMMAPENT, M_WAITOK);
292 if (entry == NULL)
293 panic("vm_map_entry_create: out of map entries");
294
295 return(entry);
296}
297
298/*
299 * vm_map_entry_dispose: [ internal use only ]
300 *
301 * Inverse of vm_map_entry_create.
302 */
303void vm_map_entry_dispose(map, entry)
304 vm_map_t map;
305 vm_map_entry_t entry;
306{
307 extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map;
308
309 if (map == kernel_map || map == kmem_map || map == mb_map
310 || map == buffer_map || map == pager_map) {
311 entry->next = kentry_free;
312 kentry_free = entry;
313 } else
314 FREE(entry, M_VMMAPENT);
315}
316
317/*
318 * vm_map_entry_{un,}link:
319 *
320 * Insert/remove entries from maps.
321 */
322#define vm_map_entry_link(map, after_where, entry) \
323 { \
324 (map)->nentries++; \
325 (entry)->prev = (after_where); \
326 (entry)->next = (after_where)->next; \
327 (entry)->prev->next = (entry); \
328 (entry)->next->prev = (entry); \
329 }
330#define vm_map_entry_unlink(map, entry) \
331 { \
332 (map)->nentries--; \
333 (entry)->next->prev = (entry)->prev; \
334 (entry)->prev->next = (entry)->next; \
335 }
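
/*
 * Worked example, derived from the macros above: linking an entry E into
 * an empty map with vm_map_entry_link(map, &map->header, E) leaves
 * map->header.next == map->header.prev == E, E->next == E->prev ==
 * &map->header, and map->nentries == 1.  vm_map_entry_unlink(map, E)
 * then restores the empty circular list and drops nentries back to 0.
 */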
336
337/*
338 * vm_map_reference:
339 *
340 * Creates another valid reference to the given map.
341 *
342 */
343void vm_map_reference(map)
344 register vm_map_t map;
345{
346 if (map == NULL)
347 return;
348
349 simple_lock(&map->ref_lock);
350 map->ref_count++;
351 simple_unlock(&map->ref_lock);
352}
353
354/*
355 * vm_map_deallocate:
356 *
357 * Removes a reference from the specified map,
358 * destroying it if no references remain.
359 * The map should not be locked.
360 */
361void vm_map_deallocate(map)
362 register vm_map_t map;
363{
364 register int c;
365
366 if (map == NULL)
367 return;
368
369 simple_lock(&map->ref_lock);
370 c = --map->ref_count;
371 simple_unlock(&map->ref_lock);
372
373 if (c > 0) {
374 return;
375 }
376
377 /*
378 * Lock the map, to wait out all other references
379 * to it.
380 */
381
382 vm_map_lock(map);
383
384 (void) vm_map_delete(map, map->min_offset, map->max_offset);
385
386 pmap_destroy(map->pmap);
387
388 FREE(map, M_VMMAP);
389}
390
391/*
392 * vm_map_insert: [ internal use only ]
393 *
394 * Inserts the given whole VM object into the target
395 * map at the specified address range. The object's
396 * size should match that of the address range.
397 *
398 * Requires that the map be locked, and leaves it so.
399 */
400vm_map_insert(map, object, offset, start, end)
401 vm_map_t map;
402 vm_object_t object;
403 vm_offset_t offset;
404 vm_offset_t start;
405 vm_offset_t end;
406{
407 register vm_map_entry_t new_entry;
408 register vm_map_entry_t prev_entry;
409 vm_map_entry_t temp_entry;
410
411 /*
412 * Check that the start and end points are not bogus.
413 */
414
415 if ((start < map->min_offset) || (end > map->max_offset) ||
416 (start >= end))
417 return(KERN_INVALID_ADDRESS);
418
419 /*
420 * Find the entry prior to the proposed
421 * starting address; if it's part of an
422 * existing entry, this range is bogus.
423 */
424
425 if (vm_map_lookup_entry(map, start, &temp_entry))
426 return(KERN_NO_SPACE);
427
428 prev_entry = temp_entry;
429
430 /*
431 * Assert that the next entry doesn't overlap the
432 * end point.
433 */
434
435 if ((prev_entry->next != &map->header) &&
436 (prev_entry->next->start < end))
437 return(KERN_NO_SPACE);
438
439 /*
440 * See if we can avoid creating a new entry by
441 * extending one of our neighbors.
442 */
443
444 if (object == NULL) {
445 if ((prev_entry != &map->header) &&
446 (prev_entry->end == start) &&
447 (map->is_main_map) &&
448 (prev_entry->is_a_map == FALSE) &&
449 (prev_entry->is_sub_map == FALSE) &&
450 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
451 (prev_entry->protection == VM_PROT_DEFAULT) &&
452 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
453 (prev_entry->wired_count == 0)) {
454
455 if (vm_object_coalesce(prev_entry->object.vm_object,
456 NULL,
457 prev_entry->offset,
458 (vm_offset_t) 0,
459 (vm_size_t)(prev_entry->end
460 - prev_entry->start),
461 (vm_size_t)(end - prev_entry->end))) {
462 /*
463 * Coalesced the two objects - can extend
464 * the previous map entry to include the
465 * new range.
466 */
467 map->size += (end - prev_entry->end);
468 prev_entry->end = end;
469 return(KERN_SUCCESS);
470 }
471 }
472 }
473
474 /*
475 * Create a new entry
476 */
477
478 new_entry = vm_map_entry_create(map);
479 new_entry->start = start;
480 new_entry->end = end;
481
482 new_entry->is_a_map = FALSE;
483 new_entry->is_sub_map = FALSE;
484 new_entry->object.vm_object = object;
485 new_entry->offset = offset;
486
487 new_entry->copy_on_write = FALSE;
488 new_entry->needs_copy = FALSE;
489
490 if (map->is_main_map) {
491 new_entry->inheritance = VM_INHERIT_DEFAULT;
492 new_entry->protection = VM_PROT_DEFAULT;
493 new_entry->max_protection = VM_PROT_DEFAULT;
494 new_entry->wired_count = 0;
495 }
496
497 /*
498 * Insert the new entry into the list
499 */
500
501 vm_map_entry_link(map, prev_entry, new_entry);
502 map->size += new_entry->end - new_entry->start;
503
504 /*
505 * Update the free space hint
506 */
507
508 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
509 map->first_free = new_entry;
510
511 return(KERN_SUCCESS);
512}
513
514/*
515 * SAVE_HINT:
516 *
517 * Saves the specified entry as the hint for
518 * future lookups. Performs necessary interlocks.
519 */
520#define SAVE_HINT(map,value) \
521 simple_lock(&(map)->hint_lock); \
522 (map)->hint = (value); \
523 simple_unlock(&(map)->hint_lock);
524
525/*
526 * vm_map_lookup_entry: [ internal use only ]
527 *
528 * Finds the map entry containing (or
529 * immediately preceding) the specified address
530 * in the given map; the entry is returned
531 * in the "entry" parameter. The boolean
532 * result indicates whether the address is
533 * actually contained in the map.
534 */
535boolean_t vm_map_lookup_entry(map, address, entry)
536 register vm_map_t map;
537 register vm_offset_t address;
538 vm_map_entry_t *entry; /* OUT */
539{
540 register vm_map_entry_t cur;
541 register vm_map_entry_t last;
542
543 /*
544 * Start looking either from the head of the
545 * list, or from the hint.
546 */
547
548 simple_lock(&map->hint_lock);
549 cur = map->hint;
550 simple_unlock(&map->hint_lock);
551
552 if (cur == &map->header)
553 cur = cur->next;
554
555 if (address >= cur->start) {
556 /*
557 * Go from hint to end of list.
558 *
559 * But first, make a quick check to see if
560 * we are already looking at the entry we
561 * want (which is usually the case).
562 * Note also that we don't need to save the hint
563 * here... it is the same hint (unless we are
564 * at the header, in which case the hint didn't
565 * buy us anything anyway).
566 */
567 last = &map->header;
568 if ((cur != last) && (cur->end > address)) {
569 *entry = cur;
570 return(TRUE);
571 }
572 }
573 else {
574 /*
575 * Go from start to hint, *inclusively*
576 */
577 last = cur->next;
578 cur = map->header.next;
579 }
580
581 /*
582 * Search linearly
583 */
584
585 while (cur != last) {
586 if (cur->end > address) {
587 if (address >= cur->start) {
588 /*
589 * Save this lookup for future
590 * hints, and return
591 */
592
593 *entry = cur;
594 SAVE_HINT(map, cur);
595 return(TRUE);
596 }
597 break;
598 }
599 cur = cur->next;
600 }
601 *entry = cur->prev;
602 SAVE_HINT(map, *entry);
603 return(FALSE);
604}
605
606/*
607 * vm_map_find finds an unallocated region in the target address
608 * map with the given length. The search is defined to be
609 * first-fit from the specified address; the region found is
610 * returned in the same parameter.
611 *
612 */
613vm_map_find(map, object, offset, addr, length, find_space)
614 vm_map_t map;
615 vm_object_t object;
616 vm_offset_t offset;
617 vm_offset_t *addr; /* IN/OUT */
618 vm_size_t length;
619 boolean_t find_space;
620{
621 register vm_map_entry_t entry;
622 register vm_offset_t start;
623 register vm_offset_t end;
624 int result;
625
626 start = *addr;
627
628 vm_map_lock(map);
629
630 if (find_space) {
631 /*
632 * Calculate the first possible address.
633 */
634
635 if (start < map->min_offset)
636 start = map->min_offset;
637 if (start > map->max_offset) {
638 vm_map_unlock(map);
639 return (KERN_NO_SPACE);
640 }
641
642 /*
643 * Look for the first possible address;
644 * if there's already something at this
645 * address, we have to start after it.
646 */
647
648 if (start == map->min_offset) {
649 if ((entry = map->first_free) != &map->header)
650 start = entry->end;
651 } else {
652 vm_map_entry_t tmp_entry;
653 if (vm_map_lookup_entry(map, start, &tmp_entry))
654 start = tmp_entry->end;
655 entry = tmp_entry;
656 }
657
658 /*
659 * In any case, the "entry" always precedes
660 * the proposed new region throughout the
661 * loop:
662 */
663
664 while (TRUE) {
665 register vm_map_entry_t next;
666
667 /*
668 * Find the end of the proposed new region.
669 * Be sure we didn't go beyond the end, or
670 * wrap around the address.
671 */
672
673 end = start + length;
674
675 if ((end > map->max_offset) || (end < start)) {
676 vm_map_unlock(map);
677 return (KERN_NO_SPACE);
678 }
679
680 /*
681 * If there are no more entries, we must win.
682 */
683
684 next = entry->next;
685 if (next == &map->header)
686 break;
687
688 /*
689 * If there is another entry, it must be
690 * after the end of the potential new region.
691 */
692
693 if (next->start >= end)
694 break;
695
696 /*
697 * Didn't fit -- move to the next entry.
698 */
699
700 entry = next;
701 start = entry->end;
702 }
703 *addr = start;
704
705 SAVE_HINT(map, entry);
706 }
707
708 result = vm_map_insert(map, object, offset, start, start + length);
709
710 vm_map_unlock(map);
711 return(result);
712}
713
714/*
715 * vm_map_simplify_entry: [ internal use only ]
716 *
717 * Simplify the given map entry by:
718 * removing extra sharing maps
719 * [XXX maybe later] merging with a neighbor
720 */
721void vm_map_simplify_entry(map, entry)
722 vm_map_t map;
723 vm_map_entry_t entry;
724{
725#ifdef lint
726 map++;
727#endif lint
728
729 /*
730 * If this entry corresponds to a sharing map, then
731 * see if we can remove the level of indirection.
732 * If it's not a sharing map, then it points to
733 * a VM object, so see if we can merge with either
734 * of our neighbors.
735 */
736
737 if (entry->is_sub_map)
738 return;
739 if (entry->is_a_map) {
740#if 0
741 vm_map_t my_share_map;
742 int count;
743
744 my_share_map = entry->object.share_map;
745 simple_lock(&my_share_map->ref_lock);
746 count = my_share_map->ref_count;
747 simple_unlock(&my_share_map->ref_lock);
748
749 if (count == 1) {
750 /* Can move the region from
751 * entry->start to entry->end (+ entry->offset)
752 * in my_share_map into place of entry.
753 * Later.
754 */
755 }
756#endif 0
757 }
758 else {
759 /*
760 * Try to merge with our neighbors.
761 *
762 * Conditions for merge are:
763 *
764 * 1. entries are adjacent.
765 * 2. both entries point to objects
766 * with null pagers.
767 *
768 * If a merge is possible, we replace the two
769 * entries with a single entry, then merge
770 * the two objects into a single object.
771 *
772 * Now, all that is left to do is write the
773 * code!
774 */
775 }
776}
777
778/*
779 * vm_map_clip_start: [ internal use only ]
780 *
781 * Asserts that the given entry begins at or after
782 * the specified address; if necessary,
783 * it splits the entry into two.
784 */
785#define vm_map_clip_start(map, entry, startaddr) \
786{ \
787 if (startaddr > entry->start) \
788 _vm_map_clip_start(map, entry, startaddr); \
789}
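
/*
 * Worked example: clipping an entry that covers [0x1000, 0x5000) with
 * offset 0 at address 0x3000 inserts a new entry for [0x1000, 0x3000)
 * in front of it, while the original entry is shrunk to [0x3000, 0x5000)
 * and its offset is advanced by 0x2000, exactly as _vm_map_clip_start()
 * does below.
 */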
790
791/*
792 * This routine is called only when it is known that
793 * the entry must be split.
794 */
795void _vm_map_clip_start(map, entry, start)
796 register vm_map_t map;
797 register vm_map_entry_t entry;
798 register vm_offset_t start;
799{
800 register vm_map_entry_t new_entry;
801
802 /*
803 * See if we can simplify this entry first
804 */
805
806 vm_map_simplify_entry(map, entry);
807
808 /*
809 * Split off the front portion --
810 * note that we must insert the new
811 * entry BEFORE this one, so that
812 * this entry has the specified starting
813 * address.
814 */
815
816 new_entry = vm_map_entry_create(map);
817 *new_entry = *entry;
818
819 new_entry->end = start;
820 entry->offset += (start - entry->start);
821 entry->start = start;
822
823 vm_map_entry_link(map, entry->prev, new_entry);
824
825 if (entry->is_a_map || entry->is_sub_map)
826 vm_map_reference(new_entry->object.share_map);
827 else
828 vm_object_reference(new_entry->object.vm_object);
829}
830
831/*
832 * vm_map_clip_end: [ internal use only ]
833 *
834 * Asserts that the given entry ends at or before
835 * the specified address; if necessary,
836 * it splits the entry into two.
837 */
838
839void _vm_map_clip_end();
840#define vm_map_clip_end(map, entry, endaddr) \
841{ \
842 if (endaddr < entry->end) \
843 _vm_map_clip_end(map, entry, endaddr); \
844}
845
846/*
847 * This routine is called only when it is known that
848 * the entry must be split.
849 */
850void _vm_map_clip_end(map, entry, end)
851 register vm_map_t map;
852 register vm_map_entry_t entry;
853 register vm_offset_t end;
854{
855 register vm_map_entry_t new_entry;
856
857 /*
858 * Create a new entry and insert it
859 * AFTER the specified entry
860 */
861
862 new_entry = vm_map_entry_create(map);
863 *new_entry = *entry;
864
865 new_entry->start = entry->end = end;
866 new_entry->offset += (end - entry->start);
867
868 vm_map_entry_link(map, entry, new_entry);
869
870 if (entry->is_a_map || entry->is_sub_map)
871 vm_map_reference(new_entry->object.share_map);
872 else
873 vm_object_reference(new_entry->object.vm_object);
874}
875
876/*
877 * VM_MAP_RANGE_CHECK: [ internal use only ]
878 *
879 * Asserts that the starting and ending region
880 * addresses fall within the valid range of the map.
881 */
882#define VM_MAP_RANGE_CHECK(map, start, end) \
883 { \
884 if (start < vm_map_min(map)) \
885 start = vm_map_min(map); \
886 if (end > vm_map_max(map)) \
887 end = vm_map_max(map); \
888 if (start > end) \
889 start = end; \
890 }
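
/*
 * For example, a request for [vm_map_min(map) - 1, vm_map_max(map) + 1)
 * is clamped to [vm_map_min(map), vm_map_max(map)), and a request lying
 * entirely above vm_map_max(map) collapses to the empty range
 * [vm_map_max(map), vm_map_max(map)).
 */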
891
892/*
893 * vm_map_submap: [ kernel use only ]
894 *
895 * Mark the given range as handled by a subordinate map.
896 *
897 * This range must have been created with vm_map_find,
898 * and no other operations may have been performed on this
899 * range prior to calling vm_map_submap.
900 *
901 * Only a limited number of operations can be performed
 902 * within this range after calling vm_map_submap:
903 * vm_fault
904 * [Don't try vm_map_copy!]
905 *
906 * To remove a submapping, one must first remove the
907 * range from the superior map, and then destroy the
908 * submap (if desired). [Better yet, don't try it.]
909 */
910vm_map_submap(map, start, end, submap)
911 register vm_map_t map;
912 register vm_offset_t start;
913 register vm_offset_t end;
914 vm_map_t submap;
915{
916 vm_map_entry_t entry;
917 register int result = KERN_INVALID_ARGUMENT;
918
919 vm_map_lock(map);
920
921 VM_MAP_RANGE_CHECK(map, start, end);
922
923 if (vm_map_lookup_entry(map, start, &entry)) {
924 vm_map_clip_start(map, entry, start);
925 }
926 else
927 entry = entry->next;
928
929 vm_map_clip_end(map, entry, end);
930
931 if ((entry->start == start) && (entry->end == end) &&
932 (!entry->is_a_map) &&
933 (entry->object.vm_object == NULL) &&
934 (!entry->copy_on_write)) {
935 entry->is_a_map = FALSE;
936 entry->is_sub_map = TRUE;
937 vm_map_reference(entry->object.sub_map = submap);
938 result = KERN_SUCCESS;
939 }
940 vm_map_unlock(map);
941
942 return(result);
943}
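
/*
 * Illustrative sketch only (kept under #if 0, never compiled): the
 * expected sequence is to reserve the range in the parent map with
 * vm_map_find, as required above, and only then install the submap.
 * The function name and the "len" argument are placeholders, not part
 * of this interface.
 */
#if 0
static vm_map_t
vm_map_submap_sketch(parent, len)
	vm_map_t parent;
	vm_size_t len;
{
	vm_offset_t base;
	vm_map_t submap;

	/* Reserve an object-less range in the parent map. */
	base = vm_map_min(parent);
	if (vm_map_find(parent, NULL, (vm_offset_t) 0, &base, len, TRUE)
	    != KERN_SUCCESS)
		panic("vm_map_submap_sketch: no space");

	/* Create the subordinate map over the same range and install it. */
	submap = vm_map_create(vm_map_pmap(parent), base, base + len, FALSE);
	(void) vm_map_submap(parent, base, base + len, submap);
	return(submap);
}
#endif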
944
945/*
946 * vm_map_protect:
947 *
948 * Sets the protection of the specified address
949 * region in the target map. If "set_max" is
950 * specified, the maximum protection is to be set;
951 * otherwise, only the current protection is affected.
952 */
953vm_map_protect(map, start, end, new_prot, set_max)
954 register vm_map_t map;
955 register vm_offset_t start;
956 register vm_offset_t end;
957 register vm_prot_t new_prot;
958 register boolean_t set_max;
959{
960 register vm_map_entry_t current;
961 vm_map_entry_t entry;
962
963 vm_map_lock(map);
964
965 VM_MAP_RANGE_CHECK(map, start, end);
966
967 if (vm_map_lookup_entry(map, start, &entry)) {
968 vm_map_clip_start(map, entry, start);
969 }
970 else
971 entry = entry->next;
972
973 /*
974 * Make a first pass to check for protection
975 * violations.
976 */
977
978 current = entry;
979 while ((current != &map->header) && (current->start < end)) {
980 if (current->is_sub_map)
981 return(KERN_INVALID_ARGUMENT);
982 if ((new_prot & current->max_protection) != new_prot) {
983 vm_map_unlock(map);
984 return(KERN_PROTECTION_FAILURE);
985 }
986
987 current = current->next;
988 }
989
990 /*
991 * Go back and fix up protections.
992 * [Note that clipping is not necessary the second time.]
993 */
994
995 current = entry;
996
997 while ((current != &map->header) && (current->start < end)) {
998 vm_prot_t old_prot;
999
1000 vm_map_clip_end(map, current, end);
1001
1002 old_prot = current->protection;
1003 if (set_max)
1004 current->protection =
1005 (current->max_protection = new_prot) &
1006 old_prot;
1007 else
1008 current->protection = new_prot;
1009
1010 /*
1011 * Update physical map if necessary.
1012 * Worry about copy-on-write here -- CHECK THIS XXX
1013 */
1014
1015 if (current->protection != old_prot) {
1016
1017#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1018 VM_PROT_ALL)
1019#define max(a,b) ((a) > (b) ? (a) : (b))
1020
1021 if (current->is_a_map) {
1022 vm_map_entry_t share_entry;
1023 vm_offset_t share_end;
1024
1025 vm_map_lock(current->object.share_map);
1026 (void) vm_map_lookup_entry(
1027 current->object.share_map,
1028 current->offset,
1029 &share_entry);
1030 share_end = current->offset +
1031 (current->end - current->start);
1032 while ((share_entry !=
1033 &current->object.share_map->header) &&
1034 (share_entry->start < share_end)) {
1035
1036 pmap_protect(map->pmap,
1037 (max(share_entry->start,
1038 current->offset) -
1039 current->offset +
1040 current->start),
1041 min(share_entry->end,
1042 share_end) -
1043 current->offset +
1044 current->start,
1045 current->protection &
1046 MASK(share_entry));
1047
1048 share_entry = share_entry->next;
1049 }
1050 vm_map_unlock(current->object.share_map);
1051 }
1052 else
1053 pmap_protect(map->pmap, current->start,
1054 current->end,
 1055 current->protection & MASK(current));
1056#undef max
1057#undef MASK
1058 }
1059 current = current->next;
1060 }
1061
1062 vm_map_unlock(map);
1063 return(KERN_SUCCESS);
1064}
1065
1066/*
1067 * vm_map_inherit:
1068 *
1069 * Sets the inheritance of the specified address
1070 * range in the target map. Inheritance
1071 * affects how the map will be shared with
1072 * child maps at the time of vm_map_fork.
1073 */
1074vm_map_inherit(map, start, end, new_inheritance)
1075 register vm_map_t map;
1076 register vm_offset_t start;
1077 register vm_offset_t end;
1078 register vm_inherit_t new_inheritance;
1079{
1080 register vm_map_entry_t entry;
1081 vm_map_entry_t temp_entry;
1082
1083 switch (new_inheritance) {
1084 case VM_INHERIT_NONE:
1085 case VM_INHERIT_COPY:
1086 case VM_INHERIT_SHARE:
1087 break;
1088 default:
1089 return(KERN_INVALID_ARGUMENT);
1090 }
1091
1092 vm_map_lock(map);
1093
1094 VM_MAP_RANGE_CHECK(map, start, end);
1095
1096 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1097 entry = temp_entry;
1098 vm_map_clip_start(map, entry, start);
1099 }
1100 else
1101 entry = temp_entry->next;
1102
1103 while ((entry != &map->header) && (entry->start < end)) {
1104 vm_map_clip_end(map, entry, end);
1105
1106 entry->inheritance = new_inheritance;
1107
1108 entry = entry->next;
1109 }
1110
1111 vm_map_unlock(map);
1112 return(KERN_SUCCESS);
1113}
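
/*
 * Illustrative sketch (under #if 0): marking a range VM_INHERIT_SHARE in
 * a task map causes vmspace_fork() below to put that range behind a
 * sharing map referenced by both parent and child instead of copying it.
 * The "map", "start" and "len" names are placeholders.
 */
#if 0
	(void) vm_map_inherit(map, start, start + len, VM_INHERIT_SHARE);
#endif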
1114
1115/*
1116 * vm_map_pageable:
1117 *
1118 * Sets the pageability of the specified address
1119 * range in the target map. Regions specified
1120 * as not pageable require locked-down physical
1121 * memory and physical page maps.
1122 *
1123 * The map must not be locked, but a reference
1124 * must remain to the map throughout the call.
1125 */
1126vm_map_pageable(map, start, end, new_pageable)
1127 register vm_map_t map;
1128 register vm_offset_t start;
1129 register vm_offset_t end;
1130 register boolean_t new_pageable;
1131{
1132 register vm_map_entry_t entry;
1133 vm_map_entry_t temp_entry;
1134
1135 vm_map_lock(map);
1136
1137 VM_MAP_RANGE_CHECK(map, start, end);
1138
1139 /*
1140 * Only one pageability change may take place at one
1141 * time, since vm_fault assumes it will be called
1142 * only once for each wiring/unwiring. Therefore, we
1143 * have to make sure we're actually changing the pageability
1144 * for the entire region. We do so before making any changes.
1145 */
1146
1147 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1148 entry = temp_entry;
1149 vm_map_clip_start(map, entry, start);
1150 }
1151 else
1152 entry = temp_entry->next;
1153 temp_entry = entry;
1154
1155 /*
1156 * Actions are rather different for wiring and unwiring,
1157 * so we have two separate cases.
1158 */
1159
1160 if (new_pageable) {
1161
1162 /*
1163 * Unwiring. First ensure that the range to be
1164 * unwired is really wired down.
1165 */
1166 while ((entry != &map->header) && (entry->start < end)) {
1167
1168 if (entry->wired_count == 0) {
1169 vm_map_unlock(map);
1170 return(KERN_INVALID_ARGUMENT);
1171 }
1172 entry = entry->next;
1173 }
1174
1175 /*
1176 * Now decrement the wiring count for each region.
1177 * If a region becomes completely unwired,
1178 * unwire its physical pages and mappings.
1179 */
1180 lock_set_recursive(&map->lock);
1181
1182 entry = temp_entry;
1183 while ((entry != &map->header) && (entry->start < end)) {
1184 vm_map_clip_end(map, entry, end);
1185
1186 entry->wired_count--;
1187 if (entry->wired_count == 0)
1188 vm_fault_unwire(map, entry->start, entry->end);
1189
1190 entry = entry->next;
1191 }
1192 lock_clear_recursive(&map->lock);
1193 }
1194
1195 else {
1196 /*
1197 * Wiring. We must do this in two passes:
1198 *
1199 * 1. Holding the write lock, we increment the
1200 * wiring count. For any area that is not already
1201 * wired, we create any shadow objects that need
1202 * to be created.
1203 *
1204 * 2. We downgrade to a read lock, and call
1205 * vm_fault_wire to fault in the pages for any
1206 * newly wired area (wired_count is 1).
1207 *
1208 * Downgrading to a read lock for vm_fault_wire avoids
1209 * a possible deadlock with another thread that may have
1210 * faulted on one of the pages to be wired (it would mark
1211 * the page busy, blocking us, then in turn block on the
1212 * map lock that we hold). Because of problems in the
1213 * recursive lock package, we cannot upgrade to a write
1214 * lock in vm_map_lookup. Thus, any actions that require
1215 * the write lock must be done beforehand. Because we
1216 * keep the read lock on the map, the copy-on-write status
1217 * of the entries we modify here cannot change.
1218 */
1219
1220 /*
1221 * Pass 1.
1222 */
1223 entry = temp_entry;
1224 while ((entry != &map->header) && (entry->start < end)) {
1225 vm_map_clip_end(map, entry, end);
1226
1227 entry->wired_count++;
1228 if (entry->wired_count == 1) {
1229
1230 /*
1231 * Perform actions of vm_map_lookup that need
1232 * the write lock on the map: create a shadow
1233 * object for a copy-on-write region, or an
1234 * object for a zero-fill region.
1235 *
1236 * We don't have to do this for entries that
1237 * point to sharing maps, because we won't hold
1238 * the lock on the sharing map.
1239 */
1240 if (!entry->is_a_map) {
1241 if (entry->needs_copy &&
1242 ((entry->protection & VM_PROT_WRITE) != 0)) {
1243
1244 vm_object_shadow(&entry->object.vm_object,
1245 &entry->offset,
1246 (vm_size_t)(entry->end
1247 - entry->start));
1248 entry->needs_copy = FALSE;
1249 }
1250 else if (entry->object.vm_object == NULL) {
1251 entry->object.vm_object =
1252 vm_object_allocate((vm_size_t)(entry->end
1253 - entry->start));
1254 entry->offset = (vm_offset_t)0;
1255 }
1256 }
1257 }
1258
1259 entry = entry->next;
1260 }
1261
1262 /*
1263 * Pass 2.
1264 */
1265
1266 /*
1267 * HACK HACK HACK HACK
1268 *
1269 * If we are wiring in the kernel map or a submap of it,
1270 * unlock the map to avoid deadlocks. We trust that the
1271 * kernel threads are well-behaved, and therefore will
1272 * not do anything destructive to this region of the map
1273 * while we have it unlocked. We cannot trust user threads
1274 * to do the same.
1275 *
1276 * HACK HACK HACK HACK
1277 */
1278 if (vm_map_pmap(map) == kernel_pmap) {
1279 vm_map_unlock(map); /* trust me ... */
1280 }
1281 else {
1282 lock_set_recursive(&map->lock);
1283 lock_write_to_read(&map->lock);
1284 }
1285
1286 entry = temp_entry;
1287 while (entry != &map->header && entry->start < end) {
1288 if (entry->wired_count == 1) {
1289 vm_fault_wire(map, entry->start, entry->end);
1290 }
1291 entry = entry->next;
1292 }
1293
1294 if (vm_map_pmap(map) == kernel_pmap) {
1295 vm_map_lock(map);
1296 }
1297 else {
1298 lock_clear_recursive(&map->lock);
1299 }
1300 }
1301
1302 vm_map_unlock(map);
1303
1304 return(KERN_SUCCESS);
1305}
1306
1307/*
1308 * vm_map_entry_unwire: [ internal use only ]
1309 *
1310 * Make the region specified by this entry pageable.
1311 *
1312 * The map in question should be locked.
1313 * [This is the reason for this routine's existence.]
1314 */
1315void vm_map_entry_unwire(map, entry)
1316 vm_map_t map;
1317 register vm_map_entry_t entry;
1318{
1319 vm_fault_unwire(map, entry->start, entry->end);
1320 entry->wired_count = 0;
1321}
1322
1323/*
1324 * vm_map_entry_delete: [ internal use only ]
1325 *
1326 * Deallocate the given entry from the target map.
1327 */
1328void vm_map_entry_delete(map, entry)
1329 register vm_map_t map;
1330 register vm_map_entry_t entry;
1331{
1332 if (entry->wired_count != 0)
1333 vm_map_entry_unwire(map, entry);
1334
1335 vm_map_entry_unlink(map, entry);
1336 map->size -= entry->end - entry->start;
1337
1338 if (entry->is_a_map || entry->is_sub_map)
1339 vm_map_deallocate(entry->object.share_map);
1340 else
1341 vm_object_deallocate(entry->object.vm_object);
1342
1343 vm_map_entry_dispose(map, entry);
1344}
1345
1346/*
1347 * vm_map_delete: [ internal use only ]
1348 *
1349 * Deallocates the given address range from the target
1350 * map.
1351 *
1352 * When called with a sharing map, removes pages from
1353 * that region from all physical maps.
1354 */
1355vm_map_delete(map, start, end)
1356 register vm_map_t map;
1357 vm_offset_t start;
1358 register vm_offset_t end;
1359{
1360 register vm_map_entry_t entry;
1361 vm_map_entry_t first_entry;
1362
1363 /*
1364 * Find the start of the region, and clip it
1365 */
1366
1367 if (!vm_map_lookup_entry(map, start, &first_entry))
1368 entry = first_entry->next;
1369 else {
1370 entry = first_entry;
1371 vm_map_clip_start(map, entry, start);
1372
1373 /*
1374 * Fix the lookup hint now, rather than each
 1375 * time through the loop.
1376 */
1377
1378 SAVE_HINT(map, entry->prev);
1379 }
1380
1381 /*
1382 * Save the free space hint
1383 */
1384
1385 if (map->first_free->start >= start)
1386 map->first_free = entry->prev;
1387
1388 /*
1389 * Step through all entries in this region
1390 */
1391
1392 while ((entry != &map->header) && (entry->start < end)) {
1393 vm_map_entry_t next;
1394 register vm_offset_t s, e;
1395 register vm_object_t object;
1396
1397 vm_map_clip_end(map, entry, end);
1398
1399 next = entry->next;
1400 s = entry->start;
1401 e = entry->end;
1402
1403 /*
1404 * Unwire before removing addresses from the pmap;
1405 * otherwise, unwiring will put the entries back in
1406 * the pmap.
1407 */
1408
1409 object = entry->object.vm_object;
1410 if (entry->wired_count != 0)
1411 vm_map_entry_unwire(map, entry);
1412
1413 /*
1414 * If this is a sharing map, we must remove
1415 * *all* references to this data, since we can't
1416 * find all of the physical maps which are sharing
1417 * it.
1418 */
1419
1420 if (object == kernel_object || object == kmem_object)
1421 vm_object_page_remove(object, entry->offset,
1422 entry->offset + (e - s));
1423 else if (!map->is_main_map)
1424 vm_object_pmap_remove(object,
1425 entry->offset,
1426 entry->offset + (e - s));
1427 else
1428 pmap_remove(map->pmap, s, e);
1429
1430 /*
1431 * Delete the entry (which may delete the object)
1432 * only after removing all pmap entries pointing
1433 * to its pages. (Otherwise, its page frames may
1434 * be reallocated, and any modify bits will be
1435 * set in the wrong object!)
1436 */
1437
1438 vm_map_entry_delete(map, entry);
1439 entry = next;
1440 }
1441 return(KERN_SUCCESS);
1442}
1443
1444/*
1445 * vm_map_remove:
1446 *
1447 * Remove the given address range from the target map.
1448 * This is the exported form of vm_map_delete.
1449 */
1450vm_map_remove(map, start, end)
1451 register vm_map_t map;
1452 register vm_offset_t start;
1453 register vm_offset_t end;
1454{
1455 register int result;
1456
1457 vm_map_lock(map);
1458 VM_MAP_RANGE_CHECK(map, start, end);
1459 result = vm_map_delete(map, start, end);
1460 vm_map_unlock(map);
1461
1462 return(result);
1463}
1464
1465/*
1466 * vm_map_check_protection:
1467 *
1468 * Assert that the target map allows the specified
1469 * privilege on the entire address region given.
1470 * The entire region must be allocated.
1471 */
1472boolean_t vm_map_check_protection(map, start, end, protection)
1473 register vm_map_t map;
1474 register vm_offset_t start;
1475 register vm_offset_t end;
1476 register vm_prot_t protection;
1477{
1478 register vm_map_entry_t entry;
1479 vm_map_entry_t tmp_entry;
1480
1481 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1482 return(FALSE);
1483 }
1484
1485 entry = tmp_entry;
1486
1487 while (start < end) {
1488 if (entry == &map->header) {
1489 return(FALSE);
1490 }
1491
1492 /*
1493 * No holes allowed!
1494 */
1495
1496 if (start < entry->start) {
1497 return(FALSE);
1498 }
1499
1500 /*
1501 * Check protection associated with entry.
1502 */
1503
1504 if ((entry->protection & protection) != protection) {
1505 return(FALSE);
1506 }
1507
1508 /* go to next entry */
1509
1510 start = entry->end;
1511 entry = entry->next;
1512 }
1513 return(TRUE);
1514}
1515
1516/*
1517 * vm_map_copy_entry:
1518 *
1519 * Copies the contents of the source entry to the destination
1520 * entry. The entries *must* be aligned properly.
1521 */
1522void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1523 vm_map_t src_map, dst_map;
1524 register vm_map_entry_t src_entry, dst_entry;
1525{
1526 vm_object_t temp_object;
1527
1528 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1529 return;
1530
1531 if (dst_entry->object.vm_object != NULL &&
1532 !dst_entry->object.vm_object->internal)
1533 printf("vm_map_copy_entry: copying over permanent data!\n");
1534
1535 /*
1536 * If our destination map was wired down,
1537 * unwire it now.
1538 */
1539
1540 if (dst_entry->wired_count != 0)
1541 vm_map_entry_unwire(dst_map, dst_entry);
1542
1543 /*
1544 * If we're dealing with a sharing map, we
1545 * must remove the destination pages from
1546 * all maps (since we cannot know which maps
1547 * this sharing map belongs in).
1548 */
1549
1550 if (dst_map->is_main_map)
1551 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1552 else
1553 vm_object_pmap_remove(dst_entry->object.vm_object,
1554 dst_entry->offset,
1555 dst_entry->offset +
1556 (dst_entry->end - dst_entry->start));
1557
1558 if (src_entry->wired_count == 0) {
1559
1560 boolean_t src_needs_copy;
1561
1562 /*
1563 * If the source entry is marked needs_copy,
1564 * it is already write-protected.
1565 */
1566 if (!src_entry->needs_copy) {
1567
1568 boolean_t su;
1569
1570 /*
1571 * If the source entry has only one mapping,
1572 * we can just protect the virtual address
1573 * range.
1574 */
1575 if (!(su = src_map->is_main_map)) {
1576 simple_lock(&src_map->ref_lock);
1577 su = (src_map->ref_count == 1);
1578 simple_unlock(&src_map->ref_lock);
1579 }
1580
1581 if (su) {
1582 pmap_protect(src_map->pmap,
1583 src_entry->start,
1584 src_entry->end,
1585 src_entry->protection & ~VM_PROT_WRITE);
1586 }
1587 else {
1588 vm_object_pmap_copy(src_entry->object.vm_object,
1589 src_entry->offset,
1590 src_entry->offset + (src_entry->end
1591 -src_entry->start));
1592 }
1593 }
1594
1595 /*
1596 * Make a copy of the object.
1597 */
1598 temp_object = dst_entry->object.vm_object;
1599 vm_object_copy(src_entry->object.vm_object,
1600 src_entry->offset,
1601 (vm_size_t)(src_entry->end -
1602 src_entry->start),
1603 &dst_entry->object.vm_object,
1604 &dst_entry->offset,
1605 &src_needs_copy);
1606 /*
1607 * If we didn't get a copy-object now, mark the
1608 * source map entry so that a shadow will be created
1609 * to hold its changed pages.
1610 */
1611 if (src_needs_copy)
1612 src_entry->needs_copy = TRUE;
1613
1614 /*
1615 * The destination always needs to have a shadow
1616 * created.
1617 */
1618 dst_entry->needs_copy = TRUE;
1619
1620 /*
1621 * Mark the entries copy-on-write, so that write-enabling
1622 * the entry won't make copy-on-write pages writable.
1623 */
1624 src_entry->copy_on_write = TRUE;
1625 dst_entry->copy_on_write = TRUE;
1626 /*
1627 * Get rid of the old object.
1628 */
1629 vm_object_deallocate(temp_object);
1630
1631 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1632 dst_entry->end - dst_entry->start, src_entry->start);
1633 }
1634 else {
1635 /*
1636 * Of course, wired down pages can't be set copy-on-write.
1637 * Cause wired pages to be copied into the new
1638 * map by simulating faults (the new pages are
1639 * pageable)
1640 */
1641 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1642 }
1643}
1644
1645/*
1646 * vm_map_copy:
1647 *
1648 * Perform a virtual memory copy from the source
1649 * address map/range to the destination map/range.
1650 *
1651 * If src_destroy or dst_alloc is requested,
1652 * the source and destination regions should be
1653 * disjoint, not only in the top-level map, but
1654 * in the sharing maps as well. [The best way
1655 * to guarantee this is to use a new intermediate
1656 * map to make copies. This also reduces map
1657 * fragmentation.]
1658 */
1659vm_map_copy(dst_map, src_map,
1660 dst_addr, len, src_addr,
1661 dst_alloc, src_destroy)
1662 vm_map_t dst_map;
1663 vm_map_t src_map;
1664 vm_offset_t dst_addr;
1665 vm_size_t len;
1666 vm_offset_t src_addr;
1667 boolean_t dst_alloc;
1668 boolean_t src_destroy;
1669{
1670 register
1671 vm_map_entry_t src_entry;
1672 register
1673 vm_map_entry_t dst_entry;
1674 vm_map_entry_t tmp_entry;
1675 vm_offset_t src_start;
1676 vm_offset_t src_end;
1677 vm_offset_t dst_start;
1678 vm_offset_t dst_end;
1679 vm_offset_t src_clip;
1680 vm_offset_t dst_clip;
1681 int result;
1682 boolean_t old_src_destroy;
1683
1684 /*
1685 * XXX While we figure out why src_destroy screws up,
1686 * we'll do it by explicitly vm_map_delete'ing at the end.
1687 */
1688
1689 old_src_destroy = src_destroy;
1690 src_destroy = FALSE;
1691
1692 /*
1693 * Compute start and end of region in both maps
1694 */
1695
1696 src_start = src_addr;
1697 src_end = src_start + len;
1698 dst_start = dst_addr;
1699 dst_end = dst_start + len;
1700
1701 /*
1702 * Check that the region can exist in both source
1703 * and destination.
1704 */
1705
1706 if ((dst_end < dst_start) || (src_end < src_start))
1707 return(KERN_NO_SPACE);
1708
1709 /*
1710 * Lock the maps in question -- we avoid deadlock
1711 * by ordering lock acquisition by map value
1712 */
1713
1714 if (src_map == dst_map) {
1715 vm_map_lock(src_map);
1716 }
1717 else if ((int) src_map < (int) dst_map) {
1718 vm_map_lock(src_map);
1719 vm_map_lock(dst_map);
1720 } else {
1721 vm_map_lock(dst_map);
1722 vm_map_lock(src_map);
1723 }
1724
1725 result = KERN_SUCCESS;
1726
1727 /*
1728 * Check protections... source must be completely readable and
1729 * destination must be completely writable. [Note that if we're
1730 * allocating the destination region, we don't have to worry
1731 * about protection, but instead about whether the region
1732 * exists.]
1733 */
1734
1735 if (src_map->is_main_map && dst_map->is_main_map) {
1736 if (!vm_map_check_protection(src_map, src_start, src_end,
1737 VM_PROT_READ)) {
1738 result = KERN_PROTECTION_FAILURE;
1739 goto Return;
1740 }
1741
1742 if (dst_alloc) {
1743 /* XXX Consider making this a vm_map_find instead */
1744 if ((result = vm_map_insert(dst_map, NULL,
1745 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1746 goto Return;
1747 }
1748 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1749 VM_PROT_WRITE)) {
1750 result = KERN_PROTECTION_FAILURE;
1751 goto Return;
1752 }
1753 }
1754
1755 /*
1756 * Find the start entries and clip.
1757 *
1758 * Note that checking protection asserts that the
1759 * lookup cannot fail.
1760 *
1761 * Also note that we wait to do the second lookup
1762 * until we have done the first clip, as the clip
1763 * may affect which entry we get!
1764 */
1765
1766 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1767 src_entry = tmp_entry;
1768 vm_map_clip_start(src_map, src_entry, src_start);
1769
1770 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1771 dst_entry = tmp_entry;
1772 vm_map_clip_start(dst_map, dst_entry, dst_start);
1773
1774 /*
1775 * If both source and destination entries are the same,
1776 * retry the first lookup, as it may have changed.
1777 */
1778
1779 if (src_entry == dst_entry) {
1780 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1781 src_entry = tmp_entry;
1782 }
1783
1784 /*
1785 * If source and destination entries are still the same,
1786 * a null copy is being performed.
1787 */
1788
1789 if (src_entry == dst_entry)
1790 goto Return;
1791
1792 /*
1793 * Go through entries until we get to the end of the
1794 * region.
1795 */
1796
1797 while (src_start < src_end) {
1798 /*
1799 * Clip the entries to the endpoint of the entire region.
1800 */
1801
1802 vm_map_clip_end(src_map, src_entry, src_end);
1803 vm_map_clip_end(dst_map, dst_entry, dst_end);
1804
1805 /*
1806 * Clip each entry to the endpoint of the other entry.
1807 */
1808
1809 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1810 vm_map_clip_end(src_map, src_entry, src_clip);
1811
1812 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1813 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1814
1815 /*
1816 * Both entries now match in size and relative endpoints.
1817 *
1818 * If both entries refer to a VM object, we can
1819 * deal with them now.
1820 */
1821
1822 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1823 vm_map_copy_entry(src_map, dst_map, src_entry,
1824 dst_entry);
1825 }
1826 else {
1827 register vm_map_t new_dst_map;
1828 vm_offset_t new_dst_start;
1829 vm_size_t new_size;
1830 vm_map_t new_src_map;
1831 vm_offset_t new_src_start;
1832
1833 /*
1834 * We have to follow at least one sharing map.
1835 */
1836
1837 new_size = (dst_entry->end - dst_entry->start);
1838
1839 if (src_entry->is_a_map) {
1840 new_src_map = src_entry->object.share_map;
1841 new_src_start = src_entry->offset;
1842 }
1843 else {
1844 new_src_map = src_map;
1845 new_src_start = src_entry->start;
1846 lock_set_recursive(&src_map->lock);
1847 }
1848
1849 if (dst_entry->is_a_map) {
1850 vm_offset_t new_dst_end;
1851
1852 new_dst_map = dst_entry->object.share_map;
1853 new_dst_start = dst_entry->offset;
1854
1855 /*
1856 * Since the destination sharing entries
1857 * will be merely deallocated, we can
1858 * do that now, and replace the region
1859 * with a null object. [This prevents
1860 * splitting the source map to match
1861 * the form of the destination map.]
1862 * Note that we can only do so if the
1863 * source and destination do not overlap.
1864 */
1865
1866 new_dst_end = new_dst_start + new_size;
1867
1868 if (new_dst_map != new_src_map) {
1869 vm_map_lock(new_dst_map);
1870 (void) vm_map_delete(new_dst_map,
1871 new_dst_start,
1872 new_dst_end);
1873 (void) vm_map_insert(new_dst_map,
1874 NULL,
1875 (vm_offset_t) 0,
1876 new_dst_start,
1877 new_dst_end);
1878 vm_map_unlock(new_dst_map);
1879 }
1880 }
1881 else {
1882 new_dst_map = dst_map;
1883 new_dst_start = dst_entry->start;
1884 lock_set_recursive(&dst_map->lock);
1885 }
1886
1887 /*
1888 * Recursively copy the sharing map.
1889 */
1890
1891 (void) vm_map_copy(new_dst_map, new_src_map,
1892 new_dst_start, new_size, new_src_start,
1893 FALSE, FALSE);
1894
1895 if (dst_map == new_dst_map)
1896 lock_clear_recursive(&dst_map->lock);
1897 if (src_map == new_src_map)
1898 lock_clear_recursive(&src_map->lock);
1899 }
1900
1901 /*
1902 * Update variables for next pass through the loop.
1903 */
1904
1905 src_start = src_entry->end;
1906 src_entry = src_entry->next;
1907 dst_start = dst_entry->end;
1908 dst_entry = dst_entry->next;
1909
1910 /*
1911 * If the source is to be destroyed, here is the
1912 * place to do it.
1913 */
1914
1915 if (src_destroy && src_map->is_main_map &&
1916 dst_map->is_main_map)
1917 vm_map_entry_delete(src_map, src_entry->prev);
1918 }
1919
1920 /*
1921 * Update the physical maps as appropriate
1922 */
1923
1924 if (src_map->is_main_map && dst_map->is_main_map) {
1925 if (src_destroy)
1926 pmap_remove(src_map->pmap, src_addr, src_addr + len);
1927 }
1928
1929 /*
1930 * Unlock the maps
1931 */
1932
1933 Return: ;
1934
1935 if (old_src_destroy)
1936 vm_map_delete(src_map, src_addr, src_addr + len);
1937
1938 vm_map_unlock(src_map);
1939 if (src_map != dst_map)
1940 vm_map_unlock(dst_map);
1941
1942 return(result);
1943}
1944
1945/*
1946 * vmspace_fork:
1947 * Create a new process vmspace structure and vm_map
1948 * based on those of an existing process. The new map
1949 * is based on the old map, according to the inheritance
1950 * values on the regions in that map.
1951 *
1952 * The source map must not be locked.
1953 */
1954struct vmspace *
1955vmspace_fork(vm1)
1956 register struct vmspace *vm1;
1957{
1958 register struct vmspace *vm2;
1959 vm_map_t old_map = &vm1->vm_map;
1960 vm_map_t new_map;
1961 vm_map_entry_t old_entry;
1962 vm_map_entry_t new_entry;
1963 pmap_t new_pmap;
1964
1965 vm_map_lock(old_map);
1966
1967 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1968 old_map->entries_pageable);
1969 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1970 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1971 new_pmap = &vm2->vm_pmap; /* XXX */
1972 new_map = &vm2->vm_map; /* XXX */
1973
1974 old_entry = old_map->header.next;
1975
1976 while (old_entry != &old_map->header) {
1977 if (old_entry->is_sub_map)
1978 panic("vm_map_fork: encountered a submap");
1979
1980 switch (old_entry->inheritance) {
1981 case VM_INHERIT_NONE:
1982 break;
1983
1984 case VM_INHERIT_SHARE:
1985 /*
1986 * If we don't already have a sharing map:
1987 */
1988
1989 if (!old_entry->is_a_map) {
1990 vm_map_t new_share_map;
1991 vm_map_entry_t new_share_entry;
1992
1993 /*
1994 * Create a new sharing map
1995 */
1996
1997 new_share_map = vm_map_create(NULL,
1998 old_entry->start,
1999 old_entry->end,
2000 TRUE);
2001 new_share_map->is_main_map = FALSE;
2002
2003 /*
2004 * Create the only sharing entry from the
2005 * old task map entry.
2006 */
2007
2008 new_share_entry =
2009 vm_map_entry_create(new_share_map);
2010 *new_share_entry = *old_entry;
2011
2012 /*
2013 * Insert the entry into the new sharing
2014 * map
2015 */
2016
2017 vm_map_entry_link(new_share_map,
2018 new_share_map->header.prev,
2019 new_share_entry);
2020
2021 /*
2022 * Fix up the task map entry to refer
2023 * to the sharing map now.
2024 */
2025
2026 old_entry->is_a_map = TRUE;
2027 old_entry->object.share_map = new_share_map;
2028 old_entry->offset = old_entry->start;
2029 }
2030
2031 /*
2032 * Clone the entry, referencing the sharing map.
2033 */
2034
2035 new_entry = vm_map_entry_create(new_map);
2036 *new_entry = *old_entry;
2037 vm_map_reference(new_entry->object.share_map);
2038
2039 /*
2040 * Insert the entry into the new map -- we
2041 * know we're inserting at the end of the new
2042 * map.
2043 */
2044
2045 vm_map_entry_link(new_map, new_map->header.prev,
2046 new_entry);
2047
2048 /*
2049 * Update the physical map
2050 */
2051
2052 pmap_copy(new_map->pmap, old_map->pmap,
2053 new_entry->start,
2054 (old_entry->end - old_entry->start),
2055 old_entry->start);
2056 break;
2057
2058 case VM_INHERIT_COPY:
2059 /*
2060 * Clone the entry and link into the map.
2061 */
2062
2063 new_entry = vm_map_entry_create(new_map);
2064 *new_entry = *old_entry;
2065 new_entry->wired_count = 0;
2066 new_entry->object.vm_object = NULL;
2067 new_entry->is_a_map = FALSE;
2068 vm_map_entry_link(new_map, new_map->header.prev,
2069 new_entry);
2070 if (old_entry->is_a_map) {
2071 int check;
2072
2073 check = vm_map_copy(new_map,
2074 old_entry->object.share_map,
2075 new_entry->start,
2076 (vm_size_t)(new_entry->end -
2077 new_entry->start),
2078 old_entry->offset,
2079 FALSE, FALSE);
2080 if (check != KERN_SUCCESS)
2081 printf("vm_map_fork: copy in share_map region failed\n");
2082 }
2083 else {
2084 vm_map_copy_entry(old_map, new_map, old_entry,
2085 new_entry);
2086 }
2087 break;
2088 }
2089 old_entry = old_entry->next;
2090 }
2091
2092 new_map->size = old_map->size;
2093 vm_map_unlock(old_map);
2094
2095 return(vm2);
2096}
2097
2098/*
2099 * vm_map_lookup:
2100 *
2101 * Finds the VM object, offset, and
2102 * protection for a given virtual address in the
2103 * specified map, assuming a page fault of the
2104 * type specified.
2105 *
2106 * Leaves the map in question locked for read; return
2107 * values are guaranteed until a vm_map_lookup_done
2108 * call is performed. Note that the map argument
2109 * is in/out; the returned map must be used in
2110 * the call to vm_map_lookup_done.
2111 *
2112 * A handle (out_entry) is returned for use in
2113 * vm_map_lookup_done, to make that fast.
2114 *
2115 * If a lookup is requested with "write protection"
2116 * specified, the map may be changed to perform virtual
2117 * copying operations, although the data referenced will
2118 * remain the same.
2119 */
2120vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2121 object, offset, out_prot, wired, single_use)
2122 vm_map_t *var_map; /* IN/OUT */
2123 register vm_offset_t vaddr;
2124 register vm_prot_t fault_type;
2125
2126 vm_map_entry_t *out_entry; /* OUT */
2127 vm_object_t *object; /* OUT */
2128 vm_offset_t *offset; /* OUT */
2129 vm_prot_t *out_prot; /* OUT */
2130 boolean_t *wired; /* OUT */
2131 boolean_t *single_use; /* OUT */
2132{
2133 vm_map_t share_map;
2134 vm_offset_t share_offset;
2135 register vm_map_entry_t entry;
2136 register vm_map_t map = *var_map;
2137 register vm_prot_t prot;
2138 register boolean_t su;
2139
2140 RetryLookup: ;
2141
2142 /*
2143 * Lookup the faulting address.
2144 */
2145
2146 vm_map_lock_read(map);
2147
2148#define RETURN(why) \
2149 { \
2150 vm_map_unlock_read(map); \
2151 return(why); \
2152 }
2153
2154 /*
2155	 *	If the map has an interesting hint, try it before calling the
2156	 *	full-blown lookup routine.
2157 */
2158
2159 simple_lock(&map->hint_lock);
2160 entry = map->hint;
2161 simple_unlock(&map->hint_lock);
2162
2163 *out_entry = entry;
2164
2165 if ((entry == &map->header) ||
2166 (vaddr < entry->start) || (vaddr >= entry->end)) {
2167 vm_map_entry_t tmp_entry;
2168
2169 /*
2170 * Entry was either not a valid hint, or the vaddr
2171 * was not contained in the entry, so do a full lookup.
2172 */
2173 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2174 RETURN(KERN_INVALID_ADDRESS);
2175
2176 entry = tmp_entry;
2177 *out_entry = entry;
2178 }
2179
2180 /*
2181 * Handle submaps.
2182 */
2183
2184 if (entry->is_sub_map) {
2185 vm_map_t old_map = map;
2186
2187 *var_map = map = entry->object.sub_map;
2188 vm_map_unlock_read(old_map);
2189 goto RetryLookup;
2190 }
2191
2192 /*
2193 * Check whether this task is allowed to have
2194 * this page.
2195 */
2196
2197 prot = entry->protection;
2198 if ((fault_type & (prot)) != fault_type)
2199 RETURN(KERN_PROTECTION_FAILURE);
2200
2201 /*
2202 * If this page is not pageable, we have to get
2203 * it for all possible accesses.
2204 */
2205
2206 if (*wired = (entry->wired_count != 0))
2207 prot = fault_type = entry->protection;
2208
2209 /*
2210 * If we don't already have a VM object, track
2211 * it down.
2212 */
2213
2214 if (su = !entry->is_a_map) {
2215 share_map = map;
2216 share_offset = vaddr;
2217 }
2218 else {
2219 vm_map_entry_t share_entry;
2220
2221 /*
2222 * Compute the sharing map, and offset into it.
2223 */
2224
2225 share_map = entry->object.share_map;
2226 share_offset = (vaddr - entry->start) + entry->offset;
2227
2228 /*
2229 * Look for the backing store object and offset
2230 */
2231
2232 vm_map_lock_read(share_map);
2233
2234 if (!vm_map_lookup_entry(share_map, share_offset,
2235 &share_entry)) {
2236 vm_map_unlock_read(share_map);
2237 RETURN(KERN_INVALID_ADDRESS);
2238 }
2239 entry = share_entry;
2240 }
2241
2242 /*
2243	 *	If the entry was copy-on-write, we either shadow it now (write fault) or demote the allowed access (read fault).
2244 */
2245
2246 if (entry->needs_copy) {
2247 /*
2248 * If we want to write the page, we may as well
2249 * handle that now since we've got the sharing
2250 * map locked.
2251 *
2252 * If we don't need to write the page, we just
2253 * demote the permissions allowed.
2254 */
2255
2256 if (fault_type & VM_PROT_WRITE) {
2257 /*
2258 * Make a new object, and place it in the
2259 * object chain. Note that no new references
2260 * have appeared -- one just moved from the
2261 * share map to the new object.
2262 */
2263
2264 if (lock_read_to_write(&share_map->lock)) {
2265 if (share_map != map)
2266 vm_map_unlock_read(map);
2267 goto RetryLookup;
2268 }
2269
2270 vm_object_shadow(
2271 &entry->object.vm_object,
2272 &entry->offset,
2273 (vm_size_t) (entry->end - entry->start));
2274
2275 entry->needs_copy = FALSE;
2276
2277 lock_write_to_read(&share_map->lock);
2278 }
2279 else {
2280 /*
2281 * We're attempting to read a copy-on-write
2282 * page -- don't allow writes.
2283 */
2284
2285 prot &= (~VM_PROT_WRITE);
2286 }
2287 }
2288
2289 /*
2290 * Create an object if necessary.
2291 */
2292 if (entry->object.vm_object == NULL) {
2293
2294 if (lock_read_to_write(&share_map->lock)) {
2295 if (share_map != map)
2296 vm_map_unlock_read(map);
2297 goto RetryLookup;
2298 }
2299
2300 entry->object.vm_object = vm_object_allocate(
2301 (vm_size_t)(entry->end - entry->start));
2302 entry->offset = 0;
2303 lock_write_to_read(&share_map->lock);
2304 }
2305
2306 /*
2307 * Return the object/offset from this entry. If the entry
2308 * was copy-on-write or empty, it has been fixed up.
2309 */
2310
2311 *offset = (share_offset - entry->start) + entry->offset;
2312 *object = entry->object.vm_object;
2313
2314 /*
2315 * Return whether this is the only map sharing this data.
2316 */
2317
2318 if (!su) {
2319 simple_lock(&share_map->ref_lock);
2320 su = (share_map->ref_count == 1);
2321 simple_unlock(&share_map->ref_lock);
2322 }
2323
2324 *out_prot = prot;
2325 *single_use = su;
2326
2327 return(KERN_SUCCESS);
2328
2329#undef RETURN
2330}
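
/*
 * Illustrative sketch (not part of the original file): the calling
 * sequence for vm_map_lookup()/vm_map_lookup_done(), roughly the
 * shape of a page-fault handler's use of the interface above.  The
 * function name and its arguments are hypothetical; note that the
 * map argument is in/out, so the (possibly different) map returned
 * by vm_map_lookup() is the one handed to vm_map_lookup_done().
 */
#if 0
static int
example_fault_lookup(map, va)
	vm_map_t	map;
	vm_offset_t	va;
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_prot_t	prot;
	boolean_t	wired, single_use;
	int		result;

	result = vm_map_lookup(&map, va, VM_PROT_READ, &entry,
			&object, &offset, &prot, &wired, &single_use);
	if (result != KERN_SUCCESS)
		return(result);

	/* ... bring the page in from (object, offset) here ... */

	vm_map_lookup_done(map, entry);	/* releases the read lock(s) */
	return(KERN_SUCCESS);
}
#endif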
2331
2332/*
2333 * vm_map_lookup_done:
2334 *
2335 * Releases locks acquired by a vm_map_lookup
2336 * (according to the handle returned by that lookup).
2337 */
2338
2339void vm_map_lookup_done(map, entry)
2340 register vm_map_t map;
2341 vm_map_entry_t entry;
2342{
2343 /*
2344 * If this entry references a map, unlock it first.
2345 */
2346
2347 if (entry->is_a_map)
2348 vm_map_unlock_read(entry->object.share_map);
2349
2350 /*
2351 * Unlock the main-level map
2352 */
2353
2354 vm_map_unlock_read(map);
2355}
2356
2357/*
2358 * Routine: vm_map_simplify
2359 * Purpose:
2360 * Attempt to simplify the map representation in
2361 * the vicinity of the given starting address.
2362 * Note:
2363 * This routine is intended primarily to keep the
2364 * kernel maps more compact -- they generally don't
2365 * benefit from the "expand a map entry" technology
2366 * at allocation time because the adjacent entry
2367 * is often wired down.
2368 */
2369void vm_map_simplify(map, start)
2370 vm_map_t map;
2371 vm_offset_t start;
2372{
2373 vm_map_entry_t this_entry;
2374 vm_map_entry_t prev_entry;
2375
2376 vm_map_lock(map);
2377 if (
2378 (vm_map_lookup_entry(map, start, &this_entry)) &&
2379 ((prev_entry = this_entry->prev) != &map->header) &&
2380
2381 (prev_entry->end == start) &&
2382 (map->is_main_map) &&
2383
2384 (prev_entry->is_a_map == FALSE) &&
2385 (prev_entry->is_sub_map == FALSE) &&
2386
2387 (this_entry->is_a_map == FALSE) &&
2388 (this_entry->is_sub_map == FALSE) &&
2389
2390 (prev_entry->inheritance == this_entry->inheritance) &&
2391 (prev_entry->protection == this_entry->protection) &&
2392 (prev_entry->max_protection == this_entry->max_protection) &&
2393 (prev_entry->wired_count == this_entry->wired_count) &&
2394
2395 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2396 (prev_entry->needs_copy == this_entry->needs_copy) &&
2397
2398 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2399 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2400 == this_entry->offset)
2401 ) {
2402 if (map->first_free == this_entry)
2403 map->first_free = prev_entry;
2404
2405 SAVE_HINT(map, prev_entry);
2406 vm_map_entry_unlink(map, this_entry);
2407 prev_entry->end = this_entry->end;
2408 vm_object_deallocate(this_entry->object.vm_object);
2409 vm_map_entry_dispose(map, this_entry);
2410 }
2411 vm_map_unlock(map);
2412}
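
/*
 * Illustrative sketch (not part of the original file): typical use of
 * vm_map_simplify() -- after placing a new entry in a kernel map, ask
 * the map to coalesce it with the entry ending at the same address if
 * the two are compatible.  It assumes the vm_map_find() interface
 * defined earlier in this file; the function name, its arguments, and
 * the already-referenced object are hypothetical.
 */
#if 0
static void
kmem_example_alloc(map, object, size)
	vm_map_t	map;
	vm_object_t	object;		/* assumed already referenced */
	vm_size_t	size;
{
	vm_offset_t	addr;

	addr = vm_map_min(map);
	if (vm_map_find(map, object, (vm_offset_t) 0, &addr,
			size, TRUE) != KERN_SUCCESS)
		return;
	/*
	 * May merge the new entry with a compatible predecessor
	 * ending exactly at addr.
	 */
	vm_map_simplify(map, addr);
}
#endif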
2413
2414#ifdef DEBUG
2415/*
2416 * vm_map_print: [ debug ]
2417 */
2418void vm_map_print(map, full)
2419 register vm_map_t map;
2420 boolean_t full;
2421{
2422 register vm_map_entry_t entry;
2423 extern int indent;
2424
2425 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2426 (map->is_main_map ? "Task" : "Share"),
2427 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2428 map->timestamp);
2429
2430 if (!full && indent)
2431 return;
2432
2433 indent += 2;
2434 for (entry = map->header.next; entry != &map->header;
2435 entry = entry->next) {
2436 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2437 (int) entry, (int) entry->start, (int) entry->end);
2438 if (map->is_main_map) {
2439 static char *inheritance_name[4] =
2440 { "share", "copy", "none", "donate_copy"};
2441 printf("prot=%x/%x/%s, ",
2442 entry->protection,
2443 entry->max_protection,
2444 inheritance_name[entry->inheritance]);
2445 if (entry->wired_count != 0)
2446 printf("wired, ");
2447 }
2448
2449 if (entry->is_a_map || entry->is_sub_map) {
2450 printf("share=0x%x, offset=0x%x\n",
2451 (int) entry->object.share_map,
2452 (int) entry->offset);
2453 if ((entry->prev == &map->header) ||
2454 (!entry->prev->is_a_map) ||
2455 (entry->prev->object.share_map !=
2456 entry->object.share_map)) {
2457 indent += 2;
2458 vm_map_print(entry->object.share_map, full);
2459 indent -= 2;
2460 }
2461
2462 }
2463 else {
2464 printf("object=0x%x, offset=0x%x",
2465 (int) entry->object.vm_object,
2466 (int) entry->offset);
2467 if (entry->copy_on_write)
2468 printf(", copy (%s)",
2469 entry->needs_copy ? "needed" : "done");
2470 printf("\n");
2471
2472 if ((entry->prev == &map->header) ||
2473 (entry->prev->is_a_map) ||
2474 (entry->prev->object.vm_object !=
2475 entry->object.vm_object)) {
2476 indent += 2;
2477 vm_object_print(entry->object.vm_object, full);
2478 indent -= 2;
2479 }
2480 }
2481 }
2482 indent -= 2;
2483}
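
/*
 * Illustrative sketch (not part of the original file): vm_map_print()
 * is meant to be invoked by hand, e.g. from a kernel debugger.  With
 * full == TRUE it also descends into sharing maps and backing objects.
 * "kernel_map" is assumed to be the usual global kernel map.
 */
#if 0
	vm_map_print(kernel_map, TRUE);
#endif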
2484#endif /* DEBUG */