 1/*
 2 * Copyright (c) 1991, 1993
 3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
 8 * %sccs.include.redist.c%
175f072e 9 *
 10 * @(#)vm_map.c 8.1 (Berkeley) %G%
11 *
12 *
13 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14 * All rights reserved.
15 *
16 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17 *
18 * Permission to use, copy, modify and distribute this software and
19 * its documentation is hereby granted, provided that both the copyright
20 * notice and this permission notice appear in all copies of the
21 * software, derivative works or modified versions, and any portions
22 * thereof, and that both notices appear in supporting documentation.
23 *
24 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27 *
28 * Carnegie Mellon requests users of this software to return to
29 *
30 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
31 * School of Computer Science
32 * Carnegie Mellon University
33 * Pittsburgh PA 15213-3890
34 *
35 * any improvements or extensions that they make and grant Carnegie the
36 * rights to redistribute these changes.
37 */
38
39/*
40 * Virtual memory mapping module.
41 */
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/malloc.h>
46
47#include <vm/vm.h>
48#include <vm/vm_page.h>
49#include <vm/vm_object.h>
50
51/*
52 * Virtual memory maps provide for the mapping, protection,
53 * and sharing of virtual memory objects. In addition,
54 * this module provides for an efficient virtual copy of
55 * memory from one map to another.
56 *
57 * Synchronization is required prior to most operations.
58 *
59 * Maps consist of an ordered doubly-linked list of simple
60 * entries; a single hint is used to speed up lookups.
61 *
62 * In order to properly represent the sharing of virtual
63 * memory regions among maps, the map structure is bi-level.
64 * Top-level ("address") maps refer to regions of sharable
65 * virtual memory. These regions are implemented as
66 * ("sharing") maps, which then refer to the actual virtual
67 * memory objects. When two address maps "share" memory,
68 * their top-level maps both have references to the same
69 * sharing map. When memory is virtual-copied from one
70 * address map to another, the references in the sharing
71 * maps are actually copied -- no copying occurs at the
72 * virtual memory object level.
73 *
 74 * Since portions of maps are specified by start/end addresses,
75 * which may not align with existing map entries, all
76 * routines merely "clip" entries to these start/end values.
77 * [That is, an entry is split into two, bordering at a
78 * start or end value.] Note that these clippings may not
79 * always be necessary (as the two resulting entries are then
80 * not changed); however, the clipping is done for convenience.
81 * No attempt is currently made to "glue back together" two
82 * abutting entries.
83 *
84 * As mentioned above, virtual copy operations are performed
85 * by copying VM object references from one sharing map to
86 * another, and then marking both regions as copy-on-write.
87 * It is important to note that only one writeable reference
88 * to a VM object region exists in any map -- this means that
89 * shadow object creation can be delayed until a write operation
90 * occurs.
91 */
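/*
 * Illustrative sketch only (nothing below calls this): the exported
 * range operations in this file (vm_map_protect, vm_map_inherit,
 * vm_map_pageable, vm_map_remove) all follow the lock/lookup/clip
 * pattern described above, roughly:
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	while ((entry != &map->header) && (entry->start < end)) {
 *		vm_map_clip_end(map, entry, end);
 *		... operate on the clipped entry ...
 *		entry = entry->next;
 *	}
 *	vm_map_unlock(map);
 */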
92
93/*
 94 * vm_map_startup:
95 *
96 * Initialize the vm_map module. Must be called before
97 * any other vm_map routines.
98 *
99 * Map and entry structures are allocated from the general
100 * purpose memory pool with some exceptions:
101 *
102 * - The kernel map and kmem submap are allocated statically.
103 * - Kernel map entries are allocated out of a static pool.
104 *
105 * These restrictions are necessary since malloc() uses the
106 * maps and requires map entries.
107 */
108
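/*
 * kentry_data and kentry_data_size describe a region of memory reserved
 * before vm_map_startup() is called; vm_map_startup() carves it into the
 * static kmap_free list of maps and the kentry_free list of kernel map
 * entries used by vm_map_create() and vm_map_entry_create() below.
 */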
109vm_offset_t kentry_data;
110vm_size_t kentry_data_size;
111vm_map_entry_t kentry_free;
112vm_map_t kmap_free;
113
114static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
115static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
116
 117void vm_map_startup()
118{
119 register int i;
120 register vm_map_entry_t mep;
121 vm_map_t mp;
122
123 /*
124 * Static map structures for allocation before initialization of
125 * kernel map or kmem map. vm_map_create knows how to deal with them.
126 */
127 kmap_free = mp = (vm_map_t) kentry_data;
128 i = MAX_KMAP;
129 while (--i > 0) {
130 mp->header.next = (vm_map_entry_t) (mp + 1);
131 mp++;
132 }
 133 mp++->header.next = NULL;
134
135 /*
136 * Form a free list of statically allocated kernel map entries
137 * with the rest.
138 */
139 kentry_free = mep = (vm_map_entry_t) mp;
140 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
141 while (--i > 0) {
142 mep->next = mep + 1;
143 mep++;
144 }
145 mep->next = NULL;
146}
147
148/*
149 * Allocate a vmspace structure, including a vm_map and pmap,
150 * and initialize those structures. The refcnt is set to 1.
151 * The remaining fields must be initialized by the caller.
152 */
153struct vmspace *
154vmspace_alloc(min, max, pageable)
155 vm_offset_t min, max;
156 int pageable;
157{
158 register struct vmspace *vm;
159
160 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
161 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
162 vm_map_init(&vm->vm_map, min, max, pageable);
163 pmap_pinit(&vm->vm_pmap);
164 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
165 vm->vm_refcnt = 1;
166 return (vm);
167}
168
169void
170vmspace_free(vm)
171 register struct vmspace *vm;
172{
173
174 if (--vm->vm_refcnt == 0) {
175 /*
176 * Lock the map, to wait out all other references to it.
177 * Delete all of the mappings and pages they hold,
178 * then call the pmap module to reclaim anything left.
179 */
180 vm_map_lock(&vm->vm_map);
181 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
182 vm->vm_map.max_offset);
183 pmap_release(&vm->vm_pmap);
184 FREE(vm, M_VMMAP);
185 }
186}
187
188/*
189 * vm_map_create:
190 *
191 * Creates and returns a new empty VM map with
192 * the given physical map structure, and having
193 * the given lower and upper address bounds.
194 */
195vm_map_t vm_map_create(pmap, min, max, pageable)
196 pmap_t pmap;
197 vm_offset_t min, max;
198 boolean_t pageable;
199{
200 register vm_map_t result;
201 extern vm_map_t kernel_map, kmem_map;
202
 203 if (kmem_map == NULL) {
204 result = kmap_free;
205 kmap_free = (vm_map_t) result->header.next;
206 if (result == NULL)
207 panic("vm_map_create: out of maps");
208 } else
209 MALLOC(result, vm_map_t, sizeof(struct vm_map),
210 M_VMMAP, M_WAITOK);
211
 212 vm_map_init(result, min, max, pageable);
 213 result->pmap = pmap;
214 return(result);
215}
216
217/*
218 * Initialize an existing vm_map structure
219 * such as that in the vmspace structure.
220 * The pmap is set elsewhere.
221 */
222void
223vm_map_init(map, min, max, pageable)
224 register struct vm_map *map;
225 vm_offset_t min, max;
226 boolean_t pageable;
227{
228 map->header.next = map->header.prev = &map->header;
229 map->nentries = 0;
230 map->size = 0;
231 map->ref_count = 1;
232 map->is_main_map = TRUE;
233 map->min_offset = min;
234 map->max_offset = max;
235 map->entries_pageable = pageable;
236 map->first_free = &map->header;
237 map->hint = &map->header;
238 map->timestamp = 0;
239 lock_init(&map->lock, TRUE);
240 simple_lock_init(&map->ref_lock);
241 simple_lock_init(&map->hint_lock);
242}
243
244/*
245 * vm_map_entry_create: [ internal use only ]
246 *
247 * Allocates a VM map entry for insertion.
 248 * No entry fields are filled in.
249 */
250vm_map_entry_t vm_map_entry_create(map)
251 vm_map_t map;
252{
253 vm_map_entry_t entry;
 254 extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
 255
256 if (map == kernel_map || map == kmem_map || map == mb_map ||
257 map == pager_map) {
258 if (entry = kentry_free)
259 kentry_free = kentry_free->next;
260 } else
261 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
262 M_VMMAPENT, M_WAITOK);
 263 if (entry == NULL)
264 panic("vm_map_entry_create: out of map entries");
265
266 return(entry);
267}
268
269/*
270 * vm_map_entry_dispose: [ internal use only ]
271 *
272 * Inverse of vm_map_entry_create.
273 */
274void vm_map_entry_dispose(map, entry)
275 vm_map_t map;
276 vm_map_entry_t entry;
277{
 278 extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
 279
280 if (map == kernel_map || map == kmem_map || map == mb_map ||
281 map == pager_map) {
282 entry->next = kentry_free;
283 kentry_free = entry;
284 } else
285 FREE(entry, M_VMMAPENT);
286}
287
288/*
289 * vm_map_entry_{un,}link:
290 *
291 * Insert/remove entries from maps.
292 */
293#define vm_map_entry_link(map, after_where, entry) \
294 { \
295 (map)->nentries++; \
296 (entry)->prev = (after_where); \
297 (entry)->next = (after_where)->next; \
298 (entry)->prev->next = (entry); \
299 (entry)->next->prev = (entry); \
300 }
301#define vm_map_entry_unlink(map, entry) \
302 { \
303 (map)->nentries--; \
304 (entry)->next->prev = (entry)->prev; \
305 (entry)->prev->next = (entry)->next; \
306 }
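/*
 * The entry list is circular, with &map->header acting as a sentinel
 * (see vm_map_init), so neither macro needs a NULL check; entries are
 * kept sorted by starting address, and every caller in this file holds
 * the map lock while linking or unlinking.
 */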
307
308/*
309 * vm_map_reference:
310 *
311 * Creates another valid reference to the given map.
312 *
313 */
314void vm_map_reference(map)
315 register vm_map_t map;
316{
 317 if (map == NULL)
318 return;
319
320 simple_lock(&map->ref_lock);
321 map->ref_count++;
322 simple_unlock(&map->ref_lock);
323}
324
325/*
326 * vm_map_deallocate:
327 *
328 * Removes a reference from the specified map,
329 * destroying it if no references remain.
330 * The map should not be locked.
331 */
332void vm_map_deallocate(map)
333 register vm_map_t map;
334{
335 register int c;
336
 337 if (map == NULL)
338 return;
339
340 simple_lock(&map->ref_lock);
341 c = --map->ref_count;
342 simple_unlock(&map->ref_lock);
343
344 if (c > 0) {
345 return;
346 }
347
348 /*
349 * Lock the map, to wait out all other references
350 * to it.
351 */
352
353 vm_map_lock(map);
354
355 (void) vm_map_delete(map, map->min_offset, map->max_offset);
356
357 pmap_destroy(map->pmap);
358
359 FREE(map, M_VMMAP);
360}
361
362/*
363 * vm_map_insert: [ internal use only ]
364 *
365 * Inserts the given whole VM object into the target
366 * map at the specified address range. The object's
367 * size should match that of the address range.
368 *
369 * Requires that the map be locked, and leaves it so.
370 */
 371int
372vm_map_insert(map, object, offset, start, end)
373 vm_map_t map;
374 vm_object_t object;
375 vm_offset_t offset;
376 vm_offset_t start;
377 vm_offset_t end;
378{
379 register vm_map_entry_t new_entry;
380 register vm_map_entry_t prev_entry;
381 vm_map_entry_t temp_entry;
382
383 /*
384 * Check that the start and end points are not bogus.
385 */
386
387 if ((start < map->min_offset) || (end > map->max_offset) ||
388 (start >= end))
389 return(KERN_INVALID_ADDRESS);
390
391 /*
392 * Find the entry prior to the proposed
393 * starting address; if it's part of an
394 * existing entry, this range is bogus.
395 */
396
397 if (vm_map_lookup_entry(map, start, &temp_entry))
398 return(KERN_NO_SPACE);
399
400 prev_entry = temp_entry;
401
402 /*
403 * Assert that the next entry doesn't overlap the
404 * end point.
405 */
406
407 if ((prev_entry->next != &map->header) &&
408 (prev_entry->next->start < end))
409 return(KERN_NO_SPACE);
410
411 /*
412 * See if we can avoid creating a new entry by
413 * extending one of our neighbors.
414 */
415
 416 if (object == NULL) {
417 if ((prev_entry != &map->header) &&
418 (prev_entry->end == start) &&
419 (map->is_main_map) &&
420 (prev_entry->is_a_map == FALSE) &&
421 (prev_entry->is_sub_map == FALSE) &&
422 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
423 (prev_entry->protection == VM_PROT_DEFAULT) &&
424 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
425 (prev_entry->wired_count == 0)) {
426
427 if (vm_object_coalesce(prev_entry->object.vm_object,
 428 NULL,
429 prev_entry->offset,
430 (vm_offset_t) 0,
431 (vm_size_t)(prev_entry->end
432 - prev_entry->start),
433 (vm_size_t)(end - prev_entry->end))) {
434 /*
435 * Coalesced the two objects - can extend
436 * the previous map entry to include the
437 * new range.
438 */
439 map->size += (end - prev_entry->end);
440 prev_entry->end = end;
441 return(KERN_SUCCESS);
442 }
443 }
444 }
445
446 /*
447 * Create a new entry
448 */
449
450 new_entry = vm_map_entry_create(map);
451 new_entry->start = start;
452 new_entry->end = end;
453
454 new_entry->is_a_map = FALSE;
455 new_entry->is_sub_map = FALSE;
456 new_entry->object.vm_object = object;
457 new_entry->offset = offset;
458
459 new_entry->copy_on_write = FALSE;
460 new_entry->needs_copy = FALSE;
461
462 if (map->is_main_map) {
463 new_entry->inheritance = VM_INHERIT_DEFAULT;
464 new_entry->protection = VM_PROT_DEFAULT;
465 new_entry->max_protection = VM_PROT_DEFAULT;
466 new_entry->wired_count = 0;
467 }
468
469 /*
470 * Insert the new entry into the list
471 */
472
473 vm_map_entry_link(map, prev_entry, new_entry);
474 map->size += new_entry->end - new_entry->start;
475
476 /*
477 * Update the free space hint
478 */
479
480 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
481 map->first_free = new_entry;
482
483 return(KERN_SUCCESS);
484}
485
486/*
487 * SAVE_HINT:
488 *
489 * Saves the specified entry as the hint for
490 * future lookups. Performs necessary interlocks.
491 */
492#define SAVE_HINT(map,value) \
493 simple_lock(&(map)->hint_lock); \
494 (map)->hint = (value); \
495 simple_unlock(&(map)->hint_lock);
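/*
 * The hint is purely an optimization: vm_map_lookup_entry below falls
 * back to a linear walk of the entry list whenever the hinted entry
 * does not cover the requested address.
 */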
496
497/*
498 * vm_map_lookup_entry: [ internal use only ]
499 *
500 * Finds the map entry containing (or
501 * immediately preceding) the specified address
502 * in the given map; the entry is returned
503 * in the "entry" parameter. The boolean
504 * result indicates whether the address is
505 * actually contained in the map.
506 */
507boolean_t vm_map_lookup_entry(map, address, entry)
508 register vm_map_t map;
509 register vm_offset_t address;
510 vm_map_entry_t *entry; /* OUT */
511{
512 register vm_map_entry_t cur;
513 register vm_map_entry_t last;
514
515 /*
516 * Start looking either from the head of the
517 * list, or from the hint.
518 */
519
520 simple_lock(&map->hint_lock);
521 cur = map->hint;
522 simple_unlock(&map->hint_lock);
523
524 if (cur == &map->header)
525 cur = cur->next;
526
527 if (address >= cur->start) {
528 /*
529 * Go from hint to end of list.
530 *
531 * But first, make a quick check to see if
532 * we are already looking at the entry we
533 * want (which is usually the case).
534 * Note also that we don't need to save the hint
535 * here... it is the same hint (unless we are
536 * at the header, in which case the hint didn't
537 * buy us anything anyway).
538 */
539 last = &map->header;
540 if ((cur != last) && (cur->end > address)) {
541 *entry = cur;
542 return(TRUE);
543 }
544 }
545 else {
546 /*
547 * Go from start to hint, *inclusively*
548 */
549 last = cur->next;
550 cur = map->header.next;
551 }
552
553 /*
554 * Search linearly
555 */
556
557 while (cur != last) {
558 if (cur->end > address) {
559 if (address >= cur->start) {
560 /*
561 * Save this lookup for future
562 * hints, and return
563 */
564
565 *entry = cur;
566 SAVE_HINT(map, cur);
567 return(TRUE);
568 }
569 break;
570 }
571 cur = cur->next;
572 }
573 *entry = cur->prev;
574 SAVE_HINT(map, *entry);
575 return(FALSE);
576}
577
578/*
579 * Find sufficient space for `length' bytes in the given map, starting at
580 * `start'. The map must be locked. Returns 0 on success, 1 on no space.
581 */
582int
583vm_map_findspace(map, start, length, addr)
584 register vm_map_t map;
585 register vm_offset_t start;
586 vm_size_t length;
587 vm_offset_t *addr;
588{
589 register vm_map_entry_t entry, next;
590 register vm_offset_t end;
591
592 if (start < map->min_offset)
593 start = map->min_offset;
594 if (start > map->max_offset)
595 return (1);
596
597 /*
598 * Look for the first possible address; if there's already
599 * something at this address, we have to start after it.
600 */
601 if (start == map->min_offset) {
602 if ((entry = map->first_free) != &map->header)
603 start = entry->end;
604 } else {
605 vm_map_entry_t tmp;
606 if (vm_map_lookup_entry(map, start, &tmp))
607 start = tmp->end;
608 entry = tmp;
609 }
610
611 /*
612 * Look through the rest of the map, trying to fit a new region in
613 * the gap between existing regions, or after the very last region.
614 */
615 for (;; start = (entry = next)->end) {
616 /*
617 * Find the end of the proposed new region. Be sure we didn't
618 * go beyond the end of the map, or wrap around the address;
619 * if so, we lose. Otherwise, if this is the last entry, or
620 * if the proposed new region fits before the next entry, we
621 * win.
622 */
623 end = start + length;
624 if (end > map->max_offset || end < start)
625 return (1);
626 next = entry->next;
627 if (next == &map->header || next->start >= end)
628 break;
629 }
630 SAVE_HINT(map, entry);
631 *addr = start;
632 return (0);
633}
634
635/*
636 * vm_map_find finds an unallocated region in the target address
637 * map with the given length. The search is defined to be
638 * first-fit from the specified address; the region found is
639 * returned in the same parameter.
640 *
641 */
 642int
643vm_map_find(map, object, offset, addr, length, find_space)
644 vm_map_t map;
645 vm_object_t object;
646 vm_offset_t offset;
647 vm_offset_t *addr; /* IN/OUT */
648 vm_size_t length;
649 boolean_t find_space;
650{
 651 register vm_offset_t start;
652 int result;
653
654 start = *addr;
 655 vm_map_lock(map);
 656 if (find_space) {
 657 if (vm_map_findspace(map, start, length, addr)) {
658 vm_map_unlock(map);
659 return (KERN_NO_SPACE);
660 }
 661 start = *addr;
 662 }
 663 result = vm_map_insert(map, object, offset, start, start + length);
 664 vm_map_unlock(map);
 665 return (result);
666}
667
668/*
669 * vm_map_simplify_entry: [ internal use only ]
670 *
671 * Simplify the given map entry by:
672 * removing extra sharing maps
673 * [XXX maybe later] merging with a neighbor
674 */
675void vm_map_simplify_entry(map, entry)
676 vm_map_t map;
677 vm_map_entry_t entry;
678{
679#ifdef lint
680 map++;
 681#endif
682
683 /*
684 * If this entry corresponds to a sharing map, then
685 * see if we can remove the level of indirection.
686 * If it's not a sharing map, then it points to
687 * a VM object, so see if we can merge with either
688 * of our neighbors.
689 */
690
691 if (entry->is_sub_map)
692 return;
693 if (entry->is_a_map) {
694#if 0
695 vm_map_t my_share_map;
696 int count;
697
698 my_share_map = entry->object.share_map;
699 simple_lock(&my_share_map->ref_lock);
700 count = my_share_map->ref_count;
701 simple_unlock(&my_share_map->ref_lock);
702
703 if (count == 1) {
704 /* Can move the region from
705 * entry->start to entry->end (+ entry->offset)
706 * in my_share_map into place of entry.
707 * Later.
708 */
709 }
 710#endif
711 }
712 else {
713 /*
714 * Try to merge with our neighbors.
715 *
716 * Conditions for merge are:
717 *
718 * 1. entries are adjacent.
719 * 2. both entries point to objects
720 * with null pagers.
721 *
722 * If a merge is possible, we replace the two
723 * entries with a single entry, then merge
724 * the two objects into a single object.
725 *
726 * Now, all that is left to do is write the
727 * code!
728 */
729 }
730}
731
732/*
733 * vm_map_clip_start: [ internal use only ]
734 *
735 * Asserts that the given entry begins at or after
736 * the specified address; if necessary,
737 * it splits the entry into two.
738 */
739#define vm_map_clip_start(map, entry, startaddr) \
740{ \
741 if (startaddr > entry->start) \
742 _vm_map_clip_start(map, entry, startaddr); \
743}
744
745/*
746 * This routine is called only when it is known that
747 * the entry must be split.
748 */
 749static void _vm_map_clip_start(map, entry, start)
750 register vm_map_t map;
751 register vm_map_entry_t entry;
752 register vm_offset_t start;
753{
754 register vm_map_entry_t new_entry;
755
756 /*
757 * See if we can simplify this entry first
758 */
759
760 vm_map_simplify_entry(map, entry);
761
762 /*
763 * Split off the front portion --
764 * note that we must insert the new
765 * entry BEFORE this one, so that
766 * this entry has the specified starting
767 * address.
768 */
769
770 new_entry = vm_map_entry_create(map);
771 *new_entry = *entry;
772
773 new_entry->end = start;
774 entry->offset += (start - entry->start);
775 entry->start = start;
776
777 vm_map_entry_link(map, entry->prev, new_entry);
778
779 if (entry->is_a_map || entry->is_sub_map)
780 vm_map_reference(new_entry->object.share_map);
781 else
782 vm_object_reference(new_entry->object.vm_object);
783}
784
785/*
786 * vm_map_clip_end: [ internal use only ]
787 *
788 * Asserts that the given entry ends at or before
789 * the specified address; if necessary,
790 * it splits the entry into two.
791 */
792
793#define vm_map_clip_end(map, entry, endaddr) \
794{ \
795 if (endaddr < entry->end) \
796 _vm_map_clip_end(map, entry, endaddr); \
797}
798
799/*
800 * This routine is called only when it is known that
801 * the entry must be split.
802 */
 803static void _vm_map_clip_end(map, entry, end)
804 register vm_map_t map;
805 register vm_map_entry_t entry;
806 register vm_offset_t end;
807{
808 register vm_map_entry_t new_entry;
809
810 /*
811 * Create a new entry and insert it
812 * AFTER the specified entry
813 */
814
815 new_entry = vm_map_entry_create(map);
816 *new_entry = *entry;
817
818 new_entry->start = entry->end = end;
819 new_entry->offset += (end - entry->start);
820
821 vm_map_entry_link(map, entry, new_entry);
822
823 if (entry->is_a_map || entry->is_sub_map)
824 vm_map_reference(new_entry->object.share_map);
825 else
826 vm_object_reference(new_entry->object.vm_object);
827}
828
829/*
830 * VM_MAP_RANGE_CHECK: [ internal use only ]
831 *
832 * Asserts that the starting and ending region
833 * addresses fall within the valid range of the map.
834 */
835#define VM_MAP_RANGE_CHECK(map, start, end) \
836 { \
837 if (start < vm_map_min(map)) \
838 start = vm_map_min(map); \
839 if (end > vm_map_max(map)) \
840 end = vm_map_max(map); \
841 if (start > end) \
842 start = end; \
843 }
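/*
 * Note that out-of-range requests are silently truncated to the map
 * bounds rather than rejected, and a start beyond the end collapses
 * to an empty range.
 */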
844
845/*
846 * vm_map_submap: [ kernel use only ]
847 *
848 * Mark the given range as handled by a subordinate map.
849 *
850 * This range must have been created with vm_map_find,
851 * and no other operations may have been performed on this
852 * range prior to calling vm_map_submap.
853 *
854 * Only a limited number of operations can be performed
 855 * within this range after calling vm_map_submap:
856 * vm_fault
857 * [Don't try vm_map_copy!]
858 *
859 * To remove a submapping, one must first remove the
860 * range from the superior map, and then destroy the
861 * submap (if desired). [Better yet, don't try it.]
862 */
 863int
864vm_map_submap(map, start, end, submap)
865 register vm_map_t map;
866 register vm_offset_t start;
867 register vm_offset_t end;
868 vm_map_t submap;
869{
870 vm_map_entry_t entry;
871 register int result = KERN_INVALID_ARGUMENT;
872
873 vm_map_lock(map);
874
875 VM_MAP_RANGE_CHECK(map, start, end);
876
877 if (vm_map_lookup_entry(map, start, &entry)) {
878 vm_map_clip_start(map, entry, start);
879 }
880 else
881 entry = entry->next;
882
883 vm_map_clip_end(map, entry, end);
884
885 if ((entry->start == start) && (entry->end == end) &&
886 (!entry->is_a_map) &&
 887 (entry->object.vm_object == NULL) &&
888 (!entry->copy_on_write)) {
889 entry->is_a_map = FALSE;
890 entry->is_sub_map = TRUE;
891 vm_map_reference(entry->object.sub_map = submap);
892 result = KERN_SUCCESS;
893 }
894 vm_map_unlock(map);
895
896 return(result);
897}
898
899/*
900 * vm_map_protect:
901 *
902 * Sets the protection of the specified address
903 * region in the target map. If "set_max" is
904 * specified, the maximum protection is to be set;
905 * otherwise, only the current protection is affected.
906 */
 907int
908vm_map_protect(map, start, end, new_prot, set_max)
909 register vm_map_t map;
910 register vm_offset_t start;
911 register vm_offset_t end;
912 register vm_prot_t new_prot;
913 register boolean_t set_max;
914{
915 register vm_map_entry_t current;
916 vm_map_entry_t entry;
917
918 vm_map_lock(map);
919
920 VM_MAP_RANGE_CHECK(map, start, end);
921
922 if (vm_map_lookup_entry(map, start, &entry)) {
923 vm_map_clip_start(map, entry, start);
924 }
925 else
926 entry = entry->next;
927
928 /*
929 * Make a first pass to check for protection
930 * violations.
931 */
932
933 current = entry;
934 while ((current != &map->header) && (current->start < end)) {
935 if (current->is_sub_map)
936 return(KERN_INVALID_ARGUMENT);
937 if ((new_prot & current->max_protection) != new_prot) {
938 vm_map_unlock(map);
939 return(KERN_PROTECTION_FAILURE);
940 }
941
942 current = current->next;
943 }
944
945 /*
946 * Go back and fix up protections.
947 * [Note that clipping is not necessary the second time.]
948 */
949
950 current = entry;
951
952 while ((current != &map->header) && (current->start < end)) {
953 vm_prot_t old_prot;
954
955 vm_map_clip_end(map, current, end);
956
957 old_prot = current->protection;
958 if (set_max)
959 current->protection =
960 (current->max_protection = new_prot) &
961 old_prot;
962 else
963 current->protection = new_prot;
964
965 /*
966 * Update physical map if necessary.
967 * Worry about copy-on-write here -- CHECK THIS XXX
968 */
969
970 if (current->protection != old_prot) {
971
972#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
973 VM_PROT_ALL)
974#define max(a,b) ((a) > (b) ? (a) : (b))
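/*
 * MASK() keeps VM_PROT_WRITE out of the pmap for copy-on-write entries,
 * so the first write still faults and gets a private copy; max() is used
 * below only to clamp the start of a sharing-map entry to the portion
 * covered by the current entry.
 */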
975
976 if (current->is_a_map) {
977 vm_map_entry_t share_entry;
978 vm_offset_t share_end;
979
980 vm_map_lock(current->object.share_map);
981 (void) vm_map_lookup_entry(
982 current->object.share_map,
983 current->offset,
984 &share_entry);
985 share_end = current->offset +
986 (current->end - current->start);
987 while ((share_entry !=
988 &current->object.share_map->header) &&
989 (share_entry->start < share_end)) {
990
991 pmap_protect(map->pmap,
992 (max(share_entry->start,
993 current->offset) -
994 current->offset +
995 current->start),
996 min(share_entry->end,
997 share_end) -
998 current->offset +
999 current->start,
1000 current->protection &
1001 MASK(share_entry));
1002
1003 share_entry = share_entry->next;
1004 }
1005 vm_map_unlock(current->object.share_map);
1006 }
1007 else
1008 pmap_protect(map->pmap, current->start,
1009 current->end,
1010 current->protection & MASK(entry));
1011#undef max
1012#undef MASK
1013 }
1014 current = current->next;
1015 }
1016
1017 vm_map_unlock(map);
1018 return(KERN_SUCCESS);
1019}
1020
1021/*
1022 * vm_map_inherit:
1023 *
1024 * Sets the inheritance of the specified address
1025 * range in the target map. Inheritance
1026 * affects how the map will be shared with
1027 * child maps at the time of vm_map_fork.
1028 */
 1029int
1030vm_map_inherit(map, start, end, new_inheritance)
1031 register vm_map_t map;
1032 register vm_offset_t start;
1033 register vm_offset_t end;
1034 register vm_inherit_t new_inheritance;
1035{
1036 register vm_map_entry_t entry;
1037 vm_map_entry_t temp_entry;
1038
1039 switch (new_inheritance) {
1040 case VM_INHERIT_NONE:
1041 case VM_INHERIT_COPY:
1042 case VM_INHERIT_SHARE:
1043 break;
1044 default:
1045 return(KERN_INVALID_ARGUMENT);
1046 }
1047
1048 vm_map_lock(map);
1049
1050 VM_MAP_RANGE_CHECK(map, start, end);
1051
1052 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1053 entry = temp_entry;
1054 vm_map_clip_start(map, entry, start);
1055 }
1056 else
1057 entry = temp_entry->next;
1058
1059 while ((entry != &map->header) && (entry->start < end)) {
1060 vm_map_clip_end(map, entry, end);
1061
1062 entry->inheritance = new_inheritance;
1063
1064 entry = entry->next;
1065 }
1066
1067 vm_map_unlock(map);
1068 return(KERN_SUCCESS);
1069}
1070
1071/*
1072 * vm_map_pageable:
1073 *
1074 * Sets the pageability of the specified address
1075 * range in the target map. Regions specified
1076 * as not pageable require locked-down physical
1077 * memory and physical page maps.
1078 *
1079 * The map must not be locked, but a reference
1080 * must remain to the map throughout the call.
1081 */
 1082int
1083vm_map_pageable(map, start, end, new_pageable)
1084 register vm_map_t map;
1085 register vm_offset_t start;
1086 register vm_offset_t end;
1087 register boolean_t new_pageable;
1088{
1089 register vm_map_entry_t entry;
 1090 vm_map_entry_t start_entry;
1091 register vm_offset_t failed;
1092 int rv;
1093
1094 vm_map_lock(map);
1095
1096 VM_MAP_RANGE_CHECK(map, start, end);
1097
1098 /*
1099 * Only one pageability change may take place at one
1100 * time, since vm_fault assumes it will be called
1101 * only once for each wiring/unwiring. Therefore, we
1102 * have to make sure we're actually changing the pageability
1103 * for the entire region. We do so before making any changes.
1104 */
1105
1106 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1107 vm_map_unlock(map);
1108 return(KERN_INVALID_ADDRESS);
 1109 }
 1110 entry = start_entry;
1111
1112 /*
1113 * Actions are rather different for wiring and unwiring,
1114 * so we have two separate cases.
1115 */
1116
1117 if (new_pageable) {
1118
1119 vm_map_clip_start(map, entry, start);
1120
1121 /*
1122 * Unwiring. First ensure that the range to be
1123 * unwired is really wired down and that there
1124 * are no holes.
1125 */
1126 while ((entry != &map->header) && (entry->start < end)) {
1127
1128 if (entry->wired_count == 0 ||
1129 (entry->end < end &&
1130 (entry->next == &map->header ||
1131 entry->next->start > entry->end))) {
1132 vm_map_unlock(map);
1133 return(KERN_INVALID_ARGUMENT);
1134 }
1135 entry = entry->next;
1136 }
1137
1138 /*
1139 * Now decrement the wiring count for each region.
1140 * If a region becomes completely unwired,
1141 * unwire its physical pages and mappings.
1142 */
1143 lock_set_recursive(&map->lock);
1144
 1145 entry = start_entry;
1146 while ((entry != &map->header) && (entry->start < end)) {
1147 vm_map_clip_end(map, entry, end);
1148
1149 entry->wired_count--;
1150 if (entry->wired_count == 0)
1151 vm_fault_unwire(map, entry->start, entry->end);
1152
1153 entry = entry->next;
1154 }
1155 lock_clear_recursive(&map->lock);
1156 }
1157
1158 else {
1159 /*
1160 * Wiring. We must do this in two passes:
1161 *
1162 * 1. Holding the write lock, we create any shadow
1163 * or zero-fill objects that need to be created.
1164 * Then we clip each map entry to the region to be
1165 * wired and increment its wiring count. We
1166 * create objects before clipping the map entries
1167 * to avoid object proliferation.
1168 *
1169 * 2. We downgrade to a read lock, and call
1170 * vm_fault_wire to fault in the pages for any
1171 * newly wired area (wired_count is 1).
1172 *
1173 * Downgrading to a read lock for vm_fault_wire avoids
1174 * a possible deadlock with another thread that may have
1175 * faulted on one of the pages to be wired (it would mark
1176 * the page busy, blocking us, then in turn block on the
1177 * map lock that we hold). Because of problems in the
1178 * recursive lock package, we cannot upgrade to a write
1179 * lock in vm_map_lookup. Thus, any actions that require
1180 * the write lock must be done beforehand. Because we
1181 * keep the read lock on the map, the copy-on-write status
1182 * of the entries we modify here cannot change.
1183 */
1184
1185 /*
1186 * Pass 1.
1187 */
 1188 while ((entry != &map->header) && (entry->start < end)) {
 1189#if 0
 1190 vm_map_clip_end(map, entry, end);
1191#endif
1192 if (entry->wired_count == 0) {
1193
1194 /*
1195 * Perform actions of vm_map_lookup that need
1196 * the write lock on the map: create a shadow
1197 * object for a copy-on-write region, or an
1198 * object for a zero-fill region.
1199 *
1200 * We don't have to do this for entries that
1201 * point to sharing maps, because we won't hold
1202 * the lock on the sharing map.
1203 */
1204 if (!entry->is_a_map) {
1205 if (entry->needs_copy &&
1206 ((entry->protection & VM_PROT_WRITE) != 0)) {
1207
1208 vm_object_shadow(&entry->object.vm_object,
1209 &entry->offset,
1210 (vm_size_t)(entry->end
1211 - entry->start));
1212 entry->needs_copy = FALSE;
1213 }
 1214 else if (entry->object.vm_object == NULL) {
1215 entry->object.vm_object =
1216 vm_object_allocate((vm_size_t)(entry->end
1217 - entry->start));
1218 entry->offset = (vm_offset_t)0;
1219 }
1220 }
1221 }
1222 vm_map_clip_start(map, entry, start);
1223 vm_map_clip_end(map, entry, end);
1224 entry->wired_count++;
 1225
1226 /*
1227 * Check for holes
1228 */
1229 if (entry->end < end &&
1230 (entry->next == &map->header ||
1231 entry->next->start > entry->end)) {
1232 /*
1233 * Found one. Object creation actions
1234 * do not need to be undone, but the
1235 * wired counts need to be restored.
1236 */
1237 while (entry != &map->header && entry->end > start) {
1238 entry->wired_count--;
1239 entry = entry->prev;
1240 }
1241 vm_map_unlock(map);
1242 return(KERN_INVALID_ARGUMENT);
1243 }
1244 entry = entry->next;
1245 }
1246
1247 /*
1248 * Pass 2.
1249 */
1250
1251 /*
1252 * HACK HACK HACK HACK
1253 *
1254 * If we are wiring in the kernel map or a submap of it,
1255 * unlock the map to avoid deadlocks. We trust that the
1256 * kernel threads are well-behaved, and therefore will
1257 * not do anything destructive to this region of the map
1258 * while we have it unlocked. We cannot trust user threads
1259 * to do the same.
1260 *
1261 * HACK HACK HACK HACK
1262 */
1263 if (vm_map_pmap(map) == kernel_pmap) {
1264 vm_map_unlock(map); /* trust me ... */
1265 }
1266 else {
1267 lock_set_recursive(&map->lock);
1268 lock_write_to_read(&map->lock);
1269 }
1270
 1271 rv = 0;
 1272 entry = start_entry;
 1273 while (entry != &map->header && entry->start < end) {
1274 /*
1275 * If vm_fault_wire fails for any page we need to
1276 * undo what has been done. We decrement the wiring
1277 * count for those pages which have not yet been
1278 * wired (now) and unwire those that have (later).
1279 *
1280 * XXX this violates the locking protocol on the map,
1281 * needs to be fixed.
1282 */
1283 if (rv)
1284 entry->wired_count--;
1285 else if (entry->wired_count == 1) {
1286 rv = vm_fault_wire(map, entry->start, entry->end);
1287 if (rv) {
1288 failed = entry->start;
1289 entry->wired_count--;
1290 }
1291 }
1292 entry = entry->next;
1293 }
1294
1295 if (vm_map_pmap(map) == kernel_pmap) {
1296 vm_map_lock(map);
1297 }
1298 else {
1299 lock_clear_recursive(&map->lock);
1300 }
1301 if (rv) {
1302 vm_map_unlock(map);
1303 (void) vm_map_pageable(map, start, failed, TRUE);
1304 return(rv);
1305 }
1306 }
1307
1308 vm_map_unlock(map);
1309
1310 return(KERN_SUCCESS);
1311}
1312
1313/*
1314 * vm_map_entry_unwire: [ internal use only ]
1315 *
1316 * Make the region specified by this entry pageable.
1317 *
1318 * The map in question should be locked.
1319 * [This is the reason for this routine's existence.]
1320 */
1321void vm_map_entry_unwire(map, entry)
1322 vm_map_t map;
1323 register vm_map_entry_t entry;
1324{
1325 vm_fault_unwire(map, entry->start, entry->end);
1326 entry->wired_count = 0;
1327}
1328
1329/*
1330 * vm_map_entry_delete: [ internal use only ]
1331 *
1332 * Deallocate the given entry from the target map.
1333 */
1334void vm_map_entry_delete(map, entry)
1335 register vm_map_t map;
1336 register vm_map_entry_t entry;
1337{
1338 if (entry->wired_count != 0)
1339 vm_map_entry_unwire(map, entry);
1340
1341 vm_map_entry_unlink(map, entry);
1342 map->size -= entry->end - entry->start;
1343
1344 if (entry->is_a_map || entry->is_sub_map)
1345 vm_map_deallocate(entry->object.share_map);
1346 else
1347 vm_object_deallocate(entry->object.vm_object);
1348
1349 vm_map_entry_dispose(map, entry);
1350}
1351
1352/*
1353 * vm_map_delete: [ internal use only ]
1354 *
1355 * Deallocates the given address range from the target
1356 * map.
1357 *
1358 * When called with a sharing map, removes pages from
1359 * that region from all physical maps.
1360 */
 1361int
1362vm_map_delete(map, start, end)
1363 register vm_map_t map;
1364 vm_offset_t start;
1365 register vm_offset_t end;
1366{
1367 register vm_map_entry_t entry;
1368 vm_map_entry_t first_entry;
1369
1370 /*
1371 * Find the start of the region, and clip it
1372 */
1373
1374 if (!vm_map_lookup_entry(map, start, &first_entry))
1375 entry = first_entry->next;
1376 else {
1377 entry = first_entry;
1378 vm_map_clip_start(map, entry, start);
1379
1380 /*
1381 * Fix the lookup hint now, rather than each
1382 * time though the loop.
1383 */
1384
1385 SAVE_HINT(map, entry->prev);
1386 }
1387
1388 /*
1389 * Save the free space hint
1390 */
1391
1392 if (map->first_free->start >= start)
1393 map->first_free = entry->prev;
1394
1395 /*
1396 * Step through all entries in this region
1397 */
1398
1399 while ((entry != &map->header) && (entry->start < end)) {
1400 vm_map_entry_t next;
1401 register vm_offset_t s, e;
1402 register vm_object_t object;
1403
1404 vm_map_clip_end(map, entry, end);
1405
1406 next = entry->next;
1407 s = entry->start;
1408 e = entry->end;
1409
1410 /*
1411 * Unwire before removing addresses from the pmap;
1412 * otherwise, unwiring will put the entries back in
1413 * the pmap.
1414 */
1415
1416 object = entry->object.vm_object;
1417 if (entry->wired_count != 0)
1418 vm_map_entry_unwire(map, entry);
1419
1420 /*
1421 * If this is a sharing map, we must remove
1422 * *all* references to this data, since we can't
1423 * find all of the physical maps which are sharing
1424 * it.
1425 */
1426
1427 if (object == kernel_object || object == kmem_object)
1428 vm_object_page_remove(object, entry->offset,
1429 entry->offset + (e - s));
1430 else if (!map->is_main_map)
1431 vm_object_pmap_remove(object,
1432 entry->offset,
1433 entry->offset + (e - s));
1434 else
1435 pmap_remove(map->pmap, s, e);
1436
1437 /*
1438 * Delete the entry (which may delete the object)
1439 * only after removing all pmap entries pointing
1440 * to its pages. (Otherwise, its page frames may
1441 * be reallocated, and any modify bits will be
1442 * set in the wrong object!)
1443 */
1444
1445 vm_map_entry_delete(map, entry);
1446 entry = next;
1447 }
1448 return(KERN_SUCCESS);
1449}
1450
1451/*
1452 * vm_map_remove:
1453 *
1454 * Remove the given address range from the target map.
1455 * This is the exported form of vm_map_delete.
1456 */
 1457int
1458vm_map_remove(map, start, end)
1459 register vm_map_t map;
1460 register vm_offset_t start;
1461 register vm_offset_t end;
1462{
1463 register int result;
1464
1465 vm_map_lock(map);
1466 VM_MAP_RANGE_CHECK(map, start, end);
1467 result = vm_map_delete(map, start, end);
1468 vm_map_unlock(map);
1469
1470 return(result);
1471}
1472
1473/*
1474 * vm_map_check_protection:
1475 *
1476 * Assert that the target map allows the specified
1477 * privilege on the entire address region given.
1478 * The entire region must be allocated.
1479 */
1480boolean_t vm_map_check_protection(map, start, end, protection)
1481 register vm_map_t map;
1482 register vm_offset_t start;
1483 register vm_offset_t end;
1484 register vm_prot_t protection;
1485{
1486 register vm_map_entry_t entry;
1487 vm_map_entry_t tmp_entry;
1488
1489 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1490 return(FALSE);
1491 }
1492
1493 entry = tmp_entry;
1494
1495 while (start < end) {
1496 if (entry == &map->header) {
1497 return(FALSE);
1498 }
1499
1500 /*
1501 * No holes allowed!
1502 */
1503
1504 if (start < entry->start) {
1505 return(FALSE);
1506 }
1507
1508 /*
1509 * Check protection associated with entry.
1510 */
1511
1512 if ((entry->protection & protection) != protection) {
1513 return(FALSE);
1514 }
1515
1516 /* go to next entry */
1517
1518 start = entry->end;
1519 entry = entry->next;
1520 }
1521 return(TRUE);
1522}
1523
1524/*
1525 * vm_map_copy_entry:
1526 *
1527 * Copies the contents of the source entry to the destination
1528 * entry. The entries *must* be aligned properly.
1529 */
1530void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1531 vm_map_t src_map, dst_map;
1532 register vm_map_entry_t src_entry, dst_entry;
1533{
1534 vm_object_t temp_object;
1535
1536 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1537 return;
1538
 1539 if (dst_entry->object.vm_object != NULL &&
 1540 (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
1541 printf("vm_map_copy_entry: copying over permanent data!\n");
1542
1543 /*
1544 * If our destination map was wired down,
1545 * unwire it now.
1546 */
1547
1548 if (dst_entry->wired_count != 0)
1549 vm_map_entry_unwire(dst_map, dst_entry);
1550
1551 /*
1552 * If we're dealing with a sharing map, we
1553 * must remove the destination pages from
1554 * all maps (since we cannot know which maps
1555 * this sharing map belongs in).
1556 */
1557
1558 if (dst_map->is_main_map)
1559 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1560 else
1561 vm_object_pmap_remove(dst_entry->object.vm_object,
1562 dst_entry->offset,
1563 dst_entry->offset +
1564 (dst_entry->end - dst_entry->start));
1565
1566 if (src_entry->wired_count == 0) {
1567
1568 boolean_t src_needs_copy;
1569
1570 /*
1571 * If the source entry is marked needs_copy,
1572 * it is already write-protected.
1573 */
1574 if (!src_entry->needs_copy) {
1575
1576 boolean_t su;
1577
1578 /*
1579 * If the source entry has only one mapping,
1580 * we can just protect the virtual address
1581 * range.
1582 */
1583 if (!(su = src_map->is_main_map)) {
1584 simple_lock(&src_map->ref_lock);
1585 su = (src_map->ref_count == 1);
1586 simple_unlock(&src_map->ref_lock);
1587 }
1588
1589 if (su) {
1590 pmap_protect(src_map->pmap,
1591 src_entry->start,
1592 src_entry->end,
1593 src_entry->protection & ~VM_PROT_WRITE);
1594 }
1595 else {
1596 vm_object_pmap_copy(src_entry->object.vm_object,
1597 src_entry->offset,
1598 src_entry->offset + (src_entry->end
1599 -src_entry->start));
1600 }
1601 }
1602
1603 /*
1604 * Make a copy of the object.
1605 */
1606 temp_object = dst_entry->object.vm_object;
1607 vm_object_copy(src_entry->object.vm_object,
1608 src_entry->offset,
1609 (vm_size_t)(src_entry->end -
1610 src_entry->start),
1611 &dst_entry->object.vm_object,
1612 &dst_entry->offset,
1613 &src_needs_copy);
1614 /*
1615 * If we didn't get a copy-object now, mark the
1616 * source map entry so that a shadow will be created
1617 * to hold its changed pages.
1618 */
1619 if (src_needs_copy)
1620 src_entry->needs_copy = TRUE;
1621
1622 /*
1623 * The destination always needs to have a shadow
1624 * created.
1625 */
1626 dst_entry->needs_copy = TRUE;
1627
1628 /*
1629 * Mark the entries copy-on-write, so that write-enabling
1630 * the entry won't make copy-on-write pages writable.
1631 */
1632 src_entry->copy_on_write = TRUE;
1633 dst_entry->copy_on_write = TRUE;
1634 /*
1635 * Get rid of the old object.
1636 */
1637 vm_object_deallocate(temp_object);
1638
1639 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1640 dst_entry->end - dst_entry->start, src_entry->start);
1641 }
1642 else {
1643 /*
1644 * Of course, wired down pages can't be set copy-on-write.
1645 * Cause wired pages to be copied into the new
1646 * map by simulating faults (the new pages are
1647 * pageable)
1648 */
1649 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1650 }
1651}
1652
1653/*
1654 * vm_map_copy:
1655 *
1656 * Perform a virtual memory copy from the source
1657 * address map/range to the destination map/range.
1658 *
1659 * If src_destroy or dst_alloc is requested,
1660 * the source and destination regions should be
1661 * disjoint, not only in the top-level map, but
1662 * in the sharing maps as well. [The best way
1663 * to guarantee this is to use a new intermediate
1664 * map to make copies. This also reduces map
1665 * fragmentation.]
1666 */
 1667int
1668vm_map_copy(dst_map, src_map,
1669 dst_addr, len, src_addr,
1670 dst_alloc, src_destroy)
1671 vm_map_t dst_map;
1672 vm_map_t src_map;
1673 vm_offset_t dst_addr;
1674 vm_size_t len;
1675 vm_offset_t src_addr;
1676 boolean_t dst_alloc;
1677 boolean_t src_destroy;
1678{
1679 register
1680 vm_map_entry_t src_entry;
1681 register
1682 vm_map_entry_t dst_entry;
1683 vm_map_entry_t tmp_entry;
1684 vm_offset_t src_start;
1685 vm_offset_t src_end;
1686 vm_offset_t dst_start;
1687 vm_offset_t dst_end;
1688 vm_offset_t src_clip;
1689 vm_offset_t dst_clip;
1690 int result;
1691 boolean_t old_src_destroy;
1692
1693 /*
1694 * XXX While we figure out why src_destroy screws up,
1695 * we'll do it by explicitly vm_map_delete'ing at the end.
1696 */
1697
1698 old_src_destroy = src_destroy;
1699 src_destroy = FALSE;
1700
1701 /*
1702 * Compute start and end of region in both maps
1703 */
1704
1705 src_start = src_addr;
1706 src_end = src_start + len;
1707 dst_start = dst_addr;
1708 dst_end = dst_start + len;
1709
1710 /*
1711 * Check that the region can exist in both source
1712 * and destination.
1713 */
1714
1715 if ((dst_end < dst_start) || (src_end < src_start))
1716 return(KERN_NO_SPACE);
1717
1718 /*
1719 * Lock the maps in question -- we avoid deadlock
1720 * by ordering lock acquisition by map value
1721 */
1722
1723 if (src_map == dst_map) {
1724 vm_map_lock(src_map);
1725 }
1726 else if ((int) src_map < (int) dst_map) {
1727 vm_map_lock(src_map);
1728 vm_map_lock(dst_map);
1729 } else {
1730 vm_map_lock(dst_map);
1731 vm_map_lock(src_map);
1732 }
1733
1734 result = KERN_SUCCESS;
1735
1736 /*
1737 * Check protections... source must be completely readable and
1738 * destination must be completely writable. [Note that if we're
1739 * allocating the destination region, we don't have to worry
1740 * about protection, but instead about whether the region
1741 * exists.]
1742 */
1743
1744 if (src_map->is_main_map && dst_map->is_main_map) {
1745 if (!vm_map_check_protection(src_map, src_start, src_end,
1746 VM_PROT_READ)) {
1747 result = KERN_PROTECTION_FAILURE;
1748 goto Return;
1749 }
1750
1751 if (dst_alloc) {
1752 /* XXX Consider making this a vm_map_find instead */
 1753 if ((result = vm_map_insert(dst_map, NULL,
1754 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1755 goto Return;
1756 }
1757 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1758 VM_PROT_WRITE)) {
1759 result = KERN_PROTECTION_FAILURE;
1760 goto Return;
1761 }
1762 }
1763
1764 /*
1765 * Find the start entries and clip.
1766 *
1767 * Note that checking protection asserts that the
1768 * lookup cannot fail.
1769 *
1770 * Also note that we wait to do the second lookup
1771 * until we have done the first clip, as the clip
1772 * may affect which entry we get!
1773 */
1774
1775 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1776 src_entry = tmp_entry;
1777 vm_map_clip_start(src_map, src_entry, src_start);
1778
1779 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1780 dst_entry = tmp_entry;
1781 vm_map_clip_start(dst_map, dst_entry, dst_start);
1782
1783 /*
1784 * If both source and destination entries are the same,
1785 * retry the first lookup, as it may have changed.
1786 */
1787
1788 if (src_entry == dst_entry) {
1789 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1790 src_entry = tmp_entry;
1791 }
1792
1793 /*
1794 * If source and destination entries are still the same,
1795 * a null copy is being performed.
1796 */
1797
1798 if (src_entry == dst_entry)
1799 goto Return;
1800
1801 /*
1802 * Go through entries until we get to the end of the
1803 * region.
1804 */
1805
1806 while (src_start < src_end) {
1807 /*
1808 * Clip the entries to the endpoint of the entire region.
1809 */
1810
1811 vm_map_clip_end(src_map, src_entry, src_end);
1812 vm_map_clip_end(dst_map, dst_entry, dst_end);
1813
1814 /*
1815 * Clip each entry to the endpoint of the other entry.
1816 */
1817
1818 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1819 vm_map_clip_end(src_map, src_entry, src_clip);
1820
1821 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1822 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1823
1824 /*
1825 * Both entries now match in size and relative endpoints.
1826 *
1827 * If both entries refer to a VM object, we can
1828 * deal with them now.
1829 */
1830
1831 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1832 vm_map_copy_entry(src_map, dst_map, src_entry,
1833 dst_entry);
1834 }
1835 else {
1836 register vm_map_t new_dst_map;
1837 vm_offset_t new_dst_start;
1838 vm_size_t new_size;
1839 vm_map_t new_src_map;
1840 vm_offset_t new_src_start;
1841
1842 /*
1843 * We have to follow at least one sharing map.
1844 */
1845
1846 new_size = (dst_entry->end - dst_entry->start);
1847
1848 if (src_entry->is_a_map) {
1849 new_src_map = src_entry->object.share_map;
1850 new_src_start = src_entry->offset;
1851 }
1852 else {
1853 new_src_map = src_map;
1854 new_src_start = src_entry->start;
1855 lock_set_recursive(&src_map->lock);
1856 }
1857
1858 if (dst_entry->is_a_map) {
1859 vm_offset_t new_dst_end;
1860
1861 new_dst_map = dst_entry->object.share_map;
1862 new_dst_start = dst_entry->offset;
1863
1864 /*
1865 * Since the destination sharing entries
1866 * will be merely deallocated, we can
1867 * do that now, and replace the region
1868 * with a null object. [This prevents
1869 * splitting the source map to match
1870 * the form of the destination map.]
1871 * Note that we can only do so if the
1872 * source and destination do not overlap.
1873 */
1874
1875 new_dst_end = new_dst_start + new_size;
1876
1877 if (new_dst_map != new_src_map) {
1878 vm_map_lock(new_dst_map);
1879 (void) vm_map_delete(new_dst_map,
1880 new_dst_start,
1881 new_dst_end);
1882 (void) vm_map_insert(new_dst_map,
 1883 NULL,
1884 (vm_offset_t) 0,
1885 new_dst_start,
1886 new_dst_end);
1887 vm_map_unlock(new_dst_map);
1888 }
1889 }
1890 else {
1891 new_dst_map = dst_map;
1892 new_dst_start = dst_entry->start;
1893 lock_set_recursive(&dst_map->lock);
1894 }
1895
1896 /*
1897 * Recursively copy the sharing map.
1898 */
1899
1900 (void) vm_map_copy(new_dst_map, new_src_map,
1901 new_dst_start, new_size, new_src_start,
1902 FALSE, FALSE);
1903
1904 if (dst_map == new_dst_map)
1905 lock_clear_recursive(&dst_map->lock);
1906 if (src_map == new_src_map)
1907 lock_clear_recursive(&src_map->lock);
1908 }
1909
1910 /*
1911 * Update variables for next pass through the loop.
1912 */
1913
1914 src_start = src_entry->end;
1915 src_entry = src_entry->next;
1916 dst_start = dst_entry->end;
1917 dst_entry = dst_entry->next;
1918
1919 /*
1920 * If the source is to be destroyed, here is the
1921 * place to do it.
1922 */
1923
1924 if (src_destroy && src_map->is_main_map &&
1925 dst_map->is_main_map)
1926 vm_map_entry_delete(src_map, src_entry->prev);
1927 }
1928
1929 /*
1930 * Update the physical maps as appropriate
1931 */
1932
1933 if (src_map->is_main_map && dst_map->is_main_map) {
1934 if (src_destroy)
1935 pmap_remove(src_map->pmap, src_addr, src_addr + len);
1936 }
1937
1938 /*
1939 * Unlock the maps
1940 */
1941
1942 Return: ;
1943
1944 if (old_src_destroy)
1945 vm_map_delete(src_map, src_addr, src_addr + len);
1946
1947 vm_map_unlock(src_map);
1948 if (src_map != dst_map)
1949 vm_map_unlock(dst_map);
1950
1951 return(result);
1952}
1953
1954/*
1955 * vmspace_fork:
1956 * Create a new process vmspace structure and vm_map
1957 * based on those of an existing process. The new map
1958 * is based on the old map, according to the inheritance
1959 * values on the regions in that map.
 1960 *
 1961 * The source map must not be locked.
 1962 */
1963struct vmspace *
1964vmspace_fork(vm1)
1965 register struct vmspace *vm1;
 1966{
1967 register struct vmspace *vm2;
1968 vm_map_t old_map = &vm1->vm_map;
1969 vm_map_t new_map;
1970 vm_map_entry_t old_entry;
1971 vm_map_entry_t new_entry;
1972 pmap_t new_pmap;
1973
1974 vm_map_lock(old_map);
1975
1976 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1977 old_map->entries_pageable);
1978 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1979 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1980 new_pmap = &vm2->vm_pmap; /* XXX */
1981 new_map = &vm2->vm_map; /* XXX */
1982
1983 old_entry = old_map->header.next;
1984
1985 while (old_entry != &old_map->header) {
1986 if (old_entry->is_sub_map)
1987 panic("vm_map_fork: encountered a submap");
1988
1989 switch (old_entry->inheritance) {
1990 case VM_INHERIT_NONE:
1991 break;
1992
1993 case VM_INHERIT_SHARE:
1994 /*
1995 * If we don't already have a sharing map:
1996 */
1997
1998 if (!old_entry->is_a_map) {
1999 vm_map_t new_share_map;
2000 vm_map_entry_t new_share_entry;
2001
2002 /*
2003 * Create a new sharing map
2004 */
2005
 2006 new_share_map = vm_map_create(NULL,
2007 old_entry->start,
2008 old_entry->end,
2009 TRUE);
2010 new_share_map->is_main_map = FALSE;
2011
2012 /*
2013 * Create the only sharing entry from the
2014 * old task map entry.
2015 */
2016
2017 new_share_entry =
2018 vm_map_entry_create(new_share_map);
2019 *new_share_entry = *old_entry;
 2020 new_share_entry->wired_count = 0;
2021
2022 /*
2023 * Insert the entry into the new sharing
2024 * map
2025 */
2026
2027 vm_map_entry_link(new_share_map,
2028 new_share_map->header.prev,
2029 new_share_entry);
2030
2031 /*
2032 * Fix up the task map entry to refer
2033 * to the sharing map now.
2034 */
2035
2036 old_entry->is_a_map = TRUE;
2037 old_entry->object.share_map = new_share_map;
2038 old_entry->offset = old_entry->start;
2039 }
2040
2041 /*
2042 * Clone the entry, referencing the sharing map.
2043 */
2044
2045 new_entry = vm_map_entry_create(new_map);
2046 *new_entry = *old_entry;
 2047 new_entry->wired_count = 0;
2048 vm_map_reference(new_entry->object.share_map);
2049
2050 /*
2051 * Insert the entry into the new map -- we
2052 * know we're inserting at the end of the new
2053 * map.
2054 */
2055
2056 vm_map_entry_link(new_map, new_map->header.prev,
2057 new_entry);
2058
2059 /*
2060 * Update the physical map
2061 */
2062
2063 pmap_copy(new_map->pmap, old_map->pmap,
2064 new_entry->start,
2065 (old_entry->end - old_entry->start),
2066 old_entry->start);
2067 break;
2068
2069 case VM_INHERIT_COPY:
2070 /*
2071 * Clone the entry and link into the map.
2072 */
2073
2074 new_entry = vm_map_entry_create(new_map);
2075 *new_entry = *old_entry;
2076 new_entry->wired_count = 0;
 2077 new_entry->object.vm_object = NULL;
2078 new_entry->is_a_map = FALSE;
2079 vm_map_entry_link(new_map, new_map->header.prev,
2080 new_entry);
2081 if (old_entry->is_a_map) {
2082 int check;
2083
2084 check = vm_map_copy(new_map,
2085 old_entry->object.share_map,
2086 new_entry->start,
2087 (vm_size_t)(new_entry->end -
2088 new_entry->start),
2089 old_entry->offset,
2090 FALSE, FALSE);
2091 if (check != KERN_SUCCESS)
2092 printf("vm_map_fork: copy in share_map region failed\n");
2093 }
2094 else {
2095 vm_map_copy_entry(old_map, new_map, old_entry,
2096 new_entry);
2097 }
2098 break;
2099 }
2100 old_entry = old_entry->next;
2101 }
2102
2103 new_map->size = old_map->size;
2104 vm_map_unlock(old_map);
2105
 2106 return(vm2);
2107}
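/*
 * A minimal user-level sketch of the VM_INHERIT_COPY case above (the
 * usual inheritance for private mappings): after fork(), the child
 * sees a virtual copy of the parent's data and its writes stay
 * private.  This is illustrative only -- it is not part of vm_map.c
 * and assumes the familiar mmap(2)/fork(2)/wait(2) user interfaces
 * with MAP_ANON available.  A MAP_SHARED mapping, where supported,
 * would instead follow the VM_INHERIT_SHARE path, making the child's
 * store visible to the parent.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main()
{
	char *p;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	p[0] = 1;			/* parent's value */
	if (fork() == 0) {
		p[0] = 2;		/* copy-on-write: child's copy only */
		_exit(0);
	}
	(void) wait(NULL);
	printf("p[0] = %d\n", p[0]);	/* still 1 in the parent */
	return (0);
}
#endif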
2108
2109/*
2110 * vm_map_lookup:
2111 *
2112 * Finds the VM object, offset, and
2113 * protection for a given virtual address in the
2114 * specified map, assuming a page fault of the
2115 * type specified.
2116 *
2117 * Leaves the map in question locked for read; return
2118 * values are guaranteed until a vm_map_lookup_done
2119 * call is performed. Note that the map argument
2120 * is in/out; the returned map must be used in
2121 * the call to vm_map_lookup_done.
2122 *
2123 * A handle (out_entry) is returned for use in
2124 * vm_map_lookup_done, to make that fast.
2125 *
2126 * If a lookup is requested with "write protection"
2127 * specified, the map may be changed to perform virtual
2128 * copying operations, although the data referenced will
2129 * remain the same.
2130 */
73506ff8 2131 int
175f072e KM 2132 vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2133 object, offset, out_prot, wired, single_use)
2134 vm_map_t *var_map; /* IN/OUT */
2135 register vm_offset_t vaddr;
2136 register vm_prot_t fault_type;
2137
2138 vm_map_entry_t *out_entry; /* OUT */
2139 vm_object_t *object; /* OUT */
2140 vm_offset_t *offset; /* OUT */
2141 vm_prot_t *out_prot; /* OUT */
2142 boolean_t *wired; /* OUT */
2143 boolean_t *single_use; /* OUT */
2144{
2145 vm_map_t share_map;
2146 vm_offset_t share_offset;
2147 register vm_map_entry_t entry;
2148 register vm_map_t map = *var_map;
2149 register vm_prot_t prot;
2150 register boolean_t su;
2151
2152 RetryLookup: ;
2153
2154 /*
2155 * Lookup the faulting address.
2156 */
2157
2158 vm_map_lock_read(map);
2159
2160#define RETURN(why) \
2161 { \
2162 vm_map_unlock_read(map); \
2163 return(why); \
2164 }
2165
2166 /*
2167 * If the map has an interesting hint, try it before calling
 2168 * the full-blown lookup routine.
2169 */
2170
2171 simple_lock(&map->hint_lock);
2172 entry = map->hint;
2173 simple_unlock(&map->hint_lock);
2174
2175 *out_entry = entry;
2176
2177 if ((entry == &map->header) ||
2178 (vaddr < entry->start) || (vaddr >= entry->end)) {
2179 vm_map_entry_t tmp_entry;
2180
2181 /*
2182 * Entry was either not a valid hint, or the vaddr
2183 * was not contained in the entry, so do a full lookup.
2184 */
2185 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2186 RETURN(KERN_INVALID_ADDRESS);
2187
2188 entry = tmp_entry;
2189 *out_entry = entry;
2190 }
2191
2192 /*
2193 * Handle submaps.
2194 */
2195
2196 if (entry->is_sub_map) {
2197 vm_map_t old_map = map;
2198
2199 *var_map = map = entry->object.sub_map;
2200 vm_map_unlock_read(old_map);
2201 goto RetryLookup;
2202 }
2203
2204 /*
2205 * Check whether this task is allowed to have
2206 * this page.
2207 */
2208
2209 prot = entry->protection;
2210 if ((fault_type & (prot)) != fault_type)
2211 RETURN(KERN_PROTECTION_FAILURE);
2212
2213 /*
2214 * If this page is not pageable, we have to get
2215 * it for all possible accesses.
2216 */
2217
2218 if (*wired = (entry->wired_count != 0))
2219 prot = fault_type = entry->protection;
2220
2221 /*
2222 * If we don't already have a VM object, track
2223 * it down.
2224 */
2225
2226 if (su = !entry->is_a_map) {
2227 share_map = map;
2228 share_offset = vaddr;
2229 }
2230 else {
2231 vm_map_entry_t share_entry;
2232
2233 /*
2234 * Compute the sharing map, and offset into it.
2235 */
2236
2237 share_map = entry->object.share_map;
2238 share_offset = (vaddr - entry->start) + entry->offset;
2239
2240 /*
2241 * Look for the backing store object and offset
2242 */
2243
2244 vm_map_lock_read(share_map);
2245
2246 if (!vm_map_lookup_entry(share_map, share_offset,
2247 &share_entry)) {
2248 vm_map_unlock_read(share_map);
2249 RETURN(KERN_INVALID_ADDRESS);
2250 }
2251 entry = share_entry;
2252 }
2253
2254 /*
 2255 * If the entry was copy-on-write, we either shadow it now (write fault) or demote the permissions (read fault):
2256 */
2257
2258 if (entry->needs_copy) {
2259 /*
2260 * If we want to write the page, we may as well
2261 * handle that now since we've got the sharing
2262 * map locked.
2263 *
2264 * If we don't need to write the page, we just
2265 * demote the permissions allowed.
2266 */
2267
2268 if (fault_type & VM_PROT_WRITE) {
2269 /*
2270 * Make a new object, and place it in the
2271 * object chain. Note that no new references
2272 * have appeared -- one just moved from the
2273 * share map to the new object.
2274 */
2275
2276 if (lock_read_to_write(&share_map->lock)) {
2277 if (share_map != map)
2278 vm_map_unlock_read(map);
2279 goto RetryLookup;
2280 }
2281
2282 vm_object_shadow(
2283 &entry->object.vm_object,
2284 &entry->offset,
2285 (vm_size_t) (entry->end - entry->start));
2286
2287 entry->needs_copy = FALSE;
2288
2289 lock_write_to_read(&share_map->lock);
2290 }
2291 else {
2292 /*
2293 * We're attempting to read a copy-on-write
2294 * page -- don't allow writes.
2295 */
2296
2297 prot &= (~VM_PROT_WRITE);
2298 }
2299 }
2300
2301 /*
2302 * Create an object if necessary.
2303 */
5d7b9ad3 2304 if (entry->object.vm_object == NULL) {
175f072e KM 2305
2306 if (lock_read_to_write(&share_map->lock)) {
2307 if (share_map != map)
2308 vm_map_unlock_read(map);
2309 goto RetryLookup;
2310 }
2311
2312 entry->object.vm_object = vm_object_allocate(
2313 (vm_size_t)(entry->end - entry->start));
2314 entry->offset = 0;
2315 lock_write_to_read(&share_map->lock);
2316 }
2317
2318 /*
2319 * Return the object/offset from this entry. If the entry
2320 * was copy-on-write or empty, it has been fixed up.
2321 */
2322
2323 *offset = (share_offset - entry->start) + entry->offset;
2324 *object = entry->object.vm_object;
2325
2326 /*
2327 * Return whether this is the only map sharing this data.
2328 */
2329
2330 if (!su) {
2331 simple_lock(&share_map->ref_lock);
2332 su = (share_map->ref_count == 1);
2333 simple_unlock(&share_map->ref_lock);
2334 }
2335
2336 *out_prot = prot;
2337 *single_use = su;
2338
2339 return(KERN_SUCCESS);
2340
2341#undef RETURN
2342}
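/*
 * Calling convention, sketched after the way the page-fault handler
 * uses this routine (illustrative, not part of this file; curproc,
 * trunc_page and the faulting address va are assumed from the usual
 * kernel environment).  Every successful lookup must be balanced by
 * a vm_map_lookup_done() on the map and entry that were returned:
 *
 *	vm_map_t map = &curproc->p_vmspace->vm_map;
 *	vm_map_entry_t entry;
 *	vm_object_t object;
 *	vm_offset_t offset;
 *	vm_prot_t prot;
 *	boolean_t wired, single_use;
 *	int rv;
 *
 *	rv = vm_map_lookup(&map, trunc_page(va), VM_PROT_READ, &entry,
 *	    &object, &offset, &prot, &wired, &single_use);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... find or page in the page at (object, offset) ...
 *	vm_map_lookup_done(map, entry);
 */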
2343
2344/*
2345 * vm_map_lookup_done:
2346 *
2347 * Releases locks acquired by a vm_map_lookup
2348 * (according to the handle returned by that lookup).
2349 */
2350
2351void vm_map_lookup_done(map, entry)
2352 register vm_map_t map;
2353 vm_map_entry_t entry;
2354{
2355 /*
2356 * If this entry references a map, unlock it first.
2357 */
2358
2359 if (entry->is_a_map)
2360 vm_map_unlock_read(entry->object.share_map);
2361
2362 /*
2363 * Unlock the main-level map
2364 */
2365
2366 vm_map_unlock_read(map);
2367}
2368
2369/*
2370 * Routine: vm_map_simplify
2371 * Purpose:
2372 * Attempt to simplify the map representation in
2373 * the vicinity of the given starting address.
2374 * Note:
2375 * This routine is intended primarily to keep the
2376 * kernel maps more compact -- they generally don't
2377 * benefit from the "expand a map entry" technology
2378 * at allocation time because the adjacent entry
2379 * is often wired down.
2380 */
2381void vm_map_simplify(map, start)
2382 vm_map_t map;
2383 vm_offset_t start;
2384{
2385 vm_map_entry_t this_entry;
2386 vm_map_entry_t prev_entry;
2387
2388 vm_map_lock(map);
2389 if (
2390 (vm_map_lookup_entry(map, start, &this_entry)) &&
2391 ((prev_entry = this_entry->prev) != &map->header) &&
2392
2393 (prev_entry->end == start) &&
2394 (map->is_main_map) &&
2395
2396 (prev_entry->is_a_map == FALSE) &&
2397 (prev_entry->is_sub_map == FALSE) &&
2398
2399 (this_entry->is_a_map == FALSE) &&
2400 (this_entry->is_sub_map == FALSE) &&
2401
2402 (prev_entry->inheritance == this_entry->inheritance) &&
2403 (prev_entry->protection == this_entry->protection) &&
2404 (prev_entry->max_protection == this_entry->max_protection) &&
2405 (prev_entry->wired_count == this_entry->wired_count) &&
2406
2407 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2408 (prev_entry->needs_copy == this_entry->needs_copy) &&
2409
2410 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2411 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2412 == this_entry->offset)
2413 ) {
2414 if (map->first_free == this_entry)
2415 map->first_free = prev_entry;
2416
2417 SAVE_HINT(map, prev_entry);
2418 vm_map_entry_unlink(map, this_entry);
2419 prev_entry->end = this_entry->end;
2420 vm_object_deallocate(this_entry->object.vm_object);
2421 vm_map_entry_dispose(map, this_entry);
2422 }
2423 vm_map_unlock(map);
2424}
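/*
 * Worked example of the test above: with a single backing object O,
 *
 *	prev_entry:	[0x1000, 0x3000)	offset 0x0000 into O
 *	this_entry:	[0x3000, 0x5000)	offset 0x2000 into O
 *
 * and identical protection, inheritance, wiring and copy attributes,
 * prev_entry->offset + (prev_entry->end - prev_entry->start) equals
 * this_entry->offset, so this_entry is unlinked, prev_entry grows to
 * [0x1000, 0x5000), and one of the two references on O is released.
 */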
2425
2426/*
2427 * vm_map_print: [ debug ]
2428 */
2429void vm_map_print(map, full)
2430 register vm_map_t map;
2431 boolean_t full;
2432{
2433 register vm_map_entry_t entry;
2434 extern int indent;
2435
2436 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2437 (map->is_main_map ? "Task" : "Share"),
2438 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2439 map->timestamp);
2440
2441 if (!full && indent)
2442 return;
2443
2444 indent += 2;
2445 for (entry = map->header.next; entry != &map->header;
2446 entry = entry->next) {
2447 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2448 (int) entry, (int) entry->start, (int) entry->end);
2449 if (map->is_main_map) {
2450 static char *inheritance_name[4] =
2451 { "share", "copy", "none", "donate_copy"};
2452 printf("prot=%x/%x/%s, ",
2453 entry->protection,
2454 entry->max_protection,
2455 inheritance_name[entry->inheritance]);
2456 if (entry->wired_count != 0)
2457 printf("wired, ");
2458 }
2459
2460 if (entry->is_a_map || entry->is_sub_map) {
2461 printf("share=0x%x, offset=0x%x\n",
2462 (int) entry->object.share_map,
2463 (int) entry->offset);
2464 if ((entry->prev == &map->header) ||
2465 (!entry->prev->is_a_map) ||
2466 (entry->prev->object.share_map !=
2467 entry->object.share_map)) {
2468 indent += 2;
2469 vm_map_print(entry->object.share_map, full);
2470 indent -= 2;
2471 }
2472
2473 }
2474 else {
2475 printf("object=0x%x, offset=0x%x",
2476 (int) entry->object.vm_object,
2477 (int) entry->offset);
2478 if (entry->copy_on_write)
2479 printf(", copy (%s)",
2480 entry->needs_copy ? "needed" : "done");
2481 printf("\n");
2482
2483 if ((entry->prev == &map->header) ||
2484 (entry->prev->is_a_map) ||
2485 (entry->prev->object.vm_object !=
2486 entry->object.vm_object)) {
2487 indent += 2;
2488 vm_object_print(entry->object.vm_object, full);
2489 indent -= 2;
2490 }
2491 }
2492 }
2493 indent -= 2;
2494}