1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_map.c 7.3 (Berkeley) 4/21/91
37 * $Id: vm_map.c,v 1.10 1994/01/14 16:27:19 davidg Exp $
38 *
39 *
40 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41 * All rights reserved.
42 *
43 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
44 *
45 * Permission to use, copy, modify and distribute this software and
46 * its documentation is hereby granted, provided that both the copyright
47 * notice and this permission notice appear in all copies of the
48 * software, derivative works or modified versions, and any portions
49 * thereof, and that both notices appear in supporting documentation.
50 *
51 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
52 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
53 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
54 *
55 * Carnegie Mellon requests users of this software to return to
56 *
57 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
58 * School of Computer Science
59 * Carnegie Mellon University
60 * Pittsburgh PA 15213-3890
61 *
62 * any improvements or extensions that they make and grant Carnegie the
63 * rights to redistribute these changes.
64 */
65
66/*
67 * Virtual memory mapping module.
68 */
69#include "ddb.h"
70#include "param.h"
71#include "malloc.h"
72#include "vm.h"
73#include "vm_page.h"
74#include "vm_object.h"
75#include "systm.h"
76
77/*
78 * Virtual memory maps provide for the mapping, protection,
79 * and sharing of virtual memory objects. In addition,
80 * this module provides for an efficient virtual copy of
81 * memory from one map to another.
82 *
83 * Synchronization is required prior to most operations.
84 *
85 * Maps consist of an ordered doubly-linked list of simple
86 * entries; a single hint is used to speed up lookups.
87 *
88 * In order to properly represent the sharing of virtual
89 * memory regions among maps, the map structure is bi-level.
90 * Top-level ("address") maps refer to regions of sharable
91 * virtual memory. These regions are implemented as
92 * ("sharing") maps, which then refer to the actual virtual
93 * memory objects. When two address maps "share" memory,
94 * their top-level maps both have references to the same
95 * sharing map. When memory is virtual-copied from one
96 * address map to another, the references in the sharing
97 * maps are actually copied -- no copying occurs at the
98 * virtual memory object level.
99 *
100 * Since portions of maps are specified by start/end addresses,
101 * which may not align with existing map entries, all
102 * routines merely "clip" entries to these start/end values.
103 * [That is, an entry is split into two, bordering at a
104 * start or end value.] Note that these clippings may not
105 * always be necessary (as the two resulting entries are then
106 * not changed); however, the clipping is done for convenience.
107 * No attempt is currently made to "glue back together" two
108 * abutting entries.
109 *
110 * As mentioned above, virtual copy operations are performed
111 * by copying VM object references from one sharing map to
112 * another, and then marking both regions as copy-on-write.
113 * It is important to note that only one writeable reference
114 * to a VM object region exists in any map -- this means that
115 * shadow object creation can be delayed until a write operation
116 * occurs.
117 */
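/*
 * An illustrative sketch of the layout described above (the addresses
 * A, B and C are hypothetical): the entry list is circular and doubly
 * linked, with map->header acting as a sentinel.
 *
 *	&map->header <-> [A,B) <-> [B,C) <-> ... <-> &map->header
 *
 * Clipping an entry [A,C) at address B yields [A,B) and [B,C); both
 * halves keep a reference to the same object, the second with its
 * offset advanced by (B - A).
 */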
118
119/*
120 * vm_map_startup:
121 *
122 * Initialize the vm_map module. Must be called before
123 * any other vm_map routines.
124 *
125 * Map and entry structures are allocated from the general
126 * purpose memory pool with some exceptions:
127 *
128 * - The kernel map and kmem submap are allocated statically.
129 * - Kernel map entries are allocated out of a static pool.
130 *
131 * These restrictions are necessary since malloc() uses the
132 * maps and requires map entries.
133 */
134
135vm_offset_t kentry_data;
136vm_size_t kentry_data_size;
137vm_map_entry_t kentry_free;
138int kentry_count;
139vm_map_t kmap_free;
140static vm_offset_t mapvm=0;
141static int mapvmpgcnt=0;
142extern vm_map_t kernel_map, kmem_map, pager_map;
143extern int vm_page_count;
144
145void
146vm_map_startup()
147{
148 register int i;
149 register vm_map_entry_t mep;
150 vm_map_t mp;
151
152 /*
153 * Static map structures for allocation before initialization of
154 * kernel map or kmem map. vm_map_create knows how to deal with them.
155 */
156 kmap_free = mp = (vm_map_t) kentry_data;
157 i = MAX_KMAP;
158 while (--i > 0) {
159 mp->header.next = (vm_map_entry_t) (mp + 1);
160 mp++;
161 }
162 mp++->header.next = NULL;
163
164 /*
165 * Form a free list of statically allocated kernel map entries
166 * with the rest.
167 */
168 kentry_count = 0;
169 kentry_free = mep = (vm_map_entry_t) mp;
170 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
171 while (--i > 0) {
172 mep->next = mep + 1;
173 mep++;
174 kentry_count++;
175 }
176 mep->next = NULL;
177}
178
179/*
180 * Allocate a vmspace structure, including a vm_map and pmap,
181 * and initialize those structures. The refcnt is set to 1.
182 * The remaining fields must be initialized by the caller.
183 */
184struct vmspace *
185vmspace_alloc(min, max, pageable)
186 vm_offset_t min, max;
187 int pageable;
188{
189 register struct vmspace *vm;
190 int s;
191
192 if (mapvmpgcnt == 0 && mapvm == 0) {
193 mapvmpgcnt = (vm_page_count * sizeof(struct vm_map_entry) + NBPG - 1) / NBPG;
194 s = splimp();
195 mapvm = kmem_alloc_pageable(kmem_map, mapvmpgcnt * NBPG);
196 splx(s);
197 if (!mapvm)
198 mapvmpgcnt = 0;
199 }
200
201 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
202 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
203 vm_map_init(&vm->vm_map, min, max, pageable);
204 pmap_pinit(&vm->vm_pmap);
205 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
206 vm->vm_refcnt = 1;
207 return (vm);
208}
209
210void
211_vmspace_free(vm)
212 register struct vmspace *vm;
213{
214
215 /*
216 * Lock the map, to wait out all other references to it.
217 * Delete all of the mappings and pages they hold,
218 * then call the pmap module to reclaim anything left.
219 */
220 vm_map_lock(&vm->vm_map);
221 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
222 vm->vm_map.max_offset);
223 pmap_release(&vm->vm_pmap);
224 FREE(vm, M_VMMAP);
225}
226
227/*
228 * vm_map_create:
229 *
230 * Creates and returns a new empty VM map with
231 * the given physical map structure, and having
232 * the given lower and upper address bounds.
233 */
234vm_map_t
235vm_map_create(pmap, min, max, pageable)
236 pmap_t pmap;
237 vm_offset_t min, max;
238 boolean_t pageable;
239{
240 register vm_map_t result;
241
242 if (kmem_map == NULL) {
243 result = kmap_free;
244 kmap_free = (vm_map_t) result->header.next;
245 if (result == NULL)
246 panic("vm_map_create: out of maps");
247 } else
248 MALLOC(result, vm_map_t, sizeof(struct vm_map),
249 M_VMMAP, M_WAITOK);
250
251 vm_map_init(result, min, max, pageable);
252 result->pmap = pmap;
253 return(result);
254}
255
256/*
257 * Initialize an existing vm_map structure
258 * such as that in the vmspace structure.
259 * The pmap is set elsewhere.
260 */
261void
262vm_map_init(map, min, max, pageable)
263 register struct vm_map *map;
264 vm_offset_t min, max;
265 boolean_t pageable;
266{
267 map->header.next = map->header.prev = &map->header;
268 map->nentries = 0;
269 map->size = 0;
270 map->ref_count = 1;
271 map->is_main_map = TRUE;
272 map->min_offset = min;
273 map->max_offset = max;
274 map->entries_pageable = pageable;
275 map->first_free = &map->header;
276 map->hint = &map->header;
277 map->timestamp = 0;
278 lock_init(&map->lock, TRUE);
279 simple_lock_init(&map->ref_lock);
280 simple_lock_init(&map->hint_lock);
281}
282
283/*
284 * vm_map_entry_create: [ internal use only ]
285 *
286 * Allocates a VM map entry for insertion.
287 * No entry fields are filled in.
288 */
289static struct vm_map_entry *mappool;
290static int mappoolcnt;
291void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
292
293vm_map_entry_t
294vm_map_entry_create(map)
295 vm_map_t map;
296{
297 vm_map_entry_t entry;
298 int s;
299 int i;
300#define KENTRY_LOW_WATER 64
301#define MAPENTRY_LOW_WATER 64
15637ed4 302
55768178
DG
303 /*
304 * This is a *very* nasty (and sort of incomplete) hack!!!!
305 */
306 if (kentry_count < KENTRY_LOW_WATER) {
307 if (mapvmpgcnt && mapvm) {
308 vm_page_t m;
309 if (m = vm_page_alloc(kmem_object, mapvm-vm_map_min(kmem_map))) {
310 int newentries;
311 newentries = (NBPG/sizeof (struct vm_map_entry));
312 vm_page_wire(m);
313 m->flags &= ~PG_BUSY;
314 pmap_enter(vm_map_pmap(kmem_map), mapvm,
315 VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1);
316
317 entry = (vm_map_entry_t) mapvm;
318 mapvm += NBPG;
319 --mapvmpgcnt;
320
321 for (i = 0; i < newentries; i++) {
322 vm_map_entry_dispose(kernel_map, entry);
323 entry++;
324 }
325 }
326 }
327 }
328
329 if (map == kernel_map || map == kmem_map || map == pager_map) {
330
331 if (entry = kentry_free) {
332 kentry_free = entry->next;
333 --kentry_count;
334 return entry;
335 }
336
337 if (entry = mappool) {
338 mappool = entry->next;
339 --mappoolcnt;
340 return entry;
341 }
342
343 } else {
344 if (entry = mappool) {
345 mappool = entry->next;
346 --mappoolcnt;
347 return entry;
348 }
349
350 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
351 M_VMMAPENT, M_WAITOK);
352 }
353dopanic:
354 if (entry == NULL)
355 panic("vm_map_entry_create: out of map entries");
356
357 return(entry);
358}
359
360/*
361 * vm_map_entry_dispose: [ internal use only ]
362 *
363 * Inverse of vm_map_entry_create.
364 */
365void
366vm_map_entry_dispose(map, entry)
367 vm_map_t map;
368 vm_map_entry_t entry;
369{
370 extern vm_map_t kernel_map, kmem_map, pager_map;
371 int s;
372
373 if (map == kernel_map || map == kmem_map || map == pager_map ||
374 kentry_count < KENTRY_LOW_WATER) {
375 entry->next = kentry_free;
376 kentry_free = entry;
377 ++kentry_count;
378 } else {
379 if (mappoolcnt < MAPENTRY_LOW_WATER) {
380 entry->next = mappool;
381 mappool = entry;
382 ++mappoolcnt;
383 return;
384 }
385
386 FREE(entry, M_VMMAPENT);
387 }
388}
389
390/*
391 * vm_map_entry_{un,}link:
392 *
393 * Insert/remove entries from maps.
394 */
395#define vm_map_entry_link(map, after_where, entry) \
396 { \
397 (map)->nentries++; \
398 (entry)->prev = (after_where); \
399 (entry)->next = (after_where)->next; \
400 (entry)->prev->next = (entry); \
401 (entry)->next->prev = (entry); \
402 }
403#define vm_map_entry_unlink(map, entry) \
404 { \
405 (map)->nentries--; \
406 (entry)->next->prev = (entry)->prev; \
407 (entry)->prev->next = (entry)->next; \
408 }
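/*
 * Typical use, as a sketch (mirroring vm_map_insert below): a freshly
 * created entry is linked in immediately after the entry preceding its
 * range; the macros keep map->nentries in step.
 *
 *	new_entry = vm_map_entry_create(map);
 *	... fill in start, end, object, offset ...
 *	vm_map_entry_link(map, prev_entry, new_entry);
 */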
409
410/*
411 * vm_map_reference:
412 *
413 * Creates another valid reference to the given map.
414 *
415 */
416inline void
417vm_map_reference(map)
418 register vm_map_t map;
419{
420 if (map == NULL)
421 return;
422
423 simple_lock(&map->ref_lock);
424 map->ref_count++;
425 simple_unlock(&map->ref_lock);
426}
427
428/*
429 * vm_map_deallocate:
430 *
431 * Removes a reference from the specified map,
432 * destroying it if no references remain.
433 * The map should not be locked.
434 */
435void
436vm_map_deallocate(map)
437 register vm_map_t map;
438{
439 register int c;
440
441 if (map == NULL)
442 return;
443
444 simple_lock(&map->ref_lock);
445 c = --map->ref_count;
446 simple_unlock(&map->ref_lock);
447
448 if (c > 0) {
449 return;
450 }
451
452 /*
453 * Lock the map, to wait out all other references
454 * to it.
455 */
456
457 vm_map_lock(map);
458
459 (void) vm_map_delete(map, map->min_offset, map->max_offset);
460
461 pmap_destroy(map->pmap);
462
463 FREE(map, M_VMMAP);
464}
465
466/*
467 * vm_map_insert: [ internal use only ]
468 *
469 * Inserts the given whole VM object into the target
470 * map at the specified address range. The object's
471 * size should match that of the address range.
472 *
473 * Requires that the map be locked, and leaves it so.
474 */
475int
476vm_map_insert(map, object, offset, start, end)
477 vm_map_t map;
478 vm_object_t object;
479 vm_offset_t offset;
480 vm_offset_t start;
481 vm_offset_t end;
482{
483 register vm_map_entry_t new_entry;
484 register vm_map_entry_t prev_entry;
485 vm_map_entry_t temp_entry;
486
487 /*
488 * Check that the start and end points are not bogus.
489 */
490
491 if ((start < map->min_offset) || (end > map->max_offset) ||
492 (start >= end))
493 return(KERN_INVALID_ADDRESS);
494
495 /*
496 * Find the entry prior to the proposed
497 * starting address; if it's part of an
498 * existing entry, this range is bogus.
499 */
500
501 if (vm_map_lookup_entry(map, start, &temp_entry)) {
502 return(KERN_NO_SPACE);
503 }
504
505 prev_entry = temp_entry;
506
507 /*
508 * Assert that the next entry doesn't overlap the
509 * end point.
510 */
511
512 if ((prev_entry->next != &map->header) &&
513 (prev_entry->next->start < end)) {
514 return(KERN_NO_SPACE);
515 }
516
517 /*
518 * See if we can avoid creating a new entry by
519 * extending one of our neighbors.
520 */
521
522 if (object == NULL) {
523 if ((prev_entry != &map->header) &&
524 (prev_entry->end == start) &&
525 (map->is_main_map) &&
526 (prev_entry->is_a_map == FALSE) &&
527 (prev_entry->is_sub_map == FALSE) &&
528 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
529 (prev_entry->protection == VM_PROT_DEFAULT) &&
530 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
531 (prev_entry->wired_count == 0)) {
532
533 if (vm_object_coalesce(prev_entry->object.vm_object,
534 NULL,
535 prev_entry->offset,
536 (vm_offset_t) 0,
537 (vm_size_t)(prev_entry->end
538 - prev_entry->start),
539 (vm_size_t)(end - prev_entry->end))) {
540 /*
541 * Coalesced the two objects - can extend
542 * the previous map entry to include the
543 * new range.
544 */
545 map->size += (end - prev_entry->end);
546 prev_entry->end = end;
547 return(KERN_SUCCESS);
548 }
549 }
550 }
551
552 /*
553 * Create a new entry
554 */
555
556 new_entry = vm_map_entry_create(map);
557 new_entry->start = start;
558 new_entry->end = end;
559
560 new_entry->is_a_map = FALSE;
561 new_entry->is_sub_map = FALSE;
562 new_entry->object.vm_object = object;
563 new_entry->offset = offset;
564
565 new_entry->copy_on_write = FALSE;
566 new_entry->needs_copy = FALSE;
567
568 if (map->is_main_map) {
569 new_entry->inheritance = VM_INHERIT_DEFAULT;
570 new_entry->protection = VM_PROT_DEFAULT;
571 new_entry->max_protection = VM_PROT_DEFAULT;
572 new_entry->wired_count = 0;
573 }
574
575 /*
576 * Insert the new entry into the list
577 */
578
579 vm_map_entry_link(map, prev_entry, new_entry);
580 map->size += new_entry->end - new_entry->start;
581
582 /*
583 * Update the free space hint
584 */
585
586 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
587 map->first_free = new_entry;
588
589 return(KERN_SUCCESS);
590}
591
592/*
593 * SAVE_HINT:
594 *
595 * Saves the specified entry as the hint for
596 * future lookups. Performs necessary interlocks.
597 */
598#define SAVE_HINT(map,value) \
599 simple_lock(&(map)->hint_lock); \
600 (map)->hint = (value); \
601 simple_unlock(&(map)->hint_lock);
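/*
 * Usage note (a sketch): SAVE_HINT expands to three statements, so it is
 * only used as a standalone statement, e.g.
 *
 *	SAVE_HINT(map, cur);
 *
 * as vm_map_lookup_entry does below.
 */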
602
603/*
604 * vm_map_lookup_entry: [ internal use only ]
605 *
606 * Finds the map entry containing (or
607 * immediately preceding) the specified address
608 * in the given map; the entry is returned
609 * in the "entry" parameter. The boolean
610 * result indicates whether the address is
611 * actually contained in the map.
612 */
613boolean_t
614vm_map_lookup_entry(map, address, entry)
615 register vm_map_t map;
616 register vm_offset_t address;
617 vm_map_entry_t *entry; /* OUT */
618{
619 register vm_map_entry_t cur;
620 register vm_map_entry_t last;
621
622 /*
623 * Start looking either from the head of the
624 * list, or from the hint.
625 */
626
627 simple_lock(&map->hint_lock);
628 cur = map->hint;
629 simple_unlock(&map->hint_lock);
630
631 if (cur == &map->header)
632 cur = cur->next;
633
634 if (address >= cur->start) {
635 /*
636 * Go from hint to end of list.
637 *
638 * But first, make a quick check to see if
639 * we are already looking at the entry we
640 * want (which is usually the case).
641 * Note also that we don't need to save the hint
642 * here... it is the same hint (unless we are
643 * at the header, in which case the hint didn't
644 * buy us anything anyway).
645 */
646 last = &map->header;
647 if ((cur != last) && (cur->end > address)) {
648 *entry = cur;
649 return(TRUE);
650 }
651 }
652 else {
653 /*
654 * Go from start to hint, *inclusively*
655 */
656 last = cur->next;
657 cur = map->header.next;
658 }
659
660 /*
661 * Search linearly
662 */
663
664 while (cur != last) {
665 if (cur->end > address) {
666 if (address >= cur->start) {
667 /*
668 * Save this lookup for future
669 * hints, and return
670 */
671
672 *entry = cur;
673 SAVE_HINT(map, cur);
674 return(TRUE);
675 }
676 break;
677 }
678 cur = cur->next;
679 }
680 *entry = cur->prev;
681 SAVE_HINT(map, *entry);
682 return(FALSE);
683}
684
685/*
686 * vm_map_find finds an unallocated region in the target address
687 * map with the given length. The search is defined to be
688 * first-fit from the specified address; the region found is
689 * returned in the same parameter.
690 *
691 */
692int
693vm_map_find(map, object, offset, addr, length, find_space)
694 vm_map_t map;
695 vm_object_t object;
696 vm_offset_t offset;
697 vm_offset_t *addr; /* IN/OUT */
698 vm_size_t length;
699 boolean_t find_space;
700{
701 register vm_map_entry_t entry;
702 register vm_offset_t start;
703 register vm_offset_t end;
704 int result;
705
706 vm_map_lock(map);
707
708 start = *addr;
709
710 if (find_space) {
711 /*
712 * Calculate the first possible address.
713 */
714
715 if (start < map->min_offset)
716 start = map->min_offset;
717 if (start > map->max_offset) {
718 vm_map_unlock(map);
719 return (KERN_NO_SPACE);
720 }
721
722 /*
723 * Look for the first possible address;
724 * if there's already something at this
725 * address, we have to start after it.
726 */
727
728 if (start == map->min_offset) {
729 if ((entry = map->first_free) != &map->header)
730 start = entry->end;
731 } else {
732 vm_map_entry_t tmp_entry;
733 if (vm_map_lookup_entry(map, start, &tmp_entry))
734 start = tmp_entry->end;
735 entry = tmp_entry;
736 }
737
738 /*
739 * In any case, the "entry" always precedes
740 * the proposed new region throughout the
741 * loop:
742 */
743
744 while (TRUE) {
745 register vm_map_entry_t next;
746
747 /*
748 * Find the end of the proposed new region.
749 * Be sure we didn't go beyond the end, or
750 * wrap around the address.
751 */
752
753 end = start + length;
754
755 if ((end > map->max_offset) || (end < start)) {
756 vm_map_unlock(map);
757 return (KERN_NO_SPACE);
758 }
759
760 /*
761 * If there are no more entries, we must win.
762 */
763
764 next = entry->next;
765 if (next == &map->header)
766 break;
767
768 /*
769 * If there is another entry, it must be
770 * after the end of the potential new region.
771 */
772
773 if (next->start >= end)
774 break;
775
776 /*
777 * Didn't fit -- move to the next entry.
778 */
779
780 entry = next;
781 start = entry->end;
782 }
783 *addr = start;
784
785 SAVE_HINT(map, entry);
786 }
787
788 result = vm_map_insert(map, object, offset, start, start + length);
789
790 vm_map_unlock(map);
791 return(result);
792}
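/*
 * A sketch of typical use (the variables are hypothetical): let the map
 * pick the address by seeding *addr with the lowest acceptable value and
 * passing find_space = TRUE.
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	if (vm_map_find(map, NULL, (vm_offset_t) 0, &addr, size, TRUE)
 *	    != KERN_SUCCESS)
 *		... the map has no room for "size" bytes ...
 */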
793
794/*
795 * vm_map_simplify_entry: [ internal use only ]
796 *
797 * Simplify the given map entry by:
798 * removing extra sharing maps
799 * [XXX maybe later] merging with a neighbor
800 */
801void
802vm_map_simplify_entry(map, entry)
803 vm_map_t map;
804 vm_map_entry_t entry;
805{
806#ifdef lint
807 map++;
808#endif lint
809
810 /*
811 * If this entry corresponds to a sharing map, then
812 * see if we can remove the level of indirection.
813 * If it's not a sharing map, then it points to
814 * a VM object, so see if we can merge with either
815 * of our neighbors.
816 */
817
818 if (entry->is_sub_map)
819 return;
820 if (entry->is_a_map) {
821#if 0
822 vm_map_t my_share_map;
823 int count;
824
825 my_share_map = entry->object.share_map;
826 simple_lock(&my_share_map->ref_lock);
827 count = my_share_map->ref_count;
828 simple_unlock(&my_share_map->ref_lock);
829
830 if (count == 1) {
831 /* Can move the region from
832 * entry->start to entry->end (+ entry->offset)
833 * in my_share_map into place of entry.
834 * Later.
835 */
836 }
837#endif 0
838 }
839 else {
840 /*
841 * Try to merge with our neighbors.
842 *
843 * Conditions for merge are:
844 *
845 * 1. entries are adjacent.
846 * 2. both entries point to objects
847 * with null pagers.
848 *
849 * If a merge is possible, we replace the two
850 * entries with a single entry, then merge
851 * the two objects into a single object.
852 *
853 * Now, all that is left to do is write the
854 * code!
855 */
856 }
857}
858
859/*
860 * vm_map_clip_start: [ internal use only ]
861 *
862 * Asserts that the given entry begins at or after
863 * the specified address; if necessary,
864 * it splits the entry into two.
865 */
866#define vm_map_clip_start(map, entry, startaddr) \
867{ \
868 if (startaddr > entry->start) \
869 _vm_map_clip_start(map, entry, startaddr); \
870}
871
872/*
873 * This routine is called only when it is known that
874 * the entry must be split.
875 */
876void
877_vm_map_clip_start(map, entry, start)
878 register vm_map_t map;
879 register vm_map_entry_t entry;
880 register vm_offset_t start;
881{
882 register vm_map_entry_t new_entry;
883
884 /*
885 * See if we can simplify this entry first
886 */
887
888 /* vm_map_simplify_entry(map, entry); */
889
890 /*
891 * Split off the front portion --
892 * note that we must insert the new
893 * entry BEFORE this one, so that
894 * this entry has the specified starting
895 * address.
896 */
897
898 new_entry = vm_map_entry_create(map);
899 *new_entry = *entry;
900
901 new_entry->end = start;
902 entry->offset += (start - entry->start);
903 entry->start = start;
904
905 vm_map_entry_link(map, entry->prev, new_entry);
906
907 if (entry->is_a_map || entry->is_sub_map)
908 vm_map_reference(new_entry->object.share_map);
909 else
910 vm_object_reference(new_entry->object.vm_object);
911}
912
913/*
914 * vm_map_clip_end: [ internal use only ]
915 *
916 * Asserts that the given entry ends at or before
917 * the specified address; if necessary,
918 * it splits the entry into two.
919 */
920
921void _vm_map_clip_end();
922#define vm_map_clip_end(map, entry, endaddr) \
923{ \
924 if (endaddr < entry->end) \
925 _vm_map_clip_end(map, entry, endaddr); \
926}
927
928/*
929 * This routine is called only when it is known that
930 * the entry must be split.
931 */
932inline void
933_vm_map_clip_end(map, entry, end)
934 register vm_map_t map;
935 register vm_map_entry_t entry;
936 register vm_offset_t end;
937{
938 register vm_map_entry_t new_entry;
939
940 /*
941 * Create a new entry and insert it
942 * AFTER the specified entry
943 */
944
945 new_entry = vm_map_entry_create(map);
946 *new_entry = *entry;
947
948 new_entry->start = entry->end = end;
949 new_entry->offset += (end - entry->start);
950
951 vm_map_entry_link(map, entry, new_entry);
952
953 if (entry->is_a_map || entry->is_sub_map)
954 vm_map_reference(new_entry->object.share_map);
955 else
956 vm_object_reference(new_entry->object.vm_object);
957}
958
959/*
960 * VM_MAP_RANGE_CHECK: [ internal use only ]
961 *
962 * Asserts that the starting and ending region
963 * addresses fall within the valid range of the map.
964 */
965#define VM_MAP_RANGE_CHECK(map, start, end) \
966 { \
967 if (start < vm_map_min(map)) \
968 start = vm_map_min(map); \
969 if (end > vm_map_max(map)) \
970 end = vm_map_max(map); \
971 if (start > end) \
972 start = end; \
973 }
974
975/*
976 * vm_map_submap: [ kernel use only ]
977 *
978 * Mark the given range as handled by a subordinate map.
979 *
980 * This range must have been created with vm_map_find,
981 * and no other operations may have been performed on this
982 * range prior to calling vm_map_submap.
983 *
984 * Only a limited number of operations can be performed
985 * within this range after calling vm_map_submap:
986 * vm_fault
987 * [Don't try vm_map_copy!]
988 *
989 * To remove a submapping, one must first remove the
990 * range from the superior map, and then destroy the
991 * submap (if desired). [Better yet, don't try it.]
992 */
993int
994vm_map_submap(map, start, end, submap)
995 register vm_map_t map;
996 register vm_offset_t start;
997 register vm_offset_t end;
998 vm_map_t submap;
999{
1000 vm_map_entry_t entry;
1001 register int result = KERN_INVALID_ARGUMENT;
1002
1003 vm_map_lock(map);
1004
1005 VM_MAP_RANGE_CHECK(map, start, end);
1006
1007 if (vm_map_lookup_entry(map, start, &entry)) {
1008 vm_map_clip_start(map, entry, start);
1009 }
1010 else
1011 entry = entry->next;
1012
1013 vm_map_clip_end(map, entry, end);
1014
1015 if ((entry->start == start) && (entry->end == end) &&
1016 (!entry->is_a_map) &&
1017 (entry->object.vm_object == NULL) &&
1018 (!entry->copy_on_write)) {
1019 entry->is_a_map = FALSE;
1020 entry->is_sub_map = TRUE;
1021 vm_map_reference(entry->object.sub_map = submap);
1022 result = KERN_SUCCESS;
1023 }
1024 vm_map_unlock(map);
1025
1026 return(result);
1027}
1028
1029/*
1030 * vm_map_protect:
1031 *
1032 * Sets the protection of the specified address
1033 * region in the target map. If "set_max" is
1034 * specified, the maximum protection is to be set;
1035 * otherwise, only the current protection is affected.
1036 */
1037int
1038vm_map_protect(map, start, end, new_prot, set_max)
1039 register vm_map_t map;
1040 register vm_offset_t start;
1041 register vm_offset_t end;
1042 register vm_prot_t new_prot;
1043 register boolean_t set_max;
1044{
1045 register vm_map_entry_t current;
1046 vm_map_entry_t entry;
1047
1048 vm_map_lock(map);
1049
1050 VM_MAP_RANGE_CHECK(map, start, end);
1051
1052 if (vm_map_lookup_entry(map, start, &entry)) {
1053 vm_map_clip_start(map, entry, start);
1054 }
1055 else
1056 entry = entry->next;
1057
1058 /*
1059 * Make a first pass to check for protection
1060 * violations.
1061 */
1062
1063 current = entry;
1064 while ((current != &map->header) && (current->start < end)) {
1065 if (current->is_sub_map)
1066 return(KERN_INVALID_ARGUMENT);
1067 if ((new_prot & current->max_protection) != new_prot) {
1068 vm_map_unlock(map);
1069 return(KERN_PROTECTION_FAILURE);
1070 }
1071
1072 current = current->next;
1073 }
1074
1075 /*
1076 * Go back and fix up protections.
1077 * [Note that clipping is not necessary the second time.]
1078 */
1079
1080 current = entry;
1081
1082 while ((current != &map->header) && (current->start < end)) {
1083 vm_prot_t old_prot;
1084
1085 vm_map_clip_end(map, current, end);
1086
1087 old_prot = current->protection;
1088 if (set_max)
1089 current->protection =
1090 (current->max_protection = new_prot) &
1091 old_prot;
1092 else
1093 current->protection = new_prot;
1094
1095 /*
1096 * Update physical map if necessary.
1097 * Worry about copy-on-write here -- CHECK THIS XXX
1098 */
1099
1100 if (current->protection != old_prot) {
1101
1102#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1103 VM_PROT_ALL)
1104#define max(a,b) ((a) > (b) ? (a) : (b))
1105
1106 if (current->is_a_map) {
1107 vm_map_entry_t share_entry;
1108 vm_offset_t share_end;
1109
1110 vm_map_lock(current->object.share_map);
1111 (void) vm_map_lookup_entry(
1112 current->object.share_map,
1113 current->offset,
1114 &share_entry);
1115 share_end = current->offset +
1116 (current->end - current->start);
1117 while ((share_entry !=
1118 &current->object.share_map->header) &&
1119 (share_entry->start < share_end)) {
1120
1121 pmap_protect(map->pmap,
1122 (max(share_entry->start,
1123 current->offset) -
1124 current->offset +
1125 current->start),
1126 min(share_entry->end,
1127 share_end) -
1128 current->offset +
1129 current->start,
1130 current->protection &
1131 MASK(share_entry));
1132
1133 share_entry = share_entry->next;
1134 }
1135 vm_map_unlock(current->object.share_map);
1136 }
1137 else
1138 pmap_protect(map->pmap, current->start,
1139 current->end,
1140 current->protection & MASK(entry));
1141#undef max
1142#undef MASK
1143 }
1144 current = current->next;
1145 }
1146
1147 vm_map_unlock(map);
1148 return(KERN_SUCCESS);
1149}
1150
1151/*
1152 * vm_map_inherit:
1153 *
1154 * Sets the inheritance of the specified address
1155 * range in the target map. Inheritance
1156 * affects how the map will be shared with
1157 * child maps at the time of vm_map_fork.
1158 */
1159int
1160vm_map_inherit(map, start, end, new_inheritance)
1161 register vm_map_t map;
1162 register vm_offset_t start;
1163 register vm_offset_t end;
1164 register vm_inherit_t new_inheritance;
1165{
1166 register vm_map_entry_t entry;
1167 vm_map_entry_t temp_entry;
1168
1169 switch (new_inheritance) {
1170 case VM_INHERIT_NONE:
1171 case VM_INHERIT_COPY:
1172 case VM_INHERIT_SHARE:
1173 break;
1174 default:
1175 return(KERN_INVALID_ARGUMENT);
1176 }
1177
1178 vm_map_lock(map);
1179
1180 VM_MAP_RANGE_CHECK(map, start, end);
1181
1182 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1183 entry = temp_entry;
1184 vm_map_clip_start(map, entry, start);
1185 }
1186 else
1187 entry = temp_entry->next;
1188
1189 while ((entry != &map->header) && (entry->start < end)) {
1190 vm_map_clip_end(map, entry, end);
1191
1192 entry->inheritance = new_inheritance;
1193
1194 entry = entry->next;
1195 }
1196
1197 vm_map_unlock(map);
1198 return(KERN_SUCCESS);
1199}
1200
1201/*
1202 * vm_map_pageable:
1203 *
1204 * Sets the pageability of the specified address
1205 * range in the target map. Regions specified
1206 * as not pageable require locked-down physical
1207 * memory and physical page maps.
1208 *
1209 * The map must not be locked, but a reference
1210 * must remain to the map throughout the call.
1211 */
1212int
1213vm_map_pageable(map, start, end, new_pageable)
1214 register vm_map_t map;
1215 register vm_offset_t start;
1216 register vm_offset_t end;
1217 register boolean_t new_pageable;
1218{
1219 register vm_map_entry_t entry;
1220 vm_map_entry_t temp_entry;
1221 extern vm_map_t kernel_map;
1222
1223 vm_map_lock(map);
1224
1225 VM_MAP_RANGE_CHECK(map, start, end);
1226
1227 /*
1228 * Only one pageability change may take place at one
1229 * time, since vm_fault assumes it will be called
1230 * only once for each wiring/unwiring. Therefore, we
1231 * have to make sure we're actually changing the pageability
1232 * for the entire region. We do so before making any changes.
1233 */
1234
1235 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1236 entry = temp_entry;
1237 vm_map_clip_start(map, entry, start);
1238 }
1239 else
1240 entry = temp_entry->next;
1241 temp_entry = entry;
1242
1243 /*
1244 * Actions are rather different for wiring and unwiring,
1245 * so we have two separate cases.
1246 */
1247
1248 if (new_pageable) {
1249
1250 /*
1251 * Unwiring. First ensure that the range to be
1252 * unwired is really wired down.
1253 */
1254 while ((entry != &map->header) && (entry->start < end)) {
1255
1256 if (entry->wired_count == 0) {
1257 vm_map_unlock(map);
1258 return(KERN_INVALID_ARGUMENT);
1259 }
1260 entry = entry->next;
1261 }
1262
1263 /*
1264 * Now decrement the wiring count for each region.
1265 * If a region becomes completely unwired,
1266 * unwire its physical pages and mappings.
1267 */
1268 lock_set_recursive(&map->lock);
1269
1270 entry = temp_entry;
1271 while ((entry != &map->header) && (entry->start < end)) {
1272 vm_map_clip_end(map, entry, end);
1273
1274 entry->wired_count--;
1275 if (entry->wired_count == 0)
1276 vm_fault_unwire(map, entry->start, entry->end);
1277
1278 entry = entry->next;
1279 }
1280 lock_clear_recursive(&map->lock);
1281 }
1282
1283 else {
1284 /*
1285 * Wiring. We must do this in two passes:
1286 *
1287 * 1. Holding the write lock, we increment the
1288 * wiring count. For any area that is not already
1289 * wired, we create any shadow objects that need
1290 * to be created.
1291 *
1292 * 2. We downgrade to a read lock, and call
1293 * vm_fault_wire to fault in the pages for any
1294 * newly wired area (wired_count is 1).
1295 *
1296 * Downgrading to a read lock for vm_fault_wire avoids
1297 * a possible deadlock with another thread that may have
1298 * faulted on one of the pages to be wired (it would mark
1299 * the page busy, blocking us, then in turn block on the
1300 * map lock that we hold). Because of problems in the
1301 * recursive lock package, we cannot upgrade to a write
1302 * lock in vm_map_lookup. Thus, any actions that require
1303 * the write lock must be done beforehand. Because we
1304 * keep the read lock on the map, the copy-on-write status
1305 * of the entries we modify here cannot change.
1306 */
1307
1308 /*
1309 * Pass 1.
1310 */
1311 entry = temp_entry;
1312 while ((entry != &map->header) && (entry->start < end)) {
1313 vm_map_clip_end(map, entry, end);
1314
1315 entry->wired_count++;
1316 if (entry->wired_count == 1) {
1317
1318 /*
1319 * Perform actions of vm_map_lookup that need
1320 * the write lock on the map: create a shadow
1321 * object for a copy-on-write region, or an
1322 * object for a zero-fill region.
1323 *
1324 * We don't have to do this for entries that
1325 * point to sharing maps, because we won't hold
1326 * the lock on the sharing map.
1327 */
1328 if (!entry->is_a_map) {
1329 if (entry->needs_copy &&
1330 ((entry->protection & VM_PROT_WRITE) != 0)) {
1331
1332 vm_object_shadow(&entry->object.vm_object,
1333 &entry->offset,
1334 (vm_size_t)(entry->end
1335 - entry->start));
1336 entry->needs_copy = FALSE;
1337 }
1338 else if (entry->object.vm_object == NULL) {
1339 entry->object.vm_object =
1340 vm_object_allocate((vm_size_t)(entry->end
1341 - entry->start));
1342 entry->offset = (vm_offset_t)0;
1343 }
1344 }
1345 }
1346
1347 entry = entry->next;
1348 }
1349
1350 /*
1351 * Pass 2.
1352 */
1353
1354 /*
1355 * HACK HACK HACK HACK
1356 *
1357 * If we are wiring in the kernel map or a submap of it,
1358 * unlock the map to avoid deadlocks. We trust that the
1359 * kernel threads are well-behaved, and therefore will
1360 * not do anything destructive to this region of the map
1361 * while we have it unlocked. We cannot trust user threads
1362 * to do the same.
1363 *
1364 * HACK HACK HACK HACK
1365 */
1366 if (vm_map_pmap(map) == kernel_pmap) {
1367 vm_map_unlock(map); /* trust me ... */
1368 }
1369 else {
1370 lock_set_recursive(&map->lock);
1371 lock_write_to_read(&map->lock);
1372 }
1373
1374 entry = temp_entry;
1375 while (entry != &map->header && entry->start < end) {
1376 if (entry->wired_count == 1) {
1377 vm_fault_wire(map, entry->start, entry->end);
1378 }
1379 entry = entry->next;
1380 }
1381
1382 if (vm_map_pmap(map) == kernel_pmap) {
1383 vm_map_lock(map);
1384 }
1385 else {
1386 lock_clear_recursive(&map->lock);
1387 }
1388 }
1389
1390 vm_map_unlock(map);
1391
1392 return(KERN_SUCCESS);
1393}
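/*
 * A sketch of typical use (hypothetical caller): wire a page-aligned
 * range down for the duration of an I/O operation, then let it page
 * again afterwards.
 *
 *	vm_map_pageable(map, trunc_page(addr), round_page(addr + len), FALSE);
 *	... perform the I/O ...
 *	vm_map_pageable(map, trunc_page(addr), round_page(addr + len), TRUE);
 */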
1394
1395/*
1396 * vm_map_entry_unwire: [ internal use only ]
1397 *
1398 * Make the region specified by this entry pageable.
1399 *
1400 * The map in question should be locked.
1401 * [This is the reason for this routine's existence.]
1402 */
1403void
1404vm_map_entry_unwire(map, entry)
1405 vm_map_t map;
1406 register vm_map_entry_t entry;
1407{
1408 vm_fault_unwire(map, entry->start, entry->end);
1409 entry->wired_count = 0;
1410}
1411
1412/*
1413 * vm_map_entry_delete: [ internal use only ]
1414 *
1415 * Deallocate the given entry from the target map.
1416 */
1417void
1418vm_map_entry_delete(map, entry)
1419 register vm_map_t map;
1420 register vm_map_entry_t entry;
1421{
1422 int prev_ref_count;
1423 if (entry->wired_count != 0)
1424 vm_map_entry_unwire(map, entry);
1425
1426 vm_map_entry_unlink(map, entry);
1427 map->size -= entry->end - entry->start;
1428
1429 if (entry->is_a_map || entry->is_sub_map) {
1430 vm_map_deallocate(entry->object.share_map);
1431 } else {
1432 vm_object_deallocate(entry->object.vm_object);
1433 }
1434
1435 vm_map_entry_dispose(map, entry);
1436}
1437
1438/*
1439 * vm_map_delete: [ internal use only ]
1440 *
1441 * Deallocates the given address range from the target
1442 * map.
1443 *
1444 * When called with a sharing map, removes pages from
1445 * that region from all physical maps.
1446 */
1447int
1448vm_map_delete(map, start, end)
1449 register vm_map_t map;
1450 vm_offset_t start;
1451 register vm_offset_t end;
1452{
1453 register vm_map_entry_t entry;
1454 vm_map_entry_t first_entry;
1455
1456
1457 /*
1458 * Find the start of the region, and clip it
1459 */
1460
1461 if (!vm_map_lookup_entry(map, start, &first_entry))
1462 entry = first_entry->next;
1463 else {
1464 entry = first_entry;
1465 vm_map_clip_start(map, entry, start);
1466
1467 /*
1468 * Fix the lookup hint now, rather than each
1469 * time through the loop.
1470 */
1471
1472 SAVE_HINT(map, entry->prev);
1473 }
1474
1475 /*
1476 * Save the free space hint
1477 */
1478
1479 if (map->first_free->start >= start)
1480 map->first_free = entry->prev;
1481
1482 /*
1483 * Step through all entries in this region
1484 */
1485
1486 while ((entry != &map->header) && (entry->start < end)) {
1487 vm_map_entry_t next;
1488 register vm_offset_t s, e;
1489 register vm_object_t object;
1490
1491 vm_map_clip_end(map, entry, end);
1492
1493 next = entry->next;
1494 s = entry->start;
1495 e = entry->end;
1496
1497 /*
1498 * Unwire before removing addresses from the pmap;
1499 * otherwise, unwiring will put the entries back in
1500 * the pmap.
1501 */
1502
1503 object = entry->object.vm_object;
1504 if (entry->wired_count != 0)
1505 vm_map_entry_unwire(map, entry);
1506
1507 /*
1508 * If this is a sharing map, we must remove
1509 * *all* references to this data, since we can't
1510 * find all of the physical maps which are sharing
1511 * it.
1512 */
1513
1514 if (object == kernel_object || object == kmem_object) {
1515 vm_object_page_remove(object, entry->offset,
1516 entry->offset + (e - s));
1517 } else if (!map->is_main_map) {
1518 vm_object_pmap_remove(object,
1519 entry->offset,
1520 entry->offset + (e - s));
1521 } else {
1522 /*
1523 * save the pmap info
1524 */
1525 pmap_remove(map->pmap, s, e);
1526 }
1527
1528 /*
1529 * Delete the entry (which may delete the object)
1530 * only after removing all pmap entries pointing
1531 * to its pages. (Otherwise, its page frames may
1532 * be reallocated, and any modify bits will be
1533 * set in the wrong object!)
1534 */
1535
1536 vm_map_entry_delete(map, entry);
1537 entry = next;
1538 }
1539 return(KERN_SUCCESS);
1540}
1541
1542/*
1543 * vm_map_remove:
1544 *
1545 * Remove the given address range from the target map.
1546 * This is the exported form of vm_map_delete.
1547 */
1548int
1549vm_map_remove(map, start, end)
1550 register vm_map_t map;
1551 register vm_offset_t start;
1552 register vm_offset_t end;
1553{
1554 register int result;
1555
1556 vm_map_lock(map);
1557 VM_MAP_RANGE_CHECK(map, start, end);
1558 result = vm_map_delete(map, start, end);
1559 vm_map_unlock(map);
1560
1561 return(result);
1562}
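/*
 * A sketch of typical use (hypothetical caller): return a previously
 * allocated, page-aligned kernel range to the kernel map.
 *
 *	vm_map_remove(kernel_map, addr, addr + round_page(size));
 */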
1563
1564/*
1565 * vm_map_check_protection:
1566 *
1567 * Assert that the target map allows the specified
1568 * privilege on the entire address region given.
1569 * The entire region must be allocated.
1570 */
1571boolean_t
1572vm_map_check_protection(map, start, end, protection)
1573 register vm_map_t map;
1574 register vm_offset_t start;
1575 register vm_offset_t end;
1576 register vm_prot_t protection;
1577{
1578 register vm_map_entry_t entry;
1579 vm_map_entry_t tmp_entry;
1580
1581 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1582 return(FALSE);
1583 }
1584
1585 entry = tmp_entry;
1586
1587 while (start < end) {
1588 if (entry == &map->header) {
1589 return(FALSE);
1590 }
1591
1592 /*
1593 * No holes allowed!
1594 */
1595
1596 if (start < entry->start) {
1597 return(FALSE);
1598 }
1599
1600 /*
1601 * Check protection associated with entry.
1602 */
1603
1604 if ((entry->protection & protection) != protection) {
1605 return(FALSE);
1606 }
1607
1608 /* go to next entry */
1609
1610 start = entry->end;
1611 entry = entry->next;
1612 }
1613 return(TRUE);
1614}
1615
1616/*
1617 * vm_map_copy_entry:
1618 *
1619 * Copies the contents of the source entry to the destination
1620 * entry. The entries *must* be aligned properly.
1621 */
1622void
1623vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1624 vm_map_t src_map, dst_map;
1625 register vm_map_entry_t src_entry, dst_entry;
1626{
1627 vm_object_t temp_object;
1628
1629 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1630 return;
1631
1632 if (dst_entry->object.vm_object != NULL &&
1633 !dst_entry->object.vm_object->internal)
1634 printf("vm_map_copy_entry: copying over permanent data!\n");
1635
1636 /*
1637 * If our destination map was wired down,
1638 * unwire it now.
1639 */
1640
1641 if (dst_entry->wired_count != 0)
1642 vm_map_entry_unwire(dst_map, dst_entry);
1643
1644 /*
1645 * If we're dealing with a sharing map, we
1646 * must remove the destination pages from
1647 * all maps (since we cannot know which maps
1648 * this sharing map belongs in).
1649 */
1650
1651 if (dst_map->is_main_map) {
1652 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1653 } else {
1654 vm_object_pmap_remove(dst_entry->object.vm_object,
1655 dst_entry->offset,
1656 dst_entry->offset +
1657 (dst_entry->end - dst_entry->start));
1658 }
1659
1660 if (src_entry->wired_count == 0) {
1661
1662 boolean_t src_needs_copy;
1663
1664 /*
1665 * If the source entry is marked needs_copy,
1666 * it is already write-protected.
1667 */
1668 if (!src_entry->needs_copy) {
1669
1670 boolean_t su;
1671
1672 /*
1673 * If the source entry has only one mapping,
1674 * we can just protect the virtual address
1675 * range.
1676 */
1677 if (!(su = src_map->is_main_map)) {
1678 simple_lock(&src_map->ref_lock);
1679 su = (src_map->ref_count == 1);
1680 simple_unlock(&src_map->ref_lock);
1681 }
1682
1683 if (su) {
1684 pmap_protect(src_map->pmap,
1685 src_entry->start,
1686 src_entry->end,
1687 src_entry->protection & ~VM_PROT_WRITE);
1688 }
1689 else {
1690 vm_object_pmap_copy(src_entry->object.vm_object,
1691 src_entry->offset,
1692 src_entry->offset + (src_entry->end
1693 -src_entry->start));
1694 }
1695 }
1696
1697 /*
1698 * Make a copy of the object.
1699 */
1700 temp_object = dst_entry->object.vm_object;
1701 vm_object_copy(src_entry->object.vm_object,
1702 src_entry->offset,
1703 (vm_size_t)(src_entry->end -
1704 src_entry->start),
1705 &dst_entry->object.vm_object,
1706 &dst_entry->offset,
1707 &src_needs_copy);
1708 /*
1709 * If we didn't get a copy-object now, mark the
1710 * source map entry so that a shadow will be created
1711 * to hold its changed pages.
1712 */
1713 if (src_needs_copy)
1714 src_entry->needs_copy = TRUE;
1715
1716 /*
1717 * The destination always needs to have a shadow
1718 * created.
1719 */
1720 dst_entry->needs_copy = TRUE;
1721
1722 /*
1723 * Mark the entries copy-on-write, so that write-enabling
1724 * the entry won't make copy-on-write pages writable.
1725 */
1726 src_entry->copy_on_write = TRUE;
1727 dst_entry->copy_on_write = TRUE;
1728 /*
1729 * Get rid of the old object.
1730 */
1731 vm_object_deallocate(temp_object);
1732
1733 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1734 dst_entry->end - dst_entry->start, src_entry->start);
1735 }
1736 else {
1737 /*
1738 * Of course, wired down pages can't be set copy-on-write.
1739 * Cause wired pages to be copied into the new
1740 * map by simulating faults (the new pages are
1741 * pageable)
1742 */
1743 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1744 }
1745}
1746
1747/*
1748 * vm_map_copy:
1749 *
1750 * Perform a virtual memory copy from the source
1751 * address map/range to the destination map/range.
1752 *
1753 * If src_destroy or dst_alloc is requested,
1754 * the source and destination regions should be
1755 * disjoint, not only in the top-level map, but
1756 * in the sharing maps as well. [The best way
1757 * to guarantee this is to use a new intermediate
1758 * map to make copies. This also reduces map
1759 * fragmentation.]
1760 */
1761int
1762vm_map_copy(dst_map, src_map, dst_addr, len, src_addr, dst_alloc, src_destroy)
1763 vm_map_t dst_map;
1764 vm_map_t src_map;
1765 vm_offset_t dst_addr;
1766 vm_size_t len;
1767 vm_offset_t src_addr;
1768 boolean_t dst_alloc;
1769 boolean_t src_destroy;
1770{
1771 register
1772 vm_map_entry_t src_entry;
1773 register
1774 vm_map_entry_t dst_entry;
1775 vm_map_entry_t tmp_entry;
1776 vm_offset_t src_start;
1777 vm_offset_t src_end;
1778 vm_offset_t dst_start;
1779 vm_offset_t dst_end;
1780 vm_offset_t src_clip;
1781 vm_offset_t dst_clip;
1782 int result;
1783 boolean_t old_src_destroy;
1784
1785 /*
1786 * XXX While we figure out why src_destroy screws up,
1787 * we'll do it by explicitly vm_map_delete'ing at the end.
1788 */
1789 old_src_destroy = src_destroy;
1790 src_destroy = FALSE;
1791
1792 /*
1793 * Compute start and end of region in both maps
1794 */
1795 src_start = src_addr;
1796 src_end = src_start + len;
1797 dst_start = dst_addr;
1798 dst_end = dst_start + len;
1799
1800 /*
1801 * Check that the region can exist in both source
1802 * and destination.
1803 */
1804 if ((dst_end < dst_start) || (src_end < src_start))
1805 return(KERN_NO_SPACE);
1806
1807 /*
1808 * Lock the maps in question -- we avoid deadlock
1809 * by ordering lock acquisition by map value
1810 */
1811 if (src_map == dst_map) {
1812 vm_map_lock(src_map);
1813 }
1814 else if ((int) src_map < (int) dst_map) {
1815 vm_map_lock(src_map);
1816 vm_map_lock(dst_map);
1817 } else {
1818 vm_map_lock(dst_map);
1819 vm_map_lock(src_map);
1820 }
1821
1822 result = KERN_SUCCESS;
1823
1824 /*
1825 * Check protections... source must be completely readable and
1826 * destination must be completely writable. [Note that if we're
1827 * allocating the destination region, we don't have to worry
1828 * about protection, but instead about whether the region
1829 * exists.]
1830 */
1831 if (src_map->is_main_map && dst_map->is_main_map) {
1832 if (!vm_map_check_protection(src_map, src_start, src_end,
1833 VM_PROT_READ)) {
1834 result = KERN_PROTECTION_FAILURE;
1835 goto Return;
1836 }
1837
1838 if (dst_alloc) {
1839 /* XXX Consider making this a vm_map_find instead */
1840 if ((result = vm_map_insert(dst_map, NULL,
1841 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1842 goto Return;
1843 }
1844 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1845 VM_PROT_WRITE)) {
1846 result = KERN_PROTECTION_FAILURE;
1847 goto Return;
1848 }
1849 }
1850
1851 /*
1852 * Find the start entries and clip.
1853 *
1854 * Note that checking protection asserts that the
1855 * lookup cannot fail.
1856 *
1857 * Also note that we wait to do the second lookup
1858 * until we have done the first clip, as the clip
1859 * may affect which entry we get!
1860 */
1861 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1862 src_entry = tmp_entry;
1863 vm_map_clip_start(src_map, src_entry, src_start);
1864
1865 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1866 dst_entry = tmp_entry;
1867 vm_map_clip_start(dst_map, dst_entry, dst_start);
1868
1869 /*
1870 * If both source and destination entries are the same,
1871 * retry the first lookup, as it may have changed.
1872 */
1873 if (src_entry == dst_entry) {
1874 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1875 src_entry = tmp_entry;
1876 }
1877
1878 /*
1879 * If source and destination entries are still the same,
1880 * a null copy is being performed.
1881 */
1882 if (src_entry == dst_entry)
1883 goto Return;
1884
1885 /*
1886 * Go through entries until we get to the end of the
1887 * region.
1888 */
1889 while (src_start < src_end) {
1890 /*
1891 * Clip the entries to the endpoint of the entire region.
1892 */
1893 vm_map_clip_end(src_map, src_entry, src_end);
1894 vm_map_clip_end(dst_map, dst_entry, dst_end);
1895
1896 /*
1897 * Clip each entry to the endpoint of the other entry.
1898 */
1899 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1900 vm_map_clip_end(src_map, src_entry, src_clip);
1901
1902 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1903 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1904
1905 /*
1906 * Both entries now match in size and relative endpoints.
1907 *
1908 * If both entries refer to a VM object, we can
1909 * deal with them now.
1910 */
1911 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1912 vm_map_copy_entry(src_map, dst_map, src_entry,
1913 dst_entry);
1914 }
1915 else {
1916 register vm_map_t new_dst_map;
1917 vm_offset_t new_dst_start;
1918 vm_size_t new_size;
1919 vm_map_t new_src_map;
1920 vm_offset_t new_src_start;
1921
1922 /*
1923 * We have to follow at least one sharing map.
1924 */
1925 new_size = (dst_entry->end - dst_entry->start);
1926
1927 if (src_entry->is_a_map) {
1928 new_src_map = src_entry->object.share_map;
1929 new_src_start = src_entry->offset;
1930 }
1931 else {
1932 new_src_map = src_map;
1933 new_src_start = src_entry->start;
1934 lock_set_recursive(&src_map->lock);
1935 }
1936
1937 if (dst_entry->is_a_map) {
1938 vm_offset_t new_dst_end;
1939
1940 new_dst_map = dst_entry->object.share_map;
1941 new_dst_start = dst_entry->offset;
1942
1943 /*
1944 * Since the destination sharing entries
1945 * will be merely deallocated, we can
1946 * do that now, and replace the region
1947 * with a null object. [This prevents
1948 * splitting the source map to match
1949 * the form of the destination map.]
1950 * Note that we can only do so if the
1951 * source and destination do not overlap.
1952 */
1953 new_dst_end = new_dst_start + new_size;
1954
1955 if (new_dst_map != new_src_map) {
1956 vm_map_lock(new_dst_map);
1957 (void) vm_map_delete(new_dst_map,
1958 new_dst_start,
1959 new_dst_end);
1960 (void) vm_map_insert(new_dst_map,
1961 NULL,
1962 (vm_offset_t) 0,
1963 new_dst_start,
1964 new_dst_end);
1965 vm_map_unlock(new_dst_map);
1966 }
1967 }
1968 else {
1969 new_dst_map = dst_map;
1970 new_dst_start = dst_entry->start;
1971 lock_set_recursive(&dst_map->lock);
1972 }
1973
1974 /*
1975 * Recursively copy the sharing map.
1976 */
1977 (void) vm_map_copy(new_dst_map, new_src_map,
1978 new_dst_start, new_size, new_src_start,
1979 FALSE, FALSE);
1980
1981 if (dst_map == new_dst_map)
1982 lock_clear_recursive(&dst_map->lock);
1983 if (src_map == new_src_map)
1984 lock_clear_recursive(&src_map->lock);
1985 }
1986
1987 /*
1988 * Update variables for next pass through the loop.
1989 */
1990 src_start = src_entry->end;
1991 src_entry = src_entry->next;
1992 dst_start = dst_entry->end;
1993 dst_entry = dst_entry->next;
1994
1995 /*
1996 * If the source is to be destroyed, here is the
1997 * place to do it.
1998 */
1999 if (src_destroy && src_map->is_main_map &&
2000 dst_map->is_main_map)
2001 vm_map_entry_delete(src_map, src_entry->prev);
2002 }
2003
2004 /*
2005 * Update the physical maps as appropriate
2006 */
2007 if (src_map->is_main_map && dst_map->is_main_map) {
2008 if (src_destroy) {
2009 pmap_remove(src_map->pmap, src_addr, src_addr + len);
2010 }
2011 }
2012
2013 /*
2014 * Unlock the maps
2015 */
2016
2017 Return: ;
2018
2019 if (old_src_destroy) {
2020 vm_map_delete(src_map, src_addr, src_addr + len);
2021 }
2022
2023 vm_map_unlock(src_map);
2024 if (src_map != dst_map)
2025 vm_map_unlock(dst_map);
2026
2027 return(result);
2028}
2029
2030/*
2031 * vmspace_fork:
2032 * Create a new process vmspace structure and vm_map
2033 * based on those of an existing process. The new map
2034 * is based on the old map, according to the inheritance
2035 * values on the regions in that map.
2036 *
2037 * The source map must not be locked.
2038 */
2039struct vmspace *
2040vmspace_fork(vm1)
2041 register struct vmspace *vm1;
2042{
2043 register struct vmspace *vm2;
2044 vm_map_t old_map = &vm1->vm_map;
2045 vm_map_t new_map;
2046 vm_map_entry_t old_entry;
2047 vm_map_entry_t new_entry;
2048 pmap_t new_pmap;
2049
2050 vm_map_lock(old_map);
2051
2052 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
2053 old_map->entries_pageable);
2054 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2055 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2056 new_pmap = &vm2->vm_pmap; /* XXX */
2057 new_map = &vm2->vm_map; /* XXX */
2058
2059 old_entry = old_map->header.next;
2060
2061 while (old_entry != &old_map->header) {
2062 if (old_entry->is_sub_map)
2063 panic("vm_map_fork: encountered a submap");
2064
2065 switch (old_entry->inheritance) {
2066 case VM_INHERIT_NONE:
2067 break;
2068
2069 case VM_INHERIT_SHARE:
2070 /*
2071 * If we don't already have a sharing map:
2072 */
2073
2074 if (!old_entry->is_a_map) {
2075 vm_map_t new_share_map;
2076 vm_map_entry_t new_share_entry;
2077
2078 /*
2079 * Create a new sharing map
2080 */
2081
2082 new_share_map = vm_map_create(NULL,
2083 old_entry->start,
2084 old_entry->end,
2085 TRUE);
2086 new_share_map->is_main_map = FALSE;
2087
2088 /*
2089 * Create the only sharing entry from the
2090 * old task map entry.
2091 */
2092
2093 new_share_entry =
2094 vm_map_entry_create(new_share_map);
2095 *new_share_entry = *old_entry;
2096
2097 /*
2098 * Insert the entry into the new sharing
2099 * map
2100 */
2101
2102 vm_map_entry_link(new_share_map,
2103 new_share_map->header.prev,
2104 new_share_entry);
2105
2106 /*
2107 * Fix up the task map entry to refer
2108 * to the sharing map now.
2109 */
2110
2111 old_entry->is_a_map = TRUE;
2112 old_entry->object.share_map = new_share_map;
2113 old_entry->offset = old_entry->start;
2114 }
2115
2116 /*
2117 * Clone the entry, referencing the sharing map.
2118 */
2119
2120 new_entry = vm_map_entry_create(new_map);
2121 *new_entry = *old_entry;
2122 vm_map_reference(new_entry->object.share_map);
2123
2124 /*
2125 * Insert the entry into the new map -- we
2126 * know we're inserting at the end of the new
2127 * map.
2128 */
2129
2130 vm_map_entry_link(new_map, new_map->header.prev,
2131 new_entry);
2132
2133 /*
2134 * Update the physical map
2135 */
2136
2137 pmap_copy(new_map->pmap, old_map->pmap,
2138 new_entry->start,
2139 (old_entry->end - old_entry->start),
2140 old_entry->start);
2141 break;
2142
2143 case VM_INHERIT_COPY:
2144 /*
2145 * Clone the entry and link into the map.
2146 */
2147
2148 new_entry = vm_map_entry_create(new_map);
2149 *new_entry = *old_entry;
2150 new_entry->wired_count = 0;
2151 new_entry->object.vm_object = NULL;
2152 new_entry->is_a_map = FALSE;
2153 vm_map_entry_link(new_map, new_map->header.prev,
2154 new_entry);
2155 if (old_entry->is_a_map) {
2156 int check;
2157
2158 check = vm_map_copy(new_map,
2159 old_entry->object.share_map,
2160 new_entry->start,
2161 (vm_size_t)(new_entry->end -
2162 new_entry->start),
2163 old_entry->offset,
2164 FALSE, FALSE);
2165 if (check != KERN_SUCCESS)
2166 printf("vmspace_fork: copy in share_map region failed\n");
2167 }
2168 else {
2169 vm_map_copy_entry(old_map, new_map, old_entry,
2170 new_entry);
2171 }
2172 break;
2173 }
2174 old_entry = old_entry->next;
2175 }
2176
2177 new_map->size = old_map->size;
2178 vm_map_unlock(old_map);
2179
2180 return(vm2);
2181}
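
/*
 * A minimal illustrative sketch of how the fork path might consume
 * vmspace_fork(); the struct proc member name "p_vmspace" here is an
 * assumption for illustration only.
 */
#ifdef notdef
static void
example_fork_vm(p1, p2)
	struct proc *p1;	/* parent */
	struct proc *p2;	/* child */
{
	/*
	 * The child's address space is derived from the parent's,
	 * entry by entry, according to the inheritance values
	 * handled in vmspace_fork() above.
	 */
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);	/* assumed field */
}
#endif /* notdef */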
2182
2183/*
2184 * vmspace_free:
2185 *
2186 * Clean up old parent vmspace references; the vmspace itself
2187 * is released only when its last reference is dropped.
2188 */
2189
2190void
2191vmspace_free(struct vmspace *vm)
2192{
2193 if (vm == 0 || --vm->vm_refcnt != 0) {
2194 return;
2195 }
2196 _vmspace_free(vm);
2197}
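
/*
 * A minimal illustrative sketch of the reference-count pairing that
 * vmspace_free() expects: a holder that bumps vm_refcnt drops it via
 * vmspace_free(), and the vmspace is reclaimed (through _vmspace_free())
 * only when the count reaches zero.  The helper name below is assumed.
 */
#ifdef notdef
static void
example_vmspace_use(vm)
	struct vmspace *vm;
{
	vm->vm_refcnt++;		/* take a reference */
	/* ... use the vmspace ... */
	vmspace_free(vm);		/* drop it; frees at refcnt == 0 */
}
#endif /* notdef */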
2198
2199
2200/*
2201 * vm_map_lookup:
2202 *
2203 * Finds the VM object, offset, and
2204 * protection for a given virtual address in the
2205 * specified map, assuming a page fault of the
2206 * type specified.
2207 *
2208 * Leaves the map in question locked for read; return
2209 * values are guaranteed until a vm_map_lookup_done
2210 * call is performed. Note that the map argument
2211 * is in/out; the returned map must be used in
2212 * the call to vm_map_lookup_done.
2213 *
2214 * A handle (out_entry) is returned for use in
2215 * vm_map_lookup_done, to make that fast.
2216 *
2217 * If a lookup is requested with "write protection"
2218 * specified, the map may be changed to perform virtual
2219 * copying operations, although the data referenced will
2220 * remain the same.
2221 */
4c45483e 2222int
55768178 2223vm_map_lookup(var_map, vaddr, fault_type, out_entry, object, offset, out_prot, wired, single_use)
2224 vm_map_t *var_map; /* IN/OUT */
2225 register vm_offset_t vaddr;
2226 register vm_prot_t fault_type;
2227
2228 vm_map_entry_t *out_entry; /* OUT */
2229 vm_object_t *object; /* OUT */
2230 vm_offset_t *offset; /* OUT */
2231 vm_prot_t *out_prot; /* OUT */
2232 boolean_t *wired; /* OUT */
2233 boolean_t *single_use; /* OUT */
2234{
2235 vm_map_t share_map;
2236 vm_offset_t share_offset;
2237 register vm_map_entry_t entry;
2238 register vm_map_t map = *var_map;
2239 register vm_prot_t prot;
2240 register boolean_t su;
2241
2242 RetryLookup: ;
2243
2244 /*
2245 * Look up the faulting address.
2246 */
2247
2248 vm_map_lock_read(map);
2249
2250#define RETURN(why) \
2251 { \
2252 vm_map_unlock_read(map); \
2253 return(why); \
2254 }
2255
2256 /*
2257 * If the map has an interesting hint, try it before calling
2258 * the full-blown lookup routine.
2259 */
2260
2261 simple_lock(&map->hint_lock);
2262 entry = map->hint;
2263 simple_unlock(&map->hint_lock);
2264
2265 *out_entry = entry;
2266
2267 if ((entry == &map->header) ||
2268 (vaddr < entry->start) || (vaddr >= entry->end)) {
2269 vm_map_entry_t tmp_entry;
2270
2271 /*
2272 * Entry was either not a valid hint, or the vaddr
2273 * was not contained in the entry, so do a full lookup.
2274 */
55768178 2275 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
15637ed4 2276 RETURN(KERN_INVALID_ADDRESS);
55768178 2277 }
2278
2279 entry = tmp_entry;
2280 *out_entry = entry;
2281 }
2282
2283 /*
2284 * Handle submaps.
2285 */
2286
2287 if (entry->is_sub_map) {
2288 vm_map_t old_map = map;
2289
2290 *var_map = map = entry->object.sub_map;
2291 vm_map_unlock_read(old_map);
2292 goto RetryLookup;
2293 }
2294
2295 /*
2296 * Check whether this task is allowed to have
2297 * this page.
2298 */
2299
2300 prot = entry->protection;
2301 if ((fault_type & (prot)) != fault_type)
2302 RETURN(KERN_PROTECTION_FAILURE);
2303
2304 /*
2305 * If this page is not pageable, we have to get
2306 * it for all possible accesses.
2307 */
2308
2309 if (*wired = (entry->wired_count != 0))
2310 prot = fault_type = entry->protection;
2311
2312 /*
2313 * If we don't already have a VM object, track
2314 * it down.
2315 */
2316
2317 if (su = !entry->is_a_map) {
2318 share_map = map;
2319 share_offset = vaddr;
2320 }
2321 else {
2322 vm_map_entry_t share_entry;
2323
2324 /*
2325 * Compute the sharing map, and offset into it.
2326 */
2327
2328 share_map = entry->object.share_map;
2329 share_offset = (vaddr - entry->start) + entry->offset;
2330
2331 /*
2332 * Look for the backing store object and offset
2333 */
2334
2335 vm_map_lock_read(share_map);
2336
2337 if (!vm_map_lookup_entry(share_map, share_offset,
2338 &share_entry)) {
2339 vm_map_unlock_read(share_map);
2340 RETURN(KERN_INVALID_ADDRESS);
2341 }
2342 entry = share_entry;
2343 }
2344
2345 /*
2346 * If the entry was copy-on-write, we either copy the object now or demote the access allowed.
2347 */
2348
2349 if (entry->needs_copy) {
2350 /*
2351 * If we want to write the page, we may as well
2352 * handle that now since we've got the sharing
2353 * map locked.
2354 *
2355 * If we don't need to write the page, we just
2356 * demote the permissions allowed.
2357 */
2358
2359 if (fault_type & VM_PROT_WRITE) {
2360 /*
2361 * Make a new object, and place it in the
2362 * object chain. Note that no new references
2363 * have appeared -- one just moved from the
2364 * share map to the new object.
2365 */
2366
2367 if (lock_read_to_write(&share_map->lock)) {
2368 if (share_map != map)
2369 vm_map_unlock_read(map);
2370 goto RetryLookup;
2371 }
2372
2373 vm_object_shadow(
2374 &entry->object.vm_object,
2375 &entry->offset,
2376 (vm_size_t) (entry->end - entry->start));
2377
2378 entry->needs_copy = FALSE;
2379
2380 lock_write_to_read(&share_map->lock);
2381 }
2382 else {
2383 /*
2384 * We're attempting to read a copy-on-write
2385 * page -- don't allow writes.
2386 */
2387
2388 prot &= (~VM_PROT_WRITE);
2389 }
2390 }
2391
2392 /*
2393 * Create an object if necessary.
2394 */
2395 if (entry->object.vm_object == NULL) {
2396
2397 if (lock_read_to_write(&share_map->lock)) {
2398 if (share_map != map)
2399 vm_map_unlock_read(map);
2400 goto RetryLookup;
2401 }
2402
2403 entry->object.vm_object = vm_object_allocate(
2404 (vm_size_t)(entry->end - entry->start));
2405 entry->offset = 0;
2406 lock_write_to_read(&share_map->lock);
2407 }
2408
2409 /*
2410 * Return the object/offset from this entry. If the entry
2411 * was copy-on-write or empty, it has been fixed up.
2412 */
2413
2414 *offset = (share_offset - entry->start) + entry->offset;
2415 *object = entry->object.vm_object;
2416
2417 /*
2418 * Return whether this is the only map sharing this data.
2419 */
2420
2421 if (!su) {
2422 simple_lock(&share_map->ref_lock);
2423 su = (share_map->ref_count == 1);
2424 simple_unlock(&share_map->ref_lock);
2425 }
2426
2427 *out_prot = prot;
2428 *single_use = su;
2429
2430 return(KERN_SUCCESS);
2431
2432#undef RETURN
2433}
2434
2435/*
2436 * vm_map_lookup_done:
2437 *
2438 * Releases locks acquired by a vm_map_lookup
2439 * (according to the handle returned by that lookup).
2440 */
2441
2442void
2443vm_map_lookup_done(map, entry)
2444 register vm_map_t map;
2445 vm_map_entry_t entry;
2446{
2447 /*
2448 * If this entry references a map, unlock it first.
2449 */
2450
2451 if (entry->is_a_map)
2452 vm_map_unlock_read(entry->object.share_map);
2453
2454 /*
2455 * Unlock the main-level map
2456 */
2457
2458 vm_map_unlock_read(map);
2459}
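
/*
 * A minimal illustrative sketch of the lookup/lookup-done protocol
 * described above: the map argument is in/out, and the entry handle
 * returned by vm_map_lookup() must be handed back to
 * vm_map_lookup_done().  The function and variable names in this
 * sketch are assumptions for illustration.
 */
#ifdef notdef
static int
example_fault_lookup(map, va)
	vm_map_t map;			/* may be replaced by a submap */
	vm_offset_t va;
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired, single_use;
	int rv;

	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
	    &offset, &prot, &wired, &single_use);
	if (rv != KERN_SUCCESS)
		return(rv);

	/* ... resolve the fault using (object, offset) here ... */

	vm_map_lookup_done(map, entry);	/* use the possibly-updated map */
	return(KERN_SUCCESS);
}
#endif /* notdef */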
2460
2461/*
2462 * Routine: vm_map_simplify
2463 * Purpose:
2464 * Attempt to simplify the map representation in
2465 * the vicinity of the given starting address.
2466 * Note:
2467 * This routine is intended primarily to keep the
2468 * kernel maps more compact -- they generally don't
2469 * benefit from the "expand a map entry" technology
2470 * at allocation time because the adjacent entry
2471 * is often wired down.
2472 */
2473void
2474vm_map_simplify(map, start)
2475 vm_map_t map;
2476 vm_offset_t start;
2477{
2478 vm_map_entry_t this_entry;
2479 vm_map_entry_t prev_entry;
2480
2481 vm_map_lock(map);
2482 if (
2483 (vm_map_lookup_entry(map, start, &this_entry)) &&
2484 ((prev_entry = this_entry->prev) != &map->header) &&
2485
2486 (prev_entry->end == start) &&
2487 (map->is_main_map) &&
2488
2489 (prev_entry->is_a_map == FALSE) &&
2490 (prev_entry->is_sub_map == FALSE) &&
2491
2492 (this_entry->is_a_map == FALSE) &&
2493 (this_entry->is_sub_map == FALSE) &&
2494
2495 (prev_entry->inheritance == this_entry->inheritance) &&
2496 (prev_entry->protection == this_entry->protection) &&
2497 (prev_entry->max_protection == this_entry->max_protection) &&
2498 (prev_entry->wired_count == this_entry->wired_count) &&
2499
2500 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2501 (prev_entry->needs_copy == this_entry->needs_copy) &&
2502
2503 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2504 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2505 == this_entry->offset)
2506 ) {
2507 if (map->first_free == this_entry)
2508 map->first_free = prev_entry;
2509
2510 if (!this_entry->object.vm_object->paging_in_progress) {
2511 SAVE_HINT(map, prev_entry);
2512 vm_map_entry_unlink(map, this_entry);
2513 prev_entry->end = this_entry->end;
2514 vm_object_deallocate(this_entry->object.vm_object);
2515 vm_map_entry_dispose(map, this_entry);
2516 }
2517 }
2518 vm_map_unlock(map);
2519}
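
/*
 * A minimal illustrative sketch of a vm_map_simplify() caller: after
 * changing a kernel map, a client might ask for the entry containing
 * a given address to be coalesced with an identical neighbor.  The
 * helper name is assumed.
 */
#ifdef notdef
static void
example_coalesce(map, addr)
	vm_map_t map;
	vm_offset_t addr;
{
	vm_map_simplify(map, addr);
}
#endif /* notdef */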
2520
d55b6cf4 2521#if defined(DEBUG) || (NDDB > 0)
2522/*
2523 * vm_map_print: [ debug ]
2524 */
2525void
2526vm_map_print(map, full)
2527 register vm_map_t map;
2528 boolean_t full;
2529{
2530 register vm_map_entry_t entry;
2531 extern int indent;
55768178 2532 static int nmaps;
15637ed4 2533
2534 if (indent == 0)
2535 nmaps = 0;
2536 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2537 (map->is_main_map ? "Task" : "Share"),
2538 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2539 map->timestamp);
2540
55768178 2541/*
2542 if (!full && indent)
2543 return;
55768178 2544*/
2545
2546 indent += 2;
2547 for (entry = map->header.next; entry != &map->header;
2548 entry = entry->next) {
2549 nmaps++;
2550 if (full || indent == 2) {
2551 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2552 (int) entry, (int) entry->start, (int) entry->end);
2553 if (map->is_main_map) {
2554 static char *inheritance_name[4] =
2555 { "share", "copy", "none", "donate_copy"};
2556 printf("prot=%x/%x/%s, ",
2557 entry->protection,
2558 entry->max_protection,
2559 inheritance_name[entry->inheritance]);
2560 if (entry->wired_count != 0)
2561 printf("wired, ");
2562 }
55768178 2563 }
2564
2565 if (entry->is_a_map || entry->is_sub_map) {
55768178 2566 if (full || indent == 2)
2567 printf("share=0x%x, offset=0x%x\n",
2568 (int) entry->object.share_map,
2569 (int) entry->offset);
2570 if ((entry->prev == &map->header) ||
2571 (!entry->prev->is_a_map) ||
2572 (entry->prev->object.share_map !=
2573 entry->object.share_map)) {
2574 indent += 2;
2575 vm_map_print(entry->object.share_map, full);
2576 indent -= 2;
2577 }
2578
2579 }
2580 else if (full || indent == 2) {
2581
2582 printf("object=0x%x, offset=0x%x",
2583 (int) entry->object.vm_object,
2584 (int) entry->offset);
2585 if (entry->copy_on_write)
2586 printf(", copy (%s)",
2587 entry->needs_copy ? "needed" : "done");
2588 printf("\n");
2589
2590 if ((entry->prev == &map->header) ||
2591 (entry->prev->is_a_map) ||
2592 (entry->prev->object.vm_object !=
2593 entry->object.vm_object)) {
2594 indent += 2;
2595 vm_object_print(entry->object.vm_object, full);
2596 indent -= 2;
2597 }
2598 }
2599 }
2600 indent -= 2;
2601
2602 if (indent == 0)
2603 printf("nmaps=%d\n", nmaps);
15637ed4 2604}
55768178 2605#endif