lock_init => lockinit
[unix-history] usr/src/sys/vm/vm_map.c
index 9669662..b2daa99 100644
@@ -1,28 +1,51 @@
 /* 
- * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
- * Copyright (c) 1987 Carnegie-Mellon University
- * Copyright (c) 1991 Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1991, 1993
+ *     The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
- * The CMU software License Agreement specifies the terms and conditions
- * for use and redistribution.
+ * %sccs.include.redist.c%
+ *
+ *     @(#)vm_map.c    8.7 (Berkeley) %G%
+ *
+ *
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ * 
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ * 
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ * 
+ * Carnegie Mellon requests users of this software to return to
  *
- *     @(#)vm_map.c    7.1 (Berkeley) %G%
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
  */
 
 /*
  *     Virtual memory mapping module.
  */
 
-#include "types.h"
-#include "malloc.h"
-#include "../vm/vm_param.h"
-#include "../vm/vm_map.h"
-#include "../vm/vm_page.h"
-#include "../vm/vm_object.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
 
 /*
  *     Virtual memory maps provide for the mapping, protection,
@@ -67,7 +90,7 @@
  */
 
 /*
- *     vm_map_init:
+ *     vm_map_startup:
  *
  *     Initialize the vm_map module.  Must be called before
  *     any other vm_map routines.
@@ -87,7 +110,11 @@ vm_size_t   kentry_data_size;
 vm_map_entry_t kentry_free;
 vm_map_t       kmap_free;
 
-void vm_map_init()
+static void    _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
+static void    _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
+
+void
+vm_map_startup()
 {
        register int i;
        register vm_map_entry_t mep;
@@ -103,7 +130,7 @@ void vm_map_init()
                mp->header.next = (vm_map_entry_t) (mp + 1);
                mp++;
        }
-       mp++->header.next = VM_MAP_ENTRY_NULL;
+       mp++->header.next = NULL;
 
        /*
         * Form a free list of statically allocated kernel map entries
@@ -115,7 +142,47 @@ void vm_map_init()
                mep->next = mep + 1;
                mep++;
        }
-       mep->next = VM_MAP_ENTRY_NULL;
+       mep->next = NULL;
+}
+
+/*
+ * Allocate a vmspace structure, including a vm_map and pmap,
+ * and initialize those structures.  The refcnt is set to 1.
+ * The remaining fields must be initialized by the caller.
+ */
+struct vmspace *
+vmspace_alloc(min, max, pageable)
+       vm_offset_t min, max;
+       int pageable;
+{
+       register struct vmspace *vm;
+
+       MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
+       bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
+       vm_map_init(&vm->vm_map, min, max, pageable);
+       pmap_pinit(&vm->vm_pmap);
+       vm->vm_map.pmap = &vm->vm_pmap;         /* XXX */
+       vm->vm_refcnt = 1;
+       return (vm);
+}
+
+void
+vmspace_free(vm)
+       register struct vmspace *vm;
+{
+
+       if (--vm->vm_refcnt == 0) {
+               /*
+                * Lock the map, to wait out all other references to it.
+                * Delete all of the mappings and pages they hold,
+                * then call the pmap module to reclaim anything left.
+                */
+               vm_map_lock(&vm->vm_map);
+               (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
+                   vm->vm_map.max_offset);
+               pmap_release(&vm->vm_pmap);
+               FREE(vm, M_VMMAP);
+       }
 }
 
 /*
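The vmspace_alloc/vmspace_free pair added above introduces a reference-counted container for a process's vm_map and pmap: vm_refcnt starts at 1, and the last vmspace_free deletes the mappings and releases the pmap. Below is a minimal user-space sketch of the same ownership discipline; struct space, space_alloc and space_free are hypothetical illustrations, not the kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for struct vmspace: one refcounted resource. */
struct space {
    int  refcnt;                /* starts at 1, like vm_refcnt */
    char name[16];
};

static struct space *
space_alloc(const char *name)
{
    struct space *s = malloc(sizeof(*s));

    if (s == NULL)
        abort();
    memset(s, 0, sizeof(*s));
    strncpy(s->name, name, sizeof(s->name) - 1);
    s->refcnt = 1;
    return (s);
}

static void
space_free(struct space *s)
{
    /* Only the final reference actually tears the resource down. */
    if (--s->refcnt == 0) {
        printf("releasing %s\n", s->name);
        free(s);
    }
}

int
main(void)
{
    struct space *s = space_alloc("init");

    s->refcnt++;            /* a second holder of the structure */
    space_free(s);          /* 2 -> 1: nothing released yet */
    space_free(s);          /* 1 -> 0: resources reclaimed */
    return (0);
}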
@@ -125,61 +192,85 @@ void vm_map_init()
  *     the given physical map structure, and having
  *     the given lower and upper address bounds.
  */
-vm_map_t vm_map_create(pmap, min, max, pageable)
+vm_map_t
+vm_map_create(pmap, min, max, pageable)
        pmap_t          pmap;
        vm_offset_t     min, max;
        boolean_t       pageable;
 {
        register vm_map_t       result;
-       extern vm_map_t         kernel_map, kmem_map;
+       extern vm_map_t         kmem_map;
 
-       if (kmem_map == VM_MAP_NULL) {
+       if (kmem_map == NULL) {
                result = kmap_free;
+               if (result == NULL)
+                       panic("vm_map_create: out of maps");
                kmap_free = (vm_map_t) result->header.next;
        } else
                MALLOC(result, vm_map_t, sizeof(struct vm_map),
                       M_VMMAP, M_WAITOK);
 
-       if (result == VM_MAP_NULL)
-               panic("vm_map_create: out of maps");
-
-       result->header.next = result->header.prev = &result->header;
-       result->nentries = 0;
-       result->size = 0;
-       result->ref_count = 1;
+       vm_map_init(result, min, max, pageable);
        result->pmap = pmap;
-       result->is_main_map = TRUE;
-       result->min_offset = min;
-       result->max_offset = max;
-       result->entries_pageable = pageable;
-       result->first_free = &result->header;
-       result->hint = &result->header;
-       result->timestamp = 0;
-       lock_init(&result->lock, TRUE);
-       simple_lock_init(&result->ref_lock);
-       simple_lock_init(&result->hint_lock);
        return(result);
 }
 
+/*
+ * Initialize an existing vm_map structure
+ * such as that in the vmspace structure.
+ * The pmap is set elsewhere.
+ */
+void
+vm_map_init(map, min, max, pageable)
+       register struct vm_map *map;
+       vm_offset_t     min, max;
+       boolean_t       pageable;
+{
+       map->header.next = map->header.prev = &map->header;
+       map->nentries = 0;
+       map->size = 0;
+       map->ref_count = 1;
+       map->is_main_map = TRUE;
+       map->min_offset = min;
+       map->max_offset = max;
+       map->entries_pageable = pageable;
+       map->first_free = &map->header;
+       map->hint = &map->header;
+       map->timestamp = 0;
+       lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
+       simple_lock_init(&map->ref_lock);
+       simple_lock_init(&map->hint_lock);
+}
+
 /*
  *     vm_map_entry_create:    [ internal use only ]
  *
  *     Allocates a VM map entry for insertion.
  *     No entry fields are filled in.  This routine is
  */
-vm_map_entry_t vm_map_entry_create(map)
+vm_map_entry_t
+vm_map_entry_create(map)
        vm_map_t        map;
 {
        vm_map_entry_t  entry;
-       extern vm_map_t         kernel_map, kmem_map, mb_map;
-
-       if (map == kernel_map || map == kmem_map || map == mb_map) {
-               if (entry = kentry_free)
-                       kentry_free = kentry_free->next;
-       } else
+#ifdef DEBUG
+       extern vm_map_t         kernel_map, kmem_map, mb_map, pager_map;
+       boolean_t               isspecial;
+
+       isspecial = (map == kernel_map || map == kmem_map ||
+                    map == mb_map || map == pager_map);
+       if (isspecial && map->entries_pageable ||
+           !isspecial && !map->entries_pageable)
+               panic("vm_map_entry_create: bogus map");
+#endif
+       if (map->entries_pageable) {
                MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
                       M_VMMAPENT, M_WAITOK);
-       if (entry == VM_MAP_ENTRY_NULL)
+       } else {
+               if (entry = kentry_free)
+                       kentry_free = kentry_free->next;
+       }
+       if (entry == NULL)
                panic("vm_map_entry_create: out of map entries");
 
        return(entry);
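As reworked above, vm_map_entry_create takes entries for non-pageable (kernel) maps from the statically allocated kentry_free list and only calls MALLOC for pageable maps, so allocating an entry for the kernel's own maps does not have to call the general allocator, which itself depends on those maps. The stand-alone sketch below models that static-pool-with-heap choice; entry_startup, entry_create and entry_dispose are hypothetical names, not the kernel routines.

#include <stdio.h>
#include <stdlib.h>

struct entry {
    struct entry *next;
    int from_pool;              /* 1 if taken from the static pool */
};

#define NSTATIC 4
static struct entry pool[NSTATIC];
static struct entry *entry_free;        /* models kentry_free */

/* One-time setup: thread the static entries onto a free list. */
static void
entry_startup(void)
{
    int i;

    for (i = 0; i < NSTATIC - 1; i++)
        pool[i].next = &pool[i + 1];
    pool[NSTATIC - 1].next = NULL;
    entry_free = &pool[0];
}

/* pageable == 0 models the kernel maps: use the static pool only. */
static struct entry *
entry_create(int pageable)
{
    struct entry *e;

    if (pageable) {
        e = malloc(sizeof(*e));
        if (e != NULL)
            e->from_pool = 0;
    } else if ((e = entry_free) != NULL) {
        entry_free = e->next;
        e->from_pool = 1;
    }
    if (e == NULL) {
        fprintf(stderr, "entry_create: out of map entries\n");
        exit(1);
    }
    return (e);
}

static void
entry_dispose(struct entry *e)
{
    if (e->from_pool) {
        e->next = entry_free;
        entry_free = e;
    } else
        free(e);
}

int
main(void)
{
    entry_startup();
    entry_dispose(entry_create(0));     /* from the static pool */
    entry_dispose(entry_create(1));     /* from the heap */
    return (0);
}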
@@ -190,17 +281,27 @@ vm_map_entry_t vm_map_entry_create(map)
  *
  *     Inverse of vm_map_entry_create.
  */
-void vm_map_entry_dispose(map, entry)
+void
+vm_map_entry_dispose(map, entry)
        vm_map_t        map;
        vm_map_entry_t  entry;
 {
-       extern vm_map_t         kernel_map, kmem_map, mb_map;
-
-       if (map == kernel_map || map == kmem_map || map == mb_map) {
+#ifdef DEBUG
+       extern vm_map_t         kernel_map, kmem_map, mb_map, pager_map;
+       boolean_t               isspecial;
+
+       isspecial = (map == kernel_map || map == kmem_map ||
+                    map == mb_map || map == pager_map);
+       if (isspecial && map->entries_pageable ||
+           !isspecial && !map->entries_pageable)
+               panic("vm_map_entry_dispose: bogus map");
+#endif
+       if (map->entries_pageable) {
+               FREE(entry, M_VMMAPENT);
+       } else {
                entry->next = kentry_free;
                kentry_free = entry;
-       } else
-               FREE(entry, M_VMMAPENT);
+       }
 }
 
 /*
@@ -229,10 +330,11 @@ void vm_map_entry_dispose(map, entry)
  *     Creates another valid reference to the given map.
  *
  */
-void vm_map_reference(map)
+void
+vm_map_reference(map)
        register vm_map_t       map;
 {
-       if (map == VM_MAP_NULL)
+       if (map == NULL)
                return;
 
        simple_lock(&map->ref_lock);
@@ -247,12 +349,13 @@ void vm_map_reference(map)
  *     destroying it if no references remain.
  *     The map should not be locked.
  */
-void vm_map_deallocate(map)
+void
+vm_map_deallocate(map)
        register vm_map_t       map;
 {
        register int            c;
 
-       if (map == VM_MAP_NULL)
+       if (map == NULL)
                return;
 
        simple_lock(&map->ref_lock);
@@ -278,7 +381,7 @@ void vm_map_deallocate(map)
 }
 
 /*
- *     vm_map_insert:  [ internal use only ]
+ *     vm_map_insert:
  *
  *     Inserts the given whole VM object into the target
  *     map at the specified address range.  The object's
@@ -286,6 +389,7 @@ void vm_map_deallocate(map)
  *
  *     Requires that the map be locked, and leaves it so.
  */
+int
 vm_map_insert(map, object, offset, start, end)
        vm_map_t        map;
        vm_object_t     object;
@@ -330,7 +434,7 @@ vm_map_insert(map, object, offset, start, end)
         *      extending one of our neighbors.
         */
 
-       if (object == VM_OBJECT_NULL) {
+       if (object == NULL) {
                if ((prev_entry != &map->header) &&
                    (prev_entry->end == start) &&
                    (map->is_main_map) &&
@@ -342,7 +446,7 @@ vm_map_insert(map, object, offset, start, end)
                    (prev_entry->wired_count == 0)) {
 
                        if (vm_object_coalesce(prev_entry->object.vm_object,
-                                       VM_OBJECT_NULL,
+                                       NULL,
                                        prev_entry->offset,
                                        (vm_offset_t) 0,
                                        (vm_size_t)(prev_entry->end
@@ -421,7 +525,8 @@ vm_map_insert(map, object, offset, start, end)
  *     result indicates whether the address is
  *     actually contained in the map.
  */
-boolean_t vm_map_lookup_entry(map, address, entry)
+boolean_t
+vm_map_lookup_entry(map, address, entry)
        register vm_map_t       map;
        register vm_offset_t    address;
        vm_map_entry_t          *entry;         /* OUT */
@@ -492,6 +597,63 @@ boolean_t vm_map_lookup_entry(map, address, entry)
        return(FALSE);
 }
 
+/*
+ * Find sufficient space for `length' bytes in the given map, starting at
+ * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
+ */
+int
+vm_map_findspace(map, start, length, addr)
+       register vm_map_t map;
+       register vm_offset_t start;
+       vm_size_t length;
+       vm_offset_t *addr;
+{
+       register vm_map_entry_t entry, next;
+       register vm_offset_t end;
+
+       if (start < map->min_offset)
+               start = map->min_offset;
+       if (start > map->max_offset)
+               return (1);
+
+       /*
+        * Look for the first possible address; if there's already
+        * something at this address, we have to start after it.
+        */
+       if (start == map->min_offset) {
+               if ((entry = map->first_free) != &map->header)
+                       start = entry->end;
+       } else {
+               vm_map_entry_t tmp;
+               if (vm_map_lookup_entry(map, start, &tmp))
+                       start = tmp->end;
+               entry = tmp;
+       }
+
+       /*
+        * Look through the rest of the map, trying to fit a new region in
+        * the gap between existing regions, or after the very last region.
+        */
+       for (;; start = (entry = next)->end) {
+               /*
+                * Find the end of the proposed new region.  Be sure we didn't
+                * go beyond the end of the map, or wrap around the address;
+                * if so, we lose.  Otherwise, if this is the last entry, or
+                * if the proposed new region fits before the next entry, we
+                * win.
+                */
+               end = start + length;
+               if (end > map->max_offset || end < start)
+                       return (1);
+               next = entry->next;
+               if (next == &map->header || next->start >= end)
+                       break;
+       }
+       SAVE_HINT(map, entry);
+       *addr = start;
+       return (0);
+}
+
 /*
  *     vm_map_find finds an unallocated region in the target address
  *     map with the given length.  The search is defined to be
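The new vm_map_findspace above does a first-fit scan: start at the requested address (bumped past any entry that already covers it), then walk the gaps between entries until `length' bytes fit, failing when the candidate region wraps around or runs past the end of the map. A self-contained model of that gap search over a sorted array of allocated ranges is sketched below, for illustration only; it assumes the ranges lie inside [map_min, map_max) and omits the hint handling.

#include <stdio.h>

/* Allocated regions: sorted by start, non-overlapping, inside the map. */
struct range { unsigned long start, end; };     /* [start, end) */

/*
 * First-fit search for `length' bytes in [map_min, map_max) that does
 * not overlap any range in r[0..n).  Returns 0 and sets *addr on
 * success, 1 if there is no room.
 */
static int
findspace(const struct range *r, int n, unsigned long map_min,
    unsigned long map_max, unsigned long length, unsigned long *addr)
{
    unsigned long start = map_min, end;
    int i;

    for (i = 0; i <= n; i++) {
        end = start + length;
        /* Past the end of the map, or wrapped around the address space. */
        if (end > map_max || end < start)
            return (1);
        /* Fits before the next allocated range (or there is none left). */
        if (i == n || r[i].start >= end) {
            *addr = start;
            return (0);
        }
        /* Did not fit; retry in the gap after this range. */
        start = r[i].end;
    }
    return (1);
}

int
main(void)
{
    struct range used[] = { { 0x1000, 0x3000 }, { 0x5000, 0x6000 } };
    unsigned long addr;

    if (findspace(used, 2, 0x1000, 0x10000, 0x2500, &addr) == 0)
        printf("found 0x2500 bytes at 0x%lx\n", addr);  /* 0x6000 */
    return (0);
}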
@@ -499,6 +661,7 @@ boolean_t vm_map_lookup_entry(map, address, entry)
  *     returned in the same parameter.
  *
  */
+int
 vm_map_find(map, object, offset, addr, length, find_space)
        vm_map_t        map;
        vm_object_t     object;
@@ -507,97 +670,21 @@ vm_map_find(map, object, offset, addr, length, find_space)
        vm_size_t       length;
        boolean_t       find_space;
 {
-       register vm_map_entry_t entry;
        register vm_offset_t    start;
-       register vm_offset_t    end;
        int                     result;
 
        start = *addr;
-
        vm_map_lock(map);
-
        if (find_space) {
-               /*
-                *      Calculate the first possible address.
-                */
-
-               if (start < map->min_offset)
-                       start = map->min_offset;
-               if (start > map->max_offset) {
+               if (vm_map_findspace(map, start, length, addr)) {
                        vm_map_unlock(map);
                        return (KERN_NO_SPACE);
                }
-
-               /*
-                *      Look for the first possible address;
-                *      if there's already something at this
-                *      address, we have to start after it.
-                */
-
-               if (start == map->min_offset) {
-                       if ((entry = map->first_free) != &map->header)
-                               start = entry->end;
-               } else {
-                       vm_map_entry_t  tmp_entry;
-                       if (vm_map_lookup_entry(map, start, &tmp_entry))
-                               start = tmp_entry->end;
-                       entry = tmp_entry;
-               }
-
-               /*
-                *      In any case, the "entry" always precedes
-                *      the proposed new region throughout the
-                *      loop:
-                */
-
-               while (TRUE) {
-                       register vm_map_entry_t next;
-
-                       /*
-                        *      Find the end of the proposed new region.
-                        *      Be sure we didn't go beyond the end, or
-                        *      wrap around the address.
-                        */
-
-                       end = start + length;
-
-                       if ((end > map->max_offset) || (end < start)) {
-                               vm_map_unlock(map);
-                               return (KERN_NO_SPACE);
-                       }
-
-                       /*
-                        *      If there are no more entries, we must win.
-                        */
-
-                       next = entry->next;
-                       if (next == &map->header)
-                               break;
-
-                       /*
-                        *      If there is another entry, it must be
-                        *      after the end of the potential new region.
-                        */
-
-                       if (next->start >= end)
-                               break;
-
-                       /*
-                        *      Didn't fit -- move to the next entry.
-                        */
-
-                       entry = next;
-                       start = entry->end;
-               }
-               *addr = start;
-               
-               SAVE_HINT(map, entry);
+               start = *addr;
        }
        }
-
        result = vm_map_insert(map, object, offset, start, start + length);
        result = vm_map_insert(map, object, offset, start, start + length);
-
        vm_map_unlock(map);
        vm_map_unlock(map);
-       return(result);
+       return (result);
 }
 
 /*
 }
 
 /*
@@ -607,13 +694,14 @@ vm_map_find(map, object, offset, addr, length, find_space)
  *             removing extra sharing maps
  *             [XXX maybe later] merging with a neighbor
  */
  *             removing extra sharing maps
+void
+vm_map_simplify_entry(map, entry)
        vm_map_t        map;
        vm_map_entry_t  entry;
 {
 #ifdef lint
        map++;
        vm_map_t        map;
        vm_map_entry_t  entry;
 {
 #ifdef lint
        map++;
-#endif lint
+#endif
 
        /*
         *      If this entry corresponds to a sharing map, then
 
        /*
         *      If this entry corresponds to a sharing map, then
@@ -642,7 +730,7 @@ void vm_map_simplify_entry(map, entry)
                         * Later.
                         */
                }
                         * Later.
                         */
                }
-#endif 0
+#endif
        }
        else {
                /*
        }
        else {
                /*
@@ -681,7 +769,8 @@ void vm_map_simplify_entry(map, entry)
  *     This routine is called only when it is known that
  *     the entry must be split.
  */
  *     This routine is called only when it is known that
+static void
+_vm_map_clip_start(map, entry, start)
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    start;
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    start;
@@ -725,7 +814,6 @@ void _vm_map_clip_start(map, entry, start)
  *     it splits the entry into two.
  */
 
  *     it splits the entry into two.
 #define vm_map_clip_end(map, entry, endaddr) \
 { \
        if (endaddr < entry->end) \
 #define vm_map_clip_end(map, entry, endaddr) \
 { \
        if (endaddr < entry->end) \
@@ -736,7 +824,8 @@ void _vm_map_clip_end();
  *     This routine is called only when it is known that
  *     the entry must be split.
  */
  *     This routine is called only when it is known that
+static void
+_vm_map_clip_end(map, entry, end)
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    end;
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    end;
@@ -796,6 +885,7 @@ void _vm_map_clip_end(map, entry, end)
  *     range from the superior map, and then destroy the
  *     submap (if desired).  [Better yet, don't try it.]
  */
  *     range from the superior map, and then destroy the
 vm_map_submap(map, start, end, submap)
        register vm_map_t       map;
        register vm_offset_t    start;
 vm_map_submap(map, start, end, submap)
        register vm_map_t       map;
        register vm_offset_t    start;
@@ -819,7 +909,7 @@ vm_map_submap(map, start, end, submap)
 
        if ((entry->start == start) && (entry->end == end) &&
            (!entry->is_a_map) &&
 
        if ((entry->start == start) && (entry->end == end) &&
            (!entry->is_a_map) &&
-           (entry->object.vm_object == VM_OBJECT_NULL) &&
+           (entry->object.vm_object == NULL) &&
            (!entry->copy_on_write)) {
                entry->is_a_map = FALSE;
                entry->is_sub_map = TRUE;
            (!entry->copy_on_write)) {
                entry->is_a_map = FALSE;
                entry->is_sub_map = TRUE;
@@ -839,6 +929,7 @@ vm_map_submap(map, start, end, submap)
  *     specified, the maximum protection is to be set;
  *     otherwise, only the current protection is affected.
  */
  *     specified, the maximum protection is to be set;
 vm_map_protect(map, start, end, new_prot, set_max)
        register vm_map_t       map;
        register vm_offset_t    start;
 vm_map_protect(map, start, end, new_prot, set_max)
        register vm_map_t       map;
        register vm_offset_t    start;
@@ -960,6 +1051,7 @@ vm_map_protect(map, start, end, new_prot, set_max)
  *     affects how the map will be shared with
  *     child maps at the time of vm_map_fork.
  */
  *     affects how the map will be shared with
 vm_map_inherit(map, start, end, new_inheritance)
        register vm_map_t       map;
        register vm_offset_t    start;
 vm_map_inherit(map, start, end, new_inheritance)
        register vm_map_t       map;
        register vm_offset_t    start;
@@ -1012,6 +1104,7 @@ vm_map_inherit(map, start, end, new_inheritance)
  *     The map must not be locked, but a reference
  *     must remain to the map throughout the call.
  */
  *     The map must not be locked, but a reference
 vm_map_pageable(map, start, end, new_pageable)
        register vm_map_t       map;
        register vm_offset_t    start;
 vm_map_pageable(map, start, end, new_pageable)
        register vm_map_t       map;
        register vm_offset_t    start;
@@ -1019,7 +1112,9 @@ vm_map_pageable(map, start, end, new_pageable)
        register boolean_t      new_pageable;
 {
        register vm_map_entry_t entry;
        register boolean_t      new_pageable;
 {
        register vm_map_entry_t entry;
-       vm_map_entry_t          temp_entry;
+       vm_map_entry_t          start_entry;
+       register vm_offset_t    failed;
+       int                     rv;
 
        vm_map_lock(map);
 
 
         *      for the entire region.  We do so before making any changes.
         */
 
         *      for the entire region.  We do so before making any changes.
-               entry = temp_entry;
-               vm_map_clip_start(map, entry, start);
+       if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
+               vm_map_unlock(map);
+               return(KERN_INVALID_ADDRESS);
        }
        }
-       else
-               entry = temp_entry->next;
-       temp_entry = entry;
+       entry = start_entry;
 
        /*
         *      Actions are rather different for wiring and unwiring,
 
        /*
         *      Actions are rather different for wiring and unwiring,
@@ -1048,13 +1141,19 @@ vm_map_pageable(map, start, end, new_pageable)
 
        if (new_pageable) {
 
 
+
                /*
                 *      Unwiring.  First ensure that the range to be
                /*
                 *      Unwiring.  First ensure that the range to be
-                *      unwired is really wired down.
+                *      unwired is really wired down and that there
+                *      are no holes.
                 */
                while ((entry != &map->header) && (entry->start < end)) {
 
-                   if (entry->wired_count == 0) {
+                   if (entry->wired_count == 0 ||
+                       (entry->end < end &&
+                        (entry->next == &map->header ||
+                         entry->next->start > entry->end))) {
                        vm_map_unlock(map);
                        return(KERN_INVALID_ARGUMENT);
                    }
@@ -1066,9 +1165,9 @@ vm_map_pageable(map, start, end, new_pageable)
                 *      If a region becomes completely unwired,
                 *      unwire its physical pages and mappings.
                 */
-               lock_set_recursive(&map->lock);
+               vm_map_set_recursive(&map->lock);
 
-               entry = temp_entry;
+               entry = start_entry;
                while ((entry != &map->header) && (entry->start < end)) {
                    vm_map_clip_end(map, entry, end);
 
@@ -1078,17 +1177,19 @@ vm_map_pageable(map, start, end, new_pageable)
 
                    entry = entry->next;
                }
-               lock_clear_recursive(&map->lock);
+               vm_map_clear_recursive(&map->lock);
        }
 
        else {
                /*
                 *      Wiring.  We must do this in two passes:
                 *
-                *      1.  Holding the write lock, we increment the
-                *          wiring count.  For any area that is not already
-                *          wired, we create any shadow objects that need
-                *          to be created.
+                *      1.  Holding the write lock, we create any shadow
+                *          or zero-fill objects that need to be created.
+                *          Then we clip each map entry to the region to be
+                *          wired and increment its wiring count.  We
+                *          create objects before clipping the map entries
+                *          to avoid object proliferation.
                 *
                 *      2.  We downgrade to a read lock, and call
                 *          vm_fault_wire to fault in the pages for any
@@ -1109,12 +1210,8 @@ vm_map_pageable(map, start, end, new_pageable)
                /*
                 *      Pass 1.
                 */
-               entry = temp_entry;
                while ((entry != &map->header) && (entry->start < end)) {
-                   vm_map_clip_end(map, entry, end);
-
-                   entry->wired_count++;
-                   if (entry->wired_count == 1) {
+                   if (entry->wired_count == 0) {
 
                        /*
                         *      Perform actions of vm_map_lookup that need
@@ -1136,7 +1233,7 @@ vm_map_pageable(map, start, end, new_pageable)
                                                        - entry->start));
                                entry->needs_copy = FALSE;
                            }
-                           else if (entry->object.vm_object == VM_OBJECT_NULL) {
+                           else if (entry->object.vm_object == NULL) {
                                entry->object.vm_object =
                                    vm_object_allocate((vm_size_t)(entry->end
                                                        - entry->start));
@@ -1144,7 +1241,28 @@ vm_map_pageable(map, start, end, new_pageable)
                            }
                        }
                    }
+                   vm_map_clip_start(map, entry, start);
+                   vm_map_clip_end(map, entry, end);
+                   entry->wired_count++;
 
+                   /*
+                    * Check for holes
+                    */
+                   if (entry->end < end &&
+                       (entry->next == &map->header ||
+                        entry->next->start > entry->end)) {
+                       /*
+                        *      Found one.  Object creation actions
+                        *      do not need to be undone, but the
+                        *      wired counts need to be restored.
+                        */
+                       while (entry != &map->header && entry->end > start) {
+                           entry->wired_count--;
+                           entry = entry->prev;
+                       }
+                       vm_map_unlock(map);
+                       return(KERN_INVALID_ARGUMENT);
+                   }
                    entry = entry->next;
                }
 
@@ -1168,14 +1286,30 @@ vm_map_pageable(map, start, end, new_pageable)
                    vm_map_unlock(map);         /* trust me ... */
                }
                else {
-                   lock_set_recursive(&map->lock);
-                   lock_write_to_read(&map->lock);
+                   vm_map_set_recursive(&map->lock);
+                   lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
                }
 
-               entry = temp_entry;
+               rv = 0;
+               entry = start_entry;
                while (entry != &map->header && entry->start < end) {
-                   if (entry->wired_count == 1) {
-                       vm_fault_wire(map, entry->start, entry->end);
+                   /*
+                    * If vm_fault_wire fails for any page we need to
+                    * undo what has been done.  We decrement the wiring
+                    * count for those pages which have not yet been
+                    * wired (now) and unwire those that have (later).
+                    *
+                    * XXX this violates the locking protocol on the map,
+                    * needs to be fixed.
+                    */
+                   if (rv)
+                       entry->wired_count--;
+                   else if (entry->wired_count == 1) {
+                       rv = vm_fault_wire(map, entry->start, entry->end);
+                       if (rv) {
+                           failed = entry->start;
+                           entry->wired_count--;
+                       }
                    }
                    entry = entry->next;
                }
@@ -1184,7 +1318,12 @@ vm_map_pageable(map, start, end, new_pageable)
                    vm_map_lock(map);
                }
                else {
-                   lock_clear_recursive(&map->lock);
+                   vm_map_clear_recursive(&map->lock);
+               }
+               if (rv) {
+                   vm_map_unlock(map);
+                   (void) vm_map_pageable(map, start, failed, TRUE);
+                   return(rv);
                }
        }
 
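The wiring path above now tracks failure of vm_fault_wire in `rv', records the first failing address in `failed', stops wiring further entries, and finally calls vm_map_pageable on [start, failed) to unwire the prefix that had already been wired. A compact sketch of that wire-with-rollback pattern follows; the region array, fault_wire and wire_all are hypothetical, with a deliberately failing third region.

#include <stdio.h>

#define NREGION 4

/* wired[i] models the wired_count of region i. */
static int wired[NREGION];

/* Pretend that faulting region 2 in fails. */
static int
fault_wire(int i)
{
    return (i == 2 ? -1 : 0);
}

/*
 * Wire regions [0, n).  On failure, restore the counts of regions that
 * were already wired so the caller never sees a half-wired range.
 */
static int
wire_all(int n)
{
    int i, rv = 0, failed = 0;

    for (i = 0; i < n; i++) {
        wired[i]++;
        if (wired[i] == 1 && (rv = fault_wire(i)) != 0) {
            failed = i;         /* first region that could not be wired */
            wired[i]--;
            break;
        }
    }
    if (rv) {
        /* Roll back the prefix that did get wired. */
        for (i = 0; i < failed; i++)
            wired[i]--;
        return (rv);
    }
    return (0);
}

int
main(void)
{
    int i;

    if (wire_all(NREGION) != 0)
        printf("wiring failed, counts restored\n");
    for (i = 0; i < NREGION; i++)
        printf("region %d wired_count %d\n", i, wired[i]);
    return (0);
}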
@@ -1193,6 +1332,99 @@ vm_map_pageable(map, start, end, new_pageable)
        return(KERN_SUCCESS);
 }
 
+/*
+ * vm_map_clean
+ *
+ * Push any dirty cached pages in the address range to their pager.
+ * If syncio is TRUE, dirty pages are written synchronously.
+ * If invalidate is TRUE, any cached pages are freed as well.
+ *
+ * Returns an error if any part of the specified range is not mapped.
+ */
+int
+vm_map_clean(map, start, end, syncio, invalidate)
+       vm_map_t        map;
+       vm_offset_t     start;
+       vm_offset_t     end;
+       boolean_t       syncio;
+       boolean_t       invalidate;
+{
+       register vm_map_entry_t current;
+       vm_map_entry_t entry;
+       vm_size_t size;
+       vm_object_t object;
+       vm_offset_t offset;
+
+       vm_map_lock_read(map);
+       VM_MAP_RANGE_CHECK(map, start, end);
+       if (!vm_map_lookup_entry(map, start, &entry)) {
+               vm_map_unlock_read(map);
+               return(KERN_INVALID_ADDRESS);
+       }
+
+       /*
+        * Make a first pass to check for holes.
+        */
+       for (current = entry; current->start < end; current = current->next) {
+               if (current->is_sub_map) {
+                       vm_map_unlock_read(map);
+                       return(KERN_INVALID_ARGUMENT);
+               }
+               if (end > current->end &&
+                   (current->next == &map->header ||
+                    current->end != current->next->start)) {
+                       vm_map_unlock_read(map);
+                       return(KERN_INVALID_ADDRESS);
+               }
+       }
+
+       /*
+        * Make a second pass, cleaning/uncaching pages from the indicated
+        * objects as we go.
+        */
+       for (current = entry; current->start < end; current = current->next) {
+               offset = current->offset + (start - current->start);
+               size = (end <= current->end ? end : current->end) - start;
+               if (current->is_a_map) {
+                       register vm_map_t smap;
+                       vm_map_entry_t tentry;
+                       vm_size_t tsize;
+
+                       smap = current->object.share_map;
+                       vm_map_lock_read(smap);
+                       (void) vm_map_lookup_entry(smap, offset, &tentry);
+                       tsize = tentry->end - offset;
+                       if (tsize < size)
+                               size = tsize;
+                       object = tentry->object.vm_object;
+                       offset = tentry->offset + (offset - tentry->start);
+                       vm_object_lock(object);
+                       vm_map_unlock_read(smap);
+               } else {
+                       object = current->object.vm_object;
+                       vm_object_lock(object);
+               }
+               /*
+                * Flush pages if writing is allowed.
+                * XXX should we continue on an error?
+                */
+               if ((current->protection & VM_PROT_WRITE) &&
+                   !vm_object_page_clean(object, offset, offset+size,
+                                         syncio, FALSE)) {
+                       vm_object_unlock(object);
+                       vm_map_unlock_read(map);
+                       return(KERN_FAILURE);
+               }
+               if (invalidate)
+                       vm_object_page_remove(object, offset, offset+size);
+               vm_object_unlock(object);
+               start += size;
+       }
+
+       vm_map_unlock_read(map);
+       return(KERN_SUCCESS);
+}
+
 /*
  *     vm_map_entry_unwire:    [ internal use only ]
  *
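vm_map_clean, added above, makes two passes over the request: the first pass only validates the range (no submaps, no holes), and the second pass flushes and optionally invalidates pages, so an invalid range is rejected before any object is touched. A small user-space model of that validate-then-act structure over sorted ranges follows; clean(), flush() and the range layout are hypothetical.

#include <stdio.h>

struct range { unsigned long start, end; };     /* sorted, non-overlapping */

/*
 * Apply `op' to [start, end).  Pass 1 rejects the request if the ranges
 * do not cover it contiguously, so pass 2 can never fail part-way
 * through because of a hole.
 */
static int
clean(const struct range *r, int n, unsigned long start, unsigned long end,
    void (*op)(unsigned long, unsigned long))
{
    unsigned long pos;
    int i, first = -1;

    /* Pass 1: find the range containing `start' and check for holes. */
    for (i = 0; i < n; i++)
        if (r[i].start <= start && start < r[i].end) {
            first = i;
            break;
        }
    if (first < 0)
        return (-1);                    /* start is not mapped */
    pos = start;
    for (i = first; i < n && pos < end; i++) {
        if (r[i].start > pos)
            return (-1);                /* hole before this range */
        pos = r[i].end;
    }
    if (pos < end)
        return (-1);                    /* runs off the last range */

    /* Pass 2: do the work, clipped to each range. */
    pos = start;
    for (i = first; pos < end; i++) {
        unsigned long stop = end < r[i].end ? end : r[i].end;

        op(pos, stop);
        pos = stop;
    }
    return (0);
}

static void
flush(unsigned long a, unsigned long b)
{
    printf("flush [0x%lx, 0x%lx)\n", a, b);
}

int
main(void)
{
    struct range m[] = { { 0x1000, 0x3000 }, { 0x3000, 0x8000 } };

    return (clean(m, 2, 0x2000, 0x4000, flush) == 0 ? 0 : 1);
}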
@@ -1201,7 +1433,8 @@ vm_map_pageable(map, start, end, new_pageable)
  *     The map in question should be locked.
  *     [This is the reason for this routine's existence.]
  */
-void vm_map_entry_unwire(map, entry)
+void
+vm_map_entry_unwire(map, entry)
        vm_map_t                map;
        register vm_map_entry_t entry;
 {
@@ -1214,7 +1447,8 @@ void vm_map_entry_unwire(map, entry)
  *
  *     Deallocate the given entry from the target map.
  */            
-void vm_map_entry_delete(map, entry)
+void
+vm_map_entry_delete(map, entry)
        register vm_map_t       map;
        register vm_map_entry_t entry;
 {
@@ -1241,6 +1475,7 @@ void vm_map_entry_delete(map, entry)
  *     When called with a sharing map, removes pages from
  *     that region from all physical maps.
  */
+int
 vm_map_delete(map, start, end)
        register vm_map_t       map;
        vm_offset_t             start;
 vm_map_delete(map, start, end)
        register vm_map_t       map;
        vm_offset_t             start;
@@ -1336,6 +1571,7 @@ vm_map_delete(map, start, end)
  *     Remove the given address range from the target map.
  *     This is the exported form of vm_map_delete.
  */
+int
 vm_map_remove(map, start, end)
        register vm_map_t       map;
        register vm_offset_t    start;
 vm_map_remove(map, start, end)
        register vm_map_t       map;
        register vm_offset_t    start;
@@ -1358,7 +1594,8 @@ vm_map_remove(map, start, end)
  *     privilege on the entire address region given.
  *     The entire region must be allocated.
  */
-boolean_t vm_map_check_protection(map, start, end, protection)
+boolean_t
+vm_map_check_protection(map, start, end, protection)
        register vm_map_t       map;
        register vm_offset_t    start;
        register vm_offset_t    end;
@@ -1408,7 +1645,8 @@ boolean_t vm_map_check_protection(map, start, end, protection)
  *     Copies the contents of the source entry to the destination
  *     entry.  The entries *must* be aligned properly.
  */
-void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
+void
+vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
        vm_map_t                src_map, dst_map;
        register vm_map_entry_t src_entry, dst_entry;
 {
@@ -1417,8 +1655,8 @@ void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
        if (src_entry->is_sub_map || dst_entry->is_sub_map)
                return;
 
-       if (dst_entry->object.vm_object != VM_OBJECT_NULL &&
-           !dst_entry->object.vm_object->internal)
+       if (dst_entry->object.vm_object != NULL &&
+           (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
                printf("vm_map_copy_entry: copying over permanent data!\n");
 
        /*
@@ -1545,6 +1783,7 @@ void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
  *     map to make copies.  This also reduces map
  *     fragmentation.]
  */
+int
 vm_map_copy(dst_map, src_map,
                          dst_addr, len, src_addr,
                          dst_alloc, src_destroy)
@@ -1603,7 +1842,7 @@ vm_map_copy(dst_map, src_map,
        if (src_map == dst_map) {
                vm_map_lock(src_map);
        }
-       else if ((int) src_map < (int) dst_map) {
+       else if ((long) src_map < (long) dst_map) {
                vm_map_lock(src_map);
                vm_map_lock(dst_map);
        } else {
@@ -1630,7 +1869,7 @@ vm_map_copy(dst_map, src_map,
 
                if (dst_alloc) {
                        /* XXX Consider making this a vm_map_find instead */
-                       if ((result = vm_map_insert(dst_map, VM_OBJECT_NULL,
+                       if ((result = vm_map_insert(dst_map, NULL,
                                        (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
                                goto Return;
                }
@@ -1732,7 +1971,7 @@ vm_map_copy(dst_map, src_map,
                        else {
                                new_src_map = src_map;
                                new_src_start = src_entry->start;
-                               lock_set_recursive(&src_map->lock);
+                               vm_map_set_recursive(&src_map->lock);
                        }
 
                        if (dst_entry->is_a_map) {
@@ -1760,7 +1999,7 @@ vm_map_copy(dst_map, src_map,
                                                        new_dst_start,
                                                        new_dst_end);
                                        (void) vm_map_insert(new_dst_map,
-                                                       VM_OBJECT_NULL,
+                                                       NULL,
                                                        (vm_offset_t) 0,
                                                        new_dst_start,
                                                        new_dst_end);
@@ -1770,7 +2009,7 @@ vm_map_copy(dst_map, src_map,
                        else {
                                new_dst_map = dst_map;
                                new_dst_start = dst_entry->start;
-                               lock_set_recursive(&dst_map->lock);
+                               vm_map_set_recursive(&dst_map->lock);
                        }
 
                        /*
@@ -1782,9 +2021,9 @@ vm_map_copy(dst_map, src_map,
                                FALSE, FALSE);
 
                        if (dst_map == new_dst_map)
-                               lock_clear_recursive(&dst_map->lock);
+                               vm_map_clear_recursive(&dst_map->lock);
                        if (src_map == new_src_map)
-                               lock_clear_recursive(&src_map->lock);
+                               vm_map_clear_recursive(&src_map->lock);
                }
 
                /*
@@ -1832,17 +2071,20 @@ vm_map_copy(dst_map, src_map,
 }
 
 /*
- *     vm_map_fork:
+ * vmspace_fork:
+ * Create a new process vmspace structure and vm_map
+ * based on those of an existing process.  The new map
+ * is based on the old map, according to the inheritance
+ * values on the regions in that map.
  *
- *     Create and return a new map based on the old
- *     map, according to the inheritance values on the
- *     regions in that map.
- *
- *     The source map must not be locked.
+ * The source map must not be locked.
  */
-vm_map_t vm_map_fork(old_map)
-       vm_map_t        old_map;
+struct vmspace *
+vmspace_fork(vm1)
+       register struct vmspace *vm1;
 {
+       register struct vmspace *vm2;
+       vm_map_t        old_map = &vm1->vm_map;
        vm_map_t        new_map;
        vm_map_entry_t  old_entry;
        vm_map_entry_t  new_entry;
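vmspace_fork replaces vm_map_fork: the child's vmspace is built from the parent's map entry by entry according to each entry's inheritance value (shared entries keep referencing the same backing, copy-inherited entries end up with their own backing via copy-on-write in the real code, and other entries are simply not propagated), and every new entry starts with wired_count 0. The stand-alone sketch below models only that inheritance-driven selection; the enum, field names and object numbering are hypothetical.

#include <stdio.h>

enum inherit { INH_NONE, INH_SHARE, INH_COPY };

struct entry {
    unsigned long start, end;
    enum inherit  inheritance;
    int           object;       /* stand-in for the backing object */
    int           wired_count;
};

/* Build the child's entries from the parent's, per inheritance value. */
static int
fork_map(const struct entry *old, int n, struct entry *new)
{
    int i, nnew = 0, next_object = 100;

    for (i = 0; i < n; i++) {
        if (old[i].inheritance == INH_NONE)
            continue;                   /* not passed on to the child */
        new[nnew] = old[i];
        new[nnew].wired_count = 0;      /* the child starts unwired */
        if (old[i].inheritance == INH_COPY)
            new[nnew].object = next_object++;   /* gets its own backing */
        nnew++;
    }
    return (nnew);
}

int
main(void)
{
    struct entry parent[] = {
        { 0x1000, 0x2000, INH_COPY,  1, 1 },
        { 0x2000, 0x3000, INH_SHARE, 2, 0 },
        { 0x3000, 0x4000, INH_NONE,  3, 0 },
    };
    struct entry child[3];
    int i, n = fork_map(parent, 3, child);

    for (i = 0; i < n; i++)
        printf("[0x%lx, 0x%lx) object %d\n",
            child[i].start, child[i].end, child[i].object);
    return (0);
}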
@@ -1850,11 +2092,12 @@ vm_map_t vm_map_fork(old_map)
 
        vm_map_lock(old_map);
 
-       new_pmap = pmap_create((vm_size_t) 0);
-       new_map = vm_map_create(new_pmap,
-                       old_map->min_offset,
-                       old_map->max_offset,
-                       old_map->entries_pageable);
+       vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
+           old_map->entries_pageable);
+       bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
+           (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
+       new_pmap = &vm2->vm_pmap;               /* XXX */
+       new_map = &vm2->vm_map;                 /* XXX */
 
        old_entry = old_map->header.next;
 
@@ -1879,7 +2122,7 @@ vm_map_t vm_map_fork(old_map)
                                 *      Create a new sharing map
                                 */
                                 
-                               new_share_map = vm_map_create(PMAP_NULL,
+                               new_share_map = vm_map_create(NULL,
                                                        old_entry->start,
                                                        old_entry->end,
                                                        TRUE);
@@ -1893,6 +2136,7 @@ vm_map_t vm_map_fork(old_map)
                                new_share_entry =
                                        vm_map_entry_create(new_share_map);
                                *new_share_entry = *old_entry;
+                               new_share_entry->wired_count = 0;
 
                                /*
                                 *      Insert the entry into the new sharing
@@ -1919,6 +2163,7 @@ vm_map_t vm_map_fork(old_map)
 
                        new_entry = vm_map_entry_create(new_map);
                        *new_entry = *old_entry;
+                       new_entry->wired_count = 0;
                        vm_map_reference(new_entry->object.share_map);
 
                        /*
@@ -1948,7 +2193,7 @@ vm_map_t vm_map_fork(old_map)
                        new_entry = vm_map_entry_create(new_map);
                        *new_entry = *old_entry;
                        new_entry->wired_count = 0;
-                       new_entry->object.vm_object = VM_OBJECT_NULL;
+                       new_entry->object.vm_object = NULL;
                        new_entry->is_a_map = FALSE;
                        vm_map_entry_link(new_map, new_map->header.prev,
                                                        new_entry);
@@ -1977,7 +2222,7 @@ vm_map_t vm_map_fork(old_map)
        new_map->size = old_map->size;
        vm_map_unlock(old_map);
 
-       return(new_map);
+       return(vm2);
 }
 
 /*
@@ -2002,6 +2247,7 @@ vm_map_t vm_map_fork(old_map)
  *     copying operations, although the data referenced will
  *     remain the same.
  */
+int
 vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                                object, offset, out_prot, wired, single_use)
        vm_map_t                *var_map;       /* IN/OUT */
 vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                                object, offset, out_prot, wired, single_use)
        vm_map_t                *var_map;       /* IN/OUT */
@@ -2146,7 +2392,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                         *      share map to the new object.
                         */
 
-                       if (lock_read_to_write(&share_map->lock)) {
+                       if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+                                   (void *)0, LOCKPID)) {
                                if (share_map != map)
                                        vm_map_unlock_read(map);
                                goto RetryLookup;
@@ -2159,7 +2406,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                                
                        entry->needs_copy = FALSE;
                        
-                       lock_write_to_read(&share_map->lock);
+                       lockmgr(&share_map->lock, LK_DOWNGRADE,
+                               (void *)0, LOCKPID);
                }
                else {
                        /*
@@ -2174,9 +2422,10 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
        /*
         *      Create an object if necessary.
         */
-       if (entry->object.vm_object == VM_OBJECT_NULL) {
+       if (entry->object.vm_object == NULL) {
 
-               if (lock_read_to_write(&share_map->lock)) {
+               if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+                               (void *)0, LOCKPID)) {
                        if (share_map != map)
                                vm_map_unlock_read(map);
                        goto RetryLookup;
@@ -2185,7 +2434,7 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                entry->object.vm_object = vm_object_allocate(
                                        (vm_size_t)(entry->end - entry->start));
                entry->offset = 0;
-               lock_write_to_read(&share_map->lock);
+               lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
        }
 
        /*
@@ -2221,7 +2470,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
  *     (according to the handle returned by that lookup).
  */
 
-void vm_map_lookup_done(map, entry)
+void
+vm_map_lookup_done(map, entry)
        register vm_map_t       map;
        vm_map_entry_t          entry;
 {
@@ -2251,7 +2501,8 @@ void vm_map_lookup_done(map, entry)
  *             at allocation time because the adjacent entry
  *             is often wired down.
  */
-void vm_map_simplify(map, start)
+void
+vm_map_simplify(map, start)
        vm_map_t        map;
        vm_offset_t     start;
 {
@@ -2299,7 +2550,8 @@ void vm_map_simplify(map, start)
 /*
  *     vm_map_print:   [ debug ]
  */
-void vm_map_print(map, full)
+void
+vm_map_print(map, full)
        register vm_map_t       map;
        boolean_t               full;
 {