lock_init => lockinit
[unix-history] usr/src/sys/vm/vm_map.c
index 5b90af7..b2daa99 100644
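
This commit converts the vm_map locking primitives from the old Mach-derived
lock package to the 4.4BSD lockmgr interface.  A rough sketch of the
correspondence, as it appears in the hunks below (vm_map_set_recursive and
vm_map_clear_recursive are the new wrappers used in place of the old
recursion calls):

        lock_init(&map->lock, TRUE);       =>  lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
        lock_write_to_read(&map->lock);    =>  lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
        lock_read_to_write(&map->lock);    =>  lockmgr(&map->lock, LK_EXCLUPGRADE, (void *)0, LOCKPID);
        lock_set_recursive(&map->lock);    =>  vm_map_set_recursive(&map->lock);
        lock_clear_recursive(&map->lock);  =>  vm_map_clear_recursive(&map->lock);
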
@@ -1,13 +1,13 @@
 /* 
- * Copyright (c) 1991 Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1991, 1993
+ *     The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_map.c    7.9 (Berkeley) %G%
+ *     @(#)vm_map.c    8.7 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -46,7 +46,6 @@
 
 #include <vm/vm.h>
 #include <vm/vm_page.h>
-#include <vm/vm_object.h>
 
 /*
  *     Virtual memory maps provide for the mapping, protection,
@@ -114,7 +113,8 @@ vm_map_t    kmap_free;
 static void    _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
 static void    _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
 
-void vm_map_startup()
+void
+vm_map_startup()
 {
        register int i;
        register vm_map_entry_t mep;
@@ -192,19 +192,20 @@ vmspace_free(vm)
  *     the given physical map structure, and having
  *     the given lower and upper address bounds.
  */
-vm_map_t vm_map_create(pmap, min, max, pageable)
+vm_map_t
+vm_map_create(pmap, min, max, pageable)
        pmap_t          pmap;
        vm_offset_t     min, max;
        boolean_t       pageable;
 {
        register vm_map_t       result;
-       extern vm_map_t         kernel_map, kmem_map;
+       extern vm_map_t         kmem_map;
 
        if (kmem_map == NULL) {
                result = kmap_free;
-               kmap_free = (vm_map_t) result->header.next;
                if (result == NULL)
                        panic("vm_map_create: out of maps");
+               kmap_free = (vm_map_t) result->header.next;
        } else
                MALLOC(result, vm_map_t, sizeof(struct vm_map),
                       M_VMMAP, M_WAITOK);
@@ -236,7 +237,7 @@ vm_map_init(map, min, max, pageable)
        map->first_free = &map->header;
        map->hint = &map->header;
        map->timestamp = 0;
-       lock_init(&map->lock, TRUE);
+       lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
        simple_lock_init(&map->ref_lock);
        simple_lock_init(&map->hint_lock);
 }
@@ -247,19 +248,28 @@ vm_map_init(map, min, max, pageable)
  *     Allocates a VM map entry for insertion.
  *     No entry fields are filled in.  This routine is
  */
-vm_map_entry_t vm_map_entry_create(map)
+vm_map_entry_t
+vm_map_entry_create(map)
        vm_map_t        map;
 {
        vm_map_entry_t  entry;
+#ifdef DEBUG
        extern vm_map_t         kernel_map, kmem_map, mb_map, pager_map;
+       boolean_t               isspecial;
 
-       if (map == kernel_map || map == kmem_map || map == mb_map ||
-           map == pager_map) {
-               if (entry = kentry_free)
-                       kentry_free = kentry_free->next;
-       } else
+       isspecial = (map == kernel_map || map == kmem_map ||
+                    map == mb_map || map == pager_map);
+       if (isspecial && map->entries_pageable ||
+           !isspecial && !map->entries_pageable)
+               panic("vm_map_entry_create: bogus map");
+#endif
+       if (map->entries_pageable) {
                MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
                       M_VMMAPENT, M_WAITOK);
+       } else {
+               if (entry = kentry_free)
+                       kentry_free = kentry_free->next;
+       }
        if (entry == NULL)
                panic("vm_map_entry_create: out of map entries");
 
@@ -271,18 +281,27 @@ vm_map_entry_t vm_map_entry_create(map)
  *
  *     Inverse of vm_map_entry_create.
  */
-void vm_map_entry_dispose(map, entry)
+void
+vm_map_entry_dispose(map, entry)
        vm_map_t        map;
        vm_map_entry_t  entry;
 {
+#ifdef DEBUG
        extern vm_map_t         kernel_map, kmem_map, mb_map, pager_map;
+       boolean_t               isspecial;
 
-       if (map == kernel_map || map == kmem_map || map == mb_map ||
-           map == pager_map) {
+       isspecial = (map == kernel_map || map == kmem_map ||
+                    map == mb_map || map == pager_map);
+       if (isspecial && map->entries_pageable ||
+           !isspecial && !map->entries_pageable)
+               panic("vm_map_entry_dispose: bogus map");
+#endif
+       if (map->entries_pageable) {
+               FREE(entry, M_VMMAPENT);
+       } else {
                entry->next = kentry_free;
                kentry_free = entry;
-       } else
-               FREE(entry, M_VMMAPENT);
+       }
 }
 
 /*
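
In both vm_map_entry_create and vm_map_entry_dispose the new DEBUG check
reduces to isspecial == map->entries_pageable: the system maps (kernel_map,
kmem_map, mb_map, pager_map) must take their entries from the statically
allocated kentry_free pool, since MALLOC on their behalf could recurse into
those same maps, while every other map uses pageable, malloc'ed entries.
Restated as a sketch (not the literal source):

        /* a map is "special" exactly when its entries are not pageable */
        if (isspecial == map->entries_pageable)
                panic("vm_map_entry_create: bogus map");
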
@@ -311,7 +330,8 @@ void vm_map_entry_dispose(map, entry)
  *     Creates another valid reference to the given map.
  *
  */
-void vm_map_reference(map)
+void
+vm_map_reference(map)
        register vm_map_t       map;
 {
        if (map == NULL)
@@ -329,7 +349,8 @@ void vm_map_reference(map)
  *     destroying it if no references remain.
  *     The map should not be locked.
  */
-void vm_map_deallocate(map)
+void
+vm_map_deallocate(map)
        register vm_map_t       map;
 {
        register int            c;
@@ -360,7 +381,7 @@ void vm_map_deallocate(map)
 }
 
 /*
- *     vm_map_insert:  [ internal use only ]
+ *     vm_map_insert:
  *
  *     Inserts the given whole VM object into the target
  *     map at the specified address range.  The object's
@@ -504,7 +525,8 @@ vm_map_insert(map, object, offset, start, end)
  *     result indicates whether the address is
  *     actually contained in the map.
  */
-boolean_t vm_map_lookup_entry(map, address, entry)
+boolean_t
+vm_map_lookup_entry(map, address, entry)
        register vm_map_t       map;
        register vm_offset_t    address;
        vm_map_entry_t          *entry;         /* OUT */
@@ -672,7 +694,8 @@ vm_map_find(map, object, offset, addr, length, find_space)
  *             removing extra sharing maps
  *             [XXX maybe later] merging with a neighbor
  */
-void vm_map_simplify_entry(map, entry)
+void
+vm_map_simplify_entry(map, entry)
        vm_map_t        map;
        vm_map_entry_t  entry;
 {
@@ -746,7 +769,8 @@ void vm_map_simplify_entry(map, entry)
  *     This routine is called only when it is known that
  *     the entry must be split.
  */
-static void _vm_map_clip_start(map, entry, start)
+static void
+_vm_map_clip_start(map, entry, start)
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    start;
@@ -800,7 +824,8 @@ static void _vm_map_clip_start(map, entry, start)
  *     This routine is called only when it is known that
  *     the entry must be split.
  */
-static void _vm_map_clip_end(map, entry, end)
+static void
+_vm_map_clip_end(map, entry, end)
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    end;
@@ -1087,7 +1112,7 @@ vm_map_pageable(map, start, end, new_pageable)
        register boolean_t      new_pageable;
 {
        register vm_map_entry_t entry;
-       vm_map_entry_t          temp_entry;
+       vm_map_entry_t          start_entry;
        register vm_offset_t    failed;
        int                     rv;
 
@@ -1103,13 +1128,11 @@ vm_map_pageable(map, start, end, new_pageable)
         *      for the entire region.  We do so before making any changes.
         */
 
-       if (vm_map_lookup_entry(map, start, &temp_entry)) {
-               entry = temp_entry;
-               vm_map_clip_start(map, entry, start);
+       if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
+               vm_map_unlock(map);
+               return(KERN_INVALID_ADDRESS);
        }
-       else
-               entry = temp_entry->next;
-       temp_entry = entry;
+       entry = start_entry;
 
        /*
         *      Actions are rather different for wiring and unwiring,
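
Note the semantic tightening in this hunk: where the old code silently
started at the entry following an unmapped start address, a failed lookup
now returns KERN_INVALID_ADDRESS.  The vm_map_clip_start call also moves out
of the lookup path, since the wiring and unwiring cases below now clip at
different points.
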
@@ -1118,13 +1141,19 @@ vm_map_pageable(map, start, end, new_pageable)
 
        if (new_pageable) {
 
+               vm_map_clip_start(map, entry, start);
+
                /*
                 *      Unwiring.  First ensure that the range to be
-                *      unwired is really wired down.
+                *      unwired is really wired down and that there
+                *      are no holes.
                 */
                while ((entry != &map->header) && (entry->start < end)) {
 
-                   if (entry->wired_count == 0) {
+                   if (entry->wired_count == 0 ||
+                       (entry->end < end &&
+                        (entry->next == &map->header ||
+                         entry->next->start > entry->end))) {
                        vm_map_unlock(map);
                        return(KERN_INVALID_ARGUMENT);
                    }
@@ -1136,9 +1165,9 @@ vm_map_pageable(map, start, end, new_pageable)
                 *      If a region becomes completely unwired,
                 *      unwire its physical pages and mappings.
                 */
-               lock_set_recursive(&map->lock);
+               vm_map_set_recursive(&map->lock);
 
-               entry = temp_entry;
+               entry = start_entry;
                while ((entry != &map->header) && (entry->start < end)) {
                    vm_map_clip_end(map, entry, end);
 
@@ -1148,17 +1177,19 @@ vm_map_pageable(map, start, end, new_pageable)
 
                    entry = entry->next;
                }
-               lock_clear_recursive(&map->lock);
+               vm_map_clear_recursive(&map->lock);
        }
 
        else {
                /*
                 *      Wiring.  We must do this in two passes:
                 *
-                *      1.  Holding the write lock, we increment the
-                *          wiring count.  For any area that is not already
-                *          wired, we create any shadow objects that need
-                *          to be created.
+                *      1.  Holding the write lock, we create any shadow
+                *          or zero-fill objects that need to be created.
+                *          Then we clip each map entry to the region to be
+                *          wired and increment its wiring count.  We
+                *          create objects before clipping the map entries
+                *          to avoid object proliferation.
                 *
                 *      2.  We downgrade to a read lock, and call
                 *          vm_fault_wire to fault in the pages for any
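
vm_map_set_recursive and vm_map_clear_recursive take over from
lock_set_recursive/lock_clear_recursive.  Presumably they are vm_map.h
macros that toggle LK_CANRECURSE on the underlying lockmgr lock; a hedged
sketch of the idea, not the literal macro text:

        #define vm_map_set_recursive(lock) { \
                simple_lock(&(lock)->lk_interlock); \
                (lock)->lk_flags |= LK_CANRECURSE; \
                simple_unlock(&(lock)->lk_interlock); \
        }
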
@@ -1179,12 +1210,8 @@ vm_map_pageable(map, start, end, new_pageable)
                /*
                 *      Pass 1.
                 */
-               entry = temp_entry;
                while ((entry != &map->header) && (entry->start < end)) {
-                   vm_map_clip_end(map, entry, end);
-
-                   entry->wired_count++;
-                   if (entry->wired_count == 1) {
+                   if (entry->wired_count == 0) {
 
                        /*
                         *      Perform actions of vm_map_lookup that need
@@ -1214,7 +1241,28 @@ vm_map_pageable(map, start, end, new_pageable)
                            }
                        }
                    }
+                   vm_map_clip_start(map, entry, start);
+                   vm_map_clip_end(map, entry, end);
+                   entry->wired_count++;
 
+                   /*
+                    * Check for holes
+                    */
+                   if (entry->end < end &&
+                       (entry->next == &map->header ||
+                        entry->next->start > entry->end)) {
+                       /*
+                        *      Found one.  Object creation actions
+                        *      do not need to be undone, but the
+                        *      wired counts need to be restored.
+                        */
+                       while (entry != &map->header && entry->end > start) {
+                           entry->wired_count--;
+                           entry = entry->prev;
+                       }
+                       vm_map_unlock(map);
+                       return(KERN_INVALID_ARGUMENT);
+                   }
                    entry = entry->next;
                }
 
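
The hole test added to both the unwiring check and the wiring pass asks
whether the request runs past the current entry while the next entry is
absent or non-contiguous.  Factored into a hypothetical helper for
illustration (the source inlines the expression):

        /* TRUE if the range past this entry up to 'end' is not backed
         * by a contiguous next entry, i.e. the region has a hole
         * (hypothetical helper, not in the source) */
        static boolean_t
        vm_map_range_has_hole(map, entry, end)
                vm_map_t        map;
                vm_map_entry_t  entry;
                vm_offset_t     end;
        {
                return (entry->end < end &&
                        (entry->next == &map->header ||
                         entry->next->start > entry->end));
        }

When the wiring pass trips over a hole it walks backwards, undoing only the
wired_count increments; as the comment says, the shadow and zero-fill
objects created in pass 1 are harmless and are left in place.
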
@@ -1238,12 +1286,12 @@ vm_map_pageable(map, start, end, new_pageable)
                    vm_map_unlock(map);         /* trust me ... */
                }
                else {
-                   lock_set_recursive(&map->lock);
-                   lock_write_to_read(&map->lock);
+                   vm_map_set_recursive(&map->lock);
+                   lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
                }
 
                rv = 0;
-               entry = temp_entry;
+               entry = start_entry;
                while (entry != &map->header && entry->start < end) {
                    /*
                     * If vm_fault_wire fails for any page we need to
@@ -1270,7 +1318,7 @@ vm_map_pageable(map, start, end, new_pageable)
                    vm_map_lock(map);
                }
                else {
-                   lock_clear_recursive(&map->lock);
+                   vm_map_clear_recursive(&map->lock);
                }
                if (rv) {
                    vm_map_unlock(map);
@@ -1284,6 +1332,99 @@ vm_map_pageable(map, start, end, new_pageable)
        return(KERN_SUCCESS);
 }
 
+/*
+ * vm_map_clean
+ *
+ * Push any dirty cached pages in the address range to their pager.
+ * If syncio is TRUE, dirty pages are written synchronously.
+ * If invalidate is TRUE, any cached pages are freed as well.
+ *
+ * Returns an error if any part of the specified range is not mapped.
+ */
+int
+vm_map_clean(map, start, end, syncio, invalidate)
+       vm_map_t        map;
+       vm_offset_t     start;
+       vm_offset_t     end;
+       boolean_t       syncio;
+       boolean_t       invalidate;
+{
+       register vm_map_entry_t current;
+       vm_map_entry_t entry;
+       vm_size_t size;
+       vm_object_t object;
+       vm_offset_t offset;
+
+       vm_map_lock_read(map);
+       VM_MAP_RANGE_CHECK(map, start, end);
+       if (!vm_map_lookup_entry(map, start, &entry)) {
+               vm_map_unlock_read(map);
+               return(KERN_INVALID_ADDRESS);
+       }
+
+       /*
+        * Make a first pass to check for holes.
+        */
+       for (current = entry; current->start < end; current = current->next) {
+               if (current->is_sub_map) {
+                       vm_map_unlock_read(map);
+                       return(KERN_INVALID_ARGUMENT);
+               }
+               if (end > current->end &&
+                   (current->next == &map->header ||
+                    current->end != current->next->start)) {
+                       vm_map_unlock_read(map);
+                       return(KERN_INVALID_ADDRESS);
+               }
+       }
+
+       /*
+        * Make a second pass, cleaning/uncaching pages from the indicated
+        * objects as we go.
+        */
+       for (current = entry; current->start < end; current = current->next) {
+               offset = current->offset + (start - current->start);
+               size = (end <= current->end ? end : current->end) - start;
+               if (current->is_a_map) {
+                       register vm_map_t smap;
+                       vm_map_entry_t tentry;
+                       vm_size_t tsize;
+
+                       smap = current->object.share_map;
+                       vm_map_lock_read(smap);
+                       (void) vm_map_lookup_entry(smap, offset, &tentry);
+                       tsize = tentry->end - offset;
+                       if (tsize < size)
+                               size = tsize;
+                       object = tentry->object.vm_object;
+                       offset = tentry->offset + (offset - tentry->start);
+                       vm_object_lock(object);
+                       vm_map_unlock_read(smap);
+               } else {
+                       object = current->object.vm_object;
+                       vm_object_lock(object);
+               }
+               /*
+                * Flush pages if writing is allowed.
+                * XXX should we continue on an error?
+                */
+               if ((current->protection & VM_PROT_WRITE) &&
+                   !vm_object_page_clean(object, offset, offset+size,
+                                         syncio, FALSE)) {
+                       vm_object_unlock(object);
+                       vm_map_unlock_read(map);
+                       return(KERN_FAILURE);
+               }
+               if (invalidate)
+                       vm_object_page_remove(object, offset, offset+size);
+               vm_object_unlock(object);
+               start += size;
+       }
+
+       vm_map_unlock_read(map);
+       return(KERN_SUCCESS);
+}
+
 /*
  *     vm_map_entry_unwire:    [ internal use only ]
  *
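
vm_map_clean is the primitive an msync()-style path would sit on.  A
hypothetical caller, assuming the MS_* flags from <sys/mman.h> and the usual
page-rounding macros:

        /* illustrative only: flush, then optionally invalidate, a range */
        rv = vm_map_clean(map, trunc_page(addr), round_page(addr + len),
                          (flags & MS_ASYNC) == 0,       /* syncio */
                          (flags & MS_INVALIDATE) != 0); /* invalidate */
        if (rv != KERN_SUCCESS)
                return (EINVAL);
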
@@ -1292,7 +1433,8 @@ vm_map_pageable(map, start, end, new_pageable)
  *     The map in question should be locked.
  *     [This is the reason for this routine's existence.]
  */
-void vm_map_entry_unwire(map, entry)
+void
+vm_map_entry_unwire(map, entry)
        vm_map_t                map;
        register vm_map_entry_t entry;
 {
@@ -1305,7 +1447,8 @@ void vm_map_entry_unwire(map, entry)
  *
  *     Deallocate the given entry from the target map.
  */            
-void vm_map_entry_delete(map, entry)
+void
+vm_map_entry_delete(map, entry)
        register vm_map_t       map;
        register vm_map_entry_t entry;
 {
@@ -1451,7 +1594,8 @@ vm_map_remove(map, start, end)
  *     privilege on the entire address region given.
  *     The entire region must be allocated.
  */
-boolean_t vm_map_check_protection(map, start, end, protection)
+boolean_t
+vm_map_check_protection(map, start, end, protection)
        register vm_map_t       map;
        register vm_offset_t    start;
        register vm_offset_t    end;
@@ -1501,7 +1645,8 @@ boolean_t vm_map_check_protection(map, start, end, protection)
  *     Copies the contents of the source entry to the destination
  *     entry.  The entries *must* be aligned properly.
  */
-void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
+void
+vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
        vm_map_t                src_map, dst_map;
        register vm_map_entry_t src_entry, dst_entry;
 {
@@ -1697,7 +1842,7 @@ vm_map_copy(dst_map, src_map,
        if (src_map == dst_map) {
                vm_map_lock(src_map);
        }
-       else if ((int) src_map < (int) dst_map) {
+       else if ((long) src_map < (long) dst_map) {
                vm_map_lock(src_map);
                vm_map_lock(dst_map);
        } else {
@@ -1826,7 +1971,7 @@ vm_map_copy(dst_map, src_map,
                        else {
                                new_src_map = src_map;
                                new_src_start = src_entry->start;
-                               lock_set_recursive(&src_map->lock);
+                               vm_map_set_recursive(&src_map->lock);
                        }
 
                        if (dst_entry->is_a_map) {
@@ -1864,7 +2009,7 @@ vm_map_copy(dst_map, src_map,
                        else {
                                new_dst_map = dst_map;
                                new_dst_start = dst_entry->start;
-                               lock_set_recursive(&dst_map->lock);
+                               vm_map_set_recursive(&dst_map->lock);
                        }
 
                        /*
@@ -1876,9 +2021,9 @@ vm_map_copy(dst_map, src_map,
                                FALSE, FALSE);
 
                        if (dst_map == new_dst_map)
-                               lock_clear_recursive(&dst_map->lock);
+                               vm_map_clear_recursive(&dst_map->lock);
                        if (src_map == new_src_map)
-                               lock_clear_recursive(&src_map->lock);
+                               vm_map_clear_recursive(&src_map->lock);
                }
 
                /*
@@ -1991,6 +2136,7 @@ vmspace_fork(vm1)
                                new_share_entry =
                                        vm_map_entry_create(new_share_map);
                                *new_share_entry = *old_entry;
+                               new_share_entry->wired_count = 0;
 
                                /*
                                 *      Insert the entry into the new sharing
@@ -2017,6 +2163,7 @@ vmspace_fork(vm1)
 
                        new_entry = vm_map_entry_create(new_map);
                        *new_entry = *old_entry;
+                       new_entry->wired_count = 0;
                        vm_map_reference(new_entry->object.share_map);
 
                        /*
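
Both wired_count = 0 additions in vmspace_fork fix the same inheritance bug:
a copied entry starts life in the child with the parent's wiring count, yet
none of the child's pages have actually been wired, so the count must be
cleared in the copy.
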
@@ -2245,7 +2392,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                         *      share map to the new object.
                         */
 
-                       if (lock_read_to_write(&share_map->lock)) {
+                       if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+                                   (void *)0, LOCKPID)) {
                                if (share_map != map)
                                        vm_map_unlock_read(map);
                                goto RetryLookup;
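
As with the old lock_read_to_write, a nonzero return from the LK_EXCLUPGRADE
request means the upgrade could not be granted and the shared lock has been
dropped, so any state read under it is stale; the code therefore restarts
the whole lookup:

        if (lockmgr(&share_map->lock, LK_EXCLUPGRADE, (void *)0, LOCKPID)) {
                /* read lock lost: release the outer map and retry */
                if (share_map != map)
                        vm_map_unlock_read(map);
                goto RetryLookup;
        }
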
@@ -2258,7 +2406,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                                
                        entry->needs_copy = FALSE;
                        
-                       lock_write_to_read(&share_map->lock);
+                       lockmgr(&share_map->lock, LK_DOWNGRADE,
+                               (void *)0, LOCKPID);
                }
                else {
                        /*
@@ -2275,7 +2424,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
         */
        if (entry->object.vm_object == NULL) {
 
-               if (lock_read_to_write(&share_map->lock)) {
+               if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+                               (void *)0, LOCKPID)) {
                        if (share_map != map)
                                vm_map_unlock_read(map);
                        goto RetryLookup;
@@ -2284,7 +2434,7 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                entry->object.vm_object = vm_object_allocate(
                                        (vm_size_t)(entry->end - entry->start));
                entry->offset = 0;
-               lock_write_to_read(&share_map->lock);
+               lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
        }
 
        /*
@@ -2320,7 +2470,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
  *     (according to the handle returned by that lookup).
  */
 
-void vm_map_lookup_done(map, entry)
+void
+vm_map_lookup_done(map, entry)
        register vm_map_t       map;
        vm_map_entry_t          entry;
 {
@@ -2350,7 +2501,8 @@ void vm_map_lookup_done(map, entry)
  *             at allocation time because the adjacent entry
  *             is often wired down.
  */
-void vm_map_simplify(map, start)
+void
+vm_map_simplify(map, start)
        vm_map_t        map;
        vm_offset_t     start;
 {
@@ -2398,7 +2550,8 @@ void vm_map_simplify(map, start)
 /*
  *     vm_map_print:   [ debug ]
  */
-void vm_map_print(map, full)
+void
+vm_map_print(map, full)
        register vm_map_t       map;
        boolean_t               full;
 {