lock_init => lockinit
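In short: the vm_map lock moves from the old Mach-style lock package
(lock_init, lock_read_to_write, lock_write_to_read, lock_set_recursive,
lock_clear_recursive) to the 4.4BSD lockmgr(9) interface, the kernel-map
entry-allocation checks move under #ifdef DEBUG, and a new vm_map_clean()
routine is added. A minimal sketch of the new lock calls, assuming the
<sys/lock.h> API exactly as this diff uses it (the demo function and its
lock are hypothetical; LOCKPID is taken on faith from the diff):

    #include <sys/param.h>
    #include <sys/lock.h>

    struct lock demo_lock;

    void
    demo_lock_usage()
    {
            /* Replaces lock_init(&lock, TRUE): sleep priority PVM,
             * wait message "thrd_sleep", no timeout, no extra flags. */
            lockinit(&demo_lock, PVM, "thrd_sleep", 0, 0);

            /* Acquire a shared (read) lock. */
            lockmgr(&demo_lock, LK_SHARED, (void *)0, LOCKPID);

            /* Replaces lock_read_to_write(): a nonzero return means the
             * shared lock was lost during the upgrade, which is why
             * vm_map_lookup() in the diff below retries via RetryLookup. */
            if (lockmgr(&demo_lock, LK_EXCLUPGRADE, (void *)0, LOCKPID))
                    return;

            /* Replaces lock_write_to_read(). */
            lockmgr(&demo_lock, LK_DOWNGRADE, (void *)0, LOCKPID);
            lockmgr(&demo_lock, LK_RELEASE, (void *)0, LOCKPID);
    }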
diff --git a/usr/src/sys/vm/vm_map.c b/usr/src/sys/vm/vm_map.c
index 9193182..b2daa99 100644
--- a/usr/src/sys/vm/vm_map.c
+++ b/usr/src/sys/vm/vm_map.c
@@ -7,7 +7,7 @@
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_map.c    8.2 (Berkeley) %G%
+ *     @(#)vm_map.c    8.7 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -46,7 +46,6 @@
 
 #include <vm/vm.h>
 #include <vm/vm_page.h>
-#include <vm/vm_object.h>
 
 /*
  *     Virtual memory maps provide for the mapping, protection,
@@ -114,7 +113,8 @@ vm_map_t    kmap_free;
 static void    _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
 static void    _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
 
-void vm_map_startup()
+void
+vm_map_startup()
 {
        register int i;
        register vm_map_entry_t mep;
@@ -192,7 +192,8 @@ vmspace_free(vm)
  *     the given physical map structure, and having
  *     the given lower and upper address bounds.
  */
-vm_map_t vm_map_create(pmap, min, max, pageable)
+vm_map_t
+vm_map_create(pmap, min, max, pageable)
        pmap_t          pmap;
        vm_offset_t     min, max;
        boolean_t       pageable;
@@ -202,9 +203,9 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 
        if (kmem_map == NULL) {
                result = kmap_free;
-               kmap_free = (vm_map_t) result->header.next;
                if (result == NULL)
                        panic("vm_map_create: out of maps");
+               kmap_free = (vm_map_t) result->header.next;
        } else
                MALLOC(result, vm_map_t, sizeof(struct vm_map),
                       M_VMMAP, M_WAITOK);
@@ -236,7 +237,7 @@ vm_map_init(map, min, max, pageable)
        map->first_free = &map->header;
        map->hint = &map->header;
        map->timestamp = 0;
-       lock_init(&map->lock, TRUE);
+       lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
        simple_lock_init(&map->ref_lock);
        simple_lock_init(&map->hint_lock);
 }
@@ -247,19 +248,28 @@ vm_map_init(map, min, max, pageable)
  *     Allocates a VM map entry for insertion.
  *     No entry fields are filled in.  This routine is
  */
-vm_map_entry_t vm_map_entry_create(map)
+vm_map_entry_t
+vm_map_entry_create(map)
        vm_map_t        map;
 {
        vm_map_entry_t  entry;
+#ifdef DEBUG
        extern vm_map_t         kernel_map, kmem_map, mb_map, pager_map;
+       boolean_t               isspecial;
 
-       if (map == kernel_map || map == kmem_map || map == mb_map ||
-           map == pager_map) {
-               if (entry = kentry_free)
-                       kentry_free = kentry_free->next;
-       } else
+       isspecial = (map == kernel_map || map == kmem_map ||
+                    map == mb_map || map == pager_map);
+       if (isspecial && map->entries_pageable ||
+           !isspecial && !map->entries_pageable)
+               panic("vm_map_entry_create: bogus map");
+#endif
+       if (map->entries_pageable) {
                MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
                       M_VMMAPENT, M_WAITOK);
+       } else {
+               if (entry = kentry_free)
+                       kentry_free = kentry_free->next;
+       }
        if (entry == NULL)
                panic("vm_map_entry_create: out of map entries");
 
@@ -271,18 +281,27 @@ vm_map_entry_t vm_map_entry_create(map)
  *
  *     Inverse of vm_map_entry_create.
  */
-void vm_map_entry_dispose(map, entry)
+void
+vm_map_entry_dispose(map, entry)
        vm_map_t        map;
        vm_map_entry_t  entry;
 {
+#ifdef DEBUG
        extern vm_map_t         kernel_map, kmem_map, mb_map, pager_map;
+       boolean_t               isspecial;
 
-       if (map == kernel_map || map == kmem_map || map == mb_map ||
-           map == pager_map) {
+       isspecial = (map == kernel_map || map == kmem_map ||
+                    map == mb_map || map == pager_map);
+       if (isspecial && map->entries_pageable ||
+           !isspecial && !map->entries_pageable)
+               panic("vm_map_entry_dispose: bogus map");
+#endif
+       if (map->entries_pageable) {
+               FREE(entry, M_VMMAPENT);
+       } else {
                entry->next = kentry_free;
                kentry_free = entry;
-       } else
-               FREE(entry, M_VMMAPENT);
+       }
 }
 
 /*
@@ -311,7 +330,8 @@ void vm_map_entry_dispose(map, entry)
  *     Creates another valid reference to the given map.
  *
  */
-void vm_map_reference(map)
+void
+vm_map_reference(map)
        register vm_map_t       map;
 {
        if (map == NULL)
@@ -329,7 +349,8 @@ void vm_map_reference(map)
  *     destroying it if no references remain.
  *     The map should not be locked.
  */
-void vm_map_deallocate(map)
+void
+vm_map_deallocate(map)
        register vm_map_t       map;
 {
        register int            c;
@@ -360,7 +381,7 @@ void vm_map_deallocate(map)
 }
 
 /*
- *     vm_map_insert:  [ internal use only ]
+ *     vm_map_insert:
  *
  *     Inserts the given whole VM object into the target
  *     map at the specified address range.  The object's
@@ -504,7 +525,8 @@ vm_map_insert(map, object, offset, start, end)
  *     result indicates whether the address is
  *     actually contained in the map.
  */
-boolean_t vm_map_lookup_entry(map, address, entry)
+boolean_t
+vm_map_lookup_entry(map, address, entry)
        register vm_map_t       map;
        register vm_offset_t    address;
        vm_map_entry_t          *entry;         /* OUT */
@@ -672,7 +694,8 @@ vm_map_find(map, object, offset, addr, length, find_space)
  *             removing extra sharing maps
  *             [XXX maybe later] merging with a neighbor
  */
-void vm_map_simplify_entry(map, entry)
+void
+vm_map_simplify_entry(map, entry)
        vm_map_t        map;
        vm_map_entry_t  entry;
 {
@@ -746,7 +769,8 @@ void vm_map_simplify_entry(map, entry)
  *     This routine is called only when it is known that
  *     the entry must be split.
  */
-static void _vm_map_clip_start(map, entry, start)
+static void
+_vm_map_clip_start(map, entry, start)
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    start;
@@ -800,7 +824,8 @@ static void _vm_map_clip_start(map, entry, start)
  *     This routine is called only when it is known that
  *     the entry must be split.
  */
-static void _vm_map_clip_end(map, entry, end)
+static void
+_vm_map_clip_end(map, entry, end)
        register vm_map_t       map;
        register vm_map_entry_t entry;
        register vm_offset_t    end;
@@ -1140,7 +1165,7 @@ vm_map_pageable(map, start, end, new_pageable)
                 *      If a region becomes completely unwired,
                 *      unwire its physical pages and mappings.
                 */
-               lock_set_recursive(&map->lock);
+               vm_map_set_recursive(&map->lock);
 
                entry = start_entry;
                while ((entry != &map->header) && (entry->start < end)) {
@@ -1152,7 +1177,7 @@ vm_map_pageable(map, start, end, new_pageable)
 
                    entry = entry->next;
                }
-               lock_clear_recursive(&map->lock);
+               vm_map_clear_recursive(&map->lock);
        }
 
        else {
@@ -1186,9 +1211,6 @@ vm_map_pageable(map, start, end, new_pageable)
                 *      Pass 1.
                 */
                while ((entry != &map->header) && (entry->start < end)) {
-#if 0
-                   vm_map_clip_end(map, entry, end);
-#endif
                    if (entry->wired_count == 0) {
 
                        /*
@@ -1264,8 +1286,8 @@ vm_map_pageable(map, start, end, new_pageable)
                    vm_map_unlock(map);         /* trust me ... */
                }
                else {
-                   lock_set_recursive(&map->lock);
-                   lock_write_to_read(&map->lock);
+                   vm_map_set_recursive(&map->lock);
+                   lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
                }
 
                rv = 0;
@@ -1296,7 +1318,7 @@ vm_map_pageable(map, start, end, new_pageable)
                    vm_map_lock(map);
                }
                else {
-                   lock_clear_recursive(&map->lock);
+                   vm_map_clear_recursive(&map->lock);
                }
                if (rv) {
                    vm_map_unlock(map);
@@ -1310,6 +1332,99 @@ vm_map_pageable(map, start, end, new_pageable)
        return(KERN_SUCCESS);
 }
 
+/*
+ * vm_map_clean
+ *
+ * Push any dirty cached pages in the address range to their pager.
+ * If syncio is TRUE, dirty pages are written synchronously.
+ * If invalidate is TRUE, any cached pages are freed as well.
+ *
+ * Returns an error if any part of the specified range is not mapped.
+ */
+int
+vm_map_clean(map, start, end, syncio, invalidate)
+       vm_map_t        map;
+       vm_offset_t     start;
+       vm_offset_t     end;
+       boolean_t       syncio;
+       boolean_t       invalidate;
+{
+       register vm_map_entry_t current;
+       vm_map_entry_t entry;
+       vm_size_t size;
+       vm_object_t object;
+       vm_offset_t offset;
+
+       vm_map_lock_read(map);
+       VM_MAP_RANGE_CHECK(map, start, end);
+       if (!vm_map_lookup_entry(map, start, &entry)) {
+               vm_map_unlock_read(map);
+               return(KERN_INVALID_ADDRESS);
+       }
+
+       /*
+        * Make a first pass to check for holes.
+        */
+       for (current = entry; current->start < end; current = current->next) {
+               if (current->is_sub_map) {
+                       vm_map_unlock_read(map);
+                       return(KERN_INVALID_ARGUMENT);
+               }
+               if (end > current->end &&
+                   (current->next == &map->header ||
+                    current->end != current->next->start)) {
+                       vm_map_unlock_read(map);
+                       return(KERN_INVALID_ADDRESS);
+               }
+       }
+
+       /*
+        * Make a second pass, cleaning/uncaching pages from the indicated
+        * objects as we go.
+        */
+       for (current = entry; current->start < end; current = current->next) {
+               offset = current->offset + (start - current->start);
+               size = (end <= current->end ? end : current->end) - start;
+               if (current->is_a_map) {
+                       register vm_map_t smap;
+                       vm_map_entry_t tentry;
+                       vm_size_t tsize;
+
+                       smap = current->object.share_map;
+                       vm_map_lock_read(smap);
+                       (void) vm_map_lookup_entry(smap, offset, &tentry);
+                       tsize = tentry->end - offset;
+                       if (tsize < size)
+                               size = tsize;
+                       object = tentry->object.vm_object;
+                       offset = tentry->offset + (offset - tentry->start);
+                       vm_object_lock(object);
+                       vm_map_unlock_read(smap);
+               } else {
+                       object = current->object.vm_object;
+                       vm_object_lock(object);
+               }
+               /*
+                * Flush pages if writing is allowed.
+                * XXX should we continue on an error?
+                */
+               if ((current->protection & VM_PROT_WRITE) &&
+                   !vm_object_page_clean(object, offset, offset+size,
+                                         syncio, FALSE)) {
+                       vm_object_unlock(object);
+                       vm_map_unlock_read(map);
+                       return(KERN_FAILURE);
+               }
+               if (invalidate)
+                       vm_object_page_remove(object, offset, offset+size);
+               vm_object_unlock(object);
+               start += size;
+       }
+
+       vm_map_unlock_read(map);
+       return(KERN_SUCCESS);
+}
+
 /*
  *     vm_map_entry_unwire:    [ internal use only ]
  *
@@ -1318,7 +1433,8 @@ vm_map_pageable(map, start, end, new_pageable)
  *     The map in question should be locked.
  *     [This is the reason for this routine's existence.]
  */
-void vm_map_entry_unwire(map, entry)
+void
+vm_map_entry_unwire(map, entry)
        vm_map_t                map;
        register vm_map_entry_t entry;
 {
@@ -1331,7 +1447,8 @@ void vm_map_entry_unwire(map, entry)
  *
  *     Deallocate the given entry from the target map.
  */            
-void vm_map_entry_delete(map, entry)
+void
+vm_map_entry_delete(map, entry)
        register vm_map_t       map;
        register vm_map_entry_t entry;
 {
@@ -1477,7 +1594,8 @@ vm_map_remove(map, start, end)
  *     privilege on the entire address region given.
  *     The entire region must be allocated.
  */
-boolean_t vm_map_check_protection(map, start, end, protection)
+boolean_t
+vm_map_check_protection(map, start, end, protection)
        register vm_map_t       map;
        register vm_offset_t    start;
        register vm_offset_t    end;
@@ -1527,7 +1645,8 @@ boolean_t vm_map_check_protection(map, start, end, protection)
  *     Copies the contents of the source entry to the destination
  *     entry.  The entries *must* be aligned properly.
  */
-void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
+void
+vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
        vm_map_t                src_map, dst_map;
        register vm_map_entry_t src_entry, dst_entry;
 {
@@ -1723,7 +1842,7 @@ vm_map_copy(dst_map, src_map,
        if (src_map == dst_map) {
                vm_map_lock(src_map);
        }
-       else if ((int) src_map < (int) dst_map) {
+       else if ((long) src_map < (long) dst_map) {
                vm_map_lock(src_map);
                vm_map_lock(dst_map);
        } else {
@@ -1852,7 +1971,7 @@ vm_map_copy(dst_map, src_map,
                        else {
                                new_src_map = src_map;
                                new_src_start = src_entry->start;
-                               lock_set_recursive(&src_map->lock);
+                               vm_map_set_recursive(&src_map->lock);
                        }
 
                        if (dst_entry->is_a_map) {
@@ -1890,7 +2009,7 @@ vm_map_copy(dst_map, src_map,
                        else {
                                new_dst_map = dst_map;
                                new_dst_start = dst_entry->start;
-                               lock_set_recursive(&dst_map->lock);
+                               vm_map_set_recursive(&dst_map->lock);
                        }
 
                        /*
@@ -1902,9 +2021,9 @@ vm_map_copy(dst_map, src_map,
                                FALSE, FALSE);
 
                        if (dst_map == new_dst_map)
-                               lock_clear_recursive(&dst_map->lock);
+                               vm_map_clear_recursive(&dst_map->lock);
                        if (src_map == new_src_map)
-                               lock_clear_recursive(&src_map->lock);
+                               vm_map_clear_recursive(&src_map->lock);
                }
 
                /*
@@ -2273,7 +2392,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                         *      share map to the new object.
                         */
 
-                       if (lock_read_to_write(&share_map->lock)) {
+                       if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+                                   (void *)0, LOCKPID)) {
                                if (share_map != map)
                                        vm_map_unlock_read(map);
                                goto RetryLookup;
@@ -2286,7 +2406,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                                
                        entry->needs_copy = FALSE;
                        
-                       lock_write_to_read(&share_map->lock);
+                       lockmgr(&share_map->lock, LK_DOWNGRADE,
+                               (void *)0, LOCKPID);
                }
                else {
                        /*
@@ -2303,7 +2424,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
         */
        if (entry->object.vm_object == NULL) {
 
-               if (lock_read_to_write(&share_map->lock)) {
+               if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+                               (void *)0, LOCKPID)) {
                        if (share_map != map)
                                vm_map_unlock_read(map);
                        goto RetryLookup;
@@ -2312,7 +2434,7 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
                entry->object.vm_object = vm_object_allocate(
                                        (vm_size_t)(entry->end - entry->start));
                entry->offset = 0;
-               lock_write_to_read(&share_map->lock);
+               lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
        }
 
        /*
@@ -2348,7 +2470,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
  *     (according to the handle returned by that lookup).
  */
 
-void vm_map_lookup_done(map, entry)
+void
+vm_map_lookup_done(map, entry)
        register vm_map_t       map;
        vm_map_entry_t          entry;
 {
@@ -2378,7 +2501,8 @@ void vm_map_lookup_done(map, entry)
  *             at allocation time because the adjacent entry
  *             is often wired down.
  */
-void vm_map_simplify(map, start)
+void
+vm_map_simplify(map, start)
        vm_map_t        map;
        vm_offset_t     start;
 {
@@ -2426,7 +2550,8 @@ void vm_map_simplify(map, start)
 /*
  *     vm_map_print:   [ debug ]
  */
-void vm_map_print(map, full)
+void
+vm_map_print(map, full)
        register vm_map_t       map;
        boolean_t               full;
 {