cleanup.
author: Mike Hibler <hibler@ucbvax.Berkeley.EDU>
Thu, 13 Jan 1994 08:07:34 +0000 (00:07 -0800)
committer: Mike Hibler <hibler@ucbvax.Berkeley.EDU>
Thu, 13 Jan 1994 08:07:34 +0000 (00:07 -0800)
SCCS-vsn: sys/vm/vm_user.c 8.2

usr/src/sys/vm/vm_user.c

index 31bcc31..3a8edd7 100644 (file)
@@ -7,7 +7,7 @@
  *
  * %sccs.include.redist.c%
  *
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_user.c   8.1 (Berkeley) %G%
+ *     @(#)vm_user.c   8.2 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -141,6 +141,42 @@ svm_protect(p, uap, retval)
        rv = vm_protect(uap->map, uap->addr, uap->size, uap->setmax, uap->prot);
        return((int)rv);
 }
        rv = vm_protect(uap->map, uap->addr, uap->size, uap->setmax, uap->prot);
        return((int)rv);
 }
+
+/*
+ *     vm_inherit sets the inheritence of the specified range in the
+ *     specified map.
+ */
+int
+vm_inherit(map, start, size, new_inheritance)
+       register vm_map_t       map;
+       vm_offset_t             start;
+       vm_size_t               size;
+       vm_inherit_t            new_inheritance;
+{
+       if (map == NULL)
+               return(KERN_INVALID_ARGUMENT);
+
+       return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
+}
+
+/*
+ *     vm_protect sets the protection of the specified range in the
+ *     specified map.
+ */
+
+int
+vm_protect(map, start, size, set_maximum, new_protection)
+       register vm_map_t       map;
+       vm_offset_t             start;
+       vm_size_t               size;
+       boolean_t               set_maximum;
+       vm_prot_t               new_protection;
+{
+       if (map == NULL)
+               return(KERN_INVALID_ARGUMENT);
+
+       return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
+}
 #endif
 
 /*
 #endif
 
 /*
@@ -169,8 +205,7 @@ vm_allocate(map, addr, size, anywhere)
                *addr = trunc_page(*addr);
        size = round_page(size);
 
                *addr = trunc_page(*addr);
        size = round_page(size);
 
-       result = vm_map_find(map, NULL, (vm_offset_t) 0, addr,
-                       size, anywhere);
+       result = vm_map_find(map, NULL, (vm_offset_t) 0, addr, size, anywhere);
 
        return(result);
 }
 
        return(result);
 }
@@ -195,37 +230,57 @@ vm_deallocate(map, start, size)
 }
 
 /*
 }
 
 /*
- *     vm_inherit sets the inheritence of the specified range in the
- *     specified map.
+ * Similar to vm_allocate but assigns an explicit pager.
  */
 int
  */
 int
-vm_inherit(map, start, size, new_inheritance)
+vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
        register vm_map_t       map;
        register vm_map_t       map;
-       vm_offset_t             start;
-       vm_size_t               size;
-       vm_inherit_t            new_inheritance;
+       register vm_offset_t    *addr;
+       register vm_size_t      size;
+       boolean_t               anywhere;
+       vm_pager_t              pager;
+       vm_offset_t             poffset;
+       boolean_t               internal;
 {
 {
+       register vm_object_t    object;
+       register int            result;
+
        if (map == NULL)
                return(KERN_INVALID_ARGUMENT);
 
        if (map == NULL)
                return(KERN_INVALID_ARGUMENT);
 
-       return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
-}
-
-/*
- *     vm_protect sets the protection of the specified range in the
- *     specified map.
- */
+       *addr = trunc_page(*addr);
+       size = round_page(size);
 
 
-int
-vm_protect(map, start, size, set_maximum, new_protection)
-       register vm_map_t       map;
-       vm_offset_t             start;
-       vm_size_t               size;
-       boolean_t               set_maximum;
-       vm_prot_t               new_protection;
-{
-       if (map == NULL)
-               return(KERN_INVALID_ARGUMENT);
+       /*
+        *      Lookup the pager/paging-space in the object cache.
+        *      If it's not there, then create a new object and cache
+        *      it.
+        */
+       object = vm_object_lookup(pager);
+       cnt.v_lookups++;
+       if (object == NULL) {
+               object = vm_object_allocate(size);
+               /*
+                * From Mike Hibler: "unnamed anonymous objects should never
+                * be on the hash list ... For now you can just change
+                * vm_allocate_with_pager to not do vm_object_enter if this
+                * is an internal object ..."
+                */
+               if (!internal)
+                       vm_object_enter(object, pager);
+       } else
+               cnt.v_hits++;
+       if (internal)
+               object->flags |= OBJ_INTERNAL;
+       else {
+               object->flags &= ~OBJ_INTERNAL;
+               cnt.v_nzfod -= atop(size);
+       }
 
 
-       return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
+       result = vm_map_find(map, object, poffset, addr, size, anywhere);
+       if (result != KERN_SUCCESS)
+               vm_object_deallocate(object);
+       else if (pager != NULL)
+               vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
+       return(result);
 }
 }