move definition of vm_inherit_t to vm.h
diff --git a/usr/src/sys/vm/vm_page.c b/usr/src/sys/vm/vm_page.c
index 37b2643..0c728e8 100644
--- a/usr/src/sys/vm/vm_page.c
+++ b/usr/src/sys/vm/vm_page.c
@@ -7,7 +7,7 @@
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_page.c   7.7 (Berkeley) %G%
+ *     @(#)vm_page.c   7.13 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  *     Resident memory management module.
  */
 
-#include "param.h"
+#include <sys/param.h>
+#include <sys/systm.h>
 
-#include "vm.h"
-#include "vm_map.h"
-#include "vm_page.h"
-#include "vm_pageout.h"
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_pageout.h>
 
 /*
  *     Associated with page of user-allocatable memory is a
@@ -63,6 +64,9 @@ queue_head_t  vm_page_queue_inactive;
 simple_lock_data_t     vm_page_queue_lock;
 simple_lock_data_t     vm_page_queue_free_lock;
 
+/* has physical page allocation been initialized? */
+boolean_t vm_page_startup_initialized;
+
 vm_page_t      vm_page_array;
 long           first_page;
 long           last_page;
@@ -78,18 +82,18 @@ int         page_shift;
  *     size.  Must be called before any use of page-size
  *     dependent functions.
  *
- *     Sets page_shift and page_mask from vm_stat.page_size.
+ *     Sets page_shift and page_mask from cnt.v_page_size.
  */
 void vm_set_page_size()
 {
 
-       if (vm_stat.page_size == 0)
-               vm_stat.page_size = DEFAULT_PAGE_SIZE;
-       page_mask = vm_stat.page_size - 1;
-       if ((page_mask & vm_stat.page_size) != 0)
+       if (cnt.v_page_size == 0)
+               cnt.v_page_size = DEFAULT_PAGE_SIZE;
+       page_mask = cnt.v_page_size - 1;
+       if ((page_mask & cnt.v_page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
        for (page_shift = 0; ; page_shift++)
-               if ((1 << page_shift) == vm_stat.page_size)
+               if ((1 << page_shift) == cnt.v_page_size)
                        break;
 }
 
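[Editor's note: the power-of-two check above relies on a power of two sharing no set bits with its predecessor. A minimal standalone sketch of the same logic; the DEFAULT_PAGE_SIZE value is an illustrative assumption, not taken from this tree:

    #include <stdio.h>
    #include <stdlib.h>

    #define DEFAULT_PAGE_SIZE 4096          /* assumed value for illustration */

    int
    main(void)
    {
            unsigned long page_size = DEFAULT_PAGE_SIZE;
            unsigned long page_mask = page_size - 1;
            int page_shift;

            /* a power of two has no bits in common with itself minus one */
            if ((page_mask & page_size) != 0) {
                    fprintf(stderr, "page size not a power of two\n");
                    exit(1);
            }
            /* find the shift such that 1 << page_shift == page_size */
            for (page_shift = 0; (1UL << page_shift) != page_size; page_shift++)
                    continue;
            printf("page_shift = %d, page_mask = %#lx\n", page_shift, page_mask);
            return (0);
    }

For a 4096-byte page this prints page_shift = 12 and page_mask = 0xfff.]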
@@ -103,19 +107,15 @@ void vm_set_page_size()
  *     for the object/offset-to-page hash table headers.
  *     Each page cell is initialized and placed on the free list.
  */
-vm_offset_t vm_page_startup(start, end, vaddr)
-       register vm_offset_t    start;
-       vm_offset_t     end;
-       register vm_offset_t    vaddr;
+void vm_page_startup(start, end)
+       vm_offset_t     *start;
+       vm_offset_t     *end;
 {
-       register vm_offset_t    mapped;
        register vm_page_t      m;
        register queue_t        bucket;
        vm_size_t               npages;
-       register vm_offset_t    new_start;
        int                     i;
        vm_offset_t             pa;
-
        extern  vm_offset_t     kentry_data;
        extern  vm_size_t       kentry_data_size;
 
@@ -137,7 +137,7 @@ vm_offset_t vm_page_startup(start, end, vaddr)
        queue_init(&vm_page_queue_inactive);
 
        /*
-        *      Allocate (and initialize) the hash table buckets.
+        *      Calculate the number of hash table buckets.
         *
         *      The number of buckets MUST BE a power of 2, and
         *      the actual value is the next power of 2 greater
@@ -147,27 +147,20 @@ vm_offset_t vm_page_startup(start, end, vaddr)
         *              This computation can be tweaked if desired.
         */
 
-       vm_page_buckets = (queue_t) vaddr;
-       bucket = vm_page_buckets;
        if (vm_page_bucket_count == 0) {
                vm_page_bucket_count = 1;
-               while (vm_page_bucket_count < atop(end - start))
+               while (vm_page_bucket_count < atop(*end - *start))
                        vm_page_bucket_count <<= 1;
        }
 
        vm_page_hash_mask = vm_page_bucket_count - 1;
 
        /*
-        *      Validate these addresses.
+        *      Allocate (and initialize) the hash table buckets.
         */
-
-       new_start = round_page(((queue_t)start) + vm_page_bucket_count);
-       mapped = vaddr;
-       vaddr = pmap_map(mapped, start, new_start,
-                       VM_PROT_READ|VM_PROT_WRITE);
-       start = new_start;
-       blkclr((caddr_t) mapped, vaddr - mapped);
-       mapped = vaddr;
+       vm_page_buckets = (queue_t) pmap_bootstrap_alloc(vm_page_bucket_count
+               * sizeof(struct queue_entry));
+       bucket = vm_page_buckets;
 
        for (i = vm_page_bucket_count; i--;) {
                queue_init(bucket);
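[Editor's note: rounding vm_page_bucket_count up to a power of two lets the lookup path reduce a hash value with a single mask instead of a modulo. A sketch of that relationship; the hash function below is a stand-in, though the kernel's actual vm_page_hash() macro combines the object pointer and offset in a similar spirit:

    unsigned long vm_page_bucket_count;  /* next power of two >= npages */
    unsigned long vm_page_hash_mask;     /* bucket_count - 1, all low bits set */

    void
    bucket_setup(unsigned long npages)
    {
            vm_page_bucket_count = 1;
            while (vm_page_bucket_count < npages)
                    vm_page_bucket_count <<= 1;
            vm_page_hash_mask = vm_page_bucket_count - 1;
    }

    unsigned long
    bucket_index(unsigned long object, unsigned long pageno)
    {
            /* masking is valid only because bucket_count is a power of two */
            return ((object + pageno) & vm_page_hash_mask);
    }
]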
@@ -177,10 +170,10 @@ vm_offset_t vm_page_startup(start, end, vaddr)
        simple_lock_init(&bucket_lock);
 
        /*
-        *      round (or truncate) the addresses to our page size.
+        *      Truncate the remainder of physical memory to our page size.
         */
 
-       end = trunc_page(end);
+       *end = trunc_page(*end);
 
        /*
         *      Pre-allocate maps and map entries that cannot be dynamically
@@ -196,19 +189,7 @@ vm_offset_t vm_page_startup(start, end, vaddr)
 
        kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
                           MAX_KMAPENT * sizeof(struct vm_map_entry);
-       kentry_data_size = round_page(kentry_data_size);
-       kentry_data = (vm_offset_t) vaddr;
-       vaddr += kentry_data_size;
-
-       /*
-        *      Validate these zone addresses.
-        */
-
-       new_start = start + (vaddr - mapped);
-       pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
-       blkclr((caddr_t) mapped, (vaddr - mapped));
-       mapped = vaddr;
-       start = new_start;
+       kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
 
        /*
         *      Compute the number of pages of memory that will be
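[Editor's note: all of the old code's hand-rolled bootstrap allocations (advance vaddr, pmap_map() the backing pages, blkclr() them) collapse into calls to pmap_bootstrap_alloc(), a machine-dependent routine that steals zeroed physical memory before the VM system is running. Its implementation is not part of this diff; a plausible sketch, assuming BSD-style avail_start/virtual_avail bootstrap cursors (names and details are assumptions modeled on period pmap modules):

    /* hypothetical pmap_bootstrap_alloc(); the real one is per-machine */
    void *
    pmap_bootstrap_alloc(int size)
    {
            extern boolean_t vm_page_startup_initialized;
            void *val;

            /* the flag set at the end of vm_page_startup() closes this door */
            if (vm_page_startup_initialized)
                    panic("pmap_bootstrap_alloc: too late");

            size = round_page(size);
            val = (void *) virtual_avail;

            /* map the stolen memory and advance both cursors */
            virtual_avail = pmap_map(virtual_avail, avail_start,
                avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
            avail_start += size;

            bzero(val, size);               /* callers expect zeroed memory */
            return (val);
    }
]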
@@ -216,16 +197,15 @@ vm_offset_t vm_page_startup(start, end, vaddr)
         *      of a page structure per page).
         */
 
-       vm_stat.free_count = npages =
-               (end - start)/(PAGE_SIZE + sizeof(struct vm_page));
+       cnt.v_free_count = npages =
+               (*end - *start)/(PAGE_SIZE + sizeof(struct vm_page));
 
        /*
-        *      Initialize the mem entry structures now, and
-        *      put them in the free queue.
+        *      Record the extent of physical memory that the
+        *      virtual memory system manages.
         */
 
-       m = vm_page_array = (vm_page_t) vaddr;
-       first_page = start;
+       first_page = *start;
        first_page += npages*sizeof(struct vm_page);
        first_page = atop(round_page(first_page));
        last_page  = first_page + npages - 1;
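[Editor's note: each page the system will manage costs PAGE_SIZE bytes of usable memory plus one struct vm_page of bookkeeping, which is why the divisor is their sum. Assuming illustrative sizes of 4096 bytes per page and 64 bytes per struct vm_page, 4 MB of remaining physical memory gives:

    npages        = 4194304 / (4096 + 64) = 1008 pages
    vm_page array = 1008 * 64 = 64512 bytes  (rounds up to 16 pages)

so first_page is advanced past those 16 pages of array before the free list is built.]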
@@ -234,24 +214,17 @@ vm_offset_t vm_page_startup(start, end, vaddr)
        last_phys_addr  = ptoa(last_page) + PAGE_MASK;
 
 
-#ifdef i386
-       /* XXX - waiting for pmap_bootstrap_malloc() (or somebody like him) */
-       if (first_phys_addr > 0xa0000)
-               panic("vm_page_startup: fell into the hole");
-#endif
        /*
-        *      Validate these addresses.
+        *      Allocate and clear the mem entry structures.
         */
 
-       new_start = start + (round_page(m + npages) - mapped);
-       mapped = pmap_map(mapped, start, new_start,
-                       VM_PROT_READ|VM_PROT_WRITE);
-       start = new_start;
+       m = vm_page_array = (vm_page_t)
+               pmap_bootstrap_alloc(npages * sizeof(struct vm_page));
 
        /*
-        *      Clear all of the page structures
+        *      Initialize the mem entry structures now, and
+        *      put them in the free queue.
         */
-       blkclr((caddr_t)m, npages * sizeof(*m));
 
        pa = first_phys_addr;
        while (npages--) {
@@ -269,7 +242,7 @@ vm_offset_t vm_page_startup(start, end, vaddr)
                        /* perhaps iomem needs it's own type, or dev pager? */
                        m->fictitious = 1;
                        m->busy = TRUE;
-                       vm_stat.free_count--;
+                       cnt.v_free_count--;
                }
 #else /* i386 */
                queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
@@ -284,7 +257,8 @@ vm_offset_t vm_page_startup(start, end, vaddr)
         */
        simple_lock_init(&vm_pages_needed_lock);
 
-       return(mapped);
+       /* from now on, pmap_bootstrap_alloc can't be used */
+       vm_page_startup_initialized = TRUE;
 }
 
 /*
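[Editor's note: because the bucket, map-entry, and vm_page array allocations now go through pmap_bootstrap_alloc(), vm_page_startup() no longer needs a kernel virtual address to map things at and no longer returns one; it instead takes start and end by reference so the caller learns how much memory was consumed. A hypothetical machine-dependent call site (variable names illustrative) would change roughly like this:

    -       virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail);
    +       vm_page_startup(&avail_start, &avail_end);
]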
@@ -354,6 +328,7 @@ void vm_page_insert(mem, object, offset)
 
 /*
  *     vm_page_remove:         [ internal use only ]
+ *                             NOTE: used by device pager as well -wfj
  *
  *     Removes the given mem entry from the object/offset-page
  *     table and the object page list.
@@ -463,52 +438,6 @@ void vm_page_rename(mem, new_object, new_offset)
        vm_page_unlock_queues();
 }
 
-void           vm_page_init(mem, object, offset)
-       vm_page_t       mem;
-       vm_object_t     object;
-       vm_offset_t     offset;
-{
-#ifdef DEBUG
-#define        vm_page_init(mem, object, offset)  {\
-               (mem)->busy = TRUE; \
-               (mem)->tabled = FALSE; \
-               vm_page_insert((mem), (object), (offset)); \
-               (mem)->absent = FALSE; \
-               (mem)->fictitious = FALSE; \
-               (mem)->page_lock = VM_PROT_NONE; \
-               (mem)->unlock_request = VM_PROT_NONE; \
-               (mem)->laundry = FALSE; \
-               (mem)->active = FALSE; \
-               (mem)->inactive = FALSE; \
-               (mem)->wire_count = 0; \
-               (mem)->clean = TRUE; \
-               (mem)->copy_on_write = FALSE; \
-               (mem)->fake = TRUE; \
-               (mem)->pagerowned = FALSE; \
-               (mem)->ptpage = FALSE; \
-       }
-#else
-#define        vm_page_init(mem, object, offset)  {\
-               (mem)->busy = TRUE; \
-               (mem)->tabled = FALSE; \
-               vm_page_insert((mem), (object), (offset)); \
-               (mem)->absent = FALSE; \
-               (mem)->fictitious = FALSE; \
-               (mem)->page_lock = VM_PROT_NONE; \
-               (mem)->unlock_request = VM_PROT_NONE; \
-               (mem)->laundry = FALSE; \
-               (mem)->active = FALSE; \
-               (mem)->inactive = FALSE; \
-               (mem)->wire_count = 0; \
-               (mem)->clean = TRUE; \
-               (mem)->copy_on_write = FALSE; \
-               (mem)->fake = TRUE; \
-       }
-#endif
-
-       vm_page_init(mem, object, offset);
-}
-
 /*
  *     vm_page_alloc:
  *
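[Editor's note: the deleted vm_page_init() wrapper existed only to expand a local macro of the same name; callers now use VM_PAGE_INIT() directly (see the vm_page_alloc() hunk below). The macro is presumably defined in vm_page.h with the same body as the deleted non-DEBUG variant; a sketch reconstructed from the removed lines, not verified against the header:

    /* reconstructed from the deleted function body */
    #define VM_PAGE_INIT(mem, object, offset) { \
            (mem)->busy = TRUE; \
            (mem)->tabled = FALSE; \
            vm_page_insert((mem), (object), (offset)); \
            (mem)->absent = FALSE; \
            (mem)->fictitious = FALSE; \
            (mem)->page_lock = VM_PROT_NONE; \
            (mem)->unlock_request = VM_PROT_NONE; \
            (mem)->laundry = FALSE; \
            (mem)->active = FALSE; \
            (mem)->inactive = FALSE; \
            (mem)->wire_count = 0; \
            (mem)->clean = TRUE; \
            (mem)->copy_on_write = FALSE; \
            (mem)->fake = TRUE; \
    }
]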
@@ -534,11 +463,11 @@ vm_page_t vm_page_alloc(object, offset)
 
        queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);
 
-       vm_stat.free_count--;
+       cnt.v_free_count--;
        simple_unlock(&vm_page_queue_free_lock);
        splx(spl);
 
-       vm_page_init(mem, object, offset);
+       VM_PAGE_INIT(mem, object, offset);
 
        /*
         *      Decide if we should poke the pageout daemon.
@@ -551,11 +480,11 @@ vm_page_t vm_page_alloc(object, offset)
         *      it doesn't really matter.
         */
 
-       if ((vm_stat.free_count < vm_stat.free_min) ||
-                       ((vm_stat.free_count < vm_stat.free_target) &&
-                       (vm_stat.inactive_count < vm_stat.inactive_target)))
+       if (cnt.v_free_count < cnt.v_free_min ||
+           (cnt.v_free_count < cnt.v_free_target &&
+            cnt.v_inactive_count < cnt.v_inactive_target))
                thread_wakeup((int)&vm_pages_needed);
-       return(mem);
+       return (mem);
 }
 
 /*
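[Editor's note: the reformatted condition wakes the pageout daemon when free memory is below the hard minimum, or below the target while the inactive queue is also under its target. With illustrative thresholds v_free_min = 64, v_free_target = 128, v_inactive_target = 256:

    free =  50, inactive = 300  ->  wakeup    (under free_min)
    free = 100, inactive = 200  ->  wakeup    (under free_target, inactive short)
    free = 100, inactive = 300  ->  no wakeup (inactive pages can be reclaimed first)
    free = 200, inactive =   0  ->  no wakeup (free memory is plentiful)
]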
@@ -573,13 +502,13 @@ void vm_page_free(mem)
        if (mem->active) {
                queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
                mem->active = FALSE;
-               vm_stat.active_count--;
+               cnt.v_active_count--;
        }
 
        if (mem->inactive) {
                queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
                mem->inactive = FALSE;
-               vm_stat.inactive_count--;
+               cnt.v_inactive_count--;
        }
 
        if (!mem->fictitious) {
@@ -589,7 +518,7 @@ void vm_page_free(mem)
                simple_lock(&vm_page_queue_free_lock);
                queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);
 
-               vm_stat.free_count++;
+               cnt.v_free_count++;
                simple_unlock(&vm_page_queue_free_lock);
                splx(spl);
        }
@@ -613,16 +542,16 @@ void vm_page_wire(mem)
                if (mem->active) {
                        queue_remove(&vm_page_queue_active, mem, vm_page_t,
                                                pageq);
-                       vm_stat.active_count--;
+                       cnt.v_active_count--;
                        mem->active = FALSE;
                }
                if (mem->inactive) {
                        queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
                                                pageq);
-                       vm_stat.inactive_count--;
+                       cnt.v_inactive_count--;
                        mem->inactive = FALSE;
                }
-               vm_stat.wire_count++;
+               cnt.v_wire_count++;
        }
        mem->wire_count++;
 }
@@ -643,9 +572,9 @@ void vm_page_unwire(mem)
        mem->wire_count--;
        if (mem->wire_count == 0) {
                queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
-               vm_stat.active_count++;
+               cnt.v_active_count++;
                mem->active = TRUE;
-               vm_stat.wire_count--;
+               cnt.v_wire_count--;
        }
 }
 
@@ -674,8 +603,8 @@ void vm_page_deactivate(m)
                queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
                m->active = FALSE;
                m->inactive = TRUE;
-               vm_stat.active_count--;
-               vm_stat.inactive_count++;
+               cnt.v_active_count--;
+               cnt.v_inactive_count++;
                if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
                        m->clean = FALSE;
                m->laundry = !m->clean;
@@ -698,7 +627,7 @@ void vm_page_activate(m)
        if (m->inactive) {
                queue_remove(&vm_page_queue_inactive, m, vm_page_t,
                                                pageq);
-               vm_stat.inactive_count--;
+               cnt.v_inactive_count--;
                m->inactive = FALSE;
        }
        if (m->wire_count == 0) {
@@ -707,7 +636,7 @@ void vm_page_activate(m)
 
                queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
                m->active = TRUE;
-               vm_stat.active_count++;
+               cnt.v_active_count++;
        }
 }
 
@@ -724,6 +653,7 @@ boolean_t vm_page_zero_fill(m)
 {
        VM_PAGE_CHECK(m);
 
+       m->clean = 0;
        pmap_zero_page(VM_PAGE_TO_PHYS(m));
        return(TRUE);
 }
@@ -741,5 +671,6 @@ void vm_page_copy(src_m, dest_m)
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dest_m);
 
+       dest_m->clean = 0;
        pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
 }
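[Editor's note: the two m->clean = 0 additions close a dirty-tracking gap: pmap_zero_page() and pmap_copy_page() write the page through a temporary kernel mapping, so pmap_is_modified() checks against the page's ordinary mappings may never observe the write. Marking the page dirty by hand keeps the pageout path from discarding contents that exist nowhere else. A hypothetical caller, for illustration:

    /* fault-handler sketch: a zero-filled page is now born dirty */
    m = vm_page_alloc(object, offset);
    (void) vm_page_zero_fill(m);    /* zeroes the page and clears m->clean */
    /* pageout will now write the page to backing store rather than drop it */
]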