Add extra argument to VOP_BMAP.
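
Although the commit message names only the VOP_BMAP interface change, the vm_fault.c hunks below are mostly a mechanical conversion: the separate boolean members of the page structure (busy, absent, active, inactive, fake, clean, copy_on_write) are collapsed into a single flags word that is tested and updated through PG_* bit masks. The following is a minimal, hypothetical sketch of that idiom; the names and values are simplified placeholders, not the real vm_page.h declarations.

    /* Hypothetical, simplified illustration of the flags idiom used below. */
    #define PG_BUSY   0x0001        /* page is being brought in */
    #define PG_CLEAN  0x0002        /* page has not been modified */
    #define PG_ACTIVE 0x0004        /* page is on the active queue */

    struct page {
            int     flags;          /* replaces the old boolean members */
    };

    static void
    example(struct page *p)
    {
            if (p->flags & PG_BUSY)         /* was: if (p->busy) */
                    return;
            p->flags |= PG_BUSY;            /* was: p->busy = TRUE; */
            p->flags &= ~PG_CLEAN;          /* was: p->clean = FALSE; */
    }
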
diff --git a/usr/src/sys/vm/vm_fault.c b/usr/src/sys/vm/vm_fault.c
index 46dc983..6198126 100644
--- a/usr/src/sys/vm/vm_fault.c
+++ b/usr/src/sys/vm/vm_fault.c
@@ -7,7 +7,7 @@
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_fault.c  7.11 (Berkeley) %G%
+ *     @(#)vm_fault.c  7.15 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -214,7 +214,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                         *      If the page is being brought in,
                         *      wait for it and then retry.
                         */
-                       if (m->busy) {
+                       if (m->flags & PG_BUSY) {
 #ifdef DOTHREADS
                                int     wait_result;
 
@@ -235,7 +235,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
 #endif
                        }
 
-                       if (m->absent)
+                       if (m->flags & PG_ABSENT)
                                panic("vm_fault: absent");
 
                        /*
@@ -276,18 +276,18 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                         */
 
                        vm_page_lock_queues();
-                       if (m->inactive) {
+                       if (m->flags & PG_INACTIVE) {
                                queue_remove(&vm_page_queue_inactive, m,
                                                vm_page_t, pageq);
-                               m->inactive = FALSE;
+                               m->flags &= ~PG_INACTIVE;
                                cnt.v_inactive_count--;
                                cnt.v_reactivated++;
                        } 
 
-                       if (m->active) {
+                       if (m->flags & PG_ACTIVE) {
                                queue_remove(&vm_page_queue_active, m,
                                                vm_page_t, pageq);
-                               m->active = FALSE;
+                               m->flags &= ~PG_ACTIVE;
                                cnt.v_active_count--;
                        }
                        vm_page_unlock_queues();
@@ -295,8 +295,8 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                        /*
                         *      Mark page busy for other threads.
                         */
-                       m->busy = TRUE;
-                       m->absent = FALSE;
+                       m->flags |= PG_BUSY;
+                       m->flags &= ~PG_ABSENT;
                        break;
                }
 
@@ -318,8 +318,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                        }
                }
 
-               if ((object->pager != NULL) &&
-                               (!change_wiring || wired)) {
+               if (object->pager != NULL && (!change_wiring || wired)) {
                        int rv;
 
                        /*
@@ -333,15 +332,19 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                         *      after releasing the lock on the map.
                         */
                        UNLOCK_MAP;
-
                        rv = vm_pager_get(object->pager, m, TRUE);
-                       if (rv == VM_PAGER_OK) {
-                               /*
-                                *      Found the page.
-                                *      Leave it busy while we play with it.
-                                */
-                               vm_object_lock(object);
 
+                       /*
+                        *      Reacquire the object lock to preserve our
+                        *      invariant.
+                        */
+                       vm_object_lock(object);
+
+                       /*
+                        *      Found the page.
+                        *      Leave it busy while we play with it.
+                        */
+                       if (rv == VM_PAGER_OK) {
                                /*
                                 *      Relookup in case pager changed page.
                                 *      Pager is responsible for disposition
@@ -350,44 +353,37 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                                m = vm_page_lookup(object, offset);
 
                                cnt.v_pageins++;
-                               m->fake = FALSE;
-                               m->clean = TRUE;
+                               m->flags &= ~PG_FAKE;
+                               m->flags |= PG_CLEAN;
                                pmap_clear_modify(VM_PAGE_TO_PHYS(m));
                                break;
                        }
 
                        /*
-                        *      Remove the bogus page (which does not
-                        *      exist at this object/offset); before
-                        *      doing so, we must get back our object
-                        *      lock to preserve our invariant.
-                        *
-                        *      Also wake up any other thread that may want
-                        *      to bring in this page.
-                        *
-                        *      If this is the top-level object, we must
-                        *      leave the busy page to prevent another
-                        *      thread from rushing past us, and inserting
-                        *      the page in that object at the same time
-                        *      that we are.
-                        */
-
-                       vm_object_lock(object);
-                       /*
-                        * Data outside the range of the pager; an error
+                        * IO error or page outside the range of the pager:
+                        * cleanup and return an error.
                         */
-                       if (rv == VM_PAGER_BAD) {
+                       if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
                                FREE_PAGE(m);
                                UNLOCK_AND_DEALLOCATE;
                                return(KERN_PROTECTION_FAILURE); /* XXX */
                        }
+                       /*
+                        * rv == VM_PAGER_FAIL:
+                        *
+                        * Page does not exist at this object/offset.
+                        * Free the bogus page (waking up anyone waiting
+                        * for it) and continue on to the next object.
+                        *
+                        * If this is the top-level object, we must
+                        * leave the busy page to prevent another
+                        * thread from rushing past us, and inserting
+                        * the page in that object at the same time
+                        * that we are.
+                        */
                        if (object != first_object) {
                                FREE_PAGE(m);
-                               /*
-                                * XXX - we cannot just fall out at this
-                                * point, m has been freed and is invalid!
-                                */
-                               panic("vm_fault: free page"); /* XXX */
+                               /* note that `m' is not used after this */
                        }
                }
 
@@ -423,8 +419,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
 
                        vm_page_zero_fill(m);
                        cnt.v_zfod++;
-                       m->fake = FALSE;
-                       m->absent = FALSE;
+                       m->flags &= ~(PG_FAKE | PG_ABSENT);
                        break;
                }
                else {
@@ -437,7 +432,8 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                }
        }
 
-       if (m->absent || m->active || m->inactive || !m->busy)
+       if ((m->flags & (PG_ABSENT | PG_ACTIVE | PG_INACTIVE)) ||
+           !(m->flags & PG_BUSY))
                panic("vm_fault: absent or active or inactive or not busy after main loop");
 
        /*
@@ -485,8 +481,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                         */
 
                        vm_page_copy(m, first_m);
-                       first_m->fake = FALSE;
-                       first_m->absent = FALSE;
+                       first_m->flags &= ~(PG_FAKE | PG_ABSENT);
 
                        /*
                         *      If another map is truly sharing this
@@ -537,11 +532,11 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                }
                else {
                        prot &= (~VM_PROT_WRITE);
-                       m->copy_on_write = TRUE;
+                       m->flags |= PG_COPYONWRITE;
                }
        }
 
-       if (m->active || m->inactive)
+       if (m->flags & (PG_ACTIVE | PG_INACTIVE))
                panic("vm_fault: active or inactive before copy object handling");
 
        /*
@@ -559,7 +554,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                 */
                if ((fault_type & VM_PROT_WRITE) == 0) {
                        prot &= ~VM_PROT_WRITE;
-                       m->copy_on_write = TRUE;
+                       m->flags |= PG_COPYONWRITE;
                }
                else {
                        /*
@@ -586,7 +581,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                                - copy_object->shadow_offset;
                        copy_m = vm_page_lookup(copy_object, copy_offset);
                        if (page_exists = (copy_m != NULL)) {
-                               if (copy_m->busy) {
+                               if (copy_m->flags & PG_BUSY) {
 #ifdef DOTHREADS
                                        int     wait_result;
 
@@ -702,8 +697,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                                 *      Must copy page into copy-object.
                                 */
                                vm_page_copy(m, copy_m);
-                               copy_m->fake = FALSE;
-                               copy_m->absent = FALSE;
+                               copy_m->flags &= ~(PG_FAKE | PG_ABSENT);
 
                                /*
                                 * Things to remember:
@@ -718,7 +712,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                                vm_page_lock_queues();
                                pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
                                                  VM_PROT_NONE);
-                               copy_m->clean = FALSE;
+                               copy_m->flags &= ~PG_CLEAN;
                                vm_page_activate(copy_m);       /* XXX */
                                vm_page_unlock_queues();
 
@@ -733,11 +727,11 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                         */
                        copy_object->ref_count--;
                        vm_object_unlock(copy_object);
-                       m->copy_on_write = FALSE;
+                       m->flags &= ~PG_COPYONWRITE;
                }
        }
 
-       if (m->active || m->inactive)
+       if (m->flags & (PG_ACTIVE | PG_INACTIVE))
                panic("vm_fault: active or inactive before retrying lookup");
 
        /*
@@ -802,7 +796,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                 *      can't mark the page write-enabled after all.
                 */
                prot &= retry_prot;
-               if (m->copy_on_write)
+               if (m->flags & PG_COPYONWRITE)
                        prot &= ~VM_PROT_WRITE;
        }
 
@@ -814,14 +808,14 @@ vm_fault(map, vaddr, fault_type, change_wiring)
        /* XXX This distorts the meaning of the copy_on_write bit */
 
        if (prot & VM_PROT_WRITE)
-               m->copy_on_write = FALSE;
+               m->flags &= ~PG_COPYONWRITE;
 
        /*
         *      It's critically important that a wired-down page be faulted
         *      only once in each map for which it is wired.
         */
 
-       if (m->active || m->inactive)
+       if (m->flags & (PG_ACTIVE | PG_INACTIVE))
                panic("vm_fault: active or inactive before pmap_enter");
 
        vm_object_unlock(object);
                panic("vm_fault: active or inactive before pmap_enter");
 
        vm_object_unlock(object);
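
Read together, the pager hunks above leave the main fault loop's pager handling in roughly the shape sketched below. This is a condensed, hypothetical paraphrase of the code shown in the diff, not a standalone or literal excerpt; FREE_PAGE, UNLOCK_AND_DEALLOCATE, and the surrounding variables are the macros and locals of vm_fault() itself.

    rv = vm_pager_get(object->pager, m, TRUE);
    vm_object_lock(object);         /* reacquire the object lock first */

    if (rv == VM_PAGER_OK) {
            /* The pager may have replaced the page; look it up again. */
            m = vm_page_lookup(object, offset);
            m->flags &= ~PG_FAKE;
            m->flags |= PG_CLEAN;
    } else if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
            /* I/O error, or the offset lies outside the pager's range. */
            FREE_PAGE(m);
            UNLOCK_AND_DEALLOCATE;
            return (KERN_PROTECTION_FAILURE);       /* XXX */
    } else {
            /*
             * VM_PAGER_FAIL: no page exists at this object/offset.
             * Free the bogus page and move on to the next object,
             * unless this is the top-level object, whose busy page
             * must stay to keep other threads from racing past us.
             */
            if (object != first_object)
                    FREE_PAGE(m);
    }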