Update to handle fragments correctly (set bsize to be fragment
diff --git a/usr/src/sys/vm/vm_fault.c b/usr/src/sys/vm/vm_fault.c
index 3c90adf..7908ec9 100644
--- a/usr/src/sys/vm/vm_fault.c
+++ b/usr/src/sys/vm/vm_fault.c
@@ -7,7 +7,7 @@
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_fault.c  8.3 (Berkeley) %G%
+ *     @(#)vm_fault.c  8.6 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -88,7 +88,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
        vm_page_t               old_m;
        vm_object_t             next_object;
 
-       cnt.v_vm_faults++;              /* needs lock XXX */
+       cnt.v_faults++;         /* needs lock XXX */
 /*
  *     Recovery actions
  */
@@ -229,6 +229,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
 #else
                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
+                               cnt.v_intrans++;
                                thread_block();
                                vm_object_deallocate(first_object);
                                goto RetryFault;
@@ -294,6 +295,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                         *      after releasing the lock on the map.
                         */
                        UNLOCK_MAP;
+                       cnt.v_pageins++;
                        rv = vm_pager_get(object->pager, m, TRUE);
 
                        /*
@@ -314,7 +316,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                                 */
                                m = vm_page_lookup(object, offset);
 
-                               cnt.v_pageins++;
+                               cnt.v_pgpgin++;
                                m->flags &= ~PG_FAKE;
                                m->flags |= PG_CLEAN;
                                pmap_clear_modify(VM_PAGE_TO_PHYS(m));
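
Taken together, the counter changes above split fault accounting along 4.4BSD's vmmeter lines: v_faults counts every entry to vm_fault(), v_intrans counts only the faults that blocked on a page already in transit, v_pageins counts pager read operations (it is now bumped before vm_pager_get() so failed reads are counted as well), and v_pgpgin counts the pages those operations actually brought in. A minimal sketch of the fields involved, assuming the 4.4BSD sys/vmmeter.h names; the real structure has many more fields and may differ in type and order:

	struct vmmeter {
		unsigned int	v_faults;	/* calls to vm_fault() */
		unsigned int	v_intrans;	/* faults that blocked on an in-transit page */
		unsigned int	v_pageins;	/* pagein operations (vm_pager_get() calls) */
		unsigned int	v_pgpgin;	/* pages paged in by those operations */
		/* ... remaining statistics elided ... */
	};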
@@ -460,8 +462,23 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                        vm_page_lock_queues();
                        vm_page_activate(m);
                        vm_page_deactivate(m);
-                       pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
                        vm_page_unlock_queues();
+                       /*
+                        * XXX gag!  The page protect has been moved out
+                        * of the page queue lock section to avoid a deadlock
+                        * in the hp300-style (recursive) pmap module.
+                        * If you were on an MP, p_p_protect might result
+                        * in a vm_map_pageable(..., TRUE) for the associated
+                        * page table page.  This would call vm_fault_unwire
+                        * which would try to lock the page queues.
+                        * Moving the call out is safe here because the
+                        * object is still locked and that will prevent
+                        * the pageout daemon from messing with this page
+                        * on the inactive list.  (It would move it back to
+                        * the active list if it were referenced but
+                        * v_p_deallocate clears the ref bit).
+                        */
+                       pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
 
                        /*
                         *      We no longer need the old page or object.
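
The comment added above is the heart of this hunk: with the page queues still locked, an hp300-style recursive pmap could, on a multiprocessor, have pmap_page_protect() trigger a vm_map_pageable(..., TRUE) on the associated page table page, whose vm_fault_unwire() would then try to take the page queue lock a second time. A schematic of the cycle, paraphrased from the comment rather than taken from the kernel source:

	/*
	 * Old ordering (deadlocks on an MP with a recursive pmap):
	 *
	 *   vm_page_lock_queues();               page queue lock held
	 *   pmap_page_protect(pa, VM_PROT_NONE);
	 *     -> vm_map_pageable(..., TRUE)      page table page unwired
	 *       -> vm_fault_unwire(map, s, e)
	 *         -> vm_page_lock_queues();      second acquisition: deadlock
	 *
	 * New ordering: the call runs after vm_page_unlock_queues(); the
	 * still-held object lock keeps the pageout daemon away from the
	 * page while it sits on the inactive queue.
	 */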
@@ -493,7 +510,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
                        object->paging_in_progress++;
                }
                else {
-                       prot &= (~VM_PROT_WRITE);
+                       prot &= ~VM_PROT_WRITE;
                        m->flags |= PG_COPYONWRITE;
                }
        }
@@ -865,7 +882,8 @@ vm_fault_wire(map, start, end)
  *
  *     Unwire a range of virtual addresses in a map.
  */
-void vm_fault_unwire(map, start, end)
+void
+vm_fault_unwire(map, start, end)
        vm_map_t        map;
        vm_offset_t     start, end;
 {
@@ -914,7 +932,8 @@ void vm_fault_unwire(map, start, end)
  *             entry corresponding to a main map entry that is wired down).
  */
 
-void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
+void
+vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
        vm_map_t        dst_map;
        vm_map_t        src_map;
        vm_map_entry_t  dst_entry;
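
The two function-definition hunks are purely stylistic: moving the return type onto its own line is long-standing BSD kernel style, which keeps the function name in column one so a definition can be found mechanically. An illustrative search, assuming the file layout after this diff:

	grep -n '^vm_fault_unwire' vm_fault.c

matches only the definition; with the old one-line form ("void vm_fault_unwire(...)") the anchored pattern finds nothing.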