cleanup function prototypes
[unix-history] usr/src/sys/vm/vm_page.h
index 2b9f02c..f3a5ebb 100644
@@ -7,7 +7,7 @@
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_page.h   7.3 (Berkeley) %G%
+ *     @(#)vm_page.h   7.7 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -108,8 +108,6 @@ struct vm_page {
        vm_prot_t       unlock_request; /* Outstanding unlock request */
 };
 
-typedef struct vm_page *vm_page_t;
-
 #if    VM_PAGE_DEBUG
 #define        VM_PAGE_CHECK(mem) { \
                if ( (((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
@@ -121,7 +119,7 @@ typedef struct vm_page      *vm_page_t;
 #define        VM_PAGE_CHECK(mem)
 #endif VM_PAGE_DEBUG
 
-#ifdef KERNEL
+#ifdef KERNEL
 /*
  *     Each pageable resident page falls into one of three lists:
  *
@@ -159,25 +157,6 @@ vm_offset_t        first_phys_addr;        /* physical address for first_page */
 extern
 vm_offset_t    last_phys_addr;         /* physical address for last_page */
 
-extern
-int    vm_page_free_count;     /* How many pages are free? */
-extern
-int    vm_page_active_count;   /* How many pages are active? */
-extern
-int    vm_page_inactive_count; /* How many pages are inactive? */
-extern
-int    vm_page_wire_count;     /* How many pages are wired? */
-extern
-int    vm_page_free_target;    /* How many do we want free? */
-extern
-int    vm_page_free_min;       /* When to wakeup pageout */
-extern
-int    vm_page_inactive_target;/* How many do we want inactive? */
-extern
-int    vm_page_free_reserved;  /* How many pages reserved to do pageout */
-extern
-int    vm_page_laundry_count;  /* How many pages being laundered? */
-
 #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
 
 #define IS_VM_PHYSADDR(pa) \
@@ -189,26 +168,8 @@ int        vm_page_laundry_count;  /* How many pages being laundered? */
 extern
 simple_lock_data_t     vm_page_queue_lock;     /* lock on active and inactive
                                                   page queues */
-extern
+extern                                         /* lock on free page queue */
 simple_lock_data_t     vm_page_queue_free_lock;
-                                               /* lock on free page queue */
-vm_offset_t    vm_page_startup();
-vm_page_t      vm_page_lookup();
-vm_page_t      vm_page_alloc();
-void           vm_page_init();
-void           vm_page_free();
-void           vm_page_activate();
-void           vm_page_deactivate();
-void           vm_page_rename();
-void           vm_page_replace();
-
-boolean_t      vm_page_zero_fill();
-void           vm_page_copy();
-
-void           vm_page_wire();
-void           vm_page_unwire();
-
-void           vm_set_page_size();
 
 /*
  *     Functions implemented as macros
@@ -231,5 +192,44 @@ void               vm_set_page_size();
 #define        vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
 
 #define vm_page_set_modified(m)        { (m)->clean = FALSE; }
-#endif KERNEL
-#endif _VM_PAGE_
+
+#ifdef DEBUG
+#define        VM_PAGE_DEBUG_INIT(m) ((m)->pagerowned = 0, (m)->ptpage = 0)
+#else
+#define        VM_PAGE_DEBUG_INIT(m)
+#endif
+
+#define        VM_PAGE_INIT(mem, object, offset) { \
+       (mem)->busy = TRUE; \
+       (mem)->tabled = FALSE; \
+       vm_page_insert((mem), (object), (offset)); \
+       (mem)->absent = FALSE; \
+       (mem)->fictitious = FALSE; \
+       (mem)->page_lock = VM_PROT_NONE; \
+       (mem)->unlock_request = VM_PROT_NONE; \
+       (mem)->laundry = FALSE; \
+       (mem)->active = FALSE; \
+       (mem)->inactive = FALSE; \
+       (mem)->wire_count = 0; \
+       (mem)->clean = TRUE; \
+       (mem)->copy_on_write = FALSE; \
+       (mem)->fake = TRUE; \
+       VM_PAGE_DEBUG_INIT(mem); \
+}
+
+void            vm_page_activate __P((vm_page_t));
+vm_page_t       vm_page_alloc __P((vm_object_t, vm_offset_t));
+void            vm_page_copy __P((vm_page_t, vm_page_t));
+void            vm_page_deactivate __P((vm_page_t));
+void            vm_page_free __P((vm_page_t));
+void            vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
+vm_page_t       vm_page_lookup __P((vm_object_t, vm_offset_t));
+void            vm_page_remove __P((vm_page_t));
+void            vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
+void            vm_page_startup __P((vm_offset_t *, vm_offset_t *));
+void            vm_page_unwire __P((vm_page_t));
+void            vm_page_wire __P((vm_page_t));
+boolean_t       vm_page_zero_fill __P((vm_page_t));
+
+#endif /* KERNEL */
+#endif /* !_VM_PAGE_ */
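
The declarations added by this commit wrap their parameter lists in __P((...)) so that the same header serves both ANSI and traditional (K&R) compilers. For orientation, the macro lives in <sys/cdefs.h> and is defined roughly along these lines (reproduced approximately, not part of this diff):

/*
 * Sketch of the __P() convention: under an ANSI compiler the full
 * prototype is kept; under a traditional C compiler the parameter
 * list is dropped, so the declaration degrades to an old-style one.
 */
#if defined(__STDC__) || defined(__cplusplus)
#define	__P(protos)	protos		/* full prototype for ANSI C */
#else
#define	__P(protos)	()		/* empty list for traditional C */
#endif

/* A declaration added above, such as                                   */
vm_page_t	vm_page_alloc __P((vm_object_t, vm_offset_t));
/* therefore expands to either                                          */
/*   vm_page_t  vm_page_alloc(vm_object_t, vm_offset_t);   (ANSI)      */
/*   vm_page_t  vm_page_alloc();                            (K&R)      */

Note the doubled parentheses: the parenthesized argument list is passed to __P() as a single macro argument.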
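For a sense of how the prototyped interface is meant to be called, here is a hypothetical fragment written against the declarations above. example_get_page() is an invented name and is not part of this file or commit; real callers also hold the object and page-queue locks, which are omitted here for brevity.

/*
 * Hypothetical example only: look up a resident page for
 * (object, offset), or allocate and zero-fill a fresh one,
 * then wire it so it stays resident for the caller.
 */
vm_page_t
example_get_page(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	vm_page_t	m;

	m = vm_page_lookup(object, offset);	/* already resident? */
	if (m == NULL) {
		m = vm_page_alloc(object, offset);	/* NULL if free list is empty */
		if (m == NULL)
			return (NULL);
		(void) vm_page_zero_fill(m);	/* hand back zeroed memory */
	}
	vm_page_wire(m);			/* keep it resident */
	return (m);
}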