+int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */
+int vm_page_pagesfreed; /* NOTE(review): presumably a running count of pages freed by pageout — not referenced in this chunk; confirm against the rest of the file */
+
+extern int npendingio; /* defined elsewhere; pending pageout I/O count — TODO confirm */
+extern int hz; /* system clock tick rate (ticks/second), defined in kern_clock */
+int vm_pageout_proc_limit; /* per-process RSS enforcement limit — see vm_pageout_object_deactivate_pages */
+extern int nswiodone; /* completed swap I/O count, maintained by the swap pager — TODO confirm */
+
+/* NOTE(review): MAXREF is unused in this chunk; presumably a clamp for a reference/usage counter elsewhere */
+#define MAXREF 32767
+
+/*
+ * vm_pageout_clean:
+ * Attempt to clean (page out) a single dirty page through its
+ * object's pager.
+ *
+ * m:    the page to clean
+ * wait: non-zero -> sleep if the page is busy and request a
+ *       synchronous pageout; zero -> give up on a busy page.
+ *
+ * Returns 1 if the pageout completed (VM_PAGER_OK) or is in
+ * flight (VM_PAGER_PEND), 0 otherwise.
+ */
+int
+vm_pageout_clean(m, wait)
+ register vm_page_t m;
+ int wait;
+{
+ /*
+ * Clean the page and remove it from the
+ * laundry.
+ *
+ * We set the busy bit to cause
+ * potential page faults on this page to
+ * block.
+ *
+ * And we set pageout-in-progress to keep
+ * the object from disappearing during
+ * pageout. This guarantees that the
+ * page won't move from the inactive
+ * queue. (However, any other page on
+ * the inactive queue may move!)
+ */
+
+ register vm_object_t object;
+ register vm_pager_t pager;
+ int pageout_status;
+
+ object = m->object;
+ if (!object) {
+ printf("pager: object missing\n");
+ return 0;
+ }
+
+ /*
+ * Try to collapse the object before
+ * making a pager for it. We must
+ * unlock the page queues first.
+ * We try to defer the creation of a pager
+ * until all shadows are not paging. This
+ * allows vm_object_collapse to work better and
+ * helps control swap space size.
+ * (J. Dyson 11 Nov 93)
+ */
+
+ /* no pager yet and free memory is critically low: creating one now
+  * could deadlock the pageout path, so punt */
+ if (!object->pager &&
+ vm_page_free_count < vm_pageout_free_min)
+ return 0;
+
+ /* defer pager creation while a shadow is mid-pageout (see note above) */
+ if (!object->pager &&
+ object->shadow &&
+ object->shadow->paging_in_progress)
+ return 0;
+
+ if (object->shadow) {
+ vm_offset_t offset = m->offset;
+ vm_object_collapse(object);
+ /* the collapse may have freed or moved the page; if it is no
+  * longer resident in this object there is nothing left to clean */
+ if (!vm_page_lookup(object, offset))
+ return 0;
+ }
+
+ /*
+ * If the page is busy: fail immediately when we may not wait,
+ * otherwise sleep until it is released and re-check from the top
+ * (it may have gone busy again by the time we are woken).
+ */
+waitagain:
+ if (!wait && (m->flags & PG_BUSY)) {
+ return 0;
+ } else if (m->flags & PG_BUSY) {
+ int s = splhigh();
+ m->flags |= PG_WANTED;
+ tsleep((caddr_t)m, PVM, "clnslp", 0);
+ splx(s);
+ goto waitagain;
+ }
+
+ /* claim the page: faults on it will now block until PAGE_WAKEUP */
+ m->flags |= PG_BUSY;
+
+ /* downgrade all mappings to read-only so any modification during the
+  * pageout faults, and reset the reference bit for the LRU logic */
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+
+ vm_stat.pageouts++;
+
+ /* hold off object collapse/teardown while the pageout is in flight;
+  * dropped below unless the pager returns VM_PAGER_PEND */
+ object->paging_in_progress++;
+
+ /*
+ * If there is no pager for the page,
+ * use the default pager. If there's
+ * no place to put the page at the
+ * moment, leave it in the laundry and
+ * hope that there will be paging space
+ * later.
+ */
+
+ if ((pager = object->pager) == NULL) {
+ pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
+ object->size, VM_PROT_ALL, 0);
+ if (pager != NULL) {
+ vm_object_setpager(object, pager, 0, FALSE);
+ }
+ }
+ /* only push to a non-swap pager when free memory is above the
+  * pageout reserve; swap pageouts are allowed regardless.
+  * kernel_object pages and wait-mode callers get a synchronous put. */
+ if ((pager && pager->pg_type == PG_SWAP) ||
+ vm_page_free_count >= vm_pageout_free_min) {
+ pageout_status = pager ?
+ vm_pager_put(pager, m, (((object == kernel_object) || wait) ? TRUE: FALSE)) :
+ VM_PAGER_FAIL;
+ } else
+ pageout_status = VM_PAGER_FAIL;
+
+ switch (pageout_status) {
+ case VM_PAGER_OK:
+ m->flags &= ~PG_LAUNDRY;
+ break;
+ case VM_PAGER_PEND:
+ m->flags &= ~PG_LAUNDRY;
+ break;
+ case VM_PAGER_BAD:
+ /*
+ * Page outside of range of object.
+ * Right now we essentially lose the
+ * changes by pretending it worked.
+ */
+ m->flags &= ~PG_LAUNDRY;
+ m->flags |= PG_CLEAN;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ break;
+ case VM_PAGER_FAIL:
+ /*
+ * If the page couldn't be paged out, then
+ * reactivate it so it doesn't clog the
+ * inactive list. (We will try to page it
+ * out again later.)
+ */
+ vm_page_activate(m);
+ break;
+ case VM_PAGER_TRYAGAIN:
+ /* transient pager shortage: leave the page in the laundry */
+ break;
+ }
+
+
+ /*
+ * If the operation is still going, leave
+ * the page busy to block all other accesses.
+ * Also, leave the paging in progress
+ * indicator set so that we don't attempt an
+ * object collapse.
+ */
+ if (pageout_status != VM_PAGER_PEND) {
+ /* the page was touched while we were writing it out:
+  * put it back on the active queue */
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ vm_page_activate(m);
+ }
+ PAGE_WAKEUP(m);
+ if (--object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
+ }
+ return (pageout_status == VM_PAGER_PEND ||
+ pageout_status == VM_PAGER_OK) ? 1 : 0;
+}
+
+/*
+ * vm_pageout_object_deactivate_pages
+ *
+ * deactivate enough pages to satisfy the inactive target
+ * requirements or if vm_pageout_proc_limit is set, then
+ * deactivate all of the pages in the object and its
+ * shadows.
+ *
+ * The object and map must be locked.
+ *
+ * Returns the number of pages deactivated, including those
+ * deactivated in shadow objects via recursion.
+ */
+int
+vm_pageout_object_deactivate_pages(map, object, count)
+ vm_map_t map;
+ vm_object_t object;
+ int count;
+{
+ register vm_page_t p, next;
+ int rcount;
+ int s;
+ int dcount;
+
+ dcount = 0;
+/*
+ * if the object is used by more than one process and we are not enforcing
+ * RSS size, then dont deactivate the pages. Note that the check for
+ * ref_count is not really a check for more than one process, but is
+ * close enough.
+ */
+
+#if 0
+ if (object->ref_count > 1)
+ return 0;
+#endif
+ /*
+ * deactivate the pages in the objects shadow
+ */
+
+ /* split the deactivation quota across the object's referencers,
+  * but always try for at least one page */
+ if (object->ref_count)
+ count /= object->ref_count;
+
+ if (count == 0)
+ count = 1;
+
+ /* recurse down the shadow chain first, accumulating its tally */
+ if (object->shadow)
+ dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count);
+
+ /*
+ * scan the objects entire memory queue
+ */
+ rcount = object->resident_page_count;
+ p = (vm_page_t) queue_first(&object->memq);
+ while ((rcount-- > 0) && !queue_end(&object->memq, (queue_entry_t) p) ) {
+ /* capture the successor now: the else branch below requeues p */
+ next = (vm_page_t) queue_next(&p->listq);
+ vm_page_lock_queues();
+ /*
+ * if a page is active, not wired and is in the processes pmap,
+ * then deactivate the page.
+ */
+ if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE &&
+ p->wire_count == 0 &&
+ !pmap_is_wired(VM_PAGE_TO_PHYS(p)) &&
+ pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
+ if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p))) {
+ vm_page_pageout_deactivate(p);
+ /*
+ * see if we are done yet
+ */
+ if (p->flags & PG_INACTIVE) {
+ /* the page actually moved to the inactive queue;
+  * clean it if dirty and count it against the quota */
+ if ((p->flags & PG_CLEAN) == 0)
+ vm_pageout_clean(p, 0);
+ --count;
+ ++dcount;
+ /* stop early once the quota is met and the
+  * system-wide inactive target is satisfied */
+ if (count <= 0 &&
+ vm_page_inactive_count > vm_page_inactive_target) {
+ vm_page_unlock_queues();
+ return dcount;
+ }
+ }
+
+ } else {
+ /* page was recently referenced: reset its reference bit
+  * and requeue it on both the object memq and the active
+  * queue so it gets another pass before deactivation.
+  * NOTE(review): deact = 2 presumably grants two more scans
+  * before vm_page_pageout_deactivate takes effect — confirm
+  * against the vm_page code. */
+ p->deact = 2;
+ pmap_clear_reference(VM_PAGE_TO_PHYS(p));
+ queue_remove(&object->memq, p, vm_page_t, listq);
+ queue_enter(&object->memq, p, vm_page_t, listq);
+ queue_remove(&vm_page_queue_active, p, vm_page_t, pageq);
+ queue_enter(&vm_page_queue_active, p, vm_page_t, pageq);
+ }
+ }
+ vm_page_unlock_queues();
+ p = next;
+ }
+ return dcount;
+}