+/*
+ * An iteration of the clock pointer (hand) around the loop.
+ * Look at the page at hand. If it is a
+ * locked (e.g. for physical i/o), system (u., page table)
+ * or free page, then leave it alone.
+ * Otherwise, if we are running the front hand,
+ * invalidate the page for simulation of the reference bit.
+ * If the proc is over maxrss, we take it.
+ * If running the back hand, check whether the page
+ * has been reclaimed. If not, free the page,
+ * pushing it to disk first if necessary.
+ *
+ * Returns 1 if the page frame was freed, or a push was started
+ * that will free it once the write completes; returns 0 if the
+ * page was left alone.
+ */
+checkpage(hand, whichhand)
+ int hand, whichhand;
+{
+ register struct proc *rp;
+ register struct text *xp;
+ register struct cmap *c;
+ register struct pte *pte;
+ swblk_t daddr;
+ unsigned v;
+ int klsize;
+
+top:
+ /*
+ * Find a process and text pointer for the
+ * page, and a virtual page number in either the
+ * process or the text image.
+ */
+ c = &cmap[hand];
+ /* locked (in transit) and already-free frames are never touched */
+ if (c->c_lock || c->c_free)
+ return (0);
+ switch (c->c_type) {
+
+ case CSYS:
+ /* system pages (u. areas, page tables) are never paged out */
+ return (0);
+
+ case CTEXT:
+ /*
+ * Shared text page: x_caddr presumably heads a chain of
+ * procs sharing this text image -- any one will do for
+ * the address computation. TODO(review): confirm against
+ * the text struct declaration.
+ */
+ xp = &text[c->c_ndx];
+ rp = xp->x_caddr;
+ v = tptov(rp, c->c_page);
+ pte = tptopte(rp, c->c_page);
+ break;
+
+ case CDATA:
+ case CSTACK:
+ rp = &proc[c->c_ndx];
+ /*
+ * Skip over procs marked SNOVM (no VM of their own,
+ * e.g. mid-fork/vfork) by following p_xlink to the
+ * proc that actually owns the image.
+ * NOTE(review): assumes the p_xlink chain always ends
+ * at a proc without SNOVM -- verify.
+ */
+ while (rp->p_flag & SNOVM)
+ rp = rp->p_xlink;
+ xp = rp->p_textp;
+ if (c->c_type == CDATA) {
+ v = dptov(rp, c->c_page);
+ pte = dptopte(rp, c->c_page);
+ } else {
+ v = sptov(rp, c->c_page);
+ pte = sptopte(rp, c->c_page);
+ }
+ break;
+ }
+
+ /* consistency check: this pte must map exactly this core frame */
+ if (pte->pg_pfnum != cmtopg(hand))
+ panic("bad c_page");
+
+ /*
+ * If page is valid; make invalid but reclaimable.
+ * If this pte is not valid, then it must be reclaimable
+ * and we can add it to the free list.
+ */
+ if (pte->pg_v) {
+ /*
+ * Back hand: the page was referenced (reclaimed) since
+ * the front hand invalidated it, so it is active -- keep it.
+ */
+ if (whichhand == BACK)
+ return(0);
+ /* front hand: invalidate to simulate a reference bit */
+ pte->pg_v = 0;
+ /*
+ * If any pte in the click (page cluster) has the modify
+ * bit set, record dirtiness here, then propagate the
+ * change to the rest of the cluster (distcl) and, for
+ * shared text, to every sharing process (distpte).
+ */
+ if (anycl(pte, pg_m))
+ pte->pg_m = 1;
+ distcl(pte);
+ if (c->c_type == CTEXT)
+ distpte(xp, (unsigned)vtotp(rp, v), pte);
+ /*
+ * Unless the process has anomalous/sequential paging
+ * behavior declared (SSEQL|SUANOM) or is over its
+ * resident-set limit, stop here -- the back hand will
+ * decide the page's fate.
+ */
+ if ((rp->p_flag & (SSEQL|SUANOM)) == 0 &&
+ rp->p_rssize <= rp->p_maxrss)
+ return (0);
+ }
+ if (c->c_type != CTEXT) {
+ /*
+ * Guarantee a minimal investment in data
+ * space for jobs in balance set.
+ */
+ if (rp->p_rssize < saferss - rp->p_slptime)
+ return (0);
+ }
+
+ /*
+ * If the page is currently dirty, we
+ * have to arrange to have it cleaned before it
+ * can be freed. We mark it clean immediately.
+ * If it is reclaimed while being pushed, then modified
+ * again, we are assured of the correct order of
+ * writes because we lock the page during the write.
+ * This guarantees that a swap() of this process (and
+ * thus this page), initiated in parallel, will,
+ * in fact, push the page after us.
+ *
+ * The most general worst case here would be for
+ * a reclaim, a modify and a swapout to occur
+ * all before the single page transfer completes.
+ */
+ if (dirtycl(pte)) {
+ /*
+ * If the process is being swapped out
+ * or about to exit, do not bother with its
+ * dirty pages
+ */
+ if (rp->p_flag & (SLOCK|SWEXIT))
+ return (0);
+ /*
+ * Limit pushes to avoid saturating
+ * pageout device.
+ */
+ if (pushes > maxpgio / RATETOSCHEDPAGING)
+ return (0);
+ pushes++;
+
+ /*
+ * Now carefully make sure that there will
+ * be a header available for the push so that
+ * we will not block waiting for a header in
+ * swap(). The reason this is important is
+ * that we (proc[2]) are the one who cleans
+ * dirty swap headers and we could otherwise
+ * deadlock waiting for ourselves to clean
+ * swap headers. The sleep here on &proc[2]
+ * is actually (effectively) a sleep on both
+ * ourselves and &bswlist, and this is known
+ * to swdone and swap in vm_swp.c. That is,
+ * &proc[2] will be awakened both when dirty
+ * headers show up and also to get the pageout
+ * daemon moving.
+ */
+loop2:
+ /* block disk interrupts while inspecting the cleaned list */
+ (void) splbio();
+ if (bclnlist != NULL) {
+ (void) spl0();
+ cleanup();
+ goto loop2;
+ }
+ if (bswlist.av_forw == NULL) {
+ /* no free swap header: wait for one (see comment above) */
+ bswlist.b_flags |= B_WANTED;
+ sleep((caddr_t)&proc[2], PSWP+2);
+ (void) spl0();
+ /*
+ * Page disposition may have changed
+ * since process may have exec'ed,
+ * forked, exited or just about
+ * anything else... try this page
+ * frame again, from the top.
+ */
+ goto top;
+ }
+ (void) spl0();
+
+ /*
+ * Lock the frame for the duration of the transfer, and
+ * map in the process's u. area (via Pushmap/pushutl)
+ * so vtod() below can consult its disk maps.
+ * NOTE(review): uaccess semantics inferred from usage here
+ * -- confirm against its definition.
+ */
+ MLOCK(c);
+ uaccess(rp, Pushmap, &pushutl);
+ /*
+ * Now committed to pushing the page...
+ * Mark it clean now; the lock taken above orders any
+ * concurrent re-dirty/push correctly (see big comment).
+ */
+ pte->pg_m = 0;
+ distcl(pte);
+ if (c->c_type == CTEXT) {
+ /* count a text page-out in progress; tell all sharers */
+ xp->x_poip++;
+ distpte(xp, (unsigned)vtotp(rp, v), pte);
+ } else
+ rp->p_poip++;
+ /*
+ * Cluster adjacent dirty pages (up to klout) into one
+ * transfer; kluster returns the starting virtual page
+ * and sets klsize to the cluster length.
+ */
+ v = kluster(rp, v, pte, B_WRITE, &klsize, klout, (daddr_t)0);
+ /* the page itself is dirty, so the cluster cannot be empty */
+ if (klsize == 0)
+ panic("pageout klsize");
+ daddr = vtod(rp, v, &pushutl.u_dmap, &pushutl.u_smap);
+ /* start the asynchronous write to the swap device */
+ (void)swap(rp, daddr, ptob(v), klsize * ctob(CLSIZE),
+ B_WRITE, B_DIRTY, swapdev, pte->pg_pfnum);
+ /*
+ * The cleaning of this page will be
+ * completed later, in cleanup() called
+ * (synchronously) by us (proc[2]). In
+ * the meantime, the page frame is locked
+ * so no havoc can result.
+ */
+ return (1); /* well, it'll be free soon */
+
+ }
+ /*
+ * Decrement the resident set size of the current
+ * text object/process, and put the page in the
+ * free list. Note that we don't give memfree the
+ * pte as its argument, since we don't want to destroy
+ * the pte. If it hasn't already been discarded
+ * it may yet have a chance to be reclaimed from
+ * the free list.
+ */
+ if (c->c_gone == 0)
+ if (c->c_type == CTEXT)
+ xp->x_rssize -= CLSIZE;
+ else
+ rp->p_rssize -= CLSIZE;
+ memfree(pte, CLSIZE, 0);
+ cnt.v_dfree += CLSIZE;
+ return (1); /* freed a page! */
+}
+