-/* vm_mem.c 6.1 83/07/29 */
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved. The Berkeley software License Agreement
+ * specifies the terms and conditions for redistribution.
+ *
+ * @(#)vm_mem.c 7.7 (Berkeley) 8/23/87
+ */
+#include "param.h"
+#include "systm.h"
+#include "cmap.h"
+#include "dir.h"
+#include "user.h"
+#include "proc.h"
+#include "text.h"
+#include "vm.h"
+#include "file.h"
+#include "buf.h"
+#include "mount.h"
+#include "trace.h"
+#include "map.h"
+
+#include "../machine/cpu.h"
#include "../machine/pte.h"
-
-#include "../h/param.h"
-#include "../h/systm.h"
-#include "../h/cmap.h"
-#include "../h/dir.h"
-#include "../h/user.h"
-#include "../h/proc.h"
-#include "../h/text.h"
-#include "../h/vm.h"
-#include "../h/file.h"
-#include "../h/inode.h"
-#include "../h/buf.h"
-#include "../h/mount.h"
-#include "../h/trace.h"
-#include "../h/map.h"
-#include "../h/kernel.h"
+#include "../machine/mtpr.h"
/*
* Allocate memory, and always succeed
register struct cmap *c;
register struct pte *spte;
register int j;
- int size, pcnt, fileno;
+ int size, pcnt;
+#ifdef notdef
+ int fileno;
+#endif
if (count % CLSIZE)
panic("vmemfree");
for (j = 0; j < CLSIZE; j++)
*(int *)(pte+j) &= PG_PROT;
if (c->c_type == CTEXT)
- distpte(&text[c->c_ndx], (int)c->c_page, pte);
+ distpte(&text[c->c_ndx], c->c_page,
+ pte);
c->c_gone = 1;
goto free;
}
goto free;
#endif
if (pte->pg_fod) {
+#ifdef notdef
fileno = ((struct fpte *)pte)->pg_fileno;
if (fileno < NOFILE)
panic("vmemfree vread");
+#endif
for (j = 0; j < CLSIZE; j++)
*(int *)(pte+j) &= PG_PROT;
}
* Performed if the page being reclaimed
* is in the free list.
*/
-munlink(pf)
- unsigned pf;
+munlink(c)
+ register struct cmap *c;
{
register int next, prev;
- next = cmap[pgtocm(pf)].c_next;
- prev = cmap[pgtocm(pf)].c_prev;
+ next = c->c_next;
+ prev = c->c_prev;
cmap[prev].c_next = next;
cmap[next].c_prev = prev;
- cmap[pgtocm(pf)].c_free = 0;
+ c->c_free = 0;
if (freemem < minfree)
outofmem();
freemem -= CLSIZE;
}
zapcl(rpte, pg_pfnum) = 0;
if (c->c_type == CTEXT)
- distpte(&text[c->c_ndx], (int)c->c_page, rpte);
+ distpte(&text[c->c_ndx], c->c_page, rpte);
}
switch (type) {
}
}
-/*
- * Allocate wired-down (non-paged) pages in kernel virtual memory.
- */
-caddr_t
-wmemall(pmemall, n)
- int (*pmemall)(), n;
-{
- register int npg;
- register caddr_t va;
- register int a;
-
- npg = clrnd(btoc(n));
- a = rmalloc(kernelmap, (long)npg);
- if (a == 0)
- return (0);
- if ((*pmemall)(&Usrptmap[a], npg, &proc[0], CSYS) == 0) {
- rmfree(kernelmap, (long)npg, (long)a);
- return (0);
- }
- va = (caddr_t) kmxtob(a);
- vmaccess(&Usrptmap[a], va, npg);
- return (va);
-}
-
-/*
- * Allocate wired-down (non-paged) pages in kernel virtual memory.
- * (and clear them)
- */
-caddr_t
-zmemall(pmemall, n)
- int (*pmemall)(), n;
-{
- register int npg;
- register caddr_t va;
- register int a;
-
- npg = clrnd(btoc(n));
- a = rmalloc(kernelmap, (long)npg);
- if (a == 0)
- return (0);
- if ((*pmemall)(&Usrptmap[a], npg, &proc[0], CSYS) == 0) {
- rmfree(kernelmap, (long)npg, (long)a);
- return (0);
- }
- va = (caddr_t) kmxtob(a);
- vmaccess(&Usrptmap[a], va, npg);
- while (--npg >= 0)
- clearseg((unsigned)(PG_PFNUM & *(int *)&Usrptmap[a++]));
- return (va);
-}
-
-wmemfree(va, n)
- caddr_t va;
- int n;
-{
- register int a, npg;
-
- a = btokmx((struct pte *) va);
- npg = clrnd(btoc(n));
- (void) memfree(&Usrptmap[a], npg, 0);
- rmfree(kernelmap, (long)npg, (long)a);
-}
-
/*
* Enter clist block c on the hash chains.
* It contains file system block bn from device dev.
}
/*
- * Pull the clist entry of <dev,bn> off the hash chains.
- * We have checked before calling (using mfind) that the
- * entry really needs to be unhashed, so panic if we can't
- * find it (can't happen).
+ * Pull the clist entry of <dev,bn> off the hash chains
+ * if present.
*/
munhash(dev, bn)
dev_t dev;
daddr_t bn;
{
- register int i = CMHASH(bn);
+ int i = CMHASH(bn);
register struct cmap *c1, *c2;
- int si = splimp();
+ int mdev, s;
+ mdev = getfsx(dev);
+ s = splimp();
c1 = &cmap[cmhash[i]];
if (c1 == ecmap)
- panic("munhash");
- if (c1->c_blkno == bn && getfsx(dev) == c1->c_mdev)
+ goto out;
+ if (c1->c_blkno == bn && c1->c_mdev == mdev)
cmhash[i] = c1->c_hlink;
else {
for (;;) {
c2 = c1;
c1 = &cmap[c2->c_hlink];
if (c1 == ecmap)
- panic("munhash");
- if (c1->c_blkno == bn && getfsx(dev) == c1->c_mdev)
+ goto out;
+ if (c1->c_blkno == bn && c1->c_mdev == mdev)
break;
}
c2->c_hlink = c1->c_hlink;
}
- if (mfind(dev, bn))
- panic("munhash mfind");
c1->c_mdev = 0;
c1->c_blkno = 0;
c1->c_hlink = 0;
- splx(si);
+out:
+	if (mfind(dev, bn))
+		panic("munhash mfind");
+ splx(s);
}
/*
daddr_t bn;
{
register struct cmap *c1 = &cmap[cmhash[CMHASH(bn)]];
+ int mdev = getfsx(dev);
int si = splimp();
while (c1 != ecmap) {
- if (c1->c_blkno == bn && c1->c_mdev == getfsx(dev))
+ if (c1->c_blkno == bn && c1->c_mdev == mdev) {
+ splx(si);
return (c1);
+ }
c1 = &cmap[c1->c_hlink];
}
splx(si);
cmap[CMHEAD].c_prev = freemem / CLSIZE;
cmap[CMHEAD].c_type = CSYS;
avefree = freemem;
- hand = 0;
}
+#ifdef notdef
/*
* Wait for frame pf to become unlocked
* if it is currently locked.
- *
- * THIS ROUTINE SHOULD TAKE A CMAP STRUCTURE AS ARGUMENT.
*/
-mwait(pf)
- unsigned pf;
+mwait(c)
+ struct cmap *c;
{
- mlock(pf);
- munlock(pf);
+ mlock(c);
+ munlock(c);
}
/*
* Lock a page frame.
- *
- * THIS ROUTINE SHOULD TAKE A CMAP STRUCTURE AS ARGUMENT.
*/
-mlock(pf)
- unsigned pf;
+mlock(c)
+ register struct cmap *c;
{
- register struct cmap *c = &cmap[pgtocm(pf)];
while (c->c_lock) {
c->c_want = 1;
/*
* Unlock a page frame.
- *
- * THIS ROUTINE SHOULD TAKE A CMAP STRUCTURE AS ARGUMENT.
*/
-munlock(pf)
- unsigned pf;
+munlock(c)
+ register struct cmap *c;
{
- register struct cmap *c = &cmap[pgtocm(pf)];
if (c->c_lock == 0)
panic("dup page unlock");
- if (c->c_want)
+ if (c->c_want) {
wakeup((caddr_t)c);
+ c->c_want = 0;
+ }
c->c_lock = 0;
- c->c_want = 0;
}
+#endif
/*
* Lock a virtual segment.
register unsigned v;
register int npf;
register struct pte *pte;
+ register struct cmap *c;
+#if defined(tahoe)
+ /*
+ * TAHOE I/O drivers may arrive here on raw I/O,
+ * base will be a system address in this case
+ */
+ if (((int)base & KERNBASE) == KERNBASE) /* system addresses */
+ return;
+#endif
v = btop(base);
pte = vtopte(u.u_procp, v);
npf = btoc(count + ((int)base & CLOFSET));
- while (npf > 0) {
- if (pte->pg_v)
- mlock(pte->pg_pfnum);
- else
+ for (; npf > 0; pte += CLSIZE, v += CLSIZE, npf -= CLSIZE) {
+retry:
+ if (pte->pg_v) {
+#ifdef MMAP
+ if (pte->pg_fod) /* mapped page */
+ continue;
+#endif
+ c = &cmap[pgtocm(pte->pg_pfnum)];
+ if (c->c_lock) {
+ MLOCK(c);
+ MUNLOCK(c);
+ goto retry;
+ }
+ MLOCK(c);
+ } else
pagein(ctob(v), 1); /* return it locked */
- pte += CLSIZE;
- v += CLSIZE;
- npf -= CLSIZE;
}
}
caddr_t base;
{
register struct pte *pte;
- register int npf;
+ register struct cmap *c;
+ int npf;
+#if defined(tahoe)
+ /*
+ * TAHOE I/O drivers may arrive here on raw I/O,
+ * base will be a system address in this case
+ */
+ if (((int)base & KERNBASE) == KERNBASE) /* system addresses */
+ return;
+#endif
pte = vtopte(u.u_procp, btop(base));
npf = btoc(count + ((int)base & CLOFSET));
- while (npf > 0) {
- munlock(pte->pg_pfnum);
+ for (; npf > 0; pte += CLSIZE, npf -= CLSIZE) {
+#ifdef MMAP
+ if (pte->pg_fod && pte->pg_v) /* mapped page */
+ continue;
+#endif
+ c = &cmap[pgtocm(pte->pg_pfnum)];
+ MUNLOCK(c);
if (rw == B_READ) /* Reading from device writes memory */
pte->pg_m = 1;
- pte += CLSIZE;
- npf -= CLSIZE;
}
}