-/* vfs_cluster.c 3.8 %G% */
+/* vfs_cluster.c 4.5 %G% */
#include "../h/param.h"
#include "../h/systm.h"
#include "../h/seg.h"
#include "../h/pte.h"
#include "../h/vm.h"
+#include "../h/trace.h"
/*
* The following several routines allocate and free
bp = getblk(dev, blkno);
if (bp->b_flags&B_DONE) {
+#ifdef EPAWNJ
+ trace(TR_BREAD|TR_HIT, dev, blkno);
+#endif
#ifdef DISKMON
io_info.ncache++;
#endif
bp->b_flags |= B_READ;
bp->b_bcount = BSIZE;
(*bdevsw[major(dev)].d_strategy)(bp);
+#ifdef EPAWNJ
+ trace(TR_BREAD|TR_MISS, dev, blkno);
+#endif
#ifdef DISKMON
io_info.nread++;
#endif
bp->b_flags |= B_READ;
bp->b_bcount = BSIZE;
(*bdevsw[major(dev)].d_strategy)(bp);
+#ifdef EPAWNJ
+ trace(TR_BREAD|TR_MISS, dev, blkno);
+#endif
#ifdef DISKMON
io_info.nread++;
#endif
u.u_vm.vm_inblk++; /* pay for read */
}
+#ifdef EPAWNJ
+ else
+ trace(TR_BREAD|TR_HIT, dev, blkno);
+#endif
}
if (rablkno && !incore(dev, rablkno)) {
rabp = getblk(dev, rablkno);
- if (rabp->b_flags & B_DONE)
+ if (rabp->b_flags & B_DONE) {
brelse(rabp);
- else {
+#ifdef EPAWNJ
+ trace(TR_BREAD|TR_HIT|TR_RA, dev, blkno);
+#endif
+ } else {
rabp->b_flags |= B_READ|B_ASYNC;
rabp->b_bcount = BSIZE;
(*bdevsw[major(dev)].d_strategy)(rabp);
+#ifdef EPAWNJ
+ trace(TR_BREAD|TR_MISS|TR_RA, dev, rablock);
+#endif
#ifdef DISKMON
io_info.nreada++;
#endif
#endif
if ((flag&B_DELWRI) == 0)
u.u_vm.vm_oublk++; /* noone paid yet */
+#ifdef EPAWNJ
+ trace(TR_BWRITE, bp->b_dev, dbtofsb(bp->b_blkno));
+#endif
(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
if ((flag&B_ASYNC) == 0) {
iowait(bp);
for (bp = &buf[bufhash[BUFHASH(blkno)]]; bp != &buf[-1];
bp = &buf[bp->b_hlink])
- if (bp->b_blkno == dblkno && bp->b_dev == dev)
+ if (bp->b_blkno == dblkno && bp->b_dev == dev
+ && !(bp->b_flags & B_INVAL))
return (1);
return (0);
}
daddr_t blkno;
{
register struct buf *bp, *dp, *ep;
- register int i, x;
- register int dblkno = fsbtodb(blkno);
+ register int i, x, dblkno;
+ if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT))
+ blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1);
+ dblkno = fsbtodb(blkno);
loop:
(void) spl0();
for (bp = &buf[bufhash[BUFHASH(blkno)]]; bp != &buf[-1];
bp = &buf[bp->b_hlink]) {
- if (bp->b_blkno != dblkno || bp->b_dev != dev)
+ if (bp->b_blkno != dblkno || bp->b_dev != dev
+ || bp->b_flags & B_INVAL)
continue;
(void) spl6();
if (bp->b_flags&B_BUSY) {
sleep((caddr_t)&bfreelist, PRIBIO+1);
goto loop;
}
- spl0();
+ (void) spl0();
bp = bfreelist.av_forw;
notavail(bp);
if (bp->b_flags & B_DELWRI) {
if (bp->b_dev == NODEV)
goto done;
/* INLINE EXPANSION OF bunhash(bp) */
+#ifdef EPAWNJ
+ trace(TR_BRELSE, bp->b_dev, dbtofsb(bp->b_blkno));
+#endif
+ (void) spl6();
i = BUFHASH(dbtofsb(bp->b_blkno));
x = bp - buf;
if (bufhash[i] == x) {
panic("getblk");
}
done:
+ (void) spl0();
/* END INLINE EXPANSION */
bp->b_flags = B_BUSY;
bp->b_back->b_forw = bp->b_forw;
bwrite(bp);
goto loop;
}
- if (bp->b_dev != NODEV)
+ if (bp->b_dev != NODEV) {
+#ifdef EPAWNJ
+ trace(TR_BRELSE, bp->b_dev, dbtofsb(bp->b_blkno));
+#endif
bunhash(bp);
+ }
bp->b_flags = B_BUSY;
bp->b_back->b_forw = bp->b_forw;
bp->b_forw->b_back = bp->b_back;
register struct buf *bp;
{
register struct buf *ep;
- register int i, x;
+ register int i, x, s;
if (bp->b_dev == NODEV)
return;
+ s = spl6();
i = BUFHASH(dbtofsb(bp->b_blkno));
x = bp - buf;
if (bufhash[i] == x) {
bufhash[i] = bp->b_hlink;
- return;
+ goto ret;
}
for (ep = &buf[bufhash[i]]; ep != &buf[-1];
ep = &buf[ep->b_hlink])
if (ep->b_hlink == x) {
ep->b_hlink = bp->b_hlink;
- return;
+ goto ret;
}
panic("bunhash");
+ret:
+ splx(s);
}
/*
{
register int s;
+ if (bp->b_flags & B_DONE)
+ panic("dup iodone");
bp->b_flags |= B_DONE;
if (bp->b_flags & B_DIRTY) {
if (bp->b_flags & B_ERROR)
if (bswlist.b_flags & B_WANTED)
wakeup((caddr_t)&proc[2]);
splx(s);
+ return;
}
if (bp->b_flags&B_ASYNC)
brelse(bp);
bp->b_bcount = c;
bp->b_blkno = dblkno;
bp->b_dev = dev;
+ if (flag & B_DIRTY) {
+ swpf[bp - swbuf] = pfcent;
+ swsize[bp - swbuf] = nbytes;
+ }
(*bdevsw[major(dev)].d_strategy)(bp);
if (flag & B_DIRTY) {
if (c < nbytes)
panic("big push");
- swsize[bp - swbuf] = nbytes;
- swpf[bp - swbuf] = pfcent;
return;
}
(void) spl6();
if ((u.u_error = bp->b_error)==0)
u.u_error = EIO;
}
+
+
+/*
+ * Invalidate in core blocks belonging to closed or umounted filesystem
+ *
+ * This is not nicely done at all - the buffer ought to be removed from the
+ * hash chains & have its dev/blkno fields clobbered, but unfortunately we
+ * can't do that here, as it is quite possible that the block is still
+ * being used for i/o. Eventually, all disc drivers should be forced to
+ * have a close routine, which ought to ensure that the queue is empty, then
+ * properly flush the queues. Until that happy day, this suffices for
+ * correctness. ... kre
+ */
+binval(dev)
+dev_t dev;
+{
+ register struct buf *bp, *dp;
+
+ dp = bdevsw[major(dev)].d_tab;
+
+ /*
+ * Mark every buffer on this device's queue B_INVAL; the B_INVAL
+ * tests added to incore() and getblk() in this same change are
+ * what make this lazy invalidation scheme work.
+ */
+ for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
+ if (bp->b_dev == dev)
+ bp->b_flags |= B_INVAL;
+}