/*
- * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
+ * Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
* %sccs.include.redist.c%
*
- * @(#)lfs_bio.c 5.4 (Berkeley) %G%
+ * @(#)lfs_bio.c 7.4 (Berkeley) %G%
*/
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
+#include <sys/vnode.h>
#include <sys/resourcevar.h>
+#include <sys/mount.h>
-#include <lfs/lfs.h>
-#include <lfs/lfs_extern.h>
+#include <ufs/ufs/quota.h>
+#include <ufs/ufs/inode.h>
+#include <ufs/ufs/ufsmount.h>
+
+#include <ufs/lfs/lfs.h>
+#include <ufs/lfs/lfs_extern.h>
/*
- * LFS version of bawrite, bdwrite, bwrite. Set the delayed write flag and
- * use reassignbuf to move the buffer from the clean list to the dirty one,
- * then unlock the buffer.
+ * LFS block write function.
+ *
+ * XXX
+ * No write cost accounting is done.
+ * This is almost certainly wrong for synchronous operations and NFS.
*/
+int locked_queue_count; /* XXX Count of locked-down buffers. */
+/*
+ * NOTE(review): incremented in lfs_bwrite and zeroed in lfs_flush with no
+ * locking; presumably safe only because these run at a common priority
+ * level -- confirm against the rest of the LFS code.
+ */
+
int
lfs_bwrite(bp)
register BUF *bp;
{
-#ifdef DO_ACCOUNTING
- Not included as this gets called from lots of places where the
- current proc structure is probably wrong. Ignore for now.
- curproc->p_stats->p_ru.ru_oublock++; /* XXX: no one paid yet */
+#ifdef VERBOSE
+ /* Debug tracing only; compiled in when VERBOSE is defined. */
printf("lfs_bwrite\n");
#endif
+ /*
+ * Set the delayed write flag and use reassignbuf to move the buffer
+ * from the clean list to the dirty one.
+ *
+ * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
+ * the buffer onto the LOCKED free list. This is necessary, otherwise
+ * getnewbuf() would try to reclaim the buffers using bawrite, which
+ * isn't going to work.
+ */
+ if (!(bp->b_flags & B_LOCKED))
+ ++locked_queue_count;
+ bp->b_flags |= B_DELWRI | B_LOCKED;
+ /* Clear stale I/O status so the eventual segment write starts fresh. */
bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
- bp->b_flags |= B_WRITE | B_DELWRI;
- reassignbuf(bp, bp->b_vp); /* XXX: do this inline? */
+ reassignbuf(bp, bp->b_vp);
brelse(bp);
+ /*
+ * NOTE(review): always reports success; the write is deferred, so any
+ * real I/O error presumably surfaces later at segment-write time.
+ */
return (0);
}
+
+/*
+ * XXX
+ * This routine flushes buffers out of the B_LOCKED queue when LFS has too
+ * many locked down. Eventually the pageout daemon will simply call LFS
+ * when pages need to be reclaimed.
+ */
+void
+lfs_flush()
+{
+ register struct mount *mp;
+ struct mount *omp;
+
+ /* 800K in a 4K file system. */
+ if (locked_queue_count < 200)
+ return;
+ /* Walk the circular mount list, starting (and terminating) at rootfs. */
+ mp = rootfs;
+ do {
+ /*
+ * The lock check below is to avoid races with mount
+ * and unmount.
+ */
+ if (mp->mnt_stat.f_type == MOUNT_LFS &&
+ (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_MPBUSY)) == 0 &&
+ !vfs_busy(mp)) {
+ lfs_segwrite(mp, 0);
+ /*
+ * Advance to the next mount before releasing this one,
+ * so the list link is read while mp is still busied.
+ */
+ omp = mp;
+ mp = mp->mnt_next;
+ vfs_unbusy(omp);
+ } else
+ mp = mp->mnt_next;
+ } while (mp != rootfs);
+ /* Not exact, but it doesn't matter. */
+ locked_queue_count = 0;
+}