/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_segment.c	8.2 (Berkeley) %G%
 */
#include <sys/resourcevar.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/lfs/lfs_extern.h>
extern int count_lock_queue
__P((void));
/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define LFS_PARTIAL_FITS(fs) \
((fs)->lfs_dbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
void lfs_callback
__P((struct buf
*));
void lfs_gather
__P((struct lfs
*, struct segment
*,
struct vnode
*, int (*) __P((struct lfs
*, struct buf
*))));
int lfs_gatherblock
__P((struct segment
*, struct buf
*, int *));
void lfs_iset
__P((struct inode
*, daddr_t
, time_t));
int lfs_match_data
__P((struct lfs
*, struct buf
*));
int lfs_match_dindir
__P((struct lfs
*, struct buf
*));
int lfs_match_indir
__P((struct lfs
*, struct buf
*));
int lfs_match_tindir
__P((struct lfs
*, struct buf
*));
void lfs_newseg
__P((struct lfs
*));
void lfs_shellsort
__P((struct buf
**, daddr_t
*, register int));
void lfs_supercallback
__P((struct buf
*));
void lfs_updatemeta
__P((struct segment
*));
int lfs_vref
__P((struct vnode
*));
void lfs_vunref
__P((struct vnode
*));
void lfs_writefile
__P((struct lfs
*, struct segment
*, struct vnode
*));
int lfs_writeinode
__P((struct lfs
*, struct segment
*, struct inode
*));
int lfs_writeseg
__P((struct lfs
*, struct segment
*));
void lfs_writesuper
__P((struct lfs
*));
void lfs_writevnodes
__P((struct lfs
*fs
, struct mount
*mp
,
struct segment
*sp
, int dirops
));
int lfs_allclean_wakeup
; /* Cleaner wakeup address. */
/* Statistics Counters */
struct lfs_stats lfs_stats
;
/* op values to lfs_writevnodes */
/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */
fs
= VFSTOUFS(vp
->v_mount
)->um_lfs
;
if (fs
->lfs_nactive
> MAX_ACTIVE
)
return(lfs_segwrite(vp
->v_mount
, SEGM_SYNC
|SEGM_CKP
));
lfs_seglock(fs
, SEGM_SYNC
);
if (vp
->v_dirtyblkhd
.le_next
== NULL
)
lfs_writevnodes(fs
, vp
->v_mount
, sp
, VN_EMPTY
);
if (vp
->v_dirtyblkhd
.le_next
!= NULL
)
lfs_writefile(fs
, sp
, vp
);
} while (lfs_writeinode(fs
, sp
, ip
));
} while (lfs_writeseg(fs
, sp
) && ip
->i_number
== LFS_IFILE_INUM
);
if (sp
->seg_flags
& SEGM_SYNC
)
++lfs_stats
.nsync_writes
;
if (sp
->seg_flags
& SEGM_CKP
)
++lfs_stats
.ncheckpoints
;
lfs_writevnodes(fs
, mp
, sp
, op
)
loop
: for (vp
= mp
->mnt_mounth
; vp
; vp
= vp
->v_mountf
) {
/*
 * If the vnode that we are about to sync is no longer
 * associated with this mount point, start over.
 */
/* XXX ignore dirops for now
if (op == VN_DIROP && !(vp->v_flag & VDIROP) ||
op != VN_DIROP && (vp->v_flag & VDIROP))
if (op
== VN_EMPTY
&& vp
->v_dirtyblkhd
.le_next
)
* Write the inode/file if dirty and it's not the
if ((ip
->i_flag
& (IMODIFIED
| IACCESS
| IUPDATE
| ICHANGE
) ||
vp
->v_dirtyblkhd
.le_next
!= NULL
) &&
ip
->i_number
!= LFS_IFILE_INUM
) {
if (vp
->v_dirtyblkhd
.le_next
!= NULL
)
lfs_writefile(fs
, sp
, vp
);
(void) lfs_writeinode(fs
, sp
, ip
);
int flags
; /* Do a checkpoint. */
fs
= VFSTOUFS(mp
)->um_lfs
;
* If we have fewer than 2 clean segments, wait until cleaner
LFS_CLEANERINFO(cip
, fs
, bp
);
printf ("segs clean: %d\n", clean
);
wakeup(&lfs_allclean_wakeup
);
if (error
= tsleep(&fs
->lfs_avail
, PRIBIO
+ 1,
* Allocate a segment structure and enough space to hold pointers to
* the maximum possible number of buffers which can be described in a
do_ckp
= flags
& SEGM_CKP
|| fs
->lfs_nactive
> MAX_ACTIVE
;
lfs_seglock(fs
, flags
| (do_ckp
? SEGM_CKP
: 0));
lfs_writevnodes(fs
, mp
, sp
, VN_REG
);
/* XXX ignore ordering of dirops for now */
if (fs->lfs_dirops && (error =
tsleep(&fs->lfs_writer, PRIBIO + 1, "lfs writer", 0))) {
free(sp->bpp, M_SEGMENT);
lfs_writevnodes(fs, mp, sp, VN_DIROP);
/*
 * If we are doing a checkpoint, mark everything since the
 * last checkpoint as no longer ACTIVE.
 */
for (ibno
= fs
->lfs_cleansz
+ fs
->lfs_segtabsz
;
--ibno
>= fs
->lfs_cleansz
; ) {
if (bread(fs
->lfs_ivnode
, ibno
, fs
->lfs_bsize
,
panic("lfs: ifile read");
segusep
= (SEGUSE
*)bp
->b_data
;
for (i
= fs
->lfs_sepb
; i
--; segusep
++)
segusep
->su_flags
&= ~SEGUSE_ACTIVE
;
if (do_ckp
|| fs
->lfs_doifile
) {
if (vp
->v_dirtyblkhd
.le_next
!= NULL
)
lfs_writefile(fs
, sp
, vp
);
(void)lfs_writeinode(fs
, sp
, ip
);
if (lfs_writeseg(fs
, sp
) && do_ckp
)
(void) lfs_writeseg(fs
, sp
);
/*
 * If the I/O count is non-zero, sleep until it reaches zero.  At the
 * moment, the user's process hangs around so we can sleep.
 */
/* XXX ignore dirops for now
if (sp
->seg_flags
& SEGM_SYNC
)
++lfs_stats
.nsync_writes
;
if (sp
->seg_flags
& SEGM_CKP
)
++lfs_stats
.ncheckpoints
;
/*
 * Write the dirty blocks associated with a vnode.
 */
lfs_writefile(fs
, sp
, vp
)
if (sp
->seg_bytes_left
< fs
->lfs_bsize
||
sp
->sum_bytes_left
< sizeof(struct finfo
))
(void) lfs_writeseg(fs
, sp
);
sp
->sum_bytes_left
-= sizeof(struct finfo
) - sizeof(daddr_t
);
++((SEGSUM
*)(sp
->segsum
))->ss_nfinfo
;
fip
->fi_ino
= VTOI(vp
)->i_number
;
LFS_IENTRY(ifp
, fs
, fip
->fi_ino
, bp
);
fip
->fi_version
= ifp
->if_version
;
* It may not be necessary to write the meta-data blocks at this point,
* as the roll-forward recovery code should be able to reconstruct the
lfs_gather(fs
, sp
, vp
, lfs_match_data
);
lfs_gather(fs
, sp
, vp
, lfs_match_indir
);
lfs_gather(fs
, sp
, vp
, lfs_match_dindir
);
lfs_gather(fs
, sp
, vp
, lfs_match_tindir
);
if (fip
->fi_nblocks
!= 0) {
(struct finfo
*)((caddr_t
)fip
+ sizeof(struct finfo
) +
sizeof(daddr_t
) * (fip
->fi_nblocks
- 1));
sp
->start_lbp
= &sp
->fip
->fi_blocks
[0];
sp
->sum_bytes_left
+= sizeof(struct finfo
) - sizeof(daddr_t
);
--((SEGSUM
*)(sp
->segsum
))->ss_nfinfo
;
lfs_writeinode(fs
, sp
, ip
)
if (!(ip
->i_flag
& (IMODIFIED
| IACCESS
| IUPDATE
| ICHANGE
)))
/* Allocate a new inode block if necessary. */
/* Allocate a new segment if necessary. */
if (sp
->seg_bytes_left
< fs
->lfs_bsize
||
sp
->sum_bytes_left
< sizeof(daddr_t
))
(void) lfs_writeseg(fs
, sp
);
/* Get next inode block. */
fs
->lfs_offset
+= fsbtodb(fs
, 1);
lfs_newbuf(VTOI(fs
->lfs_ivnode
)->i_devvp
, daddr
,
/* Zero out inode numbers */
for (i
= 0; i
< INOPB(fs
); ++i
)
((struct dinode
*)sp
->ibp
->b_data
)[i
].di_inumber
= 0;
fs
->lfs_avail
-= fsbtodb(fs
, 1);
/* Set remaining space counters. */
sp
->seg_bytes_left
-= fs
->lfs_bsize
;
sp
->sum_bytes_left
-= sizeof(daddr_t
);
ndx
= LFS_SUMMARY_SIZE
/ sizeof(daddr_t
) -
sp
->ninodes
/ INOPB(fs
) - 1;
((daddr_t
*)(sp
->segsum
))[ndx
] = daddr
;
/* Update the inode times and copy the inode onto the inode page. */
if (ip
->i_flag
& IMODIFIED
)
ITIMES(ip
, &time
, &time
);
ip
->i_flag
&= ~(IMODIFIED
| IACCESS
| IUPDATE
| ICHANGE
);
((struct dinode
*)bp
->b_data
)[sp
->ninodes
% INOPB(fs
)] = ip
->i_din
;
/* Increment inode count in segment summary block. */
++((SEGSUM
*)(sp
->segsum
))->ss_ninos
;
/* If this page is full, set flag to allocate a new page. */
if (++sp
->ninodes
% INOPB(fs
) == 0)
/*
 * If updating the ifile, update the super-block.  Update the disk
 * address and access times for this inode in the ifile.
 */
if (ino
== LFS_IFILE_INUM
) {
fs
->lfs_idaddr
= bp
->b_blkno
;
LFS_IENTRY(ifp
, fs
, ino
, ibp
);
ifp
->if_daddr
= bp
->b_blkno
;
/*
 * No need to update segment usage if there was no former inode address
 * or if the last inode address is in the current partial segment.
 */
if (daddr
!= LFS_UNUSED_DADDR
&&
!(daddr
>= fs
->lfs_lastpseg
&& daddr
<= bp
->b_blkno
)) {
LFS_SEGENTRY(sup
, fs
, datosn(fs
, daddr
), bp
);
if (sup
->su_nbytes
< sizeof(struct dinode
)) {
/* XXX -- Change to a panic. */
printf("lfs: negative bytes (segment %d)\n",
sup
->su_nbytes
-= sizeof(struct dinode
);
(ino
== LFS_IFILE_INUM
&& !(bp
->b_flags
& B_GATHERED
));
lfs_gatherblock(sp
, bp
, sptr
)
/*
 * If full, finish this segment.  We may be doing I/O, so
 * release and reacquire the splbio().
 */
panic ("lfs_gatherblock: Null vp in segment");
if (sp
->sum_bytes_left
< sizeof(daddr_t
) ||
sp
->seg_bytes_left
< fs
->lfs_bsize
) {
version
= sp
->fip
->fi_version
;
(void) lfs_writeseg(fs
, sp
);
sp
->fip
->fi_version
= version
;
sp
->fip
->fi_ino
= VTOI(sp
->vp
)->i_number
;
/* Add the current file to the segment summary. */
++((SEGSUM
*)(sp
->segsum
))->ss_nfinfo
;
sizeof(struct finfo
) - sizeof(daddr_t
);
/* Insert into the buffer list, update the FINFO block. */
bp
->b_flags
|= B_GATHERED
;
sp
->fip
->fi_blocks
[sp
->fip
->fi_nblocks
++] = bp
->b_lblkno
;
sp
->sum_bytes_left
-= sizeof(daddr_t
);
sp
->seg_bytes_left
-= fs
->lfs_bsize
;
lfs_gather(fs
, sp
, vp
, match
)
int (*match
) __P((struct lfs
*, struct buf
*));
loop
: for (bp
= vp
->v_dirtyblkhd
.le_next
; bp
; bp
= bp
->b_vnbufs
.qe_next
) {
if (bp
->b_flags
& B_BUSY
|| !match(fs
, bp
) ||
bp
->b_flags
& B_GATHERED
)
if (!(bp
->b_flags
& B_DELWRI
))
panic("lfs_gather: bp not B_DELWRI");
if (!(bp
->b_flags
& B_LOCKED
))
panic("lfs_gather: bp not B_LOCKED");
if (lfs_gatherblock(sp
, bp
, &s
))
* Update the metadata that points to the blocks listed in the FINFO
struct indir a
[NIADDR
+ 2], *ap
;
int db_per_fsb
, error
, i
, nblocks
, num
;
nblocks
= &sp
->fip
->fi_blocks
[sp
->fip
->fi_nblocks
] - sp
->start_lbp
;
if (vp
== NULL
|| nblocks
== 0)
if (!(sp
->seg_flags
& SEGM_CLEAN
))
lfs_shellsort(sp
->start_bpp
, sp
->start_lbp
, nblocks
);
/*
 * Assign disk addresses, and update references to the logical
 * block and the segment usage information.
 */
db_per_fsb
= fsbtodb(fs
, 1);
for (i
= nblocks
; i
--; ++sp
->start_bpp
) {
(*sp
->start_bpp
)->b_blkno
= off
= fs
->lfs_offset
;
fs
->lfs_offset
+= db_per_fsb
;
if (error
= ufs_bmaparray(vp
, lbn
, &daddr
, a
, &num
, NULL
))
panic("lfs_updatemeta: ufs_bmaparray %d", error
);
ip
->i_ib
[a
[0].in_off
] = off
;
if (bread(vp
, ap
->in_lbn
, fs
->lfs_bsize
, NOCRED
, &bp
))
panic("lfs_updatemeta: bread bno %d",
/*
 * Bread may create a new indirect block which needs
 * to get counted for the inode.
 */
if (bp
->b_blkno
== -1 && !(bp
->b_flags
& B_CACHE
)) {
printf ("Updatemeta allocating indirect block: shouldn't happen\n");
ip
->i_blocks
+= btodb(fs
->lfs_bsize
);
fs
->lfs_bfree
-= btodb(fs
->lfs_bsize
);
((daddr_t
*)bp
->b_data
)[ap
->in_off
] = off
;
/* Update segment usage information. */
if (daddr
!= UNASSIGNED
&&
!(daddr
>= fs
->lfs_lastpseg
&& daddr
<= off
)) {
LFS_SEGENTRY(sup
, fs
, datosn(fs
, daddr
), bp
);
if (sup
->su_nbytes
< fs
->lfs_bsize
) {
/* XXX -- Change to a panic. */
printf("lfs: negative bytes (segment %d)\n",
panic ("Negative Bytes");
sup
->su_nbytes
-= fs
->lfs_bsize
;
/* Advance to the next segment. */
if (!LFS_PARTIAL_FITS(fs
)) {
/* Wake up any cleaning procs waiting on this file system. */
wakeup(&lfs_allclean_wakeup
);
fs
->lfs_offset
= fs
->lfs_curseg
;
sp
->seg_number
= datosn(fs
, fs
->lfs_curseg
);
sp
->seg_bytes_left
= fs
->lfs_dbpseg
* DEV_BSIZE
;
/*
 * If the segment contains a superblock, update the offset
 * and summary address to skip over it.
 */
LFS_SEGENTRY(sup
, fs
, sp
->seg_number
, bp
);
if (sup
->su_flags
& SEGUSE_SUPERBLOCK
) {
fs
->lfs_offset
+= LFS_SBPAD
/ DEV_BSIZE
;
sp
->seg_bytes_left
-= LFS_SBPAD
;
sp
->seg_number
= datosn(fs
, fs
->lfs_curseg
);
sp
->seg_bytes_left
= (fs
->lfs_dbpseg
-
(fs
->lfs_offset
- fs
->lfs_curseg
)) * DEV_BSIZE
;
fs
->lfs_lastpseg
= fs
->lfs_offset
;
/* Get a new buffer for SEGSUM and enter it into the buffer list. */
*sp
->cbpp
= lfs_newbuf(VTOI(fs
->lfs_ivnode
)->i_devvp
, fs
->lfs_offset
,
sp
->segsum
= (*sp
->cbpp
)->b_data
;
bzero(sp
->segsum
, LFS_SUMMARY_SIZE
);
sp
->start_bpp
= ++sp
->cbpp
;
fs
->lfs_offset
+= LFS_SUMMARY_SIZE
/ DEV_BSIZE
;
/* Set point to SEGSUM, initialize it. */
ssp
->ss_next
= fs
->lfs_nextseg
;
ssp
->ss_nfinfo
= ssp
->ss_ninos
= 0;
/* Set pointer to first FINFO, initialize it. */
sp
->fip
= (struct finfo
*)(sp
->segsum
+ sizeof(SEGSUM
));
sp
->start_lbp
= &sp
->fip
->fi_blocks
[0];
sp
->seg_bytes_left
-= LFS_SUMMARY_SIZE
;
sp
->sum_bytes_left
= LFS_SUMMARY_SIZE
- sizeof(SEGSUM
);
/*
 * Return the next segment to write.
 */
int curseg
, error
, isdirty
, sn
;
LFS_SEGENTRY(sup
, fs
, datosn(fs
, fs
->lfs_nextseg
), bp
);
sup
->su_flags
|= SEGUSE_DIRTY
| SEGUSE_ACTIVE
;
LFS_CLEANERINFO(cip
, fs
, bp
);
fs
->lfs_lastseg
= fs
->lfs_curseg
;
fs
->lfs_curseg
= fs
->lfs_nextseg
;
for (sn
= curseg
= datosn(fs
, fs
->lfs_curseg
);;) {
sn
= (sn
+ 1) % fs
->lfs_nseg
;
panic("lfs_nextseg: no clean segments");
LFS_SEGENTRY(sup
, fs
, sn
, bp
);
isdirty
= sup
->su_flags
& SEGUSE_DIRTY
;
fs
->lfs_nextseg
= sntoda(fs
, sn
);
extern int locked_queue_count
;
struct buf
**bpp
, *bp
, *cbp
;
int ch_per_blk
, do_again
, error
, i
, nblocks
, num
, s
;
int (*strategy
)__P((struct vop_strategy_args
*));
struct vop_strategy_args vop_strategy_a
;
/*
 * If there are no buffers other than the segment summary to write
 * and it is not a checkpoint, don't do anything.  On a checkpoint,
 * even if there aren't any buffers, you need to write the superblock.
 */
if ((nblocks
= sp
->cbpp
- sp
->bpp
) == 1)
ssp
= (SEGSUM
*)sp
->segsum
;
/* Update the segment usage information. */
LFS_SEGENTRY(sup
, fs
, sp
->seg_number
, bp
);
ninos
= (ssp
->ss_ninos
+ INOPB(fs
) - 1) / INOPB(fs
);
sup
->su_nbytes
+= nblocks
- 1 - ninos
<< fs
->lfs_bshift
;
sup
->su_nbytes
+= ssp
->ss_ninos
* sizeof(struct dinode
);
sup
->su_nbytes
+= LFS_SUMMARY_SIZE
;
sup
->su_lastmod
= time
.tv_sec
;
do_again
= !(bp
->b_flags
& B_GATHERED
);
/*
 * Compute checksum across data and then across summary; the first
 * block (the summary block) is skipped.  Set the create time here
 * so that it's guaranteed to be later than the inode mod times.
 *
 * XXX Fix this to do it inline, instead of malloc/copy.
 */
datap
= dp
= malloc(nblocks
* sizeof(u_long
), M_SEGMENT
, M_WAITOK
);
for (bpp
= sp
->bpp
, i
= nblocks
- 1; i
--;) {
if ((*++bpp
)->b_flags
& B_INVAL
) {
if (copyin((*bpp
)->b_saveaddr
, dp
++, sizeof(u_long
)))
panic("lfs_writeseg: copyin failed");
*dp
++ = ((u_long
*)(*bpp
)->b_data
)[0];
ssp
->ss_create
= time
.tv_sec
;
ssp
->ss_datasum
= cksum(datap
, (nblocks
- 1) * sizeof(u_long
));
cksum(&ssp
->ss_datasum
, LFS_SUMMARY_SIZE
- sizeof(ssp
->ss_sumsum
));
if (fs
->lfs_bfree
< fsbtodb(fs
, ninos
) + LFS_SUMMARY_SIZE
/ DEV_BSIZE
)
panic("lfs_writeseg: No diskspace for summary");
fs
->lfs_bfree
-= (fsbtodb(fs
, ninos
) + LFS_SUMMARY_SIZE
/ DEV_BSIZE
);
i_dev
= VTOI(fs
->lfs_ivnode
)->i_dev
;
strategy
= VTOI(fs
->lfs_ivnode
)->i_devvp
->v_op
[VOFFSET(vop_strategy
)];
/*
 * When we simply write the blocks we lose a rotation for every block
 * written.  To avoid this problem, we allocate memory in chunks, copy
 * the buffers into the chunk and write the chunk.  MAXPHYS is the
 * largest size I/O devices can handle.
 *
 * When the data is copied to the chunk, turn off the B_LOCKED bit
 * and brelse the buffer (which will move them to the LRU list).  Add
 * the B_CALL flag to the buffer header so we can count I/O's for the
 * checkpoints and so we can release the allocated memory.
 */
* This should be removed if the new virtual memory system allows us to
* easily make the buffers contiguous in kernel memory and if that's
ch_per_blk
= MAXPHYS
/ fs
->lfs_bsize
;
for (bpp
= sp
->bpp
, i
= nblocks
; i
;) {
size
= num
* fs
->lfs_bsize
;
cbp
= lfs_newbuf(VTOI(fs
->lfs_ivnode
)->i_devvp
,
cbp
->b_flags
|= B_ASYNC
| B_BUSY
;
for (p
= cbp
->b_data
; num
--;) {
/*
 * Fake buffers from the cleaner are marked as B_INVAL.
 * We need to copy the data from user space rather than
 * from the buffer indicated.
 * XXX == what do I do on an error?
 */
if (bp
->b_flags
& B_INVAL
) {
if (copyin(bp
->b_saveaddr
, p
, bp
->b_bcount
))
panic("lfs_writeseg: copyin failed");
bcopy(bp
->b_data
, p
, bp
->b_bcount
);
if (bp
->b_flags
& B_LOCKED
)
bp
->b_flags
&= ~(B_ERROR
| B_READ
| B_DELWRI
|
if (bp
->b_flags
& B_CALL
) {
/* if B_CALL, it was created with newbuf */
if (!(bp
->b_flags
& B_INVAL
))
free(bp
->b_data
, M_SEGMENT
);
reassignbuf(bp
, bp
->b_vp
);
++cbp
->b_vp
->v_numoutput
;
cbp
->b_bcount
= p
- (char *)cbp
->b_data
;
/*
 * XXXX This is a gross and disgusting hack.  Since these
 * buffers are physically addressed, they hang off the
 * device vnode (devvp).  As a result, they have no way
 * of getting to the LFS superblock or lfs structure to
 * keep track of the number of I/O's pending.  So, I am
 * going to stuff the fs into the saveaddr field of
 */
cbp
->b_saveaddr
= (caddr_t
)fs
;
vop_strategy_a
.a_desc
= VDESC(vop_strategy
);
vop_strategy_a
.a_bp
= cbp
;
(strategy
)(&vop_strategy_a
);
/*
 * Vinvalbuf can move locked buffers off the locked queue
 * and we have no way of knowing about this.  So, after
 * doing a big write, we recalculate how many buffers are
 * really still left on the locked queue.
 */
locked_queue_count
= count_lock_queue();
wakeup(&locked_queue_count
);
lfs_stats
.blocktot
+= nblocks
- 1;
if (fs
->lfs_sp
->seg_flags
& SEGM_SYNC
)
if (fs
->lfs_sp
->seg_flags
& SEGM_CLEAN
) {
++lfs_stats
.pcleanwrites
;
lfs_stats
.cleanblocks
+= nblocks
- 1;
return (lfs_initseg(fs
) || do_again
);
int (*strategy
) __P((struct vop_strategy_args
*));
struct vop_strategy_args vop_strategy_a
;
i_dev
= VTOI(fs
->lfs_ivnode
)->i_dev
;
strategy
= VTOI(fs
->lfs_ivnode
)->i_devvp
->v_op
[VOFFSET(vop_strategy
)];
/* Checksum the superblock and copy it into a buffer. */
fs
->lfs_cksum
= cksum(fs
, sizeof(struct lfs
) - sizeof(fs
->lfs_cksum
));
bp
= lfs_newbuf(VTOI(fs
->lfs_ivnode
)->i_devvp
, fs
->lfs_sboffs
[0],
*(struct lfs
*)bp
->b_data
= *fs
;
/* XXX Toggle between first two superblocks; for now just write first */
bp
->b_flags
|= B_BUSY
| B_CALL
| B_ASYNC
;
bp
->b_flags
&= ~(B_DONE
| B_ERROR
| B_READ
| B_DELWRI
);
bp
->b_iodone
= lfs_supercallback
;
vop_strategy_a
.a_desc
= VDESC(vop_strategy
);
vop_strategy_a
.a_bp
= bp
;
(strategy
)(&vop_strategy_a
);
* Logical block number match routines used when traversing the dirty block
return (bp
->b_lblkno
>= 0);
return (lbn
< 0 && (-lbn
- NDADDR
) % NINDIR(fs
) == 0);
return (lbn
< 0 && (-lbn
- NDADDR
) % NINDIR(fs
) == 1);
return (lbn
< 0 && (-lbn
- NDADDR
) % NINDIR(fs
) == 2);
/*
 * Allocate a new buffer header.
 */
lfs_newbuf(vp
, daddr
, size
)
nbytes
= roundup(size
, DEV_BSIZE
);
bp
= malloc(sizeof(struct buf
), M_SEGMENT
, M_WAITOK
);
bzero(bp
, sizeof(struct buf
));
bp
->b_data
= malloc(nbytes
, M_SEGMENT
, M_WAITOK
);
bp
->b_iodone
= lfs_callback
;
bp
->b_flags
|= B_BUSY
| B_CALL
| B_NOCACHE
;
fs
= (struct lfs
*)bp
->b_saveaddr
;
if (fs
->lfs_iocount
== 0)
panic("lfs_callback: zero iocount\n");
if (--fs
->lfs_iocount
== 0)
wakeup(&fs
->lfs_iocount
);
free(bp
->b_data
, M_SEGMENT
);
free(bp
->b_data
, M_SEGMENT
);
/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 *
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
 */
lfs_shellsort(bp_array
, lb_array
, nmemb
)
static int __rsshell_increments
[] = { 4, 1, 0 };
register int incr
, *incrp
, t1
, t2
;
for (incrp
= __rsshell_increments
; incr
= *incrp
++;)
for (t1
= incr
; t1
< nmemb
; ++t1
)
for (t2
= t1
- incr
; t2
>= 0;)
if (lb_array
[t2
] > lb_array
[t2
+ incr
]) {
lb_array
[t2
] = lb_array
[t2
+ incr
];
lb_array
[t2
+ incr
] = lb_temp
;
bp_array
[t2
] = bp_array
[t2
+ incr
];
bp_array
[t2
+ incr
] = bp_temp
;
/*
 * Check VXLOCK.  Return 1 if the vnode is locked.  Otherwise, bump the
 * ref count, removing the vnode from the free list if it is on it.
 */
register struct vnode
*vp
;
register struct vnode
*vq
;
extern struct vnode
*vfreeh
;
extern struct vnode
**vfreet
;
if (vp
->v_usecount
== 0) {
vq
->v_freeb
= vp
->v_freeb
;
register struct vnode
*vp
;
extern struct vnode
*vfreeh
;
extern struct vnode
**vfreet
;
if (vp
->v_usecount
== 0) {