- s = splbio();
- if (sp->seg_flags & SEGM_CKP) {
- fs->lfs_iocount += nblocks;
- flags = B_ASYNC | B_BUSY | B_CALL;
- } else
- flags = B_ASYNC | B_BUSY;
- for (bpp = sp->bpp, i = nblocks; i--;) {
- bp = *bpp++;
- bp->b_flags |= flags;
- bp->b_flags &=
- ~(B_DONE | B_ERROR | B_READ | B_DELWRI | B_LOCKED);
- bp->b_dev = i_dev;
- bp->b_iodone = lfs_callback;
- if (!(bp->b_flags & B_NOCACHE)) {
- bremfree(bp);
- reassignbuf(bp, bp->b_vp);
+ /*
+ * When we simply write the blocks we lose a rotation for every block
+ * written. To avoid this problem, we allocate memory in chunks, copy
+ * the buffers into the chunk and write the chunk. 56K was chosen as
+ * some driver/controllers can't handle unsigned 16 bit transfers.
+ * When the data is copied to the chunk, turn off the B_LOCKED bit
+ * and brelse the buffer (which will move them to the LRU list). Add
+ * the B_CALL flag to the buffer header so we can count I/O's for the
+ * checkpoints and so we can release the allocated memory.
+ *
+ * XXX
+ * This should be removed if the new virtual memory system allows us to
+ * easily make the buffers contiguous in kernel memory and if that's
+ * fast enough.
+ */
+#define LFS_CHUNKSIZE (56 * 1024)
+ ch_per_blk = LFS_CHUNKSIZE / fs->lfs_bsize;
+ for (bpp = sp->bpp, i = nblocks; i;) {
+ num = ch_per_blk;
+ if (num > i)
+ num = i;
+ i -= num;
+ size = num * fs->lfs_bsize;
+
+ cbp = lfs_newbuf(fs, (*bpp)->b_blkno, 0);
+ cbp->b_dev = i_dev;
+ cbp->b_flags = B_ASYNC | B_BUSY | B_CALL;
+ cbp->b_iodone = lfs_callback;
+ cbp->b_saveaddr = cbp->b_un.b_addr;
+ cbp->b_un.b_addr = malloc(size, M_SEGMENT, M_WAITOK);
+
+ s = splbio();
+ ++fs->lfs_iocount;
+ for (p = cbp->b_un.b_addr; num--;) {
+ bp = *bpp++;
+ bcopy(bp->b_un.b_addr, p, bp->b_bcount);
+ p += bp->b_bcount;
+ bp->b_flags &=
+ ~(B_DONE | B_ERROR | B_READ | B_DELWRI | B_LOCKED);
+ if (!(bp->b_flags & B_NOCACHE)) {
+ bremfree(bp);
+ reassignbuf(bp, bp->b_vp);
+ }
+ brelse(bp);