+ /*
+ * When we simply write the blocks we lose a rotation for every block
+ * written. To avoid this problem, we allocate memory in chunks, copy
+ * the buffers into the chunk and write the chunk. MAXPHYS is the
+ * largest size I/O devices can handle.
+ * When the data is copied to the chunk, turn off the B_LOCKED bit
+ * and brelse the buffer (which will move them to the LRU list). Add
+ * the B_CALL flag to the buffer header so we can count I/O's for the
+ * checkpoints and so we can release the allocated memory.
+ *
+ * XXX
+ * This should be removed if the new virtual memory system allows us to
+ * easily make the buffers contiguous in kernel memory and if that's
+ * fast enough.
+ */
+ ch_per_blk = MAXPHYS / fs->lfs_bsize;
+ for (bpp = sp->bpp, i = nblocks; i;) {
+ num = ch_per_blk;
+ if (num > i)
+ num = i;
+ i -= num;
+ size = num * fs->lfs_bsize;
+
+ cbp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp,
+ (*bpp)->b_blkno, size);
+ cbp->b_dev = i_dev;
+ cbp->b_flags |= B_ASYNC | B_BUSY;
+
+ s = splbio();
+ ++fs->lfs_iocount;
+ for (p = cbp->b_data; num--;) {
+ bp = *bpp++;
+ /*
+ * Fake buffers from the cleaner are marked as B_INVAL.
+ * We need to copy the data from user space rather than
+ * from the buffer indicated.
+ * XXX == what do I do on an error?
+ */
+ if (bp->b_flags & B_INVAL) {
+ if (copyin(bp->b_saveaddr, p, bp->b_bcount))
+ panic("lfs_writeseg: copyin failed");
+ } else
+ bcopy(bp->b_data, p, bp->b_bcount);
+ p += bp->b_bcount;
+ if (bp->b_flags & B_LOCKED)
+ --locked_queue_count;
+ bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
+ B_LOCKED | B_GATHERED);
+ if (bp->b_flags & B_CALL) {
+ /* if B_CALL, it was created with newbuf */
+ brelvp(bp);
+ if (!(bp->b_flags & B_INVAL))
+ free(bp->b_data, M_SEGMENT);
+ free(bp, M_SEGMENT);
+ } else {
+ bremfree(bp);
+ bp->b_flags |= B_DONE;
+ reassignbuf(bp, bp->b_vp);
+ brelse(bp);
+ }