+ return(bp);
+}
+
+/*
+ * Allocate space associated with a buffer.
+ * If the space can't be obtained, the buffer is released
+ * (the error exit is taken inside allocbuf).
+ *
+ * Returns 1 if bp already has the requested size, 0 if a dirty
+ * buffer had to be written out first (caller must retry), and
+ * otherwise the result of allocbuf() -- presumably non-zero on
+ * success; TODO confirm against allocbuf's full definition.
+ */
+brealloc(bp, size)
+ register struct buf *bp;
+ int size;
+{
+ daddr_t start, last; /* first/last disk block spanned by the new size */
+ register struct buf *ep;
+ struct buf *dp; /* head of bp's device/block hash chain */
+ int s; /* saved processor priority for spl6/splx */
+
+ /*
+ * First need to make sure that all overlapping previous I/O
+ * is dispatched with.
+ */
+ if (size == bp->b_bcount)
+ return (1);
+ if (size < bp->b_bcount) {
+ /*
+ * Shrinking: a delayed-write buffer is flushed first and
+ * the caller must start over; a locked buffer must never
+ * shrink (its contents are pinned), hence the panic.
+ */
+ if (bp->b_flags & B_DELWRI) {
+ bwrite(bp);
+ return (0);
+ }
+ if (bp->b_flags & B_LOCKED)
+ panic("brealloc");
+ return (allocbuf(bp, size));
+ }
+ /* Growing: the current contents no longer fill the buffer. */
+ bp->b_flags &= ~B_DONE;
+ if (bp->b_dev == NODEV)
+ return (allocbuf(bp, size));
+
+ /*
+ * Search cache for any buffers that overlap the one that we
+ * are trying to allocate. Overlapping buffers must be marked
+ * invalid, after being written out if they are dirty. (indicated
+ * by B_DELWRI) A disk block must be mapped by at most one buffer
+ * at any point in time. Care must be taken to avoid deadlocking
+ * when two buffers are trying to get the same set of disk blocks.
+ */
+ start = bp->b_blkno;
+ last = start + (size / DEV_BSIZE) - 1;
+ dp = BUFHASH(bp->b_dev, bp->b_blkno);
+loop:
+ for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
+ if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
+ continue;
+ /* look for overlap */
+ if (ep->b_bcount == 0 || ep->b_blkno > last ||
+ ep->b_blkno + (ep->b_bcount / DEV_BSIZE) <= start)
+ continue;
+ /*
+ * Raise priority (spl6) so the B_BUSY test and the sleep
+ * are not raced by I/O-completion interrupts. If ep is
+ * busy, mark it wanted, wait, and rescan from the top of
+ * the chain -- ep may have moved or changed while we slept.
+ */
+ s = spl6();
+ if (ep->b_flags&B_BUSY) {
+ ep->b_flags |= B_WANTED;
+ sleep((caddr_t)ep, PRIBIO+1);
+ splx(s);
+ goto loop;
+ }
+ splx(s);
+ notavail(ep); /* presumably removes ep from the free list -- verify */
+ if (ep->b_flags & B_DELWRI) {
+ /*
+ * Dirty overlap: write it out, then rescan; ep is
+ * assumed released by bwrite -- TODO confirm.
+ */
+ bwrite(ep);
+ goto loop;
+ }
+ /* Clean overlap: invalidate the mapping and release it. */
+ ep->b_flags |= B_INVAL;
+ brelse(ep);
+ }
+ return (allocbuf(bp, size));
+}
+
+/*
+ * Expand or contract the actual memory allocated to a buffer.
+ * If no memory is available, release buffer and take error exit
+ */
+allocbuf(tp, size)
+ register struct buf *tp;
+ int size;
+{
+ register struct buf *bp, *ep;
+ int sizealloc, take;
+#ifdef sun
+ register char *a;
+ int osize;
+#endif
+
+#ifndef sun
+ sizealloc = roundup(size, CLBYTES);
+#else
+ sizealloc = roundup(size, BUFALLOCSIZE);
+#endif
+ /*
+ * Buffer size does not change
+ */
+ if (sizealloc == tp->b_bufsize)
+ goto out;
+#ifndef sun
+ /*
+ * Buffer size is shrinking.
+ * Place excess space in a buffer header taken from the
+ * BQ_EMPTY buffer list and placed on the "most free" list.
+ * If no extra buffer headers are available, leave the
+ * extra space in the present buffer.
+ */
+ if (sizealloc < tp->b_bufsize) {
+ ep = bfreelist[BQ_EMPTY].av_forw;
+ if (ep == &bfreelist[BQ_EMPTY])
+ goto out;
+ notavail(ep);
+ pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
+ (int)tp->b_bufsize - sizealloc);
+ ep->b_bufsize = tp->b_bufsize - sizealloc;
+ tp->b_bufsize = sizealloc;
+ ep->b_flags |= B_INVAL;
+ ep->b_bcount = 0;
+ brelse(ep);
+ goto out;
+ }
+ /*
+ * More buffer space is needed. Get it out of buffers on
+ * the "most free" list, placing the empty headers on the
+ * BQ_EMPTY buffer header list.
+ */
+ while (tp->b_bufsize < sizealloc) {
+ take = sizealloc - tp->b_bufsize;
+ bp = getnewbuf();
+ if (take >= bp->b_bufsize)
+ take = bp->b_bufsize;
+ pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
+ &tp->b_un.b_addr[tp->b_bufsize], take);
+ tp->b_bufsize += take;
+ bp->b_bufsize = bp->b_bufsize - take;
+ if (bp->b_bcount > bp->b_bufsize)
+ bp->b_bcount = bp->b_bufsize;
+ if (bp->b_bufsize <= 0) {
+ bremhash(bp);
+ binshash(bp, &bfreelist[BQ_EMPTY]);
+ bp->b_dev = (dev_t)NODEV;
+ bp->b_error = 0;
+ bp->b_flags |= B_INVAL;
+ }
+ brelse(bp);
+ }
+#else
+ /*
+ * Buffer size is shrinking
+ * Just put the tail end back in the map
+ */
+ if (sizealloc < tp->b_bufsize) {
+ rmfree(buffermap, (long)(tp->b_bufsize - sizealloc),
+ (long)(tp->b_un.b_addr + sizealloc));
+ tp->b_bufsize = sizealloc;
+ goto out;