+ return(bp);
+}
+
+/*
+ * Get an empty block, not assigned to any particular device.
+ * The buffer is marked invalid, freed of its old contents,
+ * rehashed onto the NODEV chain (headed by the AGE free list),
+ * and its data area is grown to the requested size.
+ * Retries from scratch if brealloc releases the buffer.
+ */
+struct buf *
+geteblk(size)
+ int size;
+{
+ register struct buf *bp, *flist;
+
+loop:
+ bp = getnewbuf();
+ bp->b_flags |= B_INVAL;
+ bfree(bp);
+ bremhash(bp);
+ /* invalid NODEV buffers are hashed off the AGE free list header */
+ flist = &bfreelist[BQ_AGE];
+ binshash(bp, flist);
+ bp->b_dev = (dev_t)NODEV;
+ bp->b_error = 0;
+ if (brealloc(bp, size) == 0)
+ goto loop; /* brealloc released bp; start over with a new one */
+ return(bp);
+}
+
+/*
+ * Allocate space associated with a buffer.
+ * Returns 1 on success; if the space cannot be obtained the
+ * buffer is released and 0 is returned, and the caller must
+ * restart its operation.
+ */
+brealloc(bp, size)
+ register struct buf *bp;
+ int size;
+{
+ daddr_t start, last;
+ register struct buf *ep;
+ struct buf *dp;
+ int s;
+
+ /*
+ * First need to make sure that all overlapping previous I/O
+ * is dispatched with.
+ */
+ if (size == bp->b_bcount)
+ return (1);
+ if (size < bp->b_bcount) {
+ /*
+ * Shrinking: a delayed write must be pushed to disk at
+ * its current (full) size before the space is trimmed.
+ */
+ if (bp->b_flags & B_DELWRI) {
+ bwrite(bp);
+ return (0);
+ }
+ if (bp->b_flags & B_LOCKED)
+ panic("brealloc");
+ return (allocbuf(bp, size));
+ }
+ bp->b_flags &= ~B_DONE;
+ /* NODEV buffers map no disk blocks, so no overlap scan is needed */
+ if (bp->b_dev == NODEV)
+ return (allocbuf(bp, size));
+
+ /*
+ * Search cache for any buffers that overlap the one that we
+ * are trying to allocate. Overlapping buffers must be marked
+ * invalid, after being written out if they are dirty. (indicated
+ * by B_DELWRI) A disk block must be mapped by at most one buffer
+ * at any point in time. Care must be taken to avoid deadlocking
+ * when two buffers are trying to get the same set of disk blocks.
+ */
+ start = bp->b_blkno;
+ last = start + (size / DEV_BSIZE) - 1;
+ dp = BUFHASH(bp->b_dev, bp->b_blkno);
+loop:
+ for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
+ if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
+ continue;
+ /* look for overlap */
+ if (ep->b_bcount == 0 || ep->b_blkno > last ||
+ ep->b_blkno + (ep->b_bcount / DEV_BSIZE) <= start)
+ continue;
+ /* block interrupts while testing B_BUSY to avoid a missed wakeup */
+ s = spl6();
+ if (ep->b_flags&B_BUSY) {
+ ep->b_flags |= B_WANTED;
+ sleep((caddr_t)ep, PRIBIO+1);
+ splx(s);
+ goto loop; /* hash chain may have changed while asleep */
+ }
+ splx(s);
+ notavail(ep);
+ if (ep->b_flags & B_DELWRI) {
+ /* flush the dirty overlap, then rescan from the top */
+ bwrite(ep);
+ goto loop;
+ }
+ ep->b_flags |= B_INVAL;
+ brelse(ep);
+ }
+ return (allocbuf(bp, size));
+}
+
+/*
+ * Expand or contract the actual memory allocated to a buffer.
+ * The requested size is rounded up to the machine's buffer
+ * allocation unit. Returns 1 on success; if no memory is
+ * available (possible only in the sun resource-map case) the
+ * buffer is released and 0 is returned.
+ */
+allocbuf(tp, size)
+ register struct buf *tp;
+ int size;
+{
+ register struct buf *bp, *ep;
+ int sizealloc, take;
+#ifdef sun
+ register char *a;
+ int osize;
+#endif
+
+#ifndef sun
+ sizealloc = roundup(size, CLBYTES);
+#else
+ sizealloc = roundup(size, BUFALLOCSIZE);
+#endif
+ /*
+ * Buffer size does not change
+ */
+ if (sizealloc == tp->b_bufsize)
+ goto out;
+#ifndef sun
+ /*
+ * Buffer size is shrinking.
+ * Place excess space in a buffer header taken from the
+ * BQ_EMPTY buffer list and placed on the "most free" list.
+ * If no extra buffer headers are available, leave the
+ * extra space in the present buffer.
+ */
+ if (sizealloc < tp->b_bufsize) {
+ ep = bfreelist[BQ_EMPTY].av_forw;
+ if (ep == &bfreelist[BQ_EMPTY])
+ goto out;
+ notavail(ep);
+ /* move the surplus pages over to the spare header */
+ pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
+ (int)tp->b_bufsize - sizealloc);
+ ep->b_bufsize = tp->b_bufsize - sizealloc;
+ tp->b_bufsize = sizealloc;
+ ep->b_flags |= B_INVAL;
+ ep->b_bcount = 0;
+ brelse(ep);
+ goto out;
+ }
+ /*
+ * More buffer space is needed. Get it out of buffers on
+ * the "most free" list, placing the empty headers on the
+ * BQ_EMPTY buffer header list.
+ */
+ while (tp->b_bufsize < sizealloc) {
+ take = sizealloc - tp->b_bufsize;
+ bp = getnewbuf();
+ /* take at most what the donor buffer actually has */
+ if (take >= bp->b_bufsize)
+ take = bp->b_bufsize;
+ /* steal pages from the tail of bp onto the tail of tp */
+ pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
+ &tp->b_un.b_addr[tp->b_bufsize], take);
+ tp->b_bufsize += take;
+ bp->b_bufsize = bp->b_bufsize - take;
+ if (bp->b_bcount > bp->b_bufsize)
+ bp->b_bcount = bp->b_bufsize;
+ if (bp->b_bufsize <= 0) {
+ /* donor is now empty: park its header on BQ_EMPTY */
+ bremhash(bp);
+ binshash(bp, &bfreelist[BQ_EMPTY]);
+ bp->b_dev = (dev_t)NODEV;
+ bp->b_error = 0;
+ bp->b_flags |= B_INVAL;
+ }
+ brelse(bp);
+ }
+#else
+ /*
+ * Buffer size is shrinking
+ * Just put the tail end back in the map
+ */
+ if (sizealloc < tp->b_bufsize) {
+ rmfree(buffermap, (long)(tp->b_bufsize - sizealloc),
+ (long)(tp->b_un.b_addr + sizealloc));
+ tp->b_bufsize = sizealloc;
+ goto out;
+ }
+ /*
+ * Buffer is being expanded or created
+ * If being expanded, attempt to get contiguous
+ * section, otherwise get a new chunk and copy.
+ * If no space, free up a buffer on the AGE list
+ * and try again.
+ */
+ do {
+ if ((osize = tp->b_bufsize)) {
+ /* first try to grow the existing allocation in place */
+ a = (char *)rmget(buffermap, (long)(sizealloc-osize),
+ (long)(tp->b_un.b_addr + osize));
+ if (a == 0) {
+ /* can't grow in place: allocate anew and copy over */
+ a = (char *)rmalloc(buffermap, (long)sizealloc);
+ if (a != 0) {
+ bcopy(tp->b_un.b_addr, a, osize);
+ rmfree(buffermap, (long)osize,
+ (long)tp->b_un.b_addr);
+ tp->b_un.b_addr = a;
+ }
+ }
+ } else {
+ a = (char *)rmalloc(buffermap, (long)sizealloc);
+ if (a != 0)
+ tp->b_un.b_addr = a;
+ }
+ } while (a == 0 && bfreemem()); /* retry while space can be reclaimed */
+ if (a == 0) {
+ /* allocation failed for good: release buffer, take error exit */
+ brelse(tp);
+ return (0);
+ }
+ tp->b_bufsize = sizealloc;
+#endif
+out:
+ tp->b_bcount = size;
+ return (1);
+}
+
+/*
+ * Release space associated with a buffer.
+ */
+bfree(bp)
+ struct buf *bp;
+{
+#ifdef sun
+ if (bp->b_bufsize) {
+ rmfree(buffermap, (long)bp->b_bufsize, (long)bp->b_un.b_addr);
+ bp->b_bufsize = 0;