add memory filesystem initialization
usr/src/sys/kern/vfs_bio.c
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vfs_bio.c	7.12 (Berkeley) %G%
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "trace.h"
#include "ucred.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
#ifdef SECSIZE
	bp = getblk(dev, blkno, size, secsize);
#else SECSIZE
	*bpp = bp = getblk(vp, blkno, size);
#endif SECSIZE
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

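/*
 * Editorial sketch, not part of the original source: the usual
 * bread/brelse pairing.  The caller owns the returned buffer and must
 * brelse it, error or not.  "readexample", "lbn", and "bsize" are
 * hypothetical names; b_un.b_addr is the 4.3BSD buffer data pointer
 * and is an assumption here.
 */
#ifdef notdef
readexample(vp, lbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, cred, &bp);
	if (error) {
		brelse(bp);		/* still ours to release */
		return (error);
	}
	/* ... examine bp->b_un.b_addr for bp->b_bcount bytes ... */
	brelse(bp);			/* give the buffer back to the cache */
	return (0);
}
#endif
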
/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size),
			    blkno);
			u.u_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size),
			    blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (rablkno && !incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA,
			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA,
			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
			u.u_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
#ifdef SECSIZE
		return (bread(dev, blkno, size, secsize));
#else SECSIZE
		return (bread(vp, blkno, size, cred, bpp));
#endif SECSIZE
	return (biowait(bp));
}

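/*
 * Editorial sketch (assumed, not original): a sequential reader asks
 * breada for the current logical block and names the next one as the
 * read-ahead block, so its i/o proceeds while this block is consumed.
 * "lbn" and "bsize" are illustrative names.
 */
#ifdef notdef
	error = breada(vp, lbn, bsize, lbn + 1, bsize, cred, &bp);
#endif
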
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	trace(TR_BWRITE,
	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bcount), bp->b_blkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the q of blocks awaiting i/o completion status.
	 * For async writes that were not delayed, any error is
	 * reported later through biodone, so return 0 here.
	 */
	if ((flag&B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	}
	return (error);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
#ifdef notdef
	/*
	 * This does not work for buffers associated with
	 * vnodes that are remote - they have no dev.
	 * Besides, we don't use bio with tapes, so rather
	 * than develop a fix, we just ifdef this out for now.
	 */
	if (bdevsw[major(bp->b_dev)].d_flags & B_TAPE)
		bawrite(bp);
	else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
#endif
	bp->b_flags |= B_DELWRI | B_DONE;
	brelse(bp);
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

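/*
 * Editorial sketch contrasting the three write flavors (illustrative,
 * not original code): bwrite waits for completion and returns the
 * error, bawrite starts the i/o and returns immediately, and bdwrite
 * merely marks the buffer dirty so a later write of the same block
 * can be absorbed in core.  "fullblock" is a hypothetical flag.
 */
#ifdef notdef
	if (fullblock)
		bawrite(bp);	/* block won't be rewritten soon; start i/o */
	else
		bdwrite(bp);	/* partial block; expect another write */
	/* a caller needing completion status would use bwrite(bp) */
#endif
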
/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE,
	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize), bp->b_blkno);
	/*
	 * If someone is waiting for this buffer, or
	 * for any buffer to come free, wake 'em up.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	if (bp->b_flags & B_NOCACHE) {
		bp->b_flags |= B_INVAL;
	}
	if (bp->b_flags&B_ERROR) {
		if (bp->b_flags & B_LOCKED)
			bp->b_flags &= ~B_ERROR;	/* try again later */
		else
			brelvp(bp);			/* no assoc */
	}

	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_blkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Get the block from the cache if it is resident;
 * otherwise return a null buffer pointer in *bpp.
 */
baddr(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{

	if (incore(vp, blkno))
		return (bread(vp, blkno, size, cred, bpp));
	*bpp = 0;
	return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * If we find the buffer, but it is dirty (marked DELWRI) and
 * its size is changing, we must write it out first.  When the
 * buffer is shrinking, the write is done by brealloc to avoid
 * losing the unwritten data.  When the buffer is growing, the
 * write is done by getblk, so that bread will not read stale
 * disk data over the modified data in the buffer.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
#ifdef SECSIZE
getblk(dev, blkno, size, secsize)
#else SECSIZE
getblk(vp, blkno, size)
#endif SECSIZE
	register struct vnode *vp;
	daddr_t blkno;
	int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * To prevent overflow of 32-bit ints when converting block
	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
	 * to the maximum number that can be converted to a byte offset
	 * without overflow.  This is historic code; what bug it fixed,
	 * or whether it is still a reasonable thing to do is open to
	 * dispute.  mkm 9/85
	 */
	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT))
		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_vp != vp ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(bp);
		if (bp->b_bcount != size) {
			if (bp->b_bcount < size && (bp->b_flags&B_DELWRI)) {
				bp->b_flags &= ~B_ASYNC;
				(void) bwrite(bp);
				goto loop;
			}
			if (brealloc(bp, size) == 0)
				goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	if (bp->b_vp)
		brelvp(bp);
	VREF(vp);
	bp->b_vp = vp;
	bp->b_dev = vp->v_rdev;
#ifdef SECSIZE
	bp->b_blksize = secsize;
#endif SECSIZE
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}

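/*
 * Editorial note on the overflow clamp in getblk: with 32-bit ints,
 * NBBY == 8 and DEV_BSHIFT == 9 (512-byte device blocks), the test
 * limit is 1 << 23, since 2^23 << 9 == 2^32 no longer fits in 32
 * bits.  Oddly, the clamp value 1 << 24 is itself past that limit;
 * as the comment above says, the merit of this historic code is
 * open to dispute.
 */
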
/*
 * Get an empty block,
 * not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
loop:
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	brelvp(bp);
#ifdef SECSIZE
	bp->b_blksize = DEV_BSIZE;
#endif SECSIZE
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}

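/*
 * Editorial sketch (assumed, not original): geteblk supplies an
 * anonymous buffer for temporary or raw transfers.  It is hashed as
 * B_INVAL, so it never satisfies a later cache lookup, and the caller
 * simply brelse's it when finished.  "bsize" and the b_un.b_addr
 * field are assumptions.
 */
#ifdef notdef
	bp = geteblk(bsize);
	/* ... use bp->b_un.b_addr as scratch space ... */
	brelse(bp);
#endif
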
/*
 * Allocate space associated with a buffer.
 * If we can't get space, the buffer is released.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{
	daddr_t start, last;
	register struct buf *ep;
	struct buf *dp;
	int s;

	/*
	 * First we need to make sure that all overlapping previous I/O
	 * has been dispatched.
	 */
	if (size == bp->b_bcount)
		return (1);
	if (size < bp->b_bcount) {
		if (bp->b_flags & B_DELWRI) {
			(void) bwrite(bp);
			return (0);
		}
		if (bp->b_flags & B_LOCKED)
			panic("brealloc");
		return (allocbuf(bp, size));
	}
	bp->b_flags &= ~B_DONE;
	if (bp->b_vp == (struct vnode *)0)
		return (allocbuf(bp, size));

	trace(TR_BREALLOC,
	    pack(bp->b_vp->v_mount->m_fsid[0], size), bp->b_blkno);
	/*
	 * Search the cache for any buffers that overlap the one that we
	 * are trying to allocate.  Overlapping buffers must be marked
	 * invalid, after being written out if they are dirty (indicated
	 * by B_DELWRI).  A disk block must be mapped by at most one buffer
	 * at any point in time.  Care must be taken to avoid deadlocking
	 * when two buffers are trying to get the same set of disk blocks.
	 */
	start = bp->b_blkno;
#ifdef SECSIZE
	last = start + size/bp->b_blksize - 1;
#else SECSIZE
	last = start + btodb(size) - 1;
#endif SECSIZE
	dp = BUFHASH(bp->b_vp, bp->b_blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep == bp || ep->b_vp != bp->b_vp ||
		    (ep->b_flags & B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
#ifdef SECSIZE
		    ep->b_blkno + ep->b_bcount/ep->b_blksize <= start)
#else SECSIZE
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
#endif SECSIZE
			continue;
		s = splbio();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(ep);
		if (ep->b_flags & B_DELWRI) {
			(void) bwrite(ep);
			goto loop;
		}
		ep->b_flags |= B_INVAL;
		brelse(ep);
	}
	return (allocbuf(bp, size));
}

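/*
 * Editorial sketch of the overlap test used in brealloc and blkflush
 * (illustrative only): two buffers on the same vnode overlap when
 * their device-block intervals intersect.  With 512-byte device
 * blocks, an 8192-byte buffer at block 32 covers blocks 32..47,
 * since btodb(8192) == 16.  (The code above additionally skips
 * empty buffers before applying this test.)
 */
#ifdef notdef
static
overlap(a, b)
	register struct buf *a, *b;
{

	return (a->b_blkno + btodb(a->b_bcount) > b->b_blkno &&
	    b->b_blkno + btodb(b->b_bcount) > a->b_blkno);
}
#endif
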
/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	splx(s);
	bp = dp->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE,
	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize), bp->b_blkno);
	brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	/*
	 * Pick up the device's error number and pass it to the user;
	 * if there is an error but the number is 0 set a generalized code.
	 */
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		bp->b_dirtyoff = bp->b_dirtyend = 0;
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

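/*
 * Editorial sketch (assumed, not original): an async consumer can set
 * B_CALL and b_iodone before VOP_STRATEGY; biodone then invokes the
 * callback at interrupt level instead of waking a sleeper, so nobody
 * biowait's.  "mycallback" is a hypothetical function.
 */
#ifdef notdef
	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
	bp->b_iodone = mycallback;
	VOP_STRATEGY(bp);
	/* mycallback(bp) will run from biodone; it must dispose of bp */
#endif
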
/*
 * Ensure that no part of a specified block is in an incore buffer.
#ifdef SECSIZE
 * "size" is given in device blocks (the units of b_blkno).
#endif SECSIZE
 */
blkflush(vp, blkno, size)
	struct vnode *vp;
	daddr_t blkno;
#ifdef SECSIZE
	int size;
#else SECSIZE
	long size;
#endif SECSIZE
{
	register struct buf *ep;
	struct buf *dp;
	daddr_t start, last;
	int s, error, allerrors = 0;

	start = blkno;
#ifdef SECSIZE
	last = start + size - 1;
#else SECSIZE
	last = start + btodb(size) - 1;
#endif SECSIZE
	dp = BUFHASH(vp, blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep->b_vp != vp || (ep->b_flags & B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
#ifdef SECSIZE
		    ep->b_blkno + ep->b_bcount / ep->b_blksize <= start)
#else SECSIZE
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
#endif SECSIZE
			continue;
		s = splbio();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		if (ep->b_flags & B_DELWRI) {
			splx(s);
			notavail(ep);
			if (error = bwrite(ep))
				allerrors = error;
			goto loop;
		}
		splx(s);
	}
	return (allerrors);
}

/*
 * Make sure all write-behind blocks associated
 * with the mount point are flushed out (from sync).
 */
bflush(mountp)
	struct mount *mountp;
{
	register struct buf *bp;
	register struct buf *flist;
	int s;

loop:
	s = splbio();
	for (flist = bfreelist; flist < &bfreelist[BQ_EMPTY]; flist++) {
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
			if ((bp->b_flags & B_BUSY))
				continue;
			if ((bp->b_flags & B_DELWRI) == 0)
				continue;
			if (bp->b_vp && bp->b_vp->v_mount == mountp) {
				splx(s);
				notavail(bp);
				(void) bawrite(bp);
				goto loop;
			}
		}
	}
	splx(s);
}

/*
 * Invalidate in core blocks belonging to a closed or unmounted filesystem.
 *
 * We walk through the buffer pool and invalidate any buffers for the
 * indicated mount point.  Normally this routine is preceded by a bflush
 * call, so that on a quiescent filesystem there will be no dirty
 * buffers when we are done.  We return the count of dirty buffers when
 * we are finished.
 */
binval(mountp)
	struct mount *mountp;
{
	register struct buf *bp;
	register struct bufhd *hp;
	int s, dirty = 0;
#define dp ((struct buf *)hp)

loop:
	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++) {
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
			if (bp->b_vp == NULL || bp->b_vp->v_mount != mountp)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				goto loop;
			}
			splx(s);
			notavail(bp);
			if (bp->b_flags & B_DELWRI) {
				(void) bawrite(bp);
				dirty++;
				continue;
			}
			bp->b_flags |= B_INVAL;
			brelvp(bp);
			brelse(bp);
		}
	}
	return (dirty);
}

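/*
 * Editorial sketch of the sequence the comment above describes
 * (illustrative only): flush delayed writes first, so that on a
 * quiescent filesystem binval finds nothing dirty.
 */
#ifdef notdef
	bflush(mountp);			/* push out delayed writes */
	if (binval(mountp) != 0)	/* then invalidate the pool */
		printf("unmount: dirty buffers\n");
#endif
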
/*
 * Disassociate a buffer from its vnode.
 */
brelvp(bp)
	struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		return;
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	vrele(vp);
}