add lfs_flush routine, just a hack until Kirk's done
[unix-history] /usr/src/sys/kern/vfs_cluster.c
/*-
 * Copyright (c) 1982, 1986, 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This module is believed to contain source code proprietary to AT&T.
 * Use and redistribution is subject to the Berkeley Software License
 * Agreement and your Software Agreement with AT&T (Western Electric).
 *
 *	@(#)vfs_cluster.c	7.44 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register int i;
	register struct buf *bp, *dp;
	register struct bufhd *hp;
	int base, residual;

	for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
		hp->b_forw = hp->b_back = (struct buf *)hp;

	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = 0;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		bp->b_flags = B_BUSY|B_INVAL;
		brelse(bp);
	}
}
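
/*
 * Sizing note (illustrative figures, not from this source): with, say,
 * bufpages = 200 and nbuf = 64, base = 200 / 64 = 3 and residual =
 * 200 % 64 = 8, so the first 8 buffers start with 4 * CLBYTES of
 * memory and the remaining 56 start with 3 * CLBYTES, accounting for
 * all 200 pages.
 */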

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
#ifdef SECSIZE
	bp = getblk(dev, blkno, size, secsize);
#else SECSIZE
	*bpp = bp = getblk(vp, blkno, size);
#endif SECSIZE
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
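
/*
 * Usage sketch (illustrative, not part of this file): a typical
 * filesystem read of one logical block.  "lbn" and "bsize" are
 * hypothetical names; bread returns 0 or an errno and always hands
 * back a buffer in *bpp, so the caller releases it on error too.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...copy data out of bp->b_un.b_addr...
 *	brelse(bp);
 */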

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block.
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there is a read-ahead block, start I/O on it too.
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
#ifdef SECSIZE
		return (bread(dev, blkno, size, secsize));
#else SECSIZE
		return (bread(vp, blkno, size, cred, bpp));
#endif SECSIZE
	return (biowait(bp));
}
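
/*
 * Usage sketch (illustrative): sequential readers pass the next
 * logical block as the read-ahead request so its I/O is started
 * asynchronously while the current block is consumed.  "lbn",
 * "bsize", and "rasize" are hypothetical names.
 *
 *	if (error = breada(vp, lbn, bsize, lbn + 1, rasize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 */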

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	}
	return (error);
}
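
/*
 * Usage sketch (illustrative): a caller that must know the data is on
 * stable storage, e.g. when updating filesystem metadata, writes the
 * buffer synchronously and checks the error; bwrite releases the
 * buffer itself on the synchronous path.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	...fill in bp->b_un.b_addr...
 *	if (error = bwrite(bp))
 *		return (error);
 */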

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (bdevsw[major(bp->b_dev)].d_flags & B_TAPE) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}
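
/*
 * Usage sketch (illustrative): a partial-block write that is likely to
 * be extended soon marks the buffer dirty and returns without doing
 * I/O; the block is written later (e.g. by bawrite when getnewbuf
 * reclaims it, or by an explicit flush).
 *
 *	bp = getblk(vp, lbn, bsize);
 *	...modify part of bp->b_un.b_addr...
 *	bdwrite(bp);
 */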

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}
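
/*
 * Free-list placement summary (restating the logic above): buffers
 * with no memory go to the head of BQ_EMPTY; invalid or errored
 * buffers go to the head of BQ_AGE; otherwise the buffer is queued at
 * the tail of BQ_LOCKED (B_LOCKED), BQ_AGE (B_AGE), or BQ_LRU.
 */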

/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
#ifdef SECSIZE
getblk(dev, blkno, size, secsize)
#else SECSIZE
getblk(vp, blkno, size)
#endif SECSIZE
	register struct vnode *vp;
	daddr_t blkno;
	int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
#ifdef SECSIZE
	bp->b_blksize = secsize;
#endif SECSIZE
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
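
/*
 * Usage sketch (illustrative): getblk is used when the current
 * contents of the block are not needed, e.g. when a block is about to
 * be written in its entirety; no read is issued, the caller simply
 * fills the buffer and writes it.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	bzero(bp->b_un.b_addr, (unsigned)bsize);
 *	...fill in new contents...
 *	bwrite(bp);
 */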

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
#ifdef SECSIZE
	bp->b_blksize = DEV_BSIZE;
#endif SECSIZE
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}
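
/*
 * Usage sketch (illustrative): geteblk provides scratch memory that is
 * not associated with any vnode or block; since B_INVAL is set, the
 * final brelse simply returns the space to the age list.
 *
 *	bp = geteblk(bsize);
 *	...use bp->b_un.b_addr as temporary storage...
 *	brelse(bp);
 */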

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
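
/*
 * Worked example (illustrative figures): with CLBYTES = 1024, a call
 * allocbuf(tp, 3000) rounds sizealloc up to 3 * CLBYTES = 3072.  If
 * tp->b_bufsize is 8192, the excess 5120 bytes are pagemove'd into a
 * header taken from BQ_EMPTY; if tp->b_bufsize is 1024, the loop pulls
 * 2048 more bytes from buffers on the free lists.  In either case
 * b_bcount is finally set to the requested 3000.
 */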

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

#ifdef LFS
	lfs_flush();
#endif
loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}
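
/*
 * Usage sketch (illustrative; this is the pattern bread itself uses):
 * after handing a non-async buffer to the driver, the caller blocks in
 * biowait and gets back 0, the driver's b_error, or EIO.
 *
 *	bp->b_flags |= B_READ;
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 */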

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
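
/*
 * Callback sketch (illustrative): an async consumer such as the
 * pageout daemon can ask for notification instead of a wakeup by
 * setting B_CALL and b_iodone before starting the I/O; biodone then
 * invokes the routine with the buffer still busy.  "mydone" is a
 * hypothetical completion handler.
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */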