/*-
 * Copyright (c) 1982, 1986, 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This module is believed to contain source code proprietary to AT&T.
 * Use and redistribution is subject to the Berkeley Software License
 * Agreement and your Software Agreement with AT&T (Western Electric).
 *
 *	@(#)vfs_cluster.c	7.48 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register int i;
	register struct buf *bp, *dp;
	register struct bufhd *hp;
	int base, residual;

	for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
		hp->b_forw = hp->b_back = (struct buf *)hp;

	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = 0;
		bp->b_validoff = 0;
		bp->b_validend = 0;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bfreelist[BQ_AGE] : &bfreelist[BQ_EMPTY];
		binsheadfree(bp, dp);
	}
}

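/*
 * Illustrative note on the space distribution above (hypothetical numbers,
 * for illustration only): with nbuf = 4 headers and bufpages = 10 clusters,
 * base = 2 and residual = 2, so the first two buffers start with
 * (2 + 1) * CLBYTES of space and the last two with 2 * CLBYTES.  A header
 * that ends up with no space at all is queued on BQ_EMPTY rather than
 * BQ_AGE.
 */
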
/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	USES_VOP_STRATEGY;
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
#ifdef SECSIZE
	bp = getblk(dev, blkno, size, secsize);
#else SECSIZE
	*bpp = bp = getblk(vp, blkno, size);
#endif SECSIZE
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

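/*
 * Example usage (hypothetical caller, for illustration only; "lbn" and
 * "bsize" are assumed to be supplied by the calling filesystem):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... inspect or copy from bp->b_un.b_addr ...
 *	brelse(bp);
 */
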
/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	USES_VOP_STRATEGY;
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there's read-ahead block(s), start I/O
	 * on them also (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i]);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
#ifdef SECSIZE
		return (bread(dev, blkno, size, secsize));
#else SECSIZE
		return (bread(vp, blkno, size, cred, bpp));
#endif SECSIZE
	return (biowait(bp));
}

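/*
 * Example usage (hypothetical caller, for illustration only): read block
 * "lbn" and start read-ahead on the following block.  "lbn" and "bsize"
 * are assumed to be supplied by the calling filesystem.
 *
 *	daddr_t rablkno[1];
 *	int rabsize[1], error;
 *	struct buf *bp;
 *
 *	rablkno[0] = lbn + 1;
 *	rabsize[0] = bsize;
 *	error = breadn(vp, lbn, bsize, rablkno, rabsize, 1, NOCRED, &bp);
 */
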
/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	USES_VOP_STRATEGY;
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}


/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	USES_VOP_IOCTL;
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (bdevsw[major(bp->b_dev)].d_flags & B_TAPE) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

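/*
 * Sketch of how a caller might choose among the three write paths above
 * (the decision flags are hypothetical, for illustration only): bwrite()
 * when the caller must wait for completion, bdwrite() when more of the
 * same block is expected to be written soon, bawrite() to start the I/O
 * and continue without waiting.
 *
 *	if (want_sync)
 *		error = bwrite(bp);
 *	else if (partial_write)
 *		bdwrite(bp);
 *	else
 *		bawrite(bp);
 */
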
/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}

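/*
 * Descriptive note: the queue chosen above determines how soon getnewbuf()
 * will reuse the buffer.  Headers with no memory go to BQ_EMPTY, buffers
 * whose contents are useless (B_ERROR or B_INVAL) to the head of BQ_AGE,
 * locked buffers to BQ_LOCKED, and ordinary valid buffers to the tail of
 * BQ_AGE or BQ_LRU depending on the B_AGE hint.
 */
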
/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
#ifdef SECSIZE
getblk(dev, blkno, size, secsize)
#else SECSIZE
getblk(vp, blkno, size)
#endif SECSIZE
	register struct vnode *vp;
	daddr_t blkno;
	int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
#ifdef SECSIZE
	bp->b_blksize = secsize;
#endif SECSIZE
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}

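/*
 * Example usage (hypothetical caller, for illustration only): obtain a
 * buffer for a block that is about to be completely overwritten, so its
 * old contents need not be read in first.  "lbn" and "bsize" are assumed
 * to come from the calling filesystem.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	bzero(bp->b_un.b_addr, (unsigned)bsize);
 *	... fill in the new contents ...
 *	bwrite(bp);
 */
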
/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
#ifdef SECSIZE
	bp->b_blksize = DEV_BSIZE;
#endif SECSIZE
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}

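/*
 * Example usage (hypothetical, for illustration only): borrow a buffer
 * that is not associated with any vnode or block, use its memory as
 * scratch space, then give it back.
 *
 *	bp = geteblk(bsize);
 *	... use bp->b_un.b_addr ...
 *	brelse(bp);
 */
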
/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}

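/*
 * Worked example (hypothetical sizes, for illustration only; CLBYTES is
 * machine dependent and is assumed to be 1024 here): a call such as
 *
 *	allocbuf(bp, 4096);
 *
 * on a buffer currently holding 2048 bytes rounds the request up to 4096,
 * then repeatedly takes pages from buffers returned by getnewbuf() via
 * pagemove() until b_bufsize reaches 4096, and finally sets b_bcount to
 * the requested size.
 */
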
/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

#ifdef LFS
	lfs_flush();
#endif
loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}

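/*
 * Descriptive note: if the buffer taken from the free list still holds
 * delayed-write data (B_DELWRI), getnewbuf() pushes it out with bawrite()
 * and retries, so a dirty block is never silently discarded to satisfy a
 * new allocation.
 */
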
/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
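
/*
 * Sketch (hypothetical caller, for illustration only): an asynchronous
 * consumer such as the pageout daemon can ask for a callback from
 * biodone() instead of sleeping in biowait().  "mydone" is a hypothetical
 * completion routine.
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */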