This commit was generated by cvs2svn to track changes on a CVS vendor
[unix-history] / sys / kern / vfs__bio.c
CommitLineData
15637ed4
RG
1/*
2 * Copyright (c) 1989, 1990, 1991, 1992 William F. Jolitz, TeleMuse
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This software is a component of "386BSD" developed by
16 William F. Jolitz, TeleMuse.
17 * 4. Neither the name of the developer nor the name "386BSD"
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
22 * AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS
23 * SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT.
24 * THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT
25 * NOT MAKE USE THIS WORK.
26 *
27 * FOR USERS WHO WISH TO UNDERSTAND THE 386BSD SYSTEM DEVELOPED
28 * BY WILLIAM F. JOLITZ, WE RECOMMEND THE USER STUDY WRITTEN
29 * REFERENCES SUCH AS THE "PORTING UNIX TO THE 386" SERIES
30 * (BEGINNING JANUARY 1991 "DR. DOBBS JOURNAL", USA AND BEGINNING
31 * JUNE 1991 "UNIX MAGAZIN", GERMANY) BY WILLIAM F. JOLITZ AND
32 * LYNNE GREER JOLITZ, AS WELL AS OTHER BOOKS ON UNIX AND THE
33 * ON-LINE 386BSD USER MANUAL BEFORE USE. A BOOK DISCUSSING THE INTERNALS
34 * OF 386BSD ENTITLED "386BSD FROM THE INSIDE OUT" WILL BE AVAILABLE LATE 1992.
35 *
36 * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND
37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 * ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
46 * SUCH DAMAGE.
47 *
2157b04d 48 * $Id: vfs__bio.c,v 1.17 1994/03/23 09:15:38 davidg Exp $
15637ed4 49 */
15637ed4
RG
50
51#include "param.h"
dd18dc33 52#include "systm.h"
83dc58ce 53#include "kernel.h"
15637ed4
RG
54#include "proc.h"
55#include "vnode.h"
56#include "buf.h"
57#include "specdev.h"
58#include "mount.h"
59#include "malloc.h"
60#include "vm/vm.h"
61#include "resourcevar.h"
62
/* From sys/buf.h */
struct buf *buf;		/* the buffer pool itself */
char *buffers;
int nbuf;			/* number of buffer headers */
int bufpages;			/* number of memory pages in the buffer pool */
struct buf *swbuf;		/* swap I/O headers */
int nswbuf;
struct bufhd bufhash[BUFHSZ];	/* heads of hash lists */
struct buf bfreelist[BQUEUES];	/* heads of available lists */
struct buf bswlist;		/* head of free swap header list */
struct buf *bclnlist;		/* head of cleaned page list */

/* internal allocator for buffer headers + contents; see getnewbuf() below */
static struct buf *getnewbuf(int);

extern vm_map_t buffer_map, io_map;

/*
 * Internal update daemon, process 3
 * The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;
83
84/*
85 * Initialize buffer headers and related structures.
86 */
87void bufinit()
88{
89 struct bufhd *bh;
90 struct buf *bp;
91
92 /* first, make a null hash table */
93 for(bh = bufhash; bh < bufhash + BUFHSZ; bh++) {
94 bh->b_flags = 0;
95 bh->b_forw = (struct buf *)bh;
96 bh->b_back = (struct buf *)bh;
97 }
98
99 /* next, make a null set of free lists */
100 for(bp = bfreelist; bp < bfreelist + BQUEUES; bp++) {
101 bp->b_flags = 0;
102 bp->av_forw = bp;
103 bp->av_back = bp;
104 bp->b_forw = bp;
105 bp->b_back = bp;
106 }
107
108 /* finally, initialize each buffer header and stick on empty q */
109 for(bp = buf; bp < buf + nbuf ; bp++) {
110 bp->b_flags = B_HEAD | B_INVAL; /* we're just an empty header */
111 bp->b_dev = NODEV;
112 bp->b_vp = 0;
113 binstailfree(bp, bfreelist + BQ_EMPTY);
114 binshash(bp, bfreelist + BQ_EMPTY);
115 }
116}
117
118/*
119 * Find the block in the buffer pool.
120 * If the buffer is not present, allocate a new buffer and load
121 * its contents according to the filesystem fill routine.
122 */
123int
124bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
125 struct buf **bpp)
126{
127 struct buf *bp;
128 int rv = 0;
129
130 bp = getblk (vp, blkno, size);
131
132 /* if not found in cache, do some I/O */
133 if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
8a8a439a
NW
134 if (curproc && curproc->p_stats) /* count block I/O */
135 curproc->p_stats->p_ru.ru_inblock++;
15637ed4
RG
136 bp->b_flags |= B_READ;
137 bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
138 if (cred != NOCRED) crhold(cred); /* 25 Apr 92*/
139 bp->b_rcred = cred;
140 VOP_STRATEGY(bp);
141 rv = biowait (bp);
142 }
143 *bpp = bp;
144
145 return (rv);
146}
147
148/*
149 * Operates like bread, but also starts I/O on the specified
150 * read-ahead block. [See page 55 of Bach's Book]
151 */
152int
153breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno, int rabsize,
154 struct ucred *cred, struct buf **bpp)
155{
156 struct buf *bp, *rabp;
157 int rv = 0, needwait = 0;
158
159 bp = getblk (vp, blkno, size);
160
161 /* if not found in cache, do some I/O */
162 if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
8a8a439a
NW
163 if (curproc && curproc->p_stats) /* count block I/O */
164 curproc->p_stats->p_ru.ru_inblock++;
15637ed4
RG
165 bp->b_flags |= B_READ;
166 bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
167 if (cred != NOCRED) crhold(cred); /* 25 Apr 92*/
168 bp->b_rcred = cred;
169 VOP_STRATEGY(bp);
170 needwait++;
171 }
172
173 rabp = getblk (vp, rablkno, rabsize);
174
175 /* if not found in cache, do some I/O (overlapped with first) */
176 if ((rabp->b_flags & B_CACHE) == 0 || (rabp->b_flags & B_INVAL) != 0) {
8a8a439a
NW
177 if (curproc && curproc->p_stats) /* count block I/O */
178 curproc->p_stats->p_ru.ru_inblock++;
15637ed4
RG
179 rabp->b_flags |= B_READ | B_ASYNC;
180 rabp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
181 if (cred != NOCRED) crhold(cred); /* 25 Apr 92*/
182 rabp->b_rcred = cred;
183 VOP_STRATEGY(rabp);
184 } else
185 brelse(rabp);
186
187 /* wait for original I/O */
188 if (needwait)
189 rv = biowait (bp);
190
191 *bpp = bp;
192 return (rv);
193}
194
195/*
196 * Synchronous write.
197 * Release buffer on completion.
198 */
199int
200bwrite(register struct buf *bp)
201{
202 int rv;
203
204 if(bp->b_flags & B_INVAL) {
205 brelse(bp);
206 return (0);
207 } else {
208 int wasdelayed;
209
210 if(!(bp->b_flags & B_BUSY))
211 panic("bwrite: not busy");
212
213 wasdelayed = bp->b_flags & B_DELWRI;
214 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_ASYNC|B_DELWRI);
215 if(wasdelayed)
216 reassignbuf(bp, bp->b_vp);
217
8a8a439a
NW
218 if (curproc && curproc->p_stats) /* count block I/O */
219 curproc->p_stats->p_ru.ru_oublock++;
15637ed4
RG
220 bp->b_flags |= B_DIRTY;
221 bp->b_vp->v_numoutput++;
222 VOP_STRATEGY(bp);
223 rv = biowait(bp);
224 brelse(bp);
225 return (rv);
226 }
227}
228
229/*
230 * Delayed write.
231 *
232 * The buffer is marked dirty, but is not queued for I/O.
233 * This routine should be used when the buffer is expected
234 * to be modified again soon, typically a small write that
235 * partially fills a buffer.
236 *
237 * NB: magnetic tapes cannot be delayed; they must be
238 * written in the order that the writes are requested.
239 */
240void
241bdwrite(register struct buf *bp)
242{
243
244 if(!(bp->b_flags & B_BUSY))
245 panic("bdwrite: not busy");
246
247 if(bp->b_flags & B_INVAL) {
248 brelse(bp);
2877196e 249 return;
15637ed4
RG
250 }
251 if(bp->b_flags & B_TAPE) {
252 bwrite(bp);
253 return;
254 }
255 bp->b_flags &= ~(B_READ|B_DONE);
256 bp->b_flags |= B_DIRTY|B_DELWRI;
257 reassignbuf(bp, bp->b_vp);
258 brelse(bp);
259 return;
260}
261
262/*
263 * Asynchronous write.
264 * Start I/O on a buffer, but do not wait for it to complete.
265 * The buffer is released when the I/O completes.
266 */
267void
268bawrite(register struct buf *bp)
269{
270
271 if(!(bp->b_flags & B_BUSY))
272 panic("bawrite: not busy");
273
274 if(bp->b_flags & B_INVAL)
275 brelse(bp);
276 else {
277 int wasdelayed;
278
279 wasdelayed = bp->b_flags & B_DELWRI;
280 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
281 if(wasdelayed)
282 reassignbuf(bp, bp->b_vp);
283
8a8a439a
NW
284 if (curproc && curproc->p_stats) /* count block I/O */
285 curproc->p_stats->p_ru.ru_oublock++;
15637ed4
RG
286 bp->b_flags |= B_DIRTY | B_ASYNC;
287 bp->b_vp->v_numoutput++;
288 VOP_STRATEGY(bp);
289 }
290}
291
/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 *
 * Wakes any sleeper waiting for a free buffer or for this specific
 * buffer, invalidates errored buffers, then requeues the buffer on
 * the appropriate free list and clears B_BUSY.
 */
void
brelse(register struct buf *bp)
{
	int x;

	/* anyone need a "free" block? */
	x=splbio();
	if ((bfreelist + BQ_AGE)->b_flags & B_WANTED) {
		(bfreelist + BQ_AGE) ->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/* anyone need this very block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}

	/* errored buffers lose their identity and cached contents */
	if (bp->b_flags & (B_INVAL|B_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI|B_CACHE);
		if(bp->b_vp)
			brelvp(bp);
	}

	/*
	 * enqueue
	 *
	 * NOTE(review): the binsheadfree/binstailfree invocations below are
	 * deliberately written WITHOUT trailing semicolons -- presumably the
	 * macros expand to a statement that supplies its own; verify against
	 * buf.h before reformatting this if/else ladder.
	 */
	/* just an empty buffer head ... */
	/*if(bp->b_flags & B_HEAD)
		binsheadfree(bp, bfreelist + BQ_EMPTY)*/
	/* buffers with junk contents */
	/*else*/ if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE))
		binsheadfree(bp, bfreelist + BQ_AGE)
	/* buffers with stale but valid contents */
	else if(bp->b_flags & B_AGE)
		binstailfree(bp, bfreelist + BQ_AGE)
	/* buffers with valid and quite potentially reuseable contents */
	else
		binstailfree(bp, bfreelist + BQ_LRU)

	/* unlock */
	bp->b_flags &= ~B_BUSY;
	splx(x);

}
339
int freebufspace;		/* bytes still available for buffer contents */
int allocbufspace;		/* bytes currently allocated to buffer contents */

/*
 * Find a buffer which is available for use.
 * If free memory for buffer space and an empty header from the empty list,
 * use that. Otherwise, select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Returns a busy buffer of b_bufsize == sz, or 0 if it had to sleep
 * waiting for a free buffer -- in that case the caller must retry.
 */
static struct buf *
getnewbuf(int sz)
{
	struct buf *bp;
	int x;

	x = splbio();
start:
	/* can we constitute a new buffer? */
	if (freebufspace > sz
		&& bfreelist[BQ_EMPTY].av_forw != (struct buf *)bfreelist+BQ_EMPTY) {
		caddr_t addr;

/*#define notyet*/
#ifndef notyet
		/* couldn't get memory for contents: recycle an old buffer */
		if ((addr = malloc (sz, M_IOBUF, M_WAITOK)) == 0) goto tryfree;
#else /* notyet */
		/* get new memory buffer */
		if (round_page(sz) == sz)
			addr = (caddr_t) kmem_alloc_wired_wait(buffer_map, sz);
		else
			addr = (caddr_t) malloc (sz, M_IOBUF, M_WAITOK);
		/*if ((addr = malloc (sz, M_IOBUF, M_NOWAIT)) == 0) goto tryfree;*/
		bzero(addr, sz);
#endif /* notyet */
		freebufspace -= sz;
		allocbufspace += sz;

		/* take an empty header off BQ_EMPTY and attach the new storage */
		bp = bfreelist[BQ_EMPTY].av_forw;
		bp->b_flags = B_BUSY | B_INVAL;
		bremfree(bp);
		bp->b_un.b_addr = addr;
		bp->b_bufsize = sz;		/* 20 Aug 92*/
		goto fillin;
	}

tryfree:
	/* recycle: prefer the AGE queue, fall back to LRU */
	if (bfreelist[BQ_AGE].av_forw != (struct buf *)bfreelist+BQ_AGE) {
		bp = bfreelist[BQ_AGE].av_forw;
		bremfree(bp);
	} else if (bfreelist[BQ_LRU].av_forw != (struct buf *)bfreelist+BQ_LRU) {
		bp = bfreelist[BQ_LRU].av_forw;
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		(bfreelist + BQ_AGE)->b_flags |= B_WANTED;
		tsleep((caddr_t)bfreelist, PRIBIO, "newbuf", 0);
		splx(x);
		return (0);
	}

	/* if we are a delayed write, convert to an async write! */
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_BUSY;
		bawrite (bp);
		goto start;
	}


	if(bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) crfree(bp->b_rcred);	/* 25 Apr 92*/
	if (bp->b_wcred != NOCRED) crfree(bp->b_wcred);
	bp->b_flags = B_BUSY;
fillin:
	/* strip the recycled buffer of its old identity */
	bremhash(bp);
	splx(x);
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	/* resize contents if the recycled buffer's storage doesn't match */
	if (bp->b_bufsize != sz)
		allocbuf(bp, sz);
	bp->b_bcount = bp->b_bufsize = sz;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}
431
432/*
433 * Check to see if a block is currently memory resident.
434 */
435struct buf *
436incore(struct vnode *vp, daddr_t blkno)
437{
438 struct buf *bh;
439 struct buf *bp;
440
441 bh = BUFHASH(vp, blkno);
442
443 /* Search hash chain */
444 bp = bh->b_forw;
445 while (bp != (struct buf *) bh) {
446 /* hit */
447 if (bp->b_lblkno == blkno && bp->b_vp == vp
448 && (bp->b_flags & B_INVAL) == 0)
449 return (bp);
450 bp = bp->b_forw;
451 }
452
453 return(0);
454}
455
/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to insure that the
 * cached blocks be of the correct size.
 *
 * Runs at splbio(); may sleep either on a busy cached buffer
 * or (inside getnewbuf) waiting for a free one, retrying from
 * the top in both cases since the cache may have changed.
 */
struct buf *
getblk(register struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp, *bh;
	int x;

	x = splbio();
loop:
	if (bp = incore(vp, blkno)) {
		/* cached but busy: wait for the holder, then rescan */
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			tsleep ((caddr_t)bp, PRIBIO, "getblk", 0);
			goto loop;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		if (size > bp->b_bufsize)
			panic("now what do we do?");
		/* if (bp->b_bufsize != size) allocbuf(bp, size); */
	} else {

		/* getnewbuf() returns 0 after sleeping; rescan the cache */
		if ((bp = getnewbuf(size)) == 0) goto loop;
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		bh = BUFHASH(vp, blkno);
		binshash(bp, bh);
		/* note: overwrites (not ORs) flags set by getnewbuf() */
		bp->b_flags = B_BUSY;
	}
	splx(x);
	return (bp);
}
495
496/*
497 * Get an empty, disassociated buffer of given size.
498 */
499struct buf *
500geteblk(int size)
501{
502 struct buf *bp;
503 int x;
504
505 while ((bp = getnewbuf(size)) == 0)
506 ;
507 x = splbio();
508 binshash(bp, bfreelist + BQ_AGE);
509 splx(x);
510
511 return (bp);
512}
513
/*
 * Exchange a buffer's underlying buffer storage for one of different
 * size, taking care to maintain contents appropriately. When buffer
 * increases in size, caller is responsible for filling out additional
 * contents. When buffer shrinks in size, data is lost, so caller must
 * first return it to backing store before shrinking the buffer, as
 * no implied I/O will be done.
 *
 * Expanded buffer is returned as value.
 * NOTE(review): despite the line above, the function is void -- the
 * caller keeps the same bp, now pointing at the new storage.
 */
void
allocbuf(register struct buf *bp, int size)
{
	caddr_t newcontents;

	/* get new memory buffer */
#ifndef notyet
	newcontents = (caddr_t) malloc (size, M_IOBUF, M_WAITOK);
#else /* notyet */
	if (round_page(size) == size)
		newcontents = (caddr_t) kmem_alloc_wired_wait(buffer_map, size);
	else
		newcontents = (caddr_t) malloc (size, M_IOBUF, M_WAITOK);
#endif /* notyet */

	/* copy the old into the new, up to the maximum that will fit */
	bcopy (bp->b_un.b_addr, newcontents, min(bp->b_bufsize, size));

	/* return old contents to free heap */
#ifndef notyet
	free (bp->b_un.b_addr, M_IOBUF);
#else /* notyet */
	if (round_page(bp->b_bufsize) == bp->b_bufsize)
		kmem_free_wakeup(buffer_map, bp->b_un.b_addr, bp->b_bufsize);
	else
		free (bp->b_un.b_addr, M_IOBUF);
#endif /* notyet */

	/* adjust buffer cache's idea of memory allocated to buffer contents */
	freebufspace -= size - bp->b_bufsize;
	allocbufspace += size - bp->b_bufsize;

	/* update buffer header */
	bp->b_un.b_addr = newcontents;
	bp->b_bcount = bp->b_bufsize = size;
}
560
561/*
562 * Patiently await operations to complete on this buffer.
563 * When they do, extract error value and return it.
564 * Extract and return any errors associated with the I/O.
565 * If an invalid block, force it off the lookup hash chains.
566 */
567int
568biowait(register struct buf *bp)
569{
570 int x;
571
572 x = splbio();
573 while ((bp->b_flags & B_DONE) == 0)
73419b27 574 tsleep((caddr_t)bp, PRIBIO, "biowait", 0);
15637ed4
RG
575 if((bp->b_flags & B_ERROR) || bp->b_error) {
576 if ((bp->b_flags & B_INVAL) == 0) {
577 bp->b_flags |= B_INVAL;
578 bremhash(bp);
579 binshash(bp, bfreelist + BQ_AGE);
580 }
581 if (!bp->b_error)
582 bp->b_error = EIO;
583 else
584 bp->b_flags |= B_ERROR;
585 splx(x);
586 return (bp->b_error);
587 } else {
588 splx(x);
589 return (0);
590 }
591}
592
/*
 * Finish up operations on a buffer, calling an optional function
 * (if requested), and releasing the buffer if marked asynchronous.
 * Mark this buffer done so that others biowait()'ing for it will
 * notice when they are woken up from sleep().
 */
void
biodone(register struct buf *bp)
{
	int s;
	s = splbio();
	/*
	 * Cluster completion: distribute the residual count and any
	 * error over the component buffers and biodone() each in turn.
	 */
	if (bp->b_flags & B_CLUSTER) {
		struct buf *tbp;
		bp->b_resid = bp->b_bcount;
		while ( tbp = bp->b_clusterf) {
			bp->b_clusterf = tbp->av_forw;
			bp->b_resid -= tbp->b_bcount;
			tbp->b_resid = 0;
			/* remaining components got none/part of the transfer */
			if( bp->b_resid <= 0) {
				tbp->b_error = bp->b_error;
				tbp->b_flags |= (bp->b_flags & B_ERROR);
				tbp->b_resid = -bp->b_resid;
				bp->b_resid = 0;
			}
/*
	printf("rdc (%d,%d,%d) ", tbp->b_blkno, tbp->b_bcount, tbp->b_resid);
*/

			biodone(tbp);
		}
		vm_bounce_kva_free( bp->b_un.b_addr, bp->b_bufsize, 0);
		relpbuf(bp);
		splx(s);
		return;
	}

#ifndef NOBOUNCE
	/* release bounce pages used for ISA DMA, if any */
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif
	bp->b_flags |= B_DONE;

	/* a completed write lowers the vnode's pending-output count */
	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		splx(s);
		return;
	}

/*
 * For asynchronous completions, release the buffer now. The brelse
 * checks for B_WANTED and will do the wakeup there if necessary -
 * so no need to do a wakeup here in the async case.
 */

	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	splx(s);
}
661
#ifndef UPDATE_INTERVAL
int vfs_update_interval = 30;		/* seconds between automatic syncs */
#else
int vfs_update_interval = UPDATE_INTERVAL;
#endif

/*
 * Body of the internal update daemon: loop forever, flushing
 * filesystem data with sync() every vfs_update_interval seconds.
 * A wakeup on &vfs_update_wakeup forces an immediate sync.
 */
void
vfs_update() {
	(void) spl0();		/* drop to base interrupt priority level */
	while(1) {
		/* sleep out the interval, or until explicitly woken */
		tsleep((caddr_t)&vfs_update_wakeup, PRIBIO, "update",
			hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}
4c45483e
GW
678
679/*
680 * Print out statistics on the current allocation of the buffer pool.
681 * Can be enabled to print out on every ``sync'' by setting "syncprt"
682 * in ufs/ufs_vfsops.c.
683 */
684void
685bufstats()
686{
687 int s, i, j, count;
688 register struct buf *bp, *dp;
689 int counts[MAXBSIZE/CLBYTES+1];
690 static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };
691
692 for (bp = bfreelist, i = 0; bp < &bfreelist[BQUEUES]; bp++, i++) {
693 count = 0;
694 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
695 counts[j] = 0;
696 s = splbio();
697 for (dp = bp->av_forw; dp != bp; dp = dp->av_forw) {
698 counts[dp->b_bufsize/CLBYTES]++;
699 count++;
700 }
701 splx(s);
702 printf("%s: total-%d", bname[i], count);
703 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
704 if (counts[j] != 0)
705 printf(", %d-%d", j * CLBYTES, counts[j]);
706 printf("\n");
707 }
708}