/*
 * Copyright (c) 1989, 1990, 1991, 1992 William F. Jolitz, TeleMuse
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This software is a component of "386BSD" developed by
 *	William F. Jolitz, TeleMuse.
 * 4. Neither the name of the developer nor the name "386BSD"
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
 * AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS
 * SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT.
 * THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT
 * NOT MAKE USE THIS WORK.
 *
 * FOR USERS WHO WISH TO UNDERSTAND THE 386BSD SYSTEM DEVELOPED
 * BY WILLIAM F. JOLITZ, WE RECOMMEND THE USER STUDY WRITTEN
 * REFERENCES SUCH AS THE "PORTING UNIX TO THE 386" SERIES
 * (BEGINNING JANUARY 1991 "DR. DOBBS JOURNAL", USA AND BEGINNING
 * JUNE 1991 "UNIX MAGAZIN", GERMANY) BY WILLIAM F. JOLITZ AND
 * LYNNE GREER JOLITZ, AS WELL AS OTHER BOOKS ON UNIX AND THE
 * ON-LINE 386BSD USER MANUAL BEFORE USE. A BOOK DISCUSSING THE INTERNALS
 * OF 386BSD ENTITLED "386BSD FROM THE INSIDE OUT" WILL BE AVAILABLE LATE 1992.
 *
 * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: vfs__bio.c,v 1.20 1994/05/29 07:34:15 davidg Exp $
 */

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "vnode.h"
#include "buf.h"
#include "specdev.h"
#include "mount.h"
#include "malloc.h"
#include "vm/vm.h"
#include "resourcevar.h"

/* From sys/buf.h */
struct buf *buf;		/* the buffer pool itself */
char *buffers;
int nbuf;			/* number of buffer headers */
int bufpages;			/* number of memory pages in the buffer pool */
struct buf *swbuf;		/* swap I/O headers */
int nswbuf;
struct bufhd bufhash[BUFHSZ];	/* heads of hash lists */
struct buf bfreelist[BQUEUES];	/* heads of available lists */
struct buf bswlist;		/* head of free swap header list */
struct buf *bclnlist;		/* head of cleaned page list */

static struct buf *getnewbuf(int);
extern vm_map_t buffer_map, io_map;

/*
 * Internal update daemon, process 3.
 * The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;

/*
 * Initialize buffer headers and related structures.
 */
void bufinit()
{
	struct bufhd *bh;
	struct buf *bp;

	/* first, make a null hash table */
	for(bh = bufhash; bh < bufhash + BUFHSZ; bh++) {
		bh->b_flags = 0;
		bh->b_forw = (struct buf *)bh;
		bh->b_back = (struct buf *)bh;
	}

	/* next, make a null set of free lists */
	for(bp = bfreelist; bp < bfreelist + BQUEUES; bp++) {
		bp->b_flags = 0;
		bp->av_forw = bp;
		bp->av_back = bp;
		bp->b_forw = bp;
		bp->b_back = bp;
	}

	/* finally, initialize each buffer header and stick on empty q */
	for(bp = buf; bp < buf + nbuf ; bp++) {
		bp->b_flags = B_HEAD | B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_vp = 0;
		binstailfree(bp, bfreelist + BQ_EMPTY);
		binshash(bp, bfreelist + BQ_EMPTY);
	}
}

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
	struct buf **bpp)
{
	struct buf *bp;
	int rv = 0;

	bp = getblk (vp, blkno, size);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
		if (cred != NOCRED) crhold(cred);	/* 25 Apr 92*/
		bp->b_rcred = cred;
		VOP_STRATEGY(bp);
		rv = biowait (bp);
	}
	*bpp = bp;

	return (rv);
}
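
/*
 * Usage sketch: a typical synchronous, cached read.  "vp", "lbn", "bsize"
 * and "dst" are hypothetical caller variables; the error convention follows
 * bread() above -- a buffer is always handed back and must be brelse()'d.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(bp->b_un.b_addr, dst, bsize);
 *	brelse(bp);
 */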

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block. [See page 55 of Bach's Book]
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno, int rabsize,
	struct ucred *cred, struct buf **bpp)
{
	struct buf *bp, *rabp;
	int rv = 0, needwait = 0;

	bp = getblk (vp, blkno, size);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
		if (cred != NOCRED) crhold(cred);	/* 25 Apr 92*/
		bp->b_rcred = cred;
		VOP_STRATEGY(bp);
		needwait++;
	}

	rabp = getblk (vp, rablkno, rabsize);

	/* if not found in cache, do some I/O (overlapped with first) */
	if ((rabp->b_flags & B_CACHE) == 0 || (rabp->b_flags & B_INVAL) != 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		rabp->b_flags |= B_READ | B_ASYNC;
		rabp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
		if (cred != NOCRED) crhold(cred);	/* 25 Apr 92*/
		rabp->b_rcred = cred;
		VOP_STRATEGY(rabp);
	} else
		brelse(rabp);

	/* wait for original I/O */
	if (needwait)
		rv = biowait (bp);

	*bpp = bp;
	return (rv);
}
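
/*
 * Usage sketch: same contract as bread(), but the next logical block is
 * scheduled asynchronously so it is likely to be resident by the time the
 * caller asks for it.  "vp", "lbn" and "bsize" are hypothetical caller
 * variables; only the originally requested buffer is returned.
 *
 *	error = breada(vp, lbn, bsize, lbn + 1, bsize, NOCRED, &bp);
 */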

/*
 * Synchronous write.
 * Release buffer on completion.
 */
int
bwrite(register struct buf *bp)
{
	int rv;

	if(bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	} else {
		int wasdelayed;

		if(!(bp->b_flags & B_BUSY))
			panic("bwrite: not busy");

		wasdelayed = bp->b_flags & B_DELWRI;
		bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_ASYNC|B_DELWRI);
		if(wasdelayed)
			reassignbuf(bp, bp->b_vp);

		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_oublock++;
		bp->b_flags |= B_DIRTY;
		bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
		rv = biowait(bp);
		brelse(bp);
		return (rv);
	}
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
void
bdwrite(register struct buf *bp)
{

	if(!(bp->b_flags & B_BUSY))
		panic("bdwrite: not busy");

	if(bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if(bp->b_flags & B_TAPE) {
		bwrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_DONE);
	bp->b_flags |= B_DIRTY|B_DELWRI;
	reassignbuf(bp, bp->b_vp);
	brelse(bp);
	return;
}
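
/*
 * Usage sketch: a small, partial update that is expected to be modified
 * again soon, so the write is left delayed rather than pushed out now.
 * "vp", "lbn", "bsize", "off", "src" and "len" are hypothetical caller
 * variables.
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(src, bp->b_un.b_addr + off, len);
 *	bdwrite(bp);			(marks B_DELWRI; no I/O started yet)
 */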

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
void
bawrite(register struct buf *bp)
{

	if(!(bp->b_flags & B_BUSY))
		panic("bawrite: not busy");

	if(bp->b_flags & B_INVAL)
		brelse(bp);
	else {
		int wasdelayed;

		wasdelayed = bp->b_flags & B_DELWRI;
		bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
		if(wasdelayed)
			reassignbuf(bp, bp->b_vp);

		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_oublock++;
		bp->b_flags |= B_DIRTY | B_ASYNC;
		bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
	}
}

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
void
brelse(register struct buf *bp)
{
	int x;

	/* anyone need a "free" block? */
	x = splbio();
	if ((bfreelist + BQ_AGE)->b_flags & B_WANTED) {
		(bfreelist + BQ_AGE)->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/* anyone need this very block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}

	if (bp->b_flags & (B_INVAL|B_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI|B_CACHE);
		if(bp->b_vp)
			brelvp(bp);
	}

	/* enqueue */
	/* just an empty buffer head ... */
	/*if(bp->b_flags & B_HEAD)
		binsheadfree(bp, bfreelist + BQ_EMPTY)*/
	/* buffers with junk contents */
	/*else*/ if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE))
		binsheadfree(bp, bfreelist + BQ_AGE)
	/* buffers with stale but valid contents */
	else if(bp->b_flags & B_AGE)
		binstailfree(bp, bfreelist + BQ_AGE)
	/* buffers with valid and quite potentially reusable contents */
	else
		binstailfree(bp, bfreelist + BQ_LRU)

	/* unlock */
	bp->b_flags &= ~B_BUSY;
	splx(x);

}

int freebufspace;
int allocbufspace;

/*
 * Find a buffer which is available for use.
 * If there is free memory for buffer space and an empty header on the
 * empty list, use that. Otherwise, select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
static struct buf *
getnewbuf(int sz)
{
	struct buf *bp;
	int x;

	x = splbio();
start:
	/* can we constitute a new buffer? */
	if (freebufspace > sz
	    && bfreelist[BQ_EMPTY].av_forw != (struct buf *)bfreelist+BQ_EMPTY) {
		caddr_t addr;

		if ((addr = malloc (sz, M_IOBUF, M_NOWAIT)) == 0)
			goto tryfree;
		freebufspace -= sz;
		allocbufspace += sz;

		bp = bfreelist[BQ_EMPTY].av_forw;
		bp->b_flags = B_BUSY | B_INVAL;
		bremfree(bp);
		bp->b_un.b_addr = addr;
		bp->b_bufsize = sz;	/* 20 Aug 92*/
		goto fillin;
	}

tryfree:
	if (bfreelist[BQ_AGE].av_forw != (struct buf *)bfreelist+BQ_AGE) {
		bp = bfreelist[BQ_AGE].av_forw;
		bremfree(bp);
	} else if (bfreelist[BQ_LRU].av_forw != (struct buf *)bfreelist+BQ_LRU) {
		bp = bfreelist[BQ_LRU].av_forw;
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		(bfreelist + BQ_AGE)->b_flags |= B_WANTED;
		tsleep((caddr_t)bfreelist, PRIBIO, "newbuf", 0);
		splx(x);
		return (0);
	}

	/* if we are a delayed write, convert to an async write! */
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_BUSY;
		bawrite (bp);
		goto start;
	}

	if(bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) crfree(bp->b_rcred);	/* 25 Apr 92*/
	if (bp->b_wcred != NOCRED) crfree(bp->b_wcred);
	bp->b_flags = B_BUSY;
fillin:
	bremhash(bp);
	splx(x);
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	if (bp->b_bufsize != sz)
		allocbuf(bp, sz);
	bp->b_bcount = bp->b_bufsize = sz;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bh;
	struct buf *bp;

	bh = BUFHASH(vp, blkno);

	/* Search hash chain */
	bp = bh->b_forw;
	while (bp != (struct buf *) bh) {
		/* hit */
		if (bp->b_lblkno == blkno && bp->b_vp == vp
		    && (bp->b_flags & B_INVAL) == 0)
			return (bp);
		bp = bp->b_forw;
	}

	return(0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(register struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp, *bh;
	int x;

	x = splbio();
loop:
	if (bp = incore(vp, blkno)) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			tsleep ((caddr_t)bp, PRIBIO, "getblk", 0);
			goto loop;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		if (size > bp->b_bufsize)
			panic("now what do we do?");
		/* if (bp->b_bufsize != size) allocbuf(bp, size); */
	} else {

		if ((bp = getnewbuf(size)) == 0)
			goto loop;
		if ( incore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		bh = BUFHASH(vp, blkno);
		binshash(bp, bh);
		bp->b_flags = B_BUSY;
	}
	splx(x);
	return (bp);
}
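
/*
 * Usage sketch: when the caller intends to overwrite the entire block,
 * there is no need to read it in first -- getblk() alone suffices.
 * "vp", "lbn", "bsize" and "src" are hypothetical caller variables.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	bcopy(src, bp->b_un.b_addr, bsize);
 *	bawrite(bp);			(start the write, do not wait)
 */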

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int x;

	while ((bp = getnewbuf(size)) == 0)
		;
	x = splbio();
	binshash(bp, bfreelist + BQ_AGE);
	splx(x);

	return (bp);
}
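
/*
 * Usage sketch: geteblk() hands out an anonymous buffer that is not
 * associated with any vnode, useful as temporary storage; release it
 * with brelse() when finished.  "n" is a hypothetical size in bytes.
 *
 *	bp = geteblk(n);
 *	... use bp->b_un.b_addr as scratch space ...
 *	bp->b_flags |= B_INVAL;		(don't bother caching the contents)
 *	brelse(bp);
 */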

/*
 * Exchange a buffer's underlying buffer storage for one of different
 * size, taking care to maintain contents appropriately. When buffer
 * increases in size, caller is responsible for filling out additional
 * contents. When buffer shrinks in size, data is lost, so caller must
 * first return it to backing store before shrinking the buffer, as
 * no implied I/O will be done.
 *
 * The buffer's contents pointer and size are updated in place.
 */
void
allocbuf(register struct buf *bp, int size)
{
	caddr_t newcontents;

	/* get new memory buffer */
	newcontents = (caddr_t) malloc (size, M_IOBUF, M_WAITOK);

	/* copy the old into the new, up to the maximum that will fit */
	bcopy (bp->b_un.b_addr, newcontents, min(bp->b_bufsize, size));

	/* return old contents to free heap */
	free (bp->b_un.b_addr, M_IOBUF);

	/* adjust buffer cache's idea of memory allocated to buffer contents */
	freebufspace -= size - bp->b_bufsize;
	allocbufspace += size - bp->b_bufsize;

	/* update buffer header */
	bp->b_un.b_addr = newcontents;
	bp->b_bcount = bp->b_bufsize = size;
}

/*
 * Patiently await operations to complete on this buffer.
 * When they do, extract and return any error associated with the I/O.
 * If the block turns out to be invalid, force it off the lookup hash chains.
 */
int
biowait(register struct buf *bp)
{
	int x;

	x = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t)bp, PRIBIO, "biowait", 0);
	if((bp->b_flags & B_ERROR) || bp->b_error) {
		if ((bp->b_flags & B_INVAL) == 0) {
			bp->b_flags |= B_INVAL;
			bremhash(bp);
			binshash(bp, bfreelist + BQ_AGE);
		}
		if (!bp->b_error)
			bp->b_error = EIO;
		else
			bp->b_flags |= B_ERROR;
		splx(x);
		return (bp->b_error);
	} else {
		splx(x);
		return (0);
	}
}

/*
 * Finish up operations on a buffer, calling an optional function
 * (if requested), and releasing the buffer if marked asynchronous.
 * Mark this buffer done so that others biowait()'ing for it will
 * notice when they are woken up from sleep().
 */
void
biodone(register struct buf *bp)
{
	int s;
	s = splbio();
	if (bp->b_flags & B_CLUSTER) {
		struct buf *tbp;
		bp->b_resid = bp->b_bcount;
		while ( tbp = bp->b_clusterf) {
			bp->b_clusterf = tbp->av_forw;
			bp->b_resid -= tbp->b_bcount;
			tbp->b_resid = 0;
			if( bp->b_resid <= 0) {
				tbp->b_error = bp->b_error;
				tbp->b_flags |= (bp->b_flags & B_ERROR);
				tbp->b_resid = -bp->b_resid;
				bp->b_resid = 0;
			}
/*
			printf("rdc (%d,%d,%d) ", tbp->b_blkno, tbp->b_bcount, tbp->b_resid);
*/
			biodone(tbp);
		}
#ifndef NOBOUNCE
		vm_bounce_kva_free( bp->b_un.b_addr, bp->b_bufsize, 0);
#endif
		relpbuf(bp);
		splx(s);
		return;
	}

#ifndef NOBOUNCE
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		splx(s);
		return;
	}

/*
 * For asynchronous completions, release the buffer now. The brelse
 * checks for B_WANTED and will do the wakeup there if necessary -
 * so no need to do a wakeup here in the async case.
 */

	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	splx(s);
}
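
/*
 * Usage sketch: an asynchronous request completed through a callback.
 * The caller sets B_CALL and b_iodone before issuing the I/O; when the
 * driver calls biodone() the callback runs and becomes responsible for
 * disposing of the buffer.  "myiodone" is a hypothetical caller-supplied
 * function.
 *
 *	bp->b_flags |= B_ASYNC | B_CALL;
 *	bp->b_iodone = myiodone;
 *	VOP_STRATEGY(bp);
 */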

#ifndef UPDATE_INTERVAL
int vfs_update_interval = 30;
#else
int vfs_update_interval = UPDATE_INTERVAL;
#endif

void
vfs_update() {
	(void) spl0();
	while(1) {
		tsleep((caddr_t)&vfs_update_wakeup, PRIBIO, "update",
			hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in ufs/ufs_vfsops.c.
 */
void
bufstats()
{
	int s, i, j, count;
	register struct buf *bp, *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (bp = bfreelist, i = 0; bp < &bfreelist[BQUEUES]; bp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (dp = bp->av_forw; dp != bp; dp = dp->av_forw) {
			counts[dp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}