/*
 * Copyright (c) 1989, 1990, 1991, 1992 William F. Jolitz, TeleMuse
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This software is a component of "386BSD" developed by
 *	William F. Jolitz, TeleMuse.
 * 4. Neither the name of the developer nor the name "386BSD"
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
 * AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS
 * SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT.
 * THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT
 * NOT MAKE USE OF THIS WORK.
 *
 * FOR USERS WHO WISH TO UNDERSTAND THE 386BSD SYSTEM DEVELOPED
 * BY WILLIAM F. JOLITZ, WE RECOMMEND THE USER STUDY WRITTEN
 * REFERENCES SUCH AS THE "PORTING UNIX TO THE 386" SERIES
 * (BEGINNING JANUARY 1991 "DR. DOBBS JOURNAL", USA AND BEGINNING
 * JUNE 1991 "UNIX MAGAZIN", GERMANY) BY WILLIAM F. JOLITZ AND
 * LYNNE GREER JOLITZ, AS WELL AS OTHER BOOKS ON UNIX AND THE
 * ON-LINE 386BSD USER MANUAL BEFORE USE. A BOOK DISCUSSING THE INTERNALS
 * OF 386BSD ENTITLED "386BSD FROM THE INSIDE OUT" WILL BE AVAILABLE LATE 1992.
 *
 * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: vfs__bio.c,v 1.16 1994/03/19 22:19:11 wollman Exp $
 */

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "vnode.h"
#include "buf.h"
#include "specdev.h"
#include "mount.h"
#include "malloc.h"
#include "vm/vm.h"
#include "resourcevar.h"

/* From sys/buf.h */
struct buf *buf;		/* the buffer pool itself */
char *buffers;
int nbuf;			/* number of buffer headers */
int bufpages;			/* number of memory pages in the buffer pool */
struct buf *swbuf;		/* swap I/O headers */
int nswbuf;
struct bufhd bufhash[BUFHSZ];	/* heads of hash lists */
struct buf bfreelist[BQUEUES];	/* heads of available lists */
struct buf bswlist;		/* head of free swap header list */
struct buf *bclnlist;		/* head of cleaned page list */

static struct buf *getnewbuf(int);

extern vm_map_t buffer_map;

/*
 * Initialize buffer headers and related structures.
 */
void bufinit()
{
	struct bufhd *bh;
	struct buf *bp;

	/* first, make a null hash table */
	for(bh = bufhash; bh < bufhash + BUFHSZ; bh++) {
		bh->b_flags = 0;
		bh->b_forw = (struct buf *)bh;
		bh->b_back = (struct buf *)bh;
	}

	/* next, make a null set of free lists */
	for(bp = bfreelist; bp < bfreelist + BQUEUES; bp++) {
		bp->b_flags = 0;
		bp->av_forw = bp;
		bp->av_back = bp;
		bp->b_forw = bp;
		bp->b_back = bp;
	}

	/* finally, initialize each buffer header and stick on empty q */
	for(bp = buf; bp < buf + nbuf ; bp++) {
		bp->b_flags = B_HEAD | B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_vp = 0;
		binstailfree(bp, bfreelist + BQ_EMPTY);
		binshash(bp, bfreelist + BQ_EMPTY);
	}
}

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
	struct buf **bpp)
{
	struct buf *bp;
	int rv = 0;

	bp = getblk (vp, blkno, size);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
		if (cred != NOCRED) crhold(cred);	/* 25 Apr 92*/
		bp->b_rcred = cred;
		VOP_STRATEGY(bp);
		rv = biowait (bp);
	}
	*bpp = bp;

	return (rv);
}
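
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * filesystem routine might read one logical block through the cache.  The
 * vnode, logical block number, and block size parameters are assumptions
 * of the example.
 */
#ifdef notdef
static int
example_read(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;
	int error;

	/* bread() returns the buffer busy, whether or not the I/O worked */
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... consume bsize bytes at bp->b_un.b_addr ... */
	brelse(bp);		/* unlock; contents remain cached */
	return (0);
}
#endif /* notdef */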

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block. [See page 55 of Bach's Book]
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno, int rabsize,
	struct ucred *cred, struct buf **bpp)
{
	struct buf *bp, *rabp;
	int rv = 0, needwait = 0;

	bp = getblk (vp, blkno, size);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
		if (cred != NOCRED) crhold(cred);	/* 25 Apr 92*/
		bp->b_rcred = cred;
		VOP_STRATEGY(bp);
		needwait++;
	}

	rabp = getblk (vp, rablkno, rabsize);

	/* if not found in cache, do some I/O (overlapped with first) */
	if ((rabp->b_flags & B_CACHE) == 0 || (rabp->b_flags & B_INVAL) != 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		rabp->b_flags |= B_READ | B_ASYNC;
		rabp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
		if (cred != NOCRED) crhold(cred);	/* 25 Apr 92*/
		rabp->b_rcred = cred;
		VOP_STRATEGY(rabp);
	} else
		brelse(rabp);

	/* wait for original I/O */
	if (needwait)
		rv = biowait (bp);

	*bpp = bp;
	return (rv);
}
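
/*
 * Illustrative sketch (an assumption of this example, not original code):
 * a sequential reader would typically ask breada() for block lbn and
 * schedule lbn + 1 asynchronously, so the next read likely hits the cache.
 */
#ifdef notdef
static int
example_read_ahead(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;
	int error;

	/* read lbn now; start overlapped I/O for lbn + 1 */
	error = breada(vp, lbn, bsize, lbn + 1, bsize, NOCRED, &bp);
	if (error == 0) {
		/* ... consume bp->b_un.b_addr ... */
	}
	brelse(bp);		/* buffer is busy in either case */
	return (error);
}
#endif /* notdef */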

/*
 * Synchronous write.
 * Release buffer on completion.
 */
int
bwrite(register struct buf *bp)
{
	int rv;

	if(bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	} else {
		int wasdelayed;

		if(!(bp->b_flags & B_BUSY))
			panic("bwrite: not busy");

		wasdelayed = bp->b_flags & B_DELWRI;
		bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_ASYNC|B_DELWRI);
		if(wasdelayed)
			reassignbuf(bp, bp->b_vp);

		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_oublock++;
		bp->b_flags |= B_DIRTY;
		bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
		rv = biowait(bp);
		brelse(bp);
		return (rv);
	}
}
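
/*
 * Illustrative sketch (hypothetical caller, not original code): a
 * synchronous read-modify-write of a cached block via bwrite().
 */
#ifdef notdef
static int
example_sync_update(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... modify bp->b_un.b_addr in place ... */
	return (bwrite(bp));	/* waits for the I/O and releases bp */
}
#endif /* notdef */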

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
void
bdwrite(register struct buf *bp)
{

	if(!(bp->b_flags & B_BUSY))
		panic("bdwrite: not busy");

	if(bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if(bp->b_flags & B_TAPE) {
		bwrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_DONE);
	bp->b_flags |= B_DIRTY|B_DELWRI;
	reassignbuf(bp, bp->b_vp);
	brelse(bp);
	return;
}
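
/*
 * Illustrative sketch (hypothetical caller): a small write that only
 * partially fills the block, the case the comment above recommends
 * bdwrite() for.  The buffer stays dirty in the cache; the update
 * daemon (see vfs_update() below) eventually pushes it out.
 */
#ifdef notdef
static void
example_partial_fill(struct buf *bp, caddr_t src, int off, int len)
{
	/* bp is busy, e.g. from bread(); copy in the new fragment */
	bcopy(src, bp->b_un.b_addr + off, len);
	bdwrite(bp);		/* mark dirty and release; no I/O started */
}
#endif /* notdef */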

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
void
bawrite(register struct buf *bp)
{

	if(!(bp->b_flags & B_BUSY))
		panic("bawrite: not busy");

	if(bp->b_flags & B_INVAL)
		brelse(bp);
	else {
		int wasdelayed;

		wasdelayed = bp->b_flags & B_DELWRI;
		bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
		if(wasdelayed)
			reassignbuf(bp, bp->b_vp);

		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_oublock++;
		bp->b_flags |= B_DIRTY | B_ASYNC;
		bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
	}
}
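
/*
 * Illustrative sketch (not original code): choosing among the three
 * write paths for a busy, modified buffer.
 */
#ifdef notdef
static void
example_write_choice(struct buf *bp, int must_be_durable, int done_with_block)
{
	if (must_be_durable)
		(void) bwrite(bp);	/* start I/O, wait, release */
	else if (done_with_block)
		bawrite(bp);		/* start I/O, release on completion */
	else
		bdwrite(bp);		/* just mark dirty for later */
}
#endif /* notdef */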

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
void
brelse(register struct buf *bp)
{
	int x;

	/* anyone need a "free" block? */
	x=splbio();
	if ((bfreelist + BQ_AGE)->b_flags & B_WANTED) {
		(bfreelist + BQ_AGE)->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/* anyone need this very block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}

	if (bp->b_flags & (B_INVAL|B_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI|B_CACHE);
		if(bp->b_vp)
			brelvp(bp);
	}

	/*
	 * Enqueue on the appropriate free list.  The bins*free()
	 * macros expand to complete statements, so no semicolon
	 * follows them here.
	 */
	/* buffers with junk contents */
	if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE))
		binsheadfree(bp, bfreelist + BQ_AGE)
	/* buffers with stale but valid contents */
	else if(bp->b_flags & B_AGE)
		binstailfree(bp, bfreelist + BQ_AGE)
	/* buffers with valid and quite potentially reusable contents */
	else
		binstailfree(bp, bfreelist + BQ_LRU)

	/* unlock */
	bp->b_flags &= ~B_BUSY;
	splx(x);
}

int freebufspace;
int allocbufspace;

/*
 * Find a buffer which is available for use.
 * If there is free memory for buffer space and an empty header on the
 * empty list, use those.  Otherwise, select a buffer from a free list,
 * preferring the AGE list over the LRU list.
 */
static struct buf *
getnewbuf(int sz)
{
	struct buf *bp;
	int x;

	x = splbio();
start:
	/* can we constitute a new buffer? */
	if (freebufspace > sz
		&& bfreelist[BQ_EMPTY].av_forw != (struct buf *)bfreelist+BQ_EMPTY) {
		caddr_t addr;

/*#define notyet*/
#ifndef notyet
		if ((addr = malloc (sz, M_IOBUF, M_WAITOK)) == 0) goto tryfree;
#else /* notyet */
		/* get new memory buffer */
		if (round_page(sz) == sz)
			addr = (caddr_t) kmem_alloc_wired_wait(buffer_map, sz);
		else
			addr = (caddr_t) malloc (sz, M_IOBUF, M_WAITOK);
	/*if ((addr = malloc (sz, M_IOBUF, M_NOWAIT)) == 0) goto tryfree;*/
		bzero(addr, sz);
#endif /* notyet */
		freebufspace -= sz;
		allocbufspace += sz;

		bp = bfreelist[BQ_EMPTY].av_forw;
		bp->b_flags = B_BUSY | B_INVAL;
		bremfree(bp);
		bp->b_un.b_addr = addr;
		bp->b_bufsize = sz;		/* 20 Aug 92*/
		goto fillin;
	}

tryfree:
	if (bfreelist[BQ_AGE].av_forw != (struct buf *)bfreelist+BQ_AGE) {
		bp = bfreelist[BQ_AGE].av_forw;
		bremfree(bp);
	} else if (bfreelist[BQ_LRU].av_forw != (struct buf *)bfreelist+BQ_LRU) {
		bp = bfreelist[BQ_LRU].av_forw;
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		(bfreelist + BQ_AGE)->b_flags |= B_WANTED;
		tsleep((caddr_t)bfreelist, PRIBIO, "newbuf", 0);
		splx(x);
		return (0);
	}

	/* if we are a delayed write, convert to an async write! */
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_BUSY;
		bawrite (bp);
		goto start;
	}

	if(bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) crfree(bp->b_rcred);	/* 25 Apr 92*/
	if (bp->b_wcred != NOCRED) crfree(bp->b_wcred);
	bp->b_flags = B_BUSY;
fillin:
	bremhash(bp);
	splx(x);
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	if (bp->b_bufsize != sz)
		allocbuf(bp, sz);
	bp->b_bcount = bp->b_bufsize = sz;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bh;
	struct buf *bp;

	bh = BUFHASH(vp, blkno);

	/* Search hash chain */
	bp = bh->b_forw;
	while (bp != (struct buf *) bh) {
		/* hit */
		if (bp->b_lblkno == blkno && bp->b_vp == vp
			&& (bp->b_flags & B_INVAL) == 0)
			return (bp);
		bp = bp->b_forw;
	}

	return(0);
}
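
/*
 * Illustrative sketch (hypothetical helper): incore() only reports
 * residency; it does not lock the buffer, so the answer is advisory
 * unless the caller goes through getblk() to make the buffer busy.
 */
#ifdef notdef
static int
example_is_cached(struct vnode *vp, daddr_t lbn)
{
	return (incore(vp, lbn) != 0);
}
#endif /* notdef */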

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that
 * cached blocks are of the correct size.
 */
struct buf *
getblk(register struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp, *bh;
	int x;

	x = splbio();
loop:
	if (bp = incore(vp, blkno)) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			tsleep ((caddr_t)bp, PRIBIO, "getblk", 0);
			goto loop;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		if (size > bp->b_bufsize)
			panic("getblk: cached buffer too small");
		/* if (bp->b_bufsize != size) allocbuf(bp, size); */
	} else {

		if ((bp = getnewbuf(size)) == 0) goto loop;
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		bh = BUFHASH(vp, blkno);
		binshash(bp, bh);
		bp->b_flags = B_BUSY;
	}
	splx(x);
	return (bp);
}
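
/*
 * Illustrative sketch (hypothetical caller): overwriting a whole block.
 * Since every byte will be replaced, the caller uses getblk() directly
 * and skips the read that bread() would have done.
 */
#ifdef notdef
static int
example_overwrite(struct vnode *vp, daddr_t lbn, int bsize, caddr_t src)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize);	/* busy; B_CACHE set if found */
	bcopy(src, bp->b_un.b_addr, bsize);
	return (bwrite(bp));
}
#endif /* notdef */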

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int x;

	while ((bp = getnewbuf(size)) == 0)
		;
	x = splbio();
	binshash(bp, bfreelist + BQ_AGE);
	splx(x);

	return (bp);
}
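
/*
 * Illustrative sketch (hypothetical caller; the B_INVAL convention here
 * is an assumption of the example): geteblk() buffers make convenient
 * scratch space, since they belong to no vnode.
 */
#ifdef notdef
static void
example_scratch(int size)
{
	struct buf *bp;

	bp = geteblk(size);
	/* ... use size bytes at bp->b_un.b_addr as temporary storage ... */
	bp->b_flags |= B_INVAL;		/* contents are meaningless now */
	brelse(bp);
}
#endif /* notdef */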

/*
 * Exchange a buffer's underlying buffer storage for one of different
 * size, taking care to maintain contents appropriately. When buffer
 * increases in size, caller is responsible for filling out additional
 * contents. When buffer shrinks in size, data is lost, so caller must
 * first return it to backing store before shrinking the buffer, as
 * no implied I/O will be done.
 */
void
allocbuf(register struct buf *bp, int size)
{
	caddr_t newcontents;

	/* get new memory buffer */
#ifndef notyet
	newcontents = (caddr_t) malloc (size, M_IOBUF, M_WAITOK);
#else /* notyet */
	if (round_page(size) == size)
		newcontents = (caddr_t) kmem_alloc_wired_wait(buffer_map, size);
	else
		newcontents = (caddr_t) malloc (size, M_IOBUF, M_WAITOK);
#endif /* notyet */

	/* copy the old into the new, up to the maximum that will fit */
	bcopy (bp->b_un.b_addr, newcontents, min(bp->b_bufsize, size));

	/* return old contents to free heap */
#ifndef notyet
	free (bp->b_un.b_addr, M_IOBUF);
#else /* notyet */
	if (round_page(bp->b_bufsize) == bp->b_bufsize)
		kmem_free_wakeup(buffer_map, bp->b_un.b_addr, bp->b_bufsize);
	else
		free (bp->b_un.b_addr, M_IOBUF);
#endif /* notyet */

	/* adjust buffer cache's idea of memory allocated to buffer contents */
	freebufspace -= size - bp->b_bufsize;
	allocbufspace += size - bp->b_bufsize;

	/* update buffer header */
	bp->b_un.b_addr = newcontents;
	bp->b_bcount = bp->b_bufsize = size;
}
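
/*
 * Illustrative sketch (hypothetical caller): growing a buffer, as a
 * filesystem might when a file's last fragment is extended. The new
 * tail bytes are uninitialized, so the caller zeroes them itself.
 */
#ifdef notdef
static void
example_grow(struct buf *bp, int newsize)
{
	int oldsize = bp->b_bufsize;

	allocbuf(bp, newsize);		/* preserves the first oldsize bytes */
	if (newsize > oldsize)
		bzero(bp->b_un.b_addr + oldsize, newsize - oldsize);
}
#endif /* notdef */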

/*
 * Patiently await operations to complete on this buffer.
 * When they do, extract and return any error value
 * associated with the I/O.
 * If an invalid block, force it off the lookup hash chains.
 */
int
biowait(register struct buf *bp)
{
	int x;

	x = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t)bp, PRIBIO, "biowait", 0);
	if((bp->b_flags & B_ERROR) || bp->b_error) {
		if ((bp->b_flags & B_INVAL) == 0) {
			bp->b_flags |= B_INVAL;
			bremhash(bp);
			binshash(bp, bfreelist + BQ_AGE);
		}
		if (!bp->b_error)
			bp->b_error = EIO;
		else
			bp->b_flags |= B_ERROR;
		splx(x);
		return (bp->b_error);
	} else {
		splx(x);
		return (0);
	}
}
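
/*
 * Illustrative sketch (not original code): the canonical synchronous
 * I/O sequence that bread() and bwrite() use internally; a caller
 * holding a busy, prepared buffer could do the same.
 */
#ifdef notdef
static int
example_sync_io(struct buf *bp)
{
	VOP_STRATEGY(bp);		/* queue the I/O */
	return (biowait(bp));		/* sleep until B_DONE; 0 or errno */
}
#endif /* notdef */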

/*
 * Finish up operations on a buffer, calling an optional function
 * (if requested), and releasing the buffer if marked asynchronous.
 * Mark this buffer done so that others biowait()'ing for it will
 * notice when they are woken up from sleep().
 */
void
biodone(register struct buf *bp)
{
#ifndef NOBOUNCE
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}

	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary -
	 * so no need to do a wakeup here in the async case.
	 */
	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
}
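
/*
 * Illustrative sketch (hypothetical driver interrupt path): a device
 * driver completes a transfer by recording any error in the buffer and
 * calling biodone(), which wakes biowait()'ers or releases async buffers.
 */
#ifdef notdef
static void
example_intr_done(struct buf *bp, int error)
{
	if (error) {
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
	}
	biodone(bp);
}
#endif /* notdef */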

/*
 * Internal update daemon, process 3
 * The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;
#ifndef UPDATE_INTERVAL
int vfs_update_interval = 30;
#else
int vfs_update_interval = UPDATE_INTERVAL;
#endif

void
vfs_update() {
	(void) spl0();
	while(1) {
		tsleep((caddr_t)&vfs_update_wakeup, PRIBIO, "update",
			hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}
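
/*
 * Illustrative sketch (an assumption based on the comment above, not
 * original code): code elsewhere in the kernel could force an early
 * sync by setting vfs_update_wakeup and waking the daemon before its
 * interval expires.
 */
#ifdef notdef
static void
example_kick_update(void)
{
	vfs_update_wakeup = 1;
	wakeup((caddr_t)&vfs_update_wakeup);
}
#endif /* notdef */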

/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in ufs/ufs_vfsops.c.
 */
void
bufstats()
{
	int s, i, j, count;
	register struct buf *bp, *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (bp = bfreelist, i = 0; bp < &bfreelist[BQUEUES]; bp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (dp = bp->av_forw; dp != bp; dp = dp->av_forw) {
			counts[dp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}