/*	vfs_bio.c	4.23	81/07/25	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/trace.h"

/*
 * The following several routines allocate and free
 * buffers with various side effects.  In general the
 * arguments to an allocate routine are a device and
 * a block number, and the value is a pointer to
 * the buffer header; the buffer is marked "busy"
 * so that no one else can touch it.  If the block was
 * already in core, no I/O need be done; if it is
 * already busy, the process waits until it becomes free.
 * The following routines allocate a buffer:
 *	getblk
 *	bread
 *	breada
 *	baddr	(if it is incore)
 * Eventually the buffer must be released, possibly with the
 * side effect of writing it out, by using one of
 *	bwrite
 *	bdwrite
 *	bawrite
 *	brelse
 */

struct	buf bfreelist[BQUEUES];
struct	buf bswlist, *bclnlist;

#define	BUFHSZ	63
struct	bufhd bufhash[BUFHSZ];
#define	BUFHASH(dev, dblkno)	\
	((struct buf *)&bufhash[((int)(dev)+(int)(dblkno)) % BUFHSZ])
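
/*
 * Added worked example: with BUFHSZ = 63, a lookup for device 1,
 * disk block 130 hashes to bucket (1 + 130) % 63 = 5, so a search
 * walks only that one chain rather than every buffer in the system.
 */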

/*
 * Initialize hash links for buffers.
 */
bhinit()
{
	register int i;
	register struct bufhd *bp;

	for (bp = bufhash, i = 0; i < BUFHSZ; i++, bp++)
		bp->b_forw = bp->b_back = (struct buf *)bp;
}
56 | ||
663dbc72 BJ |
57 | /* #define DISKMON 1 */ |
58 | ||
59 | #ifdef DISKMON | |
60 | struct { | |
61 | int nbuf; | |
62 | long nread; | |
63 | long nreada; | |
64 | long ncache; | |
65 | long nwrite; | |
4c05b581 | 66 | long bufcount[64]; |
663dbc72 BJ |
67 | } io_info; |
68 | #endif | |
69 | ||
70 | /* | |
71 | * Swap IO headers - | |
72 | * They contain the necessary information for the swap I/O. | |
73 | * At any given time, a swap header can be in three | |
74 | * different lists. When free it is in the free list, | |
75 | * when allocated and the I/O queued, it is on the swap | |
76 | * device list, and finally, if the operation was a dirty | |
77 | * page push, when the I/O completes, it is inserted | |
78 | * in a list of cleaned pages to be processed by the pageout daemon. | |
79 | */ | |
4c05b581 BJ |
80 | struct buf *swbuf; |
81 | short *swsize; /* CAN WE JUST USE B_BCOUNT? */ | |
82 | int *swpf; | |
663dbc72 | 83 | |
663dbc72 | 84 | |

#ifndef	UNFAST
#define	notavail(bp) \
{ \
	int s = spl6(); \
	(bp)->av_back->av_forw = (bp)->av_forw; \
	(bp)->av_forw->av_back = (bp)->av_back; \
	(bp)->b_flags |= B_BUSY; \
	splx(s); \
}
#endif
95 | ||
96 | /* | |
97 | * Read in (if necessary) the block and return a buffer pointer. | |
98 | */ | |
99 | struct buf * | |
100 | bread(dev, blkno) | |
101 | dev_t dev; | |
102 | daddr_t blkno; | |
103 | { | |
104 | register struct buf *bp; | |
105 | ||
106 | bp = getblk(dev, blkno); | |
107 | if (bp->b_flags&B_DONE) { | |
15f77b9b BJ |
108 | #ifdef TRACE |
109 | trace(TR_BREADHIT, dev, blkno); | |
973ecc4f | 110 | #endif |
663dbc72 BJ |
111 | #ifdef DISKMON |
112 | io_info.ncache++; | |
113 | #endif | |
114 | return(bp); | |
115 | } | |
116 | bp->b_flags |= B_READ; | |
117 | bp->b_bcount = BSIZE; | |
118 | (*bdevsw[major(dev)].d_strategy)(bp); | |
15f77b9b BJ |
119 | #ifdef TRACE |
120 | trace(TR_BREADMISS, dev, blkno); | |
973ecc4f | 121 | #endif |
663dbc72 BJ |
122 | #ifdef DISKMON |
123 | io_info.nread++; | |
124 | #endif | |
125 | u.u_vm.vm_inblk++; /* pay for read */ | |
126 | iowait(bp); | |
127 | return(bp); | |
128 | } | |
129 | ||
130 | /* | |
131 | * Read in the block, like bread, but also start I/O on the | |
132 | * read-ahead block (which is not allocated to the caller) | |
133 | */ | |
134 | struct buf * | |
135 | breada(dev, blkno, rablkno) | |
136 | dev_t dev; | |
137 | daddr_t blkno, rablkno; | |
138 | { | |
139 | register struct buf *bp, *rabp; | |
140 | ||
141 | bp = NULL; | |
142 | if (!incore(dev, blkno)) { | |
143 | bp = getblk(dev, blkno); | |
144 | if ((bp->b_flags&B_DONE) == 0) { | |
145 | bp->b_flags |= B_READ; | |
146 | bp->b_bcount = BSIZE; | |
147 | (*bdevsw[major(dev)].d_strategy)(bp); | |
15f77b9b BJ |
148 | #ifdef TRACE |
149 | trace(TR_BREADMISS, dev, blkno); | |
973ecc4f | 150 | #endif |
663dbc72 BJ |
151 | #ifdef DISKMON |
152 | io_info.nread++; | |
153 | #endif | |
154 | u.u_vm.vm_inblk++; /* pay for read */ | |
155 | } | |
15f77b9b | 156 | #ifdef TRACE |
973ecc4f | 157 | else |
15f77b9b | 158 | trace(TR_BREADHIT, dev, blkno); |
973ecc4f | 159 | #endif |
663dbc72 BJ |
160 | } |
161 | if (rablkno && !incore(dev, rablkno)) { | |
162 | rabp = getblk(dev, rablkno); | |
973ecc4f | 163 | if (rabp->b_flags & B_DONE) { |
663dbc72 | 164 | brelse(rabp); |
15f77b9b BJ |
165 | #ifdef TRACE |
166 | trace(TR_BREADHITRA, dev, blkno); | |
973ecc4f BJ |
167 | #endif |
168 | } else { | |
663dbc72 BJ |
169 | rabp->b_flags |= B_READ|B_ASYNC; |
170 | rabp->b_bcount = BSIZE; | |
171 | (*bdevsw[major(dev)].d_strategy)(rabp); | |
15f77b9b BJ |
172 | #ifdef TRACE |
173 | trace(TR_BREADMISSRA, dev, rablock); | |
973ecc4f | 174 | #endif |
663dbc72 BJ |
175 | #ifdef DISKMON |
176 | io_info.nreada++; | |
177 | #endif | |
178 | u.u_vm.vm_inblk++; /* pay in advance */ | |
179 | } | |
180 | } | |
181 | if(bp == NULL) | |
182 | return(bread(dev, blkno)); | |
183 | iowait(bp); | |
184 | return(bp); | |
185 | } | |
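
/*
 * A sketch (an added example, not part of the original source) of how
 * a sequential reader might exploit breada: while the caller consumes
 * block bn, I/O on the hypothetical next block bn+1 is already under
 * way, so the follow-up bread usually hits in the cache.
 */
rasample(dev, bn)
	dev_t dev;
	daddr_t bn;
{
	register struct buf *bp;

	bp = breada(dev, bn, bn+1);	/* read bn, prefetch bn+1 */
	/* ... consume the data at bp->b_un.b_addr ... */
	brelse(bp);			/* keep it cached for reuse */
}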
186 | ||
187 | /* | |
188 | * Write the buffer, waiting for completion. | |
189 | * Then release the buffer. | |
190 | */ | |
191 | bwrite(bp) | |
192 | register struct buf *bp; | |
193 | { | |
194 | register flag; | |
195 | ||
196 | flag = bp->b_flags; | |
197 | bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI | B_AGE); | |
198 | bp->b_bcount = BSIZE; | |
199 | #ifdef DISKMON | |
200 | io_info.nwrite++; | |
201 | #endif | |
202 | if ((flag&B_DELWRI) == 0) | |
203 | u.u_vm.vm_oublk++; /* noone paid yet */ | |
15f77b9b | 204 | #ifdef TRACE |
53f9ca20 | 205 | trace(TR_BWRITE, bp->b_dev, bp->b_blkno); |
973ecc4f | 206 | #endif |
663dbc72 BJ |
207 | (*bdevsw[major(bp->b_dev)].d_strategy)(bp); |
208 | if ((flag&B_ASYNC) == 0) { | |
209 | iowait(bp); | |
210 | brelse(bp); | |
211 | } else if (flag & B_DELWRI) | |
212 | bp->b_flags |= B_AGE; | |
213 | else | |
214 | geterror(bp); | |
215 | } | |
216 | ||
217 | /* | |
218 | * Release the buffer, marking it so that if it is grabbed | |
219 | * for another purpose it will be written out before being | |
220 | * given up (e.g. when writing a partial block where it is | |
221 | * assumed that another write for the same block will soon follow). | |
222 | * This can't be done for magtape, since writes must be done | |
223 | * in the same order as requested. | |
224 | */ | |
225 | bdwrite(bp) | |
226 | register struct buf *bp; | |
227 | { | |
e1e57888 | 228 | register int flags; |
663dbc72 BJ |
229 | |
230 | if ((bp->b_flags&B_DELWRI) == 0) | |
231 | u.u_vm.vm_oublk++; /* noone paid yet */ | |
e1e57888 RE |
232 | flags = bdevsw[major(bp->b_dev)].d_flags; |
233 | if(flags & B_TAPE) | |
663dbc72 BJ |
234 | bawrite(bp); |
235 | else { | |
236 | bp->b_flags |= B_DELWRI | B_DONE; | |
237 | brelse(bp); | |
238 | } | |
239 | } | |
240 | ||
241 | /* | |
242 | * Release the buffer, start I/O on it, but don't wait for completion. | |
243 | */ | |
244 | bawrite(bp) | |
245 | register struct buf *bp; | |
246 | { | |
247 | ||
248 | bp->b_flags |= B_ASYNC; | |
249 | bwrite(bp); | |
250 | } | |
251 | ||
252 | /* | |
253 | * release the buffer, with no I/O implied. | |
254 | */ | |
255 | brelse(bp) | |
256 | register struct buf *bp; | |
257 | { | |
46387ee3 | 258 | register struct buf *flist; |
663dbc72 BJ |
259 | register s; |
260 | ||
261 | if (bp->b_flags&B_WANTED) | |
262 | wakeup((caddr_t)bp); | |
46387ee3 BJ |
263 | if (bfreelist[0].b_flags&B_WANTED) { |
264 | bfreelist[0].b_flags &= ~B_WANTED; | |
265 | wakeup((caddr_t)bfreelist); | |
663dbc72 | 266 | } |
60a71525 BJ |
267 | if (bp->b_flags&B_ERROR) |
268 | if (bp->b_flags & B_LOCKED) | |
269 | bp->b_flags &= ~B_ERROR; /* try again later */ | |
270 | else | |
271 | bp->b_dev = NODEV; /* no assoc */ | |
663dbc72 | 272 | s = spl6(); |
46387ee3 BJ |
273 | if (bp->b_flags & (B_ERROR|B_INVAL)) { |
274 | /* block has no info ... put at front of most free list */ | |
275 | flist = &bfreelist[BQUEUES-1]; | |
276 | flist->av_forw->av_back = bp; | |
277 | bp->av_forw = flist->av_forw; | |
278 | flist->av_forw = bp; | |
279 | bp->av_back = flist; | |
663dbc72 | 280 | } else { |
46387ee3 BJ |
281 | if (bp->b_flags & B_LOCKED) |
282 | flist = &bfreelist[BQ_LOCKED]; | |
283 | else if (bp->b_flags & B_AGE) | |
284 | flist = &bfreelist[BQ_AGE]; | |
285 | else | |
286 | flist = &bfreelist[BQ_LRU]; | |
287 | flist->av_back->av_forw = bp; | |
288 | bp->av_back = flist->av_back; | |
289 | flist->av_back = bp; | |
290 | bp->av_forw = flist; | |
663dbc72 BJ |
291 | } |
292 | bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE); | |
293 | splx(s); | |
294 | } | |
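
/*
 * Added note on the free list discipline: getblk() scans the queues
 * from bfreelist[BQUEUES-1] down to (but not including) BQ_LOCKED and
 * reclaims from the front (av_forw) of the first non-empty queue.
 * Since brelse() appends ordinary buffers at the back but pushes
 * error/invalid ones on the front of the last queue, contentless
 * buffers are reused first and each queue otherwise ages FIFO.
 */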
295 | ||
296 | /* | |
297 | * See if the block is associated with some buffer | |
298 | * (mainly to avoid getting hung up on a wait in breada) | |
299 | */ | |
300 | incore(dev, blkno) | |
301 | dev_t dev; | |
302 | daddr_t blkno; | |
303 | { | |
304 | register struct buf *bp; | |
46387ee3 | 305 | register struct buf *dp; |
663dbc72 BJ |
306 | register int dblkno = fsbtodb(blkno); |
307 | ||
337ed2cc | 308 | dp = BUFHASH(dev, dblkno); |
46387ee3 BJ |
309 | for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) |
310 | if (bp->b_blkno == dblkno && bp->b_dev == dev && | |
311 | !(bp->b_flags & B_INVAL)) | |
5603d07d | 312 | return (1); |
5603d07d | 313 | return (0); |
663dbc72 BJ |
314 | } |
315 | ||
316 | struct buf * | |
317 | baddr(dev, blkno) | |
318 | dev_t dev; | |
319 | daddr_t blkno; | |
320 | { | |
321 | ||
322 | if (incore(dev, blkno)) | |
323 | return (bread(dev, blkno)); | |
324 | return (0); | |
325 | } | |
326 | ||
327 | /* | |
328 | * Assign a buffer for the given block. If the appropriate | |
329 | * block is already associated, return it; otherwise search | |
330 | * for the oldest non-busy buffer and reassign it. | |
331 | */ | |
332 | struct buf * | |
333 | getblk(dev, blkno) | |
334 | dev_t dev; | |
335 | daddr_t blkno; | |
336 | { | |
5603d07d | 337 | register struct buf *bp, *dp, *ep; |
46387ee3 | 338 | register int dblkno = fsbtodb(blkno); |
5aa9d5ea RE |
339 | #ifdef DISKMON |
340 | register int i; | |
341 | #endif | |
663dbc72 | 342 | |
01659974 BJ |
343 | if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT)) |
344 | blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1); | |
345 | dblkno = fsbtodb(blkno); | |
46387ee3 | 346 | dp = BUFHASH(dev, dblkno); |
663dbc72 | 347 | loop: |
81263dba | 348 | (void) spl0(); |
46387ee3 BJ |
349 | for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) { |
350 | if (bp->b_blkno != dblkno || bp->b_dev != dev || | |
351 | bp->b_flags&B_INVAL) | |
663dbc72 | 352 | continue; |
81263dba | 353 | (void) spl6(); |
663dbc72 BJ |
354 | if (bp->b_flags&B_BUSY) { |
355 | bp->b_flags |= B_WANTED; | |
356 | sleep((caddr_t)bp, PRIBIO+1); | |
357 | goto loop; | |
358 | } | |
81263dba | 359 | (void) spl0(); |
663dbc72 BJ |
360 | #ifdef DISKMON |
361 | i = 0; | |
362 | dp = bp->av_forw; | |
46387ee3 | 363 | while ((dp->b_flags & B_HEAD) == 0) { |
663dbc72 BJ |
364 | i++; |
365 | dp = dp->av_forw; | |
366 | } | |
4c05b581 | 367 | if (i<64) |
663dbc72 BJ |
368 | io_info.bufcount[i]++; |
369 | #endif | |
370 | notavail(bp); | |
371 | bp->b_flags |= B_CACHE; | |
372 | return(bp); | |
373 | } | |
5603d07d BJ |
374 | if (major(dev) >= nblkdev) |
375 | panic("blkdev"); | |
81263dba | 376 | (void) spl6(); |
46387ee3 BJ |
377 | for (ep = &bfreelist[BQUEUES-1]; ep > bfreelist; ep--) |
378 | if (ep->av_forw != ep) | |
379 | break; | |
380 | if (ep == bfreelist) { /* no free blocks at all */ | |
381 | ep->b_flags |= B_WANTED; | |
382 | sleep((caddr_t)ep, PRIBIO+1); | |
663dbc72 BJ |
383 | goto loop; |
384 | } | |
283cac0a | 385 | (void) spl0(); |
46387ee3 | 386 | bp = ep->av_forw; |
663dbc72 BJ |
387 | notavail(bp); |
388 | if (bp->b_flags & B_DELWRI) { | |
389 | bp->b_flags |= B_ASYNC; | |
390 | bwrite(bp); | |
391 | goto loop; | |
392 | } | |
15f77b9b | 393 | #ifdef TRACE |
53f9ca20 | 394 | trace(TR_BRELSE, bp->b_dev, bp->b_blkno); |
973ecc4f | 395 | #endif |
663dbc72 BJ |
396 | bp->b_flags = B_BUSY; |
397 | bp->b_back->b_forw = bp->b_forw; | |
398 | bp->b_forw->b_back = bp->b_back; | |
399 | bp->b_forw = dp->b_forw; | |
400 | bp->b_back = dp; | |
401 | dp->b_forw->b_back = bp; | |
402 | dp->b_forw = bp; | |
403 | bp->b_dev = dev; | |
404 | bp->b_blkno = dblkno; | |
405 | return(bp); | |
406 | } | |
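
/*
 * Added note: the spl6/spl0 pairs above close the classic sleep/wakeup
 * race; B_BUSY is tested and B_WANTED set at high priority so that an
 * interrupt-level wakeup from iodone() cannot slip in between the test
 * and the sleep().
 */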
407 | ||
408 | /* | |
409 | * get an empty block, | |
410 | * not assigned to any particular device | |
411 | */ | |
412 | struct buf * | |
413 | geteblk() | |
414 | { | |
436518b9 | 415 | register struct buf *bp, *dp; |
663dbc72 BJ |
416 | |
417 | loop: | |
81263dba | 418 | (void) spl6(); |
46387ee3 BJ |
419 | for (dp = &bfreelist[BQUEUES-1]; dp > bfreelist; dp--) |
420 | if (dp->av_forw != dp) | |
421 | break; | |
422 | if (dp == bfreelist) { /* no free blocks */ | |
423 | dp->b_flags |= B_WANTED; | |
424 | sleep((caddr_t)dp, PRIBIO+1); | |
425 | goto loop; | |
663dbc72 | 426 | } |
81263dba | 427 | (void) spl0(); |
46387ee3 | 428 | bp = dp->av_forw; |
663dbc72 BJ |
429 | notavail(bp); |
430 | if (bp->b_flags & B_DELWRI) { | |
431 | bp->b_flags |= B_ASYNC; | |
432 | bwrite(bp); | |
433 | goto loop; | |
434 | } | |
15f77b9b | 435 | #ifdef TRACE |
53f9ca20 | 436 | trace(TR_BRELSE, bp->b_dev, bp->b_blkno); |
973ecc4f | 437 | #endif |
46387ee3 | 438 | bp->b_flags = B_BUSY|B_INVAL; |
663dbc72 BJ |
439 | bp->b_back->b_forw = bp->b_forw; |
440 | bp->b_forw->b_back = bp->b_back; | |
441 | bp->b_forw = dp->b_forw; | |
442 | bp->b_back = dp; | |
443 | dp->b_forw->b_back = bp; | |
444 | dp->b_forw = bp; | |
445 | bp->b_dev = (dev_t)NODEV; | |
446 | return(bp); | |
447 | } | |
448 | ||
449 | /* | |
450 | * Wait for I/O completion on the buffer; return errors | |
451 | * to the user. | |
452 | */ | |
453 | iowait(bp) | |
454 | register struct buf *bp; | |
455 | { | |
456 | ||
81263dba | 457 | (void) spl6(); |
663dbc72 BJ |
458 | while ((bp->b_flags&B_DONE)==0) |
459 | sleep((caddr_t)bp, PRIBIO); | |
81263dba | 460 | (void) spl0(); |
663dbc72 BJ |
461 | geterror(bp); |
462 | } | |
463 | ||
443c8066 | 464 | #ifdef UNFAST |
663dbc72 BJ |
465 | /* |
466 | * Unlink a buffer from the available list and mark it busy. | |
467 | * (internal interface) | |
468 | */ | |
469 | notavail(bp) | |
470 | register struct buf *bp; | |
471 | { | |
472 | register s; | |
473 | ||
474 | s = spl6(); | |
475 | bp->av_back->av_forw = bp->av_forw; | |
476 | bp->av_forw->av_back = bp->av_back; | |
477 | bp->b_flags |= B_BUSY; | |
478 | splx(s); | |
479 | } | |
480 | #endif | |
481 | ||
482 | /* | |
483 | * Mark I/O complete on a buffer. If the header | |
484 | * indicates a dirty page push completion, the | |
485 | * header is inserted into the ``cleaned'' list | |
486 | * to be processed by the pageout daemon. Otherwise | |
487 | * release it if I/O is asynchronous, and wake | |
488 | * up anyone waiting for it. | |
489 | */ | |
490 | iodone(bp) | |
491 | register struct buf *bp; | |
492 | { | |
493 | register int s; | |
494 | ||
80e7c811 BJ |
495 | if (bp->b_flags & B_DONE) |
496 | panic("dup iodone"); | |
663dbc72 BJ |
497 | bp->b_flags |= B_DONE; |
498 | if (bp->b_flags & B_DIRTY) { | |
499 | if (bp->b_flags & B_ERROR) | |
500 | panic("IO err in push"); | |
501 | s = spl6(); | |
663dbc72 BJ |
502 | bp->av_forw = bclnlist; |
503 | bp->b_bcount = swsize[bp - swbuf]; | |
504 | bp->b_pfcent = swpf[bp - swbuf]; | |
796c66c0 BJ |
505 | cnt.v_pgout++; |
506 | cnt.v_pgpgout += bp->b_bcount / NBPG; | |
663dbc72 BJ |
507 | bclnlist = bp; |
508 | if (bswlist.b_flags & B_WANTED) | |
509 | wakeup((caddr_t)&proc[2]); | |
510 | splx(s); | |
a3ee1d55 | 511 | return; |
663dbc72 BJ |
512 | } |
513 | if (bp->b_flags&B_ASYNC) | |
514 | brelse(bp); | |
515 | else { | |
516 | bp->b_flags &= ~B_WANTED; | |
517 | wakeup((caddr_t)bp); | |
518 | } | |
519 | } | |
520 | ||
521 | /* | |
522 | * Zero the core associated with a buffer. | |
523 | */ | |
524 | clrbuf(bp) | |
525 | struct buf *bp; | |
526 | { | |
527 | register *p; | |
528 | register c; | |
529 | ||
530 | p = bp->b_un.b_words; | |
531 | c = BSIZE/sizeof(int); | |
532 | do | |
533 | *p++ = 0; | |
534 | while (--c); | |
535 | bp->b_resid = 0; | |
536 | } | |
537 | ||
538 | /* | |
539 | * swap I/O - | |
540 | * | |
541 | * If the flag indicates a dirty page push initiated | |
542 | * by the pageout daemon, we map the page into the i th | |
543 | * virtual page of process 2 (the daemon itself) where i is | |
544 | * the index of the swap header that has been allocated. | |
545 | * We simply initialize the header and queue the I/O but | |
546 | * do not wait for completion. When the I/O completes, | |
547 | * iodone() will link the header to a list of cleaned | |
548 | * pages to be processed by the pageout daemon. | |
549 | */ | |
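
/*
 * Added worked example with a hypothetical index: for the swap header
 * at i = bp - swbuf = 3, the page is mapped at daemon virtual page
 * p2dp = 3 * CLSIZE * KLMAX, so each header gets its own disjoint
 * CLSIZE*KLMAX-page window of process 2's address space and
 * concurrent pushes cannot collide.
 */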
swap(p, dblkno, addr, nbytes, rdflg, flag, dev, pfcent)
	struct proc *p;
	swblk_t dblkno;
	caddr_t addr;
	int rdflg, flag, nbytes;
	dev_t dev;
	unsigned pfcent;
{
	register struct buf *bp;
	register int c;
	int p2dp;
	register struct pte *dpte, *vpte;

	(void) spl6();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	(void) spl0();

	bp->b_flags = B_BUSY | B_PHYS | rdflg | flag;
	if ((bp->b_flags & (B_DIRTY|B_PGIN)) == 0)
		if (rdflg == B_READ)
			sum.v_pswpin += btoc(nbytes);
		else
			sum.v_pswpout += btoc(nbytes);
	bp->b_proc = p;
	if (flag & B_DIRTY) {
		p2dp = ((bp - swbuf) * CLSIZE) * KLMAX;
		dpte = dptopte(&proc[2], p2dp);
		vpte = vtopte(p, btop(addr));
		for (c = 0; c < nbytes; c += NBPG) {
			if (vpte->pg_pfnum == 0 || vpte->pg_fod)
				panic("swap bad pte");
			*dpte++ = *vpte++;
		}
		bp->b_un.b_addr = (caddr_t)ctob(p2dp);
	} else
		bp->b_un.b_addr = addr;
	while (nbytes > 0) {
		c = imin(ctob(120), nbytes);
		bp->b_bcount = c;
		bp->b_blkno = dblkno;
		bp->b_dev = dev;
		if (flag & B_DIRTY) {
			swpf[bp - swbuf] = pfcent;
			swsize[bp - swbuf] = nbytes;
		}
#ifdef	TRACE
		trace(TR_SWAPIO, dev, bp->b_blkno);
#endif
		(*bdevsw[major(dev)].d_strategy)(bp);
		if (flag & B_DIRTY) {
			if (c < nbytes)
				panic("big push");
			return;
		}
		(void) spl6();
		while ((bp->b_flags&B_DONE) == 0)
			sleep((caddr_t)bp, PSWP);
		(void) spl0();
		bp->b_un.b_addr += c;
		bp->b_flags &= ~B_DONE;
		if (bp->b_flags & B_ERROR) {
			if ((flag & (B_UAREA|B_PAGET)) || rdflg == B_WRITE)
				panic("hard IO err in swap");
			swkill(p, (char *)0);
		}
		nbytes -= c;
		dblkno += btoc(c);
	}
	(void) spl6();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)&proc[2]);
	}
	(void) spl0();
}
634 | ||
635 | /* | |
636 | * If rout == 0 then killed on swap error, else | |
637 | * rout is the name of the routine where we ran out of | |
638 | * swap space. | |
639 | */ | |
640 | swkill(p, rout) | |
641 | struct proc *p; | |
642 | char *rout; | |
643 | { | |
444f631c | 644 | char *mesg; |
663dbc72 | 645 | |
444f631c | 646 | printf("pid %d: ", p->p_pid); |
663dbc72 | 647 | if (rout) |
444f631c | 648 | printf(mesg = "killed due to no swap space\n"); |
663dbc72 | 649 | else |
444f631c BJ |
650 | printf(mesg = "killed on swap error\n"); |
651 | uprintf("sorry, pid %d was %s", p->p_pid, mesg); | |
663dbc72 BJ |
652 | /* |
653 | * To be sure no looping (e.g. in vmsched trying to | |
654 | * swap out) mark process locked in core (as though | |
655 | * done by user) after killing it so noone will try | |
656 | * to swap it out. | |
657 | */ | |
a30d2e97 | 658 | psignal(p, SIGKILL); |
663dbc72 BJ |
659 | p->p_flag |= SULOCK; |
660 | } | |
661 | ||
662 | /* | |
663 | * make sure all write-behind blocks | |
664 | * on dev (or NODEV for all) | |
665 | * are flushed out. | |
666 | * (from umount and update) | |
667 | */ | |
668 | bflush(dev) | |
669 | dev_t dev; | |
670 | { | |
671 | register struct buf *bp; | |
46387ee3 | 672 | register struct buf *flist; |
663dbc72 BJ |
673 | |
674 | loop: | |
81263dba | 675 | (void) spl6(); |
46387ee3 BJ |
676 | for (flist = bfreelist; flist < &bfreelist[BQUEUES]; flist++) |
677 | for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) { | |
663dbc72 BJ |
678 | if (bp->b_flags&B_DELWRI && (dev == NODEV||dev==bp->b_dev)) { |
679 | bp->b_flags |= B_ASYNC; | |
680 | notavail(bp); | |
681 | bwrite(bp); | |
682 | goto loop; | |
683 | } | |
684 | } | |
81263dba | 685 | (void) spl0(); |
663dbc72 BJ |
686 | } |
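
/*
 * Added note: bflush restarts its scan from the top after each write
 * because notavail() and bwrite() unlink the buffer and can reorder
 * the free lists, leaving the scan pointers stale.
 */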
687 | ||
688 | /* | |
689 | * Raw I/O. The arguments are | |
690 | * The strategy routine for the device | |
691 | * A buffer, which will always be a special buffer | |
692 | * header owned exclusively by the device for this purpose | |
693 | * The device number | |
694 | * Read/write flag | |
695 | * Essentially all the work is computing physical addresses and | |
696 | * validating them. | |
697 | * If the user has the proper access privilidges, the process is | |
698 | * marked 'delayed unlock' and the pages involved in the I/O are | |
699 | * faulted and locked. After the completion of the I/O, the above pages | |
700 | * are unlocked. | |
701 | */ | |
702 | physio(strat, bp, dev, rw, mincnt) | |
703 | int (*strat)(); | |
704 | register struct buf *bp; | |
705 | unsigned (*mincnt)(); | |
706 | { | |
707 | register int c; | |
708 | char *a; | |
709 | ||
710 | if (useracc(u.u_base,u.u_count,rw==B_READ?B_WRITE:B_READ) == NULL) { | |
711 | u.u_error = EFAULT; | |
712 | return; | |
713 | } | |
81263dba | 714 | (void) spl6(); |
663dbc72 BJ |
715 | while (bp->b_flags&B_BUSY) { |
716 | bp->b_flags |= B_WANTED; | |
717 | sleep((caddr_t)bp, PRIBIO+1); | |
718 | } | |
719 | bp->b_error = 0; | |
720 | bp->b_proc = u.u_procp; | |
721 | bp->b_un.b_addr = u.u_base; | |
52a593fa | 722 | while (u.u_count != 0) { |
663dbc72 BJ |
723 | bp->b_flags = B_BUSY | B_PHYS | rw; |
724 | bp->b_dev = dev; | |
725 | bp->b_blkno = u.u_offset >> PGSHIFT; | |
726 | bp->b_bcount = u.u_count; | |
727 | (*mincnt)(bp); | |
728 | c = bp->b_bcount; | |
729 | u.u_procp->p_flag |= SPHYSIO; | |
730 | vslock(a = bp->b_un.b_addr, c); | |
731 | (*strat)(bp); | |
81263dba | 732 | (void) spl6(); |
663dbc72 BJ |
733 | while ((bp->b_flags&B_DONE) == 0) |
734 | sleep((caddr_t)bp, PRIBIO); | |
735 | vsunlock(a, c, rw); | |
736 | u.u_procp->p_flag &= ~SPHYSIO; | |
737 | if (bp->b_flags&B_WANTED) | |
738 | wakeup((caddr_t)bp); | |
81263dba | 739 | (void) spl0(); |
663dbc72 BJ |
740 | bp->b_un.b_addr += c; |
741 | u.u_count -= c; | |
742 | u.u_offset += c; | |
52a593fa BJ |
743 | if (bp->b_flags&B_ERROR) |
744 | break; | |
663dbc72 BJ |
745 | } |
746 | bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS); | |
747 | u.u_count = bp->b_resid; | |
748 | geterror(bp); | |
749 | } | |
750 | ||
751 | /*ARGSUSED*/ | |
752 | unsigned | |
753 | minphys(bp) | |
754 | struct buf *bp; | |
755 | { | |
756 | ||
757 | if (bp->b_bcount > 60 * 1024) | |
758 | bp->b_bcount = 60 * 1024; | |
759 | } | |
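
/*
 * A sketch (an added example, not part of the original source) of the
 * conventional raw-device entry point: a hypothetical driver "xx"
 * with strategy routine xxstrategy() and private header xxrbuf
 * funnels its read routine through physio, letting minphys cap each
 * transfer.
 */
int	xxstrategy();
struct	buf xxrbuf;

xxread(dev)
	dev_t dev;
{

	physio(xxstrategy, &xxrbuf, dev, B_READ, minphys);
}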
760 | ||
761 | /* | |
762 | * Pick up the device's error number and pass it to the user; | |
763 | * if there is an error but the number is 0 set a generalized | |
764 | * code. Actually the latter is always true because devices | |
765 | * don't yet return specific errors. | |
766 | */ | |
767 | geterror(bp) | |
768 | register struct buf *bp; | |
769 | { | |
770 | ||
771 | if (bp->b_flags&B_ERROR) | |
772 | if ((u.u_error = bp->b_error)==0) | |
773 | u.u_error = EIO; | |
774 | } | |

/*
 * Invalidate in core blocks belonging to closed or unmounted filesystem.
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o.  Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty, then
 * properly flush the queues.  Until that happy day, this suffices for
 * correctness. ... kre
 */
binval(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct bufhd *hp;
#define	dp	((struct buf *)hp)
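	/*
	 * Added note: the dp macro lets the struct bufhd hash head
	 * stand in for a struct buf as the chain's sentinel; as in
	 * bhinit() above, this relies on the two structures sharing
	 * their initial members (the flags word and the forw/back
	 * links).
	 */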

	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
			if (bp->b_dev == dev)
				bp->b_flags |= B_INVAL;
}