/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vfs_bio.c	7.3 (Berkeley) %G%
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "buf.h"
#include "conf.h"
#include "proc.h"
#include "seg.h"
#include "vm.h"
#include "trace.h"
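
/*
 * Block I/O buffer-cache routines: bread/breada read blocks through the
 * cache, bwrite/bdwrite/bawrite write them back, brelse releases them,
 * and getblk/geteblk/brealloc/getnewbuf/biowait/biodone provide the
 * supporting machinery.
 */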

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
#ifdef SECSIZE
bread(dev, blkno, size, secsize)
#else SECSIZE
bread(dev, blkno, size)
#endif SECSIZE
	dev_t dev;
	daddr_t blkno;
	int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
#ifdef SECSIZE
	bp = getblk(dev, blkno, size, secsize);
#else SECSIZE
	bp = getblk(dev, blkno, size);
#endif SECSIZE
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(dev, size), blkno);
		return (bp);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	(*bdevsw[major(dev)].d_strategy)(bp);
	trace(TR_BREADMISS, pack(dev, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	biowait(bp);
	return (bp);
}
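
/*
 * Illustrative only (not part of the original source): a typical caller
 * reads a block, checks for an error, uses the data at b_un.b_addr, and
 * then releases the buffer.  fs_dev, bn and fs_bsize are hypothetical.
 *
 *	bp = bread(fs_dev, bn, fs_bsize);
 *	if (bp->b_flags & B_ERROR) {
 *		brelse(bp);
 *		return;
 *	}
 *	... examine bp->b_un.b_addr ...
 *	brelse(bp);
 */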

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
#ifdef SECSIZE
breada(dev, blkno, size, secsize, rablkno, rabsize)
#else SECSIZE
breada(dev, blkno, size, rablkno, rabsize)
#endif SECSIZE
	dev_t dev;
	daddr_t blkno; int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
	daddr_t rablkno; int rabsize;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(dev, blkno)) {
#ifdef SECSIZE
		bp = getblk(dev, blkno, size, secsize);
#else SECSIZE
		bp = getblk(dev, blkno, size);
#endif SECSIZE
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			(*bdevsw[major(dev)].d_strategy)(bp);
			trace(TR_BREADMISS, pack(dev, size), blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(dev, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (rablkno && !incore(dev, rablkno)) {
#ifdef SECSIZE
		rabp = getblk(dev, rablkno, rabsize, secsize);
#else SECSIZE
		rabp = getblk(dev, rablkno, rabsize);
#endif SECSIZE
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(dev, rabsize), blkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			(*bdevsw[major(dev)].d_strategy)(rabp);
			trace(TR_BREADMISSRA, pack(dev, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
#ifdef SECSIZE
		return (bread(dev, blkno, size, secsize));
#else SECSIZE
		return (bread(dev, blkno, size));
#endif SECSIZE
	biowait(bp);
	return (bp);
}
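
/*
 * Illustrative only (not from the original source): a sequential reader
 * would typically ask for the current block and name the next one as the
 * read-ahead candidate, e.g.
 *
 *	bp = breada(fs_dev, bn, fs_bsize, bn + btodb(fs_bsize), fs_bsize);
 *
 * fs_dev, bn and fs_bsize are hypothetical; the read-ahead buffer, if
 * started, proceeds asynchronously and is not returned to the caller.
 */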

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register flag;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	trace(TR_BWRITE, pack(bp->b_dev, bp->b_bcount), bp->b_blkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the q of blocks awaiting i/o completion status.
	 */
	if ((flag&B_ASYNC) == 0) {
		biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI)
		bp->b_flags |= B_AGE;
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	if (bdevsw[major(bp->b_dev)].d_flags & B_TAPE)
		bawrite(bp);
	else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	bwrite(bp);
}
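
/*
 * Illustrative only (not part of the original source): the three write
 * interfaces trade safety for latency.  bawrite suits a fully rewritten
 * block that will not be touched again soon; bdwrite keeps a partially
 * filled block in core in the expectation of another write; bwrite is for
 * callers that must know the write has completed.  For example (fs_dev,
 * bn and fs_bsize are hypothetical):
 *
 *	bp = bread(fs_dev, bn, fs_bsize);
 *	... modify bp->b_un.b_addr ...
 *	bdwrite(bp);		(deferred: written when the buffer is reused)
 */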

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
	/*
	 * If someone's waiting for this buffer, or is
	 * waiting for any free buffer, wake 'em up.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	if (bp->b_flags&B_ERROR)
		if (bp->b_flags & B_LOCKED)
			bp->b_flags &= ~B_ERROR;	/* try again later */
		else
			bp->b_dev = NODEV;		/* no assoc */

	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(dev, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_blkno == blkno && bp->b_dev == dev &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

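/*
 * Return a buffer for the given block if it is already in core (obtained
 * via bread, normally a cache hit); otherwise return 0 without starting
 * any I/O.
 */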
struct buf *
#ifdef SECSIZE
baddr(dev, blkno, size, secsize)
#else SECSIZE
baddr(dev, blkno, size)
#endif SECSIZE
	dev_t dev;
	daddr_t blkno;
	int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{

	if (incore(dev, blkno))
#ifdef SECSIZE
		return (bread(dev, blkno, size, secsize));
#else SECSIZE
		return (bread(dev, blkno, size));
#endif SECSIZE
	return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * If we find the buffer, but it is dirty (marked DELWRI) and
 * its size is changing, we must write it out first.  When the
 * buffer is shrinking, the write is done by brealloc to avoid
 * losing the unwritten data.  When the buffer is growing, the
 * write is done by getblk, so that bread will not read stale
 * disk data over the modified data in the buffer.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
#ifdef SECSIZE
getblk(dev, blkno, size, secsize)
#else SECSIZE
getblk(dev, blkno, size)
#endif SECSIZE
	dev_t dev;
	daddr_t blkno;
	int size;
#ifdef SECSIZE
	long secsize;
#endif SECSIZE
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * To prevent overflow of 32-bit ints when converting block
	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
	 * to the maximum number that can be converted to a byte offset
	 * without overflow.  This is historic code; what bug it fixed,
	 * or whether it is still a reasonable thing to do is open to
	 * dispute.  mkm 9/85
	 */
	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT))
		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(dev, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_dev != dev ||
		    bp->b_flags&B_INVAL)
			continue;
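		/*
		 * The block is in the cache.  If the buffer is busy (in
		 * use for i/o or held by another process), note our
		 * interest with B_WANTED and sleep; whoever releases it
		 * will wake us (see brelse and biodone), and the search
		 * is then retried from the top.
		 */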
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(bp);
		if (bp->b_bcount != size) {
			if (bp->b_bcount < size && (bp->b_flags&B_DELWRI)) {
				bp->b_flags &= ~B_ASYNC;
				bwrite(bp);
				goto loop;
			}
			if (brealloc(bp, size) == 0)
				goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	if (major(dev) >= nblkdev)
		panic("blkdev");
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	binshash(bp, dp);
	bp->b_dev = dev;
#ifdef SECSIZE
	bp->b_blksize = secsize;
#endif SECSIZE
	bp->b_blkno = blkno;
	bp->b_error = 0;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}

/*
 * get an empty block,
 * not assigned to any particular device
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
loop:
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	binshash(bp, flist);
	bp->b_dev = (dev_t)NODEV;
#ifdef SECSIZE
	bp->b_blksize = DEV_BSIZE;
#endif SECSIZE
	bp->b_error = 0;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}

/*
 * Allocate space associated with a buffer.
 * If space cannot be obtained, the buffer is released.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{
	daddr_t start, last;
	register struct buf *ep;
	struct buf *dp;
	int s;

	/*
	 * First need to make sure that all overlapping previous I/O
	 * is dealt with.
	 */
	if (size == bp->b_bcount)
		return (1);
	if (size < bp->b_bcount) {
		if (bp->b_flags & B_DELWRI) {
			bwrite(bp);
			return (0);
		}
		if (bp->b_flags & B_LOCKED)
			panic("brealloc");
		return (allocbuf(bp, size));
	}
	bp->b_flags &= ~B_DONE;
	if (bp->b_dev == NODEV)
		return (allocbuf(bp, size));

	trace(TR_BREALLOC, pack(bp->b_dev, size), bp->b_blkno);
	/*
	 * Search cache for any buffers that overlap the one that we
	 * are trying to allocate.  Overlapping buffers must be marked
	 * invalid, after being written out if they are dirty (indicated
	 * by B_DELWRI).  A disk block must be mapped by at most one buffer
	 * at any point in time.  Care must be taken to avoid deadlocking
	 * when two buffers are trying to get the same set of disk blocks.
	 */
	start = bp->b_blkno;
#ifdef SECSIZE
	last = start + size/bp->b_blksize - 1;
#else SECSIZE
	last = start + btodb(size) - 1;
#endif SECSIZE
	dp = BUFHASH(bp->b_dev, bp->b_blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
#ifdef SECSIZE
		    ep->b_blkno + ep->b_bcount/ep->b_blksize <= start)
#else SECSIZE
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
#endif SECSIZE
			continue;
		s = splbio();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(ep);
		if (ep->b_flags & B_DELWRI) {
			bwrite(ep);
			goto loop;
		}
		ep->b_flags |= B_INVAL;
		brelse(ep);
	}
	return (allocbuf(bp, size));
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	splx(s);
	bp = dp->av_forw;
	notavail(bp);
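	/*
	 * The buffer at the head of the free list may still hold
	 * delayed-write data.  Rather than discard it, start an
	 * asynchronous write and go back for another buffer; brelse
	 * returns this one to a free list when the write finishes.
	 */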
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags&B_DONE)==0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if (u.u_error == 0)			/* XXX */
		u.u_error = geterror(bp);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Insure that no part of a specified block is in an incore buffer.
#ifdef SECSIZE
 * "size" is given in device blocks (the units of b_blkno).
#endif SECSIZE
 */
blkflush(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
#ifdef SECSIZE
	int size;
#else SECSIZE
	long size;
#endif SECSIZE
{
	register struct buf *ep;
	struct buf *dp;
	daddr_t start, last;
	int s;

	start = blkno;
#ifdef SECSIZE
	last = start + size - 1;
#else SECSIZE
	last = start + btodb(size) - 1;
#endif SECSIZE
	dp = BUFHASH(dev, blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep->b_dev != dev || (ep->b_flags&B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
#ifdef SECSIZE
		    ep->b_blkno + ep->b_bcount / ep->b_blksize <= start)
#else SECSIZE
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
#endif SECSIZE
			continue;
		s = splbio();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		if (ep->b_flags & B_DELWRI) {
			splx(s);
			notavail(ep);
			bwrite(ep);
			goto loop;
		}
		splx(s);
	}
}

/*
 * Make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 */
bflush(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct buf *flist;
	int s;

loop:
	s = splbio();
	for (flist = bfreelist; flist < &bfreelist[BQ_EMPTY]; flist++)
	for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
		if ((bp->b_flags & B_DELWRI) == 0)
			continue;
		if (dev == NODEV || dev == bp->b_dev) {
			bp->b_flags |= B_ASYNC;
			notavail(bp);
			bwrite(bp);
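			/*
			 * notavail and bwrite unlink the buffer and can
			 * rearrange the free lists, so restart the scan
			 * from the top rather than continue with stale
			 * list pointers.
			 */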
			splx(s);
			goto loop;
		}
	}
	splx(s);
}

/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0 set a generalized code.
 */
geterror(bp)
	register struct buf *bp;
{
	int error = 0;

	if (bp->b_flags&B_ERROR)
		if ((error = bp->b_error)==0)
			return (EIO);
	return (error);
}

/*
 * Invalidate in core blocks belonging to closed or umounted filesystem
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o.  Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty, then
 * properly flush the queues.  Until that happy day, this suffices for
 * correctness.  ... kre
 */
binval(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct bufhd *hp;
#define dp ((struct buf *)hp)
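/*
 * A bufhd holds just the flags word and hash-chain links, matching the
 * leading members of struct buf, so a hash-chain head can be walked with
 * buf pointers; the define above makes that cast explicit.
 */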

	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_dev == dev)
			bp->b_flags |= B_INVAL;
}