Commit | Line | Data |
---|---|---|
e3fe2d69 KM |
1 | /* Copyright (c) 1981 Regents of the University of California */ |
2 | ||
d995d89d | 3 | static char vers[] = "@(#)ffs_alloc.c 1.18 %G%"; |
e3fe2d69 KM |
4 | |
5 | /* alloc.c 4.8 81/03/08 */ | |
6 | ||
7 | #include "../h/param.h" | |
8 | #include "../h/systm.h" | |
9 | #include "../h/mount.h" | |
10 | #include "../h/fs.h" | |
11 | #include "../h/conf.h" | |
12 | #include "../h/buf.h" | |
13 | #include "../h/inode.h" | |
e3fe2d69 KM |
14 | #include "../h/user.h" |
15 | ||
daaf7bee KM |
16 | extern u_long hashalloc(); |
17 | extern ino_t ialloccg(); | |
743f1ef7 KM |
18 | extern daddr_t alloccg(); |
19 | extern daddr_t alloccgblk(); | |
20 | extern daddr_t fragextend(); | |
21 | extern daddr_t blkpref(); | |
22 | extern daddr_t mapsearch(); | |
1d7a08c5 | 23 | extern int inside[], around[]; |
b6407c9d | 24 | extern unsigned char *fragtbl[]; |
e3fe2d69 | 25 | |
502770a3 KM |
26 | /* |
27 | * Allocate a block in the file system. | |
28 | * | |
29 | * The size of the requested block is given, which must be some | |
30 | * multiple of fs_fsize and <= fs_bsize. | |
31 | * A preference may be optionally specified. If a preference is given | |
32 | * the following hierarchy is used to allocate a block: | |
33 | * 1) allocate the requested block. | |
34 | * 2) allocate a rotationally optimal block in the same cylinder. | |
35 | * 3) allocate a block in the same cylinder group. | |
36 | * 4) quadratically rehash into other cylinder groups, until an |
37 | * available block is located. | |
38 | * If no block preference is given the following hierarchy is used |
39 | * to allocate a block: | |
40 | * 1) allocate a block in the cylinder group that contains the | |
41 | * inode for the file. | |
42 | * 2) quadratically rehash into other cylinder groups, until an |
43 | * available block is located. | |
44 | */ | |
e3fe2d69 KM |
45 | struct buf * |
46 | alloc(dev, ip, bpref, size) | |
47 | dev_t dev; | |
f3c028b7 | 48 | register struct inode *ip; |
e3fe2d69 KM |
49 | daddr_t bpref; |
50 | int size; | |
51 | { | |
52 | daddr_t bno; | |
53 | register struct fs *fs; | |
f3c028b7 | 54 | register struct buf *bp; |
e3fe2d69 KM |
55 | int cg; |
56 | ||
57 | fs = getfs(dev); | |
d995d89d | 58 | if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) |
b6407c9d KM |
59 | panic("alloc: bad size"); |
60 | if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) | |
0947395d KM |
61 | goto nospace; |
62 | if (u.u_uid != 0 && | |
b6407c9d KM |
63 | fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree < |
64 | fs->fs_dsize * fs->fs_minfree / 100) | |
e3fe2d69 | 65 | goto nospace; |
260e5e3c KM |
66 | if (bpref >= fs->fs_size) |
67 | bpref = 0; | |
e3fe2d69 | 68 | if (bpref == 0) |
6994bf5d | 69 | cg = itog(fs, ip->i_number); |
e3fe2d69 | 70 | else |
6994bf5d | 71 | cg = dtog(fs, bpref); |
daaf7bee | 72 | bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, size, alloccg); |
e3fe2d69 KM |
73 | if (bno == 0) |
74 | goto nospace; | |
b6407c9d | 75 | bp = getblk(dev, fsbtodb(fs, bno), size); |
e3fe2d69 KM |
76 | clrbuf(bp); |
77 | return (bp); | |
78 | nospace: | |
79 | fserr(fs, "file system full"); | |
80 | uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); | |
81 | u.u_error = ENOSPC; | |
82 | return (NULL); | |
83 | } | |
84 | ||
502770a3 KM |
85 | /* |
86 | * Reallocate a fragment to a bigger size | |
87 | * | |
88 | * The number and size of the old block is given, and a preference | |
89 | * and new size is also specified. The allocator attempts to extend | |
90 | * the original block. Failing that, the regular block allocator is | |
91 | * invoked to get an appropriate block. | |
92 | */ | |
07670f7d | 93 | struct buf * |
daaf7bee | 94 | realloccg(dev, bprev, bpref, osize, nsize) |
07670f7d | 95 | dev_t dev; |
743f1ef7 | 96 | daddr_t bprev, bpref; |
07670f7d KM |
97 | int osize, nsize; |
98 | { | |
99 | daddr_t bno; | |
100 | register struct fs *fs; | |
f3c028b7 KM |
101 | register struct buf *bp, *obp; |
102 | caddr_t cp; | |
07670f7d KM |
103 | int cg; |
104 | ||
105 | fs = getfs(dev); | |
d995d89d KM |
106 | if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 || |
107 | (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) | |
b6407c9d | 108 | panic("realloccg: bad size"); |
0947395d | 109 | if (u.u_uid != 0 && |
b6407c9d KM |
110 | fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree < |
111 | fs->fs_dsize * fs->fs_minfree / 100) | |
0947395d | 112 | goto nospace; |
502770a3 | 113 | if (bprev != 0) |
6994bf5d | 114 | cg = dtog(fs, bprev); |
502770a3 KM |
115 | else |
116 | panic("realloccg: bad bprev"); | |
f3c028b7 KM |
117 | bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize); |
118 | if (bno != 0) { | |
b6407c9d | 119 | bp = bread(dev, fsbtodb(fs, bno), osize); |
d995d89d KM |
120 | if (bp->b_flags & B_ERROR) { |
121 | brelse(bp); | |
122 | return 0; | |
123 | } | |
f3c028b7 KM |
124 | bp->b_bcount = nsize; |
125 | blkclr(bp->b_un.b_addr + osize, nsize - osize); | |
126 | return (bp); | |
127 | } | |
260e5e3c KM |
128 | if (bpref >= fs->fs_size) |
129 | bpref = 0; | |
daaf7bee | 130 | bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg); |
f3c028b7 KM |
131 | if (bno != 0) { |
132 | /* | |
133 | * make a new copy | |
134 | */ | |
b6407c9d | 135 | obp = bread(dev, fsbtodb(fs, bprev), osize); |
d995d89d KM |
136 | if (obp->b_flags & B_ERROR) { |
137 | brelse(obp); | |
138 | return 0; | |
139 | } | |
b6407c9d | 140 | bp = getblk(dev, fsbtodb(fs, bno), nsize); |
f3c028b7 KM |
141 | cp = bp->b_un.b_addr; |
142 | bp->b_un.b_addr = obp->b_un.b_addr; | |
143 | obp->b_un.b_addr = cp; | |
144 | obp->b_flags |= B_INVAL; | |
145 | brelse(obp); | |
daaf7bee | 146 | fre(dev, bprev, (off_t)osize); |
f3c028b7 KM |
147 | blkclr(bp->b_un.b_addr + osize, nsize - osize); |
148 | return(bp); | |
149 | } | |
0947395d | 150 | nospace: |
f3c028b7 KM |
151 | /* |
152 | * no space available | |
153 | */ | |
07670f7d KM |
154 | fserr(fs, "file system full"); |
155 | uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); | |
156 | u.u_error = ENOSPC; | |
157 | return (NULL); | |
158 | } | |
159 | ||
502770a3 KM |
160 | /* |
161 | * Allocate an inode in the file system. | |
162 | * | |
163 | * A preference may be optionally specified. If a preference is given | |
164 | * the following hierarchy is used to allocate an inode: | |
165 | * 1) allocate the requested inode. | |
166 | * 2) allocate an inode in the same cylinder group. | |
167 | * 3) quadratically rehash into other cylinder groups, until an |
168 | * available inode is located. | |
169 | * If no inode preference is given the following hierarchy is used |
170 | * to allocate an inode: | |
171 | * 1) allocate an inode in cylinder group 0. | |
172 | * 2) quadratically rehash into other cylinder groups, until an |
173 | * available inode is located. | |
174 | */ | |
e3fe2d69 KM |
175 | struct inode * |
176 | ialloc(dev, ipref, mode) | |
177 | dev_t dev; | |
178 | ino_t ipref; | |
179 | int mode; | |
180 | { | |
daaf7bee | 181 | ino_t ino; |
e3fe2d69 KM |
182 | register struct fs *fs; |
183 | register struct inode *ip; | |
184 | int cg; | |
185 | ||
186 | fs = getfs(dev); | |
0947395d | 187 | if (fs->fs_cstotal.cs_nifree == 0) |
e3fe2d69 | 188 | goto noinodes; |
260e5e3c KM |
189 | if (ipref >= fs->fs_ncg * fs->fs_ipg) |
190 | ipref = 0; | |
6994bf5d | 191 | cg = itog(fs, ipref); |
daaf7bee | 192 | ino = (ino_t)hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg); |
e3fe2d69 KM |
193 | if (ino == 0) |
194 | goto noinodes; | |
195 | ip = iget(dev, ino); | |
196 | if (ip == NULL) { | |
daaf7bee | 197 | ifree(dev, ino, 0); |
e3fe2d69 KM |
198 | return (NULL); |
199 | } | |
200 | if (ip->i_mode) | |
201 | panic("ialloc: dup alloc"); | |
202 | return (ip); | |
203 | noinodes: | |
204 | fserr(fs, "out of inodes"); | |
205 | uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt); | |
206 | u.u_error = ENOSPC; | |
207 | return (NULL); | |
208 | } | |
209 | ||
743f1ef7 | 210 | /* |
502770a3 KM |
211 | * Find a cylinder to place a directory. |
212 | * | |
213 | * The policy implemented by this algorithm is to select from | |
214 | * among those cylinder groups with above the average number of | |
215 | * free inodes, the one with the smallest number of directories. | |
743f1ef7 KM |
216 | */ |
217 | dirpref(dev) | |
e3fe2d69 KM |
218 | dev_t dev; |
219 | { | |
220 | register struct fs *fs; | |
743f1ef7 | 221 | int cg, minndir, mincg, avgifree; |
e3fe2d69 KM |
222 | |
223 | fs = getfs(dev); | |
0947395d | 224 | avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; |
743f1ef7 | 225 | minndir = fs->fs_ipg; |
e3fe2d69 | 226 | mincg = 0; |
743f1ef7 | 227 | for (cg = 0; cg < fs->fs_ncg; cg++) |
b6407c9d KM |
228 | if (fs->fs_cs(fs, cg).cs_ndir < minndir && |
229 | fs->fs_cs(fs, cg).cs_nifree >= avgifree) { | |
e3fe2d69 | 230 | mincg = cg; |
b6407c9d | 231 | minndir = fs->fs_cs(fs, cg).cs_ndir; |
e3fe2d69 KM |
232 | } |
233 | return (fs->fs_ipg * mincg); | |
234 | } | |
235 | ||
743f1ef7 | 236 | /* |
502770a3 KM |
237 | * Select a cylinder to place a large block of data. |
238 | * | |
239 | * The policy implemented by this algorithm is to maintain a | |
240 | * rotor that sweeps the cylinder groups. When a block is | |
241 | * needed, the rotor is advanced until a cylinder group with | |
242 | * greater than the average number of free blocks is found. | |
743f1ef7 | 243 | */ |
daaf7bee | 244 | daddr_t |
743f1ef7 KM |
245 | blkpref(dev) |
246 | dev_t dev; | |
247 | { | |
248 | register struct fs *fs; | |
249 | int cg, avgbfree; | |
250 | ||
251 | fs = getfs(dev); | |
0947395d | 252 | avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; |
743f1ef7 | 253 | for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++) |
b6407c9d | 254 | if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { |
743f1ef7 | 255 | fs->fs_cgrotor = cg; |
b6407c9d | 256 | return (fs->fs_fpg * cg + fs->fs_frag); |
743f1ef7 KM |
257 | } |
258 | for (cg = 0; cg <= fs->fs_cgrotor; cg++) | |
b6407c9d | 259 | if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { |
743f1ef7 | 260 | fs->fs_cgrotor = cg; |
b6407c9d | 261 | return (fs->fs_fpg * cg + fs->fs_frag); |
743f1ef7 KM |
262 | } |
263 | return (0); | |
264 | } | |
265 | ||
502770a3 KM |
266 | /* |
267 | * Implement the cylinder overflow algorithm. | |
268 | * | |
269 | * The policy implemented by this algorithm is: | |
270 | * 1) allocate the block in its requested cylinder group. | |
271 | * 2) quadratically rehash on the cylinder group number. |
272 | * 3) brute force search for a free block. | |
273 | */ | |
daaf7bee KM |
274 | /*VARARGS5*/ |
275 | u_long | |
e3fe2d69 KM |
276 | hashalloc(dev, fs, cg, pref, size, allocator) |
277 | dev_t dev; | |
278 | register struct fs *fs; | |
279 | int cg; | |
280 | long pref; | |
281 | int size; /* size for data blocks, mode for inodes */ | |
daaf7bee | 282 | u_long (*allocator)(); |
e3fe2d69 KM |
283 | { |
284 | long result; | |
285 | int i, icg = cg; | |
286 | ||
287 | /* | |
288 | * 1: preferred cylinder group | |
289 | */ | |
290 | result = (*allocator)(dev, fs, cg, pref, size); | |
291 | if (result) | |
292 | return (result); | |
293 | /* | |
294 | * 2: quadratic rehash | |
295 | */ | |
296 | for (i = 1; i < fs->fs_ncg; i *= 2) { | |
297 | cg += i; | |
298 | if (cg >= fs->fs_ncg) | |
299 | cg -= fs->fs_ncg; | |
300 | result = (*allocator)(dev, fs, cg, 0, size); | |
301 | if (result) | |
302 | return (result); | |
303 | } | |
304 | /* | |
305 | * 3: brute force search | |
306 | */ | |
307 | cg = icg; | |
308 | for (i = 0; i < fs->fs_ncg; i++) { | |
309 | result = (*allocator)(dev, fs, cg, 0, size); | |
310 | if (result) | |
311 | return (result); | |
312 | cg++; | |
313 | if (cg == fs->fs_ncg) | |
314 | cg = 0; | |
315 | } | |
316 | return (0); | |
317 | } | |
318 | ||
502770a3 KM |
319 | /* |
320 | * Determine whether a fragment can be extended. | |
321 | * | |
322 | * Check to see if the necessary fragments are available, and | |
323 | * if they are, allocate them. | |
324 | */ | |
07670f7d | 325 | daddr_t |
f3c028b7 | 326 | fragextend(dev, fs, cg, bprev, osize, nsize) |
07670f7d KM |
327 | dev_t dev; |
328 | register struct fs *fs; | |
329 | int cg; | |
f3c028b7 | 330 | long bprev; |
07670f7d KM |
331 | int osize, nsize; |
332 | { | |
f3c028b7 KM |
333 | register struct buf *bp; |
334 | register struct cg *cgp; | |
335 | long bno; | |
336 | int frags, bbase; | |
07670f7d KM |
337 | int i; |
338 | ||
d995d89d KM |
339 | frags = numfrags(fs, nsize); |
340 | bbase = fragoff(fs, bprev); | |
b6407c9d | 341 | if (bbase > (bprev + frags - 1) % fs->fs_frag) { |
f3c028b7 KM |
342 | /* cannot extend across a block boundary */ |
343 | return (0); | |
344 | } | |
6994bf5d | 345 | bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
346 | if (bp->b_flags & B_ERROR) { |
347 | brelse(bp); | |
348 | return 0; | |
349 | } | |
07670f7d | 350 | cgp = bp->b_un.b_cg; |
6994bf5d | 351 | bno = dtogd(fs, bprev); |
d995d89d | 352 | for (i = numfrags(fs, osize); i < frags; i++) |
aca50d72 KM |
353 | if (isclr(cgp->cg_free, bno + i)) { |
354 | brelse(bp); | |
355 | return (0); | |
356 | } | |
357 | /* | |
358 | * the current fragment can be extended | |
359 | * deduct the count on fragment being extended into | |
360 | * increase the count on the remaining fragment (if any) | |
361 | * allocate the extended piece | |
362 | */ | |
363 | for (i = frags; i < fs->fs_frag - bbase; i++) | |
f3c028b7 KM |
364 | if (isclr(cgp->cg_free, bno + i)) |
365 | break; | |
d995d89d | 366 | cgp->cg_frsum[i - numfrags(fs, osize)]--; |
aca50d72 KM |
367 | if (i != frags) |
368 | cgp->cg_frsum[i - frags]++; | |
d995d89d | 369 | for (i = numfrags(fs, osize); i < frags; i++) { |
aca50d72 KM |
370 | clrbit(cgp->cg_free, bno + i); |
371 | cgp->cg_cs.cs_nffree--; | |
372 | fs->fs_cstotal.cs_nffree--; | |
373 | fs->fs_cs(fs, cg).cs_nffree--; | |
f3c028b7 | 374 | } |
aca50d72 KM |
375 | fs->fs_fmod++; |
376 | bdwrite(bp); | |
377 | return (bprev); | |
07670f7d KM |
378 | } |
379 | ||
502770a3 KM |
380 | /* |
381 | * Determine whether a block can be allocated. | |
382 | * | |
383 | * Check to see if a block of the appropriate size is available, |
384 | * and if it is, allocate it. | |
385 | */ | |
e3fe2d69 KM |
386 | daddr_t |
387 | alloccg(dev, fs, cg, bpref, size) | |
388 | dev_t dev; | |
f3c028b7 | 389 | register struct fs *fs; |
e3fe2d69 KM |
390 | int cg; |
391 | daddr_t bpref; | |
392 | int size; | |
393 | { | |
f3c028b7 KM |
394 | register struct buf *bp; |
395 | register struct cg *cgp; | |
396 | int bno, frags; | |
397 | int allocsiz; | |
f3c028b7 | 398 | register int i; |
e3fe2d69 | 399 | |
b6407c9d | 400 | if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) |
0947395d | 401 | return (0); |
6994bf5d | 402 | bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
403 | if (bp->b_flags & B_ERROR) { |
404 | brelse(bp); | |
405 | return 0; | |
406 | } | |
e3fe2d69 | 407 | cgp = bp->b_un.b_cg; |
b6407c9d | 408 | if (size == fs->fs_bsize) { |
daaf7bee | 409 | bno = alloccgblk(fs, cgp, bpref); |
f3c028b7 KM |
410 | bdwrite(bp); |
411 | return (bno); | |
412 | } | |
413 | /* | |
414 | * check to see if any fragments are already available | |
415 | * allocsiz is the size which will be allocated, hacking | |
416 | * it down to a smaller size if necessary | |
417 | */ | |
d995d89d | 418 | frags = numfrags(fs, size); |
b6407c9d | 419 | for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) |
f3c028b7 KM |
420 | if (cgp->cg_frsum[allocsiz] != 0) |
421 | break; | |
b6407c9d | 422 | if (allocsiz == fs->fs_frag) { |
f3c028b7 KM |
423 | /* |
424 | * no fragments were available, so a block will be | |
425 | * allocated, and hacked up | |
426 | */ | |
0947395d | 427 | if (cgp->cg_cs.cs_nbfree == 0) { |
f3c028b7 KM |
428 | brelse(bp); |
429 | return (0); | |
430 | } | |
daaf7bee | 431 | bno = alloccgblk(fs, cgp, bpref); |
6994bf5d | 432 | bpref = dtogd(fs, bno); |
b6407c9d | 433 | for (i = frags; i < fs->fs_frag; i++) |
f3c028b7 | 434 | setbit(cgp->cg_free, bpref + i); |
b6407c9d | 435 | i = fs->fs_frag - frags; |
0947395d KM |
436 | cgp->cg_cs.cs_nffree += i; |
437 | fs->fs_cstotal.cs_nffree += i; | |
b6407c9d | 438 | fs->fs_cs(fs, cg).cs_nffree += i; |
f3c028b7 KM |
439 | cgp->cg_frsum[i]++; |
440 | bdwrite(bp); | |
441 | return (bno); | |
442 | } | |
743f1ef7 KM |
443 | bno = mapsearch(fs, cgp, bpref, allocsiz); |
444 | if (bno == 0) | |
445 | return (0); | |
f3c028b7 KM |
446 | for (i = 0; i < frags; i++) |
447 | clrbit(cgp->cg_free, bno + i); | |
0947395d KM |
448 | cgp->cg_cs.cs_nffree -= frags; |
449 | fs->fs_cstotal.cs_nffree -= frags; | |
b6407c9d | 450 | fs->fs_cs(fs, cg).cs_nffree -= frags; |
f3c028b7 KM |
451 | cgp->cg_frsum[allocsiz]--; |
452 | if (frags != allocsiz) | |
453 | cgp->cg_frsum[allocsiz - frags]++; | |
454 | bdwrite(bp); | |
455 | return (cg * fs->fs_fpg + bno); | |
456 | } | |
457 | ||
502770a3 KM |
458 | /* |
459 | * Allocate a block in a cylinder group. | |
460 | * | |
461 | * This algorithm implements the following policy: | |
462 | * 1) allocate the requested block. | |
463 | * 2) allocate a rotationally optimal block in the same cylinder. | |
464 | * 3) allocate the next available block on the block rotor for the | |
465 | * specified cylinder group. | |
466 | * Note that this routine only allocates fs_bsize blocks; these | |
467 | * blocks may be fragmented by the routine that allocates them. | |
468 | */ | |
f3c028b7 | 469 | daddr_t |
daaf7bee | 470 | alloccgblk(fs, cgp, bpref) |
f3c028b7 KM |
471 | struct fs *fs; |
472 | register struct cg *cgp; | |
473 | daddr_t bpref; | |
474 | { | |
743f1ef7 KM |
475 | daddr_t bno; |
476 | int cylno, pos; | |
477 | short *cylbp; | |
aca50d72 | 478 | register int i; |
f3c028b7 | 479 | |
743f1ef7 KM |
480 | if (bpref == 0) { |
481 | bpref = cgp->cg_rotor; | |
aca50d72 KM |
482 | goto norot; |
483 | } | |
484 | bpref &= ~(fs->fs_frag - 1); | |
6994bf5d | 485 | bpref = dtogd(fs, bpref); |
aca50d72 KM |
486 | /* |
487 | * if the requested block is available, use it | |
488 | */ | |
489 | if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) { | |
490 | bno = bpref; | |
491 | goto gotit; | |
492 | } | |
aca50d72 KM |
493 | /* |
494 | * check for a block available on the same cylinder | |
aca50d72 KM |
495 | */ |
496 | cylno = cbtocylno(fs, bpref); | |
502770a3 KM |
497 | if (cgp->cg_btot[cylno] == 0) |
498 | goto norot; | |
499 | if (fs->fs_cpc == 0) { | |
500 | /* | |
501 | * block layout info is not available, so just have | |
502 | * to take any block in this cylinder. | |
503 | */ | |
504 | bpref = howmany(fs->fs_spc * cylno, NSPF(fs)); | |
505 | goto norot; | |
506 | } | |
507 | /* | |
508 | * find a block that is rotationally optimal | |
509 | */ | |
aca50d72 KM |
510 | cylbp = cgp->cg_b[cylno]; |
511 | if (fs->fs_rotdelay == 0) { | |
512 | pos = cbtorpos(fs, bpref); | |
743f1ef7 | 513 | } else { |
743f1ef7 | 514 | /* |
aca50d72 KM |
515 | * here we convert ms of delay to frags as: |
516 | * (frags) = (ms) * (rev/sec) * (sect/rev) / | |
517 | * ((sect/frag) * (ms/sec)) | |
518 | * then round up to the next rotational position | |
743f1ef7 | 519 | */ |
aca50d72 KM |
520 | bpref += fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect / |
521 | (NSPF(fs) * 1000); | |
522 | pos = cbtorpos(fs, bpref); | |
523 | pos = (pos + 1) % NRPOS; | |
524 | } | |
525 | /* | |
526 | * check the summary information to see if a block is | |
527 | * available in the requested cylinder starting at the | |
528 | * optimal rotational position and proceeding around. | |
529 | */ | |
530 | for (i = pos; i < NRPOS; i++) | |
531 | if (cylbp[i] > 0) | |
532 | break; | |
533 | if (i == NRPOS) | |
534 | for (i = 0; i < pos; i++) | |
743f1ef7 KM |
535 | if (cylbp[i] > 0) |
536 | break; | |
aca50d72 KM |
537 | if (cylbp[i] > 0) { |
538 | /* | |
539 | * found a rotational position, now find the actual | |
540 | * block. A panic if none is actually there. | |
541 | */ | |
542 | pos = cylno % fs->fs_cpc; | |
543 | bno = (cylno - pos) * fs->fs_spc / NSPB(fs); | |
544 | if (fs->fs_postbl[pos][i] == -1) | |
545 | panic("alloccgblk: cyl groups corrupted"); | |
546 | for (i = fs->fs_postbl[pos][i]; ; i += fs->fs_rotbl[i]) { | |
547 | if (isblock(fs, cgp->cg_free, bno + i)) { | |
548 | bno = (bno + i) * fs->fs_frag; | |
549 | goto gotit; | |
743f1ef7 | 550 | } |
aca50d72 KM |
551 | if (fs->fs_rotbl[i] == 0) |
552 | break; | |
743f1ef7 | 553 | } |
aca50d72 | 554 | panic("alloccgblk: can't find blk in cyl"); |
e3fe2d69 | 555 | } |
aca50d72 KM |
556 | norot: |
557 | /* | |
558 | * no blocks in the requested cylinder, so take next | |
559 | * available one in this cylinder group. | |
560 | */ | |
b6407c9d | 561 | bno = mapsearch(fs, cgp, bpref, fs->fs_frag); |
743f1ef7 KM |
562 | if (bno == 0) |
563 | return (0); | |
564 | cgp->cg_rotor = bno; | |
e3fe2d69 | 565 | gotit: |
b6407c9d | 566 | clrblock(fs, cgp->cg_free, bno/fs->fs_frag); |
0947395d KM |
567 | cgp->cg_cs.cs_nbfree--; |
568 | fs->fs_cstotal.cs_nbfree--; | |
b6407c9d | 569 | fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--; |
502770a3 KM |
570 | cylno = cbtocylno(fs, bno); |
571 | cgp->cg_b[cylno][cbtorpos(fs, bno)]--; | |
572 | cgp->cg_btot[cylno]--; | |
e3fe2d69 | 573 | fs->fs_fmod++; |
743f1ef7 | 574 | return (cgp->cg_cgx * fs->fs_fpg + bno); |
e3fe2d69 KM |
575 | } |
576 | ||
502770a3 KM |
577 | /* |
578 | * Determine whether an inode can be allocated. | |
579 | * | |
580 | * Check to see if an inode is available, and if it is, | |
581 | * allocate it using the following policy: | |
582 | * 1) allocate the requested inode. | |
583 | * 2) allocate the next available inode after the requested | |
584 | * inode in the specified cylinder group. | |
585 | */ | |
daaf7bee | 586 | ino_t |
e3fe2d69 KM |
587 | ialloccg(dev, fs, cg, ipref, mode) |
588 | dev_t dev; | |
f3c028b7 | 589 | register struct fs *fs; |
e3fe2d69 KM |
590 | int cg; |
591 | daddr_t ipref; | |
592 | int mode; | |
593 | { | |
f3c028b7 KM |
594 | register struct buf *bp; |
595 | register struct cg *cgp; | |
e3fe2d69 KM |
596 | int i; |
597 | ||
b6407c9d | 598 | if (fs->fs_cs(fs, cg).cs_nifree == 0) |
0947395d | 599 | return (0); |
6994bf5d | 600 | bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
601 | if (bp->b_flags & B_ERROR) { |
602 | brelse(bp); | |
603 | return 0; | |
604 | } | |
e3fe2d69 | 605 | cgp = bp->b_un.b_cg; |
e3fe2d69 KM |
606 | if (ipref) { |
607 | ipref %= fs->fs_ipg; | |
608 | if (isclr(cgp->cg_iused, ipref)) | |
609 | goto gotit; | |
610 | } else | |
611 | ipref = cgp->cg_irotor; | |
612 | for (i = 0; i < fs->fs_ipg; i++) { | |
613 | ipref++; | |
614 | if (ipref >= fs->fs_ipg) | |
615 | ipref = 0; | |
616 | if (isclr(cgp->cg_iused, ipref)) { | |
617 | cgp->cg_irotor = ipref; | |
618 | goto gotit; | |
619 | } | |
620 | } | |
621 | brelse(bp); | |
622 | return (0); | |
623 | gotit: | |
624 | setbit(cgp->cg_iused, ipref); | |
0947395d KM |
625 | cgp->cg_cs.cs_nifree--; |
626 | fs->fs_cstotal.cs_nifree--; | |
b6407c9d | 627 | fs->fs_cs(fs, cg).cs_nifree--; |
e3fe2d69 KM |
628 | fs->fs_fmod++; |
629 | if ((mode & IFMT) == IFDIR) { | |
0947395d KM |
630 | cgp->cg_cs.cs_ndir++; |
631 | fs->fs_cstotal.cs_ndir++; | |
b6407c9d | 632 | fs->fs_cs(fs, cg).cs_ndir++; |
e3fe2d69 KM |
633 | } |
634 | bdwrite(bp); | |
635 | return (cg * fs->fs_ipg + ipref); | |
636 | } | |
637 | ||
502770a3 KM |
638 | /* |
639 | * Free a block or fragment. | |
640 | * | |
641 | * The specified block or fragment is placed back in the | |
642 | * free map. If a fragment is deallocated, a possible | |
643 | * block reassembly is checked. | |
644 | */ | |
e3fe2d69 KM |
645 | fre(dev, bno, size) |
646 | dev_t dev; | |
647 | daddr_t bno; | |
daaf7bee | 648 | off_t size; |
e3fe2d69 KM |
649 | { |
650 | register struct fs *fs; | |
651 | register struct cg *cgp; | |
652 | register struct buf *bp; | |
f3c028b7 KM |
653 | int cg, blk, frags, bbase; |
654 | register int i; | |
e3fe2d69 | 655 | |
e3fe2d69 | 656 | fs = getfs(dev); |
d995d89d | 657 | if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) |
b6407c9d | 658 | panic("free: bad size"); |
6994bf5d | 659 | cg = dtog(fs, bno); |
e3fe2d69 KM |
660 | if (badblock(fs, bno)) |
661 | return; | |
6994bf5d | 662 | bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
663 | if (bp->b_flags & B_ERROR) { |
664 | brelse(bp); | |
e3fe2d69 | 665 | return; |
d995d89d | 666 | } |
e3fe2d69 | 667 | cgp = bp->b_un.b_cg; |
6994bf5d | 668 | bno = dtogd(fs, bno); |
b6407c9d KM |
669 | if (size == fs->fs_bsize) { |
670 | if (isblock(fs, cgp->cg_free, bno/fs->fs_frag)) | |
07670f7d | 671 | panic("free: freeing free block"); |
b6407c9d | 672 | setblock(fs, cgp->cg_free, bno/fs->fs_frag); |
0947395d KM |
673 | cgp->cg_cs.cs_nbfree++; |
674 | fs->fs_cstotal.cs_nbfree++; | |
b6407c9d | 675 | fs->fs_cs(fs, cg).cs_nbfree++; |
502770a3 KM |
676 | i = cbtocylno(fs, bno); |
677 | cgp->cg_b[i][cbtorpos(fs, bno)]++; | |
678 | cgp->cg_btot[i]++; | |
07670f7d | 679 | } else { |
b6407c9d | 680 | bbase = bno - (bno % fs->fs_frag); |
f3c028b7 KM |
681 | /* |
682 | * decrement the counts associated with the old frags | |
683 | */ | |
684 | blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) & | |
b6407c9d KM |
685 | (0xff >> (NBBY - fs->fs_frag))); |
686 | fragacct(fs, blk, cgp->cg_frsum, -1); | |
f3c028b7 KM |
687 | /* |
688 | * deallocate the fragment | |
689 | */ | |
d995d89d | 690 | frags = numfrags(fs, size); |
f3c028b7 | 691 | for (i = 0; i < frags; i++) { |
07670f7d KM |
692 | if (isset(cgp->cg_free, bno + i)) |
693 | panic("free: freeing free frag"); | |
694 | setbit(cgp->cg_free, bno + i); | |
0947395d KM |
695 | cgp->cg_cs.cs_nffree++; |
696 | fs->fs_cstotal.cs_nffree++; | |
b6407c9d | 697 | fs->fs_cs(fs, cg).cs_nffree++; |
07670f7d | 698 | } |
f3c028b7 KM |
699 | /* |
700 | * add back in counts associated with the new frags | |
701 | */ | |
702 | blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) & | |
b6407c9d KM |
703 | (0xff >> (NBBY - fs->fs_frag))); |
704 | fragacct(fs, blk, cgp->cg_frsum, 1); | |
f3c028b7 KM |
705 | /* |
706 | * if a complete block has been reassembled, account for it | |
707 | */ | |
b6407c9d KM |
708 | if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) { |
709 | cgp->cg_cs.cs_nffree -= fs->fs_frag; | |
710 | fs->fs_cstotal.cs_nffree -= fs->fs_frag; | |
711 | fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; | |
0947395d KM |
712 | cgp->cg_cs.cs_nbfree++; |
713 | fs->fs_cstotal.cs_nbfree++; | |
b6407c9d | 714 | fs->fs_cs(fs, cg).cs_nbfree++; |
502770a3 KM |
715 | i = cbtocylno(fs, bbase); |
716 | cgp->cg_b[i][cbtorpos(fs, bbase)]++; | |
717 | cgp->cg_btot[i]++; | |
07670f7d KM |
718 | } |
719 | } | |
e3fe2d69 | 720 | fs->fs_fmod++; |
e3fe2d69 KM |
721 | bdwrite(bp); |
722 | } | |
723 | ||
502770a3 KM |
724 | /* |
725 | * Free an inode. | |
726 | * | |
727 | * The specified inode is placed back in the free map. | |
728 | */ | |
e3fe2d69 KM |
729 | ifree(dev, ino, mode) |
730 | dev_t dev; | |
731 | ino_t ino; | |
732 | int mode; | |
733 | { | |
734 | register struct fs *fs; | |
735 | register struct cg *cgp; | |
736 | register struct buf *bp; | |
e3fe2d69 KM |
737 | int cg; |
738 | ||
739 | fs = getfs(dev); | |
740 | if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) | |
741 | panic("ifree: range"); | |
6994bf5d KM |
742 | cg = itog(fs, ino); |
743 | bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); | |
d995d89d KM |
744 | if (bp->b_flags & B_ERROR) { |
745 | brelse(bp); | |
e3fe2d69 | 746 | return; |
d995d89d | 747 | } |
e3fe2d69 KM |
748 | cgp = bp->b_un.b_cg; |
749 | ino %= fs->fs_ipg; | |
750 | if (isclr(cgp->cg_iused, ino)) | |
751 | panic("ifree: freeing free inode"); | |
752 | clrbit(cgp->cg_iused, ino); | |
0947395d KM |
753 | cgp->cg_cs.cs_nifree++; |
754 | fs->fs_cstotal.cs_nifree++; | |
b6407c9d | 755 | fs->fs_cs(fs, cg).cs_nifree++; |
e3fe2d69 | 756 | if ((mode & IFMT) == IFDIR) { |
0947395d KM |
757 | cgp->cg_cs.cs_ndir--; |
758 | fs->fs_cstotal.cs_ndir--; | |
b6407c9d | 759 | fs->fs_cs(fs, cg).cs_ndir--; |
e3fe2d69 KM |
760 | } |
761 | fs->fs_fmod++; | |
762 | bdwrite(bp); | |
763 | } | |
764 | ||
743f1ef7 | 765 | /* |
502770a3 KM |
766 | * Find a block of the specified size in the specified cylinder group. |
767 | * | |
743f1ef7 KM |
768 | * It is a panic if a request is made to find a block if none are |
769 | * available. | |
770 | */ | |
771 | daddr_t | |
772 | mapsearch(fs, cgp, bpref, allocsiz) | |
773 | register struct fs *fs; | |
774 | register struct cg *cgp; | |
775 | daddr_t bpref; | |
776 | int allocsiz; | |
777 | { | |
778 | daddr_t bno; | |
779 | int start, len, loc, i; | |
780 | int blk, field, subfield, pos; | |
781 | ||
782 | /* | |
783 | * find the fragment by searching through the free block | |
784 | * map for an appropriate bit pattern | |
785 | */ | |
786 | if (bpref) | |
6994bf5d | 787 | start = dtogd(fs, bpref) / NBBY; |
743f1ef7 KM |
788 | else |
789 | start = cgp->cg_frotor / NBBY; | |
942bd18b | 790 | len = howmany(fs->fs_fpg, NBBY) - start; |
b6407c9d KM |
791 | loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag], |
792 | 1 << (allocsiz - 1)); | |
743f1ef7 | 793 | if (loc == 0) { |
942bd18b KM |
794 | loc = fs->fs_dblkno / NBBY; |
795 | len = start - loc + 1; | |
796 | start = loc; | |
b6407c9d | 797 | loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag], |
743f1ef7 KM |
798 | 1 << (allocsiz - 1)); |
799 | if (loc == 0) { | |
800 | panic("alloccg: map corrupted"); | |
801 | return (0); | |
802 | } | |
803 | } | |
804 | bno = (start + len - loc) * NBBY; | |
805 | cgp->cg_frotor = bno; | |
806 | /* | |
807 | * found the byte in the map | |
808 | * sift through the bits to find the selected frag | |
809 | */ | |
b6407c9d KM |
810 | for (i = 0; i < NBBY; i += fs->fs_frag) { |
811 | blk = (cgp->cg_free[bno / NBBY] >> i) & | |
812 | (0xff >> NBBY - fs->fs_frag); | |
743f1ef7 KM |
813 | blk <<= 1; |
814 | field = around[allocsiz]; | |
815 | subfield = inside[allocsiz]; | |
b6407c9d | 816 | for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { |
743f1ef7 KM |
817 | if ((blk & field) == subfield) { |
818 | return (bno + i + pos); | |
819 | } | |
820 | field <<= 1; | |
821 | subfield <<= 1; | |
822 | } | |
823 | } | |
824 | panic("alloccg: block not in map"); | |
825 | return (0); | |
826 | } | |
827 | ||
f3c028b7 | 828 | /* |
502770a3 KM |
829 | * Update the frsum fields to reflect addition or deletion |
830 | * of some frags. | |
f3c028b7 | 831 | */ |
b6407c9d KM |
832 | fragacct(fs, fragmap, fraglist, cnt) |
833 | struct fs *fs; | |
1b940b13 | 834 | int fragmap; |
0947395d | 835 | long fraglist[]; |
f3c028b7 KM |
836 | int cnt; |
837 | { | |
838 | int inblk; | |
839 | register int field, subfield; | |
840 | register int siz, pos; | |
841 | ||
b6407c9d | 842 | inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1; |
f3c028b7 | 843 | fragmap <<= 1; |
b6407c9d | 844 | for (siz = 1; siz < fs->fs_frag; siz++) { |
f3c028b7 KM |
845 | if (((1 << siz) & inblk) == 0) |
846 | continue; | |
847 | field = around[siz]; | |
848 | subfield = inside[siz]; | |
b6407c9d | 849 | for (pos = siz; pos <= fs->fs_frag; pos++) { |
f3c028b7 KM |
850 | if ((fragmap & field) == subfield) { |
851 | fraglist[siz] += cnt; | |
852 | pos += siz; | |
853 | field <<= siz; | |
854 | subfield <<= siz; | |
855 | } | |
856 | field <<= 1; | |
857 | subfield <<= 1; | |
858 | } | |
859 | } | |
860 | } | |
861 | ||
502770a3 KM |
862 | /* |
863 | * Check that a specified block number is in range. | |
864 | */ | |
e3fe2d69 KM |
865 | badblock(fs, bn) |
866 | register struct fs *fs; | |
867 | daddr_t bn; | |
868 | { | |
869 | ||
6994bf5d | 870 | if ((unsigned)bn >= fs->fs_size || bn < cgdmin(fs, dtog(fs, bn))) { |
e3fe2d69 KM |
871 | fserr(fs, "bad block"); |
872 | return (1); | |
873 | } | |
874 | return (0); | |
875 | } | |
876 | ||
877 | /* | |
502770a3 KM |
878 | * Getfs maps a device number into a pointer to the incore super block. |
879 | * | |
880 | * The algorithm is a linear search through the mount table. A | |
881 | * consistency check of the super block magic number is performed. | |
e3fe2d69 KM |
882 | * |
883 | * panic: no fs -- the device is not mounted. | |
884 | * this "cannot happen" | |
885 | */ | |
886 | struct fs * | |
887 | getfs(dev) | |
888 | dev_t dev; | |
889 | { | |
890 | register struct mount *mp; | |
891 | register struct fs *fs; | |
892 | ||
893 | for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++) | |
894 | if (mp->m_bufp != NULL && mp->m_dev == dev) { | |
895 | fs = mp->m_bufp->b_un.b_fs; | |
896 | if (fs->fs_magic != FS_MAGIC) | |
897 | panic("getfs: bad magic"); | |
898 | return (fs); | |
899 | } | |
900 | panic("getfs: no fs"); | |
901 | return (NULL); | |
902 | } | |
903 | ||
904 | /* | |
502770a3 KM |
905 | * Fserr prints the name of a file system with an error diagnostic. |
906 | * | |
907 | * The form of the error message is: | |
e3fe2d69 KM |
908 | * fs: error message |
909 | */ | |
910 | fserr(fs, cp) | |
911 | struct fs *fs; | |
912 | char *cp; | |
913 | { | |
914 | ||
915 | printf("%s: %s\n", fs->fs_fsmnt, cp); | |
916 | } | |
917 | ||
918 | /* | |
919 | * Getfsx returns the index in the file system | |
920 | * table of the specified device. The swap device | |
921 | * is also assigned a pseudo-index. The index may | |
922 | * be used as a compressed indication of the location | |
923 | * of a block, recording | |
924 | * <getfsx(dev),blkno> | |
925 | * rather than | |
926 | * <dev, blkno> | |
927 | * provided the information need remain valid only | |
928 | * as long as the file system is mounted. | |
929 | */ | |
930 | getfsx(dev) | |
931 | dev_t dev; | |
932 | { | |
933 | register struct mount *mp; | |
934 | ||
935 | if (dev == swapdev) | |
936 | return (MSWAPX); | |
937 | for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++) | |
938 | if (mp->m_dev == dev) | |
939 | return (mp - &mount[0]); | |
940 | return (-1); | |
941 | } | |
942 | ||
/*
 * Update is the internal name of 'sync'. It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate
 * the writing of the modified super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	/*
	 * updlock makes concurrent syncs best-effort: a second caller
	 * simply returns while one is already in progress.
	 */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			/* only dirty, writable file systems are flushed */
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, SBSIZE);
			/* clear the dirty flag before the write starts */
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/*
			 * Also write the cylinder-group summary area:
			 * fs_cssize bytes starting at fragment fs_csaddr,
			 * one full block (fs_frag fragments) at a time.
			 */
			blks = howmany(fs->fs_cssize, fs->fs_bsize);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev,
				    fsbtodb(fs, fs->fs_csaddr + (i * fs->fs_frag)),
				    fs->fs_bsize);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * inodeNINODE is presumably the end-of-table pointer set up
	 * by param.c (&inode[NINODE]) -- TODO confirm.  Locked inodes
	 * are skipped; the extra i_count reference keeps the inode
	 * from being reclaimed until iput releases it.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
b6407c9d KM |
1005 | |
1006 | /* | |
502770a3 KM |
1007 | * block operations |
1008 | * | |
1009 | * check if a block is available | |
b6407c9d | 1010 | */ |
b6407c9d KM |
1011 | isblock(fs, cp, h) |
1012 | struct fs *fs; | |
1013 | unsigned char *cp; | |
1014 | int h; | |
1015 | { | |
1016 | unsigned char mask; | |
1017 | ||
1018 | switch (fs->fs_frag) { | |
1019 | case 8: | |
1020 | return (cp[h] == 0xff); | |
1021 | case 4: | |
1022 | mask = 0x0f << ((h & 0x1) << 2); | |
1023 | return ((cp[h >> 1] & mask) == mask); | |
1024 | case 2: | |
1025 | mask = 0x03 << ((h & 0x3) << 1); | |
1026 | return ((cp[h >> 2] & mask) == mask); | |
1027 | case 1: | |
1028 | mask = 0x01 << (h & 0x7); | |
1029 | return ((cp[h >> 3] & mask) == mask); | |
1030 | default: | |
1031 | panic("isblock bad fs_frag"); | |
1032 | return; | |
1033 | } | |
1034 | } | |
502770a3 KM |
1035 | |
1036 | /* | |
1037 | * take a block out of the map | |
1038 | */ | |
b6407c9d KM |
1039 | clrblock(fs, cp, h) |
1040 | struct fs *fs; | |
1041 | unsigned char *cp; | |
1042 | int h; | |
1043 | { | |
1044 | switch ((fs)->fs_frag) { | |
1045 | case 8: | |
1046 | cp[h] = 0; | |
1047 | return; | |
1048 | case 4: | |
1049 | cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); | |
1050 | return; | |
1051 | case 2: | |
1052 | cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); | |
1053 | return; | |
1054 | case 1: | |
1055 | cp[h >> 3] &= ~(0x01 << (h & 0x7)); | |
1056 | return; | |
1057 | default: | |
1058 | panic("clrblock bad fs_frag"); | |
1059 | return; | |
1060 | } | |
1061 | } | |
502770a3 KM |
1062 | |
1063 | /* | |
1064 | * put a block into the map | |
1065 | */ | |
b6407c9d KM |
1066 | setblock(fs, cp, h) |
1067 | struct fs *fs; | |
1068 | unsigned char *cp; | |
1069 | int h; | |
1070 | { | |
1071 | switch (fs->fs_frag) { | |
1072 | case 8: | |
1073 | cp[h] = 0xff; | |
1074 | return; | |
1075 | case 4: | |
1076 | cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); | |
1077 | return; | |
1078 | case 2: | |
1079 | cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); | |
1080 | return; | |
1081 | case 1: | |
1082 | cp[h >> 3] |= (0x01 << (h & 0x7)); | |
1083 | return; | |
1084 | default: | |
1085 | panic("setblock bad fs_frag"); | |
1086 | return; | |
1087 | } | |
1088 | } |