Commit | Line | Data |
---|---|---|
e3fe2d69 KM |
1 | /* Copyright (c) 1981 Regents of the University of California */ |
2 | ||
156b8f82 | 3 | static char vers[] = "@(#)lfs_alloc.c 1.20 %G%"; |
e3fe2d69 KM |
4 | |
5 | /* alloc.c 4.8 81/03/08 */ | |
6 | ||
7 | #include "../h/param.h" | |
8 | #include "../h/systm.h" | |
9 | #include "../h/mount.h" | |
10 | #include "../h/fs.h" | |
11 | #include "../h/conf.h" | |
12 | #include "../h/buf.h" | |
13 | #include "../h/inode.h" | |
e3fe2d69 KM |
14 | #include "../h/user.h" |
15 | ||
daaf7bee KM |
16 | extern u_long hashalloc(); |
17 | extern ino_t ialloccg(); | |
743f1ef7 KM |
18 | extern daddr_t alloccg(); |
19 | extern daddr_t alloccgblk(); | |
20 | extern daddr_t fragextend(); | |
21 | extern daddr_t blkpref(); | |
22 | extern daddr_t mapsearch(); | |
1d7a08c5 | 23 | extern int inside[], around[]; |
b6407c9d | 24 | extern unsigned char *fragtbl[]; |
e3fe2d69 | 25 | |
502770a3 KM |
26 | /* |
27 | * Allocate a block in the file system. | |
28 | * | |
29 | * The size of the requested block is given, which must be some | |
30 | * multiple of fs_fsize and <= fs_bsize. | |
31 | * A preference may be optionally specified. If a preference is given | |
32 | * the following hierarchy is used to allocate a block: | |
33 | * 1) allocate the requested block. | |
34 | * 2) allocate a rotationally optimal block in the same cylinder. | |
35 | * 3) allocate a block in the same cylinder group. | |
36 | * 4) quadradically rehash into other cylinder groups, until an | |
37 | * available block is located. | |
38 | * If no block preference is given the following heirarchy is used | |
39 | * to allocate a block: | |
40 | * 1) allocate a block in the cylinder group that contains the | |
41 | * inode for the file. | |
42 | * 2) quadradically rehash into other cylinder groups, until an | |
43 | * available block is located. | |
44 | */ | |
e3fe2d69 | 45 | struct buf * |
f7287e4b | 46 | alloc(ip, bpref, size) |
f3c028b7 | 47 | register struct inode *ip; |
e3fe2d69 KM |
48 | daddr_t bpref; |
49 | int size; | |
50 | { | |
51 | daddr_t bno; | |
52 | register struct fs *fs; | |
f3c028b7 | 53 | register struct buf *bp; |
e3fe2d69 KM |
54 | int cg; |
55 | ||
f7287e4b | 56 | fs = ip->i_fs; |
d995d89d | 57 | if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) |
b6407c9d KM |
58 | panic("alloc: bad size"); |
59 | if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) | |
0947395d KM |
60 | goto nospace; |
61 | if (u.u_uid != 0 && | |
b6407c9d KM |
62 | fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree < |
63 | fs->fs_dsize * fs->fs_minfree / 100) | |
e3fe2d69 | 64 | goto nospace; |
260e5e3c KM |
65 | if (bpref >= fs->fs_size) |
66 | bpref = 0; | |
e3fe2d69 | 67 | if (bpref == 0) |
6994bf5d | 68 | cg = itog(fs, ip->i_number); |
e3fe2d69 | 69 | else |
6994bf5d | 70 | cg = dtog(fs, bpref); |
f7287e4b | 71 | bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size, alloccg); |
e3fe2d69 KM |
72 | if (bno == 0) |
73 | goto nospace; | |
f7287e4b | 74 | bp = getblk(ip->i_dev, fsbtodb(fs, bno), size); |
e3fe2d69 KM |
75 | clrbuf(bp); |
76 | return (bp); | |
77 | nospace: | |
78 | fserr(fs, "file system full"); | |
79 | uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); | |
80 | u.u_error = ENOSPC; | |
81 | return (NULL); | |
82 | } | |
83 | ||
502770a3 KM |
84 | /* |
85 | * Reallocate a fragment to a bigger size | |
86 | * | |
87 | * The number and size of the old block is given, and a preference | |
88 | * and new size is also specified. The allocator attempts to extend | |
89 | * the original block. Failing that, the regular block allocator is | |
90 | * invoked to get an appropriate block. | |
91 | */ | |
07670f7d | 92 | struct buf * |
f7287e4b KM |
93 | realloccg(ip, bprev, bpref, osize, nsize) |
94 | register struct inode *ip; | |
743f1ef7 | 95 | daddr_t bprev, bpref; |
07670f7d KM |
96 | int osize, nsize; |
97 | { | |
98 | daddr_t bno; | |
99 | register struct fs *fs; | |
f3c028b7 KM |
100 | register struct buf *bp, *obp; |
101 | caddr_t cp; | |
07670f7d KM |
102 | int cg; |
103 | ||
f7287e4b | 104 | fs = ip->i_fs; |
d995d89d KM |
105 | if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 || |
106 | (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) | |
b6407c9d | 107 | panic("realloccg: bad size"); |
0947395d | 108 | if (u.u_uid != 0 && |
b6407c9d KM |
109 | fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree < |
110 | fs->fs_dsize * fs->fs_minfree / 100) | |
0947395d | 111 | goto nospace; |
502770a3 | 112 | if (bprev != 0) |
6994bf5d | 113 | cg = dtog(fs, bprev); |
502770a3 KM |
114 | else |
115 | panic("realloccg: bad bprev"); | |
f7287e4b | 116 | bno = fragextend(ip, cg, (long)bprev, osize, nsize); |
f3c028b7 | 117 | if (bno != 0) { |
f7287e4b | 118 | bp = bread(ip->i_dev, fsbtodb(fs, bno), osize); |
d995d89d KM |
119 | if (bp->b_flags & B_ERROR) { |
120 | brelse(bp); | |
121 | return 0; | |
122 | } | |
f3c028b7 KM |
123 | bp->b_bcount = nsize; |
124 | blkclr(bp->b_un.b_addr + osize, nsize - osize); | |
125 | return (bp); | |
126 | } | |
260e5e3c KM |
127 | if (bpref >= fs->fs_size) |
128 | bpref = 0; | |
f7287e4b | 129 | bno = (daddr_t)hashalloc(ip, cg, (long)bpref, nsize, alloccg); |
f3c028b7 KM |
130 | if (bno != 0) { |
131 | /* | |
132 | * make a new copy | |
133 | */ | |
f7287e4b | 134 | obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize); |
d995d89d KM |
135 | if (obp->b_flags & B_ERROR) { |
136 | brelse(obp); | |
137 | return 0; | |
138 | } | |
f7287e4b | 139 | bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize); |
f3c028b7 KM |
140 | cp = bp->b_un.b_addr; |
141 | bp->b_un.b_addr = obp->b_un.b_addr; | |
142 | obp->b_un.b_addr = cp; | |
143 | obp->b_flags |= B_INVAL; | |
144 | brelse(obp); | |
f7287e4b | 145 | fre(ip, bprev, (off_t)osize); |
f3c028b7 KM |
146 | blkclr(bp->b_un.b_addr + osize, nsize - osize); |
147 | return(bp); | |
148 | } | |
0947395d | 149 | nospace: |
f3c028b7 KM |
150 | /* |
151 | * no space available | |
152 | */ | |
07670f7d KM |
153 | fserr(fs, "file system full"); |
154 | uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); | |
155 | u.u_error = ENOSPC; | |
156 | return (NULL); | |
157 | } | |
158 | ||
502770a3 KM |
159 | /* |
160 | * Allocate an inode in the file system. | |
161 | * | |
162 | * A preference may be optionally specified. If a preference is given | |
163 | * the following hierarchy is used to allocate an inode: | |
164 | * 1) allocate the requested inode. | |
165 | * 2) allocate an inode in the same cylinder group. | |
166 | * 3) quadradically rehash into other cylinder groups, until an | |
167 | * available inode is located. | |
168 | * If no inode preference is given the following heirarchy is used | |
169 | * to allocate an inode: | |
170 | * 1) allocate an inode in cylinder group 0. | |
171 | * 2) quadradically rehash into other cylinder groups, until an | |
172 | * available inode is located. | |
173 | */ | |
e3fe2d69 | 174 | struct inode * |
f7287e4b KM |
175 | ialloc(pip, ipref, mode) |
176 | register struct inode *pip; | |
e3fe2d69 KM |
177 | ino_t ipref; |
178 | int mode; | |
179 | { | |
daaf7bee | 180 | ino_t ino; |
e3fe2d69 KM |
181 | register struct fs *fs; |
182 | register struct inode *ip; | |
183 | int cg; | |
184 | ||
f7287e4b | 185 | fs = pip->i_fs; |
0947395d | 186 | if (fs->fs_cstotal.cs_nifree == 0) |
e3fe2d69 | 187 | goto noinodes; |
260e5e3c KM |
188 | if (ipref >= fs->fs_ncg * fs->fs_ipg) |
189 | ipref = 0; | |
6994bf5d | 190 | cg = itog(fs, ipref); |
f7287e4b | 191 | ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg); |
e3fe2d69 KM |
192 | if (ino == 0) |
193 | goto noinodes; | |
f7287e4b | 194 | ip = iget(pip->i_dev, pip->i_fs, ino); |
e3fe2d69 | 195 | if (ip == NULL) { |
f7287e4b | 196 | ifree(ip, ino, 0); |
e3fe2d69 KM |
197 | return (NULL); |
198 | } | |
199 | if (ip->i_mode) | |
200 | panic("ialloc: dup alloc"); | |
201 | return (ip); | |
202 | noinodes: | |
203 | fserr(fs, "out of inodes"); | |
204 | uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt); | |
205 | u.u_error = ENOSPC; | |
206 | return (NULL); | |
207 | } | |
208 | ||
743f1ef7 | 209 | /* |
502770a3 KM |
210 | * Find a cylinder to place a directory. |
211 | * | |
212 | * The policy implemented by this algorithm is to select from | |
213 | * among those cylinder groups with above the average number of | |
214 | * free inodes, the one with the smallest number of directories. | |
743f1ef7 | 215 | */ |
f7287e4b | 216 | dirpref(fs) |
e3fe2d69 | 217 | register struct fs *fs; |
f7287e4b | 218 | { |
743f1ef7 | 219 | int cg, minndir, mincg, avgifree; |
e3fe2d69 | 220 | |
0947395d | 221 | avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; |
743f1ef7 | 222 | minndir = fs->fs_ipg; |
e3fe2d69 | 223 | mincg = 0; |
743f1ef7 | 224 | for (cg = 0; cg < fs->fs_ncg; cg++) |
b6407c9d KM |
225 | if (fs->fs_cs(fs, cg).cs_ndir < minndir && |
226 | fs->fs_cs(fs, cg).cs_nifree >= avgifree) { | |
e3fe2d69 | 227 | mincg = cg; |
b6407c9d | 228 | minndir = fs->fs_cs(fs, cg).cs_ndir; |
e3fe2d69 KM |
229 | } |
230 | return (fs->fs_ipg * mincg); | |
231 | } | |
232 | ||
743f1ef7 | 233 | /* |
502770a3 KM |
234 | * Select a cylinder to place a large block of data. |
235 | * | |
236 | * The policy implemented by this algorithm is to maintain a | |
237 | * rotor that sweeps the cylinder groups. When a block is | |
238 | * needed, the rotor is advanced until a cylinder group with | |
239 | * greater than the average number of free blocks is found. | |
743f1ef7 | 240 | */ |
daaf7bee | 241 | daddr_t |
f7287e4b | 242 | blkpref(fs) |
743f1ef7 | 243 | register struct fs *fs; |
f7287e4b | 244 | { |
743f1ef7 KM |
245 | int cg, avgbfree; |
246 | ||
0947395d | 247 | avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; |
743f1ef7 | 248 | for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++) |
b6407c9d | 249 | if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { |
743f1ef7 | 250 | fs->fs_cgrotor = cg; |
b6407c9d | 251 | return (fs->fs_fpg * cg + fs->fs_frag); |
743f1ef7 KM |
252 | } |
253 | for (cg = 0; cg <= fs->fs_cgrotor; cg++) | |
b6407c9d | 254 | if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { |
743f1ef7 | 255 | fs->fs_cgrotor = cg; |
b6407c9d | 256 | return (fs->fs_fpg * cg + fs->fs_frag); |
743f1ef7 KM |
257 | } |
258 | return (0); | |
259 | } | |
260 | ||
502770a3 KM |
261 | /* |
262 | * Implement the cylinder overflow algorithm. | |
263 | * | |
264 | * The policy implemented by this algorithm is: | |
265 | * 1) allocate the block in its requested cylinder group. | |
266 | * 2) quadradically rehash on the cylinder group number. | |
267 | * 3) brute force search for a free block. | |
268 | */ | |
daaf7bee KM |
269 | /*VARARGS5*/ |
270 | u_long | |
f7287e4b KM |
271 | hashalloc(ip, cg, pref, size, allocator) |
272 | struct inode *ip; | |
e3fe2d69 KM |
273 | int cg; |
274 | long pref; | |
275 | int size; /* size for data blocks, mode for inodes */ | |
daaf7bee | 276 | u_long (*allocator)(); |
e3fe2d69 | 277 | { |
f7287e4b | 278 | register struct fs *fs; |
e3fe2d69 KM |
279 | long result; |
280 | int i, icg = cg; | |
281 | ||
f7287e4b | 282 | fs = ip->i_fs; |
e3fe2d69 KM |
283 | /* |
284 | * 1: preferred cylinder group | |
285 | */ | |
f7287e4b | 286 | result = (*allocator)(ip, cg, pref, size); |
e3fe2d69 KM |
287 | if (result) |
288 | return (result); | |
289 | /* | |
290 | * 2: quadratic rehash | |
291 | */ | |
292 | for (i = 1; i < fs->fs_ncg; i *= 2) { | |
293 | cg += i; | |
294 | if (cg >= fs->fs_ncg) | |
295 | cg -= fs->fs_ncg; | |
f7287e4b | 296 | result = (*allocator)(ip, cg, 0, size); |
e3fe2d69 KM |
297 | if (result) |
298 | return (result); | |
299 | } | |
300 | /* | |
301 | * 3: brute force search | |
302 | */ | |
303 | cg = icg; | |
304 | for (i = 0; i < fs->fs_ncg; i++) { | |
f7287e4b | 305 | result = (*allocator)(ip, cg, 0, size); |
e3fe2d69 KM |
306 | if (result) |
307 | return (result); | |
308 | cg++; | |
309 | if (cg == fs->fs_ncg) | |
310 | cg = 0; | |
311 | } | |
312 | return (0); | |
313 | } | |
314 | ||
502770a3 KM |
315 | /* |
316 | * Determine whether a fragment can be extended. | |
317 | * | |
318 | * Check to see if the necessary fragments are available, and | |
319 | * if they are, allocate them. | |
320 | */ | |
07670f7d | 321 | daddr_t |
f7287e4b KM |
322 | fragextend(ip, cg, bprev, osize, nsize) |
323 | struct inode *ip; | |
07670f7d | 324 | int cg; |
f3c028b7 | 325 | long bprev; |
07670f7d KM |
326 | int osize, nsize; |
327 | { | |
f7287e4b | 328 | register struct fs *fs; |
f3c028b7 KM |
329 | register struct buf *bp; |
330 | register struct cg *cgp; | |
331 | long bno; | |
332 | int frags, bbase; | |
07670f7d KM |
333 | int i; |
334 | ||
f7287e4b | 335 | fs = ip->i_fs; |
d995d89d KM |
336 | frags = numfrags(fs, nsize); |
337 | bbase = fragoff(fs, bprev); | |
b6407c9d | 338 | if (bbase > (bprev + frags - 1) % fs->fs_frag) { |
f3c028b7 KM |
339 | /* cannot extend across a block boundry */ |
340 | return (0); | |
341 | } | |
f7287e4b | 342 | bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
343 | if (bp->b_flags & B_ERROR) { |
344 | brelse(bp); | |
345 | return 0; | |
346 | } | |
07670f7d | 347 | cgp = bp->b_un.b_cg; |
6994bf5d | 348 | bno = dtogd(fs, bprev); |
d995d89d | 349 | for (i = numfrags(fs, osize); i < frags; i++) |
aca50d72 KM |
350 | if (isclr(cgp->cg_free, bno + i)) { |
351 | brelse(bp); | |
352 | return (0); | |
353 | } | |
354 | /* | |
355 | * the current fragment can be extended | |
356 | * deduct the count on fragment being extended into | |
357 | * increase the count on the remaining fragment (if any) | |
358 | * allocate the extended piece | |
359 | */ | |
360 | for (i = frags; i < fs->fs_frag - bbase; i++) | |
f3c028b7 KM |
361 | if (isclr(cgp->cg_free, bno + i)) |
362 | break; | |
d995d89d | 363 | cgp->cg_frsum[i - numfrags(fs, osize)]--; |
aca50d72 KM |
364 | if (i != frags) |
365 | cgp->cg_frsum[i - frags]++; | |
d995d89d | 366 | for (i = numfrags(fs, osize); i < frags; i++) { |
aca50d72 KM |
367 | clrbit(cgp->cg_free, bno + i); |
368 | cgp->cg_cs.cs_nffree--; | |
369 | fs->fs_cstotal.cs_nffree--; | |
370 | fs->fs_cs(fs, cg).cs_nffree--; | |
f3c028b7 | 371 | } |
aca50d72 KM |
372 | fs->fs_fmod++; |
373 | bdwrite(bp); | |
374 | return (bprev); | |
07670f7d KM |
375 | } |
376 | ||
502770a3 KM |
377 | /* |
378 | * Determine whether a block can be allocated. | |
379 | * | |
380 | * Check to see if a block of the apprpriate size is available, | |
381 | * and if it is, allocate it. | |
382 | */ | |
e3fe2d69 | 383 | daddr_t |
f7287e4b KM |
384 | alloccg(ip, cg, bpref, size) |
385 | struct inode *ip; | |
e3fe2d69 KM |
386 | int cg; |
387 | daddr_t bpref; | |
388 | int size; | |
389 | { | |
f7287e4b | 390 | register struct fs *fs; |
f3c028b7 KM |
391 | register struct buf *bp; |
392 | register struct cg *cgp; | |
393 | int bno, frags; | |
394 | int allocsiz; | |
f3c028b7 | 395 | register int i; |
e3fe2d69 | 396 | |
f7287e4b | 397 | fs = ip->i_fs; |
b6407c9d | 398 | if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) |
0947395d | 399 | return (0); |
f7287e4b | 400 | bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
401 | if (bp->b_flags & B_ERROR) { |
402 | brelse(bp); | |
403 | return 0; | |
404 | } | |
e3fe2d69 | 405 | cgp = bp->b_un.b_cg; |
b6407c9d | 406 | if (size == fs->fs_bsize) { |
daaf7bee | 407 | bno = alloccgblk(fs, cgp, bpref); |
f3c028b7 KM |
408 | bdwrite(bp); |
409 | return (bno); | |
410 | } | |
411 | /* | |
412 | * check to see if any fragments are already available | |
413 | * allocsiz is the size which will be allocated, hacking | |
414 | * it down to a smaller size if necessary | |
415 | */ | |
d995d89d | 416 | frags = numfrags(fs, size); |
b6407c9d | 417 | for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) |
f3c028b7 KM |
418 | if (cgp->cg_frsum[allocsiz] != 0) |
419 | break; | |
b6407c9d | 420 | if (allocsiz == fs->fs_frag) { |
f3c028b7 KM |
421 | /* |
422 | * no fragments were available, so a block will be | |
423 | * allocated, and hacked up | |
424 | */ | |
0947395d | 425 | if (cgp->cg_cs.cs_nbfree == 0) { |
f3c028b7 KM |
426 | brelse(bp); |
427 | return (0); | |
428 | } | |
daaf7bee | 429 | bno = alloccgblk(fs, cgp, bpref); |
6994bf5d | 430 | bpref = dtogd(fs, bno); |
b6407c9d | 431 | for (i = frags; i < fs->fs_frag; i++) |
f3c028b7 | 432 | setbit(cgp->cg_free, bpref + i); |
b6407c9d | 433 | i = fs->fs_frag - frags; |
0947395d KM |
434 | cgp->cg_cs.cs_nffree += i; |
435 | fs->fs_cstotal.cs_nffree += i; | |
b6407c9d | 436 | fs->fs_cs(fs, cg).cs_nffree += i; |
f3c028b7 KM |
437 | cgp->cg_frsum[i]++; |
438 | bdwrite(bp); | |
439 | return (bno); | |
440 | } | |
743f1ef7 KM |
441 | bno = mapsearch(fs, cgp, bpref, allocsiz); |
442 | if (bno == 0) | |
443 | return (0); | |
f3c028b7 KM |
444 | for (i = 0; i < frags; i++) |
445 | clrbit(cgp->cg_free, bno + i); | |
0947395d KM |
446 | cgp->cg_cs.cs_nffree -= frags; |
447 | fs->fs_cstotal.cs_nffree -= frags; | |
b6407c9d | 448 | fs->fs_cs(fs, cg).cs_nffree -= frags; |
f3c028b7 KM |
449 | cgp->cg_frsum[allocsiz]--; |
450 | if (frags != allocsiz) | |
451 | cgp->cg_frsum[allocsiz - frags]++; | |
452 | bdwrite(bp); | |
453 | return (cg * fs->fs_fpg + bno); | |
454 | } | |
455 | ||
502770a3 KM |
456 | /* |
457 | * Allocate a block in a cylinder group. | |
458 | * | |
459 | * This algorithm implements the following policy: | |
460 | * 1) allocate the requested block. | |
461 | * 2) allocate a rotationally optimal block in the same cylinder. | |
462 | * 3) allocate the next available block on the block rotor for the | |
463 | * specified cylinder group. | |
464 | * Note that this routine only allocates fs_bsize blocks; these | |
465 | * blocks may be fragmented by the routine that allocates them. | |
466 | */ | |
f3c028b7 | 467 | daddr_t |
daaf7bee | 468 | alloccgblk(fs, cgp, bpref) |
f7287e4b | 469 | register struct fs *fs; |
f3c028b7 KM |
470 | register struct cg *cgp; |
471 | daddr_t bpref; | |
472 | { | |
743f1ef7 KM |
473 | daddr_t bno; |
474 | int cylno, pos; | |
475 | short *cylbp; | |
aca50d72 | 476 | register int i; |
f3c028b7 | 477 | |
743f1ef7 KM |
478 | if (bpref == 0) { |
479 | bpref = cgp->cg_rotor; | |
aca50d72 KM |
480 | goto norot; |
481 | } | |
482 | bpref &= ~(fs->fs_frag - 1); | |
6994bf5d | 483 | bpref = dtogd(fs, bpref); |
aca50d72 KM |
484 | /* |
485 | * if the requested block is available, use it | |
486 | */ | |
487 | if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) { | |
488 | bno = bpref; | |
489 | goto gotit; | |
490 | } | |
aca50d72 KM |
491 | /* |
492 | * check for a block available on the same cylinder | |
aca50d72 KM |
493 | */ |
494 | cylno = cbtocylno(fs, bpref); | |
502770a3 KM |
495 | if (cgp->cg_btot[cylno] == 0) |
496 | goto norot; | |
497 | if (fs->fs_cpc == 0) { | |
498 | /* | |
499 | * block layout info is not available, so just have | |
500 | * to take any block in this cylinder. | |
501 | */ | |
502 | bpref = howmany(fs->fs_spc * cylno, NSPF(fs)); | |
503 | goto norot; | |
504 | } | |
505 | /* | |
506 | * find a block that is rotationally optimal | |
507 | */ | |
aca50d72 KM |
508 | cylbp = cgp->cg_b[cylno]; |
509 | if (fs->fs_rotdelay == 0) { | |
510 | pos = cbtorpos(fs, bpref); | |
743f1ef7 | 511 | } else { |
743f1ef7 | 512 | /* |
aca50d72 KM |
513 | * here we convert ms of delay to frags as: |
514 | * (frags) = (ms) * (rev/sec) * (sect/rev) / | |
515 | * ((sect/frag) * (ms/sec)) | |
516 | * then round up to the next rotational position | |
743f1ef7 | 517 | */ |
aca50d72 KM |
518 | bpref += fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect / |
519 | (NSPF(fs) * 1000); | |
520 | pos = cbtorpos(fs, bpref); | |
521 | pos = (pos + 1) % NRPOS; | |
522 | } | |
523 | /* | |
524 | * check the summary information to see if a block is | |
525 | * available in the requested cylinder starting at the | |
526 | * optimal rotational position and proceeding around. | |
527 | */ | |
528 | for (i = pos; i < NRPOS; i++) | |
529 | if (cylbp[i] > 0) | |
530 | break; | |
531 | if (i == NRPOS) | |
532 | for (i = 0; i < pos; i++) | |
743f1ef7 KM |
533 | if (cylbp[i] > 0) |
534 | break; | |
aca50d72 KM |
535 | if (cylbp[i] > 0) { |
536 | /* | |
537 | * found a rotational position, now find the actual | |
538 | * block. A panic if none is actually there. | |
539 | */ | |
540 | pos = cylno % fs->fs_cpc; | |
541 | bno = (cylno - pos) * fs->fs_spc / NSPB(fs); | |
542 | if (fs->fs_postbl[pos][i] == -1) | |
543 | panic("alloccgblk: cyl groups corrupted"); | |
544 | for (i = fs->fs_postbl[pos][i]; ; i += fs->fs_rotbl[i]) { | |
545 | if (isblock(fs, cgp->cg_free, bno + i)) { | |
546 | bno = (bno + i) * fs->fs_frag; | |
547 | goto gotit; | |
743f1ef7 | 548 | } |
aca50d72 KM |
549 | if (fs->fs_rotbl[i] == 0) |
550 | break; | |
743f1ef7 | 551 | } |
aca50d72 | 552 | panic("alloccgblk: can't find blk in cyl"); |
e3fe2d69 | 553 | } |
aca50d72 KM |
554 | norot: |
555 | /* | |
556 | * no blocks in the requested cylinder, so take next | |
557 | * available one in this cylinder group. | |
558 | */ | |
b6407c9d | 559 | bno = mapsearch(fs, cgp, bpref, fs->fs_frag); |
743f1ef7 KM |
560 | if (bno == 0) |
561 | return (0); | |
562 | cgp->cg_rotor = bno; | |
e3fe2d69 | 563 | gotit: |
b6407c9d | 564 | clrblock(fs, cgp->cg_free, bno/fs->fs_frag); |
0947395d KM |
565 | cgp->cg_cs.cs_nbfree--; |
566 | fs->fs_cstotal.cs_nbfree--; | |
b6407c9d | 567 | fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--; |
502770a3 KM |
568 | cylno = cbtocylno(fs, bno); |
569 | cgp->cg_b[cylno][cbtorpos(fs, bno)]--; | |
570 | cgp->cg_btot[cylno]--; | |
e3fe2d69 | 571 | fs->fs_fmod++; |
743f1ef7 | 572 | return (cgp->cg_cgx * fs->fs_fpg + bno); |
e3fe2d69 KM |
573 | } |
574 | ||
502770a3 KM |
575 | /* |
576 | * Determine whether an inode can be allocated. | |
577 | * | |
578 | * Check to see if an inode is available, and if it is, | |
579 | * allocate it using the following policy: | |
580 | * 1) allocate the requested inode. | |
581 | * 2) allocate the next available inode after the requested | |
582 | * inode in the specified cylinder group. | |
583 | */ | |
daaf7bee | 584 | ino_t |
f7287e4b KM |
585 | ialloccg(ip, cg, ipref, mode) |
586 | struct inode *ip; | |
e3fe2d69 KM |
587 | int cg; |
588 | daddr_t ipref; | |
589 | int mode; | |
590 | { | |
f7287e4b | 591 | register struct fs *fs; |
f3c028b7 KM |
592 | register struct buf *bp; |
593 | register struct cg *cgp; | |
e3fe2d69 KM |
594 | int i; |
595 | ||
f7287e4b | 596 | fs = ip->i_fs; |
b6407c9d | 597 | if (fs->fs_cs(fs, cg).cs_nifree == 0) |
0947395d | 598 | return (0); |
f7287e4b | 599 | bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
600 | if (bp->b_flags & B_ERROR) { |
601 | brelse(bp); | |
602 | return 0; | |
603 | } | |
e3fe2d69 | 604 | cgp = bp->b_un.b_cg; |
e3fe2d69 KM |
605 | if (ipref) { |
606 | ipref %= fs->fs_ipg; | |
607 | if (isclr(cgp->cg_iused, ipref)) | |
608 | goto gotit; | |
609 | } else | |
610 | ipref = cgp->cg_irotor; | |
611 | for (i = 0; i < fs->fs_ipg; i++) { | |
612 | ipref++; | |
613 | if (ipref >= fs->fs_ipg) | |
614 | ipref = 0; | |
615 | if (isclr(cgp->cg_iused, ipref)) { | |
616 | cgp->cg_irotor = ipref; | |
617 | goto gotit; | |
618 | } | |
619 | } | |
620 | brelse(bp); | |
621 | return (0); | |
622 | gotit: | |
623 | setbit(cgp->cg_iused, ipref); | |
0947395d KM |
624 | cgp->cg_cs.cs_nifree--; |
625 | fs->fs_cstotal.cs_nifree--; | |
b6407c9d | 626 | fs->fs_cs(fs, cg).cs_nifree--; |
e3fe2d69 KM |
627 | fs->fs_fmod++; |
628 | if ((mode & IFMT) == IFDIR) { | |
0947395d KM |
629 | cgp->cg_cs.cs_ndir++; |
630 | fs->fs_cstotal.cs_ndir++; | |
b6407c9d | 631 | fs->fs_cs(fs, cg).cs_ndir++; |
e3fe2d69 KM |
632 | } |
633 | bdwrite(bp); | |
634 | return (cg * fs->fs_ipg + ipref); | |
635 | } | |
636 | ||
502770a3 KM |
637 | /* |
638 | * Free a block or fragment. | |
639 | * | |
640 | * The specified block or fragment is placed back in the | |
641 | * free map. If a fragment is deallocated, a possible | |
642 | * block reassembly is checked. | |
643 | */ | |
f7287e4b KM |
644 | fre(ip, bno, size) |
645 | register struct inode *ip; | |
e3fe2d69 | 646 | daddr_t bno; |
daaf7bee | 647 | off_t size; |
e3fe2d69 KM |
648 | { |
649 | register struct fs *fs; | |
650 | register struct cg *cgp; | |
651 | register struct buf *bp; | |
f3c028b7 KM |
652 | int cg, blk, frags, bbase; |
653 | register int i; | |
e3fe2d69 | 654 | |
f7287e4b | 655 | fs = ip->i_fs; |
d995d89d | 656 | if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) |
b6407c9d | 657 | panic("free: bad size"); |
6994bf5d | 658 | cg = dtog(fs, bno); |
e3fe2d69 KM |
659 | if (badblock(fs, bno)) |
660 | return; | |
f7287e4b | 661 | bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
662 | if (bp->b_flags & B_ERROR) { |
663 | brelse(bp); | |
e3fe2d69 | 664 | return; |
d995d89d | 665 | } |
e3fe2d69 | 666 | cgp = bp->b_un.b_cg; |
6994bf5d | 667 | bno = dtogd(fs, bno); |
b6407c9d KM |
668 | if (size == fs->fs_bsize) { |
669 | if (isblock(fs, cgp->cg_free, bno/fs->fs_frag)) | |
07670f7d | 670 | panic("free: freeing free block"); |
b6407c9d | 671 | setblock(fs, cgp->cg_free, bno/fs->fs_frag); |
0947395d KM |
672 | cgp->cg_cs.cs_nbfree++; |
673 | fs->fs_cstotal.cs_nbfree++; | |
b6407c9d | 674 | fs->fs_cs(fs, cg).cs_nbfree++; |
502770a3 KM |
675 | i = cbtocylno(fs, bno); |
676 | cgp->cg_b[i][cbtorpos(fs, bno)]++; | |
677 | cgp->cg_btot[i]++; | |
07670f7d | 678 | } else { |
b6407c9d | 679 | bbase = bno - (bno % fs->fs_frag); |
f3c028b7 KM |
680 | /* |
681 | * decrement the counts associated with the old frags | |
682 | */ | |
683 | blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) & | |
b6407c9d KM |
684 | (0xff >> (NBBY - fs->fs_frag))); |
685 | fragacct(fs, blk, cgp->cg_frsum, -1); | |
f3c028b7 KM |
686 | /* |
687 | * deallocate the fragment | |
688 | */ | |
d995d89d | 689 | frags = numfrags(fs, size); |
f3c028b7 | 690 | for (i = 0; i < frags; i++) { |
07670f7d KM |
691 | if (isset(cgp->cg_free, bno + i)) |
692 | panic("free: freeing free frag"); | |
693 | setbit(cgp->cg_free, bno + i); | |
0947395d KM |
694 | cgp->cg_cs.cs_nffree++; |
695 | fs->fs_cstotal.cs_nffree++; | |
b6407c9d | 696 | fs->fs_cs(fs, cg).cs_nffree++; |
07670f7d | 697 | } |
f3c028b7 KM |
698 | /* |
699 | * add back in counts associated with the new frags | |
700 | */ | |
701 | blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) & | |
b6407c9d KM |
702 | (0xff >> (NBBY - fs->fs_frag))); |
703 | fragacct(fs, blk, cgp->cg_frsum, 1); | |
f3c028b7 KM |
704 | /* |
705 | * if a complete block has been reassembled, account for it | |
706 | */ | |
b6407c9d KM |
707 | if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) { |
708 | cgp->cg_cs.cs_nffree -= fs->fs_frag; | |
709 | fs->fs_cstotal.cs_nffree -= fs->fs_frag; | |
710 | fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; | |
0947395d KM |
711 | cgp->cg_cs.cs_nbfree++; |
712 | fs->fs_cstotal.cs_nbfree++; | |
b6407c9d | 713 | fs->fs_cs(fs, cg).cs_nbfree++; |
502770a3 KM |
714 | i = cbtocylno(fs, bbase); |
715 | cgp->cg_b[i][cbtorpos(fs, bbase)]++; | |
716 | cgp->cg_btot[i]++; | |
07670f7d KM |
717 | } |
718 | } | |
e3fe2d69 | 719 | fs->fs_fmod++; |
e3fe2d69 KM |
720 | bdwrite(bp); |
721 | } | |
722 | ||
502770a3 KM |
723 | /* |
724 | * Free an inode. | |
725 | * | |
726 | * The specified inode is placed back in the free map. | |
727 | */ | |
f7287e4b KM |
728 | ifree(ip, ino, mode) |
729 | struct inode *ip; | |
e3fe2d69 KM |
730 | ino_t ino; |
731 | int mode; | |
732 | { | |
733 | register struct fs *fs; | |
734 | register struct cg *cgp; | |
735 | register struct buf *bp; | |
e3fe2d69 KM |
736 | int cg; |
737 | ||
f7287e4b | 738 | fs = ip->i_fs; |
e3fe2d69 KM |
739 | if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) |
740 | panic("ifree: range"); | |
6994bf5d | 741 | cg = itog(fs, ino); |
f7287e4b | 742 | bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); |
d995d89d KM |
743 | if (bp->b_flags & B_ERROR) { |
744 | brelse(bp); | |
e3fe2d69 | 745 | return; |
d995d89d | 746 | } |
e3fe2d69 KM |
747 | cgp = bp->b_un.b_cg; |
748 | ino %= fs->fs_ipg; | |
749 | if (isclr(cgp->cg_iused, ino)) | |
750 | panic("ifree: freeing free inode"); | |
751 | clrbit(cgp->cg_iused, ino); | |
0947395d KM |
752 | cgp->cg_cs.cs_nifree++; |
753 | fs->fs_cstotal.cs_nifree++; | |
b6407c9d | 754 | fs->fs_cs(fs, cg).cs_nifree++; |
e3fe2d69 | 755 | if ((mode & IFMT) == IFDIR) { |
0947395d KM |
756 | cgp->cg_cs.cs_ndir--; |
757 | fs->fs_cstotal.cs_ndir--; | |
b6407c9d | 758 | fs->fs_cs(fs, cg).cs_ndir--; |
e3fe2d69 KM |
759 | } |
760 | fs->fs_fmod++; | |
761 | bdwrite(bp); | |
762 | } | |
763 | ||
743f1ef7 | 764 | /* |
502770a3 KM |
765 | * Find a block of the specified size in the specified cylinder group. |
766 | * | |
743f1ef7 KM |
767 | * It is a panic if a request is made to find a block if none are |
768 | * available. | |
769 | */ | |
770 | daddr_t | |
771 | mapsearch(fs, cgp, bpref, allocsiz) | |
772 | register struct fs *fs; | |
773 | register struct cg *cgp; | |
774 | daddr_t bpref; | |
775 | int allocsiz; | |
776 | { | |
777 | daddr_t bno; | |
778 | int start, len, loc, i; | |
779 | int blk, field, subfield, pos; | |
780 | ||
781 | /* | |
782 | * find the fragment by searching through the free block | |
783 | * map for an appropriate bit pattern | |
784 | */ | |
785 | if (bpref) | |
6994bf5d | 786 | start = dtogd(fs, bpref) / NBBY; |
743f1ef7 KM |
787 | else |
788 | start = cgp->cg_frotor / NBBY; | |
942bd18b | 789 | len = howmany(fs->fs_fpg, NBBY) - start; |
b6407c9d | 790 | loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag], |
156b8f82 | 791 | 1 << (allocsiz - 1 + (fs->fs_frag % NBBY))); |
743f1ef7 | 792 | if (loc == 0) { |
942bd18b KM |
793 | loc = fs->fs_dblkno / NBBY; |
794 | len = start - loc + 1; | |
795 | start = loc; | |
b6407c9d | 796 | loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag], |
156b8f82 | 797 | 1 << (allocsiz - 1 + (fs->fs_frag % NBBY))); |
743f1ef7 KM |
798 | if (loc == 0) { |
799 | panic("alloccg: map corrupted"); | |
800 | return (0); | |
801 | } | |
802 | } | |
803 | bno = (start + len - loc) * NBBY; | |
804 | cgp->cg_frotor = bno; | |
805 | /* | |
806 | * found the byte in the map | |
807 | * sift through the bits to find the selected frag | |
808 | */ | |
b6407c9d KM |
809 | for (i = 0; i < NBBY; i += fs->fs_frag) { |
810 | blk = (cgp->cg_free[bno / NBBY] >> i) & | |
811 | (0xff >> NBBY - fs->fs_frag); | |
743f1ef7 KM |
812 | blk <<= 1; |
813 | field = around[allocsiz]; | |
814 | subfield = inside[allocsiz]; | |
b6407c9d | 815 | for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { |
743f1ef7 KM |
816 | if ((blk & field) == subfield) { |
817 | return (bno + i + pos); | |
818 | } | |
819 | field <<= 1; | |
820 | subfield <<= 1; | |
821 | } | |
822 | } | |
823 | panic("alloccg: block not in map"); | |
824 | return (0); | |
825 | } | |
826 | ||
f3c028b7 | 827 | /* |
502770a3 KM |
828 | * Update the frsum fields to reflect addition or deletion |
829 | * of some frags. | |
f3c028b7 | 830 | */ |
b6407c9d KM |
831 | fragacct(fs, fragmap, fraglist, cnt) |
832 | struct fs *fs; | |
1b940b13 | 833 | int fragmap; |
0947395d | 834 | long fraglist[]; |
f3c028b7 KM |
835 | int cnt; |
836 | { | |
837 | int inblk; | |
838 | register int field, subfield; | |
839 | register int siz, pos; | |
840 | ||
b6407c9d | 841 | inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1; |
f3c028b7 | 842 | fragmap <<= 1; |
b6407c9d | 843 | for (siz = 1; siz < fs->fs_frag; siz++) { |
156b8f82 | 844 | if ((inblk & (1 << (siz + (fs->fs_frag % NBBY)))) == 0) |
f3c028b7 KM |
845 | continue; |
846 | field = around[siz]; | |
847 | subfield = inside[siz]; | |
b6407c9d | 848 | for (pos = siz; pos <= fs->fs_frag; pos++) { |
f3c028b7 KM |
849 | if ((fragmap & field) == subfield) { |
850 | fraglist[siz] += cnt; | |
851 | pos += siz; | |
852 | field <<= siz; | |
853 | subfield <<= siz; | |
854 | } | |
855 | field <<= 1; | |
856 | subfield <<= 1; | |
857 | } | |
858 | } | |
859 | } | |
860 | ||
502770a3 KM |
861 | /* |
862 | * Check that a specified block number is in range. | |
863 | */ | |
e3fe2d69 KM |
864 | badblock(fs, bn) |
865 | register struct fs *fs; | |
866 | daddr_t bn; | |
867 | { | |
868 | ||
6994bf5d | 869 | if ((unsigned)bn >= fs->fs_size || bn < cgdmin(fs, dtog(fs, bn))) { |
e3fe2d69 KM |
870 | fserr(fs, "bad block"); |
871 | return (1); | |
872 | } | |
873 | return (0); | |
874 | } | |
875 | ||
876 | /* | |
502770a3 KM |
877 | * Getfs maps a device number into a pointer to the incore super block. |
878 | * | |
879 | * The algorithm is a linear search through the mount table. A | |
880 | * consistency check of the super block magic number is performed. | |
e3fe2d69 KM |
881 | * |
882 | * panic: no fs -- the device is not mounted. | |
883 | * this "cannot happen" | |
884 | */ | |
885 | struct fs * | |
886 | getfs(dev) | |
887 | dev_t dev; | |
888 | { | |
889 | register struct mount *mp; | |
890 | register struct fs *fs; | |
891 | ||
892 | for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++) | |
893 | if (mp->m_bufp != NULL && mp->m_dev == dev) { | |
894 | fs = mp->m_bufp->b_un.b_fs; | |
895 | if (fs->fs_magic != FS_MAGIC) | |
896 | panic("getfs: bad magic"); | |
897 | return (fs); | |
898 | } | |
899 | panic("getfs: no fs"); | |
900 | return (NULL); | |
901 | } | |
902 | ||
/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;	/* file system; its mount point name prefixes the message */
	char *cp;	/* diagnostic text to print after the colon */
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}
916 | ||
917 | /* | |
918 | * Getfsx returns the index in the file system | |
919 | * table of the specified device. The swap device | |
920 | * is also assigned a pseudo-index. The index may | |
921 | * be used as a compressed indication of the location | |
922 | * of a block, recording | |
923 | * <getfsx(dev),blkno> | |
924 | * rather than | |
925 | * <dev, blkno> | |
926 | * provided the information need remain valid only | |
927 | * as long as the file system is mounted. | |
928 | */ | |
929 | getfsx(dev) | |
930 | dev_t dev; | |
931 | { | |
932 | register struct mount *mp; | |
933 | ||
934 | if (dev == swapdev) | |
935 | return (MSWAPX); | |
936 | for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++) | |
937 | if (mp->m_dev == dev) | |
938 | return (mp - &mount[0]); | |
939 | return (-1); | |
940 | } | |
941 | ||
/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate
 * the writing of the modified super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	/* updlock serializes syncs: if one is already in progress,
	 * this call is simply a no-op. */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			/* skip file systems not modified since last sync */
			if (fs->fs_fmod == 0)
				continue;
			/* a modified read-only file system is corruption */
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, SBSIZE);
			/* clear the modified flag and stamp the time
			 * before the write goes out */
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/* flush the cylinder group summary area, one
			 * file system block at a time */
			blks = howmany(fs->fs_cssize, fs->fs_bsize);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev,
				    fsbtodb(fs, fs->fs_csaddr + (i * fs->fs_frag)),
				    fs->fs_bsize);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * NOTE(review): inodeNINODE appears to be the end of the in-core
	 * inode table (declared elsewhere) -- confirm against inode.h.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			/* lock and reference the inode, then let iput
			 * drop both after the update */
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
b6407c9d KM |
1004 | |
1005 | /* | |
502770a3 KM |
1006 | * block operations |
1007 | * | |
1008 | * check if a block is available | |
b6407c9d | 1009 | */ |
b6407c9d KM |
1010 | isblock(fs, cp, h) |
1011 | struct fs *fs; | |
1012 | unsigned char *cp; | |
1013 | int h; | |
1014 | { | |
1015 | unsigned char mask; | |
1016 | ||
1017 | switch (fs->fs_frag) { | |
1018 | case 8: | |
1019 | return (cp[h] == 0xff); | |
1020 | case 4: | |
1021 | mask = 0x0f << ((h & 0x1) << 2); | |
1022 | return ((cp[h >> 1] & mask) == mask); | |
1023 | case 2: | |
1024 | mask = 0x03 << ((h & 0x3) << 1); | |
1025 | return ((cp[h >> 2] & mask) == mask); | |
1026 | case 1: | |
1027 | mask = 0x01 << (h & 0x7); | |
1028 | return ((cp[h >> 3] & mask) == mask); | |
1029 | default: | |
1030 | panic("isblock bad fs_frag"); | |
1031 | return; | |
1032 | } | |
1033 | } | |
502770a3 KM |
1034 | |
1035 | /* | |
1036 | * take a block out of the map | |
1037 | */ | |
b6407c9d KM |
1038 | clrblock(fs, cp, h) |
1039 | struct fs *fs; | |
1040 | unsigned char *cp; | |
1041 | int h; | |
1042 | { | |
1043 | switch ((fs)->fs_frag) { | |
1044 | case 8: | |
1045 | cp[h] = 0; | |
1046 | return; | |
1047 | case 4: | |
1048 | cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); | |
1049 | return; | |
1050 | case 2: | |
1051 | cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); | |
1052 | return; | |
1053 | case 1: | |
1054 | cp[h >> 3] &= ~(0x01 << (h & 0x7)); | |
1055 | return; | |
1056 | default: | |
1057 | panic("clrblock bad fs_frag"); | |
1058 | return; | |
1059 | } | |
1060 | } | |
502770a3 KM |
1061 | |
1062 | /* | |
1063 | * put a block into the map | |
1064 | */ | |
b6407c9d KM |
1065 | setblock(fs, cp, h) |
1066 | struct fs *fs; | |
1067 | unsigned char *cp; | |
1068 | int h; | |
1069 | { | |
1070 | switch (fs->fs_frag) { | |
1071 | case 8: | |
1072 | cp[h] = 0xff; | |
1073 | return; | |
1074 | case 4: | |
1075 | cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); | |
1076 | return; | |
1077 | case 2: | |
1078 | cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); | |
1079 | return; | |
1080 | case 1: | |
1081 | cp[h >> 3] |= (0x01 << (h & 0x7)); | |
1082 | return; | |
1083 | default: | |
1084 | panic("setblock bad fs_frag"); | |
1085 | return; | |
1086 | } | |
1087 | } |