Commit | Line | Data |
---|---|---|
15637ed4 RG |
1 | /* |
2 | * Copyright (c) 1982, 1986, 1989 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
78ed81a3 | 33 | * from: @(#)ufs_alloc.c 7.26 (Berkeley) 5/2/91 |
34 | * $Id$ | |
15637ed4 RG |
35 | */ |
36 | ||
37 | #include "param.h" | |
38 | #include "systm.h" | |
39 | #include "buf.h" | |
40 | #include "proc.h" | |
41 | #include "vnode.h" | |
42 | #include "kernel.h" | |
43 | #include "syslog.h" | |
44 | ||
45 | #include "quota.h" | |
46 | #include "inode.h" | |
47 | #include "fs.h" | |
48 | ||
49 | extern u_long hashalloc(); | |
50 | extern ino_t ialloccg(); | |
51 | extern daddr_t alloccg(); | |
52 | extern daddr_t alloccgblk(); | |
53 | extern daddr_t fragextend(); | |
54 | extern daddr_t blkpref(); | |
55 | extern daddr_t mapsearch(); | |
56 | extern int inside[], around[]; | |
57 | extern unsigned char *fragtbl[]; | |
58 | ||
59 | /* | |
60 | * Allocate a block in the file system. | |
61 | * | |
62 | * The size of the requested block is given, which must be some | |
63 | * multiple of fs_fsize and <= fs_bsize. | |
64 | * A preference may be optionally specified. If a preference is given | |
65 | * the following hierarchy is used to allocate a block: | |
66 | * 1) allocate the requested block. | |
67 | * 2) allocate a rotationally optimal block in the same cylinder. | |
68 | * 3) allocate a block in the same cylinder group. | |
69 | * 4) quadratically rehash into other cylinder groups, until an | |
70 | * available block is located. | |
71 | * If no block preference is given the following hierarchy is used | |
72 | * to allocate a block: | |
73 | * 1) allocate a block in the cylinder group that contains the | |
74 | * inode for the file. | |
75 | * 2) quadratically rehash into other cylinder groups, until an | |
76 | * available block is located. | |
77 | */ | |
78 | alloc(ip, lbn, bpref, size, bnp) | |
79 | register struct inode *ip; | |
80 | daddr_t lbn, bpref; | |
81 | int size; | |
82 | daddr_t *bnp; | |
83 | { | |
84 | daddr_t bno; | |
85 | register struct fs *fs; | |
86 | register struct buf *bp; | |
87 | int cg, error; | |
88 | struct ucred *cred = curproc->p_ucred; /* XXX */ | |
89 | ||
90 | *bnp = 0; | |
91 | fs = ip->i_fs; | |
92 | if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) { | |
93 | printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n", | |
94 | ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt); | |
95 | panic("alloc: bad size"); | |
96 | } | |
97 | if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) | |
98 | goto nospace; | |
99 | if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0) | |
100 | goto nospace; | |
101 | #ifdef QUOTA | |
102 | if (error = chkdq(ip, (long)btodb(size), cred, 0)) | |
103 | return (error); | |
104 | #endif | |
105 | if (bpref >= fs->fs_size) | |
106 | bpref = 0; | |
107 | if (bpref == 0) | |
108 | cg = itog(fs, ip->i_number); | |
109 | else | |
110 | cg = dtog(fs, bpref); | |
111 | bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size, | |
112 | (u_long (*)())alloccg); | |
113 | if (bno > 0) { | |
114 | ip->i_blocks += btodb(size); | |
115 | ip->i_flag |= IUPD|ICHG; | |
116 | *bnp = bno; | |
117 | return (0); | |
118 | } | |
119 | #ifdef QUOTA | |
120 | /* | |
121 | * Restore user's disk quota because allocation failed. | |
122 | */ | |
123 | (void) chkdq(ip, (long)-btodb(size), cred, FORCE); | |
124 | #endif | |
125 | nospace: | |
126 | fserr(fs, cred->cr_uid, "file system full"); | |
127 | uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); | |
128 | return (ENOSPC); | |
129 | } | |
130 | ||
131 | /* | |
132 | * Reallocate a fragment to a bigger size | |
133 | * | |
134 | * The number and size of the old block is given, and a preference | |
135 | * and new size is also specified. The allocator attempts to extend | |
136 | * the original block. Failing that, the regular block allocator is | |
137 | * invoked to get an appropriate block. | |
138 | */ | |
/*
 * realloccg --
 *	Grow the fragment at direct block index `lbprev' from `osize' to
 *	`nsize' bytes.  First tries to extend the fragment in place via
 *	fragextend(); failing that, allocates a new location (preferring
 *	`bpref'), copying by re-pointing the buffer and freeing the old
 *	fragments.  On success the buffer holding the data, already
 *	resized and with the new tail zeroed, is returned through `bpp';
 *	the return value is 0 or an errno.
 */
realloccg(ip, lbprev, bpref, osize, nsize, bpp)
	register struct inode *ip;
	off_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp, *obp;
	int cg, request, error;
	daddr_t bprev, bno;
	struct ucred *cred = curproc->p_ucred;	/* XXX */

	*bpp = 0;
	fs = ip->i_fs;
	/* Both sizes must be fragment multiples no larger than a block. */
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	/* Only the superuser may dip into the minfree reserve. */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	/* lbprev must name an already-allocated direct block. */
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		/* Zero the newly added tail of the buffer. */
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying
		 * to grow a small fragment to a larger fragment. To save
		 * time, we allocate a full sized block, then free the
		 * unused portion. If the file continues to grow, the
		 * `fragextend' call above will be able to grow it in place
		 * without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		(void) vnode_pager_uncache(ITOV(ip));
		/* Release the old fragments, and any over-allocation. */
		blkfree(ip, bprev, (off_t)osize);
		if (nsize < request)
			blkfree(ip, bno + numfrags(fs, nsize),
				(off_t)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
275 | ||
276 | /* | |
277 | * Allocate an inode in the file system. | |
278 | * | |
279 | * A preference may be optionally specified. If a preference is given | |
280 | * the following hierarchy is used to allocate an inode: | |
281 | * 1) allocate the requested inode. | |
282 | * 2) allocate an inode in the same cylinder group. | |
283 | * 3) quadratically rehash into other cylinder groups, until an | |
284 | * available inode is located. | |
285 | * If no inode preference is given the following hierarchy is used | |
286 | * to allocate an inode: | |
287 | * 1) allocate an inode in cylinder group 0. | |
288 | * 2) quadratically rehash into other cylinder groups, until an | |
289 | * available inode is located. | |
290 | */ | |
/*
 * ialloc --
 *	Allocate an inode, preferring inode number `ipref' (0 for no
 *	preference), for a new file of mode `mode' whose parent is
 *	`pip'.  On success the locked in-core inode is returned through
 *	`ipp' with a fresh generation number; returns 0 or an errno.
 */
ialloc(pip, ipref, mode, cred, ipp)
	register struct inode *pip;
	ino_t ipref;
	int mode;
	struct ucred *cred;
	struct inode **ipp;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg, error;

	*ipp = 0;
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
	/* An out-of-range preference means no preference. */
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	error = iget(pip, ino, ipp);
	if (error) {
		/* Could not get the in-core inode; give back the disk inode. */
		ifree(pip, ino, mode);
		return (error);
	}
	ip = *ipp;
	/* A nonzero mode means the on-disk inode was not actually free. */
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}
342 | ||
343 | /* | |
344 | * Find a cylinder to place a directory. | |
345 | * | |
346 | * The policy implemented by this algorithm is to select from | |
347 | * among those cylinder groups with above the average number of | |
348 | * free inodes, the one with the smallest number of directories. | |
349 | */ | |
350 | ino_t | |
351 | dirpref(fs) | |
352 | register struct fs *fs; | |
353 | { | |
354 | int cg, minndir, mincg, avgifree; | |
355 | ||
356 | avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; | |
357 | minndir = fs->fs_ipg; | |
358 | mincg = 0; | |
359 | for (cg = 0; cg < fs->fs_ncg; cg++) | |
360 | if (fs->fs_cs(fs, cg).cs_ndir < minndir && | |
361 | fs->fs_cs(fs, cg).cs_nifree >= avgifree) { | |
362 | mincg = cg; | |
363 | minndir = fs->fs_cs(fs, cg).cs_ndir; | |
364 | } | |
365 | return ((ino_t)(fs->fs_ipg * mincg)); | |
366 | } | |
367 | ||
368 | /* | |
369 | * Select the desired position for the next block in a file. The file is | |
370 | * logically divided into sections. The first section is composed of the | |
371 | * direct blocks. Each additional section contains fs_maxbpg blocks. | |
372 | * | |
373 | * If no blocks have been allocated in the first section, the policy is to | |
374 | * request a block in the same cylinder group as the inode that describes | |
375 | * the file. If no blocks have been allocated in any other section, the | |
376 | * policy is to place the section in a cylinder group with a greater than | |
377 | * average number of free blocks. An appropriate cylinder group is found | |
378 | * by using a rotor that sweeps the cylinder groups. When a new group of | |
379 | * blocks is needed, the sweep begins in the cylinder group following the | |
380 | * cylinder group from which the previous allocation was made. The sweep | |
381 | * continues until a cylinder group with greater than the average number | |
382 | * of free blocks is found. If the allocation is for the first block in an | |
383 | * indirect block, the information on the previous allocation is unavailable; | |
384 | * here a best guess is made based upon the logical block number being | |
385 | * allocated. | |
386 | * | |
387 | * If a section is already partially allocated, the policy is to | |
388 | * contiguously allocate fs_maxcontig blocks. The end of one of these | |
389 | * contiguous blocks and the beginning of the next is physically separated | |
390 | * so that the disk head will be in transit between them for at least | |
391 | * fs_rotdelay milliseconds. This is to allow time for the processor to | |
392 | * schedule another I/O transfer. | |
393 | */ | |
/*
 * blkpref --
 *	Return the preferred physical position for logical block `lbn'
 *	of inode `ip'.  `indx' indexes lbn's slot in the block map
 *	`bap', whose earlier entries hold the blocks already laid out
 *	in this section.  Returns NULL when no suitable cylinder group
 *	stands out (the caller's allocator then chooses).
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	/* Start of a section, or previous entry unallocated? */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/* Direct blocks stay in the inode's own cylinder group. */
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		/* Sweep forward from startcg, then wrap back around to it. */
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 * ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
455 | ||
456 | /* | |
457 | * Implement the cylinder overflow algorithm. | |
458 | * | |
459 | * The policy implemented by this algorithm is: | |
460 | * 1) allocate the block in its requested cylinder group. | |
461 | * 2) quadratically rehash on the cylinder group number. | |
462 | * 3) brute force search for a free block. | |
463 | */ | |
464 | /*VARARGS5*/ | |
465 | u_long | |
466 | hashalloc(ip, cg, pref, size, allocator) | |
467 | struct inode *ip; | |
468 | int cg; | |
469 | long pref; | |
470 | int size; /* size for data blocks, mode for inodes */ | |
471 | u_long (*allocator)(); | |
472 | { | |
473 | register struct fs *fs; | |
474 | long result; | |
475 | int i, icg = cg; | |
476 | ||
477 | fs = ip->i_fs; | |
478 | /* | |
479 | * 1: preferred cylinder group | |
480 | */ | |
481 | result = (*allocator)(ip, cg, pref, size); | |
482 | if (result) | |
483 | return (result); | |
484 | /* | |
485 | * 2: quadratic rehash | |
486 | */ | |
487 | for (i = 1; i < fs->fs_ncg; i *= 2) { | |
488 | cg += i; | |
489 | if (cg >= fs->fs_ncg) | |
490 | cg -= fs->fs_ncg; | |
491 | result = (*allocator)(ip, cg, 0, size); | |
492 | if (result) | |
493 | return (result); | |
494 | } | |
495 | /* | |
496 | * 3: brute force search | |
497 | * Note that we start at i == 2, since 0 was checked initially, | |
498 | * and 1 is always checked in the quadratic rehash. | |
499 | */ | |
500 | cg = (icg + 2) % fs->fs_ncg; | |
501 | for (i = 2; i < fs->fs_ncg; i++) { | |
502 | result = (*allocator)(ip, cg, 0, size); | |
503 | if (result) | |
504 | return (result); | |
505 | cg++; | |
506 | if (cg == fs->fs_ncg) | |
507 | cg = 0; | |
508 | } | |
509 | return (NULL); | |
510 | } | |
511 | ||
512 | /* | |
513 | * Determine whether a fragment can be extended. | |
514 | * | |
515 | * Check to see if the necessary fragments are available, and | |
516 | * if they are, allocate them. | |
517 | */ | |
/*
 * fragextend --
 *	Try to extend the fragment at `bprev' (cylinder group `cg') in
 *	place from `osize' to `nsize' bytes.  Succeeds only when all the
 *	additional fragments, up to the next block boundary, are free.
 *	Returns bprev on success, NULL on failure.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	/* Quick reject: not enough free fragments in the whole group. */
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	/* Read in the cylinder group's free-fragment map. */
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	/* Every fragment between the old and new size must be free. */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	/* Mark the new fragments in use and update the summaries. */
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}
581 | ||
582 | /* | |
583 | * Determine whether a block can be allocated. | |
584 | * | |
585 | * Check to see if a block of the appropriate size is available, | |
586 | * and if it is, allocate it. | |
587 | */ | |
/*
 * alloccg --
 *	Allocate `size' bytes from cylinder group `cg', near preferred
 *	block `bpref'.  A full-block request is passed to alloccgblk();
 *	a fragment request is satisfied from an existing partial block
 *	when possible, otherwise a whole block is carved up.  Returns
 *	the allocated (file-system relative) block number, or NULL.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	/* Recheck against the cylinder group's own copy of the counts. */
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		/* Return the unneeded tail of the block to the free map. */
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	/* Mark the chosen fragments in use and update the summaries. */
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	cgp->cg_frsum[allocsiz]--;
	/* The leftover piece of the carved fragment changes size class. */
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
670 | ||
671 | /* | |
672 | * Allocate a block in a cylinder group. | |
673 | * | |
674 | * This algorithm implements the following policy: | |
675 | * 1) allocate the requested block. | |
676 | * 2) allocate a rotationally optimal block in the same cylinder. | |
677 | * 3) allocate the next available block on the block rotor for the | |
678 | * specified cylinder group. | |
679 | * Note that this routine only allocates fs_bsize blocks; these | |
680 | * blocks may be fragmented by the routine that allocates them. | |
681 | */ | |
/*
 * alloccgblk --
 *	Allocate one full fs_bsize block from the cylinder group `cgp':
 *	the requested block `bpref' if free, else a rotationally optimal
 *	block on the same cylinder, else the next available block found
 *	from the group's rotor.  Returns the file-system relative block
 *	number, or NULL when the group has no free block.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	/* Round the preference down to a block and make it group-relative. */
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		/* Walk the rotational-layout chain looking for a free block. */
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	/* Commit: clear the free bit and update all summary counts. */
	clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
780 | ||
781 | /* | |
782 | * Determine whether an inode can be allocated. | |
783 | * | |
784 | * Check to see if an inode is available, and if it is, | |
785 | * allocate it using the following policy: | |
786 | * 1) allocate the requested inode. | |
787 | * 2) allocate the next available inode after the requested | |
788 | * inode in the specified cylinder group. | |
789 | */ | |
790 | ino_t | |
791 | ialloccg(ip, cg, ipref, mode) | |
792 | struct inode *ip; | |
793 | int cg; | |
794 | daddr_t ipref; | |
795 | int mode; | |
796 | { | |
797 | register struct fs *fs; | |
798 | register struct cg *cgp; | |
799 | struct buf *bp; | |
800 | int error, start, len, loc, map, i; | |
801 | ||
802 | fs = ip->i_fs; | |
803 | if (fs->fs_cs(fs, cg).cs_nifree == 0) | |
804 | return (NULL); | |
805 | error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), | |
806 | (int)fs->fs_cgsize, NOCRED, &bp); | |
807 | if (error) { | |
808 | brelse(bp); | |
809 | return (NULL); | |
810 | } | |
811 | cgp = bp->b_un.b_cg; | |
812 | if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) { | |
813 | brelse(bp); | |
814 | return (NULL); | |
815 | } | |
816 | cgp->cg_time = time.tv_sec; | |
817 | if (ipref) { | |
818 | ipref %= fs->fs_ipg; | |
819 | if (isclr(cg_inosused(cgp), ipref)) | |
820 | goto gotit; | |
821 | } | |
822 | start = cgp->cg_irotor / NBBY; | |
823 | len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY); | |
824 | loc = skpc(0xff, len, &cg_inosused(cgp)[start]); | |
825 | if (loc == 0) { | |
826 | len = start + 1; | |
827 | start = 0; | |
828 | loc = skpc(0xff, len, &cg_inosused(cgp)[0]); | |
829 | if (loc == 0) { | |
830 | printf("cg = %s, irotor = %d, fs = %s\n", | |
831 | cg, cgp->cg_irotor, fs->fs_fsmnt); | |
832 | panic("ialloccg: map corrupted"); | |
833 | /* NOTREACHED */ | |
834 | } | |
835 | } | |
836 | i = start + len - loc; | |
837 | map = cg_inosused(cgp)[i]; | |
838 | ipref = i * NBBY; | |
839 | for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) { | |
840 | if ((map & i) == 0) { | |
841 | cgp->cg_irotor = ipref; | |
842 | goto gotit; | |
843 | } | |
844 | } | |
845 | printf("fs = %s\n", fs->fs_fsmnt); | |
846 | panic("ialloccg: block not in map"); | |
847 | /* NOTREACHED */ | |
848 | gotit: | |
849 | setbit(cg_inosused(cgp), ipref); | |
850 | cgp->cg_cs.cs_nifree--; | |
851 | fs->fs_cstotal.cs_nifree--; | |
852 | fs->fs_cs(fs, cg).cs_nifree--; | |
853 | fs->fs_fmod++; | |
854 | if ((mode & IFMT) == IFDIR) { | |
855 | cgp->cg_cs.cs_ndir++; | |
856 | fs->fs_cstotal.cs_ndir++; | |
857 | fs->fs_cs(fs, cg).cs_ndir++; | |
858 | } | |
859 | bdwrite(bp); | |
860 | return (cg * fs->fs_ipg + ipref); | |
861 | } | |
862 | ||
863 | /* | |
864 | * Free a block or fragment. | |
865 | * | |
866 | * The specified block or fragment is placed back in the | |
867 | * free map. If a fragment is deallocated, a possible | |
868 | * block reassembly is checked. | |
869 | */ | |
blkfree(ip, bno, size)
	register struct inode *ip;	/* inode the block belonged to */
	daddr_t bno;			/* file-system block address to free */
	off_t size;			/* size in bytes: full block or frags */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;
	struct ucred *cred = curproc->p_ucred;	/* XXX */

	fs = ip->i_fs;
	/* size must be at most one block and a multiple of the frag size */
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	/* an out-of-range block is logged and ignored, not a panic */
	if ((unsigned)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		fserr(fs, cred->cr_uid, "bad block");
		return;
	}
	/* read the cylinder group that holds the block */
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	/* silently drop the free on a corrupted cylinder group */
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	/* from here on, bno is relative to this cylinder group */
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		/*
		 * Freeing a whole block: it must not already be free.
		 */
		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		/* update free-block counts at cg, total, and summary level */
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		/* and the per-cylinder rotational-position tallies */
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		/* first fragment of the block containing bno */
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		/* i == frags here: account for each fragment just freed */
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			/* its frags stop counting as fragments ... */
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			/* ... and count as one free block instead */
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod++;		/* mark file system modified */
	bdwrite(bp);		/* delayed write of the cg block */
}
966 | ||
967 | /* | |
968 | * Free an inode. | |
969 | * | |
970 | * The specified inode is placed back in the free map. | |
971 | */ | |
972 | ifree(ip, ino, mode) | |
973 | struct inode *ip; | |
974 | ino_t ino; | |
975 | int mode; | |
976 | { | |
977 | register struct fs *fs; | |
978 | register struct cg *cgp; | |
979 | struct buf *bp; | |
980 | int error, cg; | |
981 | ||
982 | fs = ip->i_fs; | |
983 | if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) { | |
984 | printf("dev = 0x%x, ino = %d, fs = %s\n", | |
985 | ip->i_dev, ino, fs->fs_fsmnt); | |
986 | panic("ifree: range"); | |
987 | } | |
988 | cg = itog(fs, ino); | |
989 | error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), | |
990 | (int)fs->fs_cgsize, NOCRED, &bp); | |
991 | if (error) { | |
992 | brelse(bp); | |
993 | return; | |
994 | } | |
995 | cgp = bp->b_un.b_cg; | |
996 | if (!cg_chkmagic(cgp)) { | |
997 | brelse(bp); | |
998 | return; | |
999 | } | |
1000 | cgp->cg_time = time.tv_sec; | |
1001 | ino %= fs->fs_ipg; | |
1002 | if (isclr(cg_inosused(cgp), ino)) { | |
1003 | printf("dev = 0x%x, ino = %d, fs = %s\n", | |
1004 | ip->i_dev, ino, fs->fs_fsmnt); | |
1005 | if (fs->fs_ronly == 0) | |
1006 | panic("ifree: freeing free inode"); | |
1007 | } | |
1008 | clrbit(cg_inosused(cgp), ino); | |
1009 | if (ino < cgp->cg_irotor) | |
1010 | cgp->cg_irotor = ino; | |
1011 | cgp->cg_cs.cs_nifree++; | |
1012 | fs->fs_cstotal.cs_nifree++; | |
1013 | fs->fs_cs(fs, cg).cs_nifree++; | |
1014 | if ((mode & IFMT) == IFDIR) { | |
1015 | cgp->cg_cs.cs_ndir--; | |
1016 | fs->fs_cstotal.cs_ndir--; | |
1017 | fs->fs_cs(fs, cg).cs_ndir--; | |
1018 | } | |
1019 | fs->fs_fmod++; | |
1020 | bdwrite(bp); | |
1021 | } | |
1022 | ||
1023 | /* | |
1024 | * Find a block of the specified size in the specified cylinder group. | |
1025 | * | |
1026 | * It is a panic if a request is made to find a block if none are | |
1027 | * available. | |
1028 | */ | |
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;		/* preferred location, 0 for none */
	int allocsiz;		/* run length wanted, in fragments */
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;	/* byte of preferred frag */
	else
		start = cgp->cg_frotor / NBBY;		/* else resume at rotor */
	/* first pass: from start to the end of the map */
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		/* second pass: wrap and scan the front of the map */
		len = start + 1;
		start = 0;
		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			/* no run anywhere: the map disagrees with the caller */
			printf("start = %d, len = %d, fs = %s\n",
				start, len, fs->fs_fsmnt);
			panic("alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	/* loc is the count left when scanc matched, so this indexes the byte */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		/* slide an allocsiz-wide window across this block's frag bits */
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	/* the byte matched but no window did: map and fragtbl disagree */
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}
1087 | ||
1088 | /* | |
1089 | * Fserr prints the name of a file system with an error diagnostic. | |
1090 | * | |
1091 | * The form of the error message is: | |
1092 | * fs: error message | |
1093 | */ | |
fserr(fs, uid, cp)
	struct fs *fs;		/* file system to name in the message */
	uid_t uid;		/* uid of the offending process */
	char *cp;		/* error message text */
{

	/* report via the kernel log at LOG_ERR; never panics */
	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}