NOTE: the user's quota credit must be restored when allocation fails — this is handled by the chkdq(..., FORCE) calls on the failure paths of alloc() and realloccg() below.
[unix-history] / usr / src / sys / ufs / lfs / lfs_alloc.c
CommitLineData
da7c5cc6 1/*
7188ac27 2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
202a4bd9 3 * All rights reserved.
da7c5cc6 4 *
b702c21d 5 * %sccs.include.redist.c%
202a4bd9 6 *
8a403f0d 7 * @(#)lfs_alloc.c 7.22 (Berkeley) %G%
da7c5cc6 8 */
e3fe2d69 9
94368568
JB
10#include "param.h"
11#include "systm.h"
94368568 12#include "buf.h"
94368568 13#include "user.h"
7188ac27 14#include "vnode.h"
94368568 15#include "kernel.h"
7580177b 16#include "syslog.h"
aec7dd3b 17#include "cmap.h"
7188ac27
KM
18#include "../ufs/quota.h"
19#include "../ufs/inode.h"
20#include "../ufs/fs.h"
e3fe2d69 21
daaf7bee 22extern u_long hashalloc();
4f083fd7
SL
23extern ino_t ialloccg();
24extern daddr_t alloccg();
743f1ef7
KM
25extern daddr_t alloccgblk();
26extern daddr_t fragextend();
27extern daddr_t blkpref();
28extern daddr_t mapsearch();
1d7a08c5 29extern int inside[], around[];
b6407c9d 30extern unsigned char *fragtbl[];
e3fe2d69 31
502770a3
KM
32/*
33 * Allocate a block in the file system.
34 *
35 * The size of the requested block is given, which must be some
36 * multiple of fs_fsize and <= fs_bsize.
37 * A preference may be optionally specified. If a preference is given
38 * the following hierarchy is used to allocate a block:
39 * 1) allocate the requested block.
40 * 2) allocate a rotationally optimal block in the same cylinder.
41 * 3) allocate a block in the same cylinder group.
42 * 4) quadradically rehash into other cylinder groups, until an
43 * available block is located.
44 * If no block preference is given the following heirarchy is used
45 * to allocate a block:
46 * 1) allocate a block in the cylinder group that contains the
47 * inode for the file.
48 * 2) quadradically rehash into other cylinder groups, until an
49 * available block is located.
50 */
0c36425b 51alloc(ip, lbn, bpref, size, bnp)
f3c028b7 52 register struct inode *ip;
0c36425b 53 daddr_t lbn, bpref;
e3fe2d69 54 int size;
0c36425b 55 daddr_t *bnp;
e3fe2d69
KM
56{
57 daddr_t bno;
58 register struct fs *fs;
f3c028b7 59 register struct buf *bp;
7188ac27 60 int cg, error;
00bff950 61 struct ucred *cred = u.u_cred; /* XXX */
e3fe2d69 62
0c36425b 63 *bnp = 0;
f7287e4b 64 fs = ip->i_fs;
ffd90e52
KM
65 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
66 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
67 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
b6407c9d 68 panic("alloc: bad size");
ffd90e52 69 }
b6407c9d 70 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
0947395d 71 goto nospace;
00bff950 72 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
e3fe2d69 73 goto nospace;
b4567e9c 74#ifdef QUOTA
00bff950 75 if (error = chkdq(ip, (long)btodb(size), cred, 0))
7188ac27 76 return (error);
ca90a6bf 77#endif
260e5e3c
KM
78 if (bpref >= fs->fs_size)
79 bpref = 0;
e3fe2d69 80 if (bpref == 0)
6994bf5d 81 cg = itog(fs, ip->i_number);
e3fe2d69 82 else
6994bf5d 83 cg = dtog(fs, bpref);
4f083fd7
SL
84 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
85 (u_long (*)())alloccg);
0c36425b
KM
86 if (bno > 0) {
87 ip->i_blocks += btodb(size);
88 ip->i_flag |= IUPD|ICHG;
89 *bnp = bno;
90 return (0);
91 }
8a403f0d
KM
92#ifdef QUOTA
93 /*
94 * Restore user's disk quota because allocation failed.
95 */
96 (void) chkdq(ip, (long)-btodb(size), cred, FORCE);
97#endif
e3fe2d69 98nospace:
e2546752 99 fserr(fs, cred->cr_uid, "file system full");
e3fe2d69 100 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 101 return (ENOSPC);
e3fe2d69
KM
102}
103
502770a3
KM
104/*
105 * Reallocate a fragment to a bigger size
106 *
107 * The number and size of the old block is given, and a preference
108 * and new size is also specified. The allocator attempts to extend
109 * the original block. Failing that, the regular block allocator is
110 * invoked to get an appropriate block.
111 */
0c36425b 112realloccg(ip, lbprev, bpref, osize, nsize, bpp)
f7287e4b 113 register struct inode *ip;
0c36425b
KM
114 off_t lbprev;
115 daddr_t bpref;
07670f7d 116 int osize, nsize;
7188ac27 117 struct buf **bpp;
07670f7d 118{
07670f7d 119 register struct fs *fs;
7188ac27 120 struct buf *bp, *obp;
f7fa0c54 121 int cg, request;
0c36425b 122 daddr_t bprev, bno, bn;
7188ac27 123 int i, error, count;
00bff950 124 struct ucred *cred = u.u_cred; /* XXX */
07670f7d 125
7188ac27 126 *bpp = 0;
f7287e4b 127 fs = ip->i_fs;
d995d89d 128 if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
ffd90e52
KM
129 (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
130 printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
131 ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
b6407c9d 132 panic("realloccg: bad size");
ffd90e52 133 }
00bff950 134 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
0947395d 135 goto nospace;
0c36425b 136 if ((bprev = ip->i_db[lbprev]) == 0) {
ffd90e52
KM
137 printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
138 ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
502770a3 139 panic("realloccg: bad bprev");
ffd90e52 140 }
0c36425b
KM
141 /*
142 * Allocate the extra space in the buffer.
143 */
144 if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
145 brelse(bp);
146 return (error);
147 }
8a403f0d
KM
148#ifdef QUOTA
149 if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
150 brelse(bp);
151 return (error);
152 }
153#endif
9cf42d55 154 allocbuf(bp, nsize);
0c36425b
KM
155 bp->b_flags |= B_DONE;
156 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
0c36425b
KM
157 /*
158 * Check for extension in the existing location.
159 */
ae851115 160 cg = dtog(fs, bprev);
0c36425b 161 if (bno = fragextend(ip, cg, (long)bprev, osize, nsize)) {
72ef728e 162 if (bp->b_blkno != fsbtodb(fs, bno))
0c36425b 163 panic("bad blockno");
2393702a
KM
164 ip->i_blocks += btodb(nsize - osize);
165 ip->i_flag |= IUPD|ICHG;
7188ac27
KM
166 *bpp = bp;
167 return (0);
f3c028b7 168 }
0c36425b
KM
169 /*
170 * Allocate a new disk location.
171 */
260e5e3c
KM
172 if (bpref >= fs->fs_size)
173 bpref = 0;
aec7dd3b 174 switch ((int)fs->fs_optim) {
f8484b5f
KM
175 case FS_OPTSPACE:
176 /*
177 * Allocate an exact sized fragment. Although this makes
178 * best use of space, we will waste time relocating it if
179 * the file continues to grow. If the fragmentation is
180 * less than half of the minimum free reserve, we choose
181 * to begin optimizing for time.
182 */
f7fa0c54 183 request = nsize;
f8484b5f
KM
184 if (fs->fs_minfree < 5 ||
185 fs->fs_cstotal.cs_nffree >
186 fs->fs_dsize * fs->fs_minfree / (2 * 100))
187 break;
188 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
189 fs->fs_fsmnt);
190 fs->fs_optim = FS_OPTTIME;
191 break;
192 case FS_OPTTIME:
193 /*
194 * At this point we have discovered a file that is trying
195 * to grow a small fragment to a larger fragment. To save
196 * time, we allocate a full sized block, then free the
197 * unused portion. If the file continues to grow, the
198 * `fragextend' call above will be able to grow it in place
199 * without further copying. If aberrant programs cause
200 * disk fragmentation to grow within 2% of the free reserve,
201 * we choose to begin optimizing for space.
202 */
f7fa0c54 203 request = fs->fs_bsize;
f8484b5f
KM
204 if (fs->fs_cstotal.cs_nffree <
205 fs->fs_dsize * (fs->fs_minfree - 2) / 100)
206 break;
207 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
208 fs->fs_fsmnt);
209 fs->fs_optim = FS_OPTSPACE;
210 break;
211 default:
212 printf("dev = 0x%x, optim = %d, fs = %s\n",
213 ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
214 panic("realloccg: bad optim");
215 /* NOTREACHED */
216 }
f7fa0c54 217 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
4f083fd7 218 (u_long (*)())alloccg);
6459ebe0 219 if (bno > 0) {
ec67a3ce
MK
220#ifdef SECSIZE
221 obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize,
222 fs->fs_dbsize);
223#else SECSIZE
0c36425b 224 bp->b_blkno = bn = fsbtodb(fs, bno);
ec67a3ce
MK
225 count = howmany(osize, CLBYTES);
226 for (i = 0; i < count; i++)
227#ifdef SECSIZE
228 munhash(ip->i_dev, bn + i * CLBYTES / fs->fs_dbsize);
229#else SECSIZE
230 munhash(ip->i_dev, bn + i * CLBYTES / DEV_BSIZE);
231#endif SECSIZE
ced3a252 232 blkfree(ip, bprev, (off_t)osize);
f7fa0c54 233 if (nsize < request)
ced3a252 234 blkfree(ip, bno + numfrags(fs, nsize),
f7fa0c54 235 (off_t)(request - nsize));
2523b389
SL
236 ip->i_blocks += btodb(nsize - osize);
237 ip->i_flag |= IUPD|ICHG;
7188ac27
KM
238 *bpp = bp;
239 return (0);
f3c028b7 240 }
8a403f0d
KM
241#ifdef QUOTA
242 /*
243 * Restore user's disk quota because allocation failed.
244 */
245 (void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
246#endif
0c36425b 247 brelse(bp);
0947395d 248nospace:
f3c028b7
KM
249 /*
250 * no space available
251 */
e2546752 252 fserr(fs, cred->cr_uid, "file system full");
07670f7d 253 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 254 return (ENOSPC);
07670f7d
KM
255}
256
502770a3
KM
257/*
258 * Allocate an inode in the file system.
259 *
260 * A preference may be optionally specified. If a preference is given
261 * the following hierarchy is used to allocate an inode:
262 * 1) allocate the requested inode.
263 * 2) allocate an inode in the same cylinder group.
264 * 3) quadradically rehash into other cylinder groups, until an
265 * available inode is located.
266 * If no inode preference is given the following heirarchy is used
267 * to allocate an inode:
268 * 1) allocate an inode in cylinder group 0.
269 * 2) quadradically rehash into other cylinder groups, until an
270 * available inode is located.
271 */
00bff950 272ialloc(pip, ipref, mode, cred, ipp)
f7287e4b 273 register struct inode *pip;
e3fe2d69
KM
274 ino_t ipref;
275 int mode;
00bff950 276 struct ucred *cred;
7188ac27 277 struct inode **ipp;
e3fe2d69 278{
daaf7bee 279 ino_t ino;
e3fe2d69
KM
280 register struct fs *fs;
281 register struct inode *ip;
7188ac27 282 int cg, error;
e3fe2d69 283
7188ac27 284 *ipp = 0;
f7287e4b 285 fs = pip->i_fs;
0947395d 286 if (fs->fs_cstotal.cs_nifree == 0)
e3fe2d69 287 goto noinodes;
260e5e3c
KM
288 if (ipref >= fs->fs_ncg * fs->fs_ipg)
289 ipref = 0;
6994bf5d 290 cg = itog(fs, ipref);
f7287e4b 291 ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
e3fe2d69
KM
292 if (ino == 0)
293 goto noinodes;
7188ac27 294 error = iget(pip, ino, ipp);
7188ac27 295 if (error) {
00bff950 296 ifree(pip, ino, mode);
7188ac27 297 return (error);
e3fe2d69 298 }
fb92d0ab 299 ip = *ipp;
ffd90e52
KM
300 if (ip->i_mode) {
301 printf("mode = 0%o, inum = %d, fs = %s\n",
302 ip->i_mode, ip->i_number, fs->fs_fsmnt);
e3fe2d69 303 panic("ialloc: dup alloc");
ffd90e52 304 }
2523b389
SL
305 if (ip->i_blocks) { /* XXX */
306 printf("free inode %s/%d had %d blocks\n",
307 fs->fs_fsmnt, ino, ip->i_blocks);
308 ip->i_blocks = 0;
309 }
7e0dee76 310 ip->i_flags = 0;
fb92d0ab
KM
311 /*
312 * Set up a new generation number for this inode.
313 */
314 if (++nextgennumber < (u_long)time.tv_sec)
315 nextgennumber = time.tv_sec;
316 ip->i_gen = nextgennumber;
7188ac27 317 return (0);
e3fe2d69 318noinodes:
e2546752 319 fserr(fs, cred->cr_uid, "out of inodes");
ae851115 320 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
7188ac27 321 return (ENOSPC);
e3fe2d69
KM
322}
323
743f1ef7 324/*
502770a3
KM
325 * Find a cylinder to place a directory.
326 *
327 * The policy implemented by this algorithm is to select from
328 * among those cylinder groups with above the average number of
329 * free inodes, the one with the smallest number of directories.
743f1ef7 330 */
4f083fd7 331ino_t
f7287e4b 332dirpref(fs)
e3fe2d69 333 register struct fs *fs;
f7287e4b 334{
743f1ef7 335 int cg, minndir, mincg, avgifree;
e3fe2d69 336
0947395d 337 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
743f1ef7 338 minndir = fs->fs_ipg;
e3fe2d69 339 mincg = 0;
743f1ef7 340 for (cg = 0; cg < fs->fs_ncg; cg++)
b6407c9d
KM
341 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
342 fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
e3fe2d69 343 mincg = cg;
b6407c9d 344 minndir = fs->fs_cs(fs, cg).cs_ndir;
e3fe2d69 345 }
4f083fd7 346 return ((ino_t)(fs->fs_ipg * mincg));
e3fe2d69
KM
347}
348
743f1ef7 349/*
4f083fd7
SL
350 * Select the desired position for the next block in a file. The file is
351 * logically divided into sections. The first section is composed of the
352 * direct blocks. Each additional section contains fs_maxbpg blocks.
353 *
354 * If no blocks have been allocated in the first section, the policy is to
355 * request a block in the same cylinder group as the inode that describes
356 * the file. If no blocks have been allocated in any other section, the
357 * policy is to place the section in a cylinder group with a greater than
358 * average number of free blocks. An appropriate cylinder group is found
16e7863f
KM
359 * by using a rotor that sweeps the cylinder groups. When a new group of
360 * blocks is needed, the sweep begins in the cylinder group following the
361 * cylinder group from which the previous allocation was made. The sweep
362 * continues until a cylinder group with greater than the average number
363 * of free blocks is found. If the allocation is for the first block in an
364 * indirect block, the information on the previous allocation is unavailable;
365 * here a best guess is made based upon the logical block number being
366 * allocated.
4f083fd7
SL
367 *
368 * If a section is already partially allocated, the policy is to
369 * contiguously allocate fs_maxcontig blocks. The end of one of these
370 * contiguous blocks and the beginning of the next is physically separated
371 * so that the disk head will be in transit between them for at least
372 * fs_rotdelay milliseconds. This is to allow time for the processor to
373 * schedule another I/O transfer.
743f1ef7 374 */
daaf7bee 375daddr_t
4f083fd7
SL
376blkpref(ip, lbn, indx, bap)
377 struct inode *ip;
378 daddr_t lbn;
379 int indx;
380 daddr_t *bap;
f7287e4b 381{
4f083fd7 382 register struct fs *fs;
16e7863f
KM
383 register int cg;
384 int avgbfree, startcg;
4f083fd7 385 daddr_t nextblk;
743f1ef7 386
4f083fd7
SL
387 fs = ip->i_fs;
388 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
389 if (lbn < NDADDR) {
390 cg = itog(fs, ip->i_number);
b6407c9d 391 return (fs->fs_fpg * cg + fs->fs_frag);
743f1ef7 392 }
4f083fd7
SL
393 /*
394 * Find a cylinder with greater than average number of
395 * unused data blocks.
396 */
16e7863f
KM
397 if (indx == 0 || bap[indx - 1] == 0)
398 startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
399 else
400 startcg = dtog(fs, bap[indx - 1]) + 1;
401 startcg %= fs->fs_ncg;
4f083fd7 402 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
16e7863f 403 for (cg = startcg; cg < fs->fs_ncg; cg++)
4f083fd7
SL
404 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
405 fs->fs_cgrotor = cg;
406 return (fs->fs_fpg * cg + fs->fs_frag);
407 }
16e7863f 408 for (cg = 0; cg <= startcg; cg++)
4f083fd7
SL
409 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
410 fs->fs_cgrotor = cg;
411 return (fs->fs_fpg * cg + fs->fs_frag);
412 }
413 return (NULL);
414 }
415 /*
416 * One or more previous blocks have been laid out. If less
417 * than fs_maxcontig previous blocks are contiguous, the
418 * next block is requested contiguously, otherwise it is
419 * requested rotationally delayed by fs_rotdelay milliseconds.
420 */
421 nextblk = bap[indx - 1] + fs->fs_frag;
422 if (indx > fs->fs_maxcontig &&
240a4664 423 bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
4f083fd7
SL
424 != nextblk)
425 return (nextblk);
426 if (fs->fs_rotdelay != 0)
427 /*
428 * Here we convert ms of delay to frags as:
429 * (frags) = (ms) * (rev/sec) * (sect/rev) /
430 * ((sect/frag) * (ms/sec))
431 * then round up to the next block.
432 */
433 nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
434 (NSPF(fs) * 1000), fs->fs_frag);
435 return (nextblk);
743f1ef7
KM
436}
437
502770a3
KM
438/*
439 * Implement the cylinder overflow algorithm.
440 *
441 * The policy implemented by this algorithm is:
442 * 1) allocate the block in its requested cylinder group.
443 * 2) quadradically rehash on the cylinder group number.
444 * 3) brute force search for a free block.
445 */
daaf7bee
KM
446/*VARARGS5*/
447u_long
f7287e4b
KM
448hashalloc(ip, cg, pref, size, allocator)
449 struct inode *ip;
e3fe2d69
KM
450 int cg;
451 long pref;
452 int size; /* size for data blocks, mode for inodes */
daaf7bee 453 u_long (*allocator)();
e3fe2d69 454{
f7287e4b 455 register struct fs *fs;
e3fe2d69
KM
456 long result;
457 int i, icg = cg;
458
f7287e4b 459 fs = ip->i_fs;
e3fe2d69
KM
460 /*
461 * 1: preferred cylinder group
462 */
f7287e4b 463 result = (*allocator)(ip, cg, pref, size);
e3fe2d69
KM
464 if (result)
465 return (result);
466 /*
467 * 2: quadratic rehash
468 */
469 for (i = 1; i < fs->fs_ncg; i *= 2) {
470 cg += i;
471 if (cg >= fs->fs_ncg)
472 cg -= fs->fs_ncg;
f7287e4b 473 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
474 if (result)
475 return (result);
476 }
477 /*
478 * 3: brute force search
620b3290
SL
479 * Note that we start at i == 2, since 0 was checked initially,
480 * and 1 is always checked in the quadratic rehash.
e3fe2d69 481 */
2136305e 482 cg = (icg + 2) % fs->fs_ncg;
620b3290 483 for (i = 2; i < fs->fs_ncg; i++) {
f7287e4b 484 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
485 if (result)
486 return (result);
487 cg++;
488 if (cg == fs->fs_ncg)
489 cg = 0;
490 }
ae851115 491 return (NULL);
e3fe2d69
KM
492}
493
502770a3
KM
494/*
495 * Determine whether a fragment can be extended.
496 *
497 * Check to see if the necessary fragments are available, and
498 * if they are, allocate them.
499 */
07670f7d 500daddr_t
f7287e4b
KM
501fragextend(ip, cg, bprev, osize, nsize)
502 struct inode *ip;
07670f7d 503 int cg;
f3c028b7 504 long bprev;
07670f7d
KM
505 int osize, nsize;
506{
f7287e4b 507 register struct fs *fs;
f3c028b7 508 register struct cg *cgp;
7188ac27 509 struct buf *bp;
f3c028b7
KM
510 long bno;
511 int frags, bbase;
7188ac27 512 int i, error;
07670f7d 513
f7287e4b 514 fs = ip->i_fs;
a8580723 515 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
e5476900 516 return (NULL);
d995d89d 517 frags = numfrags(fs, nsize);
a8580723
KM
518 bbase = fragnum(fs, bprev);
519 if (bbase > fragnum(fs, (bprev + frags - 1))) {
ec67a3ce 520 /* cannot extend across a block boundary */
ae851115 521 return (NULL);
f3c028b7 522 }
ec67a3ce
MK
523#ifdef SECSIZE
524 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
525 fs->fs_dbsize);
526#else SECSIZE
7188ac27 527 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 528 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
529 if (error) {
530 brelse(bp);
531 return (NULL);
532 }
ec67a3ce 533#endif SECSIZE
e5476900 534 cgp = bp->b_un.b_cg;
7188ac27 535 if (!cg_chkmagic(cgp)) {
d995d89d 536 brelse(bp);
ae851115 537 return (NULL);
d995d89d 538 }
ad9250ee 539 cgp->cg_time = time.tv_sec;
6994bf5d 540 bno = dtogd(fs, bprev);
d995d89d 541 for (i = numfrags(fs, osize); i < frags; i++)
10adeb11 542 if (isclr(cg_blksfree(cgp), bno + i)) {
aca50d72 543 brelse(bp);
ae851115 544 return (NULL);
aca50d72
KM
545 }
546 /*
547 * the current fragment can be extended
548 * deduct the count on fragment being extended into
549 * increase the count on the remaining fragment (if any)
550 * allocate the extended piece
551 */
552 for (i = frags; i < fs->fs_frag - bbase; i++)
10adeb11 553 if (isclr(cg_blksfree(cgp), bno + i))
f3c028b7 554 break;
d995d89d 555 cgp->cg_frsum[i - numfrags(fs, osize)]--;
aca50d72
KM
556 if (i != frags)
557 cgp->cg_frsum[i - frags]++;
d995d89d 558 for (i = numfrags(fs, osize); i < frags; i++) {
10adeb11 559 clrbit(cg_blksfree(cgp), bno + i);
aca50d72
KM
560 cgp->cg_cs.cs_nffree--;
561 fs->fs_cstotal.cs_nffree--;
562 fs->fs_cs(fs, cg).cs_nffree--;
f3c028b7 563 }
aca50d72
KM
564 fs->fs_fmod++;
565 bdwrite(bp);
566 return (bprev);
07670f7d
KM
567}
568
502770a3
KM
569/*
570 * Determine whether a block can be allocated.
571 *
572 * Check to see if a block of the apprpriate size is available,
573 * and if it is, allocate it.
574 */
4f083fd7 575daddr_t
f7287e4b
KM
576alloccg(ip, cg, bpref, size)
577 struct inode *ip;
e3fe2d69
KM
578 int cg;
579 daddr_t bpref;
580 int size;
581{
f7287e4b 582 register struct fs *fs;
f3c028b7 583 register struct cg *cgp;
7188ac27 584 struct buf *bp;
f3c028b7 585 register int i;
7188ac27 586 int error, bno, frags, allocsiz;
e3fe2d69 587
f7287e4b 588 fs = ip->i_fs;
b6407c9d 589 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
ae851115 590 return (NULL);
ec67a3ce
MK
591#ifdef SECSIZE
592 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
593 fs->fs_dbsize);
594#else SECSIZE
7188ac27 595 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 596 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
597 if (error) {
598 brelse(bp);
599 return (NULL);
600 }
ec67a3ce 601#endif SECSIZE
e5476900 602 cgp = bp->b_un.b_cg;
7188ac27 603 if (!cg_chkmagic(cgp) ||
0f538882 604 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
d995d89d 605 brelse(bp);
ae851115 606 return (NULL);
d995d89d 607 }
ad9250ee 608 cgp->cg_time = time.tv_sec;
b6407c9d 609 if (size == fs->fs_bsize) {
daaf7bee 610 bno = alloccgblk(fs, cgp, bpref);
f3c028b7
KM
611 bdwrite(bp);
612 return (bno);
613 }
614 /*
615 * check to see if any fragments are already available
616 * allocsiz is the size which will be allocated, hacking
617 * it down to a smaller size if necessary
618 */
d995d89d 619 frags = numfrags(fs, size);
b6407c9d 620 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
f3c028b7
KM
621 if (cgp->cg_frsum[allocsiz] != 0)
622 break;
b6407c9d 623 if (allocsiz == fs->fs_frag) {
f3c028b7
KM
624 /*
625 * no fragments were available, so a block will be
626 * allocated, and hacked up
627 */
0947395d 628 if (cgp->cg_cs.cs_nbfree == 0) {
f3c028b7 629 brelse(bp);
ae851115 630 return (NULL);
f3c028b7 631 }
daaf7bee 632 bno = alloccgblk(fs, cgp, bpref);
6994bf5d 633 bpref = dtogd(fs, bno);
b6407c9d 634 for (i = frags; i < fs->fs_frag; i++)
10adeb11 635 setbit(cg_blksfree(cgp), bpref + i);
b6407c9d 636 i = fs->fs_frag - frags;
0947395d
KM
637 cgp->cg_cs.cs_nffree += i;
638 fs->fs_cstotal.cs_nffree += i;
b6407c9d 639 fs->fs_cs(fs, cg).cs_nffree += i;
961945a8 640 fs->fs_fmod++;
f3c028b7
KM
641 cgp->cg_frsum[i]++;
642 bdwrite(bp);
643 return (bno);
644 }
743f1ef7 645 bno = mapsearch(fs, cgp, bpref, allocsiz);
0f538882
KM
646 if (bno < 0) {
647 brelse(bp);
ae851115 648 return (NULL);
0f538882 649 }
f3c028b7 650 for (i = 0; i < frags; i++)
10adeb11 651 clrbit(cg_blksfree(cgp), bno + i);
0947395d
KM
652 cgp->cg_cs.cs_nffree -= frags;
653 fs->fs_cstotal.cs_nffree -= frags;
b6407c9d 654 fs->fs_cs(fs, cg).cs_nffree -= frags;
961945a8 655 fs->fs_fmod++;
f3c028b7
KM
656 cgp->cg_frsum[allocsiz]--;
657 if (frags != allocsiz)
658 cgp->cg_frsum[allocsiz - frags]++;
659 bdwrite(bp);
660 return (cg * fs->fs_fpg + bno);
661}
662
502770a3
KM
663/*
664 * Allocate a block in a cylinder group.
665 *
666 * This algorithm implements the following policy:
667 * 1) allocate the requested block.
668 * 2) allocate a rotationally optimal block in the same cylinder.
669 * 3) allocate the next available block on the block rotor for the
670 * specified cylinder group.
671 * Note that this routine only allocates fs_bsize blocks; these
672 * blocks may be fragmented by the routine that allocates them.
673 */
f3c028b7 674daddr_t
daaf7bee 675alloccgblk(fs, cgp, bpref)
f7287e4b 676 register struct fs *fs;
f3c028b7
KM
677 register struct cg *cgp;
678 daddr_t bpref;
679{
743f1ef7 680 daddr_t bno;
ae851115 681 int cylno, pos, delta;
743f1ef7 682 short *cylbp;
aca50d72 683 register int i;
f3c028b7 684
743f1ef7
KM
685 if (bpref == 0) {
686 bpref = cgp->cg_rotor;
aca50d72
KM
687 goto norot;
688 }
a8580723 689 bpref = blknum(fs, bpref);
6994bf5d 690 bpref = dtogd(fs, bpref);
aca50d72
KM
691 /*
692 * if the requested block is available, use it
693 */
10adeb11 694 if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
aca50d72
KM
695 bno = bpref;
696 goto gotit;
697 }
aca50d72
KM
698 /*
699 * check for a block available on the same cylinder
aca50d72
KM
700 */
701 cylno = cbtocylno(fs, bpref);
10adeb11 702 if (cg_blktot(cgp)[cylno] == 0)
502770a3
KM
703 goto norot;
704 if (fs->fs_cpc == 0) {
705 /*
706 * block layout info is not available, so just have
707 * to take any block in this cylinder.
708 */
709 bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
710 goto norot;
711 }
aca50d72
KM
712 /*
713 * check the summary information to see if a block is
714 * available in the requested cylinder starting at the
4f083fd7 715 * requested rotational position and proceeding around.
aca50d72 716 */
10adeb11 717 cylbp = cg_blks(fs, cgp, cylno);
4f083fd7 718 pos = cbtorpos(fs, bpref);
10adeb11 719 for (i = pos; i < fs->fs_nrpos; i++)
aca50d72
KM
720 if (cylbp[i] > 0)
721 break;
10adeb11 722 if (i == fs->fs_nrpos)
aca50d72 723 for (i = 0; i < pos; i++)
743f1ef7
KM
724 if (cylbp[i] > 0)
725 break;
aca50d72
KM
726 if (cylbp[i] > 0) {
727 /*
728 * found a rotational position, now find the actual
729 * block. A panic if none is actually there.
730 */
731 pos = cylno % fs->fs_cpc;
732 bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
10adeb11 733 if (fs_postbl(fs, pos)[i] == -1) {
ffd90e52
KM
734 printf("pos = %d, i = %d, fs = %s\n",
735 pos, i, fs->fs_fsmnt);
aca50d72 736 panic("alloccgblk: cyl groups corrupted");
ffd90e52 737 }
10adeb11
KM
738 for (i = fs_postbl(fs, pos)[i];; ) {
739 if (isblock(fs, cg_blksfree(cgp), bno + i)) {
240a4664 740 bno = blkstofrags(fs, (bno + i));
aca50d72 741 goto gotit;
743f1ef7 742 }
10adeb11
KM
743 delta = fs_rotbl(fs)[i];
744 if (delta <= 0 ||
745 delta + i > fragstoblks(fs, fs->fs_fpg))
aca50d72 746 break;
ae851115 747 i += delta;
743f1ef7 748 }
ffd90e52 749 printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
aca50d72 750 panic("alloccgblk: can't find blk in cyl");
e3fe2d69 751 }
aca50d72
KM
752norot:
753 /*
754 * no blocks in the requested cylinder, so take next
755 * available one in this cylinder group.
756 */
b32450f4 757 bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
6459ebe0 758 if (bno < 0)
ae851115 759 return (NULL);
743f1ef7 760 cgp->cg_rotor = bno;
e3fe2d69 761gotit:
10adeb11 762 clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
0947395d
KM
763 cgp->cg_cs.cs_nbfree--;
764 fs->fs_cstotal.cs_nbfree--;
b6407c9d 765 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
502770a3 766 cylno = cbtocylno(fs, bno);
10adeb11
KM
767 cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
768 cg_blktot(cgp)[cylno]--;
e3fe2d69 769 fs->fs_fmod++;
743f1ef7 770 return (cgp->cg_cgx * fs->fs_fpg + bno);
e3fe2d69 771}
10adeb11 772
502770a3
KM
773/*
774 * Determine whether an inode can be allocated.
775 *
776 * Check to see if an inode is available, and if it is,
777 * allocate it using the following policy:
778 * 1) allocate the requested inode.
779 * 2) allocate the next available inode after the requested
780 * inode in the specified cylinder group.
781 */
4f083fd7 782ino_t
f7287e4b
KM
783ialloccg(ip, cg, ipref, mode)
784 struct inode *ip;
e3fe2d69
KM
785 int cg;
786 daddr_t ipref;
787 int mode;
788{
f7287e4b 789 register struct fs *fs;
f3c028b7 790 register struct cg *cgp;
4e0c7b8a 791 struct buf *bp;
7188ac27 792 int error, start, len, loc, map, i;
e3fe2d69 793
f7287e4b 794 fs = ip->i_fs;
b6407c9d 795 if (fs->fs_cs(fs, cg).cs_nifree == 0)
ae851115 796 return (NULL);
ec67a3ce
MK
797#ifdef SECSIZE
798 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
799 fs->fs_dbsize);
800#else SECSIZE
7188ac27 801 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 802 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
803 if (error) {
804 brelse(bp);
805 return (NULL);
806 }
ec67a3ce 807#endif SECSIZE
e5476900 808 cgp = bp->b_un.b_cg;
7188ac27 809 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
d995d89d 810 brelse(bp);
ae851115 811 return (NULL);
d995d89d 812 }
ad9250ee 813 cgp->cg_time = time.tv_sec;
e3fe2d69
KM
814 if (ipref) {
815 ipref %= fs->fs_ipg;
10adeb11 816 if (isclr(cg_inosused(cgp), ipref))
e3fe2d69 817 goto gotit;
4e0c7b8a
KM
818 }
819 start = cgp->cg_irotor / NBBY;
820 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
10adeb11 821 loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
4e0c7b8a 822 if (loc == 0) {
e5889092
KM
823 len = start + 1;
824 start = 0;
10adeb11 825 loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
e5889092
KM
826 if (loc == 0) {
827 printf("cg = %s, irotor = %d, fs = %s\n",
828 cg, cgp->cg_irotor, fs->fs_fsmnt);
829 panic("ialloccg: map corrupted");
830 /* NOTREACHED */
831 }
4e0c7b8a
KM
832 }
833 i = start + len - loc;
10adeb11 834 map = cg_inosused(cgp)[i];
4e0c7b8a
KM
835 ipref = i * NBBY;
836 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
837 if ((map & i) == 0) {
e3fe2d69
KM
838 cgp->cg_irotor = ipref;
839 goto gotit;
840 }
841 }
4e0c7b8a
KM
842 printf("fs = %s\n", fs->fs_fsmnt);
843 panic("ialloccg: block not in map");
844 /* NOTREACHED */
e3fe2d69 845gotit:
10adeb11 846 setbit(cg_inosused(cgp), ipref);
0947395d
KM
847 cgp->cg_cs.cs_nifree--;
848 fs->fs_cstotal.cs_nifree--;
b6407c9d 849 fs->fs_cs(fs, cg).cs_nifree--;
e3fe2d69
KM
850 fs->fs_fmod++;
851 if ((mode & IFMT) == IFDIR) {
0947395d
KM
852 cgp->cg_cs.cs_ndir++;
853 fs->fs_cstotal.cs_ndir++;
b6407c9d 854 fs->fs_cs(fs, cg).cs_ndir++;
e3fe2d69
KM
855 }
856 bdwrite(bp);
857 return (cg * fs->fs_ipg + ipref);
858}
859
502770a3
KM
860/*
861 * Free a block or fragment.
862 *
863 * The specified block or fragment is placed back in the
864 * free map. If a fragment is deallocated, a possible
865 * block reassembly is checked.
866 */
ced3a252 867blkfree(ip, bno, size)
f7287e4b 868 register struct inode *ip;
e3fe2d69 869 daddr_t bno;
daaf7bee 870 off_t size;
e3fe2d69
KM
871{
872 register struct fs *fs;
873 register struct cg *cgp;
7188ac27
KM
874 struct buf *bp;
875 int error, cg, blk, frags, bbase;
f3c028b7 876 register int i;
e2546752 877 struct ucred *cred = u.u_cred; /* XXX */
e3fe2d69 878
f7287e4b 879 fs = ip->i_fs;
ffd90e52
KM
880 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
881 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
882 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
ced3a252 883 panic("blkfree: bad size");
ffd90e52 884 }
6994bf5d 885 cg = dtog(fs, bno);
e2546752 886 if ((unsigned)bno >= fs->fs_size) {
6459ebe0 887 printf("bad block %d, ino %d\n", bno, ip->i_number);
e2546752 888 fserr(fs, cred->cr_uid, "bad block");
e3fe2d69 889 return;
6459ebe0 890 }
ec67a3ce
MK
891#ifdef SECSIZE
892 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
893 fs->fs_dbsize);
894#else SECSIZE
7188ac27 895 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 896 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
897 if (error) {
898 brelse(bp);
899 return;
900 }
ec67a3ce 901#endif SECSIZE
e5476900 902 cgp = bp->b_un.b_cg;
7188ac27 903 if (!cg_chkmagic(cgp)) {
d995d89d 904 brelse(bp);
e3fe2d69 905 return;
d995d89d 906 }
ad9250ee 907 cgp->cg_time = time.tv_sec;
6994bf5d 908 bno = dtogd(fs, bno);
b6407c9d 909 if (size == fs->fs_bsize) {
10adeb11 910 if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
ffd90e52
KM
911 printf("dev = 0x%x, block = %d, fs = %s\n",
912 ip->i_dev, bno, fs->fs_fsmnt);
ced3a252 913 panic("blkfree: freeing free block");
6459ebe0 914 }
10adeb11 915 setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
0947395d
KM
916 cgp->cg_cs.cs_nbfree++;
917 fs->fs_cstotal.cs_nbfree++;
b6407c9d 918 fs->fs_cs(fs, cg).cs_nbfree++;
502770a3 919 i = cbtocylno(fs, bno);
10adeb11
KM
920 cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
921 cg_blktot(cgp)[i]++;
07670f7d 922 } else {
a8580723 923 bbase = bno - fragnum(fs, bno);
f3c028b7
KM
924 /*
925 * decrement the counts associated with the old frags
926 */
10adeb11 927 blk = blkmap(fs, cg_blksfree(cgp), bbase);
b6407c9d 928 fragacct(fs, blk, cgp->cg_frsum, -1);
f3c028b7
KM
929 /*
930 * deallocate the fragment
931 */
d995d89d 932 frags = numfrags(fs, size);
f3c028b7 933 for (i = 0; i < frags; i++) {
10adeb11 934 if (isset(cg_blksfree(cgp), bno + i)) {
ffd90e52
KM
935 printf("dev = 0x%x, block = %d, fs = %s\n",
936 ip->i_dev, bno + i, fs->fs_fsmnt);
ced3a252 937 panic("blkfree: freeing free frag");
ffd90e52 938 }
10adeb11 939 setbit(cg_blksfree(cgp), bno + i);
07670f7d 940 }
ae851115
KM
941 cgp->cg_cs.cs_nffree += i;
942 fs->fs_cstotal.cs_nffree += i;
943 fs->fs_cs(fs, cg).cs_nffree += i;
f3c028b7
KM
944 /*
945 * add back in counts associated with the new frags
946 */
10adeb11 947 blk = blkmap(fs, cg_blksfree(cgp), bbase);
b6407c9d 948 fragacct(fs, blk, cgp->cg_frsum, 1);
f3c028b7
KM
949 /*
950 * if a complete block has been reassembled, account for it
951 */
9523729e
KM
952 if (isblock(fs, cg_blksfree(cgp),
953 (daddr_t)fragstoblks(fs, bbase))) {
b6407c9d
KM
954 cgp->cg_cs.cs_nffree -= fs->fs_frag;
955 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
956 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
0947395d
KM
957 cgp->cg_cs.cs_nbfree++;
958 fs->fs_cstotal.cs_nbfree++;
b6407c9d 959 fs->fs_cs(fs, cg).cs_nbfree++;
502770a3 960 i = cbtocylno(fs, bbase);
10adeb11
KM
961 cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
962 cg_blktot(cgp)[i]++;
07670f7d
KM
963 }
964 }
e3fe2d69 965 fs->fs_fmod++;
e3fe2d69
KM
966 bdwrite(bp);
967}
968
502770a3
KM
969/*
970 * Free an inode.
971 *
972 * The specified inode is placed back in the free map.
973 */
f7287e4b
KM
974ifree(ip, ino, mode)
975 struct inode *ip;
e3fe2d69
KM
976 ino_t ino;
977 int mode;
978{
979 register struct fs *fs;
980 register struct cg *cgp;
7188ac27
KM
981 struct buf *bp;
982 int error, cg;
e3fe2d69 983
f7287e4b 984 fs = ip->i_fs;
ffd90e52
KM
985 if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
986 printf("dev = 0x%x, ino = %d, fs = %s\n",
987 ip->i_dev, ino, fs->fs_fsmnt);
e3fe2d69 988 panic("ifree: range");
ffd90e52 989 }
6994bf5d 990 cg = itog(fs, ino);
ec67a3ce
MK
991#ifdef SECSIZE
992 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
993 fs->fs_dbsize);
994#else SECSIZE
7188ac27 995 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 996 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
997 if (error) {
998 brelse(bp);
999 return;
1000 }
ec67a3ce 1001#endif SECSIZE
e5476900 1002 cgp = bp->b_un.b_cg;
7188ac27 1003 if (!cg_chkmagic(cgp)) {
d995d89d 1004 brelse(bp);
e3fe2d69 1005 return;
d995d89d 1006 }
ad9250ee 1007 cgp->cg_time = time.tv_sec;
e3fe2d69 1008 ino %= fs->fs_ipg;
10adeb11 1009 if (isclr(cg_inosused(cgp), ino)) {
ffd90e52
KM
1010 printf("dev = 0x%x, ino = %d, fs = %s\n",
1011 ip->i_dev, ino, fs->fs_fsmnt);
e3fe2d69 1012 panic("ifree: freeing free inode");
ffd90e52 1013 }
10adeb11 1014 clrbit(cg_inosused(cgp), ino);
4e0c7b8a
KM
1015 if (ino < cgp->cg_irotor)
1016 cgp->cg_irotor = ino;
0947395d
KM
1017 cgp->cg_cs.cs_nifree++;
1018 fs->fs_cstotal.cs_nifree++;
b6407c9d 1019 fs->fs_cs(fs, cg).cs_nifree++;
e3fe2d69 1020 if ((mode & IFMT) == IFDIR) {
0947395d
KM
1021 cgp->cg_cs.cs_ndir--;
1022 fs->fs_cstotal.cs_ndir--;
b6407c9d 1023 fs->fs_cs(fs, cg).cs_ndir--;
e3fe2d69
KM
1024 }
1025 fs->fs_fmod++;
1026 bdwrite(bp);
1027}
1028
743f1ef7 1029/*
502770a3
KM
1030 * Find a block of the specified size in the specified cylinder group.
1031 *
743f1ef7
KM
1032 * It is a panic if a request is made to find a block if none are
1033 * available.
1034 */
1035daddr_t
1036mapsearch(fs, cgp, bpref, allocsiz)
1037 register struct fs *fs;
1038 register struct cg *cgp;
1039 daddr_t bpref;
1040 int allocsiz;
1041{
1042 daddr_t bno;
1043 int start, len, loc, i;
1044 int blk, field, subfield, pos;
1045
1046 /*
1047 * find the fragment by searching through the free block
1048 * map for an appropriate bit pattern
1049 */
1050 if (bpref)
6994bf5d 1051 start = dtogd(fs, bpref) / NBBY;
743f1ef7
KM
1052 else
1053 start = cgp->cg_frotor / NBBY;
942bd18b 1054 len = howmany(fs->fs_fpg, NBBY) - start;
9523729e
KM
1055 loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
1056 (u_char *)fragtbl[fs->fs_frag],
1057 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
743f1ef7 1058 if (loc == 0) {
e5476900
KM
1059 len = start + 1;
1060 start = 0;
9523729e
KM
1061 loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
1062 (u_char *)fragtbl[fs->fs_frag],
1063 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
4e0c7b8a
KM
1064 if (loc == 0) {
1065 printf("start = %d, len = %d, fs = %s\n",
1066 start, len, fs->fs_fsmnt);
1067 panic("alloccg: map corrupted");
e5889092 1068 /* NOTREACHED */
4e0c7b8a 1069 }
743f1ef7
KM
1070 }
1071 bno = (start + len - loc) * NBBY;
1072 cgp->cg_frotor = bno;
1073 /*
1074 * found the byte in the map
1075 * sift through the bits to find the selected frag
1076 */
ae851115 1077 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
10adeb11 1078 blk = blkmap(fs, cg_blksfree(cgp), bno);
743f1ef7
KM
1079 blk <<= 1;
1080 field = around[allocsiz];
1081 subfield = inside[allocsiz];
b6407c9d 1082 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
ae851115
KM
1083 if ((blk & field) == subfield)
1084 return (bno + pos);
743f1ef7
KM
1085 field <<= 1;
1086 subfield <<= 1;
1087 }
1088 }
ffd90e52 1089 printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
743f1ef7 1090 panic("alloccg: block not in map");
e5476900 1091 return (-1);
743f1ef7
KM
1092}
1093
e3fe2d69 1094/*
502770a3
KM
1095 * Fserr prints the name of a file system with an error diagnostic.
1096 *
1097 * The form of the error message is:
e3fe2d69
KM
1098 * fs: error message
1099 */
e2546752 1100fserr(fs, uid, cp)
e3fe2d69 1101 struct fs *fs;
e2546752 1102 uid_t uid;
e3fe2d69
KM
1103 char *cp;
1104{
1105
e2546752 1106 log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
e3fe2d69 1107}