/*
 * (git-blame viewer chrome, retained for provenance:)
 * fix of/seek/truncate stuff
 * [unix-history] / usr / src / sys / ufs / ffs / ffs_alloc.c
 * CommitLineData
 */
da7c5cc6 1/*
7188ac27 2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
202a4bd9 3 * All rights reserved.
da7c5cc6 4 *
b702c21d 5 * %sccs.include.redist.c%
202a4bd9 6 *
b3f3adbd 7 * @(#)ffs_alloc.c 7.26 (Berkeley) %G%
da7c5cc6 8 */
e3fe2d69 9
94368568
JB
10#include "param.h"
11#include "systm.h"
94368568 12#include "buf.h"
c6f5111d 13#include "proc.h"
7188ac27 14#include "vnode.h"
94368568 15#include "kernel.h"
7580177b 16#include "syslog.h"
c6f5111d
MK
17
18#include "quota.h"
19#include "inode.h"
20#include "fs.h"
e3fe2d69 21
daaf7bee 22extern u_long hashalloc();
4f083fd7
SL
23extern ino_t ialloccg();
24extern daddr_t alloccg();
743f1ef7
KM
25extern daddr_t alloccgblk();
26extern daddr_t fragextend();
27extern daddr_t blkpref();
28extern daddr_t mapsearch();
1d7a08c5 29extern int inside[], around[];
b6407c9d 30extern unsigned char *fragtbl[];
e3fe2d69 31
502770a3
KM
32/*
33 * Allocate a block in the file system.
34 *
35 * The size of the requested block is given, which must be some
36 * multiple of fs_fsize and <= fs_bsize.
37 * A preference may be optionally specified. If a preference is given
38 * the following hierarchy is used to allocate a block:
39 * 1) allocate the requested block.
40 * 2) allocate a rotationally optimal block in the same cylinder.
41 * 3) allocate a block in the same cylinder group.
42 * 4) quadradically rehash into other cylinder groups, until an
43 * available block is located.
44 * If no block preference is given the following heirarchy is used
45 * to allocate a block:
46 * 1) allocate a block in the cylinder group that contains the
47 * inode for the file.
48 * 2) quadradically rehash into other cylinder groups, until an
49 * available block is located.
50 */
0c36425b 51alloc(ip, lbn, bpref, size, bnp)
f3c028b7 52 register struct inode *ip;
0c36425b 53 daddr_t lbn, bpref;
e3fe2d69 54 int size;
0c36425b 55 daddr_t *bnp;
e3fe2d69
KM
56{
57 daddr_t bno;
58 register struct fs *fs;
f3c028b7 59 register struct buf *bp;
7188ac27 60 int cg, error;
c6f5111d 61 struct ucred *cred = curproc->p_ucred; /* XXX */
e3fe2d69 62
0c36425b 63 *bnp = 0;
f7287e4b 64 fs = ip->i_fs;
ffd90e52
KM
65 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
66 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
67 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
b6407c9d 68 panic("alloc: bad size");
ffd90e52 69 }
b6407c9d 70 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
0947395d 71 goto nospace;
00bff950 72 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
e3fe2d69 73 goto nospace;
b4567e9c 74#ifdef QUOTA
00bff950 75 if (error = chkdq(ip, (long)btodb(size), cred, 0))
7188ac27 76 return (error);
ca90a6bf 77#endif
260e5e3c
KM
78 if (bpref >= fs->fs_size)
79 bpref = 0;
e3fe2d69 80 if (bpref == 0)
6994bf5d 81 cg = itog(fs, ip->i_number);
e3fe2d69 82 else
6994bf5d 83 cg = dtog(fs, bpref);
4f083fd7
SL
84 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
85 (u_long (*)())alloccg);
0c36425b
KM
86 if (bno > 0) {
87 ip->i_blocks += btodb(size);
88 ip->i_flag |= IUPD|ICHG;
89 *bnp = bno;
90 return (0);
91 }
8a403f0d
KM
92#ifdef QUOTA
93 /*
94 * Restore user's disk quota because allocation failed.
95 */
96 (void) chkdq(ip, (long)-btodb(size), cred, FORCE);
97#endif
e3fe2d69 98nospace:
e2546752 99 fserr(fs, cred->cr_uid, "file system full");
e3fe2d69 100 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 101 return (ENOSPC);
e3fe2d69
KM
102}
103
502770a3
KM
104/*
105 * Reallocate a fragment to a bigger size
106 *
107 * The number and size of the old block is given, and a preference
108 * and new size is also specified. The allocator attempts to extend
109 * the original block. Failing that, the regular block allocator is
110 * invoked to get an appropriate block.
111 */
0c36425b 112realloccg(ip, lbprev, bpref, osize, nsize, bpp)
f7287e4b 113 register struct inode *ip;
0c36425b
KM
114 off_t lbprev;
115 daddr_t bpref;
07670f7d 116 int osize, nsize;
7188ac27 117 struct buf **bpp;
07670f7d 118{
07670f7d 119 register struct fs *fs;
7188ac27 120 struct buf *bp, *obp;
8986c97c
KM
121 int cg, request, error;
122 daddr_t bprev, bno;
c6f5111d 123 struct ucred *cred = curproc->p_ucred; /* XXX */
07670f7d 124
7188ac27 125 *bpp = 0;
f7287e4b 126 fs = ip->i_fs;
d995d89d 127 if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
ffd90e52
KM
128 (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
129 printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
130 ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
b6407c9d 131 panic("realloccg: bad size");
ffd90e52 132 }
00bff950 133 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
0947395d 134 goto nospace;
0c36425b 135 if ((bprev = ip->i_db[lbprev]) == 0) {
ffd90e52
KM
136 printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
137 ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
502770a3 138 panic("realloccg: bad bprev");
ffd90e52 139 }
0c36425b
KM
140 /*
141 * Allocate the extra space in the buffer.
142 */
143 if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
144 brelse(bp);
145 return (error);
146 }
8a403f0d
KM
147#ifdef QUOTA
148 if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
149 brelse(bp);
150 return (error);
151 }
152#endif
0c36425b
KM
153 /*
154 * Check for extension in the existing location.
155 */
ae851115 156 cg = dtog(fs, bprev);
0c36425b 157 if (bno = fragextend(ip, cg, (long)bprev, osize, nsize)) {
72ef728e 158 if (bp->b_blkno != fsbtodb(fs, bno))
0c36425b 159 panic("bad blockno");
2393702a
KM
160 ip->i_blocks += btodb(nsize - osize);
161 ip->i_flag |= IUPD|ICHG;
b3f3adbd
KM
162 allocbuf(bp, nsize);
163 bp->b_flags |= B_DONE;
164 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
7188ac27
KM
165 *bpp = bp;
166 return (0);
f3c028b7 167 }
0c36425b
KM
168 /*
169 * Allocate a new disk location.
170 */
260e5e3c
KM
171 if (bpref >= fs->fs_size)
172 bpref = 0;
aec7dd3b 173 switch ((int)fs->fs_optim) {
f8484b5f
KM
174 case FS_OPTSPACE:
175 /*
176 * Allocate an exact sized fragment. Although this makes
177 * best use of space, we will waste time relocating it if
178 * the file continues to grow. If the fragmentation is
179 * less than half of the minimum free reserve, we choose
180 * to begin optimizing for time.
181 */
f7fa0c54 182 request = nsize;
f8484b5f
KM
183 if (fs->fs_minfree < 5 ||
184 fs->fs_cstotal.cs_nffree >
185 fs->fs_dsize * fs->fs_minfree / (2 * 100))
186 break;
187 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
188 fs->fs_fsmnt);
189 fs->fs_optim = FS_OPTTIME;
190 break;
191 case FS_OPTTIME:
192 /*
193 * At this point we have discovered a file that is trying
194 * to grow a small fragment to a larger fragment. To save
195 * time, we allocate a full sized block, then free the
196 * unused portion. If the file continues to grow, the
197 * `fragextend' call above will be able to grow it in place
198 * without further copying. If aberrant programs cause
199 * disk fragmentation to grow within 2% of the free reserve,
200 * we choose to begin optimizing for space.
201 */
f7fa0c54 202 request = fs->fs_bsize;
f8484b5f
KM
203 if (fs->fs_cstotal.cs_nffree <
204 fs->fs_dsize * (fs->fs_minfree - 2) / 100)
205 break;
206 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
207 fs->fs_fsmnt);
208 fs->fs_optim = FS_OPTSPACE;
209 break;
210 default:
211 printf("dev = 0x%x, optim = %d, fs = %s\n",
212 ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
213 panic("realloccg: bad optim");
214 /* NOTREACHED */
215 }
f7fa0c54 216 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
4f083fd7 217 (u_long (*)())alloccg);
6459ebe0 218 if (bno > 0) {
ec67a3ce
MK
219#ifdef SECSIZE
220 obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize,
221 fs->fs_dbsize);
222#else SECSIZE
ec67a3ce
MK
223 count = howmany(osize, CLBYTES);
224 for (i = 0; i < count; i++)
225#ifdef SECSIZE
226 munhash(ip->i_dev, bn + i * CLBYTES / fs->fs_dbsize);
227#else SECSIZE
228 munhash(ip->i_dev, bn + i * CLBYTES / DEV_BSIZE);
229#endif SECSIZE
ced3a252 230 blkfree(ip, bprev, (off_t)osize);
f7fa0c54 231 if (nsize < request)
ced3a252 232 blkfree(ip, bno + numfrags(fs, nsize),
f7fa0c54 233 (off_t)(request - nsize));
2523b389
SL
234 ip->i_blocks += btodb(nsize - osize);
235 ip->i_flag |= IUPD|ICHG;
b3f3adbd
KM
236 allocbuf(bp, nsize);
237 bp->b_flags |= B_DONE;
238 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
7188ac27
KM
239 *bpp = bp;
240 return (0);
f3c028b7 241 }
8a403f0d
KM
242#ifdef QUOTA
243 /*
244 * Restore user's disk quota because allocation failed.
245 */
246 (void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
247#endif
0c36425b 248 brelse(bp);
0947395d 249nospace:
f3c028b7
KM
250 /*
251 * no space available
252 */
e2546752 253 fserr(fs, cred->cr_uid, "file system full");
07670f7d 254 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 255 return (ENOSPC);
07670f7d
KM
256}
257
502770a3
KM
258/*
259 * Allocate an inode in the file system.
260 *
261 * A preference may be optionally specified. If a preference is given
262 * the following hierarchy is used to allocate an inode:
263 * 1) allocate the requested inode.
264 * 2) allocate an inode in the same cylinder group.
265 * 3) quadradically rehash into other cylinder groups, until an
266 * available inode is located.
267 * If no inode preference is given the following heirarchy is used
268 * to allocate an inode:
269 * 1) allocate an inode in cylinder group 0.
270 * 2) quadradically rehash into other cylinder groups, until an
271 * available inode is located.
272 */
00bff950 273ialloc(pip, ipref, mode, cred, ipp)
f7287e4b 274 register struct inode *pip;
e3fe2d69
KM
275 ino_t ipref;
276 int mode;
00bff950 277 struct ucred *cred;
7188ac27 278 struct inode **ipp;
e3fe2d69 279{
daaf7bee 280 ino_t ino;
e3fe2d69
KM
281 register struct fs *fs;
282 register struct inode *ip;
7188ac27 283 int cg, error;
e3fe2d69 284
7188ac27 285 *ipp = 0;
f7287e4b 286 fs = pip->i_fs;
0947395d 287 if (fs->fs_cstotal.cs_nifree == 0)
e3fe2d69 288 goto noinodes;
260e5e3c
KM
289 if (ipref >= fs->fs_ncg * fs->fs_ipg)
290 ipref = 0;
6994bf5d 291 cg = itog(fs, ipref);
f7287e4b 292 ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
e3fe2d69
KM
293 if (ino == 0)
294 goto noinodes;
7188ac27 295 error = iget(pip, ino, ipp);
7188ac27 296 if (error) {
00bff950 297 ifree(pip, ino, mode);
7188ac27 298 return (error);
e3fe2d69 299 }
fb92d0ab 300 ip = *ipp;
ffd90e52
KM
301 if (ip->i_mode) {
302 printf("mode = 0%o, inum = %d, fs = %s\n",
303 ip->i_mode, ip->i_number, fs->fs_fsmnt);
e3fe2d69 304 panic("ialloc: dup alloc");
ffd90e52 305 }
2523b389
SL
306 if (ip->i_blocks) { /* XXX */
307 printf("free inode %s/%d had %d blocks\n",
308 fs->fs_fsmnt, ino, ip->i_blocks);
309 ip->i_blocks = 0;
310 }
7e0dee76 311 ip->i_flags = 0;
fb92d0ab
KM
312 /*
313 * Set up a new generation number for this inode.
314 */
315 if (++nextgennumber < (u_long)time.tv_sec)
316 nextgennumber = time.tv_sec;
317 ip->i_gen = nextgennumber;
7188ac27 318 return (0);
e3fe2d69 319noinodes:
e2546752 320 fserr(fs, cred->cr_uid, "out of inodes");
ae851115 321 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
7188ac27 322 return (ENOSPC);
e3fe2d69
KM
323}
324
743f1ef7 325/*
502770a3
KM
326 * Find a cylinder to place a directory.
327 *
328 * The policy implemented by this algorithm is to select from
329 * among those cylinder groups with above the average number of
330 * free inodes, the one with the smallest number of directories.
743f1ef7 331 */
4f083fd7 332ino_t
f7287e4b 333dirpref(fs)
e3fe2d69 334 register struct fs *fs;
f7287e4b 335{
743f1ef7 336 int cg, minndir, mincg, avgifree;
e3fe2d69 337
0947395d 338 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
743f1ef7 339 minndir = fs->fs_ipg;
e3fe2d69 340 mincg = 0;
743f1ef7 341 for (cg = 0; cg < fs->fs_ncg; cg++)
b6407c9d
KM
342 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
343 fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
e3fe2d69 344 mincg = cg;
b6407c9d 345 minndir = fs->fs_cs(fs, cg).cs_ndir;
e3fe2d69 346 }
4f083fd7 347 return ((ino_t)(fs->fs_ipg * mincg));
e3fe2d69
KM
348}
349
743f1ef7 350/*
4f083fd7
SL
351 * Select the desired position for the next block in a file. The file is
352 * logically divided into sections. The first section is composed of the
353 * direct blocks. Each additional section contains fs_maxbpg blocks.
354 *
355 * If no blocks have been allocated in the first section, the policy is to
356 * request a block in the same cylinder group as the inode that describes
357 * the file. If no blocks have been allocated in any other section, the
358 * policy is to place the section in a cylinder group with a greater than
359 * average number of free blocks. An appropriate cylinder group is found
16e7863f
KM
360 * by using a rotor that sweeps the cylinder groups. When a new group of
361 * blocks is needed, the sweep begins in the cylinder group following the
362 * cylinder group from which the previous allocation was made. The sweep
363 * continues until a cylinder group with greater than the average number
364 * of free blocks is found. If the allocation is for the first block in an
365 * indirect block, the information on the previous allocation is unavailable;
366 * here a best guess is made based upon the logical block number being
367 * allocated.
4f083fd7
SL
368 *
369 * If a section is already partially allocated, the policy is to
370 * contiguously allocate fs_maxcontig blocks. The end of one of these
371 * contiguous blocks and the beginning of the next is physically separated
372 * so that the disk head will be in transit between them for at least
373 * fs_rotdelay milliseconds. This is to allow time for the processor to
374 * schedule another I/O transfer.
743f1ef7 375 */
daaf7bee 376daddr_t
4f083fd7
SL
377blkpref(ip, lbn, indx, bap)
378 struct inode *ip;
379 daddr_t lbn;
380 int indx;
381 daddr_t *bap;
f7287e4b 382{
4f083fd7 383 register struct fs *fs;
16e7863f
KM
384 register int cg;
385 int avgbfree, startcg;
4f083fd7 386 daddr_t nextblk;
743f1ef7 387
4f083fd7
SL
388 fs = ip->i_fs;
389 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
390 if (lbn < NDADDR) {
391 cg = itog(fs, ip->i_number);
b6407c9d 392 return (fs->fs_fpg * cg + fs->fs_frag);
743f1ef7 393 }
4f083fd7
SL
394 /*
395 * Find a cylinder with greater than average number of
396 * unused data blocks.
397 */
16e7863f
KM
398 if (indx == 0 || bap[indx - 1] == 0)
399 startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
400 else
401 startcg = dtog(fs, bap[indx - 1]) + 1;
402 startcg %= fs->fs_ncg;
4f083fd7 403 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
16e7863f 404 for (cg = startcg; cg < fs->fs_ncg; cg++)
4f083fd7
SL
405 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
406 fs->fs_cgrotor = cg;
407 return (fs->fs_fpg * cg + fs->fs_frag);
408 }
16e7863f 409 for (cg = 0; cg <= startcg; cg++)
4f083fd7
SL
410 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
411 fs->fs_cgrotor = cg;
412 return (fs->fs_fpg * cg + fs->fs_frag);
413 }
414 return (NULL);
415 }
416 /*
417 * One or more previous blocks have been laid out. If less
418 * than fs_maxcontig previous blocks are contiguous, the
419 * next block is requested contiguously, otherwise it is
420 * requested rotationally delayed by fs_rotdelay milliseconds.
421 */
422 nextblk = bap[indx - 1] + fs->fs_frag;
423 if (indx > fs->fs_maxcontig &&
240a4664 424 bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
4f083fd7
SL
425 != nextblk)
426 return (nextblk);
427 if (fs->fs_rotdelay != 0)
428 /*
429 * Here we convert ms of delay to frags as:
430 * (frags) = (ms) * (rev/sec) * (sect/rev) /
431 * ((sect/frag) * (ms/sec))
432 * then round up to the next block.
433 */
434 nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
435 (NSPF(fs) * 1000), fs->fs_frag);
436 return (nextblk);
743f1ef7
KM
437}
438
502770a3
KM
439/*
440 * Implement the cylinder overflow algorithm.
441 *
442 * The policy implemented by this algorithm is:
443 * 1) allocate the block in its requested cylinder group.
444 * 2) quadradically rehash on the cylinder group number.
445 * 3) brute force search for a free block.
446 */
daaf7bee
KM
447/*VARARGS5*/
448u_long
f7287e4b
KM
449hashalloc(ip, cg, pref, size, allocator)
450 struct inode *ip;
e3fe2d69
KM
451 int cg;
452 long pref;
453 int size; /* size for data blocks, mode for inodes */
daaf7bee 454 u_long (*allocator)();
e3fe2d69 455{
f7287e4b 456 register struct fs *fs;
e3fe2d69
KM
457 long result;
458 int i, icg = cg;
459
f7287e4b 460 fs = ip->i_fs;
e3fe2d69
KM
461 /*
462 * 1: preferred cylinder group
463 */
f7287e4b 464 result = (*allocator)(ip, cg, pref, size);
e3fe2d69
KM
465 if (result)
466 return (result);
467 /*
468 * 2: quadratic rehash
469 */
470 for (i = 1; i < fs->fs_ncg; i *= 2) {
471 cg += i;
472 if (cg >= fs->fs_ncg)
473 cg -= fs->fs_ncg;
f7287e4b 474 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
475 if (result)
476 return (result);
477 }
478 /*
479 * 3: brute force search
620b3290
SL
480 * Note that we start at i == 2, since 0 was checked initially,
481 * and 1 is always checked in the quadratic rehash.
e3fe2d69 482 */
2136305e 483 cg = (icg + 2) % fs->fs_ncg;
620b3290 484 for (i = 2; i < fs->fs_ncg; i++) {
f7287e4b 485 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
486 if (result)
487 return (result);
488 cg++;
489 if (cg == fs->fs_ncg)
490 cg = 0;
491 }
ae851115 492 return (NULL);
e3fe2d69
KM
493}
494
502770a3
KM
495/*
496 * Determine whether a fragment can be extended.
497 *
498 * Check to see if the necessary fragments are available, and
499 * if they are, allocate them.
500 */
07670f7d 501daddr_t
f7287e4b
KM
502fragextend(ip, cg, bprev, osize, nsize)
503 struct inode *ip;
07670f7d 504 int cg;
f3c028b7 505 long bprev;
07670f7d
KM
506 int osize, nsize;
507{
f7287e4b 508 register struct fs *fs;
f3c028b7 509 register struct cg *cgp;
7188ac27 510 struct buf *bp;
f3c028b7
KM
511 long bno;
512 int frags, bbase;
7188ac27 513 int i, error;
07670f7d 514
f7287e4b 515 fs = ip->i_fs;
a8580723 516 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
e5476900 517 return (NULL);
d995d89d 518 frags = numfrags(fs, nsize);
a8580723
KM
519 bbase = fragnum(fs, bprev);
520 if (bbase > fragnum(fs, (bprev + frags - 1))) {
ec67a3ce 521 /* cannot extend across a block boundary */
ae851115 522 return (NULL);
f3c028b7 523 }
ec67a3ce
MK
524#ifdef SECSIZE
525 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
526 fs->fs_dbsize);
527#else SECSIZE
7188ac27 528 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 529 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
530 if (error) {
531 brelse(bp);
532 return (NULL);
533 }
ec67a3ce 534#endif SECSIZE
e5476900 535 cgp = bp->b_un.b_cg;
7188ac27 536 if (!cg_chkmagic(cgp)) {
d995d89d 537 brelse(bp);
ae851115 538 return (NULL);
d995d89d 539 }
95d0807c 540 cgp->cg_time = time.tv_sec;
6994bf5d 541 bno = dtogd(fs, bprev);
d995d89d 542 for (i = numfrags(fs, osize); i < frags; i++)
10adeb11 543 if (isclr(cg_blksfree(cgp), bno + i)) {
aca50d72 544 brelse(bp);
ae851115 545 return (NULL);
aca50d72
KM
546 }
547 /*
548 * the current fragment can be extended
549 * deduct the count on fragment being extended into
550 * increase the count on the remaining fragment (if any)
551 * allocate the extended piece
552 */
553 for (i = frags; i < fs->fs_frag - bbase; i++)
10adeb11 554 if (isclr(cg_blksfree(cgp), bno + i))
f3c028b7 555 break;
d995d89d 556 cgp->cg_frsum[i - numfrags(fs, osize)]--;
aca50d72
KM
557 if (i != frags)
558 cgp->cg_frsum[i - frags]++;
d995d89d 559 for (i = numfrags(fs, osize); i < frags; i++) {
10adeb11 560 clrbit(cg_blksfree(cgp), bno + i);
aca50d72
KM
561 cgp->cg_cs.cs_nffree--;
562 fs->fs_cstotal.cs_nffree--;
563 fs->fs_cs(fs, cg).cs_nffree--;
f3c028b7 564 }
aca50d72
KM
565 fs->fs_fmod++;
566 bdwrite(bp);
567 return (bprev);
07670f7d
KM
568}
569
502770a3
KM
570/*
571 * Determine whether a block can be allocated.
572 *
573 * Check to see if a block of the apprpriate size is available,
574 * and if it is, allocate it.
575 */
4f083fd7 576daddr_t
f7287e4b
KM
577alloccg(ip, cg, bpref, size)
578 struct inode *ip;
e3fe2d69
KM
579 int cg;
580 daddr_t bpref;
581 int size;
582{
f7287e4b 583 register struct fs *fs;
f3c028b7 584 register struct cg *cgp;
7188ac27 585 struct buf *bp;
f3c028b7 586 register int i;
7188ac27 587 int error, bno, frags, allocsiz;
e3fe2d69 588
f7287e4b 589 fs = ip->i_fs;
b6407c9d 590 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
ae851115 591 return (NULL);
ec67a3ce
MK
592#ifdef SECSIZE
593 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
594 fs->fs_dbsize);
595#else SECSIZE
7188ac27 596 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 597 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
598 if (error) {
599 brelse(bp);
600 return (NULL);
601 }
ec67a3ce 602#endif SECSIZE
e5476900 603 cgp = bp->b_un.b_cg;
7188ac27 604 if (!cg_chkmagic(cgp) ||
0f538882 605 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
d995d89d 606 brelse(bp);
ae851115 607 return (NULL);
d995d89d 608 }
95d0807c 609 cgp->cg_time = time.tv_sec;
b6407c9d 610 if (size == fs->fs_bsize) {
daaf7bee 611 bno = alloccgblk(fs, cgp, bpref);
f3c028b7
KM
612 bdwrite(bp);
613 return (bno);
614 }
615 /*
616 * check to see if any fragments are already available
617 * allocsiz is the size which will be allocated, hacking
618 * it down to a smaller size if necessary
619 */
d995d89d 620 frags = numfrags(fs, size);
b6407c9d 621 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
f3c028b7
KM
622 if (cgp->cg_frsum[allocsiz] != 0)
623 break;
b6407c9d 624 if (allocsiz == fs->fs_frag) {
f3c028b7
KM
625 /*
626 * no fragments were available, so a block will be
627 * allocated, and hacked up
628 */
0947395d 629 if (cgp->cg_cs.cs_nbfree == 0) {
f3c028b7 630 brelse(bp);
ae851115 631 return (NULL);
f3c028b7 632 }
daaf7bee 633 bno = alloccgblk(fs, cgp, bpref);
6994bf5d 634 bpref = dtogd(fs, bno);
b6407c9d 635 for (i = frags; i < fs->fs_frag; i++)
10adeb11 636 setbit(cg_blksfree(cgp), bpref + i);
b6407c9d 637 i = fs->fs_frag - frags;
0947395d
KM
638 cgp->cg_cs.cs_nffree += i;
639 fs->fs_cstotal.cs_nffree += i;
b6407c9d 640 fs->fs_cs(fs, cg).cs_nffree += i;
961945a8 641 fs->fs_fmod++;
f3c028b7
KM
642 cgp->cg_frsum[i]++;
643 bdwrite(bp);
644 return (bno);
645 }
743f1ef7 646 bno = mapsearch(fs, cgp, bpref, allocsiz);
0f538882
KM
647 if (bno < 0) {
648 brelse(bp);
ae851115 649 return (NULL);
0f538882 650 }
f3c028b7 651 for (i = 0; i < frags; i++)
10adeb11 652 clrbit(cg_blksfree(cgp), bno + i);
0947395d
KM
653 cgp->cg_cs.cs_nffree -= frags;
654 fs->fs_cstotal.cs_nffree -= frags;
b6407c9d 655 fs->fs_cs(fs, cg).cs_nffree -= frags;
961945a8 656 fs->fs_fmod++;
f3c028b7
KM
657 cgp->cg_frsum[allocsiz]--;
658 if (frags != allocsiz)
659 cgp->cg_frsum[allocsiz - frags]++;
660 bdwrite(bp);
661 return (cg * fs->fs_fpg + bno);
662}
663
502770a3
KM
664/*
665 * Allocate a block in a cylinder group.
666 *
667 * This algorithm implements the following policy:
668 * 1) allocate the requested block.
669 * 2) allocate a rotationally optimal block in the same cylinder.
670 * 3) allocate the next available block on the block rotor for the
671 * specified cylinder group.
672 * Note that this routine only allocates fs_bsize blocks; these
673 * blocks may be fragmented by the routine that allocates them.
674 */
f3c028b7 675daddr_t
daaf7bee 676alloccgblk(fs, cgp, bpref)
f7287e4b 677 register struct fs *fs;
f3c028b7
KM
678 register struct cg *cgp;
679 daddr_t bpref;
680{
743f1ef7 681 daddr_t bno;
ae851115 682 int cylno, pos, delta;
743f1ef7 683 short *cylbp;
aca50d72 684 register int i;
f3c028b7 685
743f1ef7
KM
686 if (bpref == 0) {
687 bpref = cgp->cg_rotor;
aca50d72
KM
688 goto norot;
689 }
a8580723 690 bpref = blknum(fs, bpref);
6994bf5d 691 bpref = dtogd(fs, bpref);
aca50d72
KM
692 /*
693 * if the requested block is available, use it
694 */
10adeb11 695 if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
aca50d72
KM
696 bno = bpref;
697 goto gotit;
698 }
aca50d72
KM
699 /*
700 * check for a block available on the same cylinder
aca50d72
KM
701 */
702 cylno = cbtocylno(fs, bpref);
10adeb11 703 if (cg_blktot(cgp)[cylno] == 0)
502770a3
KM
704 goto norot;
705 if (fs->fs_cpc == 0) {
706 /*
707 * block layout info is not available, so just have
708 * to take any block in this cylinder.
709 */
710 bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
711 goto norot;
712 }
aca50d72
KM
713 /*
714 * check the summary information to see if a block is
715 * available in the requested cylinder starting at the
4f083fd7 716 * requested rotational position and proceeding around.
aca50d72 717 */
10adeb11 718 cylbp = cg_blks(fs, cgp, cylno);
4f083fd7 719 pos = cbtorpos(fs, bpref);
10adeb11 720 for (i = pos; i < fs->fs_nrpos; i++)
aca50d72
KM
721 if (cylbp[i] > 0)
722 break;
10adeb11 723 if (i == fs->fs_nrpos)
aca50d72 724 for (i = 0; i < pos; i++)
743f1ef7
KM
725 if (cylbp[i] > 0)
726 break;
aca50d72
KM
727 if (cylbp[i] > 0) {
728 /*
729 * found a rotational position, now find the actual
730 * block. A panic if none is actually there.
731 */
732 pos = cylno % fs->fs_cpc;
733 bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
10adeb11 734 if (fs_postbl(fs, pos)[i] == -1) {
ffd90e52
KM
735 printf("pos = %d, i = %d, fs = %s\n",
736 pos, i, fs->fs_fsmnt);
aca50d72 737 panic("alloccgblk: cyl groups corrupted");
ffd90e52 738 }
10adeb11
KM
739 for (i = fs_postbl(fs, pos)[i];; ) {
740 if (isblock(fs, cg_blksfree(cgp), bno + i)) {
240a4664 741 bno = blkstofrags(fs, (bno + i));
aca50d72 742 goto gotit;
743f1ef7 743 }
10adeb11
KM
744 delta = fs_rotbl(fs)[i];
745 if (delta <= 0 ||
746 delta + i > fragstoblks(fs, fs->fs_fpg))
aca50d72 747 break;
ae851115 748 i += delta;
743f1ef7 749 }
ffd90e52 750 printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
aca50d72 751 panic("alloccgblk: can't find blk in cyl");
e3fe2d69 752 }
aca50d72
KM
753norot:
754 /*
755 * no blocks in the requested cylinder, so take next
756 * available one in this cylinder group.
757 */
b32450f4 758 bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
6459ebe0 759 if (bno < 0)
ae851115 760 return (NULL);
743f1ef7 761 cgp->cg_rotor = bno;
e3fe2d69 762gotit:
10adeb11 763 clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
0947395d
KM
764 cgp->cg_cs.cs_nbfree--;
765 fs->fs_cstotal.cs_nbfree--;
b6407c9d 766 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
502770a3 767 cylno = cbtocylno(fs, bno);
10adeb11
KM
768 cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
769 cg_blktot(cgp)[cylno]--;
e3fe2d69 770 fs->fs_fmod++;
743f1ef7 771 return (cgp->cg_cgx * fs->fs_fpg + bno);
e3fe2d69 772}
10adeb11 773
502770a3
KM
774/*
775 * Determine whether an inode can be allocated.
776 *
777 * Check to see if an inode is available, and if it is,
778 * allocate it using the following policy:
779 * 1) allocate the requested inode.
780 * 2) allocate the next available inode after the requested
781 * inode in the specified cylinder group.
782 */
4f083fd7 783ino_t
f7287e4b
KM
784ialloccg(ip, cg, ipref, mode)
785 struct inode *ip;
e3fe2d69
KM
786 int cg;
787 daddr_t ipref;
788 int mode;
789{
f7287e4b 790 register struct fs *fs;
f3c028b7 791 register struct cg *cgp;
4e0c7b8a 792 struct buf *bp;
7188ac27 793 int error, start, len, loc, map, i;
e3fe2d69 794
f7287e4b 795 fs = ip->i_fs;
b6407c9d 796 if (fs->fs_cs(fs, cg).cs_nifree == 0)
ae851115 797 return (NULL);
ec67a3ce
MK
798#ifdef SECSIZE
799 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
800 fs->fs_dbsize);
801#else SECSIZE
7188ac27 802 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 803 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
804 if (error) {
805 brelse(bp);
806 return (NULL);
807 }
ec67a3ce 808#endif SECSIZE
e5476900 809 cgp = bp->b_un.b_cg;
7188ac27 810 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
d995d89d 811 brelse(bp);
ae851115 812 return (NULL);
d995d89d 813 }
95d0807c 814 cgp->cg_time = time.tv_sec;
e3fe2d69
KM
815 if (ipref) {
816 ipref %= fs->fs_ipg;
10adeb11 817 if (isclr(cg_inosused(cgp), ipref))
e3fe2d69 818 goto gotit;
4e0c7b8a
KM
819 }
820 start = cgp->cg_irotor / NBBY;
821 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
10adeb11 822 loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
4e0c7b8a 823 if (loc == 0) {
e5889092
KM
824 len = start + 1;
825 start = 0;
10adeb11 826 loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
e5889092
KM
827 if (loc == 0) {
828 printf("cg = %s, irotor = %d, fs = %s\n",
829 cg, cgp->cg_irotor, fs->fs_fsmnt);
830 panic("ialloccg: map corrupted");
831 /* NOTREACHED */
832 }
4e0c7b8a
KM
833 }
834 i = start + len - loc;
10adeb11 835 map = cg_inosused(cgp)[i];
4e0c7b8a
KM
836 ipref = i * NBBY;
837 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
838 if ((map & i) == 0) {
e3fe2d69
KM
839 cgp->cg_irotor = ipref;
840 goto gotit;
841 }
842 }
4e0c7b8a
KM
843 printf("fs = %s\n", fs->fs_fsmnt);
844 panic("ialloccg: block not in map");
845 /* NOTREACHED */
e3fe2d69 846gotit:
10adeb11 847 setbit(cg_inosused(cgp), ipref);
0947395d
KM
848 cgp->cg_cs.cs_nifree--;
849 fs->fs_cstotal.cs_nifree--;
b6407c9d 850 fs->fs_cs(fs, cg).cs_nifree--;
e3fe2d69
KM
851 fs->fs_fmod++;
852 if ((mode & IFMT) == IFDIR) {
0947395d
KM
853 cgp->cg_cs.cs_ndir++;
854 fs->fs_cstotal.cs_ndir++;
b6407c9d 855 fs->fs_cs(fs, cg).cs_ndir++;
e3fe2d69
KM
856 }
857 bdwrite(bp);
858 return (cg * fs->fs_ipg + ipref);
859}
860
502770a3
KM
861/*
862 * Free a block or fragment.
863 *
864 * The specified block or fragment is placed back in the
865 * free map. If a fragment is deallocated, a possible
866 * block reassembly is checked.
867 */
ced3a252 868blkfree(ip, bno, size)
f7287e4b 869 register struct inode *ip;
e3fe2d69 870 daddr_t bno;
daaf7bee 871 off_t size;
e3fe2d69
KM
872{
873 register struct fs *fs;
874 register struct cg *cgp;
7188ac27
KM
875 struct buf *bp;
876 int error, cg, blk, frags, bbase;
f3c028b7 877 register int i;
c6f5111d 878 struct ucred *cred = curproc->p_ucred; /* XXX */
e3fe2d69 879
f7287e4b 880 fs = ip->i_fs;
ffd90e52
KM
881 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
882 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
883 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
ced3a252 884 panic("blkfree: bad size");
ffd90e52 885 }
6994bf5d 886 cg = dtog(fs, bno);
e2546752 887 if ((unsigned)bno >= fs->fs_size) {
6459ebe0 888 printf("bad block %d, ino %d\n", bno, ip->i_number);
e2546752 889 fserr(fs, cred->cr_uid, "bad block");
e3fe2d69 890 return;
6459ebe0 891 }
ec67a3ce
MK
892#ifdef SECSIZE
893 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
894 fs->fs_dbsize);
895#else SECSIZE
7188ac27 896 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 897 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
898 if (error) {
899 brelse(bp);
900 return;
901 }
ec67a3ce 902#endif SECSIZE
e5476900 903 cgp = bp->b_un.b_cg;
7188ac27 904 if (!cg_chkmagic(cgp)) {
d995d89d 905 brelse(bp);
e3fe2d69 906 return;
d995d89d 907 }
95d0807c 908 cgp->cg_time = time.tv_sec;
6994bf5d 909 bno = dtogd(fs, bno);
b6407c9d 910 if (size == fs->fs_bsize) {
10adeb11 911 if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
ffd90e52
KM
912 printf("dev = 0x%x, block = %d, fs = %s\n",
913 ip->i_dev, bno, fs->fs_fsmnt);
ced3a252 914 panic("blkfree: freeing free block");
6459ebe0 915 }
10adeb11 916 setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
0947395d
KM
917 cgp->cg_cs.cs_nbfree++;
918 fs->fs_cstotal.cs_nbfree++;
b6407c9d 919 fs->fs_cs(fs, cg).cs_nbfree++;
502770a3 920 i = cbtocylno(fs, bno);
10adeb11
KM
921 cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
922 cg_blktot(cgp)[i]++;
07670f7d 923 } else {
a8580723 924 bbase = bno - fragnum(fs, bno);
f3c028b7
KM
925 /*
926 * decrement the counts associated with the old frags
927 */
10adeb11 928 blk = blkmap(fs, cg_blksfree(cgp), bbase);
b6407c9d 929 fragacct(fs, blk, cgp->cg_frsum, -1);
f3c028b7
KM
930 /*
931 * deallocate the fragment
932 */
d995d89d 933 frags = numfrags(fs, size);
f3c028b7 934 for (i = 0; i < frags; i++) {
10adeb11 935 if (isset(cg_blksfree(cgp), bno + i)) {
ffd90e52
KM
936 printf("dev = 0x%x, block = %d, fs = %s\n",
937 ip->i_dev, bno + i, fs->fs_fsmnt);
ced3a252 938 panic("blkfree: freeing free frag");
ffd90e52 939 }
10adeb11 940 setbit(cg_blksfree(cgp), bno + i);
07670f7d 941 }
ae851115
KM
942 cgp->cg_cs.cs_nffree += i;
943 fs->fs_cstotal.cs_nffree += i;
944 fs->fs_cs(fs, cg).cs_nffree += i;
f3c028b7
KM
945 /*
946 * add back in counts associated with the new frags
947 */
10adeb11 948 blk = blkmap(fs, cg_blksfree(cgp), bbase);
b6407c9d 949 fragacct(fs, blk, cgp->cg_frsum, 1);
f3c028b7
KM
950 /*
951 * if a complete block has been reassembled, account for it
952 */
9523729e
KM
953 if (isblock(fs, cg_blksfree(cgp),
954 (daddr_t)fragstoblks(fs, bbase))) {
b6407c9d
KM
955 cgp->cg_cs.cs_nffree -= fs->fs_frag;
956 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
957 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
0947395d
KM
958 cgp->cg_cs.cs_nbfree++;
959 fs->fs_cstotal.cs_nbfree++;
b6407c9d 960 fs->fs_cs(fs, cg).cs_nbfree++;
502770a3 961 i = cbtocylno(fs, bbase);
10adeb11
KM
962 cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
963 cg_blktot(cgp)[i]++;
07670f7d
KM
964 }
965 }
e3fe2d69 966 fs->fs_fmod++;
e3fe2d69
KM
967 bdwrite(bp);
968}
969
502770a3
KM
970/*
971 * Free an inode.
972 *
973 * The specified inode is placed back in the free map.
974 */
f7287e4b
KM
975ifree(ip, ino, mode)
976	struct inode *ip;
e3fe2d69
KM
977	ino_t ino;
978	int mode;
979{
980	register struct fs *fs;
981	register struct cg *cgp;
7188ac27
KM
982	struct buf *bp;
983	int error, cg;
e3fe2d69 984
f7287e4b 985	fs = ip->i_fs;
ffd90e52
KM
	/*
	 * Sanity check: the inode number must lie within the
	 * filesystem's inode space (ipg inodes per cg, ncg groups).
	 */
986	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
987		printf("dev = 0x%x, ino = %d, fs = %s\n",
988		    ip->i_dev, ino, fs->fs_fsmnt);
e3fe2d69 989		panic("ifree: range");
ffd90e52 990	}
6994bf5d 991	cg = itog(fs, ino);
ec67a3ce
MK
	/*
	 * Read the cylinder group block that holds this inode's
	 * allocation bitmap; on a read error just give up silently.
	 */
992#ifdef SECSIZE
993	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
994	    fs->fs_dbsize);
995#else SECSIZE
7188ac27 996	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 997		(int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
998	if (error) {
999		brelse(bp);
1000		return;
1001	}
ec67a3ce 1002#endif SECSIZE
e5476900 1003	cgp = bp->b_un.b_cg;
	/* Ignore the request if the cg block fails its magic check. */
7188ac27 1004	if (!cg_chkmagic(cgp)) {
d995d89d 1005		brelse(bp);
e3fe2d69 1006		return;
d995d89d 1007	}
95d0807c 1008	cgp->cg_time = time.tv_sec;
e3fe2d69 1009	ino %= fs->fs_ipg;
	/*
	 * Freeing an already-free inode is a hard error on a
	 * writable filesystem; tolerated read-only (fsck may be at work).
	 */
10adeb11 1010	if (isclr(cg_inosused(cgp), ino)) {
ffd90e52
KM
1011		printf("dev = 0x%x, ino = %d, fs = %s\n",
1012		    ip->i_dev, ino, fs->fs_fsmnt);
b9ccf514
KM
1013		if (fs->fs_ronly == 0)
1014			panic("ifree: freeing free inode");
ffd90e52 1015	}
	/* Mark the inode free in the per-cg inode bitmap. */
10adeb11 1016	clrbit(cg_inosused(cgp), ino);
4e0c7b8a
KM
	/*
	 * NOTE(review): cg_irotor appears to be the rotor used by the
	 * inode-allocation search; keeping it at the lowest free inode
	 * presumably speeds the next allocation -- confirm in ialloccg().
	 */
1017	if (ino < cgp->cg_irotor)
1018		cgp->cg_irotor = ino;
0947395d
KM
	/* Credit the freed inode at cg, summary-area, and total levels. */
1019	cgp->cg_cs.cs_nifree++;
1020	fs->fs_cstotal.cs_nifree++;
b6407c9d 1021	fs->fs_cs(fs, cg).cs_nifree++;
	/* A freed directory also decrements the directory counts. */
e3fe2d69 1022	if ((mode & IFMT) == IFDIR) {
0947395d
KM
1023		cgp->cg_cs.cs_ndir--;
1024		fs->fs_cstotal.cs_ndir--;
b6407c9d 1025		fs->fs_cs(fs, cg).cs_ndir--;
e3fe2d69
KM
1026	}
	/* Flag the superblock as modified; delayed-write the cg block. */
1027	fs->fs_fmod++;
1028	bdwrite(bp);
1029}
1030
743f1ef7 1031/*
502770a3
KM
1032 * Find a block of the specified size in the specified cylinder group.
1033 *
743f1ef7
KM
1034 * It is a panic if a request is made to find a block if none are
1035 * available.
1036 */
1037daddr_t
1038mapsearch(fs, cgp, bpref, allocsiz)
1039	register struct fs *fs;
1040	register struct cg *cgp;
1041	daddr_t bpref;
1042	int allocsiz;
1043{
1044	daddr_t bno;
1045	int start, len, loc, i;
1046	int blk, field, subfield, pos;
1047
1048	/*
1049	 * find the fragment by searching through the free block
1050	 * map for an appropriate bit pattern
1051	 */
	/* Start at the preferred block's byte, else at the cg's rotor. */
1052	if (bpref)
6994bf5d 1053		start = dtogd(fs, bpref) / NBBY;
743f1ef7
KM
1054	else
1055		start = cgp->cg_frotor / NBBY;
942bd18b 1056	len = howmany(fs->fs_fpg, NBBY) - start;
9523729e
KM
	/*
	 * NOTE(review): fragtbl[fs_frag] appears to classify, per map
	 * byte, which free-fragment run lengths it contains, so scanc()
	 * can locate a byte holding a run of >= allocsiz frags in one
	 * pass -- confirm against the fragtbl construction in ffs_tables.
	 */
1057	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
1058		(u_char *)fragtbl[fs->fs_frag],
1059		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	/*
	 * Nothing found from the rotor to the end of the map: wrap
	 * around and scan the prefix (bytes 0..start) as well.
	 */
743f1ef7 1060	if (loc == 0) {
e5476900
KM
1061		len = start + 1;
1062		start = 0;
9523729e
KM
1063		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
1064			(u_char *)fragtbl[fs->fs_frag],
1065			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
4e0c7b8a
KM
		/*
		 * The caller guaranteed space was available, so an
		 * empty full-map scan means the map is corrupt.
		 */
1066		if (loc == 0) {
1067			printf("start = %d, len = %d, fs = %s\n",
1068			    start, len, fs->fs_fsmnt);
1069			panic("alloccg: map corrupted");
e5889092 1070			/* NOTREACHED */
4e0c7b8a 1071		}
743f1ef7
KM
1072	}
	/*
	 * NOTE(review): scanc() seems to return the count of bytes left
	 * when it stopped, so (start + len - loc) is the matching byte
	 * index; * NBBY converts it to a fragment number -- verify.
	 */
1073	bno = (start + len - loc) * NBBY;
1074	cgp->cg_frotor = bno;
1075	/*
1076	 * found the byte in the map
1077	 * sift through the bits to find the selected frag
1078	 */
	/* Walk the block-sized windows covering the matched byte. */
ae851115 1079	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
10adeb11 1080		blk = blkmap(fs, cg_blksfree(cgp), bno);
743f1ef7
KM
		/*
		 * NOTE(review): the extra shift presumably aligns blk
		 * with the encoding of the around[]/inside[] mask
		 * tables -- confirm against their definitions.
		 */
1081		blk <<= 1;
1082		field = around[allocsiz];
1083		subfield = inside[allocsiz];
		/*
		 * Slide an allocsiz-wide mask across the block: a hit
		 * means a free run of exactly the requested length
		 * starts at fragment (bno + pos).
		 */
b6407c9d 1084		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
ae851115
KM
1085			if ((blk & field) == subfield)
1086				return (bno + pos);
743f1ef7
KM
1087			field <<= 1;
1088			subfield <<= 1;
1089		}
1090	}
	/*
	 * The byte-level scan promised a fit but the bit-level sift
	 * found none: the free map is internally inconsistent.
	 */
ffd90e52 1091	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
743f1ef7 1092	panic("alloccg: block not in map");
e5476900 1093	return (-1);
743f1ef7
KM
1094}
1095
e3fe2d69 1096/*
502770a3
KM
1097 * Fserr prints the name of a file system with an error diagnostic.
1098 *
1099 * The form of the error message is:
e3fe2d69
KM
1100 * fs: error message
1101 */
e2546752 1102fserr(fs, uid, cp)
e3fe2d69 1103	struct fs *fs;
e2546752 1104	uid_t uid;
e3fe2d69
KM
1105	char *cp;
1106{
1107
	/*
	 * Log the diagnostic at LOG_ERR, tagging it with the uid that
	 * triggered the error and the mount point of the filesystem.
	 */
e2546752 1108	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
e3fe2d69 1109}