merge in vnodes
[unix-history] / usr / src / sys / ufs / ffs / ffs_alloc.c
CommitLineData
da7c5cc6 1/*
7188ac27 2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
202a4bd9 3 * All rights reserved.
da7c5cc6 4 *
202a4bd9 5 * Redistribution and use in source and binary forms are permitted
616d42db
KB
6 * provided that the above copyright notice and this paragraph are
7 * duplicated in all such forms and that any documentation,
8 * advertising materials, and other materials related to such
9 * distribution and use acknowledge that the software was developed
10 * by the University of California, Berkeley. The name of the
11 * University may not be used to endorse or promote products derived
12 * from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
202a4bd9 16 *
7188ac27 17 * @(#)ffs_alloc.c 7.9 (Berkeley) %G%
da7c5cc6 18 */
e3fe2d69 19
94368568
JB
20#include "param.h"
21#include "systm.h"
22#include "mount.h"
94368568 23#include "buf.h"
94368568 24#include "user.h"
7188ac27 25#include "vnode.h"
94368568 26#include "kernel.h"
7580177b 27#include "syslog.h"
aec7dd3b 28#include "cmap.h"
7188ac27
KM
29#include "../ufs/quota.h"
30#include "../ufs/inode.h"
31#include "../ufs/fs.h"
e3fe2d69 32
daaf7bee 33extern u_long hashalloc();
4f083fd7
SL
34extern ino_t ialloccg();
35extern daddr_t alloccg();
743f1ef7
KM
36extern daddr_t alloccgblk();
37extern daddr_t fragextend();
38extern daddr_t blkpref();
39extern daddr_t mapsearch();
1d7a08c5 40extern int inside[], around[];
b6407c9d 41extern unsigned char *fragtbl[];
e3fe2d69 42
502770a3
KM
43/*
44 * Allocate a block in the file system.
45 *
46 * The size of the requested block is given, which must be some
47 * multiple of fs_fsize and <= fs_bsize.
48 * A preference may be optionally specified. If a preference is given
49 * the following hierarchy is used to allocate a block:
50 * 1) allocate the requested block.
51 * 2) allocate a rotationally optimal block in the same cylinder.
52 * 3) allocate a block in the same cylinder group.
53 * 4) quadratically rehash into other cylinder groups, until an
54 * available block is located.
55 * If no block preference is given the following hierarchy is used
56 * to allocate a block:
57 * 1) allocate a block in the cylinder group that contains the
58 * inode for the file.
59 * 2) quadratically rehash into other cylinder groups, until an
60 * available block is located.
61 */
7188ac27 62alloc(ip, bpref, size, bpp, flags)
f3c028b7 63 register struct inode *ip;
e3fe2d69
KM
64 daddr_t bpref;
65 int size;
7188ac27
KM
66 struct buf **bpp;
67 int flags;
e3fe2d69
KM
68{
69 daddr_t bno;
70 register struct fs *fs;
f3c028b7 71 register struct buf *bp;
7188ac27 72 int cg, error;
e3fe2d69 73
7188ac27 74 *bpp = 0;
f7287e4b 75 fs = ip->i_fs;
ffd90e52
KM
76 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
77 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
78 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
b6407c9d 79 panic("alloc: bad size");
ffd90e52 80 }
b6407c9d 81 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
0947395d 82 goto nospace;
240a4664 83 if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
e3fe2d69 84 goto nospace;
b4567e9c 85#ifdef QUOTA
7188ac27
KM
86 if (error = chkdq(ip, (long)btodb(size), 0))
87 return (error);
ca90a6bf 88#endif
260e5e3c
KM
89 if (bpref >= fs->fs_size)
90 bpref = 0;
e3fe2d69 91 if (bpref == 0)
6994bf5d 92 cg = itog(fs, ip->i_number);
e3fe2d69 93 else
6994bf5d 94 cg = dtog(fs, bpref);
4f083fd7
SL
95 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
96 (u_long (*)())alloccg);
6459ebe0 97 if (bno <= 0)
e3fe2d69 98 goto nospace;
2523b389
SL
99 ip->i_blocks += btodb(size);
100 ip->i_flag |= IUPD|ICHG;
ec67a3ce
MK
101#ifdef SECSIZE
102 bp = getblk(ip->i_dev, fsbtodb(fs, bno), size, fs->fs_dbsize);
103#else SECSIZE
7188ac27
KM
104 bp = getblk(ip->i_devvp, fsbtodb(fs, bno), size);
105 if (flags & B_CLRBUF)
106 clrbuf(bp);
107 *bpp = bp;
108 return (0);
e3fe2d69
KM
109nospace:
110 fserr(fs, "file system full");
111 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 112 return (ENOSPC);
e3fe2d69
KM
113}
114
502770a3
KM
115/*
116 * Reallocate a fragment to a bigger size
117 *
118 * The number and size of the old block is given, and a preference
119 * and new size is also specified. The allocator attempts to extend
120 * the original block. Failing that, the regular block allocator is
121 * invoked to get an appropriate block.
122 */
7188ac27 123realloccg(ip, bprev, bpref, osize, nsize, bpp)
f7287e4b 124 register struct inode *ip;
743f1ef7 125 daddr_t bprev, bpref;
07670f7d 126 int osize, nsize;
7188ac27 127 struct buf **bpp;
07670f7d 128{
07670f7d 129 register struct fs *fs;
7188ac27 130 struct buf *bp, *obp;
f7fa0c54 131 int cg, request;
5d934daa 132 daddr_t bno, bn;
7188ac27 133 int i, error, count;
07670f7d 134
7188ac27 135 *bpp = 0;
f7287e4b 136 fs = ip->i_fs;
d995d89d 137 if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
ffd90e52
KM
138 (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
139 printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
140 ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
b6407c9d 141 panic("realloccg: bad size");
ffd90e52 142 }
240a4664 143 if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
0947395d 144 goto nospace;
ffd90e52
KM
145 if (bprev == 0) {
146 printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
147 ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
502770a3 148 panic("realloccg: bad bprev");
ffd90e52 149 }
b4567e9c 150#ifdef QUOTA
7188ac27
KM
151 if (error = chkdq(ip, (long)btodb(nsize - osize), 0))
152 return (error);
ca90a6bf 153#endif
ae851115 154 cg = dtog(fs, bprev);
f7287e4b 155 bno = fragextend(ip, cg, (long)bprev, osize, nsize);
f3c028b7 156 if (bno != 0) {
9d6d37ce 157 do {
ec67a3ce
MK
158#ifdef SECSIZE
159 bp = bread(ip->i_dev, fsbtodb(fs, bno), osize,
160 fs->fs_dbsize);
161#else SECSIZE
7188ac27
KM
162 error = bread(ip->i_devvp, fsbtodb(fs, bno),
163 osize, &bp);
164 if (error) {
9d6d37ce 165 brelse(bp);
7188ac27 166 return (error);
9d6d37ce
BJ
167 }
168 } while (brealloc(bp, nsize) == 0);
169 bp->b_flags |= B_DONE;
4f083fd7 170 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
2523b389
SL
171 ip->i_blocks += btodb(nsize - osize);
172 ip->i_flag |= IUPD|ICHG;
7188ac27
KM
173 *bpp = bp;
174 return (0);
f3c028b7 175 }
260e5e3c
KM
176 if (bpref >= fs->fs_size)
177 bpref = 0;
aec7dd3b 178 switch ((int)fs->fs_optim) {
f8484b5f
KM
179 case FS_OPTSPACE:
180 /*
181 * Allocate an exact sized fragment. Although this makes
182 * best use of space, we will waste time relocating it if
183 * the file continues to grow. If the fragmentation is
184 * less than half of the minimum free reserve, we choose
185 * to begin optimizing for time.
186 */
f7fa0c54 187 request = nsize;
f8484b5f
KM
188 if (fs->fs_minfree < 5 ||
189 fs->fs_cstotal.cs_nffree >
190 fs->fs_dsize * fs->fs_minfree / (2 * 100))
191 break;
192 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
193 fs->fs_fsmnt);
194 fs->fs_optim = FS_OPTTIME;
195 break;
196 case FS_OPTTIME:
197 /*
198 * At this point we have discovered a file that is trying
199 * to grow a small fragment to a larger fragment. To save
200 * time, we allocate a full sized block, then free the
201 * unused portion. If the file continues to grow, the
202 * `fragextend' call above will be able to grow it in place
203 * without further copying. If aberrant programs cause
204 * disk fragmentation to grow within 2% of the free reserve,
205 * we choose to begin optimizing for space.
206 */
f7fa0c54 207 request = fs->fs_bsize;
f8484b5f
KM
208 if (fs->fs_cstotal.cs_nffree <
209 fs->fs_dsize * (fs->fs_minfree - 2) / 100)
210 break;
211 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
212 fs->fs_fsmnt);
213 fs->fs_optim = FS_OPTSPACE;
214 break;
215 default:
216 printf("dev = 0x%x, optim = %d, fs = %s\n",
217 ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
218 panic("realloccg: bad optim");
219 /* NOTREACHED */
220 }
f7fa0c54 221 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
4f083fd7 222 (u_long (*)())alloccg);
6459ebe0 223 if (bno > 0) {
ec67a3ce
MK
224#ifdef SECSIZE
225 obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize,
226 fs->fs_dbsize);
227#else SECSIZE
7188ac27
KM
228 error = bread(ip->i_devvp, fsbtodb(fs, bprev), osize, &obp);
229 if (error) {
d995d89d 230 brelse(obp);
7188ac27 231 return (error);
d995d89d 232 }
5d934daa 233 bn = fsbtodb(fs, bno);
ec67a3ce
MK
234#ifdef SECSIZE
235 bp = getblk(ip->i_dev, bn, nsize, fs->fs_dbsize);
236#else SECSIZE
7188ac27 237 bp = getblk(ip->i_devvp, bn, nsize);
ec67a3ce 238#endif SECSIZE
260ede8e 239 bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
ec67a3ce
MK
240 count = howmany(osize, CLBYTES);
241 for (i = 0; i < count; i++)
242#ifdef SECSIZE
243 munhash(ip->i_dev, bn + i * CLBYTES / fs->fs_dbsize);
244#else SECSIZE
245 munhash(ip->i_dev, bn + i * CLBYTES / DEV_BSIZE);
246#endif SECSIZE
260ede8e
KM
247 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
248 if (obp->b_flags & B_DELWRI) {
249 obp->b_flags &= ~B_DELWRI;
250 u.u_ru.ru_oublock--; /* delete charge */
251 }
f3c028b7 252 brelse(obp);
ced3a252 253 blkfree(ip, bprev, (off_t)osize);
f7fa0c54 254 if (nsize < request)
ced3a252 255 blkfree(ip, bno + numfrags(fs, nsize),
f7fa0c54 256 (off_t)(request - nsize));
2523b389
SL
257 ip->i_blocks += btodb(nsize - osize);
258 ip->i_flag |= IUPD|ICHG;
7188ac27
KM
259 *bpp = bp;
260 return (0);
f3c028b7 261 }
0947395d 262nospace:
f3c028b7
KM
263 /*
264 * no space available
265 */
07670f7d
KM
266 fserr(fs, "file system full");
267 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 268 return (ENOSPC);
07670f7d
KM
269}
270
502770a3
KM
271/*
272 * Allocate an inode in the file system.
273 *
274 * A preference may be optionally specified. If a preference is given
275 * the following hierarchy is used to allocate an inode:
276 * 1) allocate the requested inode.
277 * 2) allocate an inode in the same cylinder group.
278 * 3) quadratically rehash into other cylinder groups, until an
279 * available inode is located.
280 * If no inode preference is given the following hierarchy is used
281 * to allocate an inode:
282 * 1) allocate an inode in cylinder group 0.
283 * 2) quadratically rehash into other cylinder groups, until an
284 * available inode is located.
285 */
7188ac27 286ialloc(pip, ipref, mode, ipp)
f7287e4b 287 register struct inode *pip;
e3fe2d69
KM
288 ino_t ipref;
289 int mode;
7188ac27 290 struct inode **ipp;
e3fe2d69 291{
daaf7bee 292 ino_t ino;
e3fe2d69
KM
293 register struct fs *fs;
294 register struct inode *ip;
7188ac27 295 int cg, error;
e3fe2d69 296
7188ac27 297 *ipp = 0;
f7287e4b 298 fs = pip->i_fs;
0947395d 299 if (fs->fs_cstotal.cs_nifree == 0)
e3fe2d69 300 goto noinodes;
b4567e9c 301#ifdef QUOTA
7188ac27
KM
302 if (error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0))
303 return (error);
ca90a6bf 304#endif
260e5e3c
KM
305 if (ipref >= fs->fs_ncg * fs->fs_ipg)
306 ipref = 0;
6994bf5d 307 cg = itog(fs, ipref);
f7287e4b 308 ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
e3fe2d69
KM
309 if (ino == 0)
310 goto noinodes;
7188ac27
KM
311 error = iget(pip, ino, ipp);
312 ip = *ipp;
313 if (error) {
bcd8515e 314 ifree(pip, ino, 0);
7188ac27 315 return (error);
e3fe2d69 316 }
ffd90e52
KM
317 if (ip->i_mode) {
318 printf("mode = 0%o, inum = %d, fs = %s\n",
319 ip->i_mode, ip->i_number, fs->fs_fsmnt);
e3fe2d69 320 panic("ialloc: dup alloc");
ffd90e52 321 }
2523b389
SL
322 if (ip->i_blocks) { /* XXX */
323 printf("free inode %s/%d had %d blocks\n",
324 fs->fs_fsmnt, ino, ip->i_blocks);
325 ip->i_blocks = 0;
326 }
7188ac27 327 return (0);
e3fe2d69
KM
328noinodes:
329 fserr(fs, "out of inodes");
ae851115 330 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
7188ac27 331 return (ENOSPC);
e3fe2d69
KM
332}
333
743f1ef7 334/*
502770a3
KM
335 * Find a cylinder to place a directory.
336 *
337 * The policy implemented by this algorithm is to select from
338 * among those cylinder groups with above the average number of
339 * free inodes, the one with the smallest number of directories.
743f1ef7 340 */
4f083fd7 341ino_t
f7287e4b 342dirpref(fs)
e3fe2d69 343 register struct fs *fs;
f7287e4b 344{
743f1ef7 345 int cg, minndir, mincg, avgifree;
e3fe2d69 346
0947395d 347 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
743f1ef7 348 minndir = fs->fs_ipg;
e3fe2d69 349 mincg = 0;
743f1ef7 350 for (cg = 0; cg < fs->fs_ncg; cg++)
b6407c9d
KM
351 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
352 fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
e3fe2d69 353 mincg = cg;
b6407c9d 354 minndir = fs->fs_cs(fs, cg).cs_ndir;
e3fe2d69 355 }
4f083fd7 356 return ((ino_t)(fs->fs_ipg * mincg));
e3fe2d69
KM
357}
358
743f1ef7 359/*
4f083fd7
SL
360 * Select the desired position for the next block in a file. The file is
361 * logically divided into sections. The first section is composed of the
362 * direct blocks. Each additional section contains fs_maxbpg blocks.
363 *
364 * If no blocks have been allocated in the first section, the policy is to
365 * request a block in the same cylinder group as the inode that describes
366 * the file. If no blocks have been allocated in any other section, the
367 * policy is to place the section in a cylinder group with a greater than
368 * average number of free blocks. An appropriate cylinder group is found
16e7863f
KM
369 * by using a rotor that sweeps the cylinder groups. When a new group of
370 * blocks is needed, the sweep begins in the cylinder group following the
371 * cylinder group from which the previous allocation was made. The sweep
372 * continues until a cylinder group with greater than the average number
373 * of free blocks is found. If the allocation is for the first block in an
374 * indirect block, the information on the previous allocation is unavailable;
375 * here a best guess is made based upon the logical block number being
376 * allocated.
4f083fd7
SL
377 *
378 * If a section is already partially allocated, the policy is to
379 * contiguously allocate fs_maxcontig blocks. The end of one of these
380 * contiguous blocks and the beginning of the next is physically separated
381 * so that the disk head will be in transit between them for at least
382 * fs_rotdelay milliseconds. This is to allow time for the processor to
383 * schedule another I/O transfer.
743f1ef7 384 */
daaf7bee 385daddr_t
4f083fd7
SL
386blkpref(ip, lbn, indx, bap)
387 struct inode *ip;
388 daddr_t lbn;
389 int indx;
390 daddr_t *bap;
f7287e4b 391{
4f083fd7 392 register struct fs *fs;
16e7863f
KM
393 register int cg;
394 int avgbfree, startcg;
4f083fd7 395 daddr_t nextblk;
743f1ef7 396
4f083fd7
SL
397 fs = ip->i_fs;
398 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
399 if (lbn < NDADDR) {
400 cg = itog(fs, ip->i_number);
b6407c9d 401 return (fs->fs_fpg * cg + fs->fs_frag);
743f1ef7 402 }
4f083fd7
SL
403 /*
404 * Find a cylinder with greater than average number of
405 * unused data blocks.
406 */
16e7863f
KM
407 if (indx == 0 || bap[indx - 1] == 0)
408 startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
409 else
410 startcg = dtog(fs, bap[indx - 1]) + 1;
411 startcg %= fs->fs_ncg;
4f083fd7 412 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
16e7863f 413 for (cg = startcg; cg < fs->fs_ncg; cg++)
4f083fd7
SL
414 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
415 fs->fs_cgrotor = cg;
416 return (fs->fs_fpg * cg + fs->fs_frag);
417 }
16e7863f 418 for (cg = 0; cg <= startcg; cg++)
4f083fd7
SL
419 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
420 fs->fs_cgrotor = cg;
421 return (fs->fs_fpg * cg + fs->fs_frag);
422 }
423 return (NULL);
424 }
425 /*
426 * One or more previous blocks have been laid out. If less
427 * than fs_maxcontig previous blocks are contiguous, the
428 * next block is requested contiguously, otherwise it is
429 * requested rotationally delayed by fs_rotdelay milliseconds.
430 */
431 nextblk = bap[indx - 1] + fs->fs_frag;
432 if (indx > fs->fs_maxcontig &&
240a4664 433 bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
4f083fd7
SL
434 != nextblk)
435 return (nextblk);
436 if (fs->fs_rotdelay != 0)
437 /*
438 * Here we convert ms of delay to frags as:
439 * (frags) = (ms) * (rev/sec) * (sect/rev) /
440 * ((sect/frag) * (ms/sec))
441 * then round up to the next block.
442 */
443 nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
444 (NSPF(fs) * 1000), fs->fs_frag);
445 return (nextblk);
743f1ef7
KM
446}
447
502770a3
KM
448/*
449 * Implement the cylinder overflow algorithm.
450 *
451 * The policy implemented by this algorithm is:
452 * 1) allocate the block in its requested cylinder group.
453 * 2) quadratically rehash on the cylinder group number.
454 * 3) brute force search for a free block.
455 */
daaf7bee
KM
456/*VARARGS5*/
457u_long
f7287e4b
KM
458hashalloc(ip, cg, pref, size, allocator)
459 struct inode *ip;
e3fe2d69
KM
460 int cg;
461 long pref;
462 int size; /* size for data blocks, mode for inodes */
daaf7bee 463 u_long (*allocator)();
e3fe2d69 464{
f7287e4b 465 register struct fs *fs;
e3fe2d69
KM
466 long result;
467 int i, icg = cg;
468
f7287e4b 469 fs = ip->i_fs;
e3fe2d69
KM
470 /*
471 * 1: preferred cylinder group
472 */
f7287e4b 473 result = (*allocator)(ip, cg, pref, size);
e3fe2d69
KM
474 if (result)
475 return (result);
476 /*
477 * 2: quadratic rehash
478 */
479 for (i = 1; i < fs->fs_ncg; i *= 2) {
480 cg += i;
481 if (cg >= fs->fs_ncg)
482 cg -= fs->fs_ncg;
f7287e4b 483 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
484 if (result)
485 return (result);
486 }
487 /*
488 * 3: brute force search
620b3290
SL
489 * Note that we start at i == 2, since 0 was checked initially,
490 * and 1 is always checked in the quadratic rehash.
e3fe2d69 491 */
2136305e 492 cg = (icg + 2) % fs->fs_ncg;
620b3290 493 for (i = 2; i < fs->fs_ncg; i++) {
f7287e4b 494 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
495 if (result)
496 return (result);
497 cg++;
498 if (cg == fs->fs_ncg)
499 cg = 0;
500 }
ae851115 501 return (NULL);
e3fe2d69
KM
502}
503
502770a3
KM
504/*
505 * Determine whether a fragment can be extended.
506 *
507 * Check to see if the necessary fragments are available, and
508 * if they are, allocate them.
509 */
07670f7d 510daddr_t
f7287e4b
KM
511fragextend(ip, cg, bprev, osize, nsize)
512 struct inode *ip;
07670f7d 513 int cg;
f3c028b7 514 long bprev;
07670f7d
KM
515 int osize, nsize;
516{
f7287e4b 517 register struct fs *fs;
f3c028b7 518 register struct cg *cgp;
7188ac27 519 struct buf *bp;
f3c028b7
KM
520 long bno;
521 int frags, bbase;
7188ac27 522 int i, error;
07670f7d 523
f7287e4b 524 fs = ip->i_fs;
a8580723 525 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
e5476900 526 return (NULL);
d995d89d 527 frags = numfrags(fs, nsize);
a8580723
KM
528 bbase = fragnum(fs, bprev);
529 if (bbase > fragnum(fs, (bprev + frags - 1))) {
ec67a3ce 530 /* cannot extend across a block boundary */
ae851115 531 return (NULL);
f3c028b7 532 }
ec67a3ce
MK
533#ifdef SECSIZE
534 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
535 fs->fs_dbsize);
536#else SECSIZE
7188ac27
KM
537 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
538 (int)fs->fs_cgsize, &bp);
539 if (error) {
540 brelse(bp);
541 return (NULL);
542 }
ec67a3ce 543#endif SECSIZE
e5476900 544 cgp = bp->b_un.b_cg;
7188ac27 545 if (!cg_chkmagic(cgp)) {
d995d89d 546 brelse(bp);
ae851115 547 return (NULL);
d995d89d 548 }
95d0807c 549 cgp->cg_time = time.tv_sec;
6994bf5d 550 bno = dtogd(fs, bprev);
d995d89d 551 for (i = numfrags(fs, osize); i < frags; i++)
10adeb11 552 if (isclr(cg_blksfree(cgp), bno + i)) {
aca50d72 553 brelse(bp);
ae851115 554 return (NULL);
aca50d72
KM
555 }
556 /*
557 * the current fragment can be extended
558 * deduct the count on fragment being extended into
559 * increase the count on the remaining fragment (if any)
560 * allocate the extended piece
561 */
562 for (i = frags; i < fs->fs_frag - bbase; i++)
10adeb11 563 if (isclr(cg_blksfree(cgp), bno + i))
f3c028b7 564 break;
d995d89d 565 cgp->cg_frsum[i - numfrags(fs, osize)]--;
aca50d72
KM
566 if (i != frags)
567 cgp->cg_frsum[i - frags]++;
d995d89d 568 for (i = numfrags(fs, osize); i < frags; i++) {
10adeb11 569 clrbit(cg_blksfree(cgp), bno + i);
aca50d72
KM
570 cgp->cg_cs.cs_nffree--;
571 fs->fs_cstotal.cs_nffree--;
572 fs->fs_cs(fs, cg).cs_nffree--;
f3c028b7 573 }
aca50d72
KM
574 fs->fs_fmod++;
575 bdwrite(bp);
576 return (bprev);
07670f7d
KM
577}
578
502770a3
KM
579/*
580 * Determine whether a block can be allocated.
581 *
582 * Check to see if a block of the appropriate size is available,
583 * and if it is, allocate it.
584 */
4f083fd7 585daddr_t
f7287e4b
KM
586alloccg(ip, cg, bpref, size)
587 struct inode *ip;
e3fe2d69
KM
588 int cg;
589 daddr_t bpref;
590 int size;
591{
f7287e4b 592 register struct fs *fs;
f3c028b7 593 register struct cg *cgp;
7188ac27 594 struct buf *bp;
f3c028b7 595 register int i;
7188ac27 596 int error, bno, frags, allocsiz;
e3fe2d69 597
f7287e4b 598 fs = ip->i_fs;
b6407c9d 599 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
ae851115 600 return (NULL);
ec67a3ce
MK
601#ifdef SECSIZE
602 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
603 fs->fs_dbsize);
604#else SECSIZE
7188ac27
KM
605 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
606 (int)fs->fs_cgsize, &bp);
607 if (error) {
608 brelse(bp);
609 return (NULL);
610 }
ec67a3ce 611#endif SECSIZE
e5476900 612 cgp = bp->b_un.b_cg;
7188ac27 613 if (!cg_chkmagic(cgp) ||
0f538882 614 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
d995d89d 615 brelse(bp);
ae851115 616 return (NULL);
d995d89d 617 }
95d0807c 618 cgp->cg_time = time.tv_sec;
b6407c9d 619 if (size == fs->fs_bsize) {
daaf7bee 620 bno = alloccgblk(fs, cgp, bpref);
f3c028b7
KM
621 bdwrite(bp);
622 return (bno);
623 }
624 /*
625 * check to see if any fragments are already available
626 * allocsiz is the size which will be allocated, hacking
627 * it down to a smaller size if necessary
628 */
d995d89d 629 frags = numfrags(fs, size);
b6407c9d 630 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
f3c028b7
KM
631 if (cgp->cg_frsum[allocsiz] != 0)
632 break;
b6407c9d 633 if (allocsiz == fs->fs_frag) {
f3c028b7
KM
634 /*
635 * no fragments were available, so a block will be
636 * allocated, and hacked up
637 */
0947395d 638 if (cgp->cg_cs.cs_nbfree == 0) {
f3c028b7 639 brelse(bp);
ae851115 640 return (NULL);
f3c028b7 641 }
daaf7bee 642 bno = alloccgblk(fs, cgp, bpref);
6994bf5d 643 bpref = dtogd(fs, bno);
b6407c9d 644 for (i = frags; i < fs->fs_frag; i++)
10adeb11 645 setbit(cg_blksfree(cgp), bpref + i);
b6407c9d 646 i = fs->fs_frag - frags;
0947395d
KM
647 cgp->cg_cs.cs_nffree += i;
648 fs->fs_cstotal.cs_nffree += i;
b6407c9d 649 fs->fs_cs(fs, cg).cs_nffree += i;
961945a8 650 fs->fs_fmod++;
f3c028b7
KM
651 cgp->cg_frsum[i]++;
652 bdwrite(bp);
653 return (bno);
654 }
743f1ef7 655 bno = mapsearch(fs, cgp, bpref, allocsiz);
0f538882
KM
656 if (bno < 0) {
657 brelse(bp);
ae851115 658 return (NULL);
0f538882 659 }
f3c028b7 660 for (i = 0; i < frags; i++)
10adeb11 661 clrbit(cg_blksfree(cgp), bno + i);
0947395d
KM
662 cgp->cg_cs.cs_nffree -= frags;
663 fs->fs_cstotal.cs_nffree -= frags;
b6407c9d 664 fs->fs_cs(fs, cg).cs_nffree -= frags;
961945a8 665 fs->fs_fmod++;
f3c028b7
KM
666 cgp->cg_frsum[allocsiz]--;
667 if (frags != allocsiz)
668 cgp->cg_frsum[allocsiz - frags]++;
669 bdwrite(bp);
670 return (cg * fs->fs_fpg + bno);
671}
672
502770a3
KM
673/*
674 * Allocate a block in a cylinder group.
675 *
676 * This algorithm implements the following policy:
677 * 1) allocate the requested block.
678 * 2) allocate a rotationally optimal block in the same cylinder.
679 * 3) allocate the next available block on the block rotor for the
680 * specified cylinder group.
681 * Note that this routine only allocates fs_bsize blocks; these
682 * blocks may be fragmented by the routine that allocates them.
683 */
f3c028b7 684daddr_t
daaf7bee 685alloccgblk(fs, cgp, bpref)
f7287e4b 686 register struct fs *fs;
f3c028b7
KM
687 register struct cg *cgp;
688 daddr_t bpref;
689{
743f1ef7 690 daddr_t bno;
ae851115 691 int cylno, pos, delta;
743f1ef7 692 short *cylbp;
aca50d72 693 register int i;
f3c028b7 694
743f1ef7
KM
695 if (bpref == 0) {
696 bpref = cgp->cg_rotor;
aca50d72
KM
697 goto norot;
698 }
a8580723 699 bpref = blknum(fs, bpref);
6994bf5d 700 bpref = dtogd(fs, bpref);
aca50d72
KM
701 /*
702 * if the requested block is available, use it
703 */
10adeb11 704 if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
aca50d72
KM
705 bno = bpref;
706 goto gotit;
707 }
aca50d72
KM
708 /*
709 * check for a block available on the same cylinder
aca50d72
KM
710 */
711 cylno = cbtocylno(fs, bpref);
10adeb11 712 if (cg_blktot(cgp)[cylno] == 0)
502770a3
KM
713 goto norot;
714 if (fs->fs_cpc == 0) {
715 /*
716 * block layout info is not available, so just have
717 * to take any block in this cylinder.
718 */
719 bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
720 goto norot;
721 }
aca50d72
KM
722 /*
723 * check the summary information to see if a block is
724 * available in the requested cylinder starting at the
4f083fd7 725 * requested rotational position and proceeding around.
aca50d72 726 */
10adeb11 727 cylbp = cg_blks(fs, cgp, cylno);
4f083fd7 728 pos = cbtorpos(fs, bpref);
10adeb11 729 for (i = pos; i < fs->fs_nrpos; i++)
aca50d72
KM
730 if (cylbp[i] > 0)
731 break;
10adeb11 732 if (i == fs->fs_nrpos)
aca50d72 733 for (i = 0; i < pos; i++)
743f1ef7
KM
734 if (cylbp[i] > 0)
735 break;
aca50d72
KM
736 if (cylbp[i] > 0) {
737 /*
738 * found a rotational position, now find the actual
739 * block. A panic if none is actually there.
740 */
741 pos = cylno % fs->fs_cpc;
742 bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
10adeb11 743 if (fs_postbl(fs, pos)[i] == -1) {
ffd90e52
KM
744 printf("pos = %d, i = %d, fs = %s\n",
745 pos, i, fs->fs_fsmnt);
aca50d72 746 panic("alloccgblk: cyl groups corrupted");
ffd90e52 747 }
10adeb11
KM
748 for (i = fs_postbl(fs, pos)[i];; ) {
749 if (isblock(fs, cg_blksfree(cgp), bno + i)) {
240a4664 750 bno = blkstofrags(fs, (bno + i));
aca50d72 751 goto gotit;
743f1ef7 752 }
10adeb11
KM
753 delta = fs_rotbl(fs)[i];
754 if (delta <= 0 ||
755 delta + i > fragstoblks(fs, fs->fs_fpg))
aca50d72 756 break;
ae851115 757 i += delta;
743f1ef7 758 }
ffd90e52 759 printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
aca50d72 760 panic("alloccgblk: can't find blk in cyl");
e3fe2d69 761 }
aca50d72
KM
762norot:
763 /*
764 * no blocks in the requested cylinder, so take next
765 * available one in this cylinder group.
766 */
b32450f4 767 bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
6459ebe0 768 if (bno < 0)
ae851115 769 return (NULL);
743f1ef7 770 cgp->cg_rotor = bno;
e3fe2d69 771gotit:
10adeb11 772 clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
0947395d
KM
773 cgp->cg_cs.cs_nbfree--;
774 fs->fs_cstotal.cs_nbfree--;
b6407c9d 775 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
502770a3 776 cylno = cbtocylno(fs, bno);
10adeb11
KM
777 cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
778 cg_blktot(cgp)[cylno]--;
e3fe2d69 779 fs->fs_fmod++;
743f1ef7 780 return (cgp->cg_cgx * fs->fs_fpg + bno);
e3fe2d69 781}
10adeb11 782
502770a3
KM
783/*
784 * Determine whether an inode can be allocated.
785 *
786 * Check to see if an inode is available, and if it is,
787 * allocate it using the following policy:
788 * 1) allocate the requested inode.
789 * 2) allocate the next available inode after the requested
790 * inode in the specified cylinder group.
791 */
4f083fd7 792ino_t
f7287e4b
KM
793ialloccg(ip, cg, ipref, mode)
794 struct inode *ip;
e3fe2d69
KM
795 int cg;
796 daddr_t ipref;
797 int mode;
798{
f7287e4b 799 register struct fs *fs;
f3c028b7 800 register struct cg *cgp;
4e0c7b8a 801 struct buf *bp;
7188ac27 802 int error, start, len, loc, map, i;
e3fe2d69 803
f7287e4b 804 fs = ip->i_fs;
b6407c9d 805 if (fs->fs_cs(fs, cg).cs_nifree == 0)
ae851115 806 return (NULL);
ec67a3ce
MK
807#ifdef SECSIZE
808 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
809 fs->fs_dbsize);
810#else SECSIZE
7188ac27
KM
811 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
812 (int)fs->fs_cgsize, &bp);
813 if (error) {
814 brelse(bp);
815 return (NULL);
816 }
ec67a3ce 817#endif SECSIZE
e5476900 818 cgp = bp->b_un.b_cg;
7188ac27 819 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
d995d89d 820 brelse(bp);
ae851115 821 return (NULL);
d995d89d 822 }
95d0807c 823 cgp->cg_time = time.tv_sec;
e3fe2d69
KM
824 if (ipref) {
825 ipref %= fs->fs_ipg;
10adeb11 826 if (isclr(cg_inosused(cgp), ipref))
e3fe2d69 827 goto gotit;
4e0c7b8a
KM
828 }
829 start = cgp->cg_irotor / NBBY;
830 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
10adeb11 831 loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
4e0c7b8a 832 if (loc == 0) {
e5889092
KM
833 len = start + 1;
834 start = 0;
10adeb11 835 loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
e5889092
KM
836 if (loc == 0) {
837 printf("cg = %s, irotor = %d, fs = %s\n",
838 cg, cgp->cg_irotor, fs->fs_fsmnt);
839 panic("ialloccg: map corrupted");
840 /* NOTREACHED */
841 }
4e0c7b8a
KM
842 }
843 i = start + len - loc;
10adeb11 844 map = cg_inosused(cgp)[i];
4e0c7b8a
KM
845 ipref = i * NBBY;
846 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
847 if ((map & i) == 0) {
e3fe2d69
KM
848 cgp->cg_irotor = ipref;
849 goto gotit;
850 }
851 }
4e0c7b8a
KM
852 printf("fs = %s\n", fs->fs_fsmnt);
853 panic("ialloccg: block not in map");
854 /* NOTREACHED */
e3fe2d69 855gotit:
10adeb11 856 setbit(cg_inosused(cgp), ipref);
0947395d
KM
857 cgp->cg_cs.cs_nifree--;
858 fs->fs_cstotal.cs_nifree--;
b6407c9d 859 fs->fs_cs(fs, cg).cs_nifree--;
e3fe2d69
KM
860 fs->fs_fmod++;
861 if ((mode & IFMT) == IFDIR) {
0947395d
KM
862 cgp->cg_cs.cs_ndir++;
863 fs->fs_cstotal.cs_ndir++;
b6407c9d 864 fs->fs_cs(fs, cg).cs_ndir++;
e3fe2d69
KM
865 }
866 bdwrite(bp);
867 return (cg * fs->fs_ipg + ipref);
868}
869
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	/* size must be at most one block and a multiple of the frag size */
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	/* cylinder group that owns bno */
	cg = dtog(fs, bno);
	if (badblock(fs, bno)) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		return;
	}
	/* read the cylinder group header + maps for this cg */
#ifdef SECSIZE
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    fs->fs_dbsize);
#else SECSIZE
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
#endif SECSIZE
	cgp = bp->b_un.b_cg;
	/* bad magic: cg block is corrupt; drop the buffer and give up */
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	/* convert bno to a cg-relative fragment number */
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		/* freeing a full block */
		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		/* credit the block to its cylinder/rotational-position counts */
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		/* first fragment of the block containing bno */
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		/* i == frags on loop exit: account for the frags just freed */
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			/* move the frag counts over to a whole-block count */
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	/* superblock summary information is now modified */
	fs->fs_fmod++;
	/* delayed write: cg block goes out with the next sync */
	bdwrite(bp);
}
976
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_fs;
	/* inode number must lie within the filesystem's inode space */
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: range");
	}
	/* cylinder group that owns this inode */
	cg = itog(fs, ino);
	/* read the cylinder group header + maps for this cg */
#ifdef SECSIZE
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    fs->fs_dbsize);
#else SECSIZE
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
#endif SECSIZE
	cgp = bp->b_un.b_cg;
	/* bad magic: cg block is corrupt; drop the buffer and give up */
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	/* reduce to a cg-relative inode index */
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	/* back up the allocation rotor so searches find this slot */
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	/* directories are counted separately in the summary info */
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	/* superblock summary information is now modified */
	fs->fs_fmod++;
	/* delayed write: cg block goes out with the next sync */
	bdwrite(bp);
}
1036
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	/* start at the preferred block if given, else at the rotor */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	/* first pass: scan from start to the end of the map */
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		/* second pass: wrap around and scan from the beginning */
		len = start + 1;
		start = 0;
		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			/* caller guaranteed a fit; the map must be corrupt */
			/* NOTE(review): panic string says "alloccg", the
			   historical caller, not this function's name */
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	/* scanc counts down; convert remaining length to a frag number */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		/* slide an allocsiz-wide window across the block's frag map */
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	/* the byte matched but no bit run did: map is inconsistent */
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}
1101
e3fe2d69 1102/*
502770a3
KM
1103 * Fserr prints the name of a file system with an error diagnostic.
1104 *
1105 * The form of the error message is:
e3fe2d69
KM
1106 * fs: error message
1107 */
1108fserr(fs, cp)
1109 struct fs *fs;
1110 char *cp;
1111{
1112
283ffc90 1113 log(LOG_ERR, "%s: %s\n", fs->fs_fsmnt, cp);
e3fe2d69 1114}