major overhaul to handle logical buffer pool (not quite right yet)
[unix-history] / usr / src / sys / ufs / lfs / lfs_alloc.c
CommitLineData
da7c5cc6 1/*
7188ac27 2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
202a4bd9 3 * All rights reserved.
da7c5cc6 4 *
202a4bd9 5 * Redistribution and use in source and binary forms are permitted
616d42db
KB
6 * provided that the above copyright notice and this paragraph are
7 * duplicated in all such forms and that any documentation,
8 * advertising materials, and other materials related to such
9 * distribution and use acknowledge that the software was developed
10 * by the University of California, Berkeley. The name of the
11 * University may not be used to endorse or promote products derived
12 * from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
202a4bd9 16 *
0c36425b 17 * @(#)lfs_alloc.c 7.13 (Berkeley) %G%
da7c5cc6 18 */
e3fe2d69 19
94368568
JB
20#include "param.h"
21#include "systm.h"
22#include "mount.h"
94368568 23#include "buf.h"
94368568 24#include "user.h"
7188ac27 25#include "vnode.h"
94368568 26#include "kernel.h"
7580177b 27#include "syslog.h"
aec7dd3b 28#include "cmap.h"
7188ac27
KM
29#include "../ufs/quota.h"
30#include "../ufs/inode.h"
31#include "../ufs/fs.h"
e3fe2d69 32
daaf7bee 33extern u_long hashalloc();
4f083fd7
SL
34extern ino_t ialloccg();
35extern daddr_t alloccg();
743f1ef7
KM
36extern daddr_t alloccgblk();
37extern daddr_t fragextend();
38extern daddr_t blkpref();
39extern daddr_t mapsearch();
1d7a08c5 40extern int inside[], around[];
b6407c9d 41extern unsigned char *fragtbl[];
e3fe2d69 42
502770a3
KM
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 *
 * On success, 0 is returned and the allocated disk address is stored
 * through bnp; on failure an errno (ENOSPC, or a quota error) is
 * returned and *bnp is left 0.
 */
alloc(ip, lbn, bpref, size, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	daddr_t *bnp;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg, error;

	*bnp = 0;
	fs = ip->i_fs;
	/* a request that is not a whole number of fragments is fatal */
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("alloc: bad size");
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	/* non-root users may not dip into the minfree reserve */
	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), 0))
		return (error);
#endif
	/* an out-of-range preference is treated as no preference */
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		/* charge the new blocks to the inode and mark it dirty */
		ip->i_blocks += btodb(size);
		ip->i_flag |= IUPD|ICHG;
		*bnp = bno;
		return (0);
	}
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
107
502770a3
KM
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 *
 * lbprev is the logical block number of the fragment being grown;
 * osize/nsize are the old and new sizes (fragment multiples).
 * On success, 0 is returned and the buffer holding the (possibly
 * relocated) block is stored through bpp; on failure an errno is
 * returned and *bpp is left 0.
 */
realloccg(ip, lbprev, bpref, osize, nsize, bpp)
	register struct inode *ip;
	off_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp, *obp;
	int cg, request;
	daddr_t bprev, bno, bn;
	int i, error, count;

	*bpp = 0;
	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	/* non-root users may not dip into the minfree reserve */
	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	/* the fragment being grown must already exist in a direct block */
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), 0))
		return (error);
#endif
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	brealloc(bp, nsize);
	bp->b_flags |= B_DONE;
	/* zero the newly grown tail so stale data never reaches disk */
	bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
	ip->i_blocks += btodb(nsize - osize);
	ip->i_flag |= IUPD|ICHG;
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != bno)
			panic("bad blockno");
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying
		 * to grow a small fragment to a larger fragment. To save
		 * time, we allocate a full sized block, then free the
		 * unused portion. If the file continues to grow, the
		 * `fragextend' call above will be able to grow it in place
		 * without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())alloccg);
	if (bno > 0) {
#ifdef SECSIZE
		obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize,
		    fs->fs_dbsize);
#else SECSIZE
		bp->b_blkno = bn = fsbtodb(fs, bno);
		/* purge stale cache entries covering the old location */
		count = howmany(osize, CLBYTES);
		for (i = 0; i < count; i++)
#ifdef SECSIZE
			munhash(ip->i_dev, bn + i * CLBYTES / fs->fs_dbsize);
#else SECSIZE
			munhash(ip->i_dev, bn + i * CLBYTES / DEV_BSIZE);
#endif SECSIZE
		blkfree(ip, bprev, (off_t)osize);
		/* if a whole block was taken, return the unused tail */
		if (nsize < request)
			blkfree(ip, bno + numfrags(fs, nsize),
			    (off_t)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
251
502770a3
KM
/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 * 1) allocate the requested inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 *
 * pip is the parent directory's inode; mode is the mode of the new
 * inode. On success, 0 is returned and the locked in-core inode is
 * stored through ipp; on failure an errno is returned and *ipp is 0.
 */
ialloc(pip, ipref, mode, ipp)
	register struct inode *pip;
	ino_t ipref;
	int mode;
	struct inode **ipp;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg, error;

	*ipp = 0;
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
#ifdef QUOTA
	if (error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0))
		return (error);
#endif
	/* an out-of-range preference is treated as no preference */
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	error = iget(pip, ino, ipp);
	if (error) {
		/* could not get the in-core inode; release the on-disk one */
		ifree(pip, ino, 0);
		return (error);
	}
	ip = *ipp;
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}
321
743f1ef7 322/*
502770a3
KM
323 * Find a cylinder to place a directory.
324 *
325 * The policy implemented by this algorithm is to select from
326 * among those cylinder groups with above the average number of
327 * free inodes, the one with the smallest number of directories.
743f1ef7 328 */
4f083fd7 329ino_t
f7287e4b 330dirpref(fs)
e3fe2d69 331 register struct fs *fs;
f7287e4b 332{
743f1ef7 333 int cg, minndir, mincg, avgifree;
e3fe2d69 334
0947395d 335 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
743f1ef7 336 minndir = fs->fs_ipg;
e3fe2d69 337 mincg = 0;
743f1ef7 338 for (cg = 0; cg < fs->fs_ncg; cg++)
b6407c9d
KM
339 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
340 fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
e3fe2d69 341 mincg = cg;
b6407c9d 342 minndir = fs->fs_cs(fs, cg).cs_ndir;
e3fe2d69 343 }
4f083fd7 344 return ((ino_t)(fs->fs_ipg * mincg));
e3fe2d69
KM
345}
346
/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 *
 * lbn is the logical block being allocated; indx is its index within
 * the block map bap (direct-block array or indirect block contents).
 * Returns a preferred disk address, or NULL for no preference.
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	/* starting a new section, or the previous block is unknown? */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			/* first section: stay with the inode's group */
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			/* no previous allocation info; guess from lbn */
			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		/* sweep forward from startcg, then wrap to the beginning */
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
435
502770a3
KM
436/*
437 * Implement the cylinder overflow algorithm.
438 *
439 * The policy implemented by this algorithm is:
440 * 1) allocate the block in its requested cylinder group.
441 * 2) quadradically rehash on the cylinder group number.
442 * 3) brute force search for a free block.
443 */
daaf7bee
KM
444/*VARARGS5*/
445u_long
f7287e4b
KM
446hashalloc(ip, cg, pref, size, allocator)
447 struct inode *ip;
e3fe2d69
KM
448 int cg;
449 long pref;
450 int size; /* size for data blocks, mode for inodes */
daaf7bee 451 u_long (*allocator)();
e3fe2d69 452{
f7287e4b 453 register struct fs *fs;
e3fe2d69
KM
454 long result;
455 int i, icg = cg;
456
f7287e4b 457 fs = ip->i_fs;
e3fe2d69
KM
458 /*
459 * 1: preferred cylinder group
460 */
f7287e4b 461 result = (*allocator)(ip, cg, pref, size);
e3fe2d69
KM
462 if (result)
463 return (result);
464 /*
465 * 2: quadratic rehash
466 */
467 for (i = 1; i < fs->fs_ncg; i *= 2) {
468 cg += i;
469 if (cg >= fs->fs_ncg)
470 cg -= fs->fs_ncg;
f7287e4b 471 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
472 if (result)
473 return (result);
474 }
475 /*
476 * 3: brute force search
620b3290
SL
477 * Note that we start at i == 2, since 0 was checked initially,
478 * and 1 is always checked in the quadratic rehash.
e3fe2d69 479 */
2136305e 480 cg = (icg + 2) % fs->fs_ncg;
620b3290 481 for (i = 2; i < fs->fs_ncg; i++) {
f7287e4b 482 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
483 if (result)
484 return (result);
485 cg++;
486 if (cg == fs->fs_ncg)
487 cg = 0;
488 }
ae851115 489 return (NULL);
e3fe2d69
KM
490}
491
502770a3
KM
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 *
 * bprev is the disk address of the existing fragment; osize/nsize
 * are its old and new sizes. Returns bprev when the fragment was
 * grown in place, or NULL when it cannot be extended.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	/* quick check against the in-core summary before any I/O */
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
#ifdef SECSIZE
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    fs->fs_dbsize);
#else SECSIZE
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
#endif SECSIZE
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	/* every fragment beyond the old size must be free in the map */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	/* delayed write: the cg block is metadata, flushed by sync */
	bdwrite(bp);
	return (bprev);
}
566
502770a3
KM
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 *
 * bpref is the preferred disk address (0 for none); size is the
 * amount wanted, a fragment multiple. Returns the allocated disk
 * address, or NULL if this cylinder group cannot satisfy the request.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	/* a full-block request needs a whole free block in this group */
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
#ifdef SECSIZE
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    fs->fs_dbsize);
#else SECSIZE
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
#endif SECSIZE
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		/* free the tail of the block beyond what was requested */
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	/* a run of allocsiz free frags became a (possibly empty) shorter run */
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
660
502770a3
KM
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 *
 * Returns the disk address of the allocated block, or NULL when the
 * group's free-block map yields nothing.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		/* no preference: continue from the group's rotor */
		bpref = cgp->cg_rotor;
		goto norot;
	}
	/* round the preference down to a block boundary, group-relative */
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		/* walk the rotational-position chain via fs_rotbl deltas */
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	/* mark the block allocated and update all the summary counts */
	clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
10adeb11 770
502770a3
KM
771/*
772 * Determine whether an inode can be allocated.
773 *
774 * Check to see if an inode is available, and if it is,
775 * allocate it using the following policy:
776 * 1) allocate the requested inode.
777 * 2) allocate the next available inode after the requested
778 * inode in the specified cylinder group.
779 */
4f083fd7 780ino_t
f7287e4b
KM
781ialloccg(ip, cg, ipref, mode)
782 struct inode *ip;
e3fe2d69
KM
783 int cg;
784 daddr_t ipref;
785 int mode;
786{
f7287e4b 787 register struct fs *fs;
f3c028b7 788 register struct cg *cgp;
4e0c7b8a 789 struct buf *bp;
7188ac27 790 int error, start, len, loc, map, i;
e3fe2d69 791
f7287e4b 792 fs = ip->i_fs;
b6407c9d 793 if (fs->fs_cs(fs, cg).cs_nifree == 0)
ae851115 794 return (NULL);
ec67a3ce
MK
795#ifdef SECSIZE
796 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
797 fs->fs_dbsize);
798#else SECSIZE
7188ac27 799 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 800 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
801 if (error) {
802 brelse(bp);
803 return (NULL);
804 }
ec67a3ce 805#endif SECSIZE
e5476900 806 cgp = bp->b_un.b_cg;
7188ac27 807 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
d995d89d 808 brelse(bp);
ae851115 809 return (NULL);
d995d89d 810 }
ad9250ee 811 cgp->cg_time = time.tv_sec;
e3fe2d69
KM
812 if (ipref) {
813 ipref %= fs->fs_ipg;
10adeb11 814 if (isclr(cg_inosused(cgp), ipref))
e3fe2d69 815 goto gotit;
4e0c7b8a
KM
816 }
817 start = cgp->cg_irotor / NBBY;
818 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
10adeb11 819 loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
4e0c7b8a 820 if (loc == 0) {
e5889092
KM
821 len = start + 1;
822 start = 0;
10adeb11 823 loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
e5889092
KM
824 if (loc == 0) {
825 printf("cg = %s, irotor = %d, fs = %s\n",
826 cg, cgp->cg_irotor, fs->fs_fsmnt);
827 panic("ialloccg: map corrupted");
828 /* NOTREACHED */
829 }
4e0c7b8a
KM
830 }
831 i = start + len - loc;
10adeb11 832 map = cg_inosused(cgp)[i];
4e0c7b8a
KM
833 ipref = i * NBBY;
834 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
835 if ((map & i) == 0) {
e3fe2d69
KM
836 cgp->cg_irotor = ipref;
837 goto gotit;
838 }
839 }
4e0c7b8a
KM
840 printf("fs = %s\n", fs->fs_fsmnt);
841 panic("ialloccg: block not in map");
842 /* NOTREACHED */
e3fe2d69 843gotit:
10adeb11 844 setbit(cg_inosused(cgp), ipref);
0947395d
KM
845 cgp->cg_cs.cs_nifree--;
846 fs->fs_cstotal.cs_nifree--;
b6407c9d 847 fs->fs_cs(fs, cg).cs_nifree--;
e3fe2d69
KM
848 fs->fs_fmod++;
849 if ((mode & IFMT) == IFDIR) {
0947395d
KM
850 cgp->cg_cs.cs_ndir++;
851 fs->fs_cstotal.cs_ndir++;
b6407c9d 852 fs->fs_cs(fs, cg).cs_ndir++;
e3fe2d69
KM
853 }
854 bdwrite(bp);
855 return (cg * fs->fs_ipg + ipref);
856}
857
502770a3
KM
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 *
 * bno is the disk address being freed; size is its length, a
 * fragment multiple <= fs_bsize. Errors reading the cylinder group
 * are silently ignored (the space is simply not reclaimed).
 */
blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	/* refuse to free an address outside the data area */
	if (badblock(fs, bno)) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		return;
	}
#ifdef SECSIZE
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    fs->fs_dbsize);
#else SECSIZE
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
#endif SECSIZE
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}
964
502770a3
KM
965/*
966 * Free an inode.
967 *
968 * The specified inode is placed back in the free map.
969 */
f7287e4b
KM
 970ifree(ip, ino, mode)
 971	struct inode *ip;
e3fe2d69
KM
 972	ino_t ino;
 973	int mode;
 974{
 975	register struct fs *fs;
 976	register struct cg *cgp;
7188ac27
KM
 977	struct buf *bp;
 978	int error, cg;
e3fe2d69 979
f7287e4b 980	fs = ip->i_fs;
	/* inode number must fall within the filesystem's inode space */
ffd90e52
KM
 981	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
 982		printf("dev = 0x%x, ino = %d, fs = %s\n",
 983		    ip->i_dev, ino, fs->fs_fsmnt);
e3fe2d69 984		panic("ifree: range");
ffd90e52 985	}
6994bf5d 986	cg = itog(fs, ino);
	/*
	 * Read the cylinder group containing this inode; on I/O error
	 * or bad cg magic the free is silently abandoned.
	 */
ec67a3ce
MK
 987#ifdef SECSIZE
 988	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
 989	    fs->fs_dbsize);
 990#else SECSIZE
7188ac27 991	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 992		(int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
 993	if (error) {
 994		brelse(bp);
 995		return;
 996	}
ec67a3ce 997#endif SECSIZE
e5476900 998	cgp = bp->b_un.b_cg;
7188ac27 999	if (!cg_chkmagic(cgp)) {
d995d89d 1000		brelse(bp);
e3fe2d69 1001		return;
d995d89d 1002	}
ad9250ee 1003	cgp->cg_time = time.tv_sec;
	/* reduce ino to an index within this cylinder group */
e3fe2d69 1004	ino %= fs->fs_ipg;
10adeb11 1005	if (isclr(cg_inosused(cgp), ino)) {
ffd90e52
KM
 1006		printf("dev = 0x%x, ino = %d, fs = %s\n",
 1007		    ip->i_dev, ino, fs->fs_fsmnt);
e3fe2d69 1008		panic("ifree: freeing free inode");
ffd90e52 1009	}
10adeb11 1010	clrbit(cg_inosused(cgp), ino);
	/* back up the inode-allocation rotor so this slot is found first */
4e0c7b8a
KM
 1011	if (ino < cgp->cg_irotor)
 1012		cgp->cg_irotor = ino;
	/* credit the free-inode counts: cg, fs total, per-cg summary */
0947395d
KM
 1013	cgp->cg_cs.cs_nifree++;
 1014	fs->fs_cstotal.cs_nifree++;
b6407c9d 1015	fs->fs_cs(fs, cg).cs_nifree++;
	/* a freed directory also decrements the directory counts */
e3fe2d69 1016	if ((mode & IFMT) == IFDIR) {
0947395d
KM
 1017		cgp->cg_cs.cs_ndir--;
 1018		fs->fs_cstotal.cs_ndir--;
b6407c9d 1019		fs->fs_cs(fs, cg).cs_ndir--;
e3fe2d69
KM
 1020	}
 1021	fs->fs_fmod++;
 1022	bdwrite(bp);
 1023}
1024
743f1ef7 1025/*
502770a3
KM
1026 * Find a block of the specified size in the specified cylinder group.
1027 *
743f1ef7
KM
1028 * It is a panic if a request is made to find a block if none are
1029 * available.
1030 */
 1031daddr_t
 1032mapsearch(fs, cgp, bpref, allocsiz)
 1033	register struct fs *fs;
 1034	register struct cg *cgp;
 1035	daddr_t bpref;
 1036	int allocsiz;
 1037{
 1038	daddr_t bno;
 1039	int start, len, loc, i;
 1040	int blk, field, subfield, pos;
 1041
 1042	/*
 1043	 * find the fragment by searching through the free block
 1044	 * map for an appropriate bit pattern
 1045	 */
	/* start at the preferred block if given, else at the frag rotor */
 1046	if (bpref)
6994bf5d 1047		start = dtogd(fs, bpref) / NBBY;
743f1ef7
KM
 1048	else
 1049		start = cgp->cg_frotor / NBBY;
942bd18b 1050	len = howmany(fs->fs_fpg, NBBY) - start;
	/* scanc() locates a map byte containing a run of allocsiz free frags */
9523729e
KM
 1051	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
 1052		(u_char *)fragtbl[fs->fs_frag],
 1053		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	/* not found past start: wrap and rescan the front of the map */
743f1ef7 1054	if (loc == 0) {
e5476900
KM
 1055		len = start + 1;
 1056		start = 0;
9523729e
KM
 1057		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
 1058			(u_char *)fragtbl[fs->fs_frag],
 1059			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
4e0c7b8a
KM
 1060		if (loc == 0) {
 1061			printf("start = %d, len = %d, fs = %s\n",
 1062			    start, len, fs->fs_fsmnt);
 1063			panic("alloccg: map corrupted");
e5889092 1064			/* NOTREACHED */
4e0c7b8a 1065		}
743f1ef7
KM
 1066	}
	/* convert the byte offset found by scanc to a frag number */
 1067	bno = (start + len - loc) * NBBY;
	/* remember where to resume searching next time */
 1068	cgp->cg_frotor = bno;
 1069	/*
 1070	 * found the byte in the map
 1071	 * sift through the bits to find the selected frag
 1072	 */
ae851115 1073	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
10adeb11 1074		blk = blkmap(fs, cg_blksfree(cgp), bno);
743f1ef7
KM
 1075		blk <<= 1;
 1076		field = around[allocsiz];
 1077		subfield = inside[allocsiz];
	/* slide the allocsiz-wide mask across the block's frag bits */
b6407c9d 1078		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
ae851115
KM
 1079			if ((blk & field) == subfield)
 1080				return (bno + pos);
743f1ef7
KM
 1081			field <<= 1;
 1082			subfield <<= 1;
 1083		}
 1084	}
	/* scanc said a run was here but the bit sift missed it: map is bad */
ffd90e52 1085	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
743f1ef7 1086	panic("alloccg: block not in map");
e5476900 1087	return (-1);
743f1ef7
KM
 1088}
1089
e3fe2d69 1090/*
502770a3
KM
1091 * Fserr prints the name of a file system with an error diagnostic.
1092 *
1093 * The form of the error message is:
e3fe2d69
KM
1094 * fs: error message
1095 */
 1096fserr(fs, cp)
 1097	struct fs *fs;
 1098	char *cp;
 1099{
 1100
	/* log "<mount point>: <message>" to the system log at LOG_ERR */
283ffc90 1101	log(LOG_ERR, "%s: %s\n", fs->fs_fsmnt, cp);
e3fe2d69 1102}