vnode layer defines f_fsid and f_flags for statfs()
[unix-history] / usr / src / sys / ufs / lfs / lfs_alloc.c
CommitLineData
da7c5cc6 1/*
7188ac27 2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
202a4bd9 3 * All rights reserved.
da7c5cc6 4 *
202a4bd9 5 * Redistribution and use in source and binary forms are permitted
616d42db
KB
6 * provided that the above copyright notice and this paragraph are
7 * duplicated in all such forms and that any documentation,
8 * advertising materials, and other materials related to such
9 * distribution and use acknowledge that the software was developed
10 * by the University of California, Berkeley. The name of the
11 * University may not be used to endorse or promote products derived
12 * from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
202a4bd9 16 *
a937f856 17 * @(#)lfs_alloc.c 7.11 (Berkeley) %G%
da7c5cc6 18 */
e3fe2d69 19
94368568
JB
20#include "param.h"
21#include "systm.h"
22#include "mount.h"
94368568 23#include "buf.h"
94368568 24#include "user.h"
7188ac27 25#include "vnode.h"
94368568 26#include "kernel.h"
7580177b 27#include "syslog.h"
aec7dd3b 28#include "cmap.h"
7188ac27
KM
29#include "../ufs/quota.h"
30#include "../ufs/inode.h"
31#include "../ufs/fs.h"
e3fe2d69 32
daaf7bee 33extern u_long hashalloc();
4f083fd7
SL
34extern ino_t ialloccg();
35extern daddr_t alloccg();
743f1ef7
KM
36extern daddr_t alloccgblk();
37extern daddr_t fragextend();
38extern daddr_t blkpref();
39extern daddr_t mapsearch();
1d7a08c5 40extern int inside[], around[];
b6407c9d 41extern unsigned char *fragtbl[];
e3fe2d69 42
502770a3
KM
43/*
44 * Allocate a block in the file system.
45 *
46 * The size of the requested block is given, which must be some
47 * multiple of fs_fsize and <= fs_bsize.
48 * A preference may be optionally specified. If a preference is given
49 * the following hierarchy is used to allocate a block:
50 * 1) allocate the requested block.
51 * 2) allocate a rotationally optimal block in the same cylinder.
52 * 3) allocate a block in the same cylinder group.
53 * 4) quadradically rehash into other cylinder groups, until an
54 * available block is located.
55 * If no block preference is given the following heirarchy is used
56 * to allocate a block:
57 * 1) allocate a block in the cylinder group that contains the
58 * inode for the file.
59 * 2) quadradically rehash into other cylinder groups, until an
60 * available block is located.
61 */
7188ac27 62alloc(ip, bpref, size, bpp, flags)
f3c028b7 63 register struct inode *ip;
e3fe2d69
KM
64 daddr_t bpref;
65 int size;
7188ac27
KM
66 struct buf **bpp;
67 int flags;
e3fe2d69
KM
68{
69 daddr_t bno;
70 register struct fs *fs;
f3c028b7 71 register struct buf *bp;
7188ac27 72 int cg, error;
e3fe2d69 73
7188ac27 74 *bpp = 0;
f7287e4b 75 fs = ip->i_fs;
ffd90e52
KM
76 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
77 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
78 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
b6407c9d 79 panic("alloc: bad size");
ffd90e52 80 }
b6407c9d 81 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
0947395d 82 goto nospace;
240a4664 83 if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
e3fe2d69 84 goto nospace;
b4567e9c 85#ifdef QUOTA
7188ac27
KM
86 if (error = chkdq(ip, (long)btodb(size), 0))
87 return (error);
ca90a6bf 88#endif
260e5e3c
KM
89 if (bpref >= fs->fs_size)
90 bpref = 0;
e3fe2d69 91 if (bpref == 0)
6994bf5d 92 cg = itog(fs, ip->i_number);
e3fe2d69 93 else
6994bf5d 94 cg = dtog(fs, bpref);
4f083fd7
SL
95 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
96 (u_long (*)())alloccg);
6459ebe0 97 if (bno <= 0)
e3fe2d69 98 goto nospace;
2523b389
SL
99 ip->i_blocks += btodb(size);
100 ip->i_flag |= IUPD|ICHG;
ec67a3ce
MK
101#ifdef SECSIZE
102 bp = getblk(ip->i_dev, fsbtodb(fs, bno), size, fs->fs_dbsize);
103#else SECSIZE
7188ac27
KM
104 bp = getblk(ip->i_devvp, fsbtodb(fs, bno), size);
105 if (flags & B_CLRBUF)
106 clrbuf(bp);
107 *bpp = bp;
108 return (0);
e3fe2d69
KM
109nospace:
110 fserr(fs, "file system full");
111 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 112 return (ENOSPC);
e3fe2d69
KM
113}
114
502770a3
KM
115/*
116 * Reallocate a fragment to a bigger size
117 *
118 * The number and size of the old block is given, and a preference
119 * and new size is also specified. The allocator attempts to extend
120 * the original block. Failing that, the regular block allocator is
121 * invoked to get an appropriate block.
122 */
7188ac27 123realloccg(ip, bprev, bpref, osize, nsize, bpp)
f7287e4b 124 register struct inode *ip;
743f1ef7 125 daddr_t bprev, bpref;
07670f7d 126 int osize, nsize;
7188ac27 127 struct buf **bpp;
07670f7d 128{
07670f7d 129 register struct fs *fs;
7188ac27 130 struct buf *bp, *obp;
f7fa0c54 131 int cg, request;
5d934daa 132 daddr_t bno, bn;
7188ac27 133 int i, error, count;
07670f7d 134
7188ac27 135 *bpp = 0;
f7287e4b 136 fs = ip->i_fs;
d995d89d 137 if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
ffd90e52
KM
138 (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
139 printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
140 ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
b6407c9d 141 panic("realloccg: bad size");
ffd90e52 142 }
240a4664 143 if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
0947395d 144 goto nospace;
ffd90e52
KM
145 if (bprev == 0) {
146 printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
147 ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
502770a3 148 panic("realloccg: bad bprev");
ffd90e52 149 }
b4567e9c 150#ifdef QUOTA
7188ac27
KM
151 if (error = chkdq(ip, (long)btodb(nsize - osize), 0))
152 return (error);
ca90a6bf 153#endif
ae851115 154 cg = dtog(fs, bprev);
f7287e4b 155 bno = fragextend(ip, cg, (long)bprev, osize, nsize);
f3c028b7 156 if (bno != 0) {
9d6d37ce 157 do {
ec67a3ce
MK
158#ifdef SECSIZE
159 bp = bread(ip->i_dev, fsbtodb(fs, bno), osize,
160 fs->fs_dbsize);
161#else SECSIZE
7188ac27 162 error = bread(ip->i_devvp, fsbtodb(fs, bno),
a937f856 163 osize, NOCRED, &bp);
7188ac27 164 if (error) {
9d6d37ce 165 brelse(bp);
7188ac27 166 return (error);
9d6d37ce
BJ
167 }
168 } while (brealloc(bp, nsize) == 0);
169 bp->b_flags |= B_DONE;
4f083fd7 170 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
2523b389
SL
171 ip->i_blocks += btodb(nsize - osize);
172 ip->i_flag |= IUPD|ICHG;
7188ac27
KM
173 *bpp = bp;
174 return (0);
f3c028b7 175 }
260e5e3c
KM
176 if (bpref >= fs->fs_size)
177 bpref = 0;
aec7dd3b 178 switch ((int)fs->fs_optim) {
f8484b5f
KM
179 case FS_OPTSPACE:
180 /*
181 * Allocate an exact sized fragment. Although this makes
182 * best use of space, we will waste time relocating it if
183 * the file continues to grow. If the fragmentation is
184 * less than half of the minimum free reserve, we choose
185 * to begin optimizing for time.
186 */
f7fa0c54 187 request = nsize;
f8484b5f
KM
188 if (fs->fs_minfree < 5 ||
189 fs->fs_cstotal.cs_nffree >
190 fs->fs_dsize * fs->fs_minfree / (2 * 100))
191 break;
192 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
193 fs->fs_fsmnt);
194 fs->fs_optim = FS_OPTTIME;
195 break;
196 case FS_OPTTIME:
197 /*
198 * At this point we have discovered a file that is trying
199 * to grow a small fragment to a larger fragment. To save
200 * time, we allocate a full sized block, then free the
201 * unused portion. If the file continues to grow, the
202 * `fragextend' call above will be able to grow it in place
203 * without further copying. If aberrant programs cause
204 * disk fragmentation to grow within 2% of the free reserve,
205 * we choose to begin optimizing for space.
206 */
f7fa0c54 207 request = fs->fs_bsize;
f8484b5f
KM
208 if (fs->fs_cstotal.cs_nffree <
209 fs->fs_dsize * (fs->fs_minfree - 2) / 100)
210 break;
211 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
212 fs->fs_fsmnt);
213 fs->fs_optim = FS_OPTSPACE;
214 break;
215 default:
216 printf("dev = 0x%x, optim = %d, fs = %s\n",
217 ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
218 panic("realloccg: bad optim");
219 /* NOTREACHED */
220 }
f7fa0c54 221 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
4f083fd7 222 (u_long (*)())alloccg);
6459ebe0 223 if (bno > 0) {
ec67a3ce
MK
224#ifdef SECSIZE
225 obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize,
226 fs->fs_dbsize);
227#else SECSIZE
a937f856
KM
228 error = bread(ip->i_devvp, fsbtodb(fs, bprev),
229 osize, NOCRED, &obp);
7188ac27 230 if (error) {
d995d89d 231 brelse(obp);
7188ac27 232 return (error);
d995d89d 233 }
5d934daa 234 bn = fsbtodb(fs, bno);
ec67a3ce
MK
235#ifdef SECSIZE
236 bp = getblk(ip->i_dev, bn, nsize, fs->fs_dbsize);
237#else SECSIZE
7188ac27 238 bp = getblk(ip->i_devvp, bn, nsize);
ec67a3ce 239#endif SECSIZE
260ede8e 240 bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
ec67a3ce
MK
241 count = howmany(osize, CLBYTES);
242 for (i = 0; i < count; i++)
243#ifdef SECSIZE
244 munhash(ip->i_dev, bn + i * CLBYTES / fs->fs_dbsize);
245#else SECSIZE
246 munhash(ip->i_dev, bn + i * CLBYTES / DEV_BSIZE);
247#endif SECSIZE
260ede8e
KM
248 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
249 if (obp->b_flags & B_DELWRI) {
250 obp->b_flags &= ~B_DELWRI;
251 u.u_ru.ru_oublock--; /* delete charge */
252 }
f3c028b7 253 brelse(obp);
ced3a252 254 blkfree(ip, bprev, (off_t)osize);
f7fa0c54 255 if (nsize < request)
ced3a252 256 blkfree(ip, bno + numfrags(fs, nsize),
f7fa0c54 257 (off_t)(request - nsize));
2523b389
SL
258 ip->i_blocks += btodb(nsize - osize);
259 ip->i_flag |= IUPD|ICHG;
7188ac27
KM
260 *bpp = bp;
261 return (0);
f3c028b7 262 }
0947395d 263nospace:
f3c028b7
KM
264 /*
265 * no space available
266 */
07670f7d
KM
267 fserr(fs, "file system full");
268 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
7188ac27 269 return (ENOSPC);
07670f7d
KM
270}
271
502770a3
KM
272/*
273 * Allocate an inode in the file system.
274 *
275 * A preference may be optionally specified. If a preference is given
276 * the following hierarchy is used to allocate an inode:
277 * 1) allocate the requested inode.
278 * 2) allocate an inode in the same cylinder group.
279 * 3) quadradically rehash into other cylinder groups, until an
280 * available inode is located.
281 * If no inode preference is given the following heirarchy is used
282 * to allocate an inode:
283 * 1) allocate an inode in cylinder group 0.
284 * 2) quadradically rehash into other cylinder groups, until an
285 * available inode is located.
286 */
7188ac27 287ialloc(pip, ipref, mode, ipp)
f7287e4b 288 register struct inode *pip;
e3fe2d69
KM
289 ino_t ipref;
290 int mode;
7188ac27 291 struct inode **ipp;
e3fe2d69 292{
daaf7bee 293 ino_t ino;
e3fe2d69
KM
294 register struct fs *fs;
295 register struct inode *ip;
7188ac27 296 int cg, error;
e3fe2d69 297
7188ac27 298 *ipp = 0;
f7287e4b 299 fs = pip->i_fs;
0947395d 300 if (fs->fs_cstotal.cs_nifree == 0)
e3fe2d69 301 goto noinodes;
b4567e9c 302#ifdef QUOTA
7188ac27
KM
303 if (error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0))
304 return (error);
ca90a6bf 305#endif
260e5e3c
KM
306 if (ipref >= fs->fs_ncg * fs->fs_ipg)
307 ipref = 0;
6994bf5d 308 cg = itog(fs, ipref);
f7287e4b 309 ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
e3fe2d69
KM
310 if (ino == 0)
311 goto noinodes;
7188ac27 312 error = iget(pip, ino, ipp);
7188ac27 313 if (error) {
bcd8515e 314 ifree(pip, ino, 0);
7188ac27 315 return (error);
e3fe2d69 316 }
fb92d0ab 317 ip = *ipp;
ffd90e52
KM
318 if (ip->i_mode) {
319 printf("mode = 0%o, inum = %d, fs = %s\n",
320 ip->i_mode, ip->i_number, fs->fs_fsmnt);
e3fe2d69 321 panic("ialloc: dup alloc");
ffd90e52 322 }
2523b389
SL
323 if (ip->i_blocks) { /* XXX */
324 printf("free inode %s/%d had %d blocks\n",
325 fs->fs_fsmnt, ino, ip->i_blocks);
326 ip->i_blocks = 0;
327 }
fb92d0ab
KM
328 /*
329 * Set up a new generation number for this inode.
330 */
331 if (++nextgennumber < (u_long)time.tv_sec)
332 nextgennumber = time.tv_sec;
333 ip->i_gen = nextgennumber;
7188ac27 334 return (0);
e3fe2d69
KM
335noinodes:
336 fserr(fs, "out of inodes");
ae851115 337 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
7188ac27 338 return (ENOSPC);
e3fe2d69
KM
339}
340
743f1ef7 341/*
502770a3
KM
342 * Find a cylinder to place a directory.
343 *
344 * The policy implemented by this algorithm is to select from
345 * among those cylinder groups with above the average number of
346 * free inodes, the one with the smallest number of directories.
743f1ef7 347 */
4f083fd7 348ino_t
f7287e4b 349dirpref(fs)
e3fe2d69 350 register struct fs *fs;
f7287e4b 351{
743f1ef7 352 int cg, minndir, mincg, avgifree;
e3fe2d69 353
0947395d 354 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
743f1ef7 355 minndir = fs->fs_ipg;
e3fe2d69 356 mincg = 0;
743f1ef7 357 for (cg = 0; cg < fs->fs_ncg; cg++)
b6407c9d
KM
358 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
359 fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
e3fe2d69 360 mincg = cg;
b6407c9d 361 minndir = fs->fs_cs(fs, cg).cs_ndir;
e3fe2d69 362 }
4f083fd7 363 return ((ino_t)(fs->fs_ipg * mincg));
e3fe2d69
KM
364}
365
743f1ef7 366/*
4f083fd7
SL
367 * Select the desired position for the next block in a file. The file is
368 * logically divided into sections. The first section is composed of the
369 * direct blocks. Each additional section contains fs_maxbpg blocks.
370 *
371 * If no blocks have been allocated in the first section, the policy is to
372 * request a block in the same cylinder group as the inode that describes
373 * the file. If no blocks have been allocated in any other section, the
374 * policy is to place the section in a cylinder group with a greater than
375 * average number of free blocks. An appropriate cylinder group is found
16e7863f
KM
376 * by using a rotor that sweeps the cylinder groups. When a new group of
377 * blocks is needed, the sweep begins in the cylinder group following the
378 * cylinder group from which the previous allocation was made. The sweep
379 * continues until a cylinder group with greater than the average number
380 * of free blocks is found. If the allocation is for the first block in an
381 * indirect block, the information on the previous allocation is unavailable;
382 * here a best guess is made based upon the logical block number being
383 * allocated.
4f083fd7
SL
384 *
385 * If a section is already partially allocated, the policy is to
386 * contiguously allocate fs_maxcontig blocks. The end of one of these
387 * contiguous blocks and the beginning of the next is physically separated
388 * so that the disk head will be in transit between them for at least
389 * fs_rotdelay milliseconds. This is to allow time for the processor to
390 * schedule another I/O transfer.
743f1ef7 391 */
daaf7bee 392daddr_t
4f083fd7
SL
393blkpref(ip, lbn, indx, bap)
394 struct inode *ip;
395 daddr_t lbn;
396 int indx;
397 daddr_t *bap;
f7287e4b 398{
4f083fd7 399 register struct fs *fs;
16e7863f
KM
400 register int cg;
401 int avgbfree, startcg;
4f083fd7 402 daddr_t nextblk;
743f1ef7 403
4f083fd7
SL
404 fs = ip->i_fs;
405 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
406 if (lbn < NDADDR) {
407 cg = itog(fs, ip->i_number);
b6407c9d 408 return (fs->fs_fpg * cg + fs->fs_frag);
743f1ef7 409 }
4f083fd7
SL
410 /*
411 * Find a cylinder with greater than average number of
412 * unused data blocks.
413 */
16e7863f
KM
414 if (indx == 0 || bap[indx - 1] == 0)
415 startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
416 else
417 startcg = dtog(fs, bap[indx - 1]) + 1;
418 startcg %= fs->fs_ncg;
4f083fd7 419 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
16e7863f 420 for (cg = startcg; cg < fs->fs_ncg; cg++)
4f083fd7
SL
421 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
422 fs->fs_cgrotor = cg;
423 return (fs->fs_fpg * cg + fs->fs_frag);
424 }
16e7863f 425 for (cg = 0; cg <= startcg; cg++)
4f083fd7
SL
426 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
427 fs->fs_cgrotor = cg;
428 return (fs->fs_fpg * cg + fs->fs_frag);
429 }
430 return (NULL);
431 }
432 /*
433 * One or more previous blocks have been laid out. If less
434 * than fs_maxcontig previous blocks are contiguous, the
435 * next block is requested contiguously, otherwise it is
436 * requested rotationally delayed by fs_rotdelay milliseconds.
437 */
438 nextblk = bap[indx - 1] + fs->fs_frag;
439 if (indx > fs->fs_maxcontig &&
240a4664 440 bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
4f083fd7
SL
441 != nextblk)
442 return (nextblk);
443 if (fs->fs_rotdelay != 0)
444 /*
445 * Here we convert ms of delay to frags as:
446 * (frags) = (ms) * (rev/sec) * (sect/rev) /
447 * ((sect/frag) * (ms/sec))
448 * then round up to the next block.
449 */
450 nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
451 (NSPF(fs) * 1000), fs->fs_frag);
452 return (nextblk);
743f1ef7
KM
453}
454
502770a3
KM
455/*
456 * Implement the cylinder overflow algorithm.
457 *
458 * The policy implemented by this algorithm is:
459 * 1) allocate the block in its requested cylinder group.
460 * 2) quadradically rehash on the cylinder group number.
461 * 3) brute force search for a free block.
462 */
daaf7bee
KM
463/*VARARGS5*/
464u_long
f7287e4b
KM
465hashalloc(ip, cg, pref, size, allocator)
466 struct inode *ip;
e3fe2d69
KM
467 int cg;
468 long pref;
469 int size; /* size for data blocks, mode for inodes */
daaf7bee 470 u_long (*allocator)();
e3fe2d69 471{
f7287e4b 472 register struct fs *fs;
e3fe2d69
KM
473 long result;
474 int i, icg = cg;
475
f7287e4b 476 fs = ip->i_fs;
e3fe2d69
KM
477 /*
478 * 1: preferred cylinder group
479 */
f7287e4b 480 result = (*allocator)(ip, cg, pref, size);
e3fe2d69
KM
481 if (result)
482 return (result);
483 /*
484 * 2: quadratic rehash
485 */
486 for (i = 1; i < fs->fs_ncg; i *= 2) {
487 cg += i;
488 if (cg >= fs->fs_ncg)
489 cg -= fs->fs_ncg;
f7287e4b 490 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
491 if (result)
492 return (result);
493 }
494 /*
495 * 3: brute force search
620b3290
SL
496 * Note that we start at i == 2, since 0 was checked initially,
497 * and 1 is always checked in the quadratic rehash.
e3fe2d69 498 */
2136305e 499 cg = (icg + 2) % fs->fs_ncg;
620b3290 500 for (i = 2; i < fs->fs_ncg; i++) {
f7287e4b 501 result = (*allocator)(ip, cg, 0, size);
e3fe2d69
KM
502 if (result)
503 return (result);
504 cg++;
505 if (cg == fs->fs_ncg)
506 cg = 0;
507 }
ae851115 508 return (NULL);
e3fe2d69
KM
509}
510
502770a3
KM
511/*
512 * Determine whether a fragment can be extended.
513 *
514 * Check to see if the necessary fragments are available, and
515 * if they are, allocate them.
516 */
07670f7d 517daddr_t
f7287e4b
KM
518fragextend(ip, cg, bprev, osize, nsize)
519 struct inode *ip;
07670f7d 520 int cg;
f3c028b7 521 long bprev;
07670f7d
KM
522 int osize, nsize;
523{
f7287e4b 524 register struct fs *fs;
f3c028b7 525 register struct cg *cgp;
7188ac27 526 struct buf *bp;
f3c028b7
KM
527 long bno;
528 int frags, bbase;
7188ac27 529 int i, error;
07670f7d 530
f7287e4b 531 fs = ip->i_fs;
a8580723 532 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
e5476900 533 return (NULL);
d995d89d 534 frags = numfrags(fs, nsize);
a8580723
KM
535 bbase = fragnum(fs, bprev);
536 if (bbase > fragnum(fs, (bprev + frags - 1))) {
ec67a3ce 537 /* cannot extend across a block boundary */
ae851115 538 return (NULL);
f3c028b7 539 }
ec67a3ce
MK
540#ifdef SECSIZE
541 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
542 fs->fs_dbsize);
543#else SECSIZE
7188ac27 544 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 545 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
546 if (error) {
547 brelse(bp);
548 return (NULL);
549 }
ec67a3ce 550#endif SECSIZE
e5476900 551 cgp = bp->b_un.b_cg;
7188ac27 552 if (!cg_chkmagic(cgp)) {
d995d89d 553 brelse(bp);
ae851115 554 return (NULL);
d995d89d 555 }
ad9250ee 556 cgp->cg_time = time.tv_sec;
6994bf5d 557 bno = dtogd(fs, bprev);
d995d89d 558 for (i = numfrags(fs, osize); i < frags; i++)
10adeb11 559 if (isclr(cg_blksfree(cgp), bno + i)) {
aca50d72 560 brelse(bp);
ae851115 561 return (NULL);
aca50d72
KM
562 }
563 /*
564 * the current fragment can be extended
565 * deduct the count on fragment being extended into
566 * increase the count on the remaining fragment (if any)
567 * allocate the extended piece
568 */
569 for (i = frags; i < fs->fs_frag - bbase; i++)
10adeb11 570 if (isclr(cg_blksfree(cgp), bno + i))
f3c028b7 571 break;
d995d89d 572 cgp->cg_frsum[i - numfrags(fs, osize)]--;
aca50d72
KM
573 if (i != frags)
574 cgp->cg_frsum[i - frags]++;
d995d89d 575 for (i = numfrags(fs, osize); i < frags; i++) {
10adeb11 576 clrbit(cg_blksfree(cgp), bno + i);
aca50d72
KM
577 cgp->cg_cs.cs_nffree--;
578 fs->fs_cstotal.cs_nffree--;
579 fs->fs_cs(fs, cg).cs_nffree--;
f3c028b7 580 }
aca50d72
KM
581 fs->fs_fmod++;
582 bdwrite(bp);
583 return (bprev);
07670f7d
KM
584}
585
502770a3
KM
586/*
587 * Determine whether a block can be allocated.
588 *
589 * Check to see if a block of the apprpriate size is available,
590 * and if it is, allocate it.
591 */
4f083fd7 592daddr_t
f7287e4b
KM
593alloccg(ip, cg, bpref, size)
594 struct inode *ip;
e3fe2d69
KM
595 int cg;
596 daddr_t bpref;
597 int size;
598{
f7287e4b 599 register struct fs *fs;
f3c028b7 600 register struct cg *cgp;
7188ac27 601 struct buf *bp;
f3c028b7 602 register int i;
7188ac27 603 int error, bno, frags, allocsiz;
e3fe2d69 604
f7287e4b 605 fs = ip->i_fs;
b6407c9d 606 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
ae851115 607 return (NULL);
ec67a3ce
MK
608#ifdef SECSIZE
609 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
610 fs->fs_dbsize);
611#else SECSIZE
7188ac27 612 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 613 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
614 if (error) {
615 brelse(bp);
616 return (NULL);
617 }
ec67a3ce 618#endif SECSIZE
e5476900 619 cgp = bp->b_un.b_cg;
7188ac27 620 if (!cg_chkmagic(cgp) ||
0f538882 621 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
d995d89d 622 brelse(bp);
ae851115 623 return (NULL);
d995d89d 624 }
ad9250ee 625 cgp->cg_time = time.tv_sec;
b6407c9d 626 if (size == fs->fs_bsize) {
daaf7bee 627 bno = alloccgblk(fs, cgp, bpref);
f3c028b7
KM
628 bdwrite(bp);
629 return (bno);
630 }
631 /*
632 * check to see if any fragments are already available
633 * allocsiz is the size which will be allocated, hacking
634 * it down to a smaller size if necessary
635 */
d995d89d 636 frags = numfrags(fs, size);
b6407c9d 637 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
f3c028b7
KM
638 if (cgp->cg_frsum[allocsiz] != 0)
639 break;
b6407c9d 640 if (allocsiz == fs->fs_frag) {
f3c028b7
KM
641 /*
642 * no fragments were available, so a block will be
643 * allocated, and hacked up
644 */
0947395d 645 if (cgp->cg_cs.cs_nbfree == 0) {
f3c028b7 646 brelse(bp);
ae851115 647 return (NULL);
f3c028b7 648 }
daaf7bee 649 bno = alloccgblk(fs, cgp, bpref);
6994bf5d 650 bpref = dtogd(fs, bno);
b6407c9d 651 for (i = frags; i < fs->fs_frag; i++)
10adeb11 652 setbit(cg_blksfree(cgp), bpref + i);
b6407c9d 653 i = fs->fs_frag - frags;
0947395d
KM
654 cgp->cg_cs.cs_nffree += i;
655 fs->fs_cstotal.cs_nffree += i;
b6407c9d 656 fs->fs_cs(fs, cg).cs_nffree += i;
961945a8 657 fs->fs_fmod++;
f3c028b7
KM
658 cgp->cg_frsum[i]++;
659 bdwrite(bp);
660 return (bno);
661 }
743f1ef7 662 bno = mapsearch(fs, cgp, bpref, allocsiz);
0f538882
KM
663 if (bno < 0) {
664 brelse(bp);
ae851115 665 return (NULL);
0f538882 666 }
f3c028b7 667 for (i = 0; i < frags; i++)
10adeb11 668 clrbit(cg_blksfree(cgp), bno + i);
0947395d
KM
669 cgp->cg_cs.cs_nffree -= frags;
670 fs->fs_cstotal.cs_nffree -= frags;
b6407c9d 671 fs->fs_cs(fs, cg).cs_nffree -= frags;
961945a8 672 fs->fs_fmod++;
f3c028b7
KM
673 cgp->cg_frsum[allocsiz]--;
674 if (frags != allocsiz)
675 cgp->cg_frsum[allocsiz - frags]++;
676 bdwrite(bp);
677 return (cg * fs->fs_fpg + bno);
678}
679
502770a3
KM
680/*
681 * Allocate a block in a cylinder group.
682 *
683 * This algorithm implements the following policy:
684 * 1) allocate the requested block.
685 * 2) allocate a rotationally optimal block in the same cylinder.
686 * 3) allocate the next available block on the block rotor for the
687 * specified cylinder group.
688 * Note that this routine only allocates fs_bsize blocks; these
689 * blocks may be fragmented by the routine that allocates them.
690 */
f3c028b7 691daddr_t
daaf7bee 692alloccgblk(fs, cgp, bpref)
f7287e4b 693 register struct fs *fs;
f3c028b7
KM
694 register struct cg *cgp;
695 daddr_t bpref;
696{
743f1ef7 697 daddr_t bno;
ae851115 698 int cylno, pos, delta;
743f1ef7 699 short *cylbp;
aca50d72 700 register int i;
f3c028b7 701
743f1ef7
KM
702 if (bpref == 0) {
703 bpref = cgp->cg_rotor;
aca50d72
KM
704 goto norot;
705 }
a8580723 706 bpref = blknum(fs, bpref);
6994bf5d 707 bpref = dtogd(fs, bpref);
aca50d72
KM
708 /*
709 * if the requested block is available, use it
710 */
10adeb11 711 if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
aca50d72
KM
712 bno = bpref;
713 goto gotit;
714 }
aca50d72
KM
715 /*
716 * check for a block available on the same cylinder
aca50d72
KM
717 */
718 cylno = cbtocylno(fs, bpref);
10adeb11 719 if (cg_blktot(cgp)[cylno] == 0)
502770a3
KM
720 goto norot;
721 if (fs->fs_cpc == 0) {
722 /*
723 * block layout info is not available, so just have
724 * to take any block in this cylinder.
725 */
726 bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
727 goto norot;
728 }
aca50d72
KM
729 /*
730 * check the summary information to see if a block is
731 * available in the requested cylinder starting at the
4f083fd7 732 * requested rotational position and proceeding around.
aca50d72 733 */
10adeb11 734 cylbp = cg_blks(fs, cgp, cylno);
4f083fd7 735 pos = cbtorpos(fs, bpref);
10adeb11 736 for (i = pos; i < fs->fs_nrpos; i++)
aca50d72
KM
737 if (cylbp[i] > 0)
738 break;
10adeb11 739 if (i == fs->fs_nrpos)
aca50d72 740 for (i = 0; i < pos; i++)
743f1ef7
KM
741 if (cylbp[i] > 0)
742 break;
aca50d72
KM
743 if (cylbp[i] > 0) {
744 /*
745 * found a rotational position, now find the actual
746 * block. A panic if none is actually there.
747 */
748 pos = cylno % fs->fs_cpc;
749 bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
10adeb11 750 if (fs_postbl(fs, pos)[i] == -1) {
ffd90e52
KM
751 printf("pos = %d, i = %d, fs = %s\n",
752 pos, i, fs->fs_fsmnt);
aca50d72 753 panic("alloccgblk: cyl groups corrupted");
ffd90e52 754 }
10adeb11
KM
755 for (i = fs_postbl(fs, pos)[i];; ) {
756 if (isblock(fs, cg_blksfree(cgp), bno + i)) {
240a4664 757 bno = blkstofrags(fs, (bno + i));
aca50d72 758 goto gotit;
743f1ef7 759 }
10adeb11
KM
760 delta = fs_rotbl(fs)[i];
761 if (delta <= 0 ||
762 delta + i > fragstoblks(fs, fs->fs_fpg))
aca50d72 763 break;
ae851115 764 i += delta;
743f1ef7 765 }
ffd90e52 766 printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
aca50d72 767 panic("alloccgblk: can't find blk in cyl");
e3fe2d69 768 }
aca50d72
KM
769norot:
770 /*
771 * no blocks in the requested cylinder, so take next
772 * available one in this cylinder group.
773 */
b32450f4 774 bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
6459ebe0 775 if (bno < 0)
ae851115 776 return (NULL);
743f1ef7 777 cgp->cg_rotor = bno;
e3fe2d69 778gotit:
10adeb11 779 clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
0947395d
KM
780 cgp->cg_cs.cs_nbfree--;
781 fs->fs_cstotal.cs_nbfree--;
b6407c9d 782 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
502770a3 783 cylno = cbtocylno(fs, bno);
10adeb11
KM
784 cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
785 cg_blktot(cgp)[cylno]--;
e3fe2d69 786 fs->fs_fmod++;
743f1ef7 787 return (cgp->cg_cgx * fs->fs_fpg + bno);
e3fe2d69 788}
10adeb11 789
502770a3
KM
790/*
791 * Determine whether an inode can be allocated.
792 *
793 * Check to see if an inode is available, and if it is,
794 * allocate it using the following policy:
795 * 1) allocate the requested inode.
796 * 2) allocate the next available inode after the requested
797 * inode in the specified cylinder group.
798 */
4f083fd7 799ino_t
f7287e4b
KM
800ialloccg(ip, cg, ipref, mode)
801 struct inode *ip;
e3fe2d69
KM
802 int cg;
803 daddr_t ipref;
804 int mode;
805{
f7287e4b 806 register struct fs *fs;
f3c028b7 807 register struct cg *cgp;
4e0c7b8a 808 struct buf *bp;
7188ac27 809 int error, start, len, loc, map, i;
e3fe2d69 810
f7287e4b 811 fs = ip->i_fs;
b6407c9d 812 if (fs->fs_cs(fs, cg).cs_nifree == 0)
ae851115 813 return (NULL);
ec67a3ce
MK
814#ifdef SECSIZE
815 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
816 fs->fs_dbsize);
817#else SECSIZE
7188ac27 818 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 819 (int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
820 if (error) {
821 brelse(bp);
822 return (NULL);
823 }
ec67a3ce 824#endif SECSIZE
e5476900 825 cgp = bp->b_un.b_cg;
7188ac27 826 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
d995d89d 827 brelse(bp);
ae851115 828 return (NULL);
d995d89d 829 }
ad9250ee 830 cgp->cg_time = time.tv_sec;
e3fe2d69
KM
831 if (ipref) {
832 ipref %= fs->fs_ipg;
10adeb11 833 if (isclr(cg_inosused(cgp), ipref))
e3fe2d69 834 goto gotit;
4e0c7b8a
KM
835 }
836 start = cgp->cg_irotor / NBBY;
837 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
10adeb11 838 loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
4e0c7b8a 839 if (loc == 0) {
e5889092
KM
840 len = start + 1;
841 start = 0;
10adeb11 842 loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
e5889092
KM
843 if (loc == 0) {
844 printf("cg = %s, irotor = %d, fs = %s\n",
845 cg, cgp->cg_irotor, fs->fs_fsmnt);
846 panic("ialloccg: map corrupted");
847 /* NOTREACHED */
848 }
4e0c7b8a
KM
849 }
850 i = start + len - loc;
10adeb11 851 map = cg_inosused(cgp)[i];
4e0c7b8a
KM
852 ipref = i * NBBY;
853 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
854 if ((map & i) == 0) {
e3fe2d69
KM
855 cgp->cg_irotor = ipref;
856 goto gotit;
857 }
858 }
4e0c7b8a
KM
859 printf("fs = %s\n", fs->fs_fsmnt);
860 panic("ialloccg: block not in map");
861 /* NOTREACHED */
e3fe2d69 862gotit:
10adeb11 863 setbit(cg_inosused(cgp), ipref);
0947395d
KM
864 cgp->cg_cs.cs_nifree--;
865 fs->fs_cstotal.cs_nifree--;
b6407c9d 866 fs->fs_cs(fs, cg).cs_nifree--;
e3fe2d69
KM
867 fs->fs_fmod++;
868 if ((mode & IFMT) == IFDIR) {
0947395d
KM
869 cgp->cg_cs.cs_ndir++;
870 fs->fs_cstotal.cs_ndir++;
b6407c9d 871 fs->fs_cs(fs, cg).cs_ndir++;
e3fe2d69
KM
872 }
873 bdwrite(bp);
874 return (cg * fs->fs_ipg + ipref);
875}
876
502770a3
KM
877/*
878 * Free a block or fragment.
879 *
880 * The specified block or fragment is placed back in the
881 * free map. If a fragment is deallocated, a possible
882 * block reassembly is checked.
 *
 * ip supplies the file system (ip->i_fs) and device for diagnostics;
 * bno is the file system block address; size is the amount freed in
 * bytes (a multiple of fs_fsize, at most fs_bsize).
883 */
ced3a252 884blkfree(ip, bno, size)
f7287e4b 885	register struct inode *ip;
e3fe2d69 886	daddr_t bno;
daaf7bee 887	off_t size;
e3fe2d69
KM
888{
889	register struct fs *fs;
890	register struct cg *cgp;
7188ac27
KM
891	struct buf *bp;
892	int error, cg, blk, frags, bbase;
f3c028b7 893	register int i;
e3fe2d69 894
f7287e4b 895	fs = ip->i_fs;
	/* size must be a fragment multiple no larger than a full block */
ffd90e52
KM
896	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
897		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
898		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
ced3a252 899		panic("blkfree: bad size");
ffd90e52 900	}
6994bf5d 901	cg = dtog(fs, bno);
6459ebe0
KM
902	if (badblock(fs, bno)) {
903		printf("bad block %d, ino %d\n", bno, ip->i_number);
e3fe2d69 904		return;
6459ebe0 905	}
	/* read in the cylinder group map; it is written back via bdwrite below */
ec67a3ce
MK
906#ifdef SECSIZE
907	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
908	    fs->fs_dbsize);
909#else SECSIZE
7188ac27 910	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 911		(int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
912	if (error) {
913		brelse(bp);
914		return;
915	}
ec67a3ce 916#endif SECSIZE
e5476900 917	cgp = bp->b_un.b_cg;
918	if (!cg_chkmagic(cgp)) {
d995d89d 919		brelse(bp);
e3fe2d69 920		return;
d995d89d 921	}
	/* stamp the cylinder group with the time of this modification */
ad9250ee 922	cgp->cg_time = time.tv_sec;
	/* from here on, bno is relative to the start of cylinder group cg */
6994bf5d 923	bno = dtogd(fs, bno);
b6407c9d 924	if (size == fs->fs_bsize) {
		/* freeing an entire block: sanity-check, then mark it free */
10adeb11 925		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
ffd90e52
KM
926			printf("dev = 0x%x, block = %d, fs = %s\n",
927			    ip->i_dev, bno, fs->fs_fsmnt);
ced3a252 928			panic("blkfree: freeing free block");
6459ebe0 929		}
10adeb11 930		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
0947395d
KM
931		cgp->cg_cs.cs_nbfree++;
932		fs->fs_cstotal.cs_nbfree++;
b6407c9d 933		fs->fs_cs(fs, cg).cs_nbfree++;
502770a3 934		i = cbtocylno(fs, bno);
10adeb11
KM
935		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
936		cg_blktot(cgp)[i]++;
07670f7d 937	} else {
a8580723 938		bbase = bno - fragnum(fs, bno);
f3c028b7
KM
939		/*
940		 * decrement the counts associated with the old frags
941		 */
10adeb11 942		blk = blkmap(fs, cg_blksfree(cgp), bbase);
b6407c9d 943		fragacct(fs, blk, cgp->cg_frsum, -1);
f3c028b7
KM
944		/*
945		 * deallocate the fragment
946		 */
d995d89d 947		frags = numfrags(fs, size);
f3c028b7 948		for (i = 0; i < frags; i++) {
10adeb11 949			if (isset(cg_blksfree(cgp), bno + i)) {
ffd90e52
KM
950				printf("dev = 0x%x, block = %d, fs = %s\n",
951				    ip->i_dev, bno + i, fs->fs_fsmnt);
ced3a252 952				panic("blkfree: freeing free frag");
ffd90e52 953			}
10adeb11 954			setbit(cg_blksfree(cgp), bno + i);
07670f7d 955		}
		/* i == frags at this point: credit the freed fragments */
ae851115
KM
956		cgp->cg_cs.cs_nffree += i;
957		fs->fs_cstotal.cs_nffree += i;
958		fs->fs_cs(fs, cg).cs_nffree += i;
f3c028b7
KM
959		/*
960		 * add back in counts associated with the new frags
961		 */
10adeb11 962		blk = blkmap(fs, cg_blksfree(cgp), bbase);
b6407c9d 963		fragacct(fs, blk, cgp->cg_frsum, 1);
f3c028b7
KM
964		/*
965		 * if a complete block has been reassembled, account for it
966		 */
9523729e
KM
967		if (isblock(fs, cg_blksfree(cgp),
968		    (daddr_t)fragstoblks(fs, bbase))) {
b6407c9d
KM
969			cgp->cg_cs.cs_nffree -= fs->fs_frag;
970			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
971			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
0947395d
KM
972			cgp->cg_cs.cs_nbfree++;
973			fs->fs_cstotal.cs_nbfree++;
b6407c9d 974			fs->fs_cs(fs, cg).cs_nbfree++;
502770a3 975			i = cbtocylno(fs, bbase);
10adeb11
KM
976			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
977			cg_blktot(cgp)[i]++;
07670f7d
KM
978		}
979	}
	/* mark the file system modified and schedule a delayed write of the map */
e3fe2d69 980	fs->fs_fmod++;
e3fe2d69
KM
981	bdwrite(bp);
982}
983
502770a3
KM
984/*
985 * Free an inode.
986 *
987 * The specified inode is placed back in the free map.
 *
 * ip supplies the file system and device; ino is the file-system-wide
 * inode number being freed; mode is the inode's mode, used to keep the
 * per-cg and total directory counts in step.
988 */
f7287e4b
KM
989ifree(ip, ino, mode)
990	struct inode *ip;
e3fe2d69
KM
991	ino_t ino;
992	int mode;
993{
994	register struct fs *fs;
995	register struct cg *cgp;
7188ac27
KM
996	struct buf *bp;
997	int error, cg;
e3fe2d69 998
f7287e4b 999	fs = ip->i_fs;
	/* ino must lie within the file system's inode space */
ffd90e52
KM
1000	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
1001		printf("dev = 0x%x, ino = %d, fs = %s\n",
1002		    ip->i_dev, ino, fs->fs_fsmnt);
e3fe2d69 1003		panic("ifree: range");
ffd90e52 1004	}
1005	cg = itog(fs, ino);
	/* read in the cylinder group map; written back via bdwrite below */
ec67a3ce
MK
1006#ifdef SECSIZE
1007	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1008	    fs->fs_dbsize);
1009#else SECSIZE
7188ac27 1010	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
a937f856 1011		(int)fs->fs_cgsize, NOCRED, &bp);
7188ac27
KM
1012	if (error) {
1013		brelse(bp);
1014		return;
1015	}
ec67a3ce 1016#endif SECSIZE
e5476900 1017	cgp = bp->b_un.b_cg;
7188ac27 1018	if (!cg_chkmagic(cgp)) {
d995d89d 1019		brelse(bp);
e3fe2d69 1020		return;
d995d89d 1021	}
ad9250ee 1022	cgp->cg_time = time.tv_sec;
	/* reduce ino to an offset within its cylinder group */
e3fe2d69 1023	ino %= fs->fs_ipg;
10adeb11 1024	if (isclr(cg_inosused(cgp), ino)) {
ffd90e52
KM
1025		printf("dev = 0x%x, ino = %d, fs = %s\n",
1026		    ip->i_dev, ino, fs->fs_fsmnt);
e3fe2d69 1027		panic("ifree: freeing free inode");
ffd90e52 1028	}
10adeb11 1029	clrbit(cg_inosused(cgp), ino);
	/* pull the allocation rotor back so the freed slot is found first */
4e0c7b8a
KM
1030	if (ino < cgp->cg_irotor)
1031		cgp->cg_irotor = ino;
0947395d
KM
1032	cgp->cg_cs.cs_nifree++;
1033	fs->fs_cstotal.cs_nifree++;
b6407c9d 1034	fs->fs_cs(fs, cg).cs_nifree++;
e3fe2d69 1035	if ((mode & IFMT) == IFDIR) {
0947395d
KM
1036		cgp->cg_cs.cs_ndir--;
1037		fs->fs_cstotal.cs_ndir--;
b6407c9d 1038		fs->fs_cs(fs, cg).cs_ndir--;
e3fe2d69
KM
1039	}
1040	fs->fs_fmod++;
1041	bdwrite(bp);
1042}
1043
743f1ef7 1044/*
502770a3
KM
1045 * Find a block of the specified size in the specified cylinder group.
1046 *
 * bpref, if nonzero, gives the preferred place to begin the search;
 * otherwise the search starts at the per-cg rotor (cg_frotor).
 * allocsiz is the run of free fragments wanted.  The return value is a
 * cg-relative fragment number.
 *
743f1ef7
KM
1047 * It is a panic if a request is made to find a block if none are
1048 * available.
1049 */
1050daddr_t
1051mapsearch(fs, cgp, bpref, allocsiz)
1052	register struct fs *fs;
1053	register struct cg *cgp;
1054	daddr_t bpref;
1055	int allocsiz;
1056{
1057	daddr_t bno;
1058	int start, len, loc, i;
1059	int blk, field, subfield, pos;
1060
1061	/*
1062	 * find the fragment by searching through the free block
1063	 * map for an appropriate bit pattern
1064	 */
1065	if (bpref)
6994bf5d 1066		start = dtogd(fs, bpref) / NBBY;
743f1ef7
KM
1067	else
1068		start = cgp->cg_frotor / NBBY;
	/* first pass: scan from start to the end of the map */
942bd18b 1069	len = howmany(fs->fs_fpg, NBBY) - start;
9523729e
KM
1070	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
1071		(u_char *)fragtbl[fs->fs_frag],
1072		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
743f1ef7 1073	if (loc == 0) {
		/* not found: wrap and rescan from the beginning up through start */
e5476900
KM
1074		len = start + 1;
1075		start = 0;
9523729e
KM
1076		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
1077			(u_char *)fragtbl[fs->fs_frag],
1078			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
4e0c7b8a
KM
1079		if (loc == 0) {
1080			printf("start = %d, len = %d, fs = %s\n",
1081				start, len, fs->fs_fsmnt);
1082			panic("alloccg: map corrupted");
e5889092 1083			/* NOTREACHED */
4e0c7b8a 1084		}
743f1ef7
KM
1085	}
	/* scanc counts down; start + len - loc is the matching byte's index */
1086	bno = (start + len - loc) * NBBY;
1087	cgp->cg_frotor = bno;
1088	/*
1089	 * found the byte in the map
1090	 * sift through the bits to find the selected frag
1091	 */
ae851115 1092	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
10adeb11 1093		blk = blkmap(fs, cg_blksfree(cgp), bno);
743f1ef7
KM
		/* NOTE(review): the shift appears to align blk with the
		 * around[]/inside[] masks below -- confirm against param.h */
1094		blk <<= 1;
1095		field = around[allocsiz];
1096		subfield = inside[allocsiz];
b6407c9d 1097		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
ae851115
KM
1098			if ((blk & field) == subfield)
1099				return (bno + pos);
743f1ef7
KM
1100			field <<= 1;
1101			subfield <<= 1;
1102		}
1103	}
ffd90e52 1104	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
743f1ef7 1105	panic("alloccg: block not in map");
e5476900 1106	return (-1);
743f1ef7
KM
1107}
1108
e3fe2d69 1109/*
502770a3
KM
1110 * Fserr prints the name of a file system with an error diagnostic.
1111 *
1112 * The form of the error message is:
e3fe2d69
KM
1113 * fs: error message
1114 */
1115fserr(fs, cp)
1116 struct fs *fs;
1117 char *cp;
1118{
1119
283ffc90 1120 log(LOG_ERR, "%s: %s\n", fs->fs_fsmnt, cp);
e3fe2d69 1121}