* Copyright (c) 1982 Regents of the University of California.
* All rights reserved. The Berkeley software License Agreement
* specifies the terms and conditions for redistribution.
* @(#)ffs_alloc.c 6.14 (Berkeley) %G%
extern u_long
hashalloc();
extern daddr_t
alloccg();
extern daddr_t
alloccgblk();
extern daddr_t
fragextend();
extern daddr_t
blkpref();
extern daddr_t
mapsearch();
extern int inside
[], around
[];
extern unsigned char *fragtbl
[];
* Allocate a block in the file system.
* The size of the requested block is given, which must be some
* multiple of fs_fsize and <= fs_bsize.
* A preference may be optionally specified. If a preference is given
* the following hierarchy is used to allocate a block:
* 1) allocate the requested block.
* 2) allocate a rotationally optimal block in the same cylinder.
* 3) allocate a block in the same cylinder group.
* 4) quadratically rehash into other cylinder groups, until an
* available block is located.
* If no block preference is given the following hierarchy is used
* 1) allocate a block in the cylinder group that contains the
* 2) quadratically rehash into other cylinder groups, until an
* available block is located.
register struct inode
*ip
;
if ((unsigned)size
> fs
->fs_bsize
|| fragoff(fs
, size
) != 0) {
printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
ip
->i_dev
, fs
->fs_bsize
, size
, fs
->fs_fsmnt
);
panic("alloc: bad size");
if (size
== fs
->fs_bsize
&& fs
->fs_cstotal
.cs_nbfree
== 0)
if (u
.u_uid
!= 0 && freespace(fs
, fs
->fs_minfree
) <= 0)
u
.u_error
= chkdq(ip
, (long)btodb(size
), 0);
if (bpref
>= fs
->fs_size
)
cg
= itog(fs
, ip
->i_number
);
bno
= (daddr_t
)hashalloc(ip
, cg
, (long)bpref
, size
,
ip
->i_blocks
+= btodb(size
);
bp
= getblk(ip
->i_dev
, fsbtodb(fs
, bno
), size
);
fserr(fs
, "file system full");
uprintf("\n%s: write failed, file system is full\n", fs
->fs_fsmnt
);
* Reallocate a fragment to a bigger size
* The number and size of the old block is given, and a preference
* and new size is also specified. The allocator attempts to extend
* the original block. Failing that, the regular block allocator is
* invoked to get an appropriate block.
realloccg(ip
, bprev
, bpref
, osize
, nsize
)
register struct inode
*ip
;
register struct buf
*bp
, *obp
;
if ((unsigned)osize
> fs
->fs_bsize
|| fragoff(fs
, osize
) != 0 ||
(unsigned)nsize
> fs
->fs_bsize
|| fragoff(fs
, nsize
) != 0) {
printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
ip
->i_dev
, fs
->fs_bsize
, osize
, nsize
, fs
->fs_fsmnt
);
panic("realloccg: bad size");
if (u
.u_uid
!= 0 && freespace(fs
, fs
->fs_minfree
) <= 0)
printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
ip
->i_dev
, fs
->fs_bsize
, bprev
, fs
->fs_fsmnt
);
panic("realloccg: bad bprev");
u
.u_error
= chkdq(ip
, (long)btodb(nsize
- osize
), 0);
bno
= fragextend(ip
, cg
, (long)bprev
, osize
, nsize
);
bp
= bread(ip
->i_dev
, fsbtodb(fs
, bno
), osize
);
if (bp
->b_flags
& B_ERROR
) {
} while (brealloc(bp
, nsize
) == 0);
bzero(bp
->b_un
.b_addr
+ osize
, (unsigned)nsize
- osize
);
ip
->i_blocks
+= btodb(nsize
- osize
);
if (bpref
>= fs
->fs_size
)
bno
= (daddr_t
)hashalloc(ip
, cg
, (long)bpref
, fs
->fs_bsize
,
obp
= bread(ip
->i_dev
, fsbtodb(fs
, bprev
), osize
);
if (obp
->b_flags
& B_ERROR
) {
bp
= getblk(ip
->i_dev
, fsbtodb(fs
, bno
), nsize
);
bcopy(obp
->b_un
.b_addr
, bp
->b_un
.b_addr
, (u_int
)osize
);
bzero(bp
->b_un
.b_addr
+ osize
, (unsigned)nsize
- osize
);
if (obp
->b_flags
& B_DELWRI
) {
obp
->b_flags
&= ~B_DELWRI
;
u
.u_ru
.ru_oublock
--; /* delete charge */
free(ip
, bprev
, (off_t
)osize
);
if (nsize
< fs
->fs_bsize
)
free(ip
, bno
+ numfrags(fs
, nsize
),
(off_t
)(fs
->fs_bsize
- nsize
));
ip
->i_blocks
+= btodb(nsize
- osize
);
fserr(fs
, "file system full");
uprintf("\n%s: write failed, file system is full\n", fs
->fs_fsmnt
);
* Allocate an inode in the file system.
* A preference may be optionally specified. If a preference is given
* the following hierarchy is used to allocate an inode:
* 1) allocate the requested inode.
* 2) allocate an inode in the same cylinder group.
* 3) quadratically rehash into other cylinder groups, until an
* available inode is located.
* If no inode preference is given the following hierarchy is used
* 1) allocate an inode in cylinder group 0.
* 2) quadratically rehash into other cylinder groups, until an
* available inode is located.
register struct inode
*pip
;
register struct inode
*ip
;
if (fs
->fs_cstotal
.cs_nifree
== 0)
u
.u_error
= chkiq(pip
->i_dev
, (struct inode
*)NULL
, u
.u_uid
, 0);
if (ipref
>= fs
->fs_ncg
* fs
->fs_ipg
)
ino
= (ino_t
)hashalloc(pip
, cg
, (long)ipref
, mode
, ialloccg
);
ip
= iget(pip
->i_dev
, pip
->i_fs
, ino
);
printf("mode = 0%o, inum = %d, fs = %s\n",
ip
->i_mode
, ip
->i_number
, fs
->fs_fsmnt
);
panic("ialloc: dup alloc");
if (ip
->i_blocks
) { /* XXX */
printf("free inode %s/%d had %d blocks\n",
fs
->fs_fsmnt
, ino
, ip
->i_blocks
);
fserr(fs
, "out of inodes");
uprintf("\n%s: create/symlink failed, no inodes free\n", fs
->fs_fsmnt
);
* Find a cylinder to place a directory.
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
int cg
, minndir
, mincg
, avgifree
;
avgifree
= fs
->fs_cstotal
.cs_nifree
/ fs
->fs_ncg
;
for (cg
= 0; cg
< fs
->fs_ncg
; cg
++)
if (fs
->fs_cs(fs
, cg
).cs_ndir
< minndir
&&
fs
->fs_cs(fs
, cg
).cs_nifree
>= avgifree
) {
minndir
= fs
->fs_cs(fs
, cg
).cs_ndir
;
return ((ino_t
)(fs
->fs_ipg
* mincg
));
* Select the desired position for the next block in a file. The file is
* logically divided into sections. The first section is composed of the
* direct blocks. Each additional section contains fs_maxbpg blocks.
* If no blocks have been allocated in the first section, the policy is to
* request a block in the same cylinder group as the inode that describes
* the file. If no blocks have been allocated in any other section, the
* policy is to place the section in a cylinder group with a greater than
* average number of free blocks. An appropriate cylinder group is found
* by using a rotor that sweeps the cylinder groups. When a new group of
* blocks is needed, the sweep begins in the cylinder group following the
* cylinder group from which the previous allocation was made. The sweep
* continues until a cylinder group with greater than the average number
* of free blocks is found. If the allocation is for the first block in an
* indirect block, the information on the previous allocation is unavailable;
* here a best guess is made based upon the logical block number being
* If a section is already partially allocated, the policy is to
* contiguously allocate fs_maxcontig blocks. The end of one of these
* contiguous blocks and the beginning of the next is physically separated
* so that the disk head will be in transit between them for at least
* fs_rotdelay milliseconds. This is to allow time for the processor to
* schedule another I/O transfer.
blkpref(ip
, lbn
, indx
, bap
)
if (indx
% fs
->fs_maxbpg
== 0 || bap
[indx
- 1] == 0) {
cg
= itog(fs
, ip
->i_number
);
return (fs
->fs_fpg
* cg
+ fs
->fs_frag
);
* Find a cylinder with greater than average number of
if (indx
== 0 || bap
[indx
- 1] == 0)
startcg
= itog(fs
, ip
->i_number
) + lbn
/ fs
->fs_maxbpg
;
startcg
= dtog(fs
, bap
[indx
- 1]) + 1;
avgbfree
= fs
->fs_cstotal
.cs_nbfree
/ fs
->fs_ncg
;
for (cg
= startcg
; cg
< fs
->fs_ncg
; cg
++)
if (fs
->fs_cs(fs
, cg
).cs_nbfree
>= avgbfree
) {
return (fs
->fs_fpg
* cg
+ fs
->fs_frag
);
for (cg
= 0; cg
<= startcg
; cg
++)
if (fs
->fs_cs(fs
, cg
).cs_nbfree
>= avgbfree
) {
return (fs
->fs_fpg
* cg
+ fs
->fs_frag
);
* One or more previous blocks have been laid out. If less
* than fs_maxcontig previous blocks are contiguous, the
* next block is requested contiguously, otherwise it is
* requested rotationally delayed by fs_rotdelay milliseconds.
nextblk
= bap
[indx
- 1] + fs
->fs_frag
;
if (indx
> fs
->fs_maxcontig
&&
bap
[indx
- fs
->fs_maxcontig
] + blkstofrags(fs
, fs
->fs_maxcontig
)
if (fs
->fs_rotdelay
!= 0)
* Here we convert ms of delay to frags as:
* (frags) = (ms) * (rev/sec) * (sect/rev) /
* ((sect/frag) * (ms/sec))
* then round up to the next block.
nextblk
+= roundup(fs
->fs_rotdelay
* fs
->fs_rps
* fs
->fs_nsect
/
(NSPF(fs
) * 1000), fs
->fs_frag
);
* Implement the cylinder overflow algorithm.
* The policy implemented by this algorithm is:
* 1) allocate the block in its requested cylinder group.
* 2) quadratically rehash on the cylinder group number.
* 3) brute force search for a free block.
hashalloc(ip
, cg
, pref
, size
, allocator
)
int size
; /* size for data blocks, mode for inodes */
* 1: preferred cylinder group
result
= (*allocator
)(ip
, cg
, pref
, size
);
for (i
= 1; i
< fs
->fs_ncg
; i
*= 2) {
result
= (*allocator
)(ip
, cg
, 0, size
);
* Note that we start at i == 2, since 0 was checked initially,
* and 1 is always checked in the quadratic rehash.
cg
= (icg
+ 2) % fs
->fs_ncg
;
for (i
= 2; i
< fs
->fs_ncg
; i
++) {
result
= (*allocator
)(ip
, cg
, 0, size
);
* Determine whether a fragment can be extended.
* Check to see if the necessary fragments are available, and
* if they are, allocate them.
fragextend(ip
, cg
, bprev
, osize
, nsize
)
if (fs
->fs_cs(fs
, cg
).cs_nffree
< numfrags(fs
, nsize
- osize
))
frags
= numfrags(fs
, nsize
);
bbase
= fragnum(fs
, bprev
);
if (bbase
> fragnum(fs
, (bprev
+ frags
- 1))) {
/* cannot extend across a block boundry */
bp
= bread(ip
->i_dev
, fsbtodb(fs
, cgtod(fs
, cg
)), (int)fs
->fs_cgsize
);
if (bp
->b_flags
& B_ERROR
|| cgp
->cg_magic
!= CG_MAGIC
) {
cgp
->cg_time
= time
.tv_sec
;
for (i
= numfrags(fs
, osize
); i
< frags
; i
++)
if (isclr(cgp
->cg_free
, bno
+ i
)) {
* the current fragment can be extended
* deduct the count on fragment being extended into
* increase the count on the remaining fragment (if any)
* allocate the extended piece
for (i
= frags
; i
< fs
->fs_frag
- bbase
; i
++)
if (isclr(cgp
->cg_free
, bno
+ i
))
cgp
->cg_frsum
[i
- numfrags(fs
, osize
)]--;
cgp
->cg_frsum
[i
- frags
]++;
for (i
= numfrags(fs
, osize
); i
< frags
; i
++) {
clrbit(cgp
->cg_free
, bno
+ i
);
fs
->fs_cstotal
.cs_nffree
--;
fs
->fs_cs(fs
, cg
).cs_nffree
--;
* Determine whether a block can be allocated.
* Check to see if a block of the appropriate size is available,
* and if it is, allocate it.
alloccg(ip
, cg
, bpref
, size
)
if (fs
->fs_cs(fs
, cg
).cs_nbfree
== 0 && size
== fs
->fs_bsize
)
bp
= bread(ip
->i_dev
, fsbtodb(fs
, cgtod(fs
, cg
)), (int)fs
->fs_cgsize
);
if (bp
->b_flags
& B_ERROR
|| cgp
->cg_magic
!= CG_MAGIC
||
(cgp
->cg_cs
.cs_nbfree
== 0 && size
== fs
->fs_bsize
)) {
cgp
->cg_time
= time
.tv_sec
;
if (size
== fs
->fs_bsize
) {
bno
= alloccgblk(fs
, cgp
, bpref
);
* check to see if any fragments are already available
* allocsiz is the size which will be allocated, hacking
* it down to a smaller size if necessary
frags
= numfrags(fs
, size
);
for (allocsiz
= frags
; allocsiz
< fs
->fs_frag
; allocsiz
++)
if (cgp
->cg_frsum
[allocsiz
] != 0)
if (allocsiz
== fs
->fs_frag
) {
* no fragments were available, so a block will be
* allocated, and hacked up
if (cgp
->cg_cs
.cs_nbfree
== 0) {
bno
= alloccgblk(fs
, cgp
, bpref
);
for (i
= frags
; i
< fs
->fs_frag
; i
++)
setbit(cgp
->cg_free
, bpref
+ i
);
cgp
->cg_cs
.cs_nffree
+= i
;
fs
->fs_cstotal
.cs_nffree
+= i
;
fs
->fs_cs(fs
, cg
).cs_nffree
+= i
;
bno
= mapsearch(fs
, cgp
, bpref
, allocsiz
);
for (i
= 0; i
< frags
; i
++)
clrbit(cgp
->cg_free
, bno
+ i
);
cgp
->cg_cs
.cs_nffree
-= frags
;
fs
->fs_cstotal
.cs_nffree
-= frags
;
fs
->fs_cs(fs
, cg
).cs_nffree
-= frags
;
cgp
->cg_frsum
[allocsiz
]--;
cgp
->cg_frsum
[allocsiz
- frags
]++;
return (cg
* fs
->fs_fpg
+ bno
);
* Allocate a block in a cylinder group.
* This algorithm implements the following policy:
* 1) allocate the requested block.
* 2) allocate a rotationally optimal block in the same cylinder.
* 3) allocate the next available block on the block rotor for the
* specified cylinder group.
* Note that this routine only allocates fs_bsize blocks; these
* blocks may be fragmented by the routine that allocates them.
alloccgblk(fs
, cgp
, bpref
)
bpref
= blknum(fs
, bpref
);
bpref
= dtogd(fs
, bpref
);
* if the requested block is available, use it
if (isblock(fs
, cgp
->cg_free
, fragstoblks(fs
, bpref
))) {
* check for a block available on the same cylinder
cylno
= cbtocylno(fs
, bpref
);
if (cgp
->cg_btot
[cylno
] == 0)
* block layout info is not available, so just have
* to take any block in this cylinder.
bpref
= howmany(fs
->fs_spc
* cylno
, NSPF(fs
));
* check the summary information to see if a block is
* available in the requested cylinder starting at the
* requested rotational position and proceeding around.
cylbp
= cgp
->cg_b
[cylno
];
pos
= cbtorpos(fs
, bpref
);
for (i
= pos
; i
< NRPOS
; i
++)
for (i
= 0; i
< pos
; i
++)
* found a rotational position, now find the actual
* block. A panic if none is actually there.
pos
= cylno
% fs
->fs_cpc
;
bno
= (cylno
- pos
) * fs
->fs_spc
/ NSPB(fs
);
if (fs
->fs_postbl
[pos
][i
] == -1) {
printf("pos = %d, i = %d, fs = %s\n",
panic("alloccgblk: cyl groups corrupted");
for (i
= fs
->fs_postbl
[pos
][i
];; ) {
if (isblock(fs
, cgp
->cg_free
, bno
+ i
)) {
bno
= blkstofrags(fs
, (bno
+ i
));
if (delta
<= 0 || delta
> MAXBPC
- i
)
printf("pos = %d, i = %d, fs = %s\n", pos
, i
, fs
->fs_fsmnt
);
panic("alloccgblk: can't find blk in cyl");
* no blocks in the requested cylinder, so take next
* available one in this cylinder group.
bno
= mapsearch(fs
, cgp
, bpref
, (int)fs
->fs_frag
);
clrblock(fs
, cgp
->cg_free
, (long)fragstoblks(fs
, bno
));
fs
->fs_cstotal
.cs_nbfree
--;
fs
->fs_cs(fs
, cgp
->cg_cgx
).cs_nbfree
--;
cylno
= cbtocylno(fs
, bno
);
cgp
->cg_b
[cylno
][cbtorpos(fs
, bno
)]--;
return (cgp
->cg_cgx
* fs
->fs_fpg
+ bno
);
* Determine whether an inode can be allocated.
* Check to see if an inode is available, and if it is,
* allocate it using the following policy:
* 1) allocate the requested inode.
* 2) allocate the next available inode after the requested
* inode in the specified cylinder group.
ialloccg(ip
, cg
, ipref
, mode
)
int start
, len
, loc
, map
, i
;
if (fs
->fs_cs(fs
, cg
).cs_nifree
== 0)
bp
= bread(ip
->i_dev
, fsbtodb(fs
, cgtod(fs
, cg
)), (int)fs
->fs_cgsize
);
if (bp
->b_flags
& B_ERROR
|| cgp
->cg_magic
!= CG_MAGIC
||
cgp
->cg_cs
.cs_nifree
== 0) {
cgp
->cg_time
= time
.tv_sec
;
if (isclr(cgp
->cg_iused
, ipref
))
start
= cgp
->cg_irotor
/ NBBY
;
len
= howmany(fs
->fs_ipg
- cgp
->cg_irotor
, NBBY
);
loc
= skpc(0xff, len
, &cgp
->cg_iused
[start
]);
loc
= skpc(0xff, len
, &cgp
->cg_iused
[0]);
printf("cg = %s, irotor = %d, fs = %s\n",
cg
, cgp
->cg_irotor
, fs
->fs_fsmnt
);
panic("ialloccg: map corrupted");
for (i
= 1; i
< (1 << NBBY
); i
<<= 1, ipref
++) {
printf("fs = %s\n", fs
->fs_fsmnt
);
panic("ialloccg: block not in map");
setbit(cgp
->cg_iused
, ipref
);
fs
->fs_cstotal
.cs_nifree
--;
fs
->fs_cs(fs
, cg
).cs_nifree
--;
if ((mode
& IFMT
) == IFDIR
) {
fs
->fs_cstotal
.cs_ndir
++;
fs
->fs_cs(fs
, cg
).cs_ndir
++;
return (cg
* fs
->fs_ipg
+ ipref
);
* Free a block or fragment.
* The specified block or fragment is placed back in the
* free map. If a fragment is deallocated, a possible
* block reassembly is checked.
register struct inode
*ip
;
int cg
, blk
, frags
, bbase
;
if ((unsigned)size
> fs
->fs_bsize
|| fragoff(fs
, size
) != 0) {
printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
ip
->i_dev
, fs
->fs_bsize
, size
, fs
->fs_fsmnt
);
printf("bad block %d, ino %d\n", bno
, ip
->i_number
);
bp
= bread(ip
->i_dev
, fsbtodb(fs
, cgtod(fs
, cg
)), (int)fs
->fs_cgsize
);
if (bp
->b_flags
& B_ERROR
|| cgp
->cg_magic
!= CG_MAGIC
) {
cgp
->cg_time
= time
.tv_sec
;
if (size
== fs
->fs_bsize
) {
if (isblock(fs
, cgp
->cg_free
, fragstoblks(fs
, bno
))) {
printf("dev = 0x%x, block = %d, fs = %s\n",
ip
->i_dev
, bno
, fs
->fs_fsmnt
);
panic("free: freeing free block");
setblock(fs
, cgp
->cg_free
, fragstoblks(fs
, bno
));
fs
->fs_cstotal
.cs_nbfree
++;
fs
->fs_cs(fs
, cg
).cs_nbfree
++;
cgp
->cg_b
[i
][cbtorpos(fs
, bno
)]++;
bbase
= bno
- fragnum(fs
, bno
);
* decrement the counts associated with the old frags
blk
= blkmap(fs
, cgp
->cg_free
, bbase
);
fragacct(fs
, blk
, cgp
->cg_frsum
, -1);
* deallocate the fragment
frags
= numfrags(fs
, size
);
for (i
= 0; i
< frags
; i
++) {
if (isset(cgp
->cg_free
, bno
+ i
)) {
printf("dev = 0x%x, block = %d, fs = %s\n",
ip
->i_dev
, bno
+ i
, fs
->fs_fsmnt
);
panic("free: freeing free frag");
setbit(cgp
->cg_free
, bno
+ i
);
cgp
->cg_cs
.cs_nffree
+= i
;
fs
->fs_cstotal
.cs_nffree
+= i
;
fs
->fs_cs(fs
, cg
).cs_nffree
+= i
;
* add back in counts associated with the new frags
blk
= blkmap(fs
, cgp
->cg_free
, bbase
);
fragacct(fs
, blk
, cgp
->cg_frsum
, 1);
* if a complete block has been reassembled, account for it
if (isblock(fs
, cgp
->cg_free
, fragstoblks(fs
, bbase
))) {
cgp
->cg_cs
.cs_nffree
-= fs
->fs_frag
;
fs
->fs_cstotal
.cs_nffree
-= fs
->fs_frag
;
fs
->fs_cs(fs
, cg
).cs_nffree
-= fs
->fs_frag
;
fs
->fs_cstotal
.cs_nbfree
++;
fs
->fs_cs(fs
, cg
).cs_nbfree
++;
i
= cbtocylno(fs
, bbase
);
cgp
->cg_b
[i
][cbtorpos(fs
, bbase
)]++;
* The specified inode is placed back in the free map.
if ((unsigned)ino
>= fs
->fs_ipg
*fs
->fs_ncg
) {
printf("dev = 0x%x, ino = %d, fs = %s\n",
ip
->i_dev
, ino
, fs
->fs_fsmnt
);
bp
= bread(ip
->i_dev
, fsbtodb(fs
, cgtod(fs
, cg
)), (int)fs
->fs_cgsize
);
if (bp
->b_flags
& B_ERROR
|| cgp
->cg_magic
!= CG_MAGIC
) {
cgp
->cg_time
= time
.tv_sec
;
if (isclr(cgp
->cg_iused
, ino
)) {
printf("dev = 0x%x, ino = %d, fs = %s\n",
ip
->i_dev
, ino
, fs
->fs_fsmnt
);
panic("ifree: freeing free inode");
clrbit(cgp
->cg_iused
, ino
);
if (ino
< cgp
->cg_irotor
)
fs
->fs_cstotal
.cs_nifree
++;
fs
->fs_cs(fs
, cg
).cs_nifree
++;
if ((mode
& IFMT
) == IFDIR
) {
fs
->fs_cstotal
.cs_ndir
--;
fs
->fs_cs(fs
, cg
).cs_ndir
--;
* Find a block of the specified size in the specified cylinder group.
* It is a panic if a request is made to find a block if none are
mapsearch(fs
, cgp
, bpref
, allocsiz
)
int blk
, field
, subfield
, pos
;
* find the fragment by searching through the free block
* map for an appropriate bit pattern
start
= dtogd(fs
, bpref
) / NBBY
;
start
= cgp
->cg_frotor
/ NBBY
;
len
= howmany(fs
->fs_fpg
, NBBY
) - start
;
loc
= scanc((unsigned)len
, (caddr_t
)&cgp
->cg_free
[start
],
(caddr_t
)fragtbl
[fs
->fs_frag
],
(int)(1 << (allocsiz
- 1 + (fs
->fs_frag
% NBBY
))));
loc
= scanc((unsigned)len
, (caddr_t
)&cgp
->cg_free
[0],
(caddr_t
)fragtbl
[fs
->fs_frag
],
(int)(1 << (allocsiz
- 1 + (fs
->fs_frag
% NBBY
))));
printf("start = %d, len = %d, fs = %s\n",
start
, len
, fs
->fs_fsmnt
);
panic("alloccg: map corrupted");
bno
= (start
+ len
- loc
) * NBBY
;
* found the byte in the map
* sift through the bits to find the selected frag
for (i
= bno
+ NBBY
; bno
< i
; bno
+= fs
->fs_frag
) {
blk
= blkmap(fs
, cgp
->cg_free
, bno
);
field
= around
[allocsiz
];
subfield
= inside
[allocsiz
];
for (pos
= 0; pos
<= fs
->fs_frag
- allocsiz
; pos
++) {
if ((blk
& field
) == subfield
)
printf("bno = %d, fs = %s\n", bno
, fs
->fs_fsmnt
);
panic("alloccg: block not in map");
* Fserr prints the name of a file system with an error diagnostic.
* The form of the error message is:
log(KERN_FAIL
, "%s: %s\n", fs
->fs_fsmnt
, cp
);