assign a new generation number when allocating an inode
[unix-history] / usr / src / sys / ufs / ffs / ffs_inode.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms are permitted
6 * provided that the above copyright notice and this paragraph are
7 * duplicated in all such forms and that any documentation,
8 * advertising materials, and other materials related to such
9 * distribution and use acknowledge that the software was developed
10 * by the University of California, Berkeley. The name of the
11 * University may not be used to endorse or promote products derived
12 * from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16 *
17 * @(#)ffs_inode.c 7.7 (Berkeley) %G%
18 */
19
20#include "param.h"
21#include "systm.h"
22#include "mount.h"
23#include "user.h"
24#include "file.h"
25#include "buf.h"
26#include "cmap.h"
27#include "vnode.h"
28#include "../ufs/inode.h"
29#include "../ufs/fs.h"
30#include "../ufs/ufsmount.h"
31#ifdef QUOTA
32#include "../ufs/quota.h"
33#endif
34#include "kernel.h"
35#include "malloc.h"
36
/*
 * Size of the inode hash table; must be a power of two for the
 * fast masking form of INOHASH below to be selected.
 */
#define INOHSZ 512
#if ((INOHSZ&(INOHSZ-1)) == 0)
/* Power-of-two table size: hash by masking the low bits. */
#define INOHASH(dev,ino) (((dev)+(ino))&(INOHSZ-1))
#else
/* General case: hash by modulus (cast keeps the remainder non-negative). */
#define INOHASH(dev,ino) (((unsigned)((dev)+(ino)))%INOHSZ)
#endif
43
/*
 * Insert inode ip at the tail of the inode free list.
 *
 * ifreeh points at the head inode; ifreet points at the i_freef
 * link of the tail inode (or at ifreeh itself when the list is
 * empty, which is why the empty case sets i_freeb to &ifreeh).
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement in all contexts (e.g. an unbraced if/else arm).
 */
#define INSFREE(ip) do {\
	if (ifreeh) { \
		*ifreet = (ip); \
		(ip)->i_freeb = ifreet; \
	} else { \
		ifreeh = (ip); \
		(ip)->i_freeb = &ifreeh; \
	} \
	(ip)->i_freef = NULL; \
	ifreet = &(ip)->i_freef; \
} while (0)
55
/*
 * Inode hash table.  Each bucket is a circular doubly-linked chain;
 * the union lets an empty bucket point at itself so the i_forw/i_back
 * links of chained inodes and the bucket header can be treated
 * uniformly by insque/remque.
 */
union ihead { /* inode LRU cache, Chris Maltby */
	union ihead *ih_head[2];
	struct inode *ih_chain[2];
} ihead[INOHSZ];

/*
 * ifreeh/ifreet: head pointer and tail link of the inode free list
 * (see INSFREE).  bdevlisth: list of inodes representing block
 * devices, linked through i_devlst, used to find aliases.
 */
struct inode *ifreeh, **ifreet, *bdevlisth;
62
63/*
64 * Initialize hash links for inodes
65 * and build inode free list.
66 */
67ihinit()
68{
69 register int i;
70 register struct inode *ip = inode;
71 register union ihead *ih = ihead;
72
73 for (i = INOHSZ; --i >= 0; ih++) {
74 ih->ih_head[0] = ih;
75 ih->ih_head[1] = ih;
76 }
77 ifreeh = ip;
78 ifreet = &ip->i_freef;
79 ip->i_freeb = &ifreeh;
80 ip->i_forw = ip;
81 ip->i_back = ip;
82 ITOV(ip)->v_data = (qaddr_t)ip;
83 for (i = ninode; --i > 0; ) {
84 ++ip;
85 ip->i_forw = ip;
86 ip->i_back = ip;
87 ITOV(ip)->v_data = (qaddr_t)ip;
88 *ifreet = ip;
89 ip->i_freeb = ifreet;
90 ifreet = &ip->i_freef;
91 }
92 ip->i_freef = NULL;
93}
94
95/*
96 * Look up an vnode/inode by device,inumber.
97 * If it is in core (in the inode structure),
98 * honor the locking protocol.
99 * If it is not in core, read it in from the
100 * specified device.
101 * Callers must check for mount points!!
102 * In all cases, a pointer to a locked
103 * inode structure is returned.
104 */
105iget(xp, ino, ipp)
106 struct inode *xp;
107 ino_t ino;
108 struct inode **ipp;
109{
110 dev_t dev = xp->i_dev;
111 struct mount *mntp = ITOV(xp)->v_mount;
112 register struct fs *fs = VFSTOUFS(mntp)->um_fs;
113 register struct inode *ip, *iq;
114 register struct vnode *vp;
115 struct inode *nip;
116 struct buf *bp;
117 struct dinode tdip, *dp;
118 union ihead *ih;
119 int error;
120
121loop:
122 ih = &ihead[INOHASH(dev, ino)];
123 for (ip = ih->ih_chain[0]; ip != (struct inode *)ih; ip = ip->i_forw)
124 if (ino == ip->i_number && dev == ip->i_dev) {
125 /*
126 * Following is essentially an inline expanded
127 * copy of igrab(), expanded inline for speed,
128 * and so that the test for a mounted on inode
129 * can be deferred until after we are sure that
130 * the inode isn't busy.
131 */
132 if ((ip->i_flag&ILOCKED) != 0) {
133 ip->i_flag |= IWANT;
134 sleep((caddr_t)ip, PINOD);
135 goto loop;
136 }
137 vp = ITOV(ip);
138 if (vp->v_count == 0) { /* ino on free list */
139 if (iq = ip->i_freef)
140 iq->i_freeb = ip->i_freeb;
141 else
142 ifreet = ip->i_freeb;
143 *ip->i_freeb = iq;
144 ip->i_freef = NULL;
145 ip->i_freeb = NULL;
146 }
147 ILOCK(ip);
148 vp->v_count++;
149 *ipp = ip;
150 return(0);
151 }
152 if (error = getnewino(dev, ino, &nip)) {
153 *ipp = 0;
154 return (error);
155 }
156 ip = nip;
157 /*
158 * Read in the disk contents for the inode.
159 */
160 if (error = bread(VFSTOUFS(mntp)->um_devvp, fsbtodb(fs, itod(fs, ino)),
161 (int)fs->fs_bsize, &bp)) {
162 /*
163 * The inode doesn't contain anything useful, so it would
164 * be misleading to leave it on its hash chain. Iput() will
165 * take care of putting it back on the free list. We also
166 * lose its inumber, just in case.
167 */
168 remque(ip);
169 ip->i_forw = ip;
170 ip->i_back = ip;
171 ip->i_number = 0;
172 INSFREE(ip);
173 iunlock(ip);
174 ip->i_flag = 0;
175 brelse(bp);
176 *ipp = 0;
177 return(error);
178 }
179 /*
180 * Check to see if the new inode represents a block device
181 * for which we already have an inode (either because of
182 * bdevvp() or because of a different inode representing
183 * the same block device). If such an alias exists, put the
184 * just allocated inode back on the free list, and replace
185 * the contents of the existing inode with the contents of
186 * the new inode.
187 */
188 dp = bp->b_un.b_dino;
189 dp += itoo(fs, ino);
190 if ((dp->di_mode & IFMT) != IFBLK) {
191 ip->i_ic = dp->di_ic;
192 brelse(bp);
193 } else {
194again:
195 for (iq = bdevlisth; iq; iq = iq->i_devlst) {
196 if (dp->di_rdev != ITOV(iq)->v_rdev)
197 continue;
198 igrab(iq);
199 if (dp->di_rdev != ITOV(iq)->v_rdev) {
200 iput(iq);
201 goto again;
202 }
203 /*
204 * Discard unneeded inode.
205 */
206 remque(ip);
207 ip->i_forw = ip;
208 ip->i_back = ip;
209 ip->i_number = 0;
210 INSFREE(ip);
211 iunlock(ip);
212 ip->i_flag = 0;
213 /*
214 * Reinitialize aliased inode.
215 * We must release the buffer that we just read
216 * before doing the iupdat() to avoid a possible
217 * deadlock with updating an inode in the same
218 * disk block.
219 */
220 ip = iq;
221 vp = ITOV(iq);
222 tdip.di_ic = dp->di_ic;
223 brelse(bp);
224 error = iupdat(ip, &time, &time, 1);
225 ip->i_ic = tdip.di_ic;
226 remque(ip);
227 insque(ip, ih);
228 ip->i_dev = dev;
229 ip->i_number = ino;
230 if (ip->i_devvp) {
231 vrele(ip->i_devvp);
232 ip->i_devvp = 0;
233 }
234 cache_purge(vp);
235 break;
236 }
237 if (iq == 0) {
238 ip->i_ic = dp->di_ic;
239 brelse(bp);
240 ip->i_devlst = bdevlisth;
241 bdevlisth = ip;
242 }
243 }
244 /*
245 * Finish inode initialization.
246 */
247 ip->i_fs = fs;
248 ip->i_devvp = VFSTOUFS(mntp)->um_devvp;
249 ip->i_devvp->v_count++;
250 /*
251 * Initialize the associated vnode
252 */
253 vp = ITOV(ip);
254 vinit(vp, mntp, IFTOVT(ip->i_mode), &ufs_vnodeops);
255 if (vp->v_type == VCHR || vp->v_type == VBLK) {
256 vp->v_rdev = ip->i_rdev;
257 vp->v_op = &blk_vnodeops;
258 }
259 if (ino == ROOTINO)
260 vp->v_flag |= VROOT;
261#ifdef QUOTA
262 if (ip->i_mode != 0)
263 ip->i_dquot = inoquota(ip);
264#endif
265 *ipp = ip;
266 return (0);
267}
268
269/*
270 * Allocate a new inode.
271 *
272 * Put it onto its hash chain and lock it so that other requests for
273 * this inode will block if they arrive while we are sleeping waiting
274 * for old data structures to be purged or for the contents of the disk
275 * portion of this inode to be read.
276 */
277getnewino(dev, ino, ipp)
278 dev_t dev;
279 ino_t ino;
280 struct inode **ipp;
281{
282 union ihead *ih;
283 register struct inode *ip, *iq;
284 register struct vnode *vp;
285
286 /*
287 * Remove the next inode from the free list.
288 */
289 if ((ip = ifreeh) == NULL) {
290 tablefull("inode");
291 *ipp = 0;
292 return(ENFILE);
293 }
294 vp = ITOV(ip);
295 if (vp->v_count)
296 panic("free inode isn't");
297 if (iq = ip->i_freef)
298 iq->i_freeb = &ifreeh;
299 ifreeh = iq;
300 ip->i_freef = NULL;
301 ip->i_freeb = NULL;
302 /*
303 * Now to take inode off the hash chain it was on
304 * (initially, or after an iflush, it is on a "hash chain"
305 * consisting entirely of itself, and pointed to by no-one)
306 * and put it on the chain for its new (ino, dev) pair.
307 */
308 remque(ip);
309 ip->i_dev = dev;
310 ip->i_number = ino;
311 if (dev != NODEV) {
312 ih = &ihead[INOHASH(dev, ino)];
313 insque(ip, ih);
314 }
315 ip->i_flag = 0;
316 ILOCK(ip);
317 ip->i_lastr = 0;
318#endif SECSIZE
319 /*
320 * Purge old data structures associated with the inode.
321 */
322 cache_purge(vp);
323 if (ip->i_devvp) {
324 vrele(ip->i_devvp);
325 ip->i_devvp = 0;
326 }
327#ifdef QUOTA
328 dqrele(ip->i_dquot);
329 ip->i_dquot = NODQUOT;
330#endif
331 if (vp->v_type == VBLK) {
332 if (bdevlisth == ip) {
333 bdevlisth = ip->i_devlst;
334 } else {
335 for (iq = bdevlisth; iq; iq = iq->i_devlst) {
336 if (iq->i_devlst != ip)
337 continue;
338 iq->i_devlst = ip->i_devlst;
339 break;
340 }
341 if (iq == NULL)
342 panic("missing bdev");
343 }
344 }
345 *ipp = ip;
346 return (0);
347}
348
349/*
350 * Convert a pointer to an inode into a reference to an inode.
351 *
352 * This is basically the internal piece of iget (after the
353 * inode pointer is located) but without the test for mounted
354 * filesystems. It is caller's responsibility to check that
355 * the inode pointer is valid.
356 */
357igrab(ip)
358 register struct inode *ip;
359{
360 register struct vnode *vp = ITOV(ip);
361
362 while ((ip->i_flag&ILOCKED) != 0) {
363 ip->i_flag |= IWANT;
364 sleep((caddr_t)ip, PINOD);
365 }
366 if (vp->v_count == 0) { /* ino on free list */
367 register struct inode *iq;
368
369 if (iq = ip->i_freef)
370 iq->i_freeb = ip->i_freeb;
371 else
372 ifreet = ip->i_freeb;
373 *ip->i_freeb = iq;
374 ip->i_freef = NULL;
375 ip->i_freeb = NULL;
376 }
377 vp->v_count++;
378 ILOCK(ip);
379}
380
381/*
382 * Create a vnode for a block device.
383 * Used for root filesystem, argdev, and swap areas.
384 */
385bdevvp(dev, vpp)
386 dev_t dev;
387 struct vnode **vpp;
388{
389 register struct inode *ip;
390 register struct vnode *vp;
391 struct inode *nip;
392 int error;
393
394 /*
395 * Check for the existence of an existing vnode.
396 */
397again:
398 for (ip = bdevlisth; ip; ip = ip->i_devlst) {
399 vp = ITOV(ip);
400 if (dev != vp->v_rdev)
401 continue;
402 igrab(ip);
403 if (dev != vp->v_rdev) {
404 iput(ip);
405 goto again;
406 }
407 IUNLOCK(ip);
408 *vpp = vp;
409 return (0);
410 }
411 if (error = getnewino(NODEV, (ino_t)0, &nip)) {
412 *vpp = 0;
413 return (error);
414 }
415 ip = nip;
416 ip->i_fs = 0;
417 ip->i_devlst = bdevlisth;
418 bdevlisth = ip;
419 vp = ITOV(ip);
420 vinit(vp, 0, VBLK, &blk_vnodeops);
421 vp->v_rdev = dev;
422 IUNLOCK(ip);
423 *vpp = vp;
424 return (0);
425}
426
427/*
428 * Decrement reference count of
429 * an inode structure.
430 * On the last reference,
431 * write the inode out and if necessary,
432 * truncate and deallocate the file.
433 */
434iput(ip)
435 register struct inode *ip;
436{
437
438 if ((ip->i_flag & ILOCKED) == 0)
439 panic("iput");
440 IUNLOCK(ip);
441 vrele(ITOV(ip));
442}
443
444
445ufs_inactive(vp)
446 struct vnode *vp;
447{
448 register struct inode *ip = VTOI(vp);
449 int mode, error;
450
451 if (ITOV(ip)->v_count != 0)
452 panic("ufs_inactive: not inactive");
453 ILOCK(ip);
454 if (ip->i_nlink <= 0 && (ITOV(ip)->v_mount->m_flag&M_RDONLY) == 0) {
455 error = itrunc(ip, (u_long)0);
456 mode = ip->i_mode;
457 ip->i_mode = 0;
458 ip->i_rdev = 0;
459 ip->i_flag |= IUPD|ICHG;
460 ifree(ip, ip->i_number, mode);
461#ifdef QUOTA
462 (void) chkiq(ip->i_dev, ip, ip->i_uid, 0);
463 dqrele(ip->i_dquot);
464 ip->i_dquot = NODQUOT;
465#endif
466 }
467 IUPDAT(ip, &time, &time, 0);
468 IUNLOCK(ip);
469 ip->i_flag = 0;
470 /*
471 * Put the inode on the end of the free list.
472 * Possibly in some cases it would be better to
473 * put the inode at the head of the free list,
474 * (eg: where i_mode == 0 || i_number == 0).
475 */
476 INSFREE(ip);
477 return (error);
478}
479
480/*
481 * Check accessed and update flags on
482 * an inode structure.
483 * If any is on, update the inode
484 * with the current time.
485 * If waitfor is given, then must insure
486 * i/o order so wait for write to complete.
487 */
488iupdat(ip, ta, tm, waitfor)
489 register struct inode *ip;
490 struct timeval *ta, *tm;
491 int waitfor;
492{
493 struct buf *bp;
494 struct vnode *vp = ITOV(ip);
495 struct dinode *dp;
496 register struct fs *fs;
497
498 fs = ip->i_fs;
499 if ((ip->i_flag & (IUPD|IACC|ICHG|IMOD)) == 0)
500 return (0);
501 if (vp->v_mount->m_flag & M_RDONLY)
502 return (0);
503 error = bread(ip->i_devvp, fsbtodb(fs, itod(fs, ip->i_number)),
504 (int)fs->fs_bsize, &bp);
505 if (error) {
506 brelse(bp);
507 return (error);
508 }
509 if (ip->i_flag&IACC)
510 ip->i_atime = ta->tv_sec;
511 if (ip->i_flag&IUPD)
512 ip->i_mtime = tm->tv_sec;
513 if (ip->i_flag&ICHG)
514 ip->i_ctime = time.tv_sec;
515 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
516 dp = bp->b_un.b_dino + itoo(fs, ip->i_number);
517 dp->di_ic = ip->i_ic;
518 if (waitfor) {
519 return (bwrite(bp));
520 } else {
521 bdwrite(bp);
522 return (0);
523 }
524}
525
/* Indices into i_ib[]/lastiblock[] for each level of indirection. */
#define SINGLE 0 /* index of single indirect block */
#define DOUBLE 1 /* index of double indirect block */
#define TRIPLE 2 /* index of triple indirect block */
529/*
530 * Truncate the inode ip to at most
531 * length size. Free affected disk
532 * blocks -- the blocks of the file
533 * are removed in reverse order.
534 *
535 * NB: triple indirect blocks are untested.
536 */
537itrunc(oip, length)
538 register struct inode *oip;
539 u_long length;
540{
541 register daddr_t lastblock;
542 daddr_t bn, lbn, lastiblock[NIADDR];
543 register struct fs *fs;
544 register struct inode *ip;
545 struct buf *bp;
546 int offset, osize, size, level;
547 long count, nblocks, blocksreleased = 0;
548 register int i;
549 int error, allerror = 0;
550 struct inode tip;
551
552 if (oip->i_size <= length) {
553 oip->i_flag |= ICHG|IUPD;
554 error = iupdat(oip, &time, &time, 1);
555 return (error);
556 }
557 /*
558 * Calculate index into inode's block list of
559 * last direct and indirect blocks (if any)
560 * which we want to keep. Lastblock is -1 when
561 * the file is truncated to 0.
562 */
563 fs = oip->i_fs;
564 lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
565 lastiblock[SINGLE] = lastblock - NDADDR;
566 lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
567 lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
568 nblocks = btodb(fs->fs_bsize);
569 /*
570 * Update the size of the file. If the file is not being
571 * truncated to a block boundry, the contents of the
572 * partial block following the end of the file must be
573 * zero'ed in case it ever become accessable again because
574 * of subsequent file growth.
575 */
576 osize = oip->i_size;
577 offset = blkoff(fs, length);
578 if (offset == 0) {
579 oip->i_size = length;
580 } else {
581 lbn = lblkno(fs, length);
582 error = balloc(oip, lbn, offset, &bn, B_CLRBUF);
583 if (error)
584 return (error);
585 if ((long)bn < 0)
586 panic("itrunc: hole");
587 oip->i_size = length;
588 size = blksize(fs, oip, lbn);
589 count = howmany(size, CLBYTES);
590 munhash(oip->i_devvp, bn + i * CLBYTES / DEV_BSIZE);
591 error = bread(oip->i_devvp, bn, size, &bp);
592 if (error) {
593 oip->i_size = osize;
594 brelse(bp);
595 return (error);
596 }
597 bzero(bp->b_un.b_addr + offset, (unsigned)(size - offset));
598 bdwrite(bp);
599 }
600 /*
601 * Update file and block pointers
602 * on disk before we start freeing blocks.
603 * If we crash before free'ing blocks below,
604 * the blocks will be returned to the free list.
605 * lastiblock values are also normalized to -1
606 * for calls to indirtrunc below.
607 */
608 tip = *oip;
609 tip.i_size = osize;
610 for (level = TRIPLE; level >= SINGLE; level--)
611 if (lastiblock[level] < 0) {
612 oip->i_ib[level] = 0;
613 lastiblock[level] = -1;
614 }
615 for (i = NDADDR - 1; i > lastblock; i--)
616 oip->i_db[i] = 0;
617 oip->i_flag |= ICHG|IUPD;
618 allerror = syncip(oip);
619
620 /*
621 * Indirect blocks first.
622 */
623 ip = &tip;
624 for (level = TRIPLE; level >= SINGLE; level--) {
625 bn = ip->i_ib[level];
626 if (bn != 0) {
627 error = indirtrunc(ip, bn, lastiblock[level], level,
628 &count);
629 if (error)
630 allerror = error;
631 blocksreleased += count;
632 if (lastiblock[level] < 0) {
633 ip->i_ib[level] = 0;
634 blkfree(ip, bn, (off_t)fs->fs_bsize);
635 blocksreleased += nblocks;
636 }
637 }
638 if (lastiblock[level] >= 0)
639 goto done;
640 }
641
642 /*
643 * All whole direct blocks or frags.
644 */
645 for (i = NDADDR - 1; i > lastblock; i--) {
646 register off_t bsize;
647
648 bn = ip->i_db[i];
649 if (bn == 0)
650 continue;
651 ip->i_db[i] = 0;
652 bsize = (off_t)blksize(fs, ip, i);
653 blkfree(ip, bn, bsize);
654 blocksreleased += btodb(bsize);
655 }
656 if (lastblock < 0)
657 goto done;
658
659 /*
660 * Finally, look for a change in size of the
661 * last direct block; release any frags.
662 */
663 bn = ip->i_db[lastblock];
664 if (bn != 0) {
665 off_t oldspace, newspace;
666
667 /*
668 * Calculate amount of space we're giving
669 * back as old block size minus new block size.
670 */
671 oldspace = blksize(fs, ip, lastblock);
672 ip->i_size = length;
673 newspace = blksize(fs, ip, lastblock);
674 if (newspace == 0)
675 panic("itrunc: newspace");
676 if (oldspace - newspace > 0) {
677 /*
678 * Block number of space to be free'd is
679 * the old block # plus the number of frags
680 * required for the storage we're keeping.
681 */
682 bn += numfrags(fs, newspace);
683 blkfree(ip, bn, oldspace - newspace);
684 blocksreleased += btodb(oldspace - newspace);
685 }
686 }
687done:
688/* BEGIN PARANOIA */
689 for (level = SINGLE; level <= TRIPLE; level++)
690 if (ip->i_ib[level] != oip->i_ib[level])
691 panic("itrunc1");
692 for (i = 0; i < NDADDR; i++)
693 if (ip->i_db[i] != oip->i_db[i])
694 panic("itrunc2");
695/* END PARANOIA */
696 oip->i_blocks -= blocksreleased;
697 if (oip->i_blocks < 0) /* sanity */
698 oip->i_blocks = 0;
699 oip->i_flag |= ICHG;
700#ifdef QUOTA
701 (void) chkdq(oip, -blocksreleased, 0);
702#endif
703 return (allerror);
704}
705
706/*
707 * Release blocks associated with the inode ip and
708 * stored in the indirect block bn. Blocks are free'd
709 * in LIFO order up to (but not including) lastbn. If
710 * level is greater than SINGLE, the block is an indirect
711 * block and recursive calls to indirtrunc must be used to
712 * cleanse other indirect blocks.
713 *
714 * NB: triple indirect blocks are untested.
715 */
716indirtrunc(ip, bn, lastbn, level, countp)
717 register struct inode *ip;
718 daddr_t bn, lastbn;
719 int level;
720 long *countp;
721{
722 register int i;
723 struct buf *bp;
724 register struct fs *fs = ip->i_fs;
725 register daddr_t *bap;
726 daddr_t *copy, nb, last;
727 long blkcount, factor;
728 int nblocks, blocksreleased = 0;
729 int error, allerror = 0;
730
731 /*
732 * Calculate index in current block of last
733 * block to be kept. -1 indicates the entire
734 * block so we need not calculate the index.
735 */
736 factor = 1;
737 for (i = SINGLE; i < level; i++)
738 factor *= NINDIR(fs);
739 last = lastbn;
740 if (lastbn > 0)
741 last /= factor;
742 nblocks = btodb(fs->fs_bsize);
743 /*
744 * Get buffer of block pointers, zero those
745 * entries corresponding to blocks to be free'd,
746 * and update on disk copy first.
747 */
748#ifdef SECSIZE
749 bp = bread(ip->i_dev, fsbtodb(fs, bn), (int)fs->fs_bsize,
750 fs->fs_dbsize);
751#else SECSIZE
752 error = bread(ip->i_devvp, fsbtodb(fs, bn), (int)fs->fs_bsize, &bp);
753 if (error) {
754 brelse(bp);
755 *countp = 0;
756 return (error);
757 }
758 bap = bp->b_un.b_daddr;
759 MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
760 bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
761 bzero((caddr_t)&bap[last + 1],
762 (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
763 error = bwrite(bp);
764 if (error)
765 allerror = error;
766 bap = copy;
767
768 /*
769 * Recursively free totally unused blocks.
770 */
771 for (i = NINDIR(fs) - 1; i > last; i--) {
772 nb = bap[i];
773 if (nb == 0)
774 continue;
775 if (level > SINGLE) {
776 error = indirtrunc(ip, nb, (daddr_t)-1, level - 1,
777 &blkcount);
778 if (error)
779 allerror = error;
780 blocksreleased += blkcount;
781 }
782 blkfree(ip, nb, (off_t)fs->fs_bsize);
783 blocksreleased += nblocks;
784 }
785
786 /*
787 * Recursively free last partial block.
788 */
789 if (level > SINGLE && lastbn >= 0) {
790 last = lastbn % factor;
791 nb = bap[i];
792 if (nb != 0) {
793 error = indirtrunc(ip, nb, last, level - 1, &blkcount);
794 if (error)
795 allerror = error;
796 blocksreleased += blkcount;
797 }
798 }
799 FREE(copy, M_TEMP);
800 *countp = blocksreleased;
801 return (allerror);
802}
803
804/*
805 * Remove any inodes in the inode cache belonging to dev.
806 *
807 * There should not be any active ones, return error if any are found
808 * (nb: this is a user error, not a system err).
809 */
810#ifdef QUOTA
811iflush(dev, iq)
812 dev_t dev;
813 struct inode *iq;
814#else
815iflush(dev)
816 dev_t dev;
817#endif
818{
819 register struct inode *ip;
820
821 for (ip = inode; ip < inodeNINODE; ip++) {
822#ifdef QUOTA
823 if (ip != iq && ip->i_dev == dev)
824#else
825 if (ip->i_dev == dev)
826#endif
827 if (ITOV(ip)->v_count)
828 return (EBUSY);
829 else {
830 remque(ip);
831 ip->i_forw = ip;
832 ip->i_back = ip;
833 /*
834 * as v_count == 0, the inode was on the free
835 * list already, just leave it there, it will
836 * fall off the bottom eventually. We could
837 * perhaps move it to the head of the free
838 * list, but as umounts are done so
839 * infrequently, we would gain very little,
840 * while making the code bigger.
841 */
842#ifdef QUOTA
843 dqrele(ip->i_dquot);
844 ip->i_dquot = NODQUOT;
845#endif
846 if (ip->i_devvp) {
847 vrele(ip->i_devvp);
848 ip->i_devvp = 0;
849 }
850 }
851 }
852 return (0);
853}
854
855/*
856 * Lock an inode. If its already locked, set the WANT bit and sleep.
857 */
858ilock(ip)
859 register struct inode *ip;
860{
861
862 while (ip->i_flag & ILOCKED) {
863 ip->i_flag |= IWANT;
864 (void) sleep((caddr_t)ip, PINOD);
865 }
866 ip->i_flag |= ILOCKED;
867}
868
869/*
870 * Unlock an inode. If WANT bit is on, wakeup.
871 */
872iunlock(ip)
873 register struct inode *ip;
874{
875
876 if ((ip->i_flag & ILOCKED) == 0)
877 printf("unlocking unlocked inode %d on dev 0x%x\n",
878 ip->i_number, ip->i_dev);
879 ip->i_flag &= ~ILOCKED;
880 if (ip->i_flag&IWANT) {
881 ip->i_flag &= ~IWANT;
882 wakeup((caddr_t)ip);
883 }
884}
885
886/*
887 * Check mode permission on inode pointer. Mode is READ, WRITE or EXEC.
888 * The mode is shifted to select the owner/group/other fields. The
889 * super user is granted all permissions.
890 *
891 * NB: Called from vnode op table. It seems this could all be done
892 * using vattr's but...
893 */
894iaccess(ip, mode, cred)
895 register struct inode *ip;
896 register int mode;
897 struct ucred *cred;
898{
899 register gid_t *gp;
900 register struct vnode *vp = ITOV(ip);
901 int i;
902
903 /*
904 * If you're the super-user,
905 * you always get access.
906 */
907 if (cred->cr_uid == 0)
908 return (0);
909 /*
910 * Access check is based on only one of owner, group, public.
911 * If not owner, then check group. If not a member of the
912 * group, then check public access.
913 */
914 if (cred->cr_uid != ip->i_uid) {
915 mode >>= 3;
916 gp = cred->cr_groups;
917 for (i = 0; i < cred->cr_ngroups; i++, gp++)
918 if (ip->i_gid == *gp)
919 goto found;
920 mode >>= 3;
921found:
922 ;
923 }
924 if ((ip->i_mode & mode) != 0)
925 return (0);
926 return (EACCES);
927}