Replace #ifdef DIANOSTIC with #ifdef DIAGNOSTIC; this typo has been around
[unix-history] / sys / ufs / ufs_inode.c
CommitLineData
15637ed4
RG
1/*
2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
1eb58e01 33 * from: @(#)ufs_inode.c 7.40 (Berkeley) 5/8/91
4630f671 34 * $Id: ufs_inode.c,v 1.4 1993/11/07 17:53:44 wollman Exp $
15637ed4
RG
35 */
36
37#include "param.h"
38#include "systm.h"
39#include "mount.h"
40#include "proc.h"
41#include "file.h"
42#include "buf.h"
43#include "vnode.h"
44#include "kernel.h"
45#include "malloc.h"
46
47#include "quota.h"
48#include "inode.h"
49#include "fs.h"
50#include "ufsmount.h"
51
#define INOHSZ	512		/* number of inode hash chains */
#if	((INOHSZ&(INOHSZ-1)) == 0)
/* INOHSZ is a power of two: hash (dev + ino) with a cheap mask. */
#define	INOHASH(dev,ino)	(((dev)+(ino))&(INOHSZ-1))
#else
/* Otherwise fall back to an unsigned modulus. */
#define	INOHASH(dev,ino)	(((unsigned)((dev)+(ino)))%INOHSZ)
#endif

u_long	nextgennumber;		/* next generation number to assign */

/*
 * Head of one inode hash chain.  The union lets an empty chain head
 * point at itself (see ufs_init) while chain members are inodes;
 * iget compares the scan pointer against the head to detect the end.
 */
union ihead {
	union ihead *ih_head[2];
	struct inode *ih_chain[2];
} ihead[INOHSZ];

int prtactive;	/* 1 => print out reclaim of active vnodes */
67
68/*
69 * Initialize hash links for inodes.
70 */
71ufs_init()
72{
73 register int i;
74 register union ihead *ih = ihead;
75
76#ifndef lint
77 if (VN_MAXPRIVATE < sizeof(struct inode))
78 panic("ihinit: too small");
79#endif /* not lint */
80 for (i = INOHSZ; --i >= 0; ih++) {
81 ih->ih_head[0] = ih;
82 ih->ih_head[1] = ih;
83 }
84#ifdef QUOTA
85 dqinit();
86#endif /* QUOTA */
87}
88
/*
 * Look up a UFS dinode number to find its incore vnode.
 * If it is not in core, read it in from the specified device.
 * If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount
 * points must be done by the calling routine.
 *
 * xp is an inode on the same filesystem (used only for its device
 * and mount point); on success *ipp is the locked, referenced inode
 * and 0 is returned, otherwise *ipp is 0 and an errno is returned.
 */
iget(xp, ino, ipp)
	struct inode *xp;
	ino_t ino;
	struct inode **ipp;
{
	dev_t dev = xp->i_dev;
	struct mount *mntp = ITOV(xp)->v_mount;
	register struct fs *fs = VFSTOUFS(mntp)->um_fs;
	extern struct vnodeops ufs_vnodeops, spec_inodeops;
	register struct inode *ip, *iq;
	register struct vnode *vp;
	struct vnode *nvp;
	struct buf *bp;
	struct dinode *dp;
	union ihead *ih;
	int i, error;

	ih = &ihead[INOHASH(dev, ino)];
loop:
	/*
	 * Search the hash chain; the chain head doubles as the list
	 * terminator (see union ihead).
	 */
	for (ip = ih->ih_chain[0]; ip != (struct inode *)ih; ip = ip->i_forw) {
		if (ino != ip->i_number || dev != ip->i_dev)
			continue;
		if ((ip->i_flag&ILOCKED) != 0) {
			/*
			 * Someone else holds the inode lock; note that we
			 * want it and sleep, then rescan from the top since
			 * the chain may have changed while we slept.
			 */
			ip->i_flag |= IWANT;
			tsleep((caddr_t)ip, PINOD, "iget", 0);
			goto loop;
		}
		/* vget can also sleep/fail; retry the lookup if it does. */
		if (vget(ITOV(ip)))
			goto loop;
		*ipp = ip;
		return(0);
	}
	/*
	 * Allocate a new inode.
	 */
	if (error = getnewvnode(VT_UFS, mntp, &ufs_vnodeops, &nvp)) {
		*ipp = 0;
		return (error);
	}
	ip = VTOI(nvp);
	ip->i_vnode = nvp;
	ip->i_flag = 0;
	ip->i_devvp = 0;
	ip->i_mode = 0;
	ip->i_diroff = 0;
	ip->i_lockf = 0;
#ifdef QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		ip->i_dquot[i] = NODQUOT;
#endif
	for (i=0; i < DI_SPARE_SZ; i++)
		ip->i_di_spare[i] = (unsigned long)0L;
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip->i_dev = dev;
	ip->i_number = ino;
	insque(ip, ih);
	ILOCK(ip);
	/*
	 * Read in the disk contents for the inode.
	 */
	if (error = bread(VFSTOUFS(mntp)->um_devvp, fsbtodb(fs, itod(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain.
		 * Iput() will take care of putting it back on the free list.
		 */
		remque(ip);
		ip->i_forw = ip;
		ip->i_back = ip;
		/*
		 * Unlock and discard unneeded inode.
		 */
		iput(ip);
		brelse(bp);
		*ipp = 0;
		return (error);
	}
	/* Copy our dinode out of the filesystem block just read. */
	dp = bp->b_un.b_dino;
	dp += itoo(fs, ino);
	ip->i_din = *dp;
	brelse(bp);
	/*
	 * Initialize the associated vnode
	 */
	vp = ITOV(ip);
	vp->v_type = IFTOVT(ip->i_mode);
	if (vp->v_type == VFIFO) {
#ifdef FIFO
		extern struct vnodeops fifo_inodeops;
		vp->v_op = &fifo_inodeops;
#else
		iput(ip);
		*ipp = 0;
		return (EOPNOTSUPP);
#endif /* FIFO */
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		vp->v_op = &spec_inodeops;
		/*
		 * checkalias may hand back an existing vnode for the same
		 * device; if so, move our identity onto that vnode's inode
		 * and discard the one we just set up.
		 */
		if (nvp = checkalias(vp, ip->i_rdev, mntp)) {
			/*
			 * Reinitialize aliased inode.
			 */
			vp = nvp;
			iq = VTOI(vp);
			iq->i_vnode = vp;
			iq->i_flag = 0;
			ILOCK(iq);
			iq->i_din = ip->i_din;
			iq->i_dev = dev;
			iq->i_number = ino;
			insque(iq, ih);
			/*
			 * Discard unneeded vnode
			 */
			ip->i_mode = 0;
			iput(ip);
			ip = iq;
		}
	}
	if (ino == ROOTINO)
		vp->v_flag |= VROOT;
	/*
	 * Finish inode initialization.
	 */
	ip->i_fs = fs;
	ip->i_devvp = VFSTOUFS(mntp)->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		if (++nextgennumber < (u_long)time.tv_sec)
			nextgennumber = time.tv_sec;
		ip->i_gen = nextgennumber;
		/* Only mark the inode dirty if we can actually write it. */
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IMOD;
	}
	*ipp = ip;
	return (0);
}
243
244/*
245 * Unlock and decrement the reference count of an inode structure.
246 */
247iput(ip)
248 register struct inode *ip;
249{
250
251 if ((ip->i_flag & ILOCKED) == 0)
252 panic("iput");
253 IUNLOCK(ip);
254 vrele(ITOV(ip));
255}
256
/*
 * Last reference to an inode, write the inode out and if necessary,
 * truncate and deallocate the file.
 *
 * Returns 0 or the error from the truncation.  The proc argument is
 * unused here.
 */
ufs_inactive(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	register struct inode *ip = VTOI(vp);
	int mode, error = 0;

	if (prtactive && vp->v_usecount != 0)
		vprint("ufs_inactive: pushing active", vp);
	/*
	 * Get rid of inodes related to stale file handles.
	 */
	if (ip->i_mode == 0) {
		/* Skip vgone if the vnode is already being torn down. */
		if ((vp->v_flag & VXLOCK) == 0)
			vgone(vp);
		return (0);
	}
	ILOCK(ip);
	/*
	 * No links remain and the filesystem is writable: free the
	 * file's blocks and release the inode on disk.
	 */
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef QUOTA
		if (!getinoquota(ip))
			(void) chkiq(ip, -1, NOCRED, 0);
#endif
		error = itrunc(ip, (u_long)0, 0);
		/* Save the mode for ifree; clearing i_mode marks us dead. */
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_rdev = 0;
		ip->i_flag |= IUPD|ICHG;
		ifree(ip, ip->i_number, mode);
	}
	/* Push any pending time/inode updates to disk, then unlock. */
	IUPDAT(ip, &time, &time, 0);
	IUNLOCK(ip);
	ip->i_flag = 0;
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (vp->v_usecount == 0 && ip->i_mode == 0)
		vgone(vp);
	return (error);
}
302
303/*
304 * Reclaim an inode so that it can be used for other purposes.
305 */
306ufs_reclaim(vp)
307 register struct vnode *vp;
308{
309 register struct inode *ip = VTOI(vp);
310 int i;
311
312 if (prtactive && vp->v_usecount != 0)
313 vprint("ufs_reclaim: pushing active", vp);
314 /*
315 * Remove the inode from its hash chain.
316 */
317 remque(ip);
318 ip->i_forw = ip;
319 ip->i_back = ip;
320 /*
321 * Purge old data structures associated with the inode.
322 */
323 cache_purge(vp);
324 if (ip->i_devvp) {
325 vrele(ip->i_devvp);
326 ip->i_devvp = 0;
327 }
328#ifdef QUOTA
329 for (i = 0; i < MAXQUOTAS; i++) {
330 if (ip->i_dquot[i] != NODQUOT) {
331 dqrele(vp, ip->i_dquot[i]);
332 ip->i_dquot[i] = NODQUOT;
333 }
334 }
335#endif
336 ip->i_flag = 0;
337 return (0);
338}
339
340/*
341 * Update the access, modified, and inode change times as specified
342 * by the IACC, IMOD, and ICHG flags respectively. The IUPD flag
343 * is used to specify that the inode needs to be updated but that
344 * the times have already been set. The access and modified times
345 * are taken from the second and third parameters; the inode change
346 * time is always taken from the current time. If waitfor is set,
347 * then wait for the disk write of the inode to complete.
348 */
349iupdat(ip, ta, tm, waitfor)
350 register struct inode *ip;
351 struct timeval *ta, *tm;
352 int waitfor;
353{
354 struct buf *bp;
355 struct vnode *vp = ITOV(ip);
356 struct dinode *dp;
357 register struct fs *fs;
358 int error;
359
360 fs = ip->i_fs;
361 if ((ip->i_flag & (IUPD|IACC|ICHG|IMOD)) == 0)
362 return (0);
363 if (vp->v_mount->mnt_flag & MNT_RDONLY)
364 return (0);
365 error = bread(ip->i_devvp, fsbtodb(fs, itod(fs, ip->i_number)),
366 (int)fs->fs_bsize, NOCRED, &bp);
367 if (error) {
368 brelse(bp);
369 return (error);
370 }
371 if (ip->i_flag&IACC)
372 ip->i_atime = ta->tv_sec;
373 if (ip->i_flag&IUPD)
374 ip->i_mtime = tm->tv_sec;
375 if (ip->i_flag&ICHG)
376 ip->i_ctime = time.tv_sec;
377 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
378 dp = bp->b_un.b_dino + itoo(fs, ip->i_number);
379 *dp = ip->i_din;
380 if (waitfor) {
381 return (bwrite(bp));
382 } else {
383 bdwrite(bp);
384 return (0);
385 }
386}
387
#define SINGLE	0	/* index of single indirect block */
#define DOUBLE	1	/* index of double indirect block */
#define TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size.  Free affected disk
 * blocks -- the blocks of the file are removed in reverse order.
 *
 * flags may carry IO_SYNC to force synchronous block writes.
 * Returns 0 or the first error encountered (truncation continues
 * past individual block-free errors).
 *
 * NB: triple indirect blocks are untested.
 */
itrunc(oip, length, flags)
	register struct inode *oip;
	u_long length;
	int flags;
{
	register daddr_t lastblock;
	daddr_t bn, lbn, lastiblock[NIADDR];
	register struct fs *fs;
	register struct inode *ip;
	struct buf *bp;
	int offset, osize, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int aflags, error, allerror;
	struct inode tip;

	/* Tell the VM pager about the new size before touching blocks. */
	vnode_pager_setsize(ITOV(oip), length);
	/*
	 * Short symlinks stored directly in the inode have no disk
	 * blocks; just wipe the in-inode copy.  Only truncation to
	 * zero makes sense for them.
	 */
	if (FASTLINK(oip)) {
		if (length != 0)
			panic("itrunc fastlink to non-zero");
		bzero(oip->i_symlink, MAXFASTLINK);
		oip->i_size = 0;
		oip->i_din.di_spare[0] = 0;
	}
	/* Not shrinking: just record the change times and return. */
	if (oip->i_size <= length) {
		oip->i_flag |= ICHG|IUPD;
		error = iupdat(oip, &time, &time, 1);
		return (error);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep. Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	fs = oip->i_fs;
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	osize = oip->i_size;
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
#ifdef QUOTA
		if (error = getinoquota(oip))
			return (error);
#endif
		if (error = balloc(oip, lbn, offset, &bp, aflags))
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		(void) vnode_pager_uncache(ITOV(oip));
		/* Zero from the new EOF to the end of the block. */
		bzero(bp->b_un.b_addr + offset, (unsigned)(size - offset));
		allocbuf(bp, size);
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);
	}
	/*
	 * Update file and block pointers
	 * on disk before we start freeing blocks.
	 * If we crash before free'ing blocks below,
	 * the blocks will be returned to the free list.
	 * lastiblock values are also normalized to -1
	 * for calls to indirtrunc below.
	 *
	 * tip is a scratch copy of the inode carrying the OLD size
	 * and block pointers; the actual freeing below works on it.
	 */
	tip = *oip;
	tip.i_size = osize;
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= ICHG|IUPD;
	vinvalbuf(ITOV(oip), (length > 0));
	allerror = iupdat(oip, &time, &time, MNT_WAIT);

	/*
	 * Indirect blocks first.
	 */
	ip = &tip;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = ip->i_ib[level];
		if (bn != 0) {
			error = indirtrunc(ip, bn, lastiblock[level], level,
				&count);
			if (error)
				allerror = error;
			blocksreleased += count;
			/* Entire level gone: free the indirect block too. */
			if (lastiblock[level] < 0) {
				ip->i_ib[level] = 0;
				blkfree(ip, bn, (off_t)fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		/* A partially kept level means nothing below it goes. */
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register off_t bsize;

		bn = ip->i_db[i];
		if (bn == 0)
			continue;
		ip->i_db[i] = 0;
		bsize = (off_t)blksize(fs, ip, i);
		blkfree(ip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = ip->i_db[lastblock];
	if (bn != 0) {
		off_t oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			blkfree(ip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
/* BEGIN PARANOIA */
	/* The scratch copy and the real inode must agree on pointers. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (ip->i_ib[level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (ip->i_db[i] != oip->i_db[i])
			panic("itrunc2");
/* END PARANOIA */
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= ICHG;
#ifdef QUOTA
	if (!getinoquota(oip))
		(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}
577
/*
 * Release blocks associated with the inode ip and
 * stored in the indirect block bn.  Blocks are free'd
 * in LIFO order up to (but not including) lastbn.  If
 * level is greater than SINGLE, the block is an indirect
 * block and recursive calls to indirtrunc must be used to
 * cleanse other indirect blocks.
 *
 * *countp receives the number of device blocks released.
 *
 * NB: triple indirect blocks are untested.
 */
indirtrunc(ip, bn, lastbn, level, countp)
	register struct inode *ip;
	daddr_t bn, lastbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register daddr_t *bap;
	daddr_t *copy, nb, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept. -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those
	 * entries corresponding to blocks to be free'd,
	 * and update on disk copy first.
	 */
	error = bread(ip->i_devvp, fsbtodb(fs, bn), (int)fs->fs_bsize,
		NOCRED, &bp);
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = bp->b_un.b_daddr;
	/*
	 * Work from a private copy of the pointers so the zeroed
	 * buffer can be written back to disk before we free anything.
	 */
	MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	  (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1; i > last; i--) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = indirtrunc(ip, nb, (daddr_t)-1, level - 1,
				&blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
		blkfree(ip, nb, (off_t)fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 * (The loop above left i == last, so bap[i] is the boundary
	 * block that is partially kept.)
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = indirtrunc(ip, nb, last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}
674
675/*
676 * Lock an inode. If its already locked, set the WANT bit and sleep.
677 */
678ilock(ip)
679 register struct inode *ip;
680{
681
682 while (ip->i_flag & ILOCKED) {
683 ip->i_flag |= IWANT;
684 if (ip->i_spare0 == curproc->p_pid)
685 panic("locking against myself");
686 ip->i_spare1 = curproc->p_pid;
4630f671 687 (void) tsleep((caddr_t)ip, PINOD, "ilock", 0);
15637ed4
RG
688 }
689 ip->i_spare1 = 0;
690 ip->i_spare0 = curproc->p_pid;
691 ip->i_flag |= ILOCKED;
692}
693
694/*
695 * Unlock an inode. If WANT bit is on, wakeup.
696 */
697iunlock(ip)
698 register struct inode *ip;
699{
700
701 if ((ip->i_flag & ILOCKED) == 0)
702 vprint("iunlock: unlocked inode", ITOV(ip));
703 ip->i_spare0 = 0;
704 ip->i_flag &= ~ILOCKED;
705 if (ip->i_flag&IWANT) {
706 ip->i_flag &= ~IWANT;
707 wakeup((caddr_t)ip);
708 }
709}