/*
 * add global flag to allow forcible unmounts to be attempted;
 * [unix-history] /usr/src/sys/ufs/lfs/lfs_vfsops.c
 */
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	7.54 (Berkeley) %G%
 */
9
10#include "param.h"
11#include "systm.h"
12#include "namei.h"
13#include "proc.h"
14#include "kernel.h"
15#include "vnode.h"
16#include "specdev.h"
17#include "mount.h"
18#include "buf.h"
19#include "file.h"
20#include "disklabel.h"
21#include "ioctl.h"
22#include "errno.h"
23#include "malloc.h"
24#include "ioctl.h"
25#include "disklabel.h"
26#include "stat.h"
27
28#include "quota.h"
29#include "fs.h"
30#include "ufsmount.h"
31#include "inode.h"
32
/*
 * Filesystem operations vector for UFS.  The VFS layer dispatches
 * the mount-level system calls through this table.
 */
struct vfsops ufs_vfsops = {
        ufs_mount,              /* mount(2) */
        ufs_start,              /* make operational (no-op here) */
        ufs_unmount,            /* unmount(2) */
        ufs_root,               /* return root vnode */
        ufs_quotactl,           /* quota operations */
        ufs_statfs,             /* filesystem statistics */
        ufs_sync,               /* flush modified data */
        ufs_fhtovp,             /* NFS file handle -> vnode */
        ufs_vptofh,             /* vnode -> NFS file handle */
        ufs_init                /* one-time initialization */
};
45
/*
 * Flag to allow forcible unmounting.
 * Checked by ufs_unmount() before honoring MNT_FORCE; clearing it
 * disables forced unmounts system-wide.
 */
int doforce = 1;
50
/*
 * Called by vfs_mountroot when ufs is going to be mounted as root.
 *
 * Name is updated by mount(8) after booting.
 */
#define ROOTNAME        "root_device"

/*
 * Mount the device in rootvp as the root ("/") filesystem: allocate
 * and initialize a struct mount, mount it read-only via mountfs(),
 * install it as the sole element of the mount list (rootfs), record
 * the mount names, and seed the time-of-day clock from the
 * superblock's last-update time.  Returns 0 or an errno value.
 */
ufs_mountroot()
{
        register struct mount *mp;
        extern struct vnode *rootvp;    /* device vnode chosen at config time */
        struct proc *p = curproc;       /* XXX */
        struct ufsmount *ump;
        register struct fs *fs;
        u_int size;
        int error;

        mp = (struct mount *)malloc((u_long)sizeof(struct mount),
            M_MOUNT, M_WAITOK);
        mp->mnt_op = &ufs_vfsops;
        mp->mnt_flag = MNT_RDONLY;      /* root starts read-only at boot */
        mp->mnt_exroot = 0;
        mp->mnt_mounth = NULLVP;
        error = mountfs(rootvp, mp, p);
        if (error) {
                free((caddr_t)mp, M_MOUNT);
                return (error);
        }
        if (error = vfs_lock(mp)) {
                /* Undo the successful mountfs before bailing out. */
                (void)ufs_unmount(mp, 0, p);
                free((caddr_t)mp, M_MOUNT);
                return (error);
        }
        /* Install as the only entry on the circular mount list. */
        rootfs = mp;
        mp->mnt_next = mp;
        mp->mnt_prev = mp;
        mp->mnt_vnodecovered = NULLVP;
        ump = VFSTOUFS(mp);
        fs = ump->um_fs;
        /* Record "/" as the mounted-on name in both fs and statfs. */
        bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
        fs->fs_fsmnt[0] = '/';
        bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
            MNAMELEN);
        (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
            &size);
        bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
        (void) ufs_statfs(mp, &mp->mnt_stat, p);
        vfs_unlock(mp);
        inittodr(fs->fs_time);  /* set clock from fs last-written time */
        return (0);
}
102
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Handles both fresh mounts and MNT_UPDATE of an existing mount, and
 * processes NFS export option changes carried in the ufs_args.
 */
ufs_mount(mp, path, data, ndp, p)
        register struct mount *mp;
        char *path;             /* user-space path the fs is mounted on */
        caddr_t data;           /* user-space pointer to struct ufs_args */
        struct nameidata *ndp;
        struct proc *p;
{
        struct vnode *devvp;
        struct ufs_args args;
        struct ufsmount *ump;
        register struct fs *fs;
        u_int size;
        int error;

        if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
                return (error);
        /*
         * Process export requests.
         */
        if ((args.exflags & MNT_EXPORTED) || (mp->mnt_flag & MNT_EXPORTED)) {
                if (args.exflags & MNT_EXPORTED)
                        mp->mnt_flag |= MNT_EXPORTED;
                else
                        mp->mnt_flag &= ~MNT_EXPORTED;
                if (args.exflags & MNT_EXRDONLY)
                        mp->mnt_flag |= MNT_EXRDONLY;
                else
                        mp->mnt_flag &= ~MNT_EXRDONLY;
                mp->mnt_exroot = args.exroot;
        }
        if ((mp->mnt_flag & MNT_UPDATE) == 0) {
                /* Fresh mount: resolve the device and do the real work. */
                if ((error = getmdev(&devvp, args.fspec, ndp, p)) != 0)
                        return (error);
                error = mountfs(devvp, mp, p);
        } else {
                /* Update of an already-mounted filesystem. */
                ump = VFSTOUFS(mp);
                fs = ump->um_fs;
                /* Downgrade-to-writable request: just clear the flag. */
                if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
                        fs->fs_ronly = 0;
                /*
                 * Verify that the specified device is the one that
                 * is really being used for the root file system.
                 */
                if (args.fspec == 0)
                        return (0);
                if ((error = getmdev(&devvp, args.fspec, ndp, p)) != 0)
                        return (error);
                if (devvp != ump->um_devvp)
                        error = EINVAL; /* needs translation */
                else
                        vrele(devvp);   /* same device; drop extra reference */
        }
        if (error) {
                vrele(devvp);
                return (error);
        }
        /* Record mounted-on and mounted-from names for statfs. */
        ump = VFSTOUFS(mp);
        fs = ump->um_fs;
        (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
        bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
        bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
            MNAMELEN);
        (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
            &size);
        bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
        (void) ufs_statfs(mp, &mp->mnt_stat, p);
        return (0);
}
176
177/*
178 * Common code for mount and mountroot
179 */
180mountfs(devvp, mp, p)
181 register struct vnode *devvp;
182 struct mount *mp;
183 struct proc *p;
184{
185 register struct ufsmount *ump = (struct ufsmount *)0;
186 struct buf *bp = NULL;
187 register struct fs *fs;
188 dev_t dev = devvp->v_rdev;
189 struct partinfo dpart;
190 int havepart = 0, blks;
191 caddr_t base, space;
192 int havepart = 0, blks;
193 int error, i, size;
194 int needclose = 0;
195 int ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
196 extern struct vnode *rootvp;
197
198 if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
199 return (error);
200 needclose = 1;
201 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
202 size = DEV_BSIZE;
203 else {
204 havepart = 1;
205 size = dpart.disklab->d_secsize;
206 }
207 if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
208 goto out;
209 fs = bp->b_un.b_fs;
210 error = EINVAL; /* XXX needs translation */
211 goto out;
212 }
213 ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
214 ump->um_fs = (struct fs *)malloc((u_long)fs->fs_sbsize, M_SUPERBLK,
215 M_WAITOK);
216 bcopy((caddr_t)bp->b_un.b_addr, (caddr_t)ump->um_fs,
217 (u_int)fs->fs_sbsize);
218 if (fs->fs_sbsize < SBSIZE)
219 bp->b_flags |= B_INVAL;
220 brelse(bp);
221 bp = NULL;
222 fs = ump->um_fs;
223 fs->fs_ronly = ronly;
224 if (ronly == 0)
225 fs->fs_fmod = 1;
226 if (havepart) {
227 dpart.part->p_fstype = FS_BSDFFS;
228 dpart.part->p_fsize = fs->fs_fsize;
229 dpart.part->p_frag = fs->fs_frag;
230 dpart.part->p_cpg = fs->fs_cpg;
231 }
232#ifdef SECSIZE
233 /*
234 * If we have a disk label, force per-partition
235 * filesystem information to be correct
236 * and set correct current fsbtodb shift.
237 */
238#endif SECSIZE
239 if (havepart) {
240 dpart.part->p_fstype = FS_BSDFFS;
241 dpart.part->p_fsize = fs->fs_fsize;
242 dpart.part->p_frag = fs->fs_frag;
243#ifdef SECSIZE
244#ifdef tahoe
245 /*
246 * Save the original fsbtodb shift to restore on updates.
247 * (Console doesn't understand fsbtodb changes.)
248 */
249 fs->fs_sparecon[0] = fs->fs_fsbtodb;
250#endif
251 i = fs->fs_fsize / size;
252 for (fs->fs_fsbtodb = 0; i > 1; i >>= 1)
253 fs->fs_fsbtodb++;
254#endif SECSIZE
255 fs->fs_dbsize = size;
256 }
257 blks = howmany(fs->fs_cssize, fs->fs_fsize);
258 base = space = (caddr_t)malloc((u_long)fs->fs_cssize, M_SUPERBLK,
259 M_WAITOK);
260 for (i = 0; i < blks; i += fs->fs_frag) {
261 size = fs->fs_bsize;
262 if (i + fs->fs_frag > blks)
263 size = (blks - i) * fs->fs_fsize;
264#ifdef SECSIZE
265 tp = bread(dev, fsbtodb(fs, fs->fs_csaddr + i), size,
266 fs->fs_dbsize);
267#else SECSIZE
268 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
269 NOCRED, &bp);
270 if (error) {
271 free((caddr_t)base, M_SUPERBLK);
272 goto out;
273 }
274 bcopy((caddr_t)bp->b_un.b_addr, space, (u_int)size);
275 fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
276 space += size;
277 brelse(bp);
278 bp = NULL;
279 }
280 mp->mnt_data = (qaddr_t)ump;
281 mp->mnt_stat.f_fsid.val[0] = (long)dev;
282 mp->mnt_stat.f_fsid.val[1] = MOUNT_UFS;
283 mp->mnt_flag |= MNT_LOCAL;
284 ump->um_mountp = mp;
285 ump->um_dev = dev;
286 ump->um_devvp = devvp;
287 for (i = 0; i < MAXQUOTAS; i++)
288 ump->um_quotas[i] = NULLVP;
289 devvp->v_specflags |= SI_MOUNTEDON;
290
291 /* Sanity checks for old file systems. XXX */
292 fs->fs_npsect = MAX(fs->fs_npsect, fs->fs_nsect); /* XXX */
293 fs->fs_interleave = MAX(fs->fs_interleave, 1); /* XXX */
294 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
295 fs->fs_nrpos = 8; /* XXX */
296
297 return (0);
298out:
299 if (bp)
300 brelse(bp);
301 if (needclose)
302 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
303 if (ump) {
304 free((caddr_t)ump->um_fs, M_SUPERBLK);
305 free((caddr_t)ump, M_UFSMNT);
306 mp->mnt_data = (qaddr_t)0;
307 }
308 return (error);
309}
310
/*
 * Make a filesystem operational.
 * Nothing to do at the moment; exists only to fill its vfsops slot.
 */
/* ARGSUSED */
ufs_start(mp, flags, p)
        struct mount *mp;
        int flags;
        struct proc *p;
{

        return (0);
}
324
325/*
326 * unmount system call
327 */
328ufs_unmount(mp, mntflags, p)
329 struct mount *mp;
330 int mntflags;
331 struct proc *p;
332{
333 register struct ufsmount *ump;
334 register struct fs *fs;
335 int i, error, ronly, flags = 0;
336
337 if (mntflags & MNT_FORCE) {
338 if (!doforce || mp == rootfs)
339 return (EINVAL);
340 flags |= FORCECLOSE;
341 }
342 mntflushbuf(mp, 0);
343 if (mntinvalbuf(mp))
344 return (EBUSY);
345 ump = VFSTOUFS(mp);
346 return (error);
347#ifdef QUOTA
348 if (mp->mnt_flag & MNT_QUOTA) {
349 if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
350 return (error);
351 for (i = 0; i < MAXQUOTAS; i++) {
352 if (ump->um_quotas[i] == NULLVP)
353 continue;
354 quotaoff(mp, i);
355 }
356 /*
357 * Here we fall through to vflush again to ensure
358 * that we have gotten rid of all the system vnodes.
359 */
360 }
361#endif
362 if (error = vflush(mp, NULLVP, flags))
363 return (error);
364 fs = ump->um_fs;
365 ronly = !fs->fs_ronly;
366 error = closei(dev, IFBLK, fs->fs_ronly? FREAD : FREAD|FWRITE);
367 irele(ip);
368 return (error);
369}
370
/*
 * Return root of a filesystem
 *
 * Builds a temporary vnode/inode pair on the stack so that iget()
 * can see the mount point and device, then fetches ROOTINO and
 * returns its real vnode through *vpp.
 */
ufs_root(mp, vpp)
        struct mount *mp;
        struct vnode **vpp;
{
        register struct inode *ip;
        struct inode *nip;
        struct vnode tvp;       /* dummy; carries mp/dev into iget only */
        int error;

        tvp.v_mount = mp;
        ip = VTOI(&tvp);
        ip->i_vnode = &tvp;
        ip->i_dev = VFSTOUFS(mp)->um_dev;
        error = iget(ip, (ino_t)ROOTINO, &nip);
        if (error)
                return (error);
        *vpp = ITOV(nip);
        return (0);
}
393
394/*
395 * Do operations associated with quotas
396 */
397ufs_quotactl(mp, cmds, uid, arg, p)
398 struct mount *mp;
399 int cmds;
400 uid_t uid;
401 caddr_t arg;
402 struct proc *p;
403{
404 struct ufsmount *ump = VFSTOUFS(mp);
405 int cmd, type, error;
406
407#ifndef QUOTA
408 return (EOPNOTSUPP);
409#else
410 if (uid == -1)
411 uid = p->p_cred->p_ruid;
412 cmd = cmds >> SUBCMDSHIFT;
413
414 switch (cmd) {
415 case Q_GETQUOTA:
416 case Q_SYNC:
417 if (uid == p->p_cred->p_ruid)
418 break;
419 /* fall through */
420 default:
421 if (error = suser(p->p_ucred, &p->p_acflag))
422 return (error);
423 }
424
425 type = cmd & SUBCMDMASK;
426 if ((u_int)type >= MAXQUOTAS)
427 return (EINVAL);
428
429 switch (cmd) {
430
431 case Q_QUOTAON:
432 return (quotaon(p, mp, type, arg));
433
434 case Q_QUOTAOFF:
435 if (vfs_busy(mp))
436 return (0);
437 error = quotaoff(mp, type);
438 vfs_unbusy(mp);
439 return (error);
440
441 case Q_SETQUOTA:
442 return (setquota(mp, uid, type, arg));
443
444 case Q_SETUSE:
445 return (setuse(mp, uid, type, arg));
446
447 case Q_GETQUOTA:
448 return (getquota(mp, uid, type, arg));
449
450 case Q_SYNC:
451 if (vfs_busy(mp))
452 return (0);
453 error = qsync(mp);
454 vfs_unbusy(mp);
455 return (error);
456
457 default:
458 return (EINVAL);
459 }
460 /* NOTREACHED */
461#endif
462}
463
/*
 * Get file system statistics.
 *
 * Fills *sbp from the in-core superblock.  When sbp is not the
 * mount's own mnt_stat, the mounted-on/mounted-from names are
 * copied in as well.
 */
ufs_statfs(mp, sbp, p)
        struct mount *mp;
        register struct statfs *sbp;
        struct proc *p;
{
        register struct ufsmount *ump;
        register struct fs *fs;

        ump = VFSTOUFS(mp);
        fs = ump->um_fs;
        if (fs->fs_magic != FS_MAGIC)
                panic("ufs_statfs");
        sbp->f_type = MOUNT_UFS;
        sbp->f_fsize = fs->fs_fsize;
        sbp->f_bsize = fs->fs_bsize;
        sbp->f_blocks = fs->fs_dsize;
        /* Free fragments: whole free blocks plus loose free frags. */
        sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
                fs->fs_cstotal.cs_nffree;
        /* Space available to ordinary users (minfree % is reserved). */
        sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
                (fs->fs_dsize - sbp->f_bfree);
        sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
        sbp->f_ffree = fs->fs_cstotal.cs_nifree;
        if (sbp != &mp->mnt_stat) {
                bcopy((caddr_t)mp->mnt_stat.f_mntonname,
                    (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
                bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
                    (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
        }
        return (0);
}
497
/* Set nonzero to dump buffer-pool statistics on every sync (bufstats). */
int syncprt = 0;

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
ufs_sync(mp, waitfor)
        struct mount *mp;
        int waitfor;    /* MNT_WAIT requests synchronous writes */
{
        register struct vnode *vp;
        register struct inode *ip;
        register struct ufsmount *ump = VFSTOUFS(mp);
        register struct fs *fs;
        int error, allerror = 0;

        if (syncprt)
                bufstats();
        fs = ump->um_fs;
        /*
         * Write back modified superblock.
         * Consistency check that the superblock
         * is still in the buffer cache.
         */
        if (fs->fs_fmod != 0) {
                if (fs->fs_ronly != 0) {                /* XXX */
                        printf("fs = %s\n", fs->fs_fsmnt);
                        panic("update: rofs mod");
                }
                fs->fs_fmod = 0;
                fs->fs_time = time.tv_sec;
                allerror = sbupdate(ump, waitfor);
        }
        /*
         * Write back each (modified) inode.
         */
loop:
        for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
                /*
                 * If the vnode that we are about to sync is no longer
                 * associated with this mount point, start over.
                 */
                if (vp->v_mount != mp)
                        goto loop;
                if (VOP_ISLOCKED(vp))
                        continue;       /* skip; holder will write it back */
                ip = VTOI(vp);
                if ((ip->i_flag & (IMOD|IACC|IUPD|ICHG)) == 0 &&
                    vp->v_dirtyblkhd == NULL)
                        continue;       /* nothing dirty on this vnode */
                if (vget(vp))
                        goto loop;      /* vnode went away; restart scan */
                if (vp->v_dirtyblkhd)
                        vflushbuf(vp, 0);
                if ((ip->i_flag & (IMOD|IACC|IUPD|ICHG)) &&
                    (error = iupdat(ip, &time, &time, 0)))
                        allerror = error;
                vput(vp);
        }
        /*
         * Force stale file system control information to be flushed.
         */
        vflushbuf(ump->um_devvp, waitfor == MNT_WAIT ? B_SYNC : 0);
#ifdef QUOTA
        qsync(mp);
#endif
        return (allerror);
}
569
/*
 * Write a superblock and associated information back to disk.
 *
 * Writes the superblock first, then each cylinder-group summary
 * block.  With waitfor == MNT_WAIT the writes are synchronous and
 * the last error is returned; otherwise they are issued
 * asynchronously and errors are not observed here.
 */
sbupdate(mp, waitfor)
        struct ufsmount *mp;
        int waitfor;
{
        register struct fs *fs = mp->um_fs;
        register struct buf *bp;
        int blks;
        caddr_t space;
        int i, size, error = 0;

#ifdef SECSIZE
        bp = getblk(mp->m_dev, (daddr_t)fsbtodb(fs, SBOFF / fs->fs_fsize),
            (int)fs->fs_sbsize, fs->fs_dbsize);
#else SECSIZE
        bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize);
#endif SECSIZE
        bcopy((caddr_t)fs, bp->b_un.b_addr, (u_int)fs->fs_sbsize);
        /* Restore compatibility to old file systems.              XXX */
        if (fs->fs_postblformat == FS_42POSTBLFMT)              /* XXX */
                bp->b_un.b_fs->fs_nrpos = -1;                   /* XXX */
#ifdef SECSIZE
#ifdef tahoe
        /* restore standard fsbtodb shift */
        bp->b_un.b_fs->fs_fsbtodb = fs->fs_sparecon[0];
        bp->b_un.b_fs->fs_sparecon[0] = 0;
#endif
#endif SECSIZE
        if (waitfor == MNT_WAIT)
                error = bwrite(bp);
        else
                bawrite(bp);
        /* Now push the cylinder group summary information. */
        blks = howmany(fs->fs_cssize, fs->fs_fsize);
        space = (caddr_t)fs->fs_csp[0];
        for (i = 0; i < blks; i += fs->fs_frag) {
                size = fs->fs_bsize;
                if (i + fs->fs_frag > blks)
                        size = (blks - i) * fs->fs_fsize; /* final short block */
#ifdef SECSIZE
                bp = getblk(mp->m_dev, fsbtodb(fs, fs->fs_csaddr + i), size,
                    fs->fs_dbsize);
#else SECSIZE
                bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), size);
#endif SECSIZE
                bcopy(space, bp->b_un.b_addr, (u_int)size);
                space += size;
                if (waitfor == MNT_WAIT)
                        error = bwrite(bp);
                else
                        bawrite(bp);
        }
        return (error);
}
625
626/*
627 * Print out statistics on the current allocation of the buffer pool.
628 * Can be enabled to print out on every ``sync'' by setting "syncprt"
629 * above.
630 */
631bufstats()
632{
633 int s, i, j, count;
634 register struct buf *bp, *dp;
635 int counts[MAXBSIZE/CLBYTES+1];
636 static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };
637
638 for (bp = bfreelist, i = 0; bp < &bfreelist[BQUEUES]; bp++, i++) {
639 count = 0;
640 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
641 counts[j] = 0;
642 s = splbio();
643 for (dp = bp->av_forw; dp != bp; dp = dp->av_forw) {
644 counts[dp->b_bufsize/CLBYTES]++;
645 count++;
646 }
647 splx(s);
648 printf("%s: total-%d", bname[i], count);
649 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
650 if (counts[j] != 0)
651 printf(", %d-%d", j * CLBYTES, counts[j]);
652 printf("\n");
653 }
654}
655
/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is in range
 * - call iget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the generation number matches
 */
ufs_fhtovp(mp, fhp, vpp)
        register struct mount *mp;
        struct fid *fhp;
        struct vnode **vpp;
{
        register struct ufid *ufhp;
        register struct fs *fs;
        register struct inode *ip;
        struct inode *nip;
        struct vnode tvp;       /* dummy; carries mp/dev into iget only */
        int error;

        ufhp = (struct ufid *)fhp;
        fs = VFSTOUFS(mp)->um_fs;
        /* Reject inode numbers outside the filesystem's valid range. */
        if (ufhp->ufid_ino < ROOTINO ||
            ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) {
                *vpp = NULLVP;
                return (EINVAL);
        }
        tvp.v_mount = mp;
        ip = VTOI(&tvp);
        ip->i_vnode = &tvp;
        ip->i_dev = VFSTOUFS(mp)->um_dev;
        if (error = iget(ip, ufhp->ufid_ino, &nip)) {
                *vpp = NULLVP;
                return (error);
        }
        ip = nip;
        /* Unallocated inode: the handle is stale. */
        if (ip->i_mode == 0) {
                iput(ip);
                *vpp = NULLVP;
                return (EINVAL);
        }
        /* Generation mismatch: inode was reused since handle issued. */
        if (ip->i_gen != ufhp->ufid_gen) {
                iput(ip);
                *vpp = NULLVP;
                return (EINVAL);
        }
        *vpp = ITOV(ip);
        return (0);
}
706
707/*
708 * Vnode pointer to File handle
709 */
710/* ARGSUSED */
711ufs_vptofh(vp, fhp)
712 struct vnode *vp;
713 struct fid *fhp;
714{
715 register struct inode *ip = VTOI(vp);
716 register struct ufid *ufhp;
717
718 ufhp = (struct ufid *)fhp;
719 ufhp->ufid_len = sizeof(struct ufid);
720 ufhp->ufid_ino = ip->i_number;
721 ufhp->ufid_gen = ip->i_gen;
722 return (0);
723}
724
725/*
726 * Check that the user's argument is a reasonable
727 * thing on which to mount, and return the device number if so.
728 */
729getmdev(devvpp, fname, ndp, p)
730 struct vnode **devvpp;
731 caddr_t fname;
732 register struct nameidata *ndp;
733 struct proc *p;
734{
735 register struct vnode *vp;
736 int error;
737
738 ndp->ni_nameiop = LOOKUP | FOLLOW;
739 ndp->ni_segflg = UIO_USERSPACE;
740 ndp->ni_dirp = fname;
741 if (error = namei(ndp, p))
742 return (error);
743 vp = ndp->ni_vp;
744 if (vp->v_type != VBLK) {
745 vrele(vp);
746 return (ENOTBLK);
747 }
748 if (major(vp->v_rdev) >= nblkdev) {
749 vrele(vp);
750 return (ENXIO);
751 }
752 *devvpp = vp;
753 return (0);
754}