/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spec_vnops.c	7.39 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

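/*
 * Vnode operations vector for special (device) files.
 */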
struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
};

/*
 * Trivial lookup routine that always fails.
 */
spec_lookup(vp, ndp, p)
	struct vnode *vp;
	struct nameidata *ndp;
	struct proc *p;
{

	ndp->ni_dvp = vp;
	ndp->ni_vp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 */
/* ARGSUSED */
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
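		/*
		 * Drop the vnode lock across the driver open, which
		 * may block, and reacquire it before returning.
		 */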
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		if (error = ufs_mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
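		/*
		 * Use the partition's filesystem block size (fragments
		 * per block times fragment size) if the driver reports
		 * one; otherwise fall back to BLKDEV_IOSIZE.
		 */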
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
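			/*
			 * If this block directly follows the one last
			 * read, start read-ahead on the next block.
			 */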
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
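		/*
		 * As in spec_read, prefer the partition's filesystem
		 * block size to the BLKDEV_IOSIZE default.
		 */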
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
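			/*
			 * A write that covers the whole block need not
			 * read the old contents first; partial writes
			 * are read-modify-write.
			 */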
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
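			/*
			 * Completely filled blocks are aged and pushed
			 * out asynchronously; partial blocks get a
			 * delayed write.
			 */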
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
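		/*
		 * A zero command with B_TAPE as its argument asks
		 * whether this block device is a tape (0 if so, 1 if
		 * not); answer from the driver flags without calling
		 * the driver.
		 */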
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

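/*
 * Pass a select request through to the device driver.  Only character
 * devices support select; anything else is reported as always ready.
 */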
/* ARGSUSED */
spec_select(vp, which, fflags, cred, p)
	struct vnode *vp;
	int which, fflags;
	struct ucred *cred;
	struct proc *p;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which, p);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, flag, mode, p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(vp)
	struct vnode *vp;
{

	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
	    minor(vp->v_rdev));
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}