reorganize and clean up John Heidemann's layout
[unix-history] usr/src/sys/miscfs/specfs/spec_vnops.c

/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spec_vnops.c	7.40 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>

/* symbolic sleep message strings for devices */
char devopn[] = "devopn";
char devio[] = "devio";
char devwait[] = "devwait";
char devin[] = "devin";
char devout[] = "devout";
char devioc[] = "devioc";
char devcls[] = "devcls";

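/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * a driver would hand one of these strings to tsleep() as its wait
 * message so that ps(1) can show what a blocked process is waiting on,
 * e.g. with a hypothetical softc field:
 *
 *	(void) tsleep((caddr_t)&sc->sc_state, PZERO, devwait, 0);
 */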
struct vnodeops spec_vnodeops = {
	spec_lookup,	/* lookup */
	spec_create,	/* create */
	spec_mknod,	/* mknod */
	spec_open,	/* open */
	spec_close,	/* close */
	spec_access,	/* access */
	spec_getattr,	/* getattr */
	spec_setattr,	/* setattr */
	spec_read,	/* read */
	spec_write,	/* write */
	spec_ioctl,	/* ioctl */
	spec_select,	/* select */
	spec_mmap,	/* mmap */
	spec_fsync,	/* fsync */
	spec_seek,	/* seek */
	spec_remove,	/* remove */
	spec_link,	/* link */
	spec_rename,	/* rename */
	spec_mkdir,	/* mkdir */
	spec_rmdir,	/* rmdir */
	spec_symlink,	/* symlink */
	spec_readdir,	/* readdir */
	spec_readlink,	/* readlink */
	spec_abortop,	/* abortop */
	spec_inactive,	/* inactive */
	spec_reclaim,	/* reclaim */
	spec_lock,	/* lock */
	spec_unlock,	/* unlock */
	spec_bmap,	/* bmap */
	spec_strategy,	/* strategy */
	spec_print,	/* print */
	spec_islocked,	/* islocked */
	spec_advlock,	/* advlock */
	spec_blkatoff,	/* blkatoff */
	spec_vget,	/* vget */
	spec_valloc,	/* valloc */
	spec_vfree,	/* vfree */
	spec_truncate,	/* truncate */
	spec_update,	/* update */
	spec_bwrite,	/* bwrite */
};

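/*
 * Illustrative sketch (assumed dispatch of this vnode interface, not part
 * of the original file): a special-file vnode is created with its v_op
 * pointing at the table above (e.g. by bdevvp() or checkalias()), and the
 * rest of the kernel then reaches the routines below through the VOP_*
 * macros, roughly:
 *
 *	vp->v_op = &spec_vnodeops;
 *	error = VOP_OPEN(vp, FREAD, cred, p);
 *		... which expands to something like ...
 *	error = (*(vp->v_op->vop_open))(vp, FREAD, cred, p);
 */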
/*
 * Trivial lookup routine that always fails.
 */
spec_lookup(vp, ndp, p)
	struct vnode *vp;
	struct nameidata *ndp;
	struct proc *p;
{

	ndp->ni_dvp = vp;
	ndp->ni_vp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 */
/* ARGSUSED */
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
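		/*
		 * The device open may sleep for a long time (a tty
		 * waiting for carrier, for instance), so the vnode lock
		 * is released across the driver call and retaken after.
		 */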
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		if (error = ufs_mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
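		/*
		 * Worked example (illustrative, with the usual DEV_BSIZE
		 * of 512 and a block size of bsize = 8192, so bscale = 16):
		 * for uio_offset = 12345,
		 *
		 *	bn = (12345 / 512) & ~15 = 24 & ~15 = 16
		 *	on = 12345 % 8192 = 4153
		 *	n  = MIN(8192 - 4153, uio_resid)
		 *
		 * so bn * 512 + on == 12345, the original offset.  If the
		 * previous read ended on the block just before bn, breada()
		 * also starts read-ahead of the next block.
		 */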
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
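			/*
			 * A write that covers the whole block need not
			 * read the old contents first, so getblk() just
			 * claims a buffer; a partial-block write must
			 * bread() the block so that the surrounding bytes
			 * are preserved (read-modify-write).
			 */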
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
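		/*
		 * An ioctl of command 0 with data == B_TAPE is answered
		 * locally: return 0 if the device's bdevsw entry has the
		 * B_TAPE flag set (it is a tape), 1 if it does not.
		 */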
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(vp, which, fflags, cred, p)
	struct vnode *vp;
	int which, fflags;
	struct ucred *cred;
	struct proc *p;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);	/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which, p);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, flag, mode, p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(vp)
	struct vnode *vp;
{

	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
	    minor(vp->v_rdev));
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}