reorganize and clean up John Heideman's layout
[unix-history] / usr / src / sys / miscfs / specfs / spec_vnops.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 1989 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * %sccs.include.redist.c%
6 *
7 * @(#)spec_vnops.c 7.40 (Berkeley) %G%
8 */
9
10#include <sys/param.h>
11#include <sys/proc.h>
12#include <sys/systm.h>
13#include <sys/kernel.h>
14#include <sys/conf.h>
15#include <sys/buf.h>
16#include <sys/mount.h>
17#include <sys/namei.h>
18#include <sys/vnode.h>
19#include <sys/specdev.h>
20#include <sys/stat.h>
21#include <sys/errno.h>
22#include <sys/ioctl.h>
23#include <sys/file.h>
24#include <sys/disklabel.h>
25
/*
 * Symbolic sleep message strings for devices.  Device code passes
 * these as the wait-message argument when sleeping, so the reason
 * for the wait can be identified (presumably shown by ps and the
 * debugger -- the consumers are outside this file).
 */
char devopn[] = "devopn";
char devio[] = "devio";
char devwait[] = "devwait";
char devin[] = "devin";
char devout[] = "devout";
char devioc[] = "devioc";
char devcls[] = "devcls";
/*
 * Vnode-operations vector for special (device) vnodes: one handler
 * per vnode operation, in the slot order struct vnodeops defines.
 * Entries named spec_* below either implement real device behavior
 * or are trivial stubs (see the definitions later in this file).
 */
struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
	spec_blkatoff,		/* blkatoff */
	spec_vget,		/* vget */
	spec_valloc,		/* valloc */
	spec_vfree,		/* vfree */
	spec_truncate,		/* truncate */
	spec_update,		/* update */
	spec_bwrite,		/* bwrite */
};
77
78/*
79 * Trivial lookup routine that always fails.
80 */
81spec_lookup(vp, ndp, p)
82 struct vnode *vp;
83 struct nameidata *ndp;
84 struct proc *p;
85{
86
87 ndp->ni_dvp = vp;
88 ndp->ni_vp = NULL;
89 return (ENOTDIR);
90}
91
/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 */
/* ARGSUSED */
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/* Filesystem mounted with "nodev": refuse all device opens. */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		/* Bounds-check the major number (cast catches negative). */
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		/*
		 * Drop the vnode lock across the driver open and retake
		 * it afterward -- presumably so a blocking d_open cannot
		 * stall other vnode operations (NOTE(review): confirm).
		 */
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/* Refuse to open a block device that is mounted. */
		if (error = ufs_mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	/* Other vnode types: nothing for a driver to do; open succeeds. */
	return (0);
}
130
/*
 * Vnode op for read.  Character devices are handed straight to the
 * driver's d_read; block devices are read through the buffer cache
 * in filesystem-block-sized chunks with one-chunk read-ahead.
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	/* Sanity: correct transfer direction, and user-space transfers
	 * must run in the context of the requesting process. */
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/* Unlock across the driver read (see spec_open). */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Pick the transfer size: default BLKDEV_IOSIZE, but if
		 * the driver can report a partition table and the
		 * partition holds a BSD FFS with sane frag/fsize, use
		 * that filesystem's block size instead.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		/* Device (DEV_BSIZE) blocks per transfer chunk. */
		bscale = bsize / DEV_BSIZE;
		do {
			/* Chunk-aligned block number and offset within it. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * Sequential-read heuristic: if the previous read
			 * was the chunk just before this one, also start
			 * reading the following chunk ahead of time.
			 */
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			/* Trim to what the device actually transferred. */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			/* Fully consumed: age the buffer for early reuse. */
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
206
/*
 * Vnode op for write.  Character devices go straight to the driver's
 * d_write; block devices are written through the buffer cache, doing
 * read-modify-write for partial chunks and skipping the read for
 * full-chunk overwrites.
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	/* Sanity: correct direction; user-space transfers must be in
	 * the context of the requesting process. */
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/* Unlock across the driver write (see spec_open). */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Transfer size: BLKDEV_IOSIZE unless the partition holds
		 * a BSD FFS with valid frag/fsize, in which case use that
		 * filesystem's block size (same logic as spec_read).
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			/* Chunk-aligned block number, offset within chunk. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * Overwriting a whole chunk: no need to read the
			 * old contents, just grab an empty buffer.  Partial
			 * chunk: read it so unwritten bytes are preserved.
			 */
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			/*
			 * Full chunk: age it and write asynchronously now.
			 * Partial chunk: delay the write in case the caller
			 * fills in the rest shortly.
			 */
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
282
/*
 * Device ioctl operation: dispatch to the character or block driver's
 * d_ioctl entry, after intercepting one in-kernel query on block
 * devices (see below).
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
		/*
		 * Special probe: command 0 with data B_TAPE asks "is this
		 * a tape device?"; answer 0 (yes) if the driver's flags
		 * include B_TAPE, else 1.  The else binds to the inner if
		 * -- the dangling-else nesting here is intentional.
		 */
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}
317
318/* ARGSUSED */
319spec_select(vp, which, fflags, cred, p)
320 struct vnode *vp;
321 int which, fflags;
322 struct ucred *cred;
323 struct proc *p;
324{
325 register dev_t dev;
326
327 switch (vp->v_type) {
328
329 default:
330 return (1); /* XXX */
331
332 case VCHR:
333 dev = vp->v_rdev;
334 return (*cdevsw[major(dev)].d_select)(dev, which, p);
335 }
336}
337
338/*
339 * Just call the device strategy routine
340 */
341spec_strategy(bp)
342 register struct buf *bp;
343{
344
345 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
346 return (0);
347}
348
349/*
350 * This is a noop, simply returning what one has been given.
351 */
352spec_bmap(vp, bn, vpp, bnp)
353 struct vnode *vp;
354 daddr_t bn;
355 struct vnode **vpp;
356 daddr_t *bnp;
357{
358
359 if (vpp != NULL)
360 *vpp = vp;
361 if (bnp != NULL)
362 *bnp = bn;
363 return (0);
364}
365
/*
 * At the moment we do not do any locking.
 * Lock a special-device vnode: deliberately a no-op that always
 * reports success.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}
376
/*
 * Unlock a special-device vnode: a no-op to match spec_lock above;
 * always reports success.
 */
/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}
384
/*
 * Device close routine.  Decides whether this is really the last
 * close of the device and, if so, calls the driver's d_close with
 * the appropriate mode; otherwise returns success without touching
 * the driver.
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	/* Driver close entry and S_IF* mode chosen per vnode type below. */
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		/* NOTE(review): a nonzero vinvalbuf return is treated as
		 * "nothing further to do" -- confirm intended semantics. */
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, flag, mode, p));
}
443
444/*
445 * Print out the contents of a special device vnode.
446 */
447spec_print(vp)
448 struct vnode *vp;
449{
450
451 printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
452 minor(vp->v_rdev));
453}
454
/*
 * Special device advisory byte-level locks.
 * Not implemented for devices: always reports EOPNOTSUPP.
 */
/* ARGSUSED */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}
469
/*
 * Generic failing entry: operations that make no sense on a
 * special-device vnode report a bad file descriptor.
 */
spec_ebadf()
{

	return (EBADF);
}
478
/*
 * Special device bad operation: filler for vnodeops slots that must
 * never be reached on a device vnode.  Calling it is a kernel bug,
 * so it panics rather than returning an error.
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}