add flags field to specinfo to mark mounted block devices
[unix-history] usr/src/sys/miscfs/specfs/spec_vnops.c
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)spec_vnops.c	7.22 (Berkeley) %G%
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "conf.h"
#include "buf.h"
#include "mount.h"
#include "vnode.h"
#include "stat.h"
#include "errno.h"
#include "ioctl.h"
#include "file.h"
#include "disklabel.h"

int	spec_lookup(),
	spec_open(),
	spec_read(),
	spec_write(),
	spec_strategy(),
	spec_bmap(),
	spec_ioctl(),
	spec_select(),
	spec_lock(),
	spec_unlock(),
	spec_close(),
	spec_print(),
	spec_ebadf(),
	spec_badop(),
	spec_nullop();

struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_badop,		/* create */
	spec_badop,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_ebadf,		/* access */
	spec_ebadf,		/* getattr */
	spec_ebadf,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_badop,		/* mmap */
	spec_nullop,		/* fsync */
	spec_badop,		/* seek */
	spec_badop,		/* remove */
	spec_badop,		/* link */
	spec_badop,		/* rename */
	spec_badop,		/* mkdir */
	spec_badop,		/* rmdir */
	spec_badop,		/* symlink */
	spec_badop,		/* readdir */
	spec_badop,		/* readlink */
	spec_badop,		/* abortop */
	spec_nullop,		/* inactive */
	spec_nullop,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_nullop,		/* islocked */
};
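
/*
 * Callers reach these routines through the VOP_* macros in vnode.h,
 * which dispatch through the vnode's operations vector, roughly
 * (approximate expansion, for illustration):
 *
 *	#define VOP_OPEN(vp, mode, cred) \
 *		(*((vp)->v_op->vn_open))((vp), (mode), (cred))
 *
 * so once a special vnode's v_op points at spec_vnodeops, an open
 * of the device node lands in spec_open() below.
 */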

/*
 * Trivial lookup routine that always fails.
 */
spec_lookup(vp, ndp)
	struct vnode *vp;
	struct nameidata *ndp;
{

	ndp->ni_dvp = vp;
	ndp->ni_vp = NULL;
	return (ENOTDIR);
}

/*
 * Open is called to allow the handler of special files
 * to initialize and validate before actual I/O.
 */
/* ARGSUSED */
spec_open(vp, mode, cred)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);

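	/*
	 * Refuse the open if the device node lives on a file system
	 * mounted with the "nodev" option (M_NODEV).
	 */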
	if (vp->v_mount && (vp->v_mount->m_flag & M_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		return ((*cdevsw[maj].d_open)(dev, mode, S_IFCHR));

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK));
	}
	return (0);
}

/*
 * Vnode op for read
 */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
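		/*
		 * Default to BLKDEV_IOSIZE, but if the partition holds a
		 * BSD FFS file system, use that file system's block size
		 * (fragments per block times fragment size) so buffered
		 * device I/O lines up with the file system's own blocks.
		 */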
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
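			/*
			 * If this read follows the previous one sequentially
			 * (v_lastr remembers the last block read), also
			 * start a read-ahead of the next block.
			 */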
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on, i;
	int count, error = 0;
	extern int mem_no;

	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
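		/*
		 * Choose the transfer size the same way spec_read() does:
		 * prefer the partition's FFS block size when it has one.
		 */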
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
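			/*
			 * Unhash any pages the VM system has cached over
			 * these device blocks (presumably so a mapped text
			 * image is not left stale by the write).
			 */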
			count = howmany(bsize, CLBYTES);
			for (i = 0; i < count; i++)
				munhash(vp, bn + i * (CLBYTES / DEV_BSIZE));
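			/*
			 * A full-block write need not read the old contents
			 * first; just claim an empty buffer for the block.
			 */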
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	case VBLK:
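		/*
		 * A command of 0 with B_TAPE as the argument is a
		 * pseudo-ioctl, apparently used within the kernel to ask
		 * whether the device is a tape (returns 0) or not
		 * (returns 1), from the B_TAPE bit in the bdevsw
		 * d_flags field.
		 */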
		if (com == 0 && (int)data == B_TAPE) {
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(vp, which, fflags, cred)
	struct vnode *vp;
	int which, fflags;
	struct ucred *cred;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(vp, flag, cred)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
{
	dev_t dev = vp->v_rdev;
	int (*cfunc)();
	int error, mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device; otherwise we only
		 * close on the last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On the last close of a block device (that isn't mounted)
		 * we must invalidate any in-core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on the last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

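	/*
	 * u.u_qsave is the longjmp environment taken when a sleep is
	 * interrupted by a signal; arm it here so an interrupted close
	 * unwinds back to this point instead of past us.
	 */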
	if (setjmp(&u.u_qsave)) {
		/*
		 * If the device close routine is interrupted,
		 * we must return so that closef can clean up.
		 */
		error = EINTR;
	} else
		error = (*cfunc)(dev, flag, mode);
	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(vp)
	struct vnode *vp;
{

	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
		minor(vp->v_rdev));
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}

/*
 * Special device null operation
 */
spec_nullop()
{

	return (0);
}