/*
 * Copyright (c) 1989 The Regents of the University of California.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spec_vnops.c	7.33 (Berkeley) %G%
 */
/* Symbolic sleep (wait-channel) message strings for devices. */
char	devopn[]  = "devopn";
char	devwait[] = "devwait";
char	devout[]  = "devout";
char	devioc[]  = "devioc";
char	devcls[]  = "devcls";
/*
 * Vnode-operations dispatch table for special (device) files.
 *
 * NOTE(review): this chunk is corrupted.  The initializer is missing
 * many slots between the ones shown (e.g. nothing fills the gap
 * between the lookup and getattr entries, or between select and
 * symlink) and the closing "};" is gone.  Recover the full table
 * from revision history before building; do not compile as-is.
 */
struct vnodeops spec_vnodeops
= {
spec_lookup
, /* lookup */
spec_ebadf
, /* getattr */
spec_ebadf
, /* setattr */
spec_select
, /* select */
spec_badop
, /* symlink */
spec_badop
, /* readdir */
spec_badop
, /* readlink */
spec_badop
, /* abortop */
spec_unlock
, /* unlock */
spec_strategy
, /* strategy */
spec_advlock
, /* advlock */
/*
 * Trivial lookup routine that always fails.
 */
/*
 * Open called to allow handler
 * of special files to initialize and
 * validate before actual IO.
 */
/*
 * Open a special file: pass the call through to the device driver's
 * d_open entry with the device number, open mode, file type, and
 * current process.
 *
 * NOTE(review): corrupted chunk.  Missing from this view: the K&R
 * declarations for "mode" and "cred", the local "error" declaration,
 * the switch on vp->v_type with its VCHR/VBLK case labels, the bodies
 * of the bounds/NODEV checks, and the function's braces.  Only
 * fragments of the character and block dispatch paths remain.
 */
spec_open(vp
, mode
, cred
)
register struct vnode
*vp
;
struct proc
*p
= curproc
; /* XXX */
dev_t dev
= (dev_t
)vp
->v_rdev
;
register int maj
= major(dev
);
/* Test for a filesystem mounted MNT_NODEV; the statement this
   condition guarded is missing from the chunk. */
if (vp
->v_mount
&& (vp
->v_mount
->mnt_flag
& MNT_NODEV
))
/* Character-device path: range-check the major number (the guarded
   error return is missing), then dispatch to cdevsw d_open. */
if ((u_int
)maj
>= nchrdev
)
return ((*cdevsw
[maj
].d_open
)(dev
, mode
, S_IFCHR
, p
));
/* Block-device path: range-check the major number, consult
   mountedon() (its error-return body is missing), then dispatch
   to bdevsw d_open. */
if ((u_int
)maj
>= nblkdev
)
if (error
= mountedon(vp
))
return ((*bdevsw
[maj
].d_open
)(dev
, mode
, S_IFBLK
, p
));
/*
 * Vnode op for read.  Character devices are handed directly to the
 * driver's d_read; block devices are read through the buffer cache
 * in filesystem-block-sized chunks.
 *
 * NOTE(review): corrupted chunk.  Missing from this view: the K&R
 * declarations for "uio"/"ioflag"/"cred", the local declarations
 * (error, bp, dpart, bsize, bscale, bn, on, n), the panic for a
 * wrong uio_rw, the switch on vp->v_type with its VCHR/VBLK cases,
 * the "do {" opening the block-device loop, the brelse() of each
 * buffer, and the function braces/returns.  One stranded comment
 * line (its /* delimiters lost) remains inline as bogus code.
 */
spec_read(vp
, uio
, ioflag
, cred
)
register struct vnode
*vp
;
register struct uio
*uio
;
struct proc
*p
= curproc
; /* XXX */
/* Sanity check of the transfer direction; the guarded statement
   (presumably a panic) is missing. */
if (uio
->uio_rw
!= UIO_READ
)
* Negative offsets allowed only for /dev/kmem
if (uio
->uio_offset
< 0 && major(vp
->v_rdev
) != mem_no
)
/* Character-device path: hand the whole uio to the driver. */
error
= (*cdevsw
[major(vp
->v_rdev
)].d_read
)
(vp
->v_rdev
, uio
, ioflag
, p
);
/* Block-device path: ask the driver for the disk partition so the
   transfer can use the filesystem block size when one is known. */
if ((*bdevsw
[major(vp
->v_rdev
)].d_ioctl
)(vp
->v_rdev
, DIOCGPART
,
(caddr_t
)&dpart
, FREAD
, p
) == 0) {
if (dpart
.part
->p_fstype
== FS_BSDFFS
&&
dpart
.part
->p_frag
!= 0 && dpart
.part
->p_fsize
!= 0)
/* bsize = frag * fsize; the second factor's line is missing. */
bsize
= dpart
.part
->p_frag
*
bscale
= bsize
/ DEV_BSIZE
;
/* Compute block number, offset within block, and byte count for
   this iteration of the (missing) do-loop. */
bn
= (uio
->uio_offset
/ DEV_BSIZE
) &~ (bscale
- 1);
on
= uio
->uio_offset
% bsize
;
n
= MIN((unsigned)(bsize
- on
), uio
->uio_resid
);
/* Sequential access detected via v_lastr: read ahead one block. */
if (vp
->v_lastr
+ bscale
== bn
)
error
= breada(vp
, bn
, (int)bsize
, bn
+ bscale
,
(int)bsize
, NOCRED
, &bp
);
error
= bread(vp
, bn
, (int)bsize
, NOCRED
, &bp
);
/* Clamp to what the device actually delivered. */
n
= MIN(n
, bsize
- bp
->b_resid
);
/* Copy out of the buffer cache into the caller's uio. */
error
= uiomove(bp
->b_un
.b_addr
+ on
, n
, uio
);
} while (error
== 0 && uio
->uio_resid
> 0 && n
!= 0);
/*
 * Vnode op for write.  Character devices are handed directly to the
 * driver's d_write; block devices are written through the buffer
 * cache, reading each block first when it is only partly overwritten.
 *
 * NOTE(review): corrupted chunk.  Missing from this view: the K&R
 * declarations for "uio"/"ioflag"/"cred", the local declarations
 * (error, bp, dpart, bsize, blkmask, bn, on, n), the switch on
 * vp->v_type with its VCHR/VBLK cases, the "do {" opening the loop,
 * the whole-block/partial-block choice around getblk()/bread(), the
 * bwrite()/bawrite() that flushes each buffer, and the function
 * braces/returns.  One stranded comment line remains inline.
 */
spec_write(vp
, uio
, ioflag
, cred
)
register struct vnode
*vp
;
register struct uio
*uio
;
struct proc
*p
= curproc
; /* XXX */
/* Sanity check: this routine must only see writes. */
if (uio
->uio_rw
!= UIO_WRITE
)
panic("spec_write mode");
* Negative offsets allowed only for /dev/kmem
if (uio
->uio_offset
< 0 && major(vp
->v_rdev
) != mem_no
)
/* Character-device path: hand the whole uio to the driver. */
error
= (*cdevsw
[major(vp
->v_rdev
)].d_write
)
(vp
->v_rdev
, uio
, ioflag
, p
);
/* Block-device path: query the partition to pick the block size. */
if ((*bdevsw
[major(vp
->v_rdev
)].d_ioctl
)(vp
->v_rdev
, DIOCGPART
,
(caddr_t
)&dpart
, FREAD
, p
) == 0) {
if (dpart
.part
->p_fstype
== FS_BSDFFS
&&
dpart
.part
->p_frag
!= 0 && dpart
.part
->p_fsize
!= 0)
/* bsize = frag * fsize; the second factor's line is missing. */
bsize
= dpart
.part
->p_frag
*
blkmask
= (bsize
/ DEV_BSIZE
) - 1;
/* Block number, offset within block, and byte count for this
   iteration of the (missing) do-loop. */
bn
= (uio
->uio_offset
/ DEV_BSIZE
) &~ blkmask
;
on
= uio
->uio_offset
% bsize
;
n
= MIN((unsigned)(bsize
- on
), uio
->uio_resid
);
/* Whole-block overwrite can getblk(); a partial write must bread()
   first.  The if/else selecting between them is missing. */
bp
= getblk(vp
, bn
, bsize
);
error
= bread(vp
, bn
, bsize
, NOCRED
, &bp
);
n
= MIN(n
, bsize
- bp
->b_resid
);
/* Copy from the caller's uio into the cached buffer. */
error
= uiomove(bp
->b_un
.b_addr
+ on
, n
, uio
);
} while (error
== 0 && uio
->uio_resid
> 0 && n
!= 0);
/* Reached for a vnode type this switch (missing above) does not
   handle. */
panic("spec_write type");
/*
 * Device ioctl operation.
 */
/*
 * Pass an ioctl request through to the device driver (cdevsw for
 * character devices, bdevsw for block devices).
 *
 * NOTE(review): corrupted chunk.  Missing from this view: the K&R
 * parameter declarations, the local "dev" extracted from vp->v_rdev,
 * the switch on vp->v_type, the tails of both dispatch calls (the
 * ", fflag, p));" arguments), and the function braces.  The B_TAPE
 * fragment appears to special-case a tape query -- confirm against
 * revision history.
 */
spec_ioctl(vp
, com
, data
, fflag
, cred
)
struct proc
*p
= curproc
; /* XXX */
return ((*cdevsw
[major(dev
)].d_ioctl
)(dev
, com
, data
,
if (com
== 0 && (int)data
== B_TAPE
)
if (bdevsw
[major(dev
)].d_flags
& B_TAPE
)
return ((*bdevsw
[major(dev
)].d_ioctl
)(dev
, com
, data
,
/*
 * Select on a special file: dispatch to the character driver's
 * d_select entry.
 *
 * NOTE(review): corrupted chunk.  Missing from this view: the K&R
 * parameter declarations, the local "dev", any switch on vp->v_type
 * (block devices were presumably handled separately), and the
 * function braces.
 */
spec_select(vp
, which
, fflags
, cred
)
struct proc
*p
= curproc
; /* XXX */
return (*cdevsw
[major(dev
)].d_select
)(dev
, which
, p
);
/*
 * Just call the device strategy routine
 */
/* Body fragment of spec_strategy(): forwards the buffer to the block
   driver's d_strategy entry.  NOTE(review): the function header,
   parameter declaration for "bp", braces, and return are missing
   from this chunk. */
(*bdevsw
[major(bp
->b_dev
)].d_strategy
)(bp
);
/*
 * This is a noop, simply returning what one has been given.
 */
/* Header fragment of spec_bmap().  NOTE(review): the parameter
   declarations and the body (which, per the comment above, returned
   its inputs unchanged) are missing from this chunk. */
spec_bmap(vp
, bn
, vpp
, bnp
)
/*
 * At the moment we do not do any locking.
 */
/*
 * Device close: select the driver's d_close (cdevsw for character
 * devices, bdevsw for block devices) and invoke it, but only on the
 * last reference unless the vnode is being forcibly revoked (VXLOCK).
 *
 * NOTE(review): corrupted chunk.  Missing from this view: the K&R
 * declarations for "flag"/"cred", the locals "dev" and "mode", the
 * switch on vp->v_type with its VCHR/VBLK case labels, the early
 * "return (0)" bodies of the vcount checks, the block-device cache
 * invalidation, and the function braces.  Two stranded comment runs
 * (their /* delimiters lost) remain inline as bogus code.
 */
spec_close(vp
, flag
, cred
)
register struct vnode
*vp
;
struct proc
*p
= curproc
; /* XXX */
int (*cfunc
) __P((dev_t
, int, int, struct proc
*));
* If the vnode is locked, then we are in the midst
* of forcably closing the device, otherwise we only
* close on last reference.
/* Character-device path: skip the close while other references
   remain, unless forcibly closing. */
if (vcount(vp
) > 1 && (vp
->v_flag
& VXLOCK
) == 0)
cfunc
= cdevsw
[major(dev
)].d_close
;
* On last close of a block device (that isn't mounted)
* we must invalidate any in core blocks, so that
* we can, for instance, change floppy disks.
* We do not want to really close the device if it
* is still in use unless we are trying to close it
* forcibly. Since every use (buffer, vnode, swap, cmap)
* holds a reference to the vnode, and because we mark
* any other vnodes that alias this device, when the
* sum of the reference counts on all the aliased
* vnodes descends to one, we are on last close.
/* Block-device path: same last-reference test, bdevsw close. */
if (vcount(vp
) > 1 && (vp
->v_flag
& VXLOCK
) == 0)
cfunc
= bdevsw
[major(dev
)].d_close
;
/* Reached for a vnode type the (missing) switch does not handle. */
panic("spec_close: not special");
return ((*cfunc
)(dev
, flag
, mode
, p
));
/*
 * Print out the contents of a special device vnode.
 */
/* Body fragment of spec_print().  NOTE(review): the function header,
   braces, and the final printf argument (presumably
   minor(vp->v_rdev)) are missing from this chunk. */
printf("tag VT_NON, dev %d, %d\n", major(vp
->v_rdev
),
/*
 * Special device advisory byte-level locks.
 */
/* Header fragment of spec_advlock().  NOTE(review): the parameter
   declarations and body (advisory locking is unsupported on devices,
   so it presumably returned an error) are missing from this chunk. */
spec_advlock(vp
, id
, op
, fl
, flags
)
/*
 * Special device failed operation
 */
/*
 * Special device bad operation
 */
panic("spec_badop called");