[unix-history] / usr / src / sys / kern / kern_physio.c
/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.proprietary.c%
 *
 * @(#)kern_physio.c 7.23 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/trace.h>
#include <sys/map.h>
#include <sys/vnode.h>
#include <sys/specdev.h>

#ifdef HPUXCOMPAT
#include <sys/user.h>
#endif

static void freeswbuf __P((struct buf *));
static struct buf *getswbuf __P((int));

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked.  After the completion of the I/O, the pages
 * are unlocked.
 */
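/*
 * Calling convention (rawread/rawwrite below are typical callers):
 *	strat	- the driver's strategy routine, which starts the transfer
 *	bp	- buffer header to use; NULL means one is taken from the
 *		  swap buffer free list (see getswbuf/freeswbuf below)
 *	rw	- B_READ or B_WRITE
 *	mincnt	- routine used to clamp the size of each transfer,
 *		  normally minphys
 *	uio	- describes the user's I/O request
 */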
physio(strat, bp, dev, rw, mincnt, uio)
        int (*strat)();
        register struct buf *bp;
        dev_t dev;
        int rw;
        u_int (*mincnt)();
        struct uio *uio;
{
        register struct iovec *iov;
        register int requested, done;
        register struct proc *p = curproc;
        char *a;
        int s, allocbuf = 0, error = 0;
#ifdef SECSIZE
        int bsize;
        struct partinfo dpart;
#endif SECSIZE

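        /*
         * With SECSIZE configured, block numbers are computed in the
         * device's own sector size (taken from its disklabel via
         * DIOCGPART below) rather than in DEV_BSIZE units.
         */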
#ifdef SECSIZE
        if ((unsigned)major(dev) < nchrdev &&
            (*cdevsw[major(dev)].d_ioctl)(dev, DIOCGPART, (caddr_t)&dpart,
            FREAD) == 0)
                bsize = dpart.disklab->d_secsize;
        else
                bsize = DEV_BSIZE;
#endif SECSIZE
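        /*
         * If the caller did not supply a buffer header, take one from
         * the swap buffer free list; it is handed back by freeswbuf()
         * once the whole request has been processed.
         */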
        if (bp == NULL) {
                allocbuf = 1;
                bp = getswbuf(PRIBIO+1);
        }
        for (;;) {
                if (uio->uio_iovcnt == 0)
                        break;
                iov = uio->uio_iov;
                if (useracc(iov->iov_base, (u_int)iov->iov_len,
                    rw == B_READ ? B_WRITE : B_READ) == NULL) {
                        error = EFAULT;
                        break;
                }
                if (!allocbuf) {        /* only if sharing caller's buffer */
                        s = splbio();
                        while (bp->b_flags & B_BUSY) {
                                bp->b_flags |= B_WANTED;
                                sleep((caddr_t)bp, PRIBIO+1);
                        }
                        splx(s);
                }
                bp->b_error = 0;
                bp->b_proc = p;
#ifdef SECSIZE
                bp->b_blksize = bsize;
#endif SECSIZE
                bp->b_un.b_addr = iov->iov_base;
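                /*
                 * Transfer the iovec in mincnt-sized pieces: wire the
                 * user's pages with vslock(), start the I/O and wait
                 * for it to finish, unwire the pages, then advance the
                 * uio by the amount actually transferred.
                 */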
                while (iov->iov_len > 0) {
                        bp->b_flags = B_BUSY | B_PHYS | rw;
                        bp->b_dev = dev;
#ifdef SECSIZE
                        bp->b_blkno = uio->uio_offset / bsize;
#else SECSIZE
                        bp->b_blkno = btodb(uio->uio_offset);
#endif SECSIZE
                        bp->b_bcount = iov->iov_len;
                        (*mincnt)(bp);
                        requested = bp->b_bcount;
                        p->p_flag |= SPHYSIO;
                        vslock(a = bp->b_un.b_addr, requested);
                        physstrat(bp, strat, PRIBIO);
                        s = splbio();
                        vsunlock(a, requested, rw);
                        p->p_flag &= ~SPHYSIO;
                        if (bp->b_flags & B_WANTED)
                                wakeup((caddr_t)bp);
                        splx(s);
                        done = requested - bp->b_resid;
                        bp->b_un.b_addr += done;
                        iov->iov_len -= done;
                        uio->uio_resid -= done;
                        uio->uio_offset += done;
                        /* temp kludge for tape drives */
                        if (bp->b_resid || (bp->b_flags & B_ERROR))
                                break;
                }
                bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
                error = geterror(bp);
                if (bp->b_resid || error)
                        break;
                uio->uio_iov++;
                uio->uio_iovcnt--;
        }
#if defined(hp300)
        DCIU();
#endif
        if (allocbuf)
                freeswbuf(bp);
        return (error);
}

/*
 * Calculate the maximum size of I/O request that can be requested
 * in a single operation.  This limit is necessary to prevent a single
 * process from being able to lock more than a fixed amount of memory
 * in the kernel.
 */
u_int
minphys(bp)
        struct buf *bp;
{
        if (bp->b_bcount > MAXPHYS)
                bp->b_bcount = MAXPHYS;
}
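
/*
 * Drivers whose hardware cannot handle MAXPHYS-sized transfers pass
 * their own clamping routine as the mincnt argument rather than
 * minphys.  A hypothetical sketch in the same style (the name and
 * limit are illustrative only, not part of this file):
 *
 *	u_int
 *	xxminphys(bp)
 *		struct buf *bp;
 *	{
 *		if (bp->b_bcount > XX_MAXXFER)
 *			bp->b_bcount = XX_MAXXFER;
 *	}
 */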
c5648f55 144
251f56ba 145static struct buf *
c5648f55
KB
146getswbuf(prio)
147 int prio;
148{
149 int s;
150 struct buf *bp;
151
152 s = splbio();
153 while (bswlist.av_forw == NULL) {
154 bswlist.b_flags |= B_WANTED;
155 sleep((caddr_t)&bswlist, prio);
156 }
157 bp = bswlist.av_forw;
158 bswlist.av_forw = bp->av_forw;
159 splx(s);
160 return (bp);
161}
162
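/*
 * getswbuf (above) and freeswbuf manage the pool of buffer headers
 * threaded on bswlist.  freeswbuf puts a header back on the list,
 * releases any vnode still associated with it, and wakes up anyone
 * (including the pageout daemon) waiting for a free header.
 */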
static void
freeswbuf(bp)
        struct buf *bp;
{
        int s;

        s = splbio();
        bp->av_forw = bswlist.av_forw;
        bswlist.av_forw = bp;
        if (bp->b_vp)
                brelvp(bp);
        if (bswlist.b_flags & B_WANTED) {
                bswlist.b_flags &= ~B_WANTED;
                wakeup((caddr_t)&bswlist);
                wakeup((caddr_t)pageproc);
        }
        splx(s);
}

/*
 * Do a read on a device for a user process.
 */
rawread(dev, uio)
        dev_t dev;
        struct uio *uio;
{
        return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
            dev, B_READ, minphys, uio));
}

/*
 * Do a write on a device for a user process.
 */
rawwrite(dev, uio)
        dev_t dev;
        struct uio *uio;
{
        return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
            dev, B_WRITE, minphys, uio));
}
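
/*
 * rawread and rawwrite are convenience routines for character (raw)
 * devices: they pass a NULL buffer header, so physio takes one from
 * the swap buffer pool, and they use the default minphys clamp.  A
 * driver read entry built the same way might look like the following
 * hypothetical sketch (xxread, xxstrategy and xxminphys are
 * illustrative names, not part of this file):
 *
 *	xxread(dev, uio)
 *		dev_t dev;
 *		struct uio *uio;
 *	{
 *		return (physio(xxstrategy, (struct buf *)NULL, dev,
 *		    B_READ, xxminphys, uio));
 *	}
 */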