LFS version 1; move ufs down a level
[unix-history] / usr / src / sys / kern / kern_physio.c
CommitLineData
5dc2581e
KB
1/*-
2 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * %sccs.include.proprietary.c%
da7c5cc6 6 *
d341bf94 7 * @(#)kern_physio.c 7.21 (Berkeley) %G%
da7c5cc6 8 */
961945a8 9
94368568
JB
10#include "param.h"
11#include "systm.h"
94368568
JB
12#include "buf.h"
13#include "conf.h"
14#include "proc.h"
15#include "seg.h"
94368568
JB
16#include "trace.h"
17#include "map.h"
c4ec2128 18#include "vnode.h"
0f93ba7b 19#include "specdev.h"
d301d150 20
edc76308
MK
21#ifdef HPUXCOMPAT
22#include "user.h"
23#endif
24
25static struct buf *getswbuf();
26static freeswbuf();
27
663dbc72 28/*
d99a6abd
KM
29 * This routine does device I/O for a user process.
30 *
663dbc72
BJ
31 * If the user has the proper access privileges, the process is
32 * marked 'delayed unlock' and the pages involved in the I/O are
d99a6abd 33 * faulted and locked. After the completion of the I/O, the pages
663dbc72
BJ
34 * are unlocked.
35 */
d6d7360b
BJ
36physio(strat, bp, dev, rw, mincnt, uio)
37 int (*strat)();
38 register struct buf *bp;
39 dev_t dev;
40 int rw;
c5648f55 41 u_int (*mincnt)();
d6d7360b 42 struct uio *uio;
663dbc72 43{
/*
 * NOTE(review): this body appears to interleave two merged revisions of
 * physio (an older u.u_procp/physstrat version and a newer
 * curproc/getswbuf version).  The inconsistencies are flagged below
 * rather than fixed, since the intended final revision cannot be
 * determined from this view alone.
 */
a196746e 44 register struct iovec *iov;
/* `requested'/`done' are declared but never used; the loop below uses an undeclared `c' -- merge artifact, TODO reconcile */
58c3cad7 45 register int requested, done;
/* `p' is initialized but the body still references u.u_procp -- merge artifact, TODO reconcile */
8429d022 46 register struct proc *p = curproc;
663dbc72 47 char *a;	/* user base address wired down by vslock() for the duration of the transfer */
/* NOTE(review): allocbuf is never set in the visible code, so the !allocbuf guard and freeswbuf() path below are dead here -- presumably a getswbuf() call was merged out; confirm against the repository head */
c5648f55 48 int s, allocbuf = 0, error = 0;
ec67a3ce
MK
49#ifdef SECSIZE
50 int bsize;
51 struct partinfo dpart;
52#endif SECSIZE
663dbc72 53
ec67a3ce
MK
/* SECSIZE: ask the driver for the device's sector size; fall back to DEV_BSIZE when DIOCGPART is unsupported or fails. */
54#ifdef SECSIZE
55 if ((unsigned)major(dev) < nchrdev &&
56 (*cdevsw[major(dev)].d_ioctl)(dev, DIOCGPART, (caddr_t)&dpart,
57 FREAD) == 0)
58 bsize = dpart.disklab->d_secsize;
59 else
60 bsize = DEV_BSIZE;
61#endif SECSIZE
/* Loop over the iovec list, performing raw I/O one segment at a time. */
62 for (;;) {
63 if (uio->uio_iovcnt == 0)
64 return (0);
65 iov = uio->uio_iov;
/* Verify the process may access the user buffer in the direction the transfer will write it (a read from the device writes user memory, and vice versa). */
66 if (useracc(iov->iov_base, (u_int)iov->iov_len,
67 rw==B_READ? B_WRITE : B_READ) == NULL)
68 return (EFAULT);
/* NOTE(review): this unconditional wait duplicates the !allocbuf-guarded copy just below and, unlike it, never calls splx() -- merge artifact leaving the IPL raised on this path */
69 s = splbio();
70 while (bp->b_flags&B_BUSY) {
71 bp->b_flags |= B_WANTED;
72 sleep((caddr_t)bp, PRIBIO+1);
73 }
c5648f55
KB
74 if (!allocbuf) { /* only if sharing caller's buffer */
75 s = splbio();
76 while (bp->b_flags&B_BUSY) {
77 bp->b_flags |= B_WANTED;
78 sleep((caddr_t)bp, PRIBIO+1);
79 }
80 splx(s);
81 }
ec67a3ce
MK
82 bp->b_error = 0;
83 bp->b_proc = u.u_procp;
84#ifdef SECSIZE
85 bp->b_blksize = bsize;
86#endif SECSIZE
87 bp->b_un.b_addr = iov->iov_base;
/* Transfer this segment, possibly in several chunks if mincnt() trims the request. */
88 while (iov->iov_len > 0) {
89 bp->b_flags = B_BUSY | B_PHYS | rw;
90 bp->b_dev = dev;
91#ifdef SECSIZE
92 bp->b_blkno = uio->uio_offset / bsize;
93#else SECSIZE
94 bp->b_blkno = btodb(uio->uio_offset);
95#endif SECSIZE
96 bp->b_bcount = iov->iov_len;
/* Let the driver clamp the transfer to its per-request maximum. */
97 (*mincnt)(bp);
/* NOTE(review): `c' is not declared in this revision (see `requested'/`done' above) -- merge artifact */
98 c = bp->b_bcount;
/* Mark the process as doing physical I/O and wire the user pages so they cannot be paged out mid-transfer. */
99 u.u_procp->p_flag |= SPHYSIO;
100 vslock(a = bp->b_un.b_addr, c);
/* presumably starts the I/O via strat and sleeps until completion -- confirm physstrat semantics */
101 physstrat(bp, strat, PRIBIO);
102 (void) splbio();
103 vsunlock(a, c, rw);
104 u.u_procp->p_flag &= ~SPHYSIO;
105 if (bp->b_flags&B_WANTED)
106 wakeup((caddr_t)bp);
107 splx(s);
/* Advance by the number of bytes actually transferred (requested minus residual). */
108 c -= bp->b_resid;
109 bp->b_un.b_addr += c;
110 iov->iov_len -= c;
111 uio->uio_resid -= c;
112 uio->uio_offset += c;
/* temp kludge for tape drives */
113 /* A short transfer or error ends this segment early (tape records may legitimately be short). */
114 if (bp->b_resid || (bp->b_flags&B_ERROR))
115 break;
116 }
117 bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
118 error = geterror(bp);
ec67a3ce
MK
/* NOTE(review): like the returns above, this bypasses the freeswbuf() cleanup at the bottom -- harmless only while allocbuf is never set */
119 if (bp->b_resid || error)
120 return (error);
121 uio->uio_iov++;
122 uio->uio_iovcnt--;
663dbc72 123 }
cd682858
KM
124#if defined(hp300)
125 DCIU();
126#endif
c5648f55
KB
127 if (allocbuf)
128 freeswbuf(bp);
129 return (error);
663dbc72
BJ
130}
131
d99a6abd
KM
132/*
133 * Calculate the maximum size of I/O request that can be requested
134 * in a single operation. This limit is necessary to prevent a single
135 * process from being able to lock more than a fixed amount of memory
136 * in the kernel.
137 */
c5648f55 138u_int
663dbc72 139minphys(bp)
d6d7360b 140 struct buf *bp;
663dbc72 141{
35a494b8
SL
142 if (bp->b_bcount > MAXPHYS)
143 bp->b_bcount = MAXPHYS;
663dbc72 144}
c5648f55
KB
145
146static
147struct buf *
148getswbuf(prio)
149 int prio;
150{
151 int s;
152 struct buf *bp;
153
154 s = splbio();
155 while (bswlist.av_forw == NULL) {
156 bswlist.b_flags |= B_WANTED;
157 sleep((caddr_t)&bswlist, prio);
158 }
159 bp = bswlist.av_forw;
160 bswlist.av_forw = bp->av_forw;
161 splx(s);
162 return (bp);
163}
164
165static
166freeswbuf(bp)
167 struct buf *bp;
168{
169 int s;
170
171 s = splbio();
172 bp->av_forw = bswlist.av_forw;
173 bswlist.av_forw = bp;
343a57bd
KM
174 if (bp->b_vp)
175 brelvp(bp);
c5648f55
KB
176 if (bswlist.b_flags & B_WANTED) {
177 bswlist.b_flags &= ~B_WANTED;
178 wakeup((caddr_t)&bswlist);
8429d022 179 wakeup((caddr_t)pageproc);
c5648f55
KB
180 }
181 splx(s);
182}
183
d99a6abd
KM
184/*
185 * Do a read on a device for a user process.
186 */
c5648f55
KB
187rawread(dev, uio)
188 dev_t dev;
189 struct uio *uio;
190{
191 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
192 dev, B_READ, minphys, uio));
193}
194
d99a6abd
KM
195/*
196 * Do a write on a device for a user process.
197 */
c5648f55
KB
198rawwrite(dev, uio)
199 dev_t dev;
200 struct uio *uio;
201{
202 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
203 dev, B_WRITE, minphys, uio));
204}