lint and interface cleanups
[unix-history] usr/src/sys/vax/if/if_uba.c
/*	if_uba.c	4.3	81/11/29	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/mbuf.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/buf.h"
#include "../h/ubareg.h"
#include "../h/ubavar.h"
#include "../h/cmap.h"
#include "../h/mtpr.h"
#include "../h/vmmac.h"
#include "../net/in.h"
#include "../net/in_systm.h"
#include "../net/if.h"
#include "../net/if_uba.h"

/*
 * Routines supporting UNIBUS network interfaces.
 *
 * TODO:
 *	Support interfaces using only one BDP statically.
 */

/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this twice: once for reading and once for writing.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
if_ubainit(ifu, uban, hlen, nmr)
	register struct ifuba *ifu;
	int uban, hlen, nmr;
{
	register caddr_t cp = (caddr_t)m_pgalloc(2 * (nmr + 1));
	int i;

COUNT(IF_UBAINIT);
	if (cp == 0)
		return (0);
	ifu->ifu_uban = uban;
	ifu->ifu_uba = uba_hd[uban].uh_uba;
	ifu->ifu_r.ifrw_addr = cp + NBPG - hlen;
	ifu->ifu_w.ifrw_addr = ifu->ifu_r.ifrw_addr + (nmr + 1) * NBPG;
	if (if_ubaalloc(ifu, &ifu->ifu_r) == 0)
		goto bad;
	if (if_ubaalloc(ifu, &ifu->ifu_w) == 0)
		goto bad2;
	for (i = 0; i < IF_NUBAMR; i++)
		ifu->ifu_wmap[i] = ifu->ifu_w.ifrw_mr[i+1];
	ifu->ifu_xswapd = 0;
	return (1);
bad2:
	ubarelse(ifu->ifu_uban, &ifu->ifu_r.ifrw_info);
bad:
	m_pgfree(cp, 2 * (nmr + 1));
	return (0);
}
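
/*
 * Usage sketch: a hypothetical UNIBUS network driver would normally call
 * if_ubainit once per unit from its attach or init routine, e.g.
 *
 *	if (if_ubainit(&xs->xs_ifuba, ui->ui_ubanum,
 *	    sizeof (struct xx_header), btoc(XXMTU)) == 0)
 *		printf("xx%d: can't initialize\n", ui->ui_unit);
 *
 * Here xs, xx_header and XXMTU are placeholders for the driver's own
 * per-unit structure, local network header and maximum packet size;
 * they are not defined in this file.
 */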

/*
 * Set up an ifrw structure by allocating UNIBUS map registers and
 * a buffered data path, and initialize the fields of the ifrw
 * structure to minimize run-time overhead.
 */
static
if_ubaalloc(ifu, ifrw)
	struct ifuba *ifu;
	register struct ifrw *ifrw;
{
	register int info;

COUNT(IF_UBAALLOC);
	info =
	    uballoc(ifu->ifu_uban, ifrw->ifrw_addr, IF_NUBAMR*NBPG + ifu->ifu_hlen,
		UBA_NEED16|UBA_NEEDBDP);
	if (info == 0)
		return (0);
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_MR(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
	return (1);
}
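
/*
 * The map register image that the routines below stuff into the UNIBUS
 * map is built as
 *
 *	ifrw_proto | pte->pg_pfnum
 *
 * i.e. the constant part prepared here by if_ubaalloc (the map register
 * valid bit plus the data path field) or-ed with the page frame number
 * of the mbuf page currently being mapped.  This is what lets if_rubaget
 * and if_wubaput splice cluster pages in and out of the mapped i/o area
 * instead of copying the data.
 */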

/*
 * Pull read data off an interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 */
struct mbuf *
if_rubaget(ifu, totlen, off0)
	register struct ifuba *ifu;
	int totlen, off0;
{
	register struct mbuf *m;
	register caddr_t cp;
	struct mbuf **mp, *p, *top;
	int len, off = off0;

COUNT(IF_RUBAGET);

	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, 0);
		if (m == 0)
			goto bad;
		if (off) {
			len = totlen - off;
			cp = ifu->ifu_r.ifrw_addr + ifu->ifu_hlen + off;
		} else
			len = totlen;
		if (len >= CLSIZE) {
			struct pte *cpte, *ppte;
			int i, x, *ip;

			MCLGET(p, 1);
			if (p == 0)
				goto nopage;
			m->m_len = CLSIZE;
			m->m_off = (int)p - (int)m;
			if ((int)cp & CLOFF)
				goto copy;

			/*
			 * Cluster size data on cluster size boundary.
			 * Input by remapping newly allocated pages to
			 * UNIBUS, and taking pages with data already
			 * in them.
			 *
			 * Cpte is the pte of the virtual memory which
			 * is mapped to the UNIBUS, and ppte is the pte
			 * for the fresh pages.  We switch the memory
			 * copies of these pte's, to make the allocated
			 * virtual memory contain the data (using the old
			 * physical pages).  We have to rewrite
			 * the UNIBUS map so that the newly allocated
			 * pages will be used for the next UNIBUS read,
			 * and invalidate the kernel translations
			 * for the virtual addresses of the pages
			 * we are flipping.
			 *
			 * The idea here is that this is supposed
			 * to take less time than copying the data.
			 */
			cpte = &Mbmap[mtocl(cp)];
			ppte = &Mbmap[mtocl(p)];
			x = btop(cp - ifu->ifu_r.ifrw_addr);
			ip = (int *)&ifu->ifu_r.ifrw_mr[x+1];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				t = *ppte; *ppte = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifu->ifu_r.ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			off += m->m_len;
			if (off == totlen) {
				cp = ifu->ifu_r.ifrw_addr + ifu->ifu_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	m_freem(top);
	return (0);
}
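
/*
 * Usage sketch: a hypothetical driver's receive interrupt routine, after
 * purging the read buffered data path, might collect the packet with
 *
 *	m = if_rubaget(&xs->xs_ifuba, len, off);
 *
 * where len is the received byte count with the local network header
 * stripped and off is zero unless a trailer protocol put the header at
 * offset off; the resulting mbuf chain is then handed to the protocol
 * input routine.  The names xs, len and off are illustrative only.
 */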

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
if_wubaput(ifu, m)
	register struct ifuba *ifu;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp, dp;
	register int i;
	int xswapd = ifu->ifu_xswapd;
	int x;

COUNT(IF_WUBAPUT);
	ifu->ifu_xswapd = 0;
	cp = ifu->ifu_w.ifrw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp)) {
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)];
			x = btop(cp - ifu->ifu_w.ifrw_addr);
			ip = (int *)&ifu->ifu_w.ifrw_mr[x + 1];
			for (i = 0; i < CLSIZE; i++)
				*ip++ =
				    ifu->ifu_w.ifrw_proto | pte++->pg_pfnum;
			ifu->ifu_xswapd |= 1 << (x>>CLSHIFT);
		} else
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
		cp += m->m_len;
		MFREE(m, mp);		/* XXX too soon! */
		m = mp;
	}
	xswapd &= ~ifu->ifu_xswapd;
	if (xswapd)
		while (i = ffs(xswapd)) {
			i--;
			xswapd &= ~(1<<i);
			i <<= CLSHIFT;
			for (x = 0; x < CLSIZE; x++) {
				ifu->ifu_w.ifrw_mr[i] = ifu->ifu_wmap[i];
				i++;
			}
		}
	return (cp - ifu->ifu_w.ifrw_addr);
}
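
/*
 * Usage sketch: a hypothetical driver's output start routine might map
 * an outgoing chain with
 *
 *	len = if_wubaput(&xs->xs_ifuba, m);
 *
 * and then load the device's UNIBUS address and byte count registers from
 * the write area (UBAI_ADDR(ifu->ifu_w.ifrw_info)) and the returned
 * length before starting the transfer.  Again xs stands in for the
 * driver's own per-unit structure.
 */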