/*	uba.c	4.54	82/10/21	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/buf.h"
#include "../h/vm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/conf.h"
#include "../h/dk.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument. The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns. If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately. This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
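
/*
 * Illustrative sketch only, not part of the original source: a start
 * routine for a hypothetical "xx" driver might hand a transfer to
 * ubago() like this. Per the comment above ubago(), ui must either be
 * at the head of the UBA request queue or not on it at all.
 */
#ifdef notdef
xxstart(ui)
	register struct uba_device *ui;
{

	/*
	 * ubago() maps the first buffer on the controller queue and
	 * calls the driver's ud_dgo routine; if UBA resources are short
	 * it queues ui and returns 0, and ubarelse() will retry when
	 * resources are freed.
	 */
	if (ubago(ui))
		return;
	/* transfer not started; it has been queued on the UBA */
}
#endif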

ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers, and a bdp if one is needed.
 * Flags say whether a bdp is needed and whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, npf, reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}
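
/*
 * Illustrative only; these macro names are not defined elsewhere in the
 * system. The packed value returned by ubasetup()/uballoc() can be
 * decoded as below; the shifts and masks mirror those used in ubarelse()
 * and ubapurge().
 */
#ifdef notdef
#define	UBAI_OFF(i)	((i) & 0x1ff)		/* byte offset, bits 0-8 */
#define	UBAI_MR(i)	(((i) >> 9) & 0x1ff)	/* first map register, bits 9-17 */
#define	UBAI_NMR(i)	(((i) >> 18) & 0x3ff)	/* number of map registers, bits 18-27 */
#define	UBAI_BDP(i)	(((i) >> 28) & 0xf)	/* buffered data path, bits 28-31 */
#endif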

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
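
/*
 * Illustrative only: a driver with a private (non-buf) area, such as a
 * command block, might map it once and keep the returned value for a
 * later ubarelse(). The names xxinfo, xxcmd and xxmapcmd are
 * hypothetical.
 */
#ifdef notdef
int	xxinfo;
char	xxcmd[64];

xxmapcmd(uban)
	int uban;
{

	xxinfo = uballoc(uban, (caddr_t)xxcmd, sizeof (xxcmd), UBA_CANTWAIT);
	return (xxinfo != 0);
}
#endif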

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);	/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, NUBMREG, 1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban. Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_hangcnt = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	for (cdp = cdevsw; cdp->d_open; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}
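
/*
 * Illustrative only: ubareset() calls each character driver's d_reset
 * entry with the uba number. A hypothetical driver reset routine has
 * this shape; it is expected to forget any saved um_ubinfo (the old map
 * registers are gone) and restart transfers that were in progress.
 */
#ifdef notdef
xxreset(uban)
	int uban;
{

	/* reclaim UBA resources and restart queued i/o on uba uban */
}
#endif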

/*
 * Init a uba. This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#ifdef VAX780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780. The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, xx, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		uh->uh_zvcnt++;
		if (uh->uh_zvcnt > 250000) {
			printf("uba%d: too many zero vectors\n", uban);
			ubareset(uban);
		}
		uvec = 0;
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * This routine is called by a driver for a device with on-board Unibus
 * memory. It removes the memory block from the Unibus resource map
 * and clears the map registers for the block.
 *
 * Arguments are the Unibus number, the Unibus address of the memory
 * block, its size in blocks of 512 bytes, and a flag indicating whether
 * to allocate the Unibus space from the resource map or whether it has
 * already been allocated.
 *
 * Returns > 0 if successful, 0 if not.
 */
ubamem(uban, addr, size, doalloc)
	int uban, addr, size, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (doalloc) {
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1);	/* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;
	if (a) {
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i=0; i<size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return (a);
}
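
/*
 * Illustrative only: an attach routine for a hypothetical device with
 * 2Kb of on-board Unibus memory at Unibus address addr might reserve
 * its block this way; doalloc = 1 takes the space from the resource map.
 */
#ifdef notdef
xxmemattach(uban, addr)
	int uban, addr;
{

	if (ubamem(uban, addr, 2048/512, 1) == 0)
		return (0);		/* Unibus space already in use */
	return (1);
}
#endif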

/*
 * Map a virtual address into the user's address space. Actually all we
 * do is turn on the user mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}