/*	uba.c	4.48	82/08/13	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/cpu.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/buf.h"
#include "../h/vm.h"
#include "../h/ubareg.h"
#include "../h/ubavar.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/conf.h"
#include "../h/mtpr.h"
#include "../h/nexus.h"
#include "../h/dk.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
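
/*
 * Illustrative sketch only (hypothetical "xx" driver, not part of
 * this file's interfaces): a driver transfer-start routine hands
 * the device at the head of its controller queue to ubago().  A
 * return of 0 means the request was queued for UBA resources and
 * will be retried from ubarelse() when they are freed.
 */
#ifdef notdef
xxustart(ui)
	register struct uba_device *ui;
{

	if (ubago(ui))
		return (1);		/* transfer started */
	return (0);			/* queued behind UBA resource wait */
}
#endif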

ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers and bdp's.
 * Flags say whether a bdp is needed and whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp, i;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;	/* + 1 for the invalid guard entry below */
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, npf, reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;	/* invalid guard entry stops runaway transfers */
	return (ubinfo);
}
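
/*
 * Worked example of the ubinfo word returned by ubasetup(), matching
 * the bit layout documented above; the macro names are hypothetical
 * and are not used elsewhere in the system.  For instance, a transfer
 * using bdp 2, 4 map registers starting at register 5, and byte
 * offset 0100 packs as (2<<28)|(4<<18)|(5<<9)|0100.
 */
#ifdef notdef
#define	UBAI_OFF(i)	((i) & 0x1ff)		/* bits 0-8: byte offset */
#define	UBAI_MR(i)	(((i) >> 9) & 0x1ff)	/* bits 9-17: first map reg */
#define	UBAI_NMR(i)	(((i) >> 18) & 0x3ff)	/* bits 18-27: # map regs */
#define	UBAI_BDP(i)	(((i) >> 28) & 0xf)	/* bits 28-31: bdp number */
#endif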

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
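
/*
 * Sketch of a typical uballoc()/ubarelse() pairing for a private
 * kernel buffer (hypothetical names; the low 18 bits of the info
 * word form the UNIBUS address, per the layout above).  UBA_CANTWAIT
 * makes the call fail rather than sleep if resources are busy.
 */
#ifdef notdef
char	xxbuf[512];		/* hypothetical device buffer */
int	xxinfo;

xxmap(uban)
	int uban;
{

	xxinfo = uballoc(uban, xxbuf, sizeof (xxbuf),
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (xxinfo == 0)
		return (0);		/* resources busy, try again later */
	return (xxinfo & 0x3ffff);	/* UNIBUS address of xxbuf */
}

xxunmap(uban)
	int uban;
{

	ubarelse(uban, &xxinfo);
}
#endif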

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, NUBMREG, 1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_hangcnt = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	for (cdp = cdevsw; cdp->d_open; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#if VAX780
/*
 * Check to make sure the UNIBUS adaptor is not hung,
 * with an interrupt in the register to be presented,
 * but not presenting it for an extended period (5 seconds).
 */
unhang()
{
	register int uban;

	for (uban = 0; uban < numuba; uban++) {
		register struct uba_hd *uh = &uba_hd[uban];
		register struct uba_regs *up = uh->uh_uba;

		if (up->uba_sr == 0)
			return;
		up->uba_sr = UBASR_CRD|UBASR_LEB;
		uh->uh_hangcnt++;
		if (uh->uh_hangcnt > 5*hz) {
			uh->uh_hangcnt = 0;
			printf("uba%d: hung\n", uban);
			ubareset(uban);
		}
	}
}

/*
 * This is a timeout routine which decrements the ``i forgot to
 * interrupt'' counts, on an 11/780.  This prevents slowly growing
 * counts from causing a UBA reset since we are interested only
 * in hang situations.
 */
ubawatch()
{
	register struct uba_hd *uh;
	register int uban;

	if (panicstr)
		return;
	for (uban = 0; uban < numuba; uban++) {
		uh = &uba_hd[uban];
		if (uh->uh_hangcnt)
			uh->uh_hangcnt--;
	}
}

int	ubawedgecnt = 10;
int	ubacrazy = 500;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, xx, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		uh->uh_zvcnt++;
		if (uh->uh_zvcnt > 250000) {
			printf("uba%d: too many zero vectors\n", uban);
			ubareset(uban);
		}
		uvec = 0;
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

#ifdef notdef
/*
 * This routine allows remapping of previously
 * allocated UNIBUS bdp and map resources
 * onto different memory addresses.
 * It should only be used by routines which need
 * small fixed length mappings for long periods of time
 * (like the ARPANET ACC IMP interface).
 * It only maps kernel addresses.
 */
ubaremap(uban, ubinfo, addr)
	int uban;
	register unsigned ubinfo;
	caddr_t addr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int temp, bdp;
	int npf, o;

	o = (int)addr & PGOFSET;
	bdp = (ubinfo >> 28) & 0xf;
	npf = (ubinfo >> 18) & 0x3ff;
	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
	temp = (bdp << 21) | UBAMR_MRV;

	/*
	 * If using buffered data path initiate purge
	 * of old data and set byte offset bit if next
	 * transfer will be from odd address.
	 */
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		if (o & 1)
			temp |= UBAMR_BO;
	}

	/*
	 * Set up the map registers, leaving an invalid reg
	 * at the end to guard against wild unibus transfers.
	 */
	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];
	while (--npf != 0)
		*(int *)io++ = pte++->pg_pfnum | temp;
	*(int *)io = 0;

	/*
	 * Return effective UNIBUS address.
	 */
	return (ubinfo | o);
}
#endif

/*
 * This routine is called by a driver for a device with on-board Unibus
 * memory.  It removes the memory block from the Unibus resource map
 * and clears the map registers for the block.
 *
 * Arguments are the Unibus number, the Unibus address of the memory
 * block, its size in blocks of 512 bytes, and a flag indicating whether
 * to allocate the Unibus space from the resource map or whether it has
 * already been allocated.
 *
 * Returns > 0 if successful, 0 if not.
 */
ubamem(uban, addr, size, alloc)
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (alloc) {
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1);	/* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;
	if (a) {
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i=0; i<size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return (a);
}
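
/*
 * Sketch of an attach-time ubamem() call for a device with on-board
 * UNIBUS memory (hypothetical driver name, address and size): reserve
 * sixteen 512-byte blocks of UNIBUS address space and clear/disable
 * the corresponding map registers.
 */
#ifdef notdef
xxmemattach(uban)
	int uban;
{

	if (ubamem(uban, 0740000, 16, 1) == 0)
		printf("xx: can't reserve unibus space\n");
}
#endif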

/*
 * Map a virtual address into the user's address space.  Actually all we
 * do is set the protection bits to allow user-mode writes to the
 * particular page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
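
/*
 * Sketch of the intended pairing (hypothetical buffer name): open the
 * page holding a kernel buffer to user-mode access for the duration
 * of an operation, then restore kernel-only protection.
 */
#ifdef notdef
char	xxshared[512];		/* hypothetical page-resident buffer */

xxopenwin()
{

	maptouser(xxshared);		/* allow user-mode access to the page */
}

xxclosewin()
{

	unmaptouser(xxshared);		/* back to kernel-only access */
}
#endif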