+ while (uh->uh_actf && ubago(uh->uh_actf))
+ ;
+}
+
+/*
+ * Purge the buffered data path (BDP) in use by controller um,
+ * forcing any bytes still buffered in the adapter out to memory.
+ * NOTE(review): assumes the BDP number lives in bits 28-31 of
+ * um_ubinfo -- established by the map allocation code, which is
+ * not visible in this chunk.  The purge mechanism differs by
+ * adapter type; the 730 has no BDPs, so no case is needed for it.
+ */
+ubapurge(um)
+ register struct uba_ctlr *um;
+{
+ register struct uba_hd *uh = um->um_hd;
+ register int bdp = (um->um_ubinfo >> 28) & 0x0f;
+
+ switch (cpu) {
+#if defined(VAX780) || defined(VAX8600)
+ case VAX_8600:
+ case VAX_780:
+ /* Setting "buffer not empty" initiates the purge. */
+ uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
+ break;
+#endif
+#if VAX750
+ case VAX_750:
+ /* Purge, and clear latched NXM/uncorrectable-error state too. */
+ uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
+ break;
+#endif
+ }
+}
+
+/*
+ * Initialize the allocation maps for UNIBUS adapter uhp:
+ * the map-register resource map (NUBMREG registers, 1-origin),
+ * and the free-BDP bitmask -- one bit per buffered data path
+ * the adapter type provides (none on the 730).
+ */
+ubainitmaps(uhp)
+ register struct uba_hd *uhp;
+{
+
+ rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
+ switch (cpu) {
+#if defined(VAX780) || defined(VAX8600)
+ case VAX_8600:
+ case VAX_780:
+ uhp->uh_bdpfree = (1<<NBDP780) - 1;
+ break;
+#endif
+#if VAX750
+ case VAX_750:
+ uhp->uh_bdpfree = (1<<NBDP750) - 1;
+ break;
+#endif
+#if VAX730
+ case VAX_730:
+ /* The 730 has no buffered data paths. */
+ break;
+#endif
+ }
+}
+
+/*
+ * Generate a reset on uba number uban. Then
+ * call each device in the character device table,
+ * giving it a chance to clean up so as to be able to continue.
+ * All adapter software state (pending-transfer queue, map and
+ * BDP allocations, exclusive-use and wait flags) is discarded
+ * first, since the hardware init invalidates it anyway.
+ */
+ubareset(uban)
+ int uban;
+{
+ register struct cdevsw *cdp;
+ register struct uba_hd *uh = &uba_hd[uban];
+ int s;
+
+ s = spluba();
+ /* Throw away all accounted/queued adapter state. */
+ uh->uh_users = 0;
+ uh->uh_zvcnt = 0;
+ uh->uh_xclu = 0;
+ uh->uh_actf = uh->uh_actl = 0;
+ uh->uh_bdpwant = 0;
+ uh->uh_mrwant = 0;
+ ubainitmaps(uh);
+ /* Anyone sleeping for map registers or a BDP must retry. */
+ wakeup((caddr_t)&uh->uh_bdpwant);
+ wakeup((caddr_t)&uh->uh_mrwant);
+ printf("uba%d: reset", uban);
+ ubainit(uh->uh_uba);
+ ubameminit(uban);
+ /* Give every character device driver a chance to reinitialize. */
+ for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
+ (*cdp->d_reset)(uban);
+ ifubareset(uban);
+ printf("\n");
+ splx(s);
+}
+
+/*
+ * Init a uba. This is called with a pointer
+ * rather than a virtual address since it is called
+ * by code which runs with memory mapping disabled.
+ * In these cases we really don't need the interrupts
+ * enabled, but since we run with ipl high, we don't care
+ * if they are, they will never happen anyways.
+ */
+ubainit(uba)
+ register struct uba_regs *uba;
+{
+
+ switch (cpu) {
+#if defined(VAX780) || defined(VAX8600)
+ case VAX_8600:
+ case VAX_780:
+ /*
+ * Start the adapter init sequence, re-enable the interrupt
+ * sources we use, then busy-wait until the adapter reports
+ * initialization complete in the configuration register.
+ */
+ uba->uba_cr = UBACR_ADINIT;
+ uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
+ while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
+ ;
+ break;
+#endif
+#if VAX750
+ case VAX_750:
+#endif
+#if VAX730
+ case VAX_730:
+#endif
+#if defined(VAX750) || defined(VAX730)
+ /*
+ * 750 and 730 share one path: the UNIBUS is reset via the
+ * UNIBUS-init internal processor register.
+ */
+ mtpr(IUR, 0);
+ /* give devices time to recover from power fail */
+/* THIS IS PROBABLY UNNECESSARY */
+ DELAY(500000);
+/* END PROBABLY UNNECESSARY */
+ break;
+#endif
+ }
+}
+
+#if defined(VAX780) || defined(VAX8600)
+/* Print "ERROR LIMIT " and reset every ubawedgecnt errors. */
+int ubawedgecnt = 10;
+/* Panic ("uba crazy") rather than keep resetting past this count. */
+int ubacrazy = 500;
+/* Zero vectors tolerated per 8-second window before a reset. */
+int zvcnt_max = 5000; /* in 8 sec */
+/*
+ * This routine is called by the locore code to process a UBA
+ * error on an 11/780 or 8600. The arguments are passed
+ * on the stack, and value-result (through some trickery).
+ * In particular, the uvec argument is used for further
+ * uba processing so the result aspect of it is very important.
+ * It must not be declared register.
+ * Setting uvec to 0 tells the caller to do no further vector
+ * processing (used after a reset, which re-inits everything).
+ */
+/*ARGSUSED*/
+ubaerror(uban, uh, ipl, uvec, uba)
+ register int uban;
+ register struct uba_hd *uh;
+ int ipl, uvec;
+ register struct uba_regs *uba;
+{
+ register sr, s;
+
+ if (uvec == 0) {
+ /*
+ * Zero (passive release) vector: just count it, resetting the
+ * adapter only if we see an implausible number in a short time.
+ */
+ /*
+ * Declare dt as unsigned so that negative values
+ * are handled as >8 below, in case time was set back.
+ */
+ u_long dt = time.tv_sec - uh->uh_zvtime;
+
+ uh->uh_zvtotal++;
+ if (dt > 8) {
+ /* Window expired; start a new 8-second count. */
+ uh->uh_zvtime = time.tv_sec;
+ uh->uh_zvcnt = 0;
+ }
+ if (++uh->uh_zvcnt > zvcnt_max) {
+ printf("uba%d: too many zero vectors (%d in <%d sec)\n",
+ uban, uh->uh_zvcnt, dt + 1);
+ printf("\tIPL 0x%x\n\tcnfgr: %b Adapter Code: 0x%x\n",
+ ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
+ uba->uba_cnfgr&0xff);
+ printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
+ uba->uba_sr, ubasr_bits, uba->uba_dcr,
+ (uba->uba_dcr&0x8000000)?"":"NOT ");
+ ubareset(uban);
+ }
+ return;
+ }
+ if (uba->uba_cnfgr & NEX_CFGFLT) {
+ /* SBI-level fault: reset, and suppress further vector processing. */
+ printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
+ uban, uba->uba_sr, ubasr_bits,
+ uba->uba_cnfgr, NEXFLT_BITS);
+ ubareset(uban);
+ uvec = 0;
+ return;
+ }
+ sr = uba->uba_sr;
+ s = spluba();
+ printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
+ uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
+ splx(s);
+ /*
+ * Write the saved status back to the status register, presumably
+ * to clear the logged error bits (write-one-to-clear semantics --
+ * NOTE(review): confirm against the UBA hardware spec).
+ */
+ uba->uba_sr = sr;
+ uvec &= UBABRRVR_DIV;
+ if (++uh->uh_errcnt % ubawedgecnt == 0) {
+ if (uh->uh_errcnt > ubacrazy)
+ panic("uba crazy");
+ printf("ERROR LIMIT ");
+ ubareset(uban);
+ uvec = 0;
+ return;
+ }
+ return;
+}
+#endif
+
+/*
+ * Look for devices with unibus memory, allow them to configure, then disable
+ * map registers as necessary. Called during autoconfiguration and ubareset.
+ * The device ubamem routine returns 0 on success, 1 on success if it is fully
+ * configured (has no csr or interrupt, so doesn't need to be probed),
+ * and -1 on failure.
+ */
+ubameminit(uban)
+{
+ register struct uba_device *ui;
+ register struct uba_hd *uh = &uba_hd[uban];
+ caddr_t umembase = umem[uban] + 0x3e000, addr;
+#define ubaoff(off) ((int)(off) & 0x1fff)
+
+ uh->uh_lastmem = 0;
+ for (ui = ubdinit; ui->ui_driver; ui++) {
+ /* ui_ubanum of '?' is the wildcard: device may be on any uba. */
+ if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
+ continue;
+ if (ui->ui_driver->ud_ubamem) {
+ /*
+ * During autoconfiguration, need to fudge ui_addr:
+ * temporarily point it at the mapped unibus memory
+ * for this adapter, restoring the original afterward.
+ */
+ addr = ui->ui_addr;
+ ui->ui_addr = umembase + ubaoff(addr);
+ switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
+ case 1:
+ /* Fully configured; no probe needed. */
+ ui->ui_alive = 1;
+ /* FALLTHROUGH */
+ case 0:
+ ui->ui_ubanum = uban;
+ break;
+ }
+ ui->ui_addr = addr;
+ }
+ }
+#if defined(VAX780) || defined(VAX8600)
+ /*
+ * On a 780, throw away any map registers disabled by rounding
+ * the map disable in the configuration register
+ * up to the next 8K boundary, or below the last unibus memory.
+ */
+ if ((cpu == VAX_780) || (cpu == VAX_8600)) {
+ register i;
+
+ i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
+ while (i)
+ (void) rmget(uh->uh_map, 1, i--);
+ }
+#endif
+}
+
+/*
+ * Allocate UNIBUS memory. Allocates and initializes
+ * sufficient mapping registers for access. On a 780,
+ * the configuration register is setup to disable UBA
+ * response on DMA transfers to addresses controlled
+ * by the disabled mapping registers.
+ * On a 780, should only be called from ubameminit, or in ascending order
+ * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
+ * the last unibus memory would free unusable map registers.
+ * Doalloc is 1 to allocate, 0 to deallocate.
+ * Returns the 1-origin map register index, or 0 on failure.
+ */
+ubamem(uban, addr, npg, doalloc)
+ int uban, addr, npg, doalloc;
+{
+ register struct uba_hd *uh = &uba_hd[uban];
+ register int a;
+ int s;
+
+ /* Convert the UNIBUS byte address (512-byte pages) to a 1-origin
+  map-register index for the resource map. */
+ a = (addr >> 9) + 1;
+ s = spluba();
+ if (doalloc)
+ a = rmget(uh->uh_map, npg, a);
+ else
+ rmfree(uh->uh_map, (long)npg, (long)a);
+ splx(s);
+ if (a) {
+ register int i, *m;
+
+ m = (int *)&uh->uh_uba->uba_map[a - 1];
+ for (i = 0; i < npg; i++)
+ *m++ = 0; /* All off, especially 'valid' */
+ /* Track the end of the highest-addressed unibus memory. */
+ i = addr + npg * 512;
+ if (doalloc && i > uh->uh_lastmem)
+ uh->uh_lastmem = i;
+ else if (doalloc == 0 && i == uh->uh_lastmem)
+ uh->uh_lastmem = addr;
+#if defined(VAX780) || defined(VAX8600)
+ /*
+ * On a 780, set up the map register disable
+ * field in the configuration register. Beware
+ * of callers that request memory ``out of order''
+ * or in sections other than 8K multiples.
+ * Ubameminit handles such requests properly, however.
+ */
+ if ((cpu == VAX_780) || (cpu == VAX_8600)) {
+ i = uh->uh_uba->uba_cr &~ 0x7c000000;
+ i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
+ uh->uh_uba->uba_cr = i;
+ }
+#endif
+ }
+ return (a);
+}
+
+#include "ik.h"
+#include "vs.h"
+#if NIK > 0 || NVS > 0
+/*
+ * Map a virtual address into users address space. Actually all we
+ * do is turn on the user mode write protection bits for the particular
+ * page of memory involved.
+ */
+maptouser(vaddress)
+ caddr_t vaddress;
+{
+
+ Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);