+
+/*
+ * Look for devices with unibus memory, allow them to configure, then disable
+ * map registers as necessary. Called during autoconfiguration and ubareset.
+ * The device ubamem routine returns 0 on success, 1 on success if it is fully
+ * configured (has no csr or interrupt, so doesn't need to be probed),
+ * and -1 on failure.
+ */
+ubameminit(uban)
+{
+ register struct uba_device *ui;
+ register struct uba_hd *uh = &uba_hd[uban];
+ /*
+ * umembase is the kernel virtual address of the top 8K of this
+ * adapter's unibus memory space (0x3e000 = 248K byte offset);
+ * ubaoff() extracts an address's offset within that 8K region.
+ */
+ caddr_t umembase = umem[uban] + 0x3e000, addr;
+#define ubaoff(off) ((int)(off) & 0x1fff)
+
+ uh->uh_lastmem = 0; /* recomputed as devices configure via ubamem() */
+ for (ui = ubdinit; ui->ui_driver; ui++) {
+ /* skip devices wired to a different adapter ('?' matches any) */
+ if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
+ continue;
+ if (ui->ui_driver->ud_ubamem) {
+ /*
+ * During autoconfiguration, need to fudge ui_addr.
+ */
+ addr = ui->ui_addr;
+ ui->ui_addr = umembase + ubaoff(addr);
+ switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
+ case 1:
+ /* fully configured: no csr/interrupt probe needed */
+ ui->ui_alive = 1;
+ /* FALLTHROUGH */
+ case 0:
+ /* success: record which adapter the device lives on */
+ ui->ui_ubanum = uban;
+ break;
+ /* -1 (failure) falls out with the device untouched */
+ }
+ ui->ui_addr = addr; /* restore for the normal csr probe */
+ }
+ }
+#if defined(VAX780) || defined(VAX8600)
+ /*
+ * On a 780, throw away any map registers disabled by rounding
+ * the map disable in the configuration register
+ * up to the next 8K boundary, or below the last unibus memory.
+ */
+ if ((cpu == VAX_780) || (cpu == VAX_8600)) {
+ register i;
+
+ /*
+ * Round the memory high-water mark up to 8K, convert bytes to
+ * pages with btop, then claim every map register at or below
+ * that page out of the resource map so the allocator can
+ * never hand out a register the hardware has disabled.
+ */
+ i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
+ while (i)
+ (void) rmget(uh->uh_map, 1, i--);
+ }
+#endif
+}
+
+/*
+ * Allocate UNIBUS memory. Allocates and initializes
+ * sufficient mapping registers for access. On a 780,
+ * the configuration register is setup to disable UBA
+ * response on DMA transfers to addresses controlled
+ * by the disabled mapping registers.
+ * On a 780, should only be called from ubameminit, or in ascending order
+ * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
+ * the last unibus memory would free unusable map registers.
+ * Doalloc is 1 to allocate, 0 to deallocate.
+ */
+ubamem(uban, addr, npg, doalloc)
+ int uban, addr, npg, doalloc;
+{
+ register struct uba_hd *uh = &uba_hd[uban];
+ register int a;
+ int s;
+
+ /*
+ * Convert the unibus byte address to a map register number:
+ * VAX pages are 512 bytes (>> 9), and resource-map indices are
+ * 1-origin, hence the +1 (undone by [a - 1] when indexing
+ * uba_map below).
+ */
+ a = (addr >> 9) + 1;
+ s = spluba(); /* resource map is also touched at interrupt level */
+ if (doalloc)
+ a = rmget(uh->uh_map, npg, a); /* 0 if the range is busy */
+ else
+ rmfree(uh->uh_map, (long)npg, (long)a);
+ splx(s);
+ if (a) {
+ register int i, *m;
+
+ m = (int *)&uh->uh_uba->uba_map[a - 1];
+ for (i = 0; i < npg; i++)
+ *m++ = 0; /* All off, especially 'valid' */
+ /* maintain the high-water mark of allocated unibus memory */
+ i = addr + npg * 512;
+ if (doalloc && i > uh->uh_lastmem)
+ uh->uh_lastmem = i;
+ else if (doalloc == 0 && i == uh->uh_lastmem)
+ uh->uh_lastmem = addr;
+#if defined(VAX780) || defined(VAX8600)
+ /*
+ * On a 780, set up the map register disable
+ * field in the configuration register. Beware
+ * of callers that request memory ``out of order''
+ * or in sections other than 8K multiples.
+ * Ubameminit handles such requests properly, however.
+ */
+ if ((cpu == VAX_780) || (cpu == VAX_8600)) {
+ /*
+ * The disable count occupies bits 26..30 of the
+ * configuration register (mask 0x7c000000): clear
+ * the old field, then insert the number of 8K
+ * chunks covered by uh_lastmem, rounded up.
+ */
+ i = uh->uh_uba->uba_cr &~ 0x7c000000;
+ i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
+ uh->uh_uba->uba_cr = i;
+ }
+#endif
+ }
+ return (a); /* first map register number; 0 on failed allocation */
+}
+
+#include "ik.h"
+#include "vs.h"
+#if NIK > 0 || NVS > 0
+/*
+ * Map a virtual address into users address space. Actually all we
+ * do is turn on the user mode write protection bits for the particular
+ * page of memory involved.
+ */
+maptouser(vaddress)
+ caddr_t vaddress;
+{
+ register unsigned pg;
+
+ /*
+ * Open the page containing vaddress to user-mode writes by
+ * rewriting its system page-table protection field. The page
+ * index is the kernel virtual address's offset from the base
+ * of system space (0x80000000), in 512-byte VAX pages.
+ */
+ pg = ((unsigned)vaddress - 0x80000000) >> 9;
+ Sysmap[pg].pg_prot = PG_UW >> 27;
+}
+
+unmaptouser(vaddress)
+ caddr_t vaddress;
+{
+ register unsigned pg;
+
+ /*
+ * Undo maptouser(): restore kernel-write-only protection on
+ * the system page-table entry covering vaddress. Same page
+ * index computation as maptouser (offset from 0x80000000 in
+ * 512-byte VAX pages).
+ */
+ pg = ((unsigned)vaddress - 0x80000000) >> 9;
+ Sysmap[pg].pg_prot = PG_KW >> 27;
+}
+#endif