+}
+
+/*
+ * Allocate UNIBUS memory. Allocates and initializes
+ * sufficient mapping registers for access. On a 780,
+ * the configuration register is set up to disable UBA
+ * response on DMA transfers to addresses controlled
+ * by the disabled mapping registers.
+ * On a 780, should only be called from ubameminit, or in ascending order
+ * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
+ * the last unibus memory would free unusable map registers.
+ * Doalloc is 1 to allocate, 0 to deallocate.
+ */
+ubamem(uban, addr, npg, doalloc)
+ int uban, addr, npg, doalloc;
+{
+ register struct uba_hd *uh = &uba_hd[uban];
+ register int a;
+ int s;
+
+ a = (addr >> 9) + 1;
+ s = spluba();
+ if (doalloc)
+ a = rmget(uh->uh_map, npg, a);
+ else
+ rmfree(uh->uh_map, (long)npg, (long)a);
+ splx(s);
+ if (a) {
+ register int i, *m;
+
+ m = (int *)&uh->uh_uba->uba_map[a - 1];
+ for (i = 0; i < npg; i++)
+ *m++ = 0; /* All off, especially 'valid' */
+ i = addr + npg * 512;
+ if (doalloc && i > uh->uh_lastmem)
+ uh->uh_lastmem = i;
+ else if (doalloc == 0 && i == uh->uh_lastmem)
+ uh->uh_lastmem = addr;
+#if defined(VAX780) || defined(VAX8600)
+ /*
+ * On a 780, set up the map register disable
+ * field in the configuration register. Beware
+ * of callers that request memory ``out of order''
+ * or in sections other than 8K multiples.
+ * Ubameminit handles such requests properly, however.
+ */
+ if ((cpu == VAX_780) || (cpu == VAX_8600)) {
+ i = uh->uh_uba->uba_cr &~ 0x7c000000;
+ i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
+ uh->uh_uba->uba_cr = i;