+
+#ifdef notdef
+/*
+ * This routine allows remapping of previously
+ * allocated UNIBUS bdp and map resources
+ * onto different memory addresses.
+ * It should only be used by routines which need
+ * small fixed length mappings for long periods of time
+ * (like the ARPANET ACC IMP interface).
+ * It only maps kernel addresses.
+ *
+ * NOTE(review): this routine is compiled out (#ifdef notdef);
+ * it is retained for reference only.
+ *
+ * uban   - UNIBUS adapter number
+ * ubinfo - packed mapping info (presumably as returned by the
+ *          UNIBUS setup code -- verify against caller):
+ *          bits 28-31 bdp number, bits 18-27 map register count
+ *          (including the trailing guard register), bits 9-17
+ *          index of the first map register, bits 0-8 byte offset
+ * addr   - kernel virtual address to remap onto
+ * Returns the effective UNIBUS address of the remapped region.
+ */
+ubaremap(uban, ubinfo, addr)
+	int uban;
+	register unsigned ubinfo;
+	caddr_t addr;
+{
+	register struct uba_hd *uh = &uba_hd[uban];
+	register struct pte *pte, *io;
+	register int temp, bdp;
+	int npf, o;
+
+	/* Unpack the fields of ubinfo (see layout above). */
+	o = (int)addr & PGOFSET;		/* byte offset of addr in its page */
+	bdp = (ubinfo >> 28) & 0xf;		/* buffered data path number */
+	npf = (ubinfo >> 18) & 0x3ff;		/* map regs, incl. trailing guard */
+	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];	/* first map reg */
+	temp = (bdp << 21) | UBAMR_MRV;	/* map reg template: bdp select + valid */
+
+	/*
+	 * If using buffered data path initiate purge
+	 * of old data and set byte offset bit if next
+	 * transfer will be from odd address.
+	 */
+	if (bdp) {
+		switch (cpu) {
+#if VAX780
+		case VAX_780:
+			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
+			break;
+#endif
+#if VAX750
+		case VAX_750:
+			uh->uh_uba->uba_dpr[bdp] |=
+			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
+			break;
+#endif
+		}
+		if (o & 1)
+			temp |= UBAMR_BO;	/* odd start address: byte offset */
+	}
+
+	/*
+	 * Set up the map registers, leaving an invalid reg
+	 * at the end to guard against wild unibus transfers.
+	 */
+	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];	/* system pte for addr */
+	while (--npf != 0)
+		*(int *)io++ = pte++->pg_pfnum | temp;
+	*(int *)io = 0;			/* guard register: left invalid */
+
+	/*
+	 * Return effective UNIBUS address.
+	 */
+	return (ubinfo | o);
+}
+#endif
+
+/*
+ * This routine is called by a driver for a device with on-board Unibus
+ * memory. It removes the memory block from the Unibus resource map
+ * and clears the map registers for the block.
+ *
+ * Arguments are the Unibus number, the Unibus address of the memory
+ * block, its size in blocks of 512 bytes, and a flag indicating whether
+ * to allocate the unibus space from the resource map or whether it already
+ * has been.
+ *
+ * Returns > 0 if successful, 0 if not.
+ */
+
+ubamem(uban, addr, size, alloc)
+{
+	register struct uba_hd *uh = &uba_hd[uban];
+	register int *map;
+	register int i, mr, s;
+
+	/*
+	 * Locate the block of map registers: either carve it out of
+	 * the adapter's resource map (with interrupts blocked) or
+	 * trust the caller-supplied Unibus address.  Resource map
+	 * indices start at one, hence the +1/-1 adjustments.
+	 */
+	if (alloc == 0)
+		mr = (addr>>9)+1;
+	else {
+		s = spl6();
+		mr = rmget(uh->uh_map, size, (addr>>9)+1);	/* starts at ONE! */
+		splx(s);
+	}
+	if (mr == 0)
+		return (0);
+
+	/* Clear every map register in the block, turning 'valid' off. */
+	map = (int *)&uh->uh_uba->uba_map[mr-1];
+	for (i = size; --i >= 0; )
+		*map++ = 0;
+#if VAX780
+	if (cpu == VAX_780) {		/* map disable */
+		i = (addr+size*512+8191)/8192;
+		uh->uh_uba->uba_cr |= i<<26;
+	}
+#endif
+	return (mr);
+}
+
+/*
+ * Map a virtual address into the user's address space.  Actually all we
+ * do is turn on the user mode write protection bits for the particular
+ * page of memory involved.
+ */
+maptouser(vaddress)
+	caddr_t vaddress;
+{
+	register struct pte *pte;
+
+	/*
+	 * Find the system page table entry for this kernel virtual
+	 * address (bit 31 stripped, 512-byte pages) and set its
+	 * protection field to allow user-mode writes.
+	 */
+	pte = &Sysmap[((unsigned)vaddress - 0x80000000) >> 9];
+	pte->pg_prot = PG_UW >> 27;
+}
+
+unmaptouser(vaddress)
+	caddr_t vaddress;
+{
+	register struct pte *pte;
+
+	/*
+	 * Undo maptouser(): restore the page's protection field to
+	 * kernel-write-only so user mode can no longer touch it.
+	 */
+	pte = &Sysmap[((unsigned)vaddress - 0x80000000) >> 9];
+	pte->pg_prot = PG_KW >> 27;
+}