/*
- * Copyright (c) 1987 Carnegie-Mellon University
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
- * The Mach Operating System project at Carnegie-Mellon University.
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
*
- * The CMU software License Agreement specifies the terms and conditions
- * for use and redistribution.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
- * @(#)pmap.c 7.1 (Berkeley) %G%
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.c 7.5 (Berkeley) 5/10/91
*/
/*
* For 68020/68030 machines with HP, 68551, or 68030 MMUs
* (models 320,350,318,319,330,340,360,370,345,375)
* Don't even pay lip service to multiprocessor support.
+ *
+ * XXX will only work for PAGE_SIZE == NBPG (hppagesperpage == 1)
+ * right now because of the assumed one-to-one relationship of PT
+ * pages to STEs.
*/
/*
*/
#include "param.h"
-#include "../vm/vm_param.h"
-#include "user.h"
#include "proc.h"
-#include "lock.h"
#include "malloc.h"
+#include "user.h"
+
+#include "pte.h"
-#include "../vm/pmap.h"
-#include "../vm/vm_map.h"
-#include "../vm/vm_kern.h"
-#include "../vm/vm_prot.h"
-#include "../vm/vm_page.h"
+#include "vm/vm.h"
+#include "vm/vm_kern.h"
+#include "vm/vm_page.h"
+#include "vm/vm_statistics.h"
-#include "machine/cpu.h"
+#include "../include/cpu.h"
/*
* Allocate various and sundry SYSMAPs used in the days of old VM
#define PVF_REMOVE 0x02
#define PVF_PROTECT 0x04
#define PVF_TOTAL 0x80
+
+extern vm_offset_t pager_sva, pager_eva;
#endif
/*
vm_offset_t addr, addr2;
vm_size_t npg, s;
int rv;
- extern vm_offset_t DIObase;
+ extern char kstack[];
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in locore.
*/
- addr = DIObase;
- (void) vm_map_find(kernel_map, VM_OBJECT_NULL, (vm_offset_t) 0,
- &addr, hp300_ptob(IOMAPSIZE), FALSE);
- if (addr != DIObase)
+ addr = (vm_offset_t) intiobase;
+ (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
+ &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
+ if (addr != (vm_offset_t)intiobase)
goto bogons;
addr = (vm_offset_t) Sysmap;
vm_object_reference(kernel_object);
if (addr != (vm_offset_t)Sysmap)
goto bogons;
- addr = (vm_offset_t) &u;
+ addr = (vm_offset_t) kstack;
vm_object_reference(kernel_object);
(void) vm_map_find(kernel_map, kernel_object, addr,
&addr, hp300_ptob(UPAGES), FALSE);
- if (addr != (vm_offset_t)&u)
+ if (addr != (vm_offset_t)kstack)
bogons:
panic("pmap_init: bogons in the VM system!\n");
* Allocate physical memory for kernel PT pages and their management.
* We need 1 PT page per possible task plus some slop.
*/
- npg = min(atop(HP_MAX_KPTSIZE), nproc+16);
+ npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
/*
* we already have kernel PT pages.
*/
addr = 0;
- rv = vm_map_find(kernel_map, VM_OBJECT_NULL, 0, &addr, s, TRUE);
+ rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
panic("pmap_init: kernel PT too small");
vm_map_remove(kernel_map, addr, addr + s);
* map where we want it.
*/
addr = HP_PTBASE;
- s = min(HP_PTMAXSIZE, nproc*HP_MAX_PTSIZE);
+ s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
addr2 = addr + s;
- rv = vm_map_find(kernel_map, VM_OBJECT_NULL, 0, &addr, s, TRUE);
+ rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
if (rv != KERN_SUCCESS)
panic("pmap_init: cannot allocate space for PT map");
pmap_reference(vm_map_pmap(kernel_map));
pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
- if (pt_map == VM_MAP_NULL)
+ if (pt_map == NULL)
panic("pmap_init: cannot create pt_map");
rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
if (rv != KERN_SUCCESS)
* Software use map does not need a pmap
*/
if (size)
- return(PMAP_NULL);
+ return(NULL);
/* XXX: is it ok to wait here? */
pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
- if (pmap == PMAP_NULL)
+#ifdef notifwewait
+ if (pmap == NULL)
panic("pmap_create: cannot allocate a pmap");
+#endif
+ bzero(pmap, sizeof(*pmap));
+ pmap_pinit(pmap);
+ return (pmap);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+void
+pmap_pinit(pmap)
+	register struct pmap *pmap;
+{
+#ifdef DEBUG
+	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+		printf("pmap_pinit(%x)\n", pmap);
+#endif
+	/*
+	 * NOTE(review): pm_ptab, pm_sref, pm_stats and pm_ptpages are not
+	 * cleared here -- callers are expected to hand in a zeroed structure
+	 * (pmap_create bzero()s the pmap before calling us), which is why
+	 * the explicit clears from the old pmap_create tail were dropped.
+	 */
	/*
	 * No need to allocate page table space yet but we do need a
	 * valid segment table. Initially, we point everyone at the
	 * "null" segment table. On the first pmap_enter, a real
	 * segment table will be allocated.
	 */
-	pmap->pm_ptab = PT_ENTRY_NULL;
	pmap->pm_stab = Segtabzero;
	pmap->pm_stchanged = TRUE;
-	pmap->pm_sref = 0;
	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
-	pmap->pm_stats.resident_count = 0;
-	pmap->pm_stats.wired_count = 0;
-	pmap->pm_ptpages = 0;
-	return(pmap);
}
/*
if (pmapdebug & PDB_FOLLOW)
printf("pmap_destroy(%x)\n", pmap);
#endif
- if (pmap == PMAP_NULL)
+ if (pmap == NULL)
return;
simple_lock(&pmap->pm_lock);
count = --pmap->pm_count;
simple_unlock(&pmap->pm_lock);
- if (count)
- return;
+ if (count == 0) {
+ pmap_release(pmap);
+ free((caddr_t)pmap, M_VMPMAP);
+ }
+}
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap)
+	register struct pmap *pmap;
+{
+
+#ifdef DEBUG
+	if (pmapdebug & PDB_FOLLOW)
+		printf("pmap_release(%x)\n", pmap);
+#endif
+#ifdef notdef /* DIAGNOSTIC */
+	/* count would be 0 from pmap_destroy... */
+	simple_lock(&pmap->pm_lock);
+	if (pmap->pm_count != 1)
+		panic("pmap_release count");
+#endif
+	/*
+	 * Return the user page table (if any) to pt_map and any private
+	 * segment table to kernel_map.  The pmap structure itself is
+	 * freed by pmap_destroy, not here.
+	 */
	if (pmap->pm_ptab)
		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
				 HP_MAX_PTSIZE);
	if (pmap->pm_stab != Segtabzero)
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
-	free((caddr_t)pmap, M_VMPMAP);
}
/*
if (pmapdebug & PDB_FOLLOW)
printf("pmap_reference(%x)\n", pmap);
#endif
- if (pmap != PMAP_NULL) {
+ if (pmap != NULL) {
simple_lock(&pmap->pm_lock);
pmap->pm_count++;
simple_unlock(&pmap->pm_lock);
printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
#endif
- if (pmap == PMAP_NULL)
+ if (pmap == NULL)
return;
#ifdef DEBUG
*pv = *npv;
free((caddr_t)npv, M_VMPVENT);
} else
- pv->pv_pmap = PMAP_NULL;
+ pv->pv_pmap = NULL;
#ifdef DEBUG
remove_stats.pvfirst++;
#endif
pv = npv;
}
#ifdef DEBUG
- if (npv == PV_ENTRY_NULL)
+ if (npv == NULL)
panic("pmap_remove: PA not in pv_tab");
#endif
ste = (int *)npv->pv_ptste;
* If only one mapping left we no longer need to cache inhibit
*/
if (pv->pv_pmap &&
- pv->pv_next == PV_ENTRY_NULL && (pv->pv_flags & PV_CI)) {
+ pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
#ifdef DEBUG
if (pmapdebug & PDB_CACHE)
printf("remove: clearing CI for pa %x\n", pa);
ptpmap->pm_stab,
ptpmap->pm_sref - 1);
if ((pmapdebug & PDB_PARANOIA) &&
- ptpmap->pm_stab != trunc_page(ste))
+ ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
panic("remove: bogus ste");
#endif
if (--(ptpmap->pm_sref) == 0) {
* pointer for current process so
* update now to reload hardware.
*/
- if (ptpmap == u.u_procp->p_map->pmap)
+ if (ptpmap == curproc->p_vmspace->vm_map.pmap)
PMAP_ACTIVATE(ptpmap,
- (struct pcb *)u.u_procp->p_addr);
+ (struct pcb *)curproc->p_addr, 1);
}
}
if (ptpmap == kernel_pmap)
}
/*
- * Routine:	pmap_remove_all
- * Function:
- *		Removes this physical page from
- *		all physical maps in which it resides.
- *		Reflects back modify bits to the pager.
+ *	pmap_page_protect:
+ *
+ *	Lower the permission for all mappings to a given page.
 */
void
-pmap_remove_all(pa)
-	vm_offset_t pa;
+pmap_page_protect(pa, prot)
+	vm_offset_t	pa;
+	vm_prot_t	prot;
{
	register pv_entry_t pv;
	int s;
#ifdef DEBUG
-	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
-		printf("pmap_remove_all(%x)\n", pa);
+	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
+	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
+		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
-	/*
-	 * Not one of ours
-	 */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;
-	pv = pa_to_pvh(pa);
-	s = splimp();
-	/*
-	 * Do it the easy way for now
-	 */
-	while (pv->pv_pmap != PMAP_NULL) {
+	switch (prot) {
+	/* permissions unchanged or raised: nothing to do */
+	case VM_PROT_ALL:
+		break;
+	/* copy_on_write */
+	case VM_PROT_READ:
+	case VM_PROT_READ|VM_PROT_EXECUTE:
+		pmap_changebit(pa, PG_RO, TRUE);
+		break;
+	/* remove_all */
+	default:
+		pv = pa_to_pvh(pa);
+		s = splimp();
+		/*
+		 * NOTE(review): this loop relies on pmap_remove unlinking
+		 * the pv entry for each mapping it tears down, otherwise
+		 * it would not terminate -- verify against pmap_remove.
+		 */
+		while (pv->pv_pmap != NULL) {
#ifdef DEBUG
-		if (!pmap_ste_v(pmap_ste(pv->pv_pmap, pv->pv_va)) ||
-		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
-			panic("pmap_remove_all: bad mapping");
+			if (!pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)) ||
+			    pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
+				panic("pmap_page_protect: bad mapping");
#endif
-		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
+			pmap_remove(pv->pv_pmap, pv->pv_va,
+				    pv->pv_va + PAGE_SIZE);
+		}
+		splx(s);
+		break;
	}
-	splx(s);
-}
-
-/*
- *	Routine:	pmap_copy_on_write
- *	Function:
- *		Remove write privileges from all
- *		physical maps for this physical page.
- */
-void
-pmap_copy_on_write(pa)
-	vm_offset_t pa;
-{
-#ifdef DEBUG
-	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
-		printf("pmap_copy_on_write(%x)\n", pa);
-#endif
-	pmap_changebit(pa, PG_RO, TRUE);
-}
}
/*
if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
- if (pmap == PMAP_NULL)
+ if (pmap == NULL)
return;
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
printf("pmap_enter(%x, %x, %x, %x, %x)\n",
pmap, va, pa, prot, wired);
#endif
- if (pmap == PMAP_NULL)
+ if (pmap == NULL)
return;
#ifdef DEBUG
/*
* For user mapping, allocate kernel VM resources if necessary.
*/
- if (pmap->pm_ptab == PT_ENTRY_NULL)
+ if (pmap->pm_ptab == NULL)
pmap->pm_ptab = (pt_entry_t *)
kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
/*
* No entries yet, use header as the first entry
*/
- if (pv->pv_pmap == PMAP_NULL) {
+ if (pv->pv_pmap == NULL) {
#ifdef DEBUG
enter_stats.firstpv++;
#endif
pv->pv_va = va;
pv->pv_pmap = pmap;
- pv->pv_next = PV_ENTRY_NULL;
- pv->pv_ptste = ST_ENTRY_NULL;
- pv->pv_ptpmap = PMAP_NULL;
+ pv->pv_next = NULL;
+ pv->pv_ptste = NULL;
+ pv->pv_ptpmap = NULL;
pv->pv_flags = 0;
}
/*
npv->pv_va = va;
npv->pv_pmap = pmap;
npv->pv_next = pv->pv_next;
- npv->pv_ptste = ST_ENTRY_NULL;
- npv->pv_ptpmap = PMAP_NULL;
+ npv->pv_ptste = NULL;
+ npv->pv_ptpmap = NULL;
pv->pv_next = npv;
#ifdef DEBUG
if (!npv->pv_next)
if (pmapdebug & PDB_FOLLOW)
printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
- if (pmap == PMAP_NULL)
+ if (pmap == NULL)
return;
pte = pmap_pte(pmap, va);
if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
break;
} while (pv = pv->pv_next);
- if (pv == PV_ENTRY_NULL)
+ if (pv == NULL)
continue;
#ifdef DEBUG
if (pv->pv_va < (vm_offset_t)Sysmap ||
if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
printf("pmap_activate(%x, %x)\n", pmap, pcbp);
#endif
- PMAP_ACTIVATE(pmap, pcbp);
-}
-
-/*
- * Routine: pmap_kernel
- * Function:
- * Returns the physical map handle for the kernel.
- */
-pmap_t
-pmap_kernel()
-{
- return (kernel_pmap);
+ PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
}
/*
if (pa < vm_first_phys || pa >= vm_last_phys)
return;
pv = pa_to_pvh(pa);
- if (pv->pv_ptste == ST_ENTRY_NULL)
+ if (pv->pv_ptste == NULL)
return;
#ifdef DEBUG
if (pv->pv_va != sva || pv->pv_next) {
/*
* Mark it unmodified to avoid pageout
*/
- pmap_clear_modify(pa);
+ pmap_changebit(pa, PG_M, FALSE);
#ifdef DEBUG
if (pmapdebug & PDB_PTPAGE)
printf("pmap_pageable: PT page %x(%x) unmodified\n",
* Not found, check current mappings returning
* immediately if found.
*/
- if (pv->pv_pmap != PMAP_NULL) {
+ if (pv->pv_pmap != NULL) {
for (; pv; pv = pv->pv_next) {
pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
ix = 0;
* Loop over all current mappings setting/clearing as appropos
* If setting RO do we need to clear the VAC?
*/
- if (pv->pv_pmap != PMAP_NULL) {
+ if (pv->pv_pmap != NULL) {
#ifdef DEBUG
int toflush = 0;
#endif
toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
#endif
va = pv->pv_va;
+
+ /*
+ * XXX don't write protect pager mappings
+ */
+ if (bit == PG_RO) {
+ extern vm_offset_t pager_sva, pager_eva;
+
+ if (va >= pager_sva && va < pager_eva)
+ continue;
+ }
+
pte = (int *) pmap_pte(pv->pv_pmap, va);
/*
* Flush VAC to ensure we get correct state of HW bits
* XXX may have changed segment table pointer for current
* process so update now to reload hardware.
*/
- if (pmap == u.u_procp->p_map->pmap)
- PMAP_ACTIVATE(pmap, (struct pcb *)u.u_procp->p_addr);
+ if (pmap == curproc->p_vmspace->vm_map.pmap)
+ PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
printf("enter: pmap %x stab %x\n",
} while (pv = pv->pv_next);
}
#ifdef DEBUG
- if (pv == PV_ENTRY_NULL)
+ if (pv == NULL)
panic("pmap_enter_ptpage: PT page not entered");
#endif
pv->pv_ptste = ste;