+/*
+ * Invalidate a single page denoted by pmap/va.
+ * If (pte != NULL), it is the already computed PTE for the page.
+ * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
+ * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
+ */
+/* static */
+void
+pmap_remove_mapping(pmap, va, pte, flags)
+ register pmap_t pmap;
+ register vm_offset_t va;
+ register pt_entry_t *pte;
+ int flags;
+{
+ register vm_offset_t pa;
+ register pv_entry_t pv, npv;
+ pmap_t ptpmap;
+ int *ste, s, bits;
+#ifdef DEBUG
+ pt_entry_t opte;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
+ pmap, va, pte, flags);
+#endif
+
+ /*
+ * PTE not provided, compute it from pmap and va.
+ */
+ if (pte == PT_ENTRY_NULL) {
+ pte = pmap_pte(pmap, va);
+ if (*(int *)pte == PG_NV)
+ return;
+ }
+#ifdef HAVEVAC
+ if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
+ /*
+ * Purge kernel side of VAC to ensure we get the correct
+ * state of any hardware maintained bits.
+ */
+ DCIS();
+#ifdef PMAPSTATS
+ remove_stats.sflushes++;
+#endif
+ /*
+ * If this is a non-CI user mapping for the current process,
+ * flush the VAC. Note that the kernel side was flushed
+ * above so we don't worry about non-CI kernel mappings.
+ */
+ if (pmap == curproc->p_vmspace->vm_map.pmap &&
+ !pmap_pte_ci(pte)) {
+ DCIU();
+#ifdef PMAPSTATS
+ remove_stats.uflushes++;
+#endif
+ }
+ }
+#endif
+ pa = pmap_pte_pa(pte);
+#ifdef DEBUG
+ opte = *pte;
+#endif
+#ifdef PMAPSTATS
+ remove_stats.removes++;
+#endif
+ /*
+ * Update statistics
+ */
+ if (pmap_pte_w(pte))
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * Invalidate the PTE after saving the reference modify info.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_REMOVE)
+ printf("remove: invalidating pte at %x\n", pte);
+#endif
+ /*
+ * Save the hardware-maintained reference (PG_U) and modify (PG_M)
+ * bits; they are folded into pmap_attributes[] at the bottom if
+ * this turns out to be a managed page.
+ */
+ bits = *(int *)pte & (PG_U|PG_M);
+ *(int *)pte = PG_NV;
+ if ((flags & PRM_TFLUSH) && active_pmap(pmap))
+ TBIS(va);
+ /*
+ * For user mappings decrement the wiring count on
+ * the PT page. We do this after the PTE has been
+ * invalidated because vm_map_pageable winds up in
+ * pmap_pageable which clears the modify bit for the
+ * PT page.
+ */
+ if (pmap != kernel_pmap) {
+#if defined(DEBUG) && NCPUS == 1
+ /*
+ * XXX this recursive use of the VM won't work on a MP
+ * (or when otherwise debugging simple locks). We might
+ * be called with the page queue lock held (e.g. from
+ * the pageout daemon) and vm_map_pageable might call
+ * vm_fault_unwire which would try to lock the page queues
+ * again. For debugging we hack and drop the lock.
+ */
+ int hadit = !simple_lock_try(&vm_page_queue_lock);
+ simple_unlock(&vm_page_queue_lock);
+#endif
+ (void) vm_map_pageable(pt_map, trunc_page(pte),
+ round_page(pte+1), TRUE);
+#ifdef DEBUG
+ if (pmapdebug & PDB_WIRING)
+ pmap_check_wiring("remove", trunc_page(pte));
+#if NCPUS == 1
+ if (hadit)
+ simple_lock(&vm_page_queue_lock);
+#endif
+#endif
+ }
+ /*
+ * If this isn't a managed page, we are all done.
+ */
+ if (pa < vm_first_phys || pa >= vm_last_phys)
+ return;
+ /*
+ * Otherwise remove it from the PV table
+ * (raise IPL since we may be called at interrupt time).
+ */
+ pv = pa_to_pvh(pa);
+ /*
+ * ste stays null unless the matching PV entry records a segment
+ * table entry for this page, i.e. the page being removed was
+ * itself a PT page (see the "if (ste)" section below).
+ */
+ ste = (int *)0;
+ s = splimp();
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ ste = (int *)pv->pv_ptste;
+ ptpmap = pv->pv_ptpmap;
+ npv = pv->pv_next;
+ if (npv) {
+ npv->pv_flags = pv->pv_flags;
+ *pv = *npv;
+ free((caddr_t)npv, M_VMPVENT);
+ } else
+ pv->pv_pmap = NULL;
+#ifdef PMAPSTATS
+ remove_stats.pvfirst++;
+#endif
+ } else {
+ for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+#ifdef PMAPSTATS
+ remove_stats.pvsearch++;
+#endif
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ break;
+ pv = npv;
+ }
+#ifdef DEBUG
+ if (npv == NULL)
+ panic("pmap_remove: PA not in pv_tab");
+#endif
+ ste = (int *)npv->pv_ptste;
+ ptpmap = npv->pv_ptpmap;
+ pv->pv_next = npv->pv_next;
+ free((caddr_t)npv, M_VMPVENT);
+ pv = pa_to_pvh(pa);
+ }
+#ifdef HAVEVAC
+ /*
+ * If only one mapping left we no longer need to cache inhibit
+ */
+ if (pmap_aliasmask &&
+ pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_CACHE)
+ printf("remove: clearing CI for pa %x\n", pa);
+#endif
+ pv->pv_flags &= ~PV_CI;
+ pmap_changebit(pa, PG_CI, FALSE);
+#ifdef DEBUG
+ if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
+ (PDB_CACHE|PDB_PVDUMP))
+ pmap_pvdump(pa);
+#endif
+ }
+#endif
+ /*
+ * If this was a PT page we must also remove the
+ * mapping from the associated segment table.
+ */
+ if (ste) {
+#ifdef PMAPSTATS
+ remove_stats.ptinvalid++;
+#endif
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+ printf("remove: ste was %x@%x pte was %x@%x\n",
+ *ste, ste, *(int *)&opte, pmap_pte(pmap, va));
+#endif
+#if defined(HP380)
+ if (mmutype == MMU_68040) {
+ int *este = &ste[NPTEPG/SG4_LEV3SIZE];
+
+ /*
+ * On the 68040 this PT page is referenced by
+ * NPTEPG/SG4_LEV3SIZE consecutive segment table
+ * entries; invalidate the whole run.
+ */
+ while (ste < este)
+ *ste++ = SG_NV;
+#ifdef DEBUG
+ /* rewind ste so the paranoia check below sees the base */
+ ste -= NPTEPG/SG4_LEV3SIZE;
+#endif
+ } else
+#endif
+ *ste = SG_NV;
+ /*
+ * If it was a user PT page, we decrement the
+ * reference count on the segment table as well,
+ * freeing it if it is now empty.
+ */
+ if (ptpmap != kernel_pmap) {
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
+ printf("remove: stab %x, refcnt %d\n",
+ ptpmap->pm_stab, ptpmap->pm_sref - 1);
+ if ((pmapdebug & PDB_PARANOIA) &&
+ ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
+ panic("remove: bogus ste");
+#endif
+ if (--(ptpmap->pm_sref) == 0) {
+#ifdef DEBUG
+ if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
+ printf("remove: free stab %x\n",
+ ptpmap->pm_stab);
+#endif
+ kmem_free_wakeup(st_map,
+ (vm_offset_t)ptpmap->pm_stab,
+ HP_STSIZE);
+ ptpmap->pm_stab = Segtabzero;
+ ptpmap->pm_stpa = Segtabzeropa;
+#if defined(HP380)
+ if (mmutype == MMU_68040)
+ ptpmap->pm_stfree = protostfree;
+#endif
+ ptpmap->pm_stchanged = TRUE;
+ /*
+ * XXX may have changed segment table
+ * pointer for current process so
+ * update now to reload hardware.
+ */
+ if (ptpmap == curproc->p_vmspace->vm_map.pmap)
+ PMAP_ACTIVATE(ptpmap,
+ (struct pcb *)curproc->p_addr, 1);
+ }
+#ifdef DEBUG
+ else if (ptpmap->pm_sref < 0)
+ panic("remove: sref < 0");
+#endif
+ }
+#if 0
+ /*
+ * XXX this should be unnecessary as we have been
+ * flushing individual mappings as we go.
+ */
+ if (ptpmap == kernel_pmap)
+ TBIAS();
+ else
+ TBIAU();
+#endif
+ /* the removed page no longer serves as a PT page */
+ pv->pv_flags &= ~PV_PTPAGE;
+ ptpmap->pm_ptpages--;
+ }
+ /*
+ * Update saved attributes for managed page
+ */
+ pmap_attributes[pa_index(pa)] |= bits;
+ splx(s);
+}
+