BSD 4_4_Lite2 release
[unix-history] usr/src/sys/hp300/hp300/pmap.c
index 6bc315c..6fe1246 100644
@@ -1,26 +1,69 @@
 /* 
- * Copyright (c) 1987 Carnegie-Mellon University
- * Copyright (c) 1991 Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1991, 1993
+ *     The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
- * The Mach Operating System project at Carnegie-Mellon University.
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
  *
- * The CMU software License Agreement specifies the terms and conditions
- * for use and redistribution.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
  *
- *     @(#)pmap.c      7.5 (Berkeley) %G%
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)pmap.c      8.7 (Berkeley) 5/17/95
  */
 
 /*
- *     HP9000/300 series physical map management code.
- *     For 68020/68030 machines with HP, 68551, or 68030 MMUs
- *             (models 320,350,318,319,330,340,360,370,345,375)
+ * HP9000/300 series physical map management code.
+ *
+ * Supports:
+ *     68020 with HP MMU       models 320, 350
+ *     68020 with 68551 MMU    models 318, 319, 330 (all untested)
+ *     68030 with on-chip MMU  models 340, 360, 370, 345, 375, 400
+ *     68040 with on-chip MMU  models 380, 425, 433
+ *
+ * Notes:
  *     Don't even pay lip service to multiprocessor support.
  *
- *     XXX will only work for PAGE_SIZE == NBPG (hppagesperpage == 1)
- *     right now because of the assumed one-to-one relationship of PT
- *     pages to STEs.
+ *     We assume TLB entries don't have process tags (except for the
+ *     supervisor/user distinction) so we only invalidate TLB entries
+ *     when changing mappings for the current (or kernel) pmap.  This is
+ *     technically not true for the 68551 but we flush the TLB on every
+ *     context switch, so it effectively winds up that way.
+ *
+ *     Bitwise and/or operations are significantly faster than bitfield
+ *     references so we use them when accessing STE/PTEs in the pmap_pte_*
+ *     macros.  Note also that the two are not always equivalent; e.g.:
+ *             (*(int *)pte & PG_PROT) [4] != pte->pg_prot [1]
+ *     and a couple of routines that deal with protection and wiring take
+ *     some shortcuts that assume the and/or definitions.
+ *
+ *     This implementation will only work for PAGE_SIZE == NBPG
+ *     (i.e. 4096 bytes).
  */
 
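A minimal user-space illustration of the mask-versus-bitfield point in the notes above (the layout below is a stand-in, not the hp300's real PTE format): masking the raw word keeps the bit in its hardware position, while a bitfield read normalizes it to 0 or 1, so the two expressions compare unequal even when they mean the same thing.

        #include <stdio.h>

        #define PG_PROT 0x04    /* protection bit, in its hardware position */

        union pte {
                unsigned int word;
                struct {                /* assumes LSB-first bitfield layout */
                        unsigned pg_other : 2;
                        unsigned pg_prot  : 1;
                        unsigned pg_rest  : 29;
                } bits;
        };

        int
        main(void)
        {
                union pte pte;

                pte.word = PG_PROT;                     /* write-protected page */
                printf("%u\n", pte.word & PG_PROT);     /* prints 4 */
                printf("%u\n", (unsigned)pte.bits.pg_prot); /* prints 1 */
                return (0);
        }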
 /*
  *     and to when physical maps must be made correct.
  */
 
-#include "param.h"
-#include "proc.h"
-#include "malloc.h"
-#include "user.h"
-
-#include "pte.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
 
-#include "vm/vm.h"
-#include "vm/vm_kern.h"
-#include "vm/vm_page.h"
-#include "vm/vm_statistics.h"
+#include <hp300/hp300/pte.h>
 
-#include "../include/cpu.h"
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
 
-/*
- * Allocate various and sundry SYSMAPs used in the days of old VM
- * and not yet converted.  XXX.
- */
-#define BSDVM_COMPAT   1
+#include <machine/cpu.h>
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
 struct {
        int collectscans;
        int collectpages;
@@ -81,8 +118,10 @@ struct {
        int kernel;     /* entering kernel mapping */
        int user;       /* entering user mapping */
        int ptpneeded;  /* needed to allocate a PT page */
+       int nochange;   /* no change at all */
        int pwchange;   /* no mapping change, just wiring or protection */
        int wchange;    /* no mapping change, just wiring */
+       int pchange;    /* no mapping change, just protection */
        int mchange;    /* was mapped but mapping to different page */
        int managed;    /* a managed page */
        int firstpv;    /* first mapping for this PA */
@@ -100,7 +139,23 @@ struct {
        int uflushes;
        int sflushes;
 } remove_stats;
+struct {
+       int calls;
+       int changed;
+       int alreadyro;
+       int alreadyrw;
+} protect_stats;
+struct chgstats {
+       int setcalls;
+       int sethits;
+       int setmiss;
+       int clrcalls;
+       int clrhits;
+       int clrmiss;
+} changebit_stats[16];
+#endif
 
+#ifdef DEBUG
 int debugmap = 0;
 int pmapdebug = 0x2000;
 #define PDB_FOLLOW     0x0001
@@ -114,15 +169,23 @@ int pmapdebug = 0x2000;
 #define PDB_COLLECT    0x0100
 #define PDB_PROTECT    0x0200
 #define PDB_SEGTAB     0x0400
+#define PDB_MULTIMAP   0x0800
 #define PDB_PARANOIA   0x2000
 #define PDB_WIRING     0x4000
 #define PDB_PVDUMP     0x8000
 
+#ifdef HAVEVAC
 int pmapvacflush = 0;
 #define        PVF_ENTER       0x01
 #define        PVF_REMOVE      0x02
 #define        PVF_PROTECT     0x04
 #define        PVF_TOTAL       0x80
+#endif
+
+#if defined(HP380)
+int dowriteback = 1;   /* 68040: enable writeback caching */
+int dokwriteback = 1;  /* 68040: enable writeback caching of kernel AS */
+#endif
 
 extern vm_offset_t pager_sva, pager_eva;
 #endif
@@ -130,23 +193,45 @@ extern vm_offset_t pager_sva, pager_eva;
 /*
  * Get STEs and PTEs for user/kernel address space
  */
-#define        pmap_ste(m, v)  (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
-#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
+#if defined(HP380)
+#define        pmap_ste1(m, v) \
+       (&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
+/* XXX assumes physically contiguous ST pages (if more than one) */
+#define pmap_ste2(m, v) \
+       (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
+                       - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
+#define        pmap_ste(m, v)  \
+       (&((m)->pm_stab[(vm_offset_t)(v) \
+                       >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
+#define pmap_ste_v(m, v) \
+       (mmutype == MMU_68040 \
+        ? ((*(int *)pmap_ste1(m, v) & SG_V) && \
+           (*(int *)pmap_ste2(m, v) & SG_V)) \
+        : (*(int *)pmap_ste(m, v) & SG_V))
+#else
+#define        pmap_ste(m, v)   (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
+#define pmap_ste_v(m, v) (*(int *)pmap_ste(m, v) & SG_V)
+#endif
 
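The 68040 path above folds a two-level segment-table walk into pointer arithmetic: pmap_ste1 indexes by the high VA bits, the level-1 descriptor holds the physical address of a level-2 table, and pmap_ste2 rebases that physical address against pm_stpa/pm_stab to recover a kernel virtual pointer. A self-contained sketch of just the index split (the shift and mask values are illustrative stand-ins for the SG4_* constants):

        #include <stdio.h>

        #define SHIFT1  25                      /* level-1 index: top VA bits */
        #define SHIFT2  18                      /* level-2 index */
        #define MASK2   0x01fc0000

        int
        main(void)
        {
                unsigned int va = 0x12345678;

                printf("level-1 index %u\n", va >> SHIFT1);
                printf("level-2 index %u\n", (va & MASK2) >> SHIFT2);
                printf("in-segment offset 0x%x\n", va & ((1u << SHIFT2) - 1));
                return (0);
        }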
+#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
 #define pmap_pte_pa(pte)       (*(int *)(pte) & PG_FRAME)
-
-#define pmap_ste_v(pte)                ((pte)->sg_v)
-#define pmap_pte_w(pte)                ((pte)->pg_w)
-#define pmap_pte_ci(pte)       ((pte)->pg_ci)
-#define pmap_pte_m(pte)                ((pte)->pg_m)
-#define pmap_pte_u(pte)                ((pte)->pg_u)
-#define pmap_pte_v(pte)                ((pte)->pg_v)
-#define pmap_pte_set_w(pte, v)         ((pte)->pg_w = (v))
-#define pmap_pte_set_prot(pte, v)      ((pte)->pg_prot = (v))
+#define pmap_pte_w(pte)                (*(int *)(pte) & PG_W)
+#define pmap_pte_ci(pte)       (*(int *)(pte) & PG_CI)
+#define pmap_pte_m(pte)                (*(int *)(pte) & PG_M)
+#define pmap_pte_u(pte)                (*(int *)(pte) & PG_U)
+#define pmap_pte_prot(pte)     (*(int *)(pte) & PG_PROT)
+#define pmap_pte_v(pte)                (*(int *)(pte) & PG_V)
+
+#define pmap_pte_set_w(pte, v) \
+       if (v) *(int *)(pte) |= PG_W; else *(int *)(pte) &= ~PG_W
+#define pmap_pte_set_prot(pte, v) \
+       if (v) *(int *)(pte) |= PG_PROT; else *(int *)(pte) &= ~PG_PROT
+#define pmap_pte_w_chg(pte, nw)                ((nw) ^ pmap_pte_w(pte))
+#define pmap_pte_prot_chg(pte, np)     ((np) ^ pmap_pte_prot(pte))
 
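Two things worth noting about the macros just added: pmap_pte_set_w and pmap_pte_set_prot expand to bare if/else statements (no do { ... } while (0) wrapper), so they are only safe where a complete statement is expected, and the *_chg macros XOR the requested bit against the current one, yielding non-zero exactly when a real change is requested. A standalone sketch of both idioms, with a stand-in PTE word:

        #include <stdio.h>

        #define PG_W    0x100                   /* stand-in wired bit */
        #define pte_w(pte)              (*(int *)(pte) & PG_W)
        #define pte_w_chg(pte, nw)      ((nw) ^ pte_w(pte))
        /* statement-shaped, unlike the bare if/else form above: */
        #define pte_set_w(pte, v) do { \
                if (v) *(int *)(pte) |= PG_W; else *(int *)(pte) &= ~PG_W; \
        } while (0)

        int
        main(void)
        {
                int pte = 0;

                if (pte_w_chg(&pte, PG_W))      /* wiring would change: true */
                        pte_set_w(&pte, 1);
                printf("pte %x, change now %x\n", pte, pte_w_chg(&pte, PG_W));
                return (0);
        }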
 /*
  * Given a map and a machine independent protection code,
- * convert to a vax protection code.
+ * convert to an hp300 protection code.
  */
 #define pte_prot(m, p) (protection_codes[p])
 int    protection_codes[8];
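pte_prot is a table lookup: the 3-bit machine-independent protection code (read/write/execute bit combinations) indexes protection_codes[], which the bootstrap code fills with hardware values. A hedged sketch of that initialization pattern, assuming the usual VM_PROT_WRITE bit and stand-in PG_RO/PG_RW values:

        #include <stdio.h>

        #define VM_PROT_WRITE   0x02    /* as in the VM headers */
        #define PG_RO           0x04    /* stand-in: write-protect bit set */
        #define PG_RW           0x00

        int protection_codes[8];

        int
        main(void)
        {
                int prot;

                for (prot = 0; prot < 8; prot++)
                        protection_codes[prot] =
                            (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
                printf("read-only code %x, read-write code %x\n",
                    protection_codes[1], protection_codes[3]);
                return (0);
        }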
@@ -173,16 +258,11 @@ struct kpt_page *kpt_pages;
  */
 st_entry_t     *Sysseg;
 pt_entry_t     *Sysmap, *Sysptmap;
-st_entry_t     *Segtabzero;
-#if BSDVM_COMPAT
-vm_size_t      Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
-#else
+st_entry_t     *Segtabzero, *Segtabzeropa;
 vm_size_t      Sysptsize = VM_KERNEL_PT_PAGES;
-#endif
 
 struct pmap    kernel_pmap_store;
-pmap_t         kernel_pmap;
-vm_map_t       pt_map;
+vm_map_t       st_map, pt_map;
 
 vm_offset_t            avail_start;    /* PA of first available physical page */
 vm_offset_t    avail_end;      /* PA of last available physical page */
@@ -191,110 +271,60 @@ vm_offset_t      virtual_avail;  /* VA of first avail page (after kernel bss)*/
 vm_offset_t    virtual_end;    /* VA of last avail page (end of kernel AS) */
 vm_offset_t    vm_first_phys;  /* PA of first managed page */
 vm_offset_t    vm_last_phys;   /* PA just past last managed page */
-int            hppagesperpage; /* PAGE_SIZE / HP_PAGE_SIZE */
 boolean_t      pmap_initialized = FALSE;       /* Has pmap_init completed? */
-int            pmap_aliasmask; /* seperation at which VA aliasing ok */
 char           *pmap_attributes;       /* reference and modify bits */
-
-boolean_t      pmap_testbit();
-void           pmap_enter_ptpage();
-
-#if BSDVM_COMPAT
-#include "msgbuf.h"
+#ifdef HAVEVAC
+int            pmap_aliasmask; /* separation at which VA aliasing ok */
+#endif
+#if defined(HP380)
+int            protostfree;    /* prototype (default) free ST map */
+#endif
 
 /*
- * All those kernel PT submaps that BSD is so fond of
+ * Internal routines
  */
-struct pte     *CMAP1, *CMAP2, *mmap;
-caddr_t                CADDR1, CADDR2, vmmap;
-struct pte     *msgbufmap;
-struct msgbuf  *msgbufp;
+void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
+boolean_t pmap_testbit __P((vm_offset_t, int));
+void pmap_changebit    __P((vm_offset_t, int, boolean_t));
+void pmap_enter_ptpage __P((pmap_t, vm_offset_t));
+#ifdef DEBUG
+void pmap_pvdump       __P((vm_offset_t));
+void pmap_check_wiring __P((char *, vm_offset_t));
 #endif
 
+/* pmap_remove_mapping flags */
+#define        PRM_TFLUSH      1
+#define        PRM_CFLUSH      2
+
 /*
- *     Bootstrap the system enough to run with virtual memory.
- *     Map the kernel's code and data, and allocate the system page table.
+ * Bootstrap memory allocator. This function allows for early dynamic
+ * memory allocation until the virtual memory system has been bootstrapped.
+ * After that point, either kmem_alloc or malloc should be used. This
+ * function works by stealing pages from the (to be) managed page pool,
+ * stealing virtual address space, then mapping the pages and zeroing them.
  *
- *     On the HP this is called after mapping has already been enabled
- *     and just syncs the pmap module with what has already been done.
- *     [We can't call it easily with mapping off since the kernel is not
- *     mapped with PA == VA, hence we would have to relocate every address
- *     from the linked base (virtual) address 0 to the actual (physical)
- *     address of 0xFFxxxxxx.]
+ * It should be used from pmap_bootstrap till vm_page_startup, afterwards
+ * it cannot be used, and will generate a panic if tried. Note that this
+ * memory will never be freed, and in essence it is wired down.
  */
-void
-pmap_bootstrap(firstaddr, loadaddr)
-       vm_offset_t firstaddr;
-       vm_offset_t loadaddr;
+void *
+pmap_bootstrap_alloc(size)
+       int size;
 {
-#if BSDVM_COMPAT
-       vm_offset_t va;
-       struct pte *pte;
-#endif
-       extern vm_offset_t maxmem, physmem;
-
-       avail_start = firstaddr;
-       avail_end = maxmem << PGSHIFT;
-
-       /* XXX: allow for msgbuf */
-       avail_end -= hp300_round_page(sizeof(struct msgbuf));
-
-       mem_size = physmem << PGSHIFT;
-       virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
-       virtual_end = VM_MAX_KERNEL_ADDRESS;
-       hppagesperpage = PAGE_SIZE / HP_PAGE_SIZE;
-
-       /*
-        * Determine VA aliasing distance if any
-        */
-       if (ectype == EC_VIRT)
-               switch (machineid) {
-               case HP_320:
-                       pmap_aliasmask = 0x3fff;        /* 16k */
-                       break;
-               case HP_350:
-                       pmap_aliasmask = 0x7fff;        /* 32k */
-                       break;
-               }
-
-       /*
-        * Initialize protection array.
-        */
-       hp300_protection_init();
-
-       /*
-        * The kernel's pmap is statically allocated so we don't
-        * have to use pmap_create, which is unlikely to work
-        * correctly at this part of the boot sequence.
-        */
-       kernel_pmap = &kernel_pmap_store;
-
-       /*
-        * Kernel page/segment table allocated in locore,
-        * just initialize pointers.
-        */
-       kernel_pmap->pm_stab = Sysseg;
-       kernel_pmap->pm_ptab = Sysmap;
-
-       simple_lock_init(&kernel_pmap->pm_lock);
-       kernel_pmap->pm_count = 1;
-
-#if BSDVM_COMPAT
-       /*
-        * Allocate all the submaps we need
-        */
-#define        SYSMAP(c, p, v, n)      \
-       v = (c)va; va += ((n)*HP_PAGE_SIZE); p = pte; pte += (n);
-
-       va = virtual_avail;
-       pte = pmap_pte(kernel_pmap, va);
-
-       SYSMAP(caddr_t          ,CMAP1          ,CADDR1    ,1           )
-       SYSMAP(caddr_t          ,CMAP2          ,CADDR2    ,1           )
-       SYSMAP(caddr_t          ,mmap           ,vmmap     ,1           )
-       SYSMAP(struct msgbuf *  ,msgbufmap      ,msgbufp   ,1           )
-       virtual_avail = va;
-#endif
+       extern boolean_t vm_page_startup_initialized;
+       vm_offset_t val;
+       
+       if (vm_page_startup_initialized)
+               panic("pmap_bootstrap_alloc: called after startup initialized");
+       size = round_page(size);
+       val = virtual_avail;
+
+       virtual_avail = pmap_map(virtual_avail, avail_start,
+               avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
+       avail_start += size;
+
+       blkclr ((caddr_t) val, size);
+       return ((void *) val);
 }
 
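pmap_bootstrap_alloc is a classic bump allocator: round the request up to whole pages, hand out the next chunk of virtual_avail backed by pages taken from avail_start, zero it, and never free it. A user-space model of the same arithmetic (the arena and bookkeeping variables are stand-ins for the stolen physical pages):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        #define NBPG            4096
        #define round_page(x)   (((x) + NBPG - 1) & ~(NBPG - 1))

        static char arena[16 * NBPG];   /* stands in for the stolen pages */
        static size_t avail;            /* stands in for avail_start */

        static void *
        bootstrap_alloc(size_t size)
        {
                void *val;

                size = round_page(size);
                if (avail + size > sizeof(arena))
                        abort();        /* the kernel version panics */
                val = arena + avail;
                avail += size;
                memset(val, 0, size);   /* the blkclr() step */
                return (val);
        }

        int
        main(void)
        {
                printf("%p %p\n", bootstrap_alloc(100), bootstrap_alloc(5000));
                return (0);
        }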
 /*
@@ -362,14 +392,16 @@ bogons:
        s = round_page(s);
        addr = (vm_offset_t) kmem_alloc(kernel_map, s);
        Segtabzero = (st_entry_t *) addr;
+       Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
        addr += HP_STSIZE;
        pv_table = (pv_entry_t) addr;
        addr += sizeof(struct pv_entry) * npg;
        pmap_attributes = (char *) addr;
 #ifdef DEBUG
        if (pmapdebug & PDB_INIT)
-               printf("pmap_init: %x bytes (%x pgs): seg %x tbl %x attr %x\n",
-                      s, npg, Segtabzero, pv_table, pmap_attributes);
+               printf("pmap_init: %x bytes: npg %x s0 %x(%x) tbl %x atr %x\n",
+                      s, npg, Segtabzero, Segtabzeropa,
+                      pv_table, pmap_attributes);
 #endif
 
        /*
@@ -405,13 +437,21 @@ bogons:
                kpt_pages->kpt_va = addr2;
                kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
        } while (addr != addr2);
-#ifdef DEBUG
+#ifdef PMAPSTATS
        kpt_stats.kpttotal = atop(s);
+#endif
+#ifdef DEBUG
        if (pmapdebug & PDB_INIT)
                printf("pmap_init: KPT: %d pages from %x to %x\n",
                       atop(s), addr, addr + s);
 #endif
 
+       /*
+        * Allocate the segment table map
+        */
+       s = maxproc * HP_STSIZE;
+       st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);
+
        /*
         * Slightly modified version of kmem_suballoc() to get page table
         * map where we want it.
@@ -434,6 +474,14 @@ bogons:
                printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
 #endif
 
+#if defined(HP380)
+       if (mmutype == MMU_68040) {
+               protostfree = ~l2tobm(0);
+               for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
+                       protostfree &= ~l2tobm(rv);
+       }
+#endif
+
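protostfree seeds each new 68040 pmap with a one-word bitmap of free level-2 segment-table blocks: a set bit means the block is still free, and the loop above clears every bit at or beyond MAXUL2SIZE so those slots can never be handed out. A self-contained sketch of that style of bookkeeping (the names and sizes here are illustrative):

        #include <stdio.h>

        #define l2tobm(n)       (1 << (n))      /* block number -> bitmap bit */
        #define NBLOCKS         16              /* stand-in map size */

        static unsigned int stfree;

        static int
        st_alloc(void)
        {
                int n;

                for (n = 0; n < NBLOCKS; n++)
                        if (stfree & l2tobm(n)) {
                                stfree &= ~l2tobm(n);   /* mark in use */
                                return (n);
                        }
                return (-1);                    /* map exhausted */
        }

        int
        main(void)
        {
                stfree = ~l2tobm(0) & 0xffff;   /* block 0 reserved, as in proto */
                printf("first free block: %d\n", st_alloc());
                return (0);
        }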
        /*
         * Now it is safe to enable pv_table recording.
         */
@@ -527,6 +575,11 @@ pmap_pinit(pmap)
         * segment table will be allocated.
         */
        pmap->pm_stab = Segtabzero;
+       pmap->pm_stpa = Segtabzeropa;
+#if defined(HP380)
+       if (mmutype == MMU_68040)
+               pmap->pm_stfree = protostfree;
+#endif
        pmap->pm_stchanged = TRUE;
        pmap->pm_count = 1;
        simple_lock_init(&pmap->pm_lock);
@@ -583,7 +636,8 @@ pmap_release(pmap)
                kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
                                 HP_MAX_PTSIZE);
        if (pmap->pm_stab != Segtabzero)
-               kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
+               kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
+                                HP_STSIZE);
 }
 
 /*
@@ -613,19 +667,14 @@ pmap_reference(pmap)
 void
 pmap_remove(pmap, sva, eva)
        register pmap_t pmap;
-       vm_offset_t sva, eva;
+       register vm_offset_t sva, eva;
 {
-       register vm_offset_t pa, va;
+       register vm_offset_t nssva;
        register pt_entry_t *pte;
-       register pv_entry_t pv, npv;
-       register int ix;
-       pmap_t ptpmap;
-       int *ste, s, bits;
-       boolean_t firstpage = TRUE;
-       boolean_t flushcache = FALSE;
-#ifdef DEBUG
-       pt_entry_t opte;
+       boolean_t firstpage, needcflush;
+       int flags;
 
+#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
                printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
 #endif
@@ -633,240 +682,101 @@ pmap_remove(pmap, sva, eva)
        if (pmap == NULL)
                return;
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
        remove_stats.calls++;
 #endif
-       for (va = sva; va < eva; va += PAGE_SIZE) {
-               /*
-                * Weed out invalid mappings.
-                * Note: we assume that the segment table is always allocated.
-                */
-               if (!pmap_ste_v(pmap_ste(pmap, va))) {
-                       /* XXX: avoid address wrap around */
-                       if (va >= hp300_trunc_seg((vm_offset_t)-1))
-                               break;
-                       va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
-                       continue;
-               }
-               pte = pmap_pte(pmap, va);
-               pa = pmap_pte_pa(pte);
-               if (pa == 0)
-                       continue;
-               /*
-                * Invalidating a non-CI page, must flush external VAC
-                * unless it is a supervisor mapping and we have already
-                * flushed the supervisor side.
-                */
-               if (pmap_aliasmask && !pmap_pte_ci(pte) &&
-                   !(pmap == kernel_pmap && firstpage))
-                       flushcache = TRUE;
-#ifdef DEBUG
-               opte = *pte;
-               remove_stats.removes++;
-#endif
-               /*
-                * Update statistics
-                */
-               if (pmap_pte_w(pte))
-                       pmap->pm_stats.wired_count--;
-               pmap->pm_stats.resident_count--;
-
-               /*
-                * Invalidate the PTEs.
-                * XXX: should cluster them up and invalidate as many
-                * as possible at once.
-                */
-#ifdef DEBUG
-               if (pmapdebug & PDB_REMOVE)
-                       printf("remove: invalidating %x ptes at %x\n",
-                              hppagesperpage, pte);
-#endif
-               /*
-                * Flush VAC to ensure we get the correct state of any
-                * hardware maintained bits.
-                */
-               if (firstpage && pmap_aliasmask) {
-                       firstpage = FALSE;
-                       if (pmap == kernel_pmap)
-                               flushcache = FALSE;
-                       DCIS();
-#ifdef DEBUG
-                       remove_stats.sflushes++;
-#endif
-               }
-               bits = ix = 0;
-               do {
-                       bits |= *(int *)pte & (PG_U|PG_M);
-                       *(int *)pte++ = PG_NV;
-                       TBIS(va + ix * HP_PAGE_SIZE);
-               } while (++ix != hppagesperpage);
-
-               /*
-                * For user mappings decrement the wiring count on
-                * the PT page.  We do this after the PTE has been
-                * invalidated because vm_map_pageable winds up in
-                * pmap_pageable which clears the modify bit for the
-                * PT page.
-                */
-               if (pmap != kernel_pmap) {
-                       pte = pmap_pte(pmap, va);
-                       vm_map_pageable(pt_map, trunc_page(pte),
-                                       round_page(pte+1), TRUE);
-#ifdef DEBUG
-                       if (pmapdebug & PDB_WIRING)
-                               pmap_check_wiring("remove", trunc_page(pte));
-#endif
-               }
+       firstpage = TRUE;
+       needcflush = FALSE;
+       flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
+       while (sva < eva) {
+               nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
+               if (nssva == 0 || nssva > eva)
+                       nssva = eva;
                /*
-                * Remove from the PV table (raise IPL since we
-                * may be called at interrupt time).
+                * If VA belongs to an unallocated segment,
+                * skip to the next segment boundary.
                 */
-               if (pa < vm_first_phys || pa >= vm_last_phys)
+               if (!pmap_ste_v(pmap, sva)) {
+                       sva = nssva;
                        continue;
-               pv = pa_to_pvh(pa);
-               ste = (int *)0;
-               s = splimp();
-               /*
-                * If it is the first entry on the list, it is actually
-                * in the header and we must copy the following entry up
-                * to the header.  Otherwise we must search the list for
-                * the entry.  In either case we free the now unused entry.
-                */
-               if (pmap == pv->pv_pmap && va == pv->pv_va) {
-                       ste = (int *)pv->pv_ptste;
-                       ptpmap = pv->pv_ptpmap;
-                       npv = pv->pv_next;
-                       if (npv) {
-                               *pv = *npv;
-                               free((caddr_t)npv, M_VMPVENT);
-                       } else
-                               pv->pv_pmap = NULL;
-#ifdef DEBUG
-                       remove_stats.pvfirst++;
-#endif
-               } else {
-                       for (npv = pv->pv_next; npv; npv = npv->pv_next) {
-#ifdef DEBUG
-                               remove_stats.pvsearch++;
-#endif
-                               if (pmap == npv->pv_pmap && va == npv->pv_va)
-                                       break;
-                               pv = npv;
-                       }
-#ifdef DEBUG
-                       if (npv == NULL)
-                               panic("pmap_remove: PA not in pv_tab");
-#endif
-                       ste = (int *)npv->pv_ptste;
-                       ptpmap = npv->pv_ptpmap;
-                       pv->pv_next = npv->pv_next;
-                       free((caddr_t)npv, M_VMPVENT);
-                       pv = pa_to_pvh(pa);
                }
                /*
-                * If only one mapping left we no longer need to cache inhibit
+                * Invalidate every valid mapping within this segment.
                 */
-               if (pv->pv_pmap &&
-                   pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
-#ifdef DEBUG
-                       if (pmapdebug & PDB_CACHE)
-                               printf("remove: clearing CI for pa %x\n", pa);
-#endif
-                       pv->pv_flags &= ~PV_CI;
-                       pmap_changebit(pa, PG_CI, FALSE);
-#ifdef DEBUG
-                       if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
-                           (PDB_CACHE|PDB_PVDUMP))
-                               pmap_pvdump(pa);
-#endif
-               }
-
-               /*
-                * If this was a PT page we must also remove the
-                * mapping from the associated segment table.
-                */
-               if (ste) {
-#ifdef DEBUG
-                       remove_stats.ptinvalid++;
-                       if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
-                               printf("remove: ste was %x@%x pte was %x@%x\n",
-                                      *ste, ste,
-                                      *(int *)&opte, pmap_pte(pmap, va));
-                       }
-#endif
-                       *ste = SG_NV;
-                       /*
-                        * If it was a user PT page, we decrement the
-                        * reference count on the segment table as well,
-                        * freeing it if it is now empty.
-                        */
-                       if (ptpmap != kernel_pmap) {
-#ifdef DEBUG
-                               if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
-                                       printf("remove: stab %x, refcnt %d\n",
-                                              ptpmap->pm_stab,
-                                              ptpmap->pm_sref - 1);
-                               if ((pmapdebug & PDB_PARANOIA) &&
-                                   ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
-                                       panic("remove: bogus ste");
-#endif
-                               if (--(ptpmap->pm_sref) == 0) {
-#ifdef DEBUG
-                                       if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
-                                       printf("remove: free stab %x\n",
-                                              ptpmap->pm_stab);
+               pte = pmap_pte(pmap, sva);
+               while (sva < nssva) {
+                       if (pmap_pte_v(pte)) {
+#ifdef HAVEVAC
+                               if (pmap_aliasmask) {
+                                       /*
+                                        * Purge kernel side of VAC to ensure
+                                        * we get the correct state of any
+                                        * hardware maintained bits.
+                                        */
+                                       if (firstpage) {
+                                               DCIS();
+#ifdef PMAPSTATS
+                                               remove_stats.sflushes++;
 #endif
-                                       kmem_free(kernel_map,
-                                                 (vm_offset_t)ptpmap->pm_stab,
-                                                 HP_STSIZE);
-                                       ptpmap->pm_stab = Segtabzero;
-                                       ptpmap->pm_stchanged = TRUE;
+                                       }
                                        /*
-                                        * XXX may have changed segment table
-                                        * pointer for current process so
-                                        * update now to reload hardware.
+                                        * Remember if we may need to
+                                        * flush the VAC due to a non-CI
+                                        * mapping.
                                         */
-                                       if (ptpmap == curproc->p_vmspace->vm_map.pmap)
-                                               PMAP_ACTIVATE(ptpmap,
-                                                       (struct pcb *)curproc->p_addr, 1);
+                                       if (!needcflush && !pmap_pte_ci(pte))
+                                               needcflush = TRUE;
+
                                }
+#endif
+                               pmap_remove_mapping(pmap, sva, pte, flags);
+                               firstpage = FALSE;
                        }
-                       if (ptpmap == kernel_pmap)
-                               TBIAS();
-                       else
-                               TBIAU();
-                       pv->pv_flags &= ~PV_PTPAGE;
-                       ptpmap->pm_ptpages--;
+                       pte++;
+                       sva += PAGE_SIZE;
                }
-               /*
-                * Update saved attributes for managed page
-                */
-               pmap_attributes[pa_index(pa)] |= bits;
-               splx(s);
        }
+       /*
+        * Didn't do anything, no need for cache flushes
+        */
+       if (firstpage)
+               return;
+#ifdef HAVEVAC
+       /*
+        * In a couple of cases, we don't need to worry about flushing
+        * the VAC:
+        *      1. if this is a kernel mapping,
+        *         we have already done it
+        *      2. if it is a user mapping not for the current process,
+        *         it won't be there
+        */
+       if (pmap_aliasmask &&
+           (pmap == kernel_pmap || pmap != curproc->p_vmspace->vm_map.pmap))
+               needcflush = FALSE;
 #ifdef DEBUG
-       if (pmapvacflush & PVF_REMOVE) {
+       if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
                if (pmapvacflush & PVF_TOTAL)
                        DCIA();
                else if (pmap == kernel_pmap)
                        DCIS();
                else
                        DCIU();
-       }
+       } else
 #endif
-       if (flushcache) {
+       if (needcflush) {
                if (pmap == kernel_pmap) {
                        DCIS();
-#ifdef DEBUG
+#ifdef PMAPSTATS
                        remove_stats.sflushes++;
 #endif
                } else {
                        DCIU();
-#ifdef DEBUG
+#ifdef PMAPSTATS
                        remove_stats.uflushes++;
 #endif
                }
        }
+#endif
 }
 
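One subtlety in the segment loop above: hp300_trunc_seg(sva) + HP_SEG_SIZE can wrap past the top of the address space, which is why the guard tests nssva == 0 as well as nssva > eva. A worked illustration with 32-bit arithmetic (the segment size is a stand-in):

        #include <stdio.h>

        #define SEG_SIZE        0x40000         /* stand-in for HP_SEG_SIZE */
        #define trunc_seg(va)   ((va) & ~(SEG_SIZE - 1))

        int
        main(void)
        {
                unsigned int sva = 0xfffc1000;  /* inside the last segment */
                unsigned int nssva = trunc_seg(sva) + SEG_SIZE;

                printf("nssva = 0x%x\n", nssva);        /* wraps to 0 */
                return (0);
        }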
 /*
@@ -891,29 +801,42 @@ pmap_page_protect(pa, prot)
                return;
 
        switch (prot) {
+       case VM_PROT_READ|VM_PROT_WRITE:
        case VM_PROT_ALL:
-               break;
+               return;
        /* copy_on_write */
        case VM_PROT_READ:
        case VM_PROT_READ|VM_PROT_EXECUTE:
                pmap_changebit(pa, PG_RO, TRUE);
-               break;
+               return;
        /* remove_all */
        default:
-               pv = pa_to_pvh(pa);
-               s = splimp();
-               while (pv->pv_pmap != NULL) {
+               break;
+       }
+       pv = pa_to_pvh(pa);
+       s = splimp();
+       while (pv->pv_pmap != NULL) {
+               register pt_entry_t *pte;
+
+               pte = pmap_pte(pv->pv_pmap, pv->pv_va);
 #ifdef DEBUG
-                       if (!pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)) ||
-                           pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
-                               panic("pmap_page_protect: bad mapping");
+               if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
+                   pmap_pte_pa(pte) != pa)
+                       panic("pmap_page_protect: bad mapping");
+#endif
+               if (!pmap_pte_w(pte))
+                       pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+                                           pte, PRM_TFLUSH|PRM_CFLUSH);
+               else {
+                       pv = pv->pv_next;
+#ifdef DEBUG
+                       if (pmapdebug & PDB_PARANOIA)
+                               printf("%s wired mapping for %x not removed\n",
+                                      "pmap_page_protect:", pa);
 #endif
-                       pmap_remove(pv->pv_pmap, pv->pv_va,
-                                   pv->pv_va + PAGE_SIZE);
                }
-               splx(s);
-               break;
        }
+       splx(s);
 }
 
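After the rewrite, pmap_page_protect's dispatch reduces to: any protection that still permits writing is a no-op, read-without-write downgrades every mapping to read-only, and everything else removes the mappings outright (skipping wired ones). A compact check of that reduction against the standard VM protection bits:

        #include <stdio.h>

        #define VM_PROT_READ    0x01
        #define VM_PROT_WRITE   0x02
        #define VM_PROT_EXECUTE 0x04

        int
        main(void)
        {
                int prot;

                for (prot = 0; prot < 8; prot++)
                        printf("prot %d -> %s\n", prot,
                            ((prot & VM_PROT_READ) == 0) ? "remove mappings" :
                            (prot & VM_PROT_WRITE) ? "no-op" :
                            "make read-only");
                return (0);
        }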
 /*
@@ -923,22 +846,25 @@ pmap_page_protect(pa, prot)
 void
 pmap_protect(pmap, sva, eva, prot)
        register pmap_t pmap;
-       vm_offset_t     sva, eva;
-       vm_prot_t       prot;
+       register vm_offset_t sva, eva;
+       vm_prot_t prot;
 {
+       register vm_offset_t nssva;
        register pt_entry_t *pte;
-       register vm_offset_t va;
-       register int ix;
-       int hpprot;
-       boolean_t firstpage = TRUE;
+       boolean_t firstpage, needtflush;
+       int isro;
 
 #ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
                printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
 #endif
+
        if (pmap == NULL)
                return;
 
+#ifdef PMAPSTATS
+       protect_stats.calls++;
+#endif
        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                pmap_remove(pmap, sva, eva);
                return;
@@ -946,48 +872,74 @@ pmap_protect(pmap, sva, eva, prot)
        if (prot & VM_PROT_WRITE)
                return;
 
-       pte = pmap_pte(pmap, sva);
-       hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
-       for (va = sva; va < eva; va += PAGE_SIZE) {
+       isro = pte_prot(pmap, prot);
+       needtflush = active_pmap(pmap);
+       firstpage = TRUE;
+       while (sva < eva) {
+               nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
+               if (nssva == 0 || nssva > eva)
+                       nssva = eva;
                /*
-                * Page table page is not allocated.
-                * Skip it, we don't want to force allocation
-                * of unnecessary PTE pages just to set the protection.
+                * If VA belongs to an unallocated segment,
+                * skip to the next segment boundary.
                 */
-               if (!pmap_ste_v(pmap_ste(pmap, va))) {
-                       /* XXX: avoid address wrap around */
-                       if (va >= hp300_trunc_seg((vm_offset_t)-1))
-                               break;
-                       va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
-                       pte = pmap_pte(pmap, va);
-                       pte += hppagesperpage;
+               if (!pmap_ste_v(pmap, sva)) {
+                       sva = nssva;
                        continue;
                }
                /*
-                * Page not valid.  Again, skip it.
-                * Should we do this?  Or set protection anyway?
+                * Change protection on mapping if it is valid and doesn't
+                * already have the correct protection.
                 */
-               if (!pmap_pte_v(pte)) {
-                       pte += hppagesperpage;
-                       continue;
-               }
-               /*
-                * Flush VAC to ensure we get correct state of HW bits
-                * so we don't clobber them.
-                */
-               if (firstpage && pmap_aliasmask) {
-                       firstpage = FALSE;
-                       DCIS();
+               pte = pmap_pte(pmap, sva);
+               while (sva < nssva) {
+                       if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
+#ifdef HAVEVAC
+                               /*
+                                * Purge kernel side of VAC to ensure we
+                                * get the correct state of any hardware
+                                * maintained bits.
+                                *
+                                * XXX do we need to clear the VAC in
+                                * general to reflect the new protection?
+                                */
+                               if (firstpage && pmap_aliasmask)
+                                       DCIS();
+#endif
+#if defined(HP380)
+                               /*
+                                * Clear caches if making RO (see section
+                                * "7.3 Cache Coherency" in the manual).
+                                */
+                               if (isro && mmutype == MMU_68040) {
+                                       vm_offset_t pa = pmap_pte_pa(pte);
+
+                                       DCFP(pa);
+                                       ICPP(pa);
+                               }
+#endif
+                               pmap_pte_set_prot(pte, isro);
+                               if (needtflush)
+                                       TBIS(sva);
+#ifdef PMAPSTATS
+                               protect_stats.changed++;
+#endif
+                               firstpage = FALSE;
+                       }
+#ifdef PMAPSTATS
+                       else if (pmap_pte_v(pte)) {
+                               if (isro)
+                                       protect_stats.alreadyro++;
+                               else
+                                       protect_stats.alreadyrw++;
+                       }
+#endif
+                       pte++;
+                       sva += PAGE_SIZE;
                }
-               ix = 0;
-               do {
-                       /* clear VAC here if PG_RO? */
-                       pmap_pte_set_prot(pte++, hpprot);
-                       TBIS(va + ix * HP_PAGE_SIZE);
-               } while (++ix != hppagesperpage);
        }
-#ifdef DEBUG
-       if (hpprot && (pmapvacflush & PVF_PROTECT)) {
+#if defined(HAVEVAC) && defined(DEBUG)
+       if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
                if (pmapvacflush & PVF_TOTAL)
                        DCIA();
                else if (pmap == kernel_pmap)
@@ -1019,7 +971,7 @@ pmap_enter(pmap, va, pa, prot, wired)
        boolean_t wired;
 {
        register pt_entry_t *pte;
-       register int npte, ix;
+       register int npte;
        vm_offset_t opa;
        boolean_t cacheable = TRUE;
        boolean_t checkpv = TRUE;
@@ -1032,7 +984,7 @@ pmap_enter(pmap, va, pa, prot, wired)
        if (pmap == NULL)
                return;
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
        if (pmap == kernel_pmap)
                enter_stats.kernel++;
        else
@@ -1048,9 +1000,10 @@ pmap_enter(pmap, va, pa, prot, wired)
        /*
         * Segment table entry not valid, we need a new PT page
         */
-       if (!pmap_ste_v(pmap_ste(pmap, va)))
+       if (!pmap_ste_v(pmap, va))
                pmap_enter_ptpage(pmap, va);
 
+       pa = hp300_trunc_page(pa);
        pte = pmap_pte(pmap, va);
        opa = pmap_pte_pa(pte);
 #ifdef DEBUG
@@ -1062,7 +1015,7 @@ pmap_enter(pmap, va, pa, prot, wired)
         * Mapping has not changed, must be protection or wiring change.
         */
        if (opa == pa) {
-#ifdef DEBUG
+#ifdef PMAPSTATS
                enter_stats.pwchange++;
 #endif
                /*
@@ -1071,7 +1024,7 @@ pmap_enter(pmap, va, pa, prot, wired)
                 * resident as long as there are valid mappings in them.
                 * Hence, if a user page is wired, the PT page will be also.
                 */
-               if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+               if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
 #ifdef DEBUG
                        if (pmapdebug & PDB_ENTER)
                                printf("enter: wiring change -> %x\n", wired);
@@ -1080,10 +1033,17 @@ pmap_enter(pmap, va, pa, prot, wired)
                                pmap->pm_stats.wired_count++;
                        else
                                pmap->pm_stats.wired_count--;
-#ifdef DEBUG
-                       enter_stats.wchange++;
+#ifdef PMAPSTATS
+                       if (pmap_pte_prot(pte) == pte_prot(pmap, prot))
+                               enter_stats.wchange++;
 #endif
                }
+#ifdef PMAPSTATS
+               else if (pmap_pte_prot(pte) != pte_prot(pmap, prot))
+                       enter_stats.pchange++;
+               else
+                       enter_stats.nochange++;
+#endif
                /*
                 * Retain cache inhibition status
                 */
@@ -1102,8 +1062,8 @@ pmap_enter(pmap, va, pa, prot, wired)
                if (pmapdebug & PDB_ENTER)
                        printf("enter: removing old mapping %x\n", va);
 #endif
-               pmap_remove(pmap, va, va + PAGE_SIZE);
-#ifdef DEBUG
+               pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
+#ifdef PMAPSTATS
                enter_stats.mchange++;
 #endif
        }
@@ -1114,8 +1074,8 @@ pmap_enter(pmap, va, pa, prot, wired)
         * is a valid mapping in the page.
         */
        if (pmap != kernel_pmap)
-               vm_map_pageable(pt_map, trunc_page(pte),
-                               round_page(pte+1), FALSE);
+               (void) vm_map_pageable(pt_map, trunc_page(pte),
+                                      round_page(pte+1), FALSE);
 
        /*
         * Enter on the PV list if part of our managed memory
@@ -1126,7 +1086,7 @@ pmap_enter(pmap, va, pa, prot, wired)
                register pv_entry_t pv, npv;
                int s;
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
                enter_stats.managed++;
 #endif
                pv = pa_to_pvh(pa);
@@ -1140,7 +1100,7 @@ pmap_enter(pmap, va, pa, prot, wired)
                 * No entries yet, use header as the first entry
                 */
                if (pv->pv_pmap == NULL) {
-#ifdef DEBUG
+#ifdef PMAPSTATS
                        enter_stats.firstpv++;
 #endif
                        pv->pv_va = va;
@@ -1167,11 +1127,13 @@ pmap_enter(pmap, va, pa, prot, wired)
                        npv->pv_next = pv->pv_next;
                        npv->pv_ptste = NULL;
                        npv->pv_ptpmap = NULL;
+                       npv->pv_flags = 0;
                        pv->pv_next = npv;
-#ifdef DEBUG
+#ifdef PMAPSTATS
                        if (!npv->pv_next)
                                enter_stats.secondpv++;
 #endif
+#ifdef HAVEVAC
                        /*
                         * Since there is another logical mapping for the
                         * same page we may need to cache-inhibit the
@@ -1215,11 +1177,12 @@ pmap_enter(pmap, va, pa, prot, wired)
 #endif
                                        cacheable = FALSE;
                                        pv->pv_flags |= PV_CI;
-#ifdef DEBUG
+#ifdef PMAPSTATS
                                        enter_stats.ci++;
 #endif
                                }
                        }
+#endif
                }
                splx(s);
        }
@@ -1229,7 +1192,7 @@ pmap_enter(pmap, va, pa, prot, wired)
         */
        else if (pmap_initialized) {
                checkpv = cacheable = FALSE;
-#ifdef DEBUG
+#ifdef PMAPSTATS
                enter_stats.unmanaged++;
 #endif
        }
@@ -1242,34 +1205,48 @@ pmap_enter(pmap, va, pa, prot, wired)
                pmap->pm_stats.wired_count++;
 
 validate:
+#ifdef HAVEVAC
        /*
-        * Flush VAC to ensure we get correct state of HW bits
-        * so we don't clobber them.
+        * Purge kernel side of VAC to ensure we get correct state
+        * of HW bits so we don't clobber them.
         */
        if (pmap_aliasmask)
                DCIS();
+#endif
        /*
-        * Now validate mapping with desired protection/wiring.
-        * Assume uniform modified and referenced status for all
-        * HP pages in a MACH page.
+        * Build the new PTE.
         */
-       npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
-       npte |= (*(int *)pte & (PG_M|PG_U));
+       npte = pa | pte_prot(pmap, prot) | (*(int *)pte & (PG_M|PG_U)) | PG_V;
        if (wired)
                npte |= PG_W;
        if (!checkpv && !cacheable)
                npte |= PG_CI;
+#if defined(HP380)
+       if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
+#ifdef DEBUG
+               if (dowriteback && (dokwriteback || pmap != kernel_pmap))
+#endif
+               npte |= PG_CCB;
+#endif
 #ifdef DEBUG
        if (pmapdebug & PDB_ENTER)
                printf("enter: new pte value %x\n", npte);
 #endif
-       ix = 0;
-       do {
-               *(int *)pte++ = npte;
+       /*
+        * Remember if this was a wiring-only change.
+        * If so, we need not flush the TLB and caches.
+        */
+       wired = ((*(int *)pte ^ npte) == PG_W);
+#if defined(HP380)
+       if (mmutype == MMU_68040 && !wired) {
+               DCFP(pa);
+               ICPP(pa);
+       }
+#endif
+       *(int *)pte = npte;
+       if (!wired && active_pmap(pmap))
                TBIS(va);
-               npte += HP_PAGE_SIZE;
-               va += HP_PAGE_SIZE;
-       } while (++ix != hppagesperpage);
+#ifdef HAVEVAC
        /*
         * The following is executed if we are entering a second
         * (or greater) mapping for a physical page and the mappings
@@ -1280,7 +1257,7 @@ validate:
        if (checkpv && !cacheable) {
                pmap_changebit(pa, PG_CI, TRUE);
                DCIA();
-#ifdef DEBUG
+#ifdef PMAPSTATS
                enter_stats.flushes++;
 #endif
 #ifdef DEBUG
@@ -1298,10 +1275,11 @@ validate:
                else
                        DCIU();
        }
-       if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
-               va -= PAGE_SIZE;
+#endif
+#endif
+#ifdef DEBUG
+       if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
                pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
-       }
 #endif
 }
 
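The wiring-only test added in pmap_enter above leans on XOR: if the old and new PTE words differ in nothing but PG_W, their XOR is exactly PG_W, and the TLB invalidate and 68040 cache pushes can be skipped. A worked example with stand-in values:

        #include <stdio.h>

        #define PG_W    0x100                   /* stand-in wired bit */

        int
        main(void)
        {
                int opte = 0x00654019;          /* old PTE word */
                int npte = opte | PG_W;         /* same mapping, now wired */

                printf("wiring-only change: %s\n",
                    ((opte ^ npte) == PG_W) ? "yes" : "no");
                return (0);
        }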
@@ -1319,7 +1297,6 @@ pmap_change_wiring(pmap, va, wired)
        boolean_t       wired;
 {
        register pt_entry_t *pte;
-       register int ix;
 
 #ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
@@ -1335,7 +1312,7 @@ pmap_change_wiring(pmap, va, wired)
         * Should this ever happen?  Ignore it for now,
         * we don't want to force allocation of unnecessary PTE pages.
         */
-       if (!pmap_ste_v(pmap_ste(pmap, va))) {
+       if (!pmap_ste_v(pmap, va)) {
                if (pmapdebug & PDB_PARANOIA)
                        printf("pmap_change_wiring: invalid STE for %x\n", va);
                return;
@@ -1349,20 +1326,18 @@ pmap_change_wiring(pmap, va, wired)
                        printf("pmap_change_wiring: invalid PTE for %x\n", va);
        }
 #endif
-       if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+       /*
+        * If wiring actually changed (always?), set the wire bit and
+        * update the wire count.  Note that wiring is not a hardware
+        * characteristic so there is no need to invalidate the TLB.
+        */
+       if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
+               pmap_pte_set_w(pte, wired);
                if (wired)
                        pmap->pm_stats.wired_count++;
                else
                        pmap->pm_stats.wired_count--;
        }
-       /*
-        * Wiring is not a hardware characteristic so there is no need
-        * to invalidate TLB.
-        */
-       ix = 0;
-       do {
-               pmap_pte_set_w(pte++, wired);
-       } while (++ix != hppagesperpage);
 }
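
pmap_change_wiring now goes through the pmap_pte_w_chg()/pmap_pte_set_w()
accessors instead of open-coding the bit test.  Their real definitions
live in the hp300 pmap header; a plausible shape, with an assumed PG_W
value, is:

    #define PG_W    0x100   /* assumed wired bit, illustration only */

    #define pmap_pte_w(pte)         (*(int *)(pte) & PG_W)
    /* nonzero when requested state `nw' (PG_W or 0) differs from PTE's */
    #define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
    #define pmap_pte_set_w(pte, v) \
            ((v) ? (*(int *)(pte) |= PG_W) : (*(int *)(pte) &= ~PG_W))

Under these definitions the call above reads: change the bit only when
the requested state differs, then bump the wired count accordingly.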
 
 /*
@@ -1384,7 +1359,7 @@ pmap_extract(pmap, va)
                printf("pmap_extract(%x, %x) -> ", pmap, va);
 #endif
        pa = 0;
-       if (pmap && pmap_ste_v(pmap_ste(pmap, va)))
+       if (pmap && pmap_ste_v(pmap, va))
                pa = *(int *)pmap_pte(pmap, va);
        if (pa)
                pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
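
pmap_extract composes the physical address from the PTE's frame bits and
the page-offset bits of va.  A self-contained model of the composition
(the 4K page size and PG_FRAME value are assumptions for this sketch):

    #include <stdio.h>

    #define PG_FRAME        0xfffff000      /* assumed 4K frame mask */

    static unsigned
    extract_pa(unsigned pte, unsigned va)
    {
            /* frame number from the PTE, byte offset from the VA */
            return ((pte & PG_FRAME) | (va & ~PG_FRAME));
    }

    int
    main(void)
    {
            /* frame 0x345000 plus offset 0xabc -> prints 345abc */
            printf("%x\n", extract_pa(0x00345019, 0x10000abc));
            return (0);
    }
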
@@ -1464,6 +1439,8 @@ pmap_collect(pmap)
 #ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_collect(%x)\n", pmap);
+#endif
+#ifdef PMAPSTATS
        kpt_stats.collectscans++;
 #endif
        s = splimp();
@@ -1511,11 +1488,12 @@ ok:
 #endif
                /*
                 * If all entries were invalid we can remove the page.
-                * We call pmap_remove to take care of invalidating ST
-                * and Sysptmap entries.
+                * We call pmap_remove_mapping to take care of invalidating
+                * ST and Sysptmap entries.
                 */
                kpa = pmap_extract(pmap, pv->pv_va);
-               pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
+               pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
+                                   PRM_TFLUSH|PRM_CFLUSH);
                /*
                 * Use the physical address to locate the original
                 * (kmem_alloc assigned) address for the page and put
@@ -1536,9 +1514,11 @@ ok:
                *pkpt = kpt->kpt_next;
                kpt->kpt_next = kpt_free_list;
                kpt_free_list = kpt;
-#ifdef DEBUG
+#ifdef PMAPSTATS
                kpt_stats.kptinuse--;
                kpt_stats.collectpages++;
+#endif
+#ifdef DEBUG
                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                        pmapdebug = opmapdebug;
 
@@ -1571,21 +1551,29 @@ pmap_activate(pmap, pcbp)
  *     page by mapping the page into virtual memory and using
  *     bzero to clear its contents, one machine dependent page
  *     at a time.
+ *
+ *     XXX this is a bad implementation for virtual cache machines
+ *     (320/350) because pmap_enter doesn't cache-inhibit the temporary
+ *     kernel mapping and we wind up with data cached for that KVA.
+ *     It is probably a win for physical cache machines (370/380)
+ *     as the cache loading is not wasted.
  */
+void
 pmap_zero_page(phys)
-       register vm_offset_t    phys;
+       vm_offset_t phys;
 {
-       register int ix;
+       register vm_offset_t kva;
+       extern caddr_t CADDR1;
 
 #ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_zero_page(%x)\n", phys);
 #endif
-       phys >>= PG_SHIFT;
-       ix = 0;
-       do {
-               clearseg(phys++);
-       } while (++ix != hppagesperpage);
+       kva = (vm_offset_t) CADDR1;
+       pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+       bzero((caddr_t)kva, HP_PAGE_SIZE);
+       pmap_remove_mapping(kernel_pmap, kva, PT_ENTRY_NULL,
+                           PRM_TFLUSH|PRM_CFLUSH);
 }
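
The XXX note concerns virtually indexed caches: the temporary CADDR1
mapping leaves lines tagged by that KVA, and a later mapping of the same
physical page at a different VA will not see them.  A toy model of the
index arithmetic behind such aliases (the cache geometry here is
invented, not the 320/350's):

    #include <stdio.h>

    int
    main(void)
    {
            unsigned linesz = 16, nlines = 1024;    /* assumed geometry */
            unsigned va1 = 0x00002000;      /* temporary kernel window */
            unsigned va2 = 0x00107000;      /* same PA, different VA */

            /* different lines: a write via va1 stays invisible via va2
             * until the cache is purged (what DCIS/DCIA do above) */
            printf("va1 -> line %u, va2 -> line %u\n",
                (va1 / linesz) % nlines, (va2 / linesz) % nlines);
            return (0);
    }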
 
 /*
@@ -1593,25 +1581,34 @@ pmap_zero_page(phys)
  *     page by mapping the page into virtual memory and using
  *     bcopy to copy the page, one machine dependent page at a
  *     time.
+ *
+ *     XXX this is a bad implementation for virtual cache machines
+ *     (320/350) because pmap_enter doesn't cache-inhibit the temporary
+ *     kernel mapping and we wind up with data cached for that KVA.
+ *     It is probably a win for physical cache machines (370/380)
+ *     as the cache loading is not wasted.
  */
+void
 pmap_copy_page(src, dst)
-       register vm_offset_t    src, dst;
+       vm_offset_t src, dst;
 {
-       register int ix;
+       register vm_offset_t skva, dkva;
+       extern caddr_t CADDR1, CADDR2;
 
 #ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_copy_page(%x, %x)\n", src, dst);
 #endif
-       src >>= PG_SHIFT;
-       dst >>= PG_SHIFT;
-       ix = 0;
-       do {
-               physcopyseg(src++, dst++);
-       } while (++ix != hppagesperpage);
+       skva = (vm_offset_t) CADDR1;
+       dkva = (vm_offset_t) CADDR2;
+       pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
+       pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+       copypage((caddr_t)skva, (caddr_t)dkva);
+       /* CADDR1 and CADDR2 are virtually contiguous */
+       pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);
 }
 
-
 /*
  *     Routine:        pmap_pageable
  *     Function:
@@ -1626,6 +1623,7 @@ pmap_copy_page(src, dst)
  *             will specify that these pages are to be wired
  *             down (or not) as appropriate.
  */
+void
 pmap_pageable(pmap, sva, eva, pageable)
        pmap_t          pmap;
        vm_offset_t     sva, eva;
@@ -1653,7 +1651,7 @@ pmap_pageable(pmap, sva, eva, pageable)
                        printf("pmap_pageable(%x, %x, %x, %x)\n",
                               pmap, sva, eva, pageable);
 #endif
-               if (!pmap_ste_v(pmap_ste(pmap, sva)))
+               if (!pmap_ste_v(pmap, sva))
                        return;
                pa = pmap_pte_pa(pmap_pte(pmap, sva));
                if (pa < vm_first_phys || pa >= vm_last_phys)
@@ -1673,6 +1671,11 @@ pmap_pageable(pmap, sva, eva, pageable)
                 */
                pmap_changebit(pa, PG_M, FALSE);
 #ifdef DEBUG
+               if ((PHYS_TO_VM_PAGE(pa)->flags & PG_CLEAN) == 0) {
+                       printf("pa %x: flags=%x: not clean\n",
+                              pa, PHYS_TO_VM_PAGE(pa)->flags);
+                       PHYS_TO_VM_PAGE(pa)->flags |= PG_CLEAN;
+               }
                if (pmapdebug & PDB_PTPAGE)
                        printf("pmap_pageable: PT page %x(%x) unmodified\n",
                               sva, *(int *)pmap_pte(pmap, sva));
@@ -1762,34 +1765,315 @@ pmap_phys_address(ppn)
        return(hp300_ptob(ppn));
 }
 
+#ifdef HPUXCOMPAT
+/*
+ * 'PUX hack for dealing with the so-called multi-mapped address space.
+ * The first 256mb is mapped in at every 256mb region from 0x10000000
+ * up to 0xF0000000.  This allows for 15 distinct tag values in the
+ * high address nibble.
+ *
+ * We implement this at the segment table level, the machine independent
+ * VM knows nothing about it.
+ */
+pmap_mapmulti(pmap, va)
+       pmap_t pmap;
+       vm_offset_t va;
+{
+       int *ste, *bste;
+
+#ifdef DEBUG
+       if (pmapdebug & PDB_MULTIMAP) {
+               ste = (int *)pmap_ste(pmap, HPMMBASEADDR(va));
+               printf("pmap_mapmulti(%x, %x): bste %x(%x)",
+                      pmap, va, ste, *ste);
+               ste = (int *)pmap_ste(pmap, va);
+               printf(" ste %x(%x)\n", ste, *ste);
+       }
+#endif
+       bste = (int *) pmap_ste(pmap, HPMMBASEADDR(va));
+       ste = (int *) pmap_ste(pmap, va);
+       if (*ste == SG_NV && (*bste & SG_V)) {
+               *ste = *bste;
+               TBIAU();
+               return (KERN_SUCCESS);
+       }
+       return (KERN_INVALID_ADDRESS);
+}
+#endif
+
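
Concretely, the multi-map arithmetic splits a VA into a 4-bit region tag
and a 28-bit offset within the base 256mb region.  HPMMBASEADDR
presumably masks off the tag bits; the mask form below is an assumption,
not quoted from the HP-UX compat headers:

    #include <stdio.h>

    #define MM_BASE(va)     ((unsigned)(va) & 0x0fffffff)  /* assumed */
    #define MM_TAG(va)      ((unsigned)(va) >> 28)

    int
    main(void)
    {
            unsigned va = 0x30004000;  /* alias in the 0x30000000 copy */

            /* prints: base va 4000, tag 3 */
            printf("base va %x, tag %x\n", MM_BASE(va), MM_TAG(va));
            return (0);
    }
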
 /*
  * Miscellaneous support routines follow
  */
 
+/*
+ * Invalidate a single page denoted by pmap/va.
+ * If (pte != NULL), it is the already computed PTE for the page.
+ * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
+ * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
+ */
 /* static */
 /* static */
-hp300_protection_init()
+void
+pmap_remove_mapping(pmap, va, pte, flags)
+       register pmap_t pmap;
+       register vm_offset_t va;
+       register pt_entry_t *pte;
+       int flags;
 {
-       register int *kp, prot;
-
-       kp = protection_codes;
-       for (prot = 0; prot < 8; prot++) {
-               switch (prot) {
-               case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
-                       *kp++ = 0;
-                       break;
-               case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
-               case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
-               case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
-                       *kp++ = PG_RO;
-                       break;
-               case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
-               case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
-               case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
-               case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
-                       *kp++ = PG_RW;
-                       break;
+       register vm_offset_t pa;
+       register pv_entry_t pv, npv;
+       pmap_t ptpmap;
+       int *ste, s, bits;
+#ifdef DEBUG
+       pt_entry_t opte;
+
+       if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+               printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
+                      pmap, va, pte, flags);
+#endif
+
+       /*
+        * PTE not provided, compute it from pmap and va.
+        */
+       if (pte == PT_ENTRY_NULL) {
+               pte = pmap_pte(pmap, va);
+               if (*(int *)pte == PG_NV)
+                       return;
+       }
+#ifdef HAVEVAC
+       if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
+               /*
+                * Purge kernel side of VAC to ensure we get the correct
+                * state of any hardware maintained bits.
+                */
+               DCIS();
+#ifdef PMAPSTATS
+               remove_stats.sflushes++;
+#endif
+               /*
+                * If this is a non-CI user mapping for the current process,
+                * flush the VAC.  Note that the kernel side was flushed
+                * above so we don't worry about non-CI kernel mappings.
+                */
+               if (pmap == curproc->p_vmspace->vm_map.pmap &&
+                   !pmap_pte_ci(pte)) {
+                       DCIU();
+#ifdef PMAPSTATS
+                       remove_stats.uflushes++;
+#endif
+               }
+       }
+#endif
+       pa = pmap_pte_pa(pte);
+#ifdef DEBUG
+       opte = *pte;
+#endif
+#ifdef PMAPSTATS
+       remove_stats.removes++;
+#endif
+       /*
+        * Update statistics
+        */
+       if (pmap_pte_w(pte))
+               pmap->pm_stats.wired_count--;
+       pmap->pm_stats.resident_count--;
+
+       /*
+        * Invalidate the PTE after saving the reference and modify info.
+        */
+#ifdef DEBUG
+       if (pmapdebug & PDB_REMOVE)
+               printf("remove: invalidating pte at %x\n", pte);
+#endif
+       bits = *(int *)pte & (PG_U|PG_M);
+       *(int *)pte = PG_NV;
+       if ((flags & PRM_TFLUSH) && active_pmap(pmap))
+               TBIS(va);
+       /*
+        * For user mappings decrement the wiring count on
+        * the PT page.  We do this after the PTE has been
+        * invalidated because vm_map_pageable winds up in
+        * pmap_pageable which clears the modify bit for the
+        * PT page.
+        */
+       if (pmap != kernel_pmap) {
+#if defined(DEBUG) && NCPUS == 1
+               /*
+                * XXX this recursive use of the VM won't work on a MP
+                * (or when otherwise debugging simple locks).  We might
+                * be called with the page queue lock held (e.g. from
+                * the pageout daemon) and vm_map_pageable might call
+                * vm_fault_unwire which would try to lock the page queues
+                * again.  For debugging we hack and drop the lock.
+                */
+               int hadit = !simple_lock_try(&vm_page_queue_lock);
+               simple_unlock(&vm_page_queue_lock);
+#endif
+               (void) vm_map_pageable(pt_map, trunc_page(pte),
+                                      round_page(pte+1), TRUE);
+#ifdef DEBUG
+               if (pmapdebug & PDB_WIRING)
+                       pmap_check_wiring("remove", trunc_page(pte));
+#if NCPUS == 1
+               if (hadit)
+                       simple_lock(&vm_page_queue_lock);
+#endif
+#endif
+       }
+       /*
+        * If this isn't a managed page, we are all done.
+        */
+       if (pa < vm_first_phys || pa >= vm_last_phys)
+               return;
+       /*
+        * Otherwise remove it from the PV table
+        * (raise IPL since we may be called at interrupt time).
+        */
+       pv = pa_to_pvh(pa);
+       ste = (int *)0;
+       s = splimp();
+       /*
+        * If it is the first entry on the list, it is actually
+        * in the header and we must copy the following entry up
+        * to the header.  Otherwise we must search the list for
+        * the entry.  In either case we free the now unused entry.
+        */
+       if (pmap == pv->pv_pmap && va == pv->pv_va) {
+               ste = (int *)pv->pv_ptste;
+               ptpmap = pv->pv_ptpmap;
+               npv = pv->pv_next;
+               if (npv) {
+                       npv->pv_flags = pv->pv_flags;
+                       *pv = *npv;
+                       free((caddr_t)npv, M_VMPVENT);
+               } else
+                       pv->pv_pmap = NULL;
+#ifdef PMAPSTATS
+               remove_stats.pvfirst++;
+#endif
+       } else {
+               for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+#ifdef PMAPSTATS
+                       remove_stats.pvsearch++;
+#endif
+                       if (pmap == npv->pv_pmap && va == npv->pv_va)
+                               break;
+                       pv = npv;
+               }
+#ifdef DEBUG
+               if (npv == NULL)
+                       panic("pmap_remove: PA not in pv_tab");
+#endif
+               ste = (int *)npv->pv_ptste;
+               ptpmap = npv->pv_ptpmap;
+               pv->pv_next = npv->pv_next;
+               free((caddr_t)npv, M_VMPVENT);
+               pv = pa_to_pvh(pa);
+       }
+#ifdef HAVEVAC
+       /*
+        * If only one mapping is left we no longer need to cache-inhibit
+        */
+       if (pmap_aliasmask &&
+           pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
+#ifdef DEBUG
+               if (pmapdebug & PDB_CACHE)
+                       printf("remove: clearing CI for pa %x\n", pa);
+#endif
+               pv->pv_flags &= ~PV_CI;
+               pmap_changebit(pa, PG_CI, FALSE);
+#ifdef DEBUG
+               if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
+                   (PDB_CACHE|PDB_PVDUMP))
+                       pmap_pvdump(pa);
+#endif
+       }
+#endif
+       /*
+        * If this was a PT page we must also remove the
+        * mapping from the associated segment table.
+        */
+       if (ste) {
+#ifdef PMAPSTATS
+               remove_stats.ptinvalid++;
+#endif
+#ifdef DEBUG
+               if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+                       printf("remove: ste was %x@%x pte was %x@%x\n",
+                              *ste, ste, *(int *)&opte, pmap_pte(pmap, va));
+#endif
+#if defined(HP380)
+               if (mmutype == MMU_68040) {
+                       int *este = &ste[NPTEPG/SG4_LEV3SIZE];
+
+                       while (ste < este)
+                               *ste++ = SG_NV;
+#ifdef DEBUG
+                       ste -= NPTEPG/SG4_LEV3SIZE;
+#endif
+               } else
+#endif
+               *ste = SG_NV;
+               /*
+                * If it was a user PT page, we decrement the
+                * reference count on the segment table as well,
+                * freeing it if it is now empty.
+                */
+               if (ptpmap != kernel_pmap) {
+#ifdef DEBUG
+                       if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
+                               printf("remove: stab %x, refcnt %d\n",
+                                      ptpmap->pm_stab, ptpmap->pm_sref - 1);
+                       if ((pmapdebug & PDB_PARANOIA) &&
+                           ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
+                               panic("remove: bogus ste");
+#endif
+                       if (--(ptpmap->pm_sref) == 0) {
+#ifdef DEBUG
+                               if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
+                                       printf("remove: free stab %x\n",
+                                              ptpmap->pm_stab);
+#endif
+                               kmem_free_wakeup(st_map,
+                                                (vm_offset_t)ptpmap->pm_stab,
+                                                HP_STSIZE);
+                               ptpmap->pm_stab = Segtabzero;
+                               ptpmap->pm_stpa = Segtabzeropa;
+#if defined(HP380)
+                               if (mmutype == MMU_68040)
+                                       ptpmap->pm_stfree = protostfree;
+#endif
+                               ptpmap->pm_stchanged = TRUE;
+                               /*
+                                * XXX may have changed segment table
+                                * pointer for current process so
+                                * update now to reload hardware.
+                                */
+                               if (ptpmap == curproc->p_vmspace->vm_map.pmap)
+                                       PMAP_ACTIVATE(ptpmap,
+                                           (struct pcb *)curproc->p_addr, 1);
+                       }
+#ifdef DEBUG
+                       else if (ptpmap->pm_sref < 0)
+                               panic("remove: sref < 0");
+#endif
                }
+#if 0
+               /*
+                * XXX this should be unnecessary as we have been
+                * flushing individual mappings as we go.
+                */
+               if (ptpmap == kernel_pmap)
+                       TBIAS();
+               else
+                       TBIAU();
+#endif
+               pv->pv_flags &= ~PV_PTPAGE;
+               ptpmap->pm_ptpages--;
        }
+       /*
+        * Update saved attributes for managed page
+        */
+       pmap_attributes[pa_index(pa)] |= bits;
+       splx(s);
 }
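
pmap_remove_mapping's PV bookkeeping uses an embedded-header list: the
first pv_entry lives in the header itself, so deleting it means copying
the second entry up into the header, while any later entry is a plain
unlink.  The PRM_TFLUSH/PRM_CFLUSH flags merely gate the TLB and cache
flushes.  A minimal userland model of the list discipline (types and
fields are simplified stand-ins):

    #include <stdlib.h>

    struct pv_entry {
            struct pv_entry *pv_next;
            void            *pv_pmap;
            unsigned        pv_va;
    };

    static void
    pv_remove(struct pv_entry *head, void *pmap, unsigned va)
    {
            struct pv_entry *pv, *npv;

            if (head->pv_pmap == pmap && head->pv_va == va) {
                    npv = head->pv_next;
                    if (npv) {
                            *head = *npv;   /* copy 2nd entry into header */
                            free(npv);
                    } else
                            head->pv_pmap = NULL;   /* list now empty */
                    return;
            }
            for (pv = head; (npv = pv->pv_next) != NULL; pv = npv)
                    if (npv->pv_pmap == pmap && npv->pv_va == va) {
                            pv->pv_next = npv->pv_next;
                            free(npv);
                            return;
                    }
    }

    int
    main(void)
    {
            struct pv_entry head, *second = malloc(sizeof(*second));

            second->pv_next = NULL;
            second->pv_pmap = (void *)2;
            second->pv_va = 0x2000;
            head.pv_next = second;
            head.pv_pmap = (void *)1;
            head.pv_va = 0x1000;
            pv_remove(&head, (void *)1, 0x1000);    /* header case */
            return (head.pv_pmap == (void *)2 ? 0 : 1);
    }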
 
 /* static */
@@ -1799,7 +2083,7 @@ pmap_testbit(pa, bit)
        int bit;
 {
        register pv_entry_t pv;
-       register int *pte, ix;
+       register int *pte;
        int s;
 
        if (pa < vm_first_phys || pa >= vm_last_phys)
@@ -1814,11 +2098,13 @@ pmap_testbit(pa, bit)
                splx(s);
                return(TRUE);
        }
+#ifdef HAVEVAC
        /*
         * Flush VAC to get correct state of any hardware maintained bits.
         */
        if (pmap_aliasmask && (bit & (PG_U|PG_M)))
                DCIS();
+#endif
        /*
         * Not found, check current mappings returning
         * immediately if found.
@@ -1826,13 +2112,10 @@ pmap_testbit(pa, bit)
        if (pv->pv_pmap != NULL) {
                for (; pv; pv = pv->pv_next) {
                        pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
-                       ix = 0;
-                       do {
-                               if (*pte++ & bit) {
-                                       splx(s);
-                                       return(TRUE);
-                               }
-                       } while (++ix != hppagesperpage);
+                       if (*pte & bit) {
+                               splx(s);
+                               return(TRUE);
+                       }
                }
        }
        splx(s);
@@ -1840,16 +2123,20 @@ pmap_testbit(pa, bit)
 }
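
pmap_testbit consults the saved attribute array first and then walks
every active mapping of the page, returning on the first PTE with the
bit set.  The scan reduces to this shape (an array stands in for the pv
chain):

    #include <stdio.h>

    static int
    test_bit(unsigned *ptes, int n, unsigned bit)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (ptes[i] & bit)
                            return (1);     /* set in some mapping */
            return (0);
    }

    int
    main(void)
    {
            unsigned ptes[3] = { 0x1001, 0x1041, 0x1001 };

            printf("%d\n", test_bit(ptes, 3, 0x40));        /* 1 */
            printf("%d\n", test_bit(ptes, 3, 0x80));        /* 0 */
            return (0);
    }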
 
 /* static */
+void
 pmap_changebit(pa, bit, setem)
        register vm_offset_t pa;
        int bit;
        boolean_t setem;
 {
        register pv_entry_t pv;
-       register int *pte, npte, ix;
+       register int *pte, npte;
        vm_offset_t va;
        int s;
        boolean_t firstpage = TRUE;
+#ifdef PMAPSTATS
+       struct chgstats *chgp;
+#endif
 
 #ifdef DEBUG
        if (pmapdebug & PDB_BITS)
@@ -1859,6 +2146,13 @@ pmap_changebit(pa, bit, setem)
        if (pa < vm_first_phys || pa >= vm_last_phys)
                return;
 
+#ifdef PMAPSTATS
+       chgp = &changebit_stats[(bit>>2)-1];
+       if (setem)
+               chgp->setcalls++;
+       else
+               chgp->clrcalls++;
+#endif
        pv = pa_to_pvh(pa);
        s = splimp();
        /*
@@ -1891,6 +2185,7 @@ pmap_changebit(pa, bit, setem)
                        }
 
                        pte = (int *) pmap_pte(pv->pv_pmap, va);
+#ifdef HAVEVAC
                        /*
                         * Flush VAC to ensure we get correct state of HW bits
                         * so we don't clobber them.
@@ -1899,21 +2194,46 @@ pmap_changebit(pa, bit, setem)
                                firstpage = FALSE;
                                DCIS();
                        }
-                       ix = 0;
-                       do {
+#endif
+                       if (setem)
+                               npte = *pte | bit;
+                       else
+                               npte = *pte & ~bit;
+                       if (*pte != npte) {
+#if defined(HP380)
+                               /*
+                                * If we are changing caching status or
+                                * protection make sure the caches are
+                                * flushed (but only once).
+                                */
+                               if (firstpage && mmutype == MMU_68040 &&
+                                   (bit == PG_RO && setem ||
+                                    (bit & PG_CMASK))) {
+                                       firstpage = FALSE;
+                                       DCFP(pa);
+                                       ICPP(pa);
+                               }
+#endif
+                               *pte = npte;
+                               if (active_pmap(pv->pv_pmap))
+                                       TBIS(va);
+#ifdef PMAPSTATS
                                if (setem)
-                                       npte = *pte | bit;
+                                       chgp->sethits++;
                                else
-                                       npte = *pte & ~bit;
-                               if (*pte != npte) {
-                                       *pte = npte;
-                                       TBIS(va);
-                               }
-                               va += HP_PAGE_SIZE;
-                               pte++;
-                       } while (++ix != hppagesperpage);
+                                       chgp->clrhits++;
+#endif
+                       }
+#ifdef PMAPSTATS
+                       else {
+                               if (setem)
+                                       chgp->setmiss++;
+                               else
+                                       chgp->clrmiss++;
+                       }
+#endif
                }
-#ifdef DEBUG
+#if defined(HAVEVAC) && defined(DEBUG)
                if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
                        if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
                                DCIA();
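
The rewritten loop body above computes the would-be PTE first and only
touches memory and the TLB when it actually changes; on the 68040 the
per-page cache flushes happen at most once.  Reduced to a helper, with
the PTE modeled as a plain word:

    #include <stdio.h>

    /* set or clear `bit' in *pte; report whether a flush is needed */
    static int
    change_bit(unsigned *pte, unsigned bit, int setem)
    {
            unsigned npte = setem ? (*pte | bit) : (*pte & ~bit);

            if (npte == *pte)
                    return (0);     /* miss: nothing to invalidate */
            *pte = npte;
            return (1);             /* hit: caller does TBIS() */
    }

    int
    main(void)
    {
            unsigned pte = 0x1041;

            printf("%d %x\n", change_bit(&pte, 0x40, 0), pte); /* 1 1001 */
            printf("%d %x\n", change_bit(&pte, 0x40, 0), pte); /* 0 1001 */
            return (0);
    }
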
@@ -1941,18 +2261,31 @@ pmap_enter_ptpage(pmap, va)
 #ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
                printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
+#endif
+#ifdef PMAPSTATS
        enter_stats.ptpneeded++;
 #endif
        /*
         * Allocate a segment table if necessary.  Note that it is allocated
-        * from kernel_map and not pt_map.  This keeps user page tables
+        * from a private map and not pt_map.  This keeps user page tables
         * aligned on segment boundaries in the kernel address space.
         * The segment table is wired down.  It will be freed whenever the
         * reference count drops to zero.
         */
        if (pmap->pm_stab == Segtabzero) {
                pmap->pm_stab = (st_entry_t *)
-                       kmem_alloc(kernel_map, HP_STSIZE);
+                       kmem_alloc(st_map, HP_STSIZE);
+               pmap->pm_stpa = (st_entry_t *)
+                       pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
+#if defined(HP380)
+               if (mmutype == MMU_68040) {
+#ifdef DEBUG
+                       if (dowriteback && dokwriteback)
+#endif
+                       pmap_changebit((vm_offset_t)pmap->pm_stpa, PG_CCB, 0);
+                       pmap->pm_stfree = protostfree;
+               }
+#endif
                pmap->pm_stchanged = TRUE;
                /*
                 * XXX may have changed segment table pointer for current
@@ -1962,12 +2295,51 @@ pmap_enter_ptpage(pmap, va)
                        PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
 #ifdef DEBUG
                if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
-                       printf("enter: pmap %x stab %x\n",
-                              pmap, pmap->pm_stab);
+                       printf("enter: pmap %x stab %x(%x)\n",
+                              pmap, pmap->pm_stab, pmap->pm_stpa);
 #endif
        }
 
        ste = pmap_ste(pmap, va);
+#if defined(HP380)
+       /*
+        * Allocate level 2 descriptor block if necessary
+        */
+       if (mmutype == MMU_68040) {
+               if (!ste->sg_v) {
+                       int ix;
+                       caddr_t addr;
+                       
+                       ix = bmtol2(pmap->pm_stfree);
+                       if (ix == -1)
+                               panic("enter: out of address space"); /* XXX */
+                       pmap->pm_stfree &= ~l2tobm(ix);
+                       addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
+                       bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
+                       addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
+                       *(int *)ste = (u_int)addr | SG_RW | SG_U | SG_V;
+#ifdef DEBUG
+                       if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+                               printf("enter: alloc ste2 %d(%x)\n", ix, addr);
+#endif
+               }
+               ste = pmap_ste2(pmap, va);
+               /*
+                * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
+                * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
+                * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
+                * PT page--the unit of allocation.  We set `ste' to point
+                * to the first entry of that chunk which is validated in its
+                * entirety below.
+                */
+               ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
+#ifdef DEBUG
+               if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+                       printf("enter: ste2 %x (%x)\n",
+                              pmap_ste2(pmap, va), ste);
+#endif
+       }
+#endif
        va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
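
Worked numbers for the level 2/level 3 comment above, assuming 4K pages
(NBPG == 4096), 4-byte descriptors, and 64-entry 68040 level 3 blocks
(SG4_LEV3SIZE == 64); the constants are assumptions, not quoted from the
headers:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned nbpg = 4096, entsz = 4;
            unsigned nptepg = nbpg / entsz; /* 1024 PTEs per PT page */
            unsigned lev3size = 64;         /* descriptors per block */

            /* 16 blocks, 64 descriptor bytes: hence the
             * ~(NBPG/SG4_LEV3SIZE-1) alignment applied to `ste' above */
            printf("level 3 blocks per PT page: %u\n", nptepg / lev3size);
            printf("descriptor bytes per PT page: %u\n", nbpg / lev3size);
            return (0);
    }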
 
        /*
@@ -1992,7 +2364,7 @@ pmap_enter_ptpage(pmap, va)
                        if ((kpt = kpt_free_list) == (struct kpt_page *)0)
                                panic("pmap_enter_ptpage: can't get KPT page");
                }
-#ifdef DEBUG
+#ifdef PMAPSTATS
                if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
                        kpt_stats.kptmaxuse = kpt_stats.kptinuse;
 #endif
@@ -2000,14 +2372,15 @@ pmap_enter_ptpage(pmap, va)
                kpt->kpt_next = kpt_used_list;
                kpt_used_list = kpt;
                ptpa = kpt->kpt_pa;
-               bzero(kpt->kpt_va, HP_PAGE_SIZE);
+               bzero((caddr_t)kpt->kpt_va, HP_PAGE_SIZE);
                pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
 #ifdef DEBUG
-               if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
+               if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
+                       int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
+
                        printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
-                              ste - pmap_ste(pmap, 0),
-                              *(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
-                              kpt->kpt_va);
+                              ix, *(int *)&Sysptmap[ix], kpt->kpt_va);
+               }
 #endif
                splx(s);
        }
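
The reworked debug printf computes the Sysptmap slot by pointer
subtraction: pmap_ste(pmap, va) - pmap_ste(pmap, 0) is the segment
number because both point into the same st_entry_t array.  In miniature:

    #include <stdio.h>

    int
    main(void)
    {
            int stmap[1024];        /* stand-in segment table */
            int *base = &stmap[0], *ste = &stmap[37];

            printf("segment index %d\n", (int)(ste - base));    /* 37 */
            return (0);
    }
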
@@ -2020,15 +2393,41 @@ pmap_enter_ptpage(pmap, va)
                if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
                        printf("enter: about to fault UPT pg at %x\n", va);
 #endif
-               if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
-                   != KERN_SUCCESS)
+               s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+               if (s != KERN_SUCCESS) {
+                       printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
                        panic("pmap_enter: vm_fault failed");
+               }
                ptpa = pmap_extract(kernel_pmap, va);
+               /*
+                * Mark the page clean now to avoid its pageout (and
+                * hence creation of a pager) between now and when it
+                * is wired; i.e. while it is on a paging queue.
+                */
+               PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
 #ifdef DEBUG
-               PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
+               PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
 #endif
        }
-
+#if defined(HP380)
+       /*
+        * Turn off copyback caching of page table pages,
+        * could get ugly otherwise.
+        */
+#ifdef DEBUG
+       if (dowriteback && dokwriteback)
+#endif
+       if (mmutype == MMU_68040) {
+               int *pte = (int *)pmap_pte(kernel_pmap, va);
+#ifdef DEBUG
+               if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
+                       printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
+                              pmap == kernel_pmap ? "Kernel" : "User",
+                              va, ptpa, pte, *pte);
+#endif
+               pmap_changebit(ptpa, PG_CCB, 0);
+       }
+#endif
        /*
         * Locate the PV entry in the kernel for this PT page and
         * record the STE address.  This is so that we can invalidate
@@ -2062,6 +2461,16 @@ pmap_enter_ptpage(pmap, va)
         * it would be difficult to identify ST pages in pmap_pageable to
         * release them.  We also avoid the overhead of vm_map_pageable.
         */
+#if defined(HP380)
+       if (mmutype == MMU_68040) {
+               st_entry_t *este;
+
+               for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
+                       *(int *)ste = ptpa | SG_U | SG_RW | SG_V;
+                       ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
+               }
+       } else
+#endif
        *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
        if (pmap != kernel_pmap) {
                pmap->pm_sref++;
@@ -2071,6 +2480,7 @@ pmap_enter_ptpage(pmap, va)
                               pmap->pm_stab, pmap->pm_sref);
 #endif
        }
+#if 0
        /*
         * Flush stale TLB info.
         */
@@ -2078,11 +2488,14 @@ pmap_enter_ptpage(pmap, va)
                TBIAS();
        else
                TBIAU();
+#endif
        pmap->pm_ptpages++;
        splx(s);
 }
 
 #ifdef DEBUG
+/* static */
+void
 pmap_pvdump(pa)
        vm_offset_t pa;
 {
@@ -2096,6 +2509,8 @@ pmap_pvdump(pa)
        printf("\n");
 }
 
+/* static */
+void
 pmap_check_wiring(str, va)
        char *str;
        vm_offset_t va;
@@ -2104,7 +2519,7 @@ pmap_check_wiring(str, va)
        register int count, *pte;
 
        va = trunc_page(va);
-       if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
+       if (!pmap_ste_v(kernel_pmap, va) ||
            !pmap_pte_v(pmap_pte(kernel_pmap, va)))
                return;
 