BSD 4_4_Lite2 release

diff --git a/usr/src/sys/hp300/hp300/pmap.c b/usr/src/sys/hp300/hp300/pmap.c
index faeb78e..6fe1246 100644
--- a/usr/src/sys/hp300/hp300/pmap.c
+++ b/usr/src/sys/hp300/hp300/pmap.c
@@ -6,9 +6,35 @@
  * the Systems Programming Group of the University of Utah Computer
  * Science Department.
  *
- * %sccs.include.redist.c%
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
  *
- *     @(#)pmap.c      8.3 (Berkeley) %G%
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)pmap.c      8.7 (Berkeley) 5/17/95
  */
 
 /*
@@ -236,7 +262,7 @@ st_entry_t  *Segtabzero, *Segtabzeropa;
 vm_size_t      Sysptsize = VM_KERNEL_PT_PAGES;
 
 struct pmap    kernel_pmap_store;
-vm_map_t       pt_map;
+vm_map_t       st_map, pt_map;
 
 vm_offset_t    avail_start;    /* PA of first available physical page */
 vm_offset_t    avail_end;      /* PA of last available physical page */
@@ -420,6 +446,12 @@ bogons:
                       atop(s), addr, addr + s);
 #endif
 
+       /*
+        * Allocate the segment table map
+        */
+       s = maxproc * HP_STSIZE;
+       st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);
+
        /*
         * Slightly modified version of kmem_suballoc() to get page table
         * map where we want it.
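
NOTE: the st_map hunk above carves a dedicated submap out of the kernel
map, sized so that every one of maxproc processes can hold a segment
table at once. A minimal sketch of the lifecycle this diff sets up,
condensed from its own hunks (not a verbatim excerpt):

	vm_offset_t	addr, addr2;
	vm_size_t	s;

	/* pageable submap of kernel_map, with room for one HP_STSIZE
	 * segment table per possible process */
	s = maxproc * HP_STSIZE;
	st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);

	/* per-pmap segment tables are then allocated from the submap ... */
	pmap->pm_stab = (st_entry_t *) kmem_alloc(st_map, HP_STSIZE);

	/* ... and released with a wakeup, so a thread waiting on a
	 * full st_map can retry its allocation */
	kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
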
@@ -604,7 +636,8 @@ pmap_release(pmap)
                kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
                                 HP_MAX_PTSIZE);
        if (pmap->pm_stab != Segtabzero)
-               kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
+               kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
+                                HP_STSIZE);
 }
 
 /*
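
NOTE: kmem_free() only releases the range; kmem_free_wakeup() also wakes
threads sleeping on the map, which matters once segment tables live in a
fixed-size submap that can fill up. This file already uses that pairing
for pt_map. A sketch of the pattern, assuming the 4.4BSD vm_kern
interfaces (the helper names pt_alloc/pt_free are illustrative only):

	vm_offset_t
	pt_alloc()
	{
		/* sleeps until pt_map has room for the request */
		return (kmem_alloc_wait(pt_map, HP_MAX_PTSIZE));
	}

	void
	pt_free(kva)
		vm_offset_t kva;
	{
		/* frees and wakes any sleeper in kmem_alloc_wait() */
		kmem_free_wakeup(pt_map, kva, HP_MAX_PTSIZE);
	}
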
@@ -770,29 +803,40 @@ pmap_page_protect(pa, prot)
        switch (prot) {
        case VM_PROT_READ|VM_PROT_WRITE:
        case VM_PROT_ALL:
-               break;
+               return;
        /* copy_on_write */
        case VM_PROT_READ:
        case VM_PROT_READ|VM_PROT_EXECUTE:
                pmap_changebit(pa, PG_RO, TRUE);
-               break;
+               return;
        /* remove_all */
        default:
-               pv = pa_to_pvh(pa);
-               s = splimp();
-               while (pv->pv_pmap != NULL) {
+               break;
+       }
+       pv = pa_to_pvh(pa);
+       s = splimp();
+       while (pv->pv_pmap != NULL) {
+               register pt_entry_t *pte;
+
+               pte = pmap_pte(pv->pv_pmap, pv->pv_va);
 #ifdef DEBUG
-                       if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
-                           pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
-                               panic("pmap_page_protect: bad mapping");
+               if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
+                   pmap_pte_pa(pte) != pa)
+                       panic("pmap_page_protect: bad mapping");
 #endif
+               if (!pmap_pte_w(pte))
                        pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
-                                           PT_ENTRY_NULL,
-                                           PRM_TFLUSH|PRM_CFLUSH);
+                                           pte, PRM_TFLUSH|PRM_CFLUSH);
+               else {
+                       pv = pv->pv_next;
+#ifdef DEBUG
+                       if (pmapdebug & PDB_PARANOIA)
+                               printf("%s wired mapping for %x not removed\n",
+                                      "pmap_page_protect:", pa);
+#endif
                }
-               splx(s);
-               break;
        }
+       splx(s);
 }
 
 /*
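
NOTE: two behavioral changes ride along with the restructuring of
pmap_page_protect() above: the pv-list walk moves out of the switch, and
wired mappings are now skipped instead of removed. The resulting control
flow, condensed (a sketch, not a verbatim excerpt):

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		return;				/* nothing to revoke */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_changebit(pa, PG_RO, TRUE);	/* write-protect */
		return;
	default:
		break;				/* remove all mappings */
	}
	pv = pa_to_pvh(pa);
	s = splimp();
	while (pv->pv_pmap != NULL) {
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (!pmap_pte_w(pte))
			/* unwired: remove it; this also unlinks the pv
			 * entry, so the list head is re-examined */
			pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
					    pte, PRM_TFLUSH|PRM_CFLUSH);
		else
			/* wired: leave the mapping, step past it */
			pv = pv->pv_next;
	}
	splx(s);
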
@@ -1627,6 +1671,11 @@ pmap_pageable(pmap, sva, eva, pageable)
                 */
                pmap_changebit(pa, PG_M, FALSE);
 #ifdef DEBUG
+               if ((PHYS_TO_VM_PAGE(pa)->flags & PG_CLEAN) == 0) {
+                       printf("pa %x: flags=%x: not clean\n",
+                              pa, PHYS_TO_VM_PAGE(pa)->flags);
+                       PHYS_TO_VM_PAGE(pa)->flags |= PG_CLEAN;
+               }
                if (pmapdebug & PDB_PTPAGE)
                        printf("pmap_pageable: PT page %x(%x) unmodified\n",
                               sva, *(int *)pmap_pte(pmap, sva));
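
NOTE: the new DEBUG block cross-checks machine-level and VM-level dirty
state: pmap_changebit(pa, PG_M, FALSE) has just cleared the modify bit on
the PT page, so the corresponding vm_page should carry PG_CLEAN; if not,
the code reports the inconsistency and repairs the flag. A sketch of the
invariant being checked, assuming the usual 4.4BSD pmap/vm_page
interfaces:

	/* once the pmap no longer sees the page as modified, the VM
	 * layer's clean bit should agree */
	if (!pmap_is_modified(pa) &&
	    (PHYS_TO_VM_PAGE(pa)->flags & PG_CLEAN) == 0)
		printf("pa %x: pmap clean, but PG_CLEAN not set\n", pa);
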
@@ -1846,11 +1895,27 @@ pmap_remove_mapping(pmap, va, pte, flags)
         * PT page.
         */
        if (pmap != kernel_pmap) {
+#if defined(DEBUG) && NCPUS == 1
+               /*
+                * XXX this recursive use of the VM won't work on a MP
+                * (or when otherwise debugging simple locks).  We might
+                * be called with the page queue lock held (e.g. from
+                * the pageout daemon) and vm_map_pageable might call
+                * vm_fault_unwire which would try to lock the page queues
+                * again.  For debugging we hack and drop the lock.
+                */
+               int hadit = !simple_lock_try(&vm_page_queue_lock);
+               simple_unlock(&vm_page_queue_lock);
+#endif
                (void) vm_map_pageable(pt_map, trunc_page(pte),
                                       round_page(pte+1), TRUE);
 #ifdef DEBUG
                if (pmapdebug & PDB_WIRING)
                        pmap_check_wiring("remove", trunc_page(pte));
+#if NCPUS == 1
+               if (hadit)
+                       simple_lock(&vm_page_queue_lock);
+#endif
 #endif
        }
        /*
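
NOTE: the XXX block above works around a lock-order problem that only
bites with simple-lock debugging on a uniprocessor: the caller may
already hold vm_page_queue_lock (e.g. the pageout daemon), and
vm_map_pageable() can reach vm_fault_unwire(), which takes the page-queue
lock again. The shape of the debug-only hack, condensed:

	{
		/* remember whether the lock was already held, then
		 * drop it so the recursive VM call cannot deadlock */
		int hadit = !simple_lock_try(&vm_page_queue_lock);

		simple_unlock(&vm_page_queue_lock);
		/* unwire the PT page; may touch the page queues */
		(void) vm_map_pageable(pt_map, trunc_page(pte),
				       round_page(pte+1), TRUE);
		/* restore the caller's lock state */
		if (hadit)
			simple_lock(&vm_page_queue_lock);
	}
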
@@ -1967,9 +2032,9 @@ pmap_remove_mapping(pmap, va, pte, flags)
                                        printf("remove: free stab %x\n",
                                               ptpmap->pm_stab);
 #endif
-                               kmem_free(kernel_map,
-                                         (vm_offset_t)ptpmap->pm_stab,
-                                         HP_STSIZE);
+                               kmem_free_wakeup(st_map,
+                                                (vm_offset_t)ptpmap->pm_stab,
+                                                HP_STSIZE);
                                ptpmap->pm_stab = Segtabzero;
                                ptpmap->pm_stpa = Segtabzeropa;
 #if defined(HP380)
@@ -1986,6 +2051,10 @@ pmap_remove_mapping(pmap, va, pte, flags)
                                        PMAP_ACTIVATE(ptpmap,
                                            (struct pcb *)curproc->p_addr, 1);
                        }
+#ifdef DEBUG
+                       else if (ptpmap->pm_sref < 0)
+                               panic("remove: sref < 0");
+#endif
                }
 #if 0
                /*
@@ -2198,14 +2267,14 @@ pmap_enter_ptpage(pmap, va)
 #endif
        /*
         * Allocate a segment table if necessary.  Note that it is allocated
-        * from kernel_map and not pt_map.  This keeps user page tables
+        * from a private map and not pt_map.  This keeps user page tables
         * aligned on segment boundaries in the kernel address space.
         * The segment table is wired down.  It will be freed whenever the
         * reference count drops to zero.
         */
        if (pmap->pm_stab == Segtabzero) {
                pmap->pm_stab = (st_entry_t *)
-                       kmem_alloc(kernel_map, HP_STSIZE);
+                       kmem_alloc(st_map, HP_STSIZE);
                pmap->pm_stpa = (st_entry_t *)
                        pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
 #if defined(HP380)
@@ -2213,7 +2282,7 @@ pmap_enter_ptpage(pmap, va)
 #ifdef DEBUG
                        if (dowriteback && dokwriteback)
 #endif
-                       pmap_changebit((vm_offset_t)pmap->pm_stab, PG_CCB, 0);
+                       pmap_changebit((vm_offset_t)pmap->pm_stpa, PG_CCB, 0);
                        pmap->pm_stfree = protostfree;
                }
 #endif
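
NOTE: the final hunk is a genuine bug fix. pmap_changebit() keys off a
physical address (it walks the pv list for that page), so clearing the
HP380/68040 cache-mode bit PG_CCB on the segment table must use the
table's physical address, not its kernel virtual address. A short sketch
of the distinction:

	st_entry_t *stab = pmap->pm_stab;	/* kernel VA of the table */
	st_entry_t *stpa = pmap->pm_stpa;	/* its physical address */

	/* wrong: treats a kernel VA as a PA, so the wrong page's
	 * pv list (if any) is modified */
	/* pmap_changebit((vm_offset_t)stab, PG_CCB, 0); */

	/* right: turn off copyback caching on the page the MMU
	 * actually walks */
	pmap_changebit((vm_offset_t)stpa, PG_CCB, 0);
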