* Copyright (c) 1992 OMRON Corporation.
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Science Department.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* from: hp300/hp300/pmap.c 8.2 (Berkeley) 11/14/93
* @(#)pmap.c 8.2 (Berkeley) 12/6/93
* LUNA physical map management code taken from:
* HP9000/300 series physical map management code.
* 68030 with on-chip MMU (LUNA-I)
* 68040 with on-chip MMU (LUNA-II)
* Don't even pay lip service to multiprocessor support.
* We assume TLB entries don't have process tags (except for the
* supervisor/user distinction) so we only invalidate TLB entries
* when changing mappings for the current (or kernel) pmap. This is
* technically not true for the 68551 but we flush the TLB on every
* context switch, so it effectively winds up that way.
* Bitwise and/or operations are significantly faster than bitfield
* references so we use them when accessing STE/PTEs in the pmap_pte_*
* macros. Note also that the two are not always equivalent; e.g.:
* (*(int *)pte & PG_PROT) [4] != pte->pg_prot [1]
* and a couple of routines that deal with protection and wiring take
* some shortcuts that assume the and/or definitions.
* This implementation will only work for PAGE_SIZE == NBPG
* Manages physical address maps.
* In addition to hardware address maps, this
* module is called upon to provide software-use-only
* maps which may or may not be stored in the same
* form as hardware maps. These pseudo-maps are
* used to store intermediate results from copy
* operations to and from address spaces.
* Since the information managed by this module is
* also stored by the logical address mapping module,
* this module may throw away valid virtual-to-physical
* mappings at almost any time. However, invalidations
* of virtual-to-physical mappings must be done as
* requested.
* In order to cope with hardware architectures which
* make virtual-to-physical map invalidates expensive,
* this module may delay invalidate or reduced protection
* operations until such time as they are actually
* necessary. This module is given full information as
* to which processors are currently using which maps,
* and to when physical maps must be made correct.
#include <luna68k/luna68k/pte.h>
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int nochange;	/* no change at all */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int pchange;	/* no mapping change, just protection */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_SEGTAB	0x0400
#define PDB_MULTIMAP	0x0800
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000
int dowriteback = 1;	/* 68040: enable writeback caching */
int dokwriteback = 1;	/* 68040: enable writeback caching of kernel AS */

extern vm_offset_t pager_sva, pager_eva;
* Get STEs and PTEs for user/kernel address space
#if defined(M68040)
#define pmap_ste1(m, v) \
	(&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
/* XXX assumes physically contiguous ST pages (if more than one) */
#define pmap_ste2(m, v) \
	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
#define pmap_ste(m, v) \
	(&((m)->pm_stab[(vm_offset_t)(v) \
			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
#define pmap_ste_v(m, v) \
	(mmutype == MMU_68040 \
	 ? ((*(int *)pmap_ste1(m, v) & SG_V) && \
	    (*(int *)pmap_ste2(m, v) & SG_V)) \
	 : (*(int *)pmap_ste(m, v) & SG_V))
#else
#define pmap_ste(m, v)	(&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
#define pmap_ste_v(m, v) (*(int *)pmap_ste(m, v) & SG_V)
#endif
#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
#define pmap_pte_w(pte) (*(int *)(pte) & PG_W)
#define pmap_pte_ci(pte) (*(int *)(pte) & PG_CI)
#define pmap_pte_m(pte) (*(int *)(pte) & PG_M)
#define pmap_pte_u(pte) (*(int *)(pte) & PG_U)
#define pmap_pte_prot(pte) (*(int *)(pte) & PG_PROT)
#define pmap_pte_v(pte) (*(int *)(pte) & PG_V)
#define pmap_pte_set_w(pte, v) \
if (v) *(int *)(pte) |= PG_W; else *(int *)(pte) &= ~PG_W
#define pmap_pte_set_prot(pte, v) \
if (v) *(int *)(pte) |= PG_PROT; else *(int *)(pte) &= ~PG_PROT
#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
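
/*
 * Illustrative sketch only (kept under notdef, never compiled): the two
 * PTE access styles discussed in the notes near the top of this file.
 * The masked-word form used by the pmap_pte_* macros yields the
 * protection bits in place (e.g. 4 for a read-only PTE), while a
 * bit-field reference such as pte->pg_prot would yield the shifted
 * value (e.g. 1); the two are logically, but not numerically,
 * equivalent.  The helper name below is hypothetical.
 */
#ifdef notdef
static int
pmap_pte_is_ro(pte)
	pt_entry_t *pte;
{
	/* masked-word form, same idiom as pmap_pte_prot() above */
	return ((*(int *)pte & PG_PROT) == PG_RO);
}
#endif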
* Given a map and a machine independent protection code,
* convert to a luna protection code.
#define pte_prot(m, p) (protection_codes[p])
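
/*
 * Sketch of how the protection_codes[] table used by pte_prot() is
 * expected to be filled in.  This is an assumption for illustration
 * only: the real initialization is done once by the bootstrap code,
 * not in this file.  It simply maps each VM_PROT_* combination to the
 * hardware PG_RO/PG_RW bits.
 */
#ifdef notdef
static void
luna_protection_init()
{
	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
}
#endif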
* Kernel page table page management.
struct kpt_page {
	struct kpt_page *kpt_next;	/* link on either used or free list */
	vm_offset_t	kpt_va;		/* always valid kernel VA */
	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
};
struct kpt_page *kpt_free_list, *kpt_used_list;
struct kpt_page *kpt_pages;
* Kernel segment/page table and page table map.
* The page table map gives us a level of indirection we need to dynamically
* expand the page table. It is essentially a copy of the segment table
* with PTEs instead of STEs. All are initialized in locore at boot time.
* Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
* Segtabzero is an empty segment table which all processes share until they
* reference one of their own.
pt_entry_t	*Sysmap, *Sysptmap;
st_entry_t	*Segtabzero, *Segtabzeropa;
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;

struct pmap	kernel_pmap_store;
vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
char		*pmap_attributes;	/* reference and modify bits */
int		pmap_aliasmask;	/* separation at which VA aliasing ok */
int		protostfree;	/* prototype (default) free ST map */
void	pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
boolean_t pmap_testbit	__P((vm_offset_t, int));
void	pmap_changebit	__P((vm_offset_t, int, boolean_t));
void	pmap_enter_ptpage __P((pmap_t, vm_offset_t));
void	pmap_pvdump	__P((vm_offset_t));
void	pmap_check_wiring __P((char *, vm_offset_t));
/* pmap_remove_mapping flags */
#define PRM_TFLUSH	1
#define PRM_CFLUSH	2
* Bootstrap memory allocator. This function allows for early dynamic
* memory allocation until the virtual memory system has been bootstrapped.
* After that point, either kmem_alloc or malloc should be used. This
* function works by stealing pages from the (to be) managed page pool,
* stealing virtual address space, then mapping the pages and zeroing them.
* It should be used from pmap_bootstrap till vm_page_startup, afterwards
* it cannot be used, and will generate a panic if tried. Note that this
* memory will never be freed, and in essence it is wired down.
void *
pmap_bootstrap_alloc(size)
	int size;
{
	extern boolean_t vm_page_startup_initialized;
	vm_offset_t val;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");
	size = round_page(size);
	val = virtual_avail;

	virtual_avail = pmap_map(virtual_avail, avail_start,
	    avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
	avail_start += size;

	blkclr((caddr_t) val, size);
	return ((void *) val);
}
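
/*
 * Example use of the bootstrap allocator above (illustration only; the
 * caller and size are placeholders).  Early machine-dependent startup
 * code grabs zeroed, permanently wired memory this way before
 * vm_page_startup() has run.
 */
#ifdef notdef
	char *p;

	p = (char *)pmap_bootstrap_alloc(NBPG);	/* one zeroed, wired page */
#endif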
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in locore.
	addr = (vm_offset_t) Sysmap;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
	    &addr, LUNA_MAX_PTSIZE, FALSE);
* If this fails it is probably because the static portion of
* the kernel page table isn't big enough and we overran the
* page table map. Need to adjust pmap_size() in luna_init.c.
	if (addr != (vm_offset_t)Sysmap)
		panic("pmap_init: bogons in the VM system!\n");

	addr = (vm_offset_t) kstack;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
	    &addr, luna_ptob(UPAGES), FALSE);
	if (addr != (vm_offset_t)kstack)
		panic("pmap_init: bogons in the VM system!\n");
	if (pmapdebug & PDB_INIT) {
		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
		    Sysseg, Sysmap, Sysptmap);
		printf(" pstart %x, pend %x, vstart %x, vend %x\n",
		    avail_start, avail_end, virtual_avail, virtual_end);
	}
* Allocate memory for random pmap data structures. Includes the
* initial segment table, pv_head_table and pmap_attributes.
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (LUNA_STSIZE + sizeof(struct pv_entry) * npg + npg);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	Segtabzero = (st_entry_t *) addr;
	Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
	addr += LUNA_STSIZE;
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes: npg %x s0 %x(%x) tbl %x atr %x\n",
		    s, npg, Segtabzero, Segtabzeropa,
		    pv_table, pmap_attributes);
* Allocate physical memory for kernel PT pages and their management.
* We need 1 PT page per possible task plus some slop.
	npg = min(atop(LUNA_MAX_KPTSIZE), maxproc+16);
	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
* Verify that space will be allocated in region for which
* we already have kernel PT pages.
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
		panic("pmap_init: kernel PT too small");
	vm_map_remove(kernel_map, addr, addr + s);
* Now allocate the space and link the pages together to
* form the KPT free list.
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	kpt_pages = &((struct kpt_page *)addr2)[npg];
	kpt_free_list = (struct kpt_page *) 0;
	(--kpt_pages)->kpt_next = kpt_free_list;
	kpt_free_list = kpt_pages;
	kpt_pages->kpt_va = addr2;
	kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
	kpt_stats.kpttotal = atop(s);
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: KPT: %d pages from %x to %x\n",
		    atop(s), addr, addr + s);
* Slightly modified version of kmem_suballoc() to get page table
* map where we want it.
	s = min(LUNA_PTMAXSIZE, maxproc*LUNA_MAX_PTSIZE);
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot allocate space for PT map");
	pmap_reference(vm_map_pmap(kernel_map));
	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
	if (pt_map == NULL)
		panic("pmap_init: cannot create pt_map");
	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot map range to pt_map");
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
	if (mmutype == MMU_68040) {
		protostfree = ~l2tobm(0);
		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
			protostfree &= ~l2tobm(rv);
	}
* Now it is safe to enable pv_table recording.
	vm_first_phys = phys_start;
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
pmap_map(virt, start, end, prot)
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
	pmap_enter(kernel_pmap, virt, start, prot, FALSE);
* Create and return a physical map.
* If the size specified for the map
* is zero, the map is an actual physical
* map, and may be referenced by the
* hardware.
*
* If the size specified is non-zero,
* the map will be used in software only, and
* is bounded by that size.
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
* Software use map does not need a pmap
/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
	bzero(pmap, sizeof(*pmap));
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
	register struct pmap *pmap;

	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
* No need to allocate page table space yet but we do need a
* valid segment table. Initially, we point everyone at the
* "null" segment table. On the first pmap_enter, a real
* segment table will be allocated.
	pmap->pm_stab = Segtabzero;
	pmap->pm_stpa = Segtabzeropa;
	if (mmutype == MMU_68040)
		pmap->pm_stfree = protostfree;
	pmap->pm_stchanged = TRUE;
	simple_lock_init(&pmap->pm_lock);
* Retire the given physical map from service.
* Should only be called if the map contains
* no valid mappings.
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	free((caddr_t)pmap, M_VMPMAP);
* Release any resources held by the given physical map.
* Called when a pmap initialized by pmap_pinit is being released.
* Should only be called if the map contains no valid mappings.
	register struct pmap *pmap;

	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
	simple_unlock(&pmap->pm_lock);
#endif
	if (pmap->pm_ptab)
		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
		    LUNA_MAX_PTSIZE);
	if (pmap->pm_stab != Segtabzero)
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, LUNA_STSIZE);
* Add a reference to the specified pmap.
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
	simple_lock(&pmap->pm_lock);
	pmap->pm_count++;
	simple_unlock(&pmap->pm_lock);
* Remove the given range of addresses from the specified map.
* It is assumed that the start and end are properly
* rounded to the page size.
pmap_remove(pmap
, sva
, eva
)
register vm_offset_t sva
, eva
;
register vm_offset_t nssva
;
register pt_entry_t
*pte
;
boolean_t firstpage
, needcflush
;
if (pmapdebug
& (PDB_FOLLOW
|PDB_REMOVE
|PDB_PROTECT
))
printf("pmap_remove(%x, %x, %x)\n", pmap
, sva
, eva
);
flags
= active_pmap(pmap
) ? PRM_TFLUSH
: 0;
nssva
= luna_trunc_seg(sva
) + LUNA_SEG_SIZE
;
if (nssva
== 0 || nssva
> eva
)
* If VA belongs to an unallocated segment,
* skip to the next segment boundary.
if (!pmap_ste_v(pmap
, sva
)) {
* Invalidate every valid mapping within this segment.
pte
= pmap_pte(pmap
, sva
);
* Purge kernel side of VAC to ensure
* we get the correct state of any
* hardware maintained bits.
* Remember if we may need to
* flush the VAC due to a non-CI
if (!needcflush
&& !pmap_pte_ci(pte
))
pmap_remove_mapping(pmap
, sva
, pte
, flags
);
* Didn't do anything, no need for cache flushes
* In a couple of cases, we don't need to worry about flushing
* 1. if this is a kernel mapping,
* we have already done it
* 2. if it is a user mapping not for the current process,
(pmap
== kernel_pmap
|| pmap
!= curproc
->p_vmspace
->vm_map
.pmap
))
if (pmap_aliasmask
&& (pmapvacflush
& PVF_REMOVE
)) {
if (pmapvacflush
& PVF_TOTAL
)
else if (pmap
== kernel_pmap
)
if (pmap
== kernel_pmap
) {
* Lower the permission for all mappings to a given page.
pmap_page_protect(pa
, prot
)
if ((pmapdebug
& (PDB_FOLLOW
|PDB_PROTECT
)) ||
prot
== VM_PROT_NONE
&& (pmapdebug
& PDB_REMOVE
))
printf("pmap_page_protect(%x, %x)\n", pa
, prot
);
if (pa
< vm_first_phys
|| pa
>= vm_last_phys
)
case VM_PROT_READ
|VM_PROT_WRITE
:
case VM_PROT_READ
|VM_PROT_EXECUTE
:
pmap_changebit(pa
, PG_RO
, TRUE
);
while (pv
->pv_pmap
!= NULL
) {
if (!pmap_ste_v(pv
->pv_pmap
, pv
->pv_va
) ||
pmap_pte_pa(pmap_pte(pv
->pv_pmap
,pv
->pv_va
)) != pa
)
panic("pmap_page_protect: bad mapping");
pmap_remove_mapping(pv
->pv_pmap
, pv
->pv_va
,
* Set the physical protection on the
* specified range of this map as requested.
pmap_protect(pmap
, sva
, eva
, prot
)
register vm_offset_t sva
, eva
;
register vm_offset_t nssva
;
register pt_entry_t
*pte
;
boolean_t firstpage
, needtflush
;
if (pmapdebug
& (PDB_FOLLOW
|PDB_PROTECT
))
printf("pmap_protect(%x, %x, %x, %x)\n", pmap
, sva
, eva
, prot
);
if ((prot
& VM_PROT_READ
) == VM_PROT_NONE
) {
pmap_remove(pmap
, sva
, eva
);
if (prot
& VM_PROT_WRITE
)
isro
= pte_prot(pmap
, prot
);
needtflush
= active_pmap(pmap
);
nssva
= luna_trunc_seg(sva
) + LUNA_SEG_SIZE
;
if (nssva
== 0 || nssva
> eva
)
* If VA belongs to an unallocated segment,
* skip to the next segment boundary.
if (!pmap_ste_v(pmap
, sva
)) {
* Change protection on mapping if it is valid and doesn't
* already have the correct protection.
pte
= pmap_pte(pmap
, sva
);
if (pmap_pte_v(pte
) && pmap_pte_prot_chg(pte
, isro
)) {
* Purge kernel side of VAC to ensure we
* get the correct state of any hardware
* XXX do we need to clear the VAC in
* general to reflect the new protection?
if (firstpage
&& pmap_aliasmask
)
* Clear caches if making RO (see section
* "7.3 Cache Coherency" in the manual).
if (isro
&& mmutype
== MMU_68040
) {
vm_offset_t pa
= pmap_pte_pa(pte
);
pmap_pte_set_prot(pte
, isro
);
else if (pmap_pte_v(pte
)) {
protect_stats
.alreadyro
++;
protect_stats
.alreadyrw
++;
#if defined(HAVEVAC) && defined(DEBUG)
if (pmap_aliasmask
&& (pmapvacflush
& PVF_PROTECT
)) {
if (pmapvacflush
& PVF_TOTAL
)
else if (pmap
== kernel_pmap
)
* Insert the given physical page (p) at
* the specified virtual address (v) in the
* target physical map with the protection requested.
* If specified, the page will be wired down, meaning
* that the related pte can not be reclaimed.
* NB: This is the only routine which MAY NOT lazy-evaluate
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
pmap_enter(pmap
, va
, pa
, prot
, wired
)
register pt_entry_t
*pte
;
boolean_t cacheable
= TRUE
;
boolean_t checkpv
= TRUE
;
if (pmapdebug
& (PDB_FOLLOW
|PDB_ENTER
))
printf("pmap_enter(%x, %x, %x, %x, %x)\n",
pmap
, va
, pa
, prot
, wired
);
* For user mapping, allocate kernel VM resources if necessary.
if (pmap
->pm_ptab
== NULL
)
pmap
->pm_ptab
= (pt_entry_t
*)
kmem_alloc_wait(pt_map
, LUNA_MAX_PTSIZE
);
* Segment table entry not valid, we need a new PT page
if (!pmap_ste_v(pmap
, va
))
pmap_enter_ptpage(pmap
, va
);
pa
= luna_trunc_page(pa
);
pte
= pmap_pte(pmap
, va
);
if (pmapdebug
& PDB_ENTER
)
printf("enter: pte %x, *pte %x\n", pte
, *(int *)pte
);
* Mapping has not changed, must be protection or wiring change.
* Wiring change, just update stats.
* We don't worry about wiring PT pages as they remain
* resident as long as there are valid mappings in them.
* Hence, if a user page is wired, the PT page will be also.
if (pmap_pte_w_chg(pte
, wired
? PG_W
: 0)) {
if (pmapdebug
& PDB_ENTER
)
printf("enter: wiring change -> %x\n", wired
);
pmap
->pm_stats
.wired_count
++;
pmap
->pm_stats
.wired_count
--;
if (pmap_pte_prot(pte
) == pte_prot(pmap
, prot
))
else if (pmap_pte_prot(pte
) != pte_prot(pmap
, prot
))
* Retain cache inhibition status
* Mapping has changed, invalidate old range and fall through to
* handle validating new mapping.
if (pmapdebug
& PDB_ENTER
)
printf("enter: removing old mapping %x\n", va
);
pmap_remove_mapping(pmap
, va
, pte
, PRM_TFLUSH
|PRM_CFLUSH
);
* If this is a new user mapping, increment the wiring count
* on this PT page. PT pages are wired down as long as there
* is a valid mapping in the page.
(void) vm_map_pageable(pt_map
, trunc_page(pte
),
round_page(pte
+1), FALSE
);
* Enter on the PV list if part of our managed memory
* Note that we raise IPL while manipulating pv_table
* since pmap_enter can be called at interrupt time.
if (pa
>= vm_first_phys
&& pa
< vm_last_phys
) {
register pv_entry_t pv
, npv
;
if (pmapdebug
& PDB_ENTER
)
printf("enter: pv at %x: %x/%x/%x\n",
pv
, pv
->pv_va
, pv
->pv_pmap
, pv
->pv_next
);
* No entries yet, use header as the first entry
if (pv
->pv_pmap
== NULL
) {
* There is at least one other VA mapping this page.
* Place this entry after the header.
for (npv
= pv
; npv
; npv
= npv
->pv_next
)
if (pmap
== npv
->pv_pmap
&& va
== npv
->pv_va
)
panic("pmap_enter: already in pv_tab");
malloc(sizeof *npv
, M_VMPVENT
, M_NOWAIT
);
npv
->pv_next
= pv
->pv_next
;
* Since there is another logical mapping for the
* same page we may need to cache-inhibit the
* descriptors on those CPUs with external VACs.
* We don't need to CI if:
* - No two mappings belong to the same user pmaps.
* Since the cache is flushed on context switches
* there is no problem between user processes.
* - Mappings within a single pmap are a certain
* magic distance apart. VAs at these appropriate
* boundaries map to the same cache entries or
* otherwise don't conflict.
* To keep it simple, we only check for these special
* cases if there are only two mappings, otherwise we
* Note that there are no aliasing problems with the
* on-chip data-cache when the WA bit is set.
if (pv
->pv_flags
& PV_CI
) {
if (pmapdebug
& PDB_CACHE
)
printf("enter: pa %x already CI'ed\n",
checkpv
= cacheable
= FALSE
;
} else if (npv
->pv_next
||
pv
->pv_pmap
== kernel_pmap
) &&
((pv
->pv_va
& pmap_aliasmask
) !=
(va
& pmap_aliasmask
)))) {
if (pmapdebug
& PDB_CACHE
)
printf("enter: pa %x CI'ing all\n",
* Assumption: if it is not part of our managed memory
* then it must be device memory which may be volatile.
else if (pmap_initialized
) {
checkpv
= cacheable
= FALSE
;
pmap
->pm_stats
.resident_count
++;
pmap
->pm_stats
.wired_count
++;
* Purge kernel side of VAC to ensure we get correct state
* of HW bits so we don't clobber them.
npte
= pa
| pte_prot(pmap
, prot
) | (*(int *)pte
& (PG_M
|PG_U
)) | PG_V
;
if (!checkpv
&& !cacheable
)
if (mmutype
== MMU_68040
&& (npte
& (PG_PROT
|PG_CI
)) == PG_RW
)
if (dowriteback
&& (dokwriteback
|| pmap
!= kernel_pmap
))
if (pmapdebug
& PDB_ENTER
)
printf("enter: new pte value %x\n", npte
);
* Remember if this was a wiring-only change.
* If so, we need not flush the TLB and caches.
wired
= ((*(int *)pte
^ npte
) == PG_W
);
if (mmutype
== MMU_68040
&& !wired
) {
if (!wired
&& active_pmap(pmap
))
* The following is executed if we are entering a second
* (or greater) mapping for a physical page and the mappings
* may create an aliasing problem. In this case we must
* cache inhibit the descriptors involved and flush any
if (checkpv
&& !cacheable
) {
pmap_changebit(pa
, PG_CI
, TRUE
);
if ((pmapdebug
& (PDB_CACHE
|PDB_PVDUMP
)) ==
else if (pmapvacflush
& PVF_ENTER
) {
if (pmapvacflush
& PVF_TOTAL
)
else if (pmap
== kernel_pmap
)
if ((pmapdebug
& PDB_WIRING
) && pmap
!= kernel_pmap
)
pmap_check_wiring("enter", trunc_page(pmap_pte(pmap
, va
)));
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
* pair.
* The mapping must already exist in the pmap.
pmap_change_wiring(pmap
, va
, wired
)
register pt_entry_t
*pte
;
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_change_wiring(%x, %x, %x)\n", pmap
, va
, wired
);
pte
= pmap_pte(pmap
, va
);
* Page table page is not allocated.
* Should this ever happen? Ignore it for now,
* we don't want to force allocation of unnecessary PTE pages.
if (!pmap_ste_v(pmap
, va
)) {
if (pmapdebug
& PDB_PARANOIA
)
printf("pmap_change_wiring: invalid STE for %x\n", va
);
* Page not valid. Should this ever happen?
* Just continue and change wiring anyway.
if (pmapdebug
& PDB_PARANOIA
)
printf("pmap_change_wiring: invalid PTE for %x\n", va
);
* If wiring actually changed (always?) set the wire bit and
* update the wire count. Note that wiring is not a hardware
* characteristic so there is no need to invalidate the TLB.
if (pmap_pte_w_chg(pte
, wired
? PG_W
: 0)) {
pmap_pte_set_w(pte
, wired
);
pmap
->pm_stats
.wired_count
++;
pmap
->pm_stats
.wired_count
--;
* Extract the physical page address associated
* with the given map/virtual_address pair.
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_extract(%x, %x) -> ", pmap
, va
);
if (pmap
&& pmap_ste_v(pmap
, va
))
pa
= *(int *)pmap_pte(pmap
, va
);
pa
= (pa
& PG_FRAME
) | (va
& ~PG_FRAME
);
if (pmapdebug
& PDB_FOLLOW
)
* Copy the range specified by src_addr/len
* from the source map to the range dst_addr/len
* in the destination map.
* This routine is only advisory and need not do anything.
void pmap_copy(dst_pmap
, src_pmap
, dst_addr
, len
, src_addr
)
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_copy(%x, %x, %x, %x, %x)\n",
dst_pmap
, src_pmap
, dst_addr
, len
, src_addr
);
* Require that all active physical maps contain no
* incorrect entries NOW. [This update includes
* forcing updates of any address map caching.]
* Generally used to insure that a thread about
* to run will see a semantically correct world.
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_update()\n");
* Garbage collects the physical map system for
* pages which are no longer used.
* Success need not be guaranteed -- that is, there
* may well be pages which are not referenced, but
* others may be collected.
* Called by the pageout daemon when pages are scarce.
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_collect(%x)\n", pmap
);
kpt_stats
.collectscans
++;
for (pa
= vm_first_phys
; pa
< vm_last_phys
; pa
+= PAGE_SIZE
) {
register struct kpt_page
*kpt
, **pkpt
;
* Locate physical pages which are being used as kernel
if (pv
->pv_pmap
!= kernel_pmap
|| !(pv
->pv_flags
& PV_PTPAGE
))
if (pv
->pv_ptste
&& pv
->pv_ptpmap
== kernel_pmap
)
} while (pv
= pv
->pv_next
);
if (pv
->pv_va
< (vm_offset_t
)Sysmap
||
pv
->pv_va
>= (vm_offset_t
)Sysmap
+ LUNA_MAX_PTSIZE
)
printf("collect: kernel PT VA out of range\n");
pte
= (int *)(pv
->pv_va
+ LUNA_PAGE_SIZE
);
while (--pte
>= (int *)pv
->pv_va
&& *pte
== PG_NV
)
if (pte
>= (int *)pv
->pv_va
)
if (pmapdebug
& (PDB_PTPAGE
|PDB_COLLECT
)) {
printf("collect: freeing KPT page at %x (ste %x@%x)\n",
pv
->pv_va
, *(int *)pv
->pv_ptste
, pv
->pv_ptste
);
ste
= (int *)pv
->pv_ptste
;
* If all entries were invalid we can remove the page.
* We call pmap_remove_entry to take care of invalidating
* ST and Sysptmap entries.
kpa
= pmap_extract(pmap
, pv
->pv_va
);
pmap_remove_mapping(pmap
, pv
->pv_va
, PT_ENTRY_NULL
,
* Use the physical address to locate the original
* (kmem_alloc assigned) address for the page and put
* that page back on the free list.
for (pkpt
= &kpt_used_list
, kpt
= *pkpt
;
kpt
!= (struct kpt_page
*)0;
pkpt
= &kpt
->kpt_next
, kpt
= *pkpt
)
if (kpt
== (struct kpt_page
*)0)
panic("pmap_collect: lost a KPT page");
if (pmapdebug
& (PDB_PTPAGE
|PDB_COLLECT
))
printf("collect: %x (%x) to free list\n",
kpt
->kpt_next
= kpt_free_list
;
kpt_stats
.collectpages
++;
if (pmapdebug
& (PDB_PTPAGE
|PDB_COLLECT
))
printf("collect: kernel STE at %x still valid (%x)\n",
ste
= (int *)&Sysptmap
[(st_entry_t
*)ste
-pmap_ste(kernel_pmap
, 0)];
printf("collect: kernel PTmap at %x still valid (%x)\n",
pmap_activate(pmap
, pcbp
)
if (pmapdebug
& (PDB_FOLLOW
|PDB_SEGTAB
))
printf("pmap_activate(%x, %x)\n", pmap
, pcbp
);
PMAP_ACTIVATE(pmap
, pcbp
, pmap
== curproc
->p_vmspace
->vm_map
.pmap
);
* pmap_zero_page zeros the specified (machine independent)
* page by mapping the page into virtual memory and using
* bzero to clear its contents, one machine dependent page at a time.
* XXX this is a bad implementation for virtual cache machines
* (320/350) because pmap_enter doesn't cache-inhibit the temporary
* kernel mapping and we wind up with data cached for that KVA.
* It is probably a win for physical cache machines (370/380)
* as the cache loading is not wasted.
register vm_offset_t kva
;
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_zero_page(%x)\n", phys
);
kva
= (vm_offset_t
) CADDR1
;
pmap_enter(kernel_pmap
, kva
, phys
, VM_PROT_READ
|VM_PROT_WRITE
, TRUE
);
bzero((caddr_t
)kva
, LUNA_PAGE_SIZE
);
pmap_remove_mapping(kernel_pmap
, kva
, PT_ENTRY_NULL
,
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using
* bcopy to copy the page, one machine dependent page at a time.
* XXX this is a bad implementation for virtual cache machines
* (320/350) because pmap_enter doesn't cache-inhibit the temporary
* kernel mapping and we wind up with data cached for that KVA.
* It is probably a win for physical cache machines (370/380)
* as the cache loading is not wasted.
register vm_offset_t skva
, dkva
;
extern caddr_t CADDR1
, CADDR2
;
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_copy_page(%x, %x)\n", src
, dst
);
skva
= (vm_offset_t
) CADDR1
;
dkva
= (vm_offset_t
) CADDR2
;
pmap_enter(kernel_pmap
, skva
, src
, VM_PROT_READ
, TRUE
);
pmap_enter(kernel_pmap
, dkva
, dst
, VM_PROT_READ
|VM_PROT_WRITE
, TRUE
);
copypage((caddr_t
)skva
, (caddr_t
)dkva
);
/* CADDR1 and CADDR2 are virtually contiguous */
pmap_remove(kernel_pmap
, skva
, skva
+2*PAGE_SIZE
);
* Make the specified pages (by pmap, offset)
* pageable (or not) as requested.
* A page which is not pageable may not take
* a fault; therefore, its page table entry
* must remain valid for the duration.
* This routine is merely advisory; pmap_enter
* will specify that these pages are to be wired
* down (or not) as appropriate.
pmap_pageable(pmap
, sva
, eva
, pageable
)
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_pageable(%x, %x, %x, %x)\n",
pmap
, sva
, eva
, pageable
);
* If we are making a PT page pageable then all valid
* mappings must be gone from that page. Hence it should
* be all zeros and there is no need to clean it.
* - we are called with only one page at a time
* - PT pages have only one pv_table entry
if (pmap
== kernel_pmap
&& pageable
&& sva
+ PAGE_SIZE
== eva
) {
if ((pmapdebug
& (PDB_FOLLOW
|PDB_PTPAGE
)) == PDB_PTPAGE
)
printf("pmap_pageable(%x, %x, %x, %x)\n",
pmap
, sva
, eva
, pageable
);
if (!pmap_ste_v(pmap
, sva
))
pa
= pmap_pte_pa(pmap_pte(pmap
, sva
));
if (pa
< vm_first_phys
|| pa
>= vm_last_phys
)
if (pv
->pv_ptste
== NULL
)
if (pv
->pv_va
!= sva
|| pv
->pv_next
) {
printf("pmap_pageable: bad PT page va %x next %x\n",
* Mark it unmodified to avoid pageout
pmap_changebit(pa
, PG_M
, FALSE
);
if (pmapdebug
& PDB_PTPAGE
)
printf("pmap_pageable: PT page %x(%x) unmodified\n",
sva
, *(int *)pmap_pte(pmap
, sva
));
if (pmapdebug
& PDB_WIRING
)
pmap_check_wiring("pageable", sva
);
* Clear the modify bits on the specified physical page.
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_clear_modify(%x)\n", pa
);
pmap_changebit(pa
, PG_M
, FALSE
);
* Clear the reference bit on the specified physical page.
void pmap_clear_reference(pa
)
if (pmapdebug
& PDB_FOLLOW
)
printf("pmap_clear_reference(%x)\n", pa
);
pmap_changebit(pa
, PG_U
, FALSE
);
* Return whether or not the specified physical page is referenced
if (pmapdebug
& PDB_FOLLOW
) {
boolean_t rv
= pmap_testbit(pa
, PG_U
);
printf("pmap_is_referenced(%x) -> %c\n", pa
, "FT"[rv
]);
return(pmap_testbit(pa
, PG_U
));
* Return whether or not the specified physical page is modified
if (pmapdebug
& PDB_FOLLOW
) {
boolean_t rv
= pmap_testbit(pa
, PG_M
);
printf("pmap_is_modified(%x) -> %c\n", pa
, "FT"[rv
]);
return(pmap_testbit(pa
, PG_M
));
* Miscellaneous support routines follow
* Invalidate a single page denoted by pmap/va.
* If (pte != NULL), it is the already computed PTE for the page.
* If (flags & PRM_TFLUSH), we must invalidate any TLB information.
* If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
pmap_remove_mapping(pmap
, va
, pte
, flags
)
register pt_entry_t
*pte
;
register pv_entry_t pv
, npv
;
if (pmapdebug
& (PDB_FOLLOW
|PDB_REMOVE
|PDB_PROTECT
))
printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
* PTE not provided, compute it from pmap and va.
if (pte
== PT_ENTRY_NULL
) {
pte
= pmap_pte(pmap
, va
);
if (*(int *)pte
== PG_NV
)
if (pmap_aliasmask
&& (flags
& PRM_CFLUSH
)) {
* Purge kernel side of VAC to ensure we get the correct
* state of any hardware maintained bits.
* If this is a non-CI user mapping for the current process,
* flush the VAC. Note that the kernel side was flushed
* above so we don't worry about non-CI kernel mappings.
if (pmap
== curproc
->p_vmspace
->vm_map
.pmap
&&
pmap
->pm_stats
.wired_count
--;
pmap
->pm_stats
.resident_count
--;
* Invalidate the PTE after saving the reference modify info.
if (pmapdebug
& PDB_REMOVE
)
printf("remove: invalidating pte at %x\n", pte
);
bits
= *(int *)pte
& (PG_U
|PG_M
);
if ((flags
& PRM_TFLUSH
) && active_pmap(pmap
))
* For user mappings decrement the wiring count on
* the PT page. We do this after the PTE has been
* invalidated because vm_map_pageable winds up in
* pmap_pageable which clears the modify bit for the
if (pmap
!= kernel_pmap
) {
(void) vm_map_pageable(pt_map
, trunc_page(pte
),
round_page(pte
+1), TRUE
);
if (pmapdebug
& PDB_WIRING
)
pmap_check_wiring("remove", trunc_page(pte
));
* If this isn't a managed page, we are all done.
if (pa
< vm_first_phys
|| pa
>= vm_last_phys
)
* Otherwise remove it from the PV table
* (raise IPL since we may be called at interrupt time).
* If it is the first entry on the list, it is actually
* in the header and we must copy the following entry up
* to the header. Otherwise we must search the list for
* the entry. In either case we free the now unused entry.
if (pmap
== pv
->pv_pmap
&& va
== pv
->pv_va
) {
ste
= (int *)pv
->pv_ptste
;
npv
->pv_flags
= pv
->pv_flags
;
free((caddr_t
)npv
, M_VMPVENT
);
for (npv
= pv
->pv_next
; npv
; npv
= npv
->pv_next
) {
if (pmap
== npv
->pv_pmap
&& va
== npv
->pv_va
)
panic("pmap_remove: PA not in pv_tab");
ste
= (int *)npv
->pv_ptste
;
pv
->pv_next
= npv
->pv_next
;
free((caddr_t
)npv
, M_VMPVENT
);
* If only one mapping left we no longer need to cache inhibit
pv
->pv_pmap
&& pv
->pv_next
== NULL
&& (pv
->pv_flags
& PV_CI
)) {
if (pmapdebug
& PDB_CACHE
)
printf("remove: clearing CI for pa %x\n", pa
);
pmap_changebit(pa
, PG_CI
, FALSE
);
if ((pmapdebug
& (PDB_CACHE
|PDB_PVDUMP
)) ==
* If this was a PT page we must also remove the
* mapping from the associated segment table.
remove_stats
.ptinvalid
++;
if (pmapdebug
& (PDB_REMOVE
|PDB_PTPAGE
))
printf("remove: ste was %x@%x pte was %x@%x\n",
*ste
, ste
, *(int *)&opte
, pmap_pte(pmap
, va
));
if (mmutype
== MMU_68040
) {
int *este
= &ste
[NPTEPG
/SG4_LEV3SIZE
];
ste
-= NPTEPG
/SG4_LEV3SIZE
;
* If it was a user PT page, we decrement the
* reference count on the segment table as well,
* freeing it if it is now empty.
if (ptpmap
!= kernel_pmap
) {
if (pmapdebug
& (PDB_REMOVE
|PDB_SEGTAB
))
printf("remove: stab %x, refcnt %d\n",
ptpmap
->pm_stab
, ptpmap
->pm_sref
- 1);
if ((pmapdebug
& PDB_PARANOIA
) &&
ptpmap
->pm_stab
!= (st_entry_t
*)trunc_page(ste
))
panic("remove: bogus ste");
if (--(ptpmap
->pm_sref
) == 0) {
if (pmapdebug
&(PDB_REMOVE
|PDB_SEGTAB
))
printf("remove: free stab %x\n",
(vm_offset_t
)ptpmap
->pm_stab
,
ptpmap
->pm_stab
= Segtabzero
;
ptpmap
->pm_stpa
= Segtabzeropa
;
if (mmutype
== MMU_68040
)
ptpmap
->pm_stfree
= protostfree
;
ptpmap
->pm_stchanged
= TRUE
;
* XXX may have changed segment table
* pointer for current process so
* update now to reload hardware.
if (ptpmap
== curproc
->p_vmspace
->vm_map
.pmap
)
(struct pcb
*)curproc
->p_addr
, 1);
* XXX this should be unnecessary as we have been
* flushing individual mappings as we go.
if (ptpmap
== kernel_pmap
)
pv
->pv_flags
&= ~PV_PTPAGE
;
* Update saved attributes for managed page
pmap_attributes
[pa_index(pa
)] |= bits
;
if (pa
< vm_first_phys
|| pa
>= vm_last_phys
)
if (pmap_attributes
[pa_index(pa
)] & bit
) {
* Flush VAC to get correct state of any hardware maintained bits.
if (pmap_aliasmask
&& (bit
& (PG_U
|PG_M
)))
* Not found, check current mappings returning
if (pv
->pv_pmap
!= NULL
) {
for (; pv
; pv
= pv
->pv_next
) {
pte
= (int *) pmap_pte(pv
->pv_pmap
, pv
->pv_va
);
pmap_changebit(pa
, bit
, setem
)
boolean_t firstpage
= TRUE
;
if (pmapdebug
& PDB_BITS
)
printf("pmap_changebit(%x, %x, %s)\n",
pa
, bit
, setem
? "set" : "clear");
if (pa
< vm_first_phys
|| pa
>= vm_last_phys
)
chgp
= &changebit_stats
[(bit
>>2)-1];
* Clear saved attributes (modify, reference)
pmap_attributes
[pa_index(pa
)] &= ~bit
;
* Loop over all current mappings setting/clearing as appropriate.
* If setting RO do we need to clear the VAC?
if (pv
->pv_pmap
!= NULL
) {
for (; pv
; pv
= pv
->pv_next
) {
toflush
|= (pv
->pv_pmap
== kernel_pmap
) ? 2 : 1;
* XXX don't write protect pager mappings
extern vm_offset_t pager_sva
, pager_eva
;
if (va
>= pager_sva
&& va
< pager_eva
)
pte
= (int *) pmap_pte(pv
->pv_pmap
, va
);
* Flush VAC to ensure we get correct state of HW bits
* so we don't clobber them.
if (firstpage
&& pmap_aliasmask
) {
* If we are changing caching status or
* protection make sure the caches are
* flushed (but only once).
if (firstpage
&& mmutype
== MMU_68040
&&
(bit
== PG_RO
&& setem
||
if (active_pmap(pv
->pv_pmap
))
#if defined(HAVEVAC) && defined(DEBUG)
if (setem
&& bit
== PG_RO
&& (pmapvacflush
& PVF_PROTECT
)) {
if ((pmapvacflush
& PVF_TOTAL
) || toflush
== 3)
pmap_enter_ptpage(pmap
, va
)
register vm_offset_t ptpa
;
if (pmapdebug
& (PDB_FOLLOW
|PDB_ENTER
|PDB_PTPAGE
))
printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap
, va
);
* Allocate a segment table if necessary. Note that it is allocated
* from kernel_map and not pt_map. This keeps user page tables
* aligned on segment boundaries in the kernel address space.
* The segment table is wired down. It will be freed whenever the
* reference count drops to zero.
if (pmap
->pm_stab
== Segtabzero
) {
pmap
->pm_stab
= (st_entry_t
*)
kmem_alloc(kernel_map
, LUNA_STSIZE
);
pmap
->pm_stpa
= (st_entry_t
*)
pmap_extract(kernel_pmap
, (vm_offset_t
)pmap
->pm_stab
);
if (mmutype
== MMU_68040
) {
if (dowriteback
&& dokwriteback
)
pmap_changebit((vm_offset_t
)pmap
->pm_stab
, PG_CCB
, 0);
pmap
->pm_stfree
= protostfree
;
pmap
->pm_stchanged
= TRUE
;
* XXX may have changed segment table pointer for current
* process so update now to reload hardware.
if (pmap
== curproc
->p_vmspace
->vm_map
.pmap
)
PMAP_ACTIVATE(pmap
, (struct pcb
*)curproc
->p_addr
, 1);
if (pmapdebug
& (PDB_ENTER
|PDB_PTPAGE
|PDB_SEGTAB
))
printf("enter: pmap %x stab %x(%x)\n",
pmap
, pmap
->pm_stab
, pmap
->pm_stpa
);
ste
= pmap_ste(pmap
, va
);
* Allocate level 2 descriptor block if necessary
if (mmutype
== MMU_68040
) {
ix
= bmtol2(pmap
->pm_stfree
);
panic("enter: out of address space"); /* XXX */
pmap
->pm_stfree
&= ~l2tobm(ix
);
addr
= (caddr_t
)&pmap
->pm_stab
[ix
*SG4_LEV2SIZE
];
bzero(addr
, SG4_LEV2SIZE
*sizeof(st_entry_t
));
addr
= (caddr_t
)&pmap
->pm_stpa
[ix
*SG4_LEV2SIZE
];
*(int *)ste
= (u_int
)addr
| SG_RW
| SG_U
| SG_V
;
if (pmapdebug
& (PDB_ENTER
|PDB_PTPAGE
|PDB_SEGTAB
))
printf("enter: alloc ste2 %d(%x)\n", ix
, addr
);
ste
= pmap_ste2(pmap
, va
);
* Since a level 2 descriptor maps a block of SG4_LEV3SIZE
* level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
* (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
* PT page--the unit of allocation. We set `ste' to point
* to the first entry of that chunk which is validated in its
ste
= (st_entry_t
*)((int)ste
& ~(NBPG
/SG4_LEV3SIZE
-1));
if (pmapdebug
& (PDB_ENTER
|PDB_PTPAGE
|PDB_SEGTAB
))
printf("enter: ste2 %x (%x)\n",
pmap_ste2(pmap
, va
), ste
);
va
= trunc_page((vm_offset_t
)pmap_pte(pmap
, va
));
* In the kernel we allocate a page from the kernel PT page
* free list and map it into the kernel page table map (via
if (pmap
== kernel_pmap
) {
register struct kpt_page
*kpt
;
if ((kpt
= kpt_free_list
) == (struct kpt_page
*)0) {
* Try once to free up unused ones.
if (pmapdebug
& PDB_COLLECT
)
printf("enter: no KPT pages, collecting...\n");
pmap_collect(kernel_pmap
);
if ((kpt
= kpt_free_list
) == (struct kpt_page
*)0)
panic("pmap_enter_ptpage: can't get KPT page");
if (++kpt_stats
.kptinuse
> kpt_stats
.kptmaxuse
)
kpt_stats
.kptmaxuse
= kpt_stats
.kptinuse
;
kpt_free_list
= kpt
->kpt_next
;
kpt
->kpt_next
= kpt_used_list
;
bzero((caddr_t
)kpt
->kpt_va
, LUNA_PAGE_SIZE
);
pmap_enter(pmap
, va
, ptpa
, VM_PROT_DEFAULT
, TRUE
);
if (pmapdebug
& (PDB_ENTER
|PDB_PTPAGE
)) {
int ix
= pmap_ste(pmap
, va
) - pmap_ste(pmap
, 0);
printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
ix
, *(int *)&Sysptmap
[ix
], kpt
->kpt_va
);
* For user processes we just simulate a fault on that location
* letting the VM system allocate a zero-filled page.
if (pmapdebug
& (PDB_ENTER
|PDB_PTPAGE
))
printf("enter: about to fault UPT pg at %x\n", va
);
s
= vm_fault(pt_map
, va
, VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va
, s
);
panic("pmap_enter: vm_fault failed");
ptpa
= pmap_extract(kernel_pmap
, va
);
* Mark the page clean now to avoid its pageout (and
* hence creation of a pager) between now and when it
* is wired; i.e. while it is on a paging queue.
PHYS_TO_VM_PAGE(ptpa
)->flags
|= PG_CLEAN
;
PHYS_TO_VM_PAGE(ptpa
)->flags
|= PG_PTPAGE
;
* Turn off copyback caching of page table pages,
* could get ugly otherwise.
if (dowriteback
&& dokwriteback
)
if (mmutype
== MMU_68040
) {
int *pte
= (int *)pmap_pte(kernel_pmap
, va
);
if ((pmapdebug
& PDB_PARANOIA
) && (*pte
& PG_CCB
) == 0)
printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
pmap
== kernel_pmap
? "Kernel" : "User",
pmap_changebit(ptpa
, PG_CCB
, 0);
* Locate the PV entry in the kernel for this PT page and
* record the STE address. This is so that we can invalidate
* the STE when we remove the mapping for the page.
pv
->pv_flags
|= PV_PTPAGE
;
if (pv
->pv_pmap
== kernel_pmap
&& pv
->pv_va
== va
)
} while (pv
= pv
->pv_next
);
panic("pmap_enter_ptpage: PT page not entered");
if (pmapdebug
& (PDB_ENTER
|PDB_PTPAGE
))
printf("enter: new PT page at PA %x, ste at %x\n", ptpa
, ste
);
* Map the new PT page into the segment table.
* Also increment the reference count on the segment table if this
* was a user page table page. Note that we don't use vm_map_pageable
* to keep the count like we do for PT pages, this is mostly because
* it would be difficult to identify ST pages in pmap_pageable to
* release them. We also avoid the overhead of vm_map_pageable.
if (mmutype
== MMU_68040
) {
for (este
= &ste
[NPTEPG
/SG4_LEV3SIZE
]; ste
< este
; ste
++) {
*(int *)ste
= ptpa
| SG_U
| SG_RW
| SG_V
;
ptpa
+= SG4_LEV3SIZE
* sizeof(st_entry_t
);
*(int *)ste
= (ptpa
& SG_FRAME
) | SG_RW
| SG_V
;
if (pmap
!= kernel_pmap
) {
if (pmapdebug
& (PDB_ENTER
|PDB_PTPAGE
|PDB_SEGTAB
))
printf("enter: stab %x refcnt %d\n",
pmap
->pm_stab
, pmap
->pm_sref
);
for (pv
= pa_to_pvh(pa
); pv
; pv
= pv
->pv_next
)
printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
pv
->pv_pmap
, pv
->pv_va
, pv
->pv_ptste
, pv
->pv_ptpmap
,
pmap_check_wiring(str
, va
)
register int count
, *pte
;
if (!pmap_ste_v(kernel_pmap
, va
) ||
!pmap_pte_v(pmap_pte(kernel_pmap
, va
)))
if (!vm_map_lookup_entry(pt_map
, va
, &entry
)) {
printf("wired_check: entry for %x not found\n", va
);
for (pte
= (int *)va
; pte
< (int *)(va
+PAGE_SIZE
); pte
++)
if (entry
->wired_count
!= count
)
printf("*%s*: %x: w%d/a%d\n",
str
, va
, entry
->wired_count
, count
);