/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */

/*
 *	Reno i386 version, from Mike Hibler's hp300 version.
 */
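/*
 * Illustrative sketch (not part of the original text): because one pde
 * points back at the page directory, all of a process' page tables appear
 * in a single fixed window of virtual address space, and the pte for any
 * virtual address can be found by indexing rather than by a table walk
 * through physical memory:
 *
 *	pte = PTmap + i386_btop(va);
 *
 * where PTmap is the base of the window mapped through the recursive pde.
 * This is what vtopte(), used by pmap_pte() below, computes.
 */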
/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */
#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"
/*#include "vm/vm_pageout.h"*/
/*#include "machine/isa.h"*/
/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
/* statistics on calls to pmap_enter (struct name assumed from the hp300
   original; the per-field comments survive intact) */
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;

int pmapdebug = 0 /* 0xffff */;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_PDRTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define	PVF_ENTER	0x01
#define	PVF_REMOVE	0x02
#define	PVF_PROTECT	0x04
#define	PVF_TOTAL	0x80
/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
#define pmap_pde_v(pte) ((pte)->pd_v)
#define pmap_pte_w(pte) ((pte)->pg_w)
/* #define pmap_pte_ci(pte) ((pte)->pg_ci) */
#define pmap_pte_m(pte) ((pte)->pg_m)
#define pmap_pte_u(pte) ((pte)->pg_u)
#define pmap_pte_v(pte) ((pte)->pg_v)
#define pmap_pte_set_w(pte, v) ((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v) ((pte)->pg_prot = (v))
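/*
 * Assumed usage sketch (not original text): a safe two-level lookup
 * checks the directory entry before touching the page table, since the
 * PT page may not exist:
 *
 *	if (pmap_pde_v(pmap_pde(pmap, va)) &&
 *	    pmap_pte_v(pmap_pte(pmap, va)))
 *		pa = pmap_pte_pa(pmap_pte(pmap, va)) | (va & ~PG_FRAME);
 *
 * This is the pattern pmap_remove(), pmap_protect() and friends follow.
 */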
/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
int	protection_codes[8];
#define pte_prot(m, p)	(protection_codes[p])
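/*
 * protection_codes[] is indexed by the 3-bit VM_PROT_{READ,WRITE,EXECUTE}
 * combination (eight entries); it is filled in by i386_protection_init()
 * near the end of this file.
 */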
struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
char		*pmap_attributes;	/* reference and modify bits */
pv_entry_t	pv_table;	/* array of pv entries, one per managed page */

boolean_t	pmap_testbit();
void		pmap_clear_modify();
/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *
 *	On the I386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address 0xFE000000 to the actual
 *	(physical) address starting relative to 0]
 */
extern vm_offset_t	atdevbase;
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
	vm_offset_t va;
	struct pte *pte;
	extern vm_offset_t maxmem, physmem;

	firstaddr = 0x100000;	/*XXX basemem completely fucked (again) */
	avail_start = firstaddr;
	avail_end = maxmem << PG_SHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= i386_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PG_SHIFT;
	virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;
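	/*
	 * Note: i386pagesperpage exists because the machine-independent
	 * PAGE_SIZE may be a multiple of the hardware page size; every
	 * "do { ... } while (++ix != i386pagesperpage);" loop below
	 * touches that many hardware ptes per MACH page.  With 4K MACH
	 * pages on 4K hardware pages it is simply 1.
	 */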
	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this part of the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;
#ifdef notdef
	/*
	 * Create Kernel page directory table and page maps.
	 * [ currently done in locore. i have wild and crazy ideas -wfj ]
	 */
	bzero(firstaddr, 4*NBPG);
	kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;

	for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
	     x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
		pde = kernel_pmap->pm_pdir + x;
		*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
	}
#else
	/* page directory and tables were built in locore; just record them */
	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
#endif
	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
	virtual_avail = va;
}
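/*
 * Expansion sketch (not original text): one invocation such as
 * SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands to
 *
 *	CADDR1 = (caddr_t)va; va += 1*I386_PAGE_SIZE;
 *	CMAP1 = pte; pte += 1;
 *
 * pairing a fixed kernel VA with the pte through which it is mapped.
 */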
/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t	phys_start, phys_end;
{
	vm_offset_t	addr;
	vm_size_t	npg, s;
	extern int	KPTphys;

	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = atdevbase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
			   &addr, (0x100000-0xa0000), FALSE);

	addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, 2*NBPG, FALSE);
	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
		       s, npg, pv_table, pmap_attributes);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}
/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t	virt;
	vm_offset_t	start;
	vm_offset_t	end;
	int		prot;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);

	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}
/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 *
 * [ just allocate a ptd and mark it uninitialize -- should we track
 *   with a table which process has which ptd? -wfj ]
 */
pmap_t
pmap_create(size)
	vm_size_t	size;
{
	register pmap_t pmap;

	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);

	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		pg("pmap_pinit(%x)\n", pmap);
	/*
	 * No need to allocate page table space yet but we do need a
	 * valid page directory table.
	 */
	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
	/* wire in kernel global address entries */
	bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
	      (KPTDI_LAST-KPTDI_FIRST+1)*4);
	/* install self-referential address mapping entry */
	*(int *)(pmap->pm_pdir+PTDPTDI) =
		(int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir)
		| PG_V | PG_URKW;
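	/*
	 * The entry installed above makes the new directory behave as its
	 * own page table: once this directory is loaded into cr3, virtual
	 * addresses in the recursive window translate through it twice,
	 * so the process' ptes (and the directory itself) show up at
	 * fixed virtual addresses.  See vtopte()/avtopte() in pmap_pte().
	 */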
	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}
/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

	if (pmap == NULL)
		return;

	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}
/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_release(%x)\n", pmap);

#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif
	kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
}
/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t	pmap;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)", pmap);

	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}
/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register struct pmap *pmap;
	register vm_offset_t sva, eva;
{
register struct pmap
*pmap
;
register vm_offset_t pa
, va
;
register pt_entry_t
*pte
;
register pv_entry_t pv
, npv
;
boolean_t firstpage
= TRUE
;
boolean_t flushcache
= FALSE
;
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		pg("pmap_remove(%x, %x, %x)", pmap, sva, eva);

	if (pmap == NULL)
		return;
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the page directory table is
		 * always allocated, and in kernel virtual.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va)))
			continue;

		pte = pmap_pte(pmap, va);
		pa = pmap_pte_pa(pte);
		if (pa == 0)
			continue;

		/*
		 * Update statistics
		 */
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
		if (pmapdebug & PDB_REMOVE)
			printf("remove: inv %x ptes at %x(%x) ",
			       i386pagesperpage, pte, *(int *)pte);

		bits = ix = 0;
		do {
			bits |= *(int *)pte & (PG_U|PG_M);
			*(int *)pte++ = 0;
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);
		if (pmap == &curproc->p_vmspace->vm_pmap)
			pmap_activate(pmap, (struct pcb *)curproc->p_addr);

		/* are we current address space or kernel? */
		/*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			load_cr3(curpcb->pcb_ptd);*/

		/*
		 * [ reduce wiring count on page table pages as references
		 *   drop ]
		 */
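		/*
		 * Reloading cr3, as the disabled code above would, flushes
		 * the entire TLB; the i386 has no single-entry invalidate
		 * instruction, which is also why the TBIS() calls carried
		 * over from the hp300 version remain commented out.
		 */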
		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			continue;
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * If it is the first entry on the list, it is actually
		 * in the header and we must copy the following entry up
		 * to the header.  Otherwise we must search the list for
		 * the entry.  In either case we free the now unused entry.
		 */
		if (pmap == pv->pv_pmap && va == pv->pv_va) {
			npv = pv->pv_next;
			if (npv) {
				*pv = *npv;
				free((caddr_t)npv, M_VMPVENT);
			} else
				pv->pv_pmap = NULL;
		}
		else {
			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					break;
				pv = npv;
			}
			if (npv == NULL)
				panic("pmap_remove: PA not in pv_tab");
			pv->pv_next = npv->pv_next;
			free((caddr_t)npv, M_VMPVENT);
			pv = pa_to_pvh(pa);
		}
		/*
		 * [ tally number of pagetable pages; if sharing of ptpages
		 *   adjust here ]
		 */

		/*
		 * Update saved attributes for managed page
		 */
		pmap_attributes[pa_index(pa)] |= bits;
		splx(s);
	}
	/*
	 * [ cache and tlb flushing, if needed ]
	 */
}
/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 */
void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;
	int s;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove_all(%x)", pa);

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Do it the easy way for now
	 */
	while (pv->pv_pmap != NULL) {
		if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
			panic("pmap_remove_all: bad mapping");
		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
	}
	splx(s);
}
/*
 *	Routine:	pmap_copy_on_write
 *	Function:
 *		Remove write privileges from all
 *		physical maps for this physical page.
 */
void
pmap_copy_on_write(pa)
	vm_offset_t pa;
{
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_copy_on_write(%x)", pa);

	pmap_changebit(pa, PG_RO, TRUE);
}
/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t	pmap;
	vm_offset_t	sva, eva;
	vm_prot_t	prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	register int ix;
	int i386prot;
	boolean_t firstpage = TRUE;
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	pte = pmap_pte(pmap, sva);
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= i386_trunc_pdr((vm_offset_t)-1))
				break;
			va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
			pte = pmap_pte(pmap, va);
			pte += i386pagesperpage;
			continue;
		}
		if (!pmap_pte_v(pte)) {
			/*
			 * Page not valid.  Again, skip it.
			 * Should we do this?  Or set protection anyway?
			 */
			pte += i386pagesperpage;
			continue;
		}

		ix = 0;
		i386prot = pte_prot(pmap, prot);
		do {
			/* clear VAC here if PG_RO? */
			pmap_pte_set_prot(pte++, i386prot);
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);
	}
	if (pmap == &curproc->p_vmspace->vm_pmap)
		pmap_activate(pmap, (struct pcb *)curproc->p_addr);
}
/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte, ix;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)",
		       pmap, va, pa, prot, wired);

	if (pmap == NULL)
		return;

	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
	/* also, should not muck with PTD va! */
	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
	}
	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if ((wired && !pmap_pte_w(pte)) ||
		    (!wired && pmap_pte_w(pte))) {
			if (pmapdebug & PDB_ENTER)
				pg("enter: wiring change -> %x ", wired);
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
		}
		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x pa %x ",
			       va, opa);
		pmap_remove(pmap, va, va + PAGE_SIZE);
	}
	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

		pv = pa_to_pvh(pa);
		s = splimp();
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x ",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
			/*printf("second time: ");*/
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
		}
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	else if (pmap_initialized) {
		checkpv = cacheable = FALSE;
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * I386 pages in a MACH page.
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
	npte |= (*(int *)pte & (PG_M|PG_U));
	if (wired)
		npte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		npte |= PG_u;
	else if (va < UPT_MAX_ADDRESS)
		npte |= PG_u | PG_RW;

	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x ", npte);

	ix = 0;
	do {
		*(int *)pte++ = npte;
		/*TBIS(va);*/
		npte += I386_PAGE_SIZE;
		va += I386_PAGE_SIZE;
	} while (++ix != i386pagesperpage);
	/*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
}
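/*
 * Typical call (sketch of how the VM system reaches this entry point;
 * the actual call site is in vm_fault):
 *
 *	pmap_enter(map->pmap, va, VM_PAGE_TO_PHYS(m), prot, wired);
 */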
/*
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
	vm_offset_t	phys;
	vm_prot_t	prot;
{
	switch (prot) {
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_copy_on_write(phys);
		break;
	case VM_PROT_ALL:
		break;
	default:
		pmap_remove_all(phys);
		break;
	}
}
/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t	va;
	boolean_t	wired;
{
	register pt_entry_t *pte;
	register int ix;

	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);

	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PDE for %x ", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PTE for %x ", va);
	}
	if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	ix = 0;
	do {
		pmap_pte_set_w(pte++, wired);
	} while (++ix != i386pagesperpage);
}
/*
 *	Routine:	pmap_pte
 *	Function:
 *		Extract the page table entry associated
 *		with the given map/virtual_address pair.
 * [ what about induced faults -wfj]
 */
struct pte *
pmap_pte(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pte(%x, %x) ->\n", pmap, va);
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
		/* are we current address space or kernel? */
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			return ((struct pte *) vtopte(va));
		/* otherwise, we are alternate address space */
		else {
			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
			    != APTDpde.pd_pfnum) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				load_cr3(curpcb->pcb_ptd);	/* flush TLB */
			}
			return((struct pte *) avtopte(va));
		}
	}
	return(0);
}
/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_extract(%x, %x) -> ", pmap, va);
	pa = 0;
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
		pa = *(int *) pmap_pte(pmap, va);
	}
	if (pa)
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);

	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
	return(pa);
}
/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t		dst_pmap;
	pmap_t		src_pmap;
	vm_offset_t	dst_addr;
	vm_size_t	len;
	vm_offset_t	src_addr;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
}
/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to insure that a thread about
 *	to run will see a semantically correct world.
 */
void
pmap_update()
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()");

	load_cr3(curpcb->pcb_ptd);
}
/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 * [ needs to be written -wfj ]
 */
void
pmap_collect(pmap)
	pmap_t		pmap;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x) ", pmap);
}
/* [ macro again?, should I force kstack into user map here? -wfj ] */
void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
	if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
		pg("pmap_activate(%x, %x) ", pmap, pcbp);

	PMAP_ACTIVATE(pmap, pcbp);
	/*printf("pde ");
	for(x=0x3f6; x < 0x3fA; x++)
		printf("%x ", pmap->pm_pdir[x]);*/
	/*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
}
/*
 *	Routine:	pmap_kernel
 *	Function:
 *		Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}
/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bzero to clear its contents, one machine dependent page
 *	at a time.
 */
pmap_zero_page(phys)
	register vm_offset_t	phys;
{
	register int ix;

	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)", phys);

	phys >>= PG_SHIFT;
	ix = 0;
	do {
		clearseg(phys++);
	} while (++ix != i386pagesperpage);
}
/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
pmap_copy_page(src, dst)
	register vm_offset_t	src, dst;
{
	register int ix;

	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)", src, dst);

	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;
	ix = 0;
	do {
		physcopyseg(src++, dst++);
	} while (++ix != i386pagesperpage);
}
/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)",
		       pmap, sva, eva, pageable);
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)",
			       pmap, sva, eva, pageable);
		/*if (!pmap_pde_v(pmap_pde(pmap, sva)))
			return;*/
		if (pmap_pte(pmap, sva) == 0)
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		if (pv->pv_va != sva || pv->pv_next) {
			pg("pmap_pageable: bad PT page va %x next %x\n",
			   pv->pv_va, pv->pv_next);
			return;
		}
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_clear_modify(pa);
#ifdef needsomethinglikethis
		if (pmapdebug & PDB_PTPAGE)
			pg("pmap_pageable: PT page %x(%x) unmodified\n",
			   sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}
/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t	pa;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)", pa);

	pmap_changebit(pa, PG_M, FALSE);
}
/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t	pa;
{
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)", pa);

	pmap_changebit(pa, PG_U, FALSE);
}
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t	pa;
{
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
		return(rv);
	}
	return(pmap_testbit(pa, PG_U));
}
/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t	pa;
{
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
		return(rv);
	}
	return(pmap_testbit(pa, PG_M));
}
/*
 * Miscellaneous support routines follow
 */

i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}
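/*
 * The resulting table collapses the eight VM_PROT combinations onto the
 * two hardware choices: anything writable becomes PG_RW, anything merely
 * readable or executable becomes PG_RO, and VM_PROT_NONE becomes 0 --
 * the i386 pte cannot distinguish execute permission from read.
 */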
boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte, ix;
	int s;

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return(FALSE);

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Check saved info first
	 */
	if (pmap_attributes[pa_index(pa)] & bit) {
		splx(s);
		return(TRUE);
	}
	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			ix = 0;
			do {
				if (*pte++ & bit) {
					splx(s);
					return(TRUE);
				}
			} while (++ix != i386pagesperpage);
		}
	}
	splx(s);
	return(FALSE);
}
pmap_changebit(pa, bit, setem)
	register vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register int *pte, npte, ix;
	vm_offset_t va;
	int s, toflush;
	boolean_t firstpage = TRUE;

	if (pmapdebug & PDB_BITS)
		printf("pmap_changebit(%x, %x, %s)",
		       pa, bit, setem ? "set" : "clear");

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Clear saved attributes (modify, reference)
	 */
	if (!setem)
		pmap_attributes[pa_index(pa)] &= ~bit;

	/*
	 * Loop over all current mappings setting/clearing as appropos
	 * If setting RO do we need to clear the VAC?
	 */
	toflush = 0;
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
			va = pv->pv_va;
			/*
			 * XXX don't write protect pager mappings
			 */
			if (bit == PG_RO && setem) {
				extern vm_offset_t pager_sva, pager_eva;

				if (va >= pager_sva && va < pager_eva)
					continue;
			}

			pte = (int *) pmap_pte(pv->pv_pmap, va);
			ix = 0;
			do {
				if (setem)
					npte = *pte | bit;
				else
					npte = *pte & ~bit;
				if (*pte != npte) {
					*pte = npte;
					/*TBIS(va);*/
				}
				va += I386_PAGE_SIZE;
				pte++;
			} while (++ix != i386pagesperpage);
			if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
				pmap_activate(pv->pv_pmap,
				    (struct pcb *)curproc->p_addr);
		}
#ifdef notdef	/* VAC flushes from the hp300 original; no VAC on i386 */
		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
				DCIA();
			else if (toflush == 2)
				DCIS();
			else
				DCIU();
		}
#endif
	}
	splx(s);
}
/* static */
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
		printf(" -> pmap %x, va %x, flags %x",
		       pv->pv_pmap, pv->pv_va, pv->pv_flags);
		pads(pv->pv_pmap);
	}
	printf(" ");
}
/* static */
pmap_check_wiring(str, va)
	char *str;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	register int count, *pte;

	va = trunc_page(va);
	if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
		return;

	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
		pg("wired_check: entry for %x not found\n", va);
		return;
	}
	count = 0;
	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		pg("*%s*: %x: w%d/a%d\n",
		   str, va, entry->wired_count, count);
}
/* print address space of pmap*/
pads(pm)
	pmap_t pm;
{
	unsigned va, i, j;
	struct pte *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < 1024; i++)
		if (pm->pm_pdir[i].pd_v)
			for (j = 0; j < 1024 ; j++) {
				va = (i << PD_SHIFT) + (j << PG_SHIFT);
				if (pm == kernel_pmap && va < 0xfe000000)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *)ptep);
			}
}