/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	7.10 (Berkeley) %G%
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 *	Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t	vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
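/*
 * Illustrative only (not in the original source): with the common
 * cnt.v_page_size of 4096, the loop above leaves page_shift == 12 and
 * page_mask == 0xfff, so the byte/page conversions used throughout this
 * file reduce to shifts and masks, roughly:
 *
 *	atop(x)       == x >> page_shift	(byte address -> page number)
 *	ptoa(p)       == p << page_shift	(page number -> byte address)
 *	round_page(x) == (x + page_mask) & ~page_mask
 *
 * e.g. round_page(0x1001) == 0x2000 and atop(0x3000) == 3.
 */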
/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
void vm_page_startup(start, end)
	vm_offset_t	*start;
	vm_offset_t	*end;
{
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	int			i;
	vm_offset_t		pa;
	extern vm_offset_t	kentry_data;
	extern vm_size_t	kentry_data_size;

	/*
	 *	Initialize the locks.
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);
	/*
	 *	Calculate the number of hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note: This computation can be tweaked if desired.
	 */

	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(*end - *start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
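	/*
	 * Illustrative only: on a machine managing 8MB with 4096-byte
	 * pages, atop(*end - *start) is 2048, so the loop leaves
	 * vm_page_bucket_count at 2048 (already a power of 2) and
	 * vm_page_hash_mask at 0x7ff.  With 2049 pages the count
	 * would double to 4096 instead.
	 */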
	/*
	 *	Allocate (and initialize) the hash table buckets.
	 */
	vm_page_buckets = (queue_t) pmap_bootstrap_alloc(vm_page_bucket_count
		* sizeof(struct queue_entry));
	bucket = vm_page_buckets;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);
	/*
	 *	Truncate the remainder of physical memory to our page size.
	 */

	*end = trunc_page(*end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	cnt.v_free_count = npages =
		(*end - *start)/(PAGE_SIZE + sizeof(struct vm_page));
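	/*
	 * Illustrative only: every usable page costs PAGE_SIZE bytes of
	 * memory plus one struct vm_page of bookkeeping, so dividing the
	 * managed range by (PAGE_SIZE + sizeof(struct vm_page)) counts
	 * whole page+header pairs.  E.g. for a 4MB range, 4096-byte pages
	 * and (say) a 64-byte vm_page, npages is 0x400000 / 4160 = 1008
	 * rather than 1024.
	 */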
	/*
	 *	Record the extent of physical memory that the
	 *	virtual memory system manages.
	 */

	first_page = *start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;
	/*
	 *	Allocate and clear the mem entry structures.
	 */

	m = vm_page_array = (vm_page_t)
		pmap_bootstrap_alloc(npages * sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->phys_addr = pa;
#ifdef i386
		if (pmap_isvalidphys(m->phys_addr)) {
			queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		} else {
			/* perhaps iomem needs its own type, or dev pager? */
			m->fictitious = TRUE;
			m->busy = TRUE;
			cnt.v_free_count--;
		}
#else /* i386 */
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
#endif /* i386 */
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	/* from now on, pmap_bootstrap_alloc can't be used */
	vm_page_startup_initialized = TRUE;
}
/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define	vm_page_hash(object, offset) \
	(((unsigned)object + (unsigned)atop(offset)) & vm_page_hash_mask)
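/*
 * Illustrative only: because vm_page_bucket_count is a power of 2,
 * "& vm_page_hash_mask" is equivalent to "% vm_page_bucket_count" but
 * cheaper.  With 2048 buckets (mask 0x7ff), an object at 0x80123400 and
 * offset 0x5000 (page 5 for 4096-byte pages) hash to
 * (0x80123400 + 5) & 0x7ff == 0x405, i.e. bucket 1029.
 */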
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */
static void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;

	if (mem->tabled)
		panic("vm_page_insert: already inserted");
	/*
	 *	Record the object/offset pair in this page.
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table.
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}
/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */
void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table.
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}
/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */
vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;

	/*
	 *	Search the hash table for this object/offset pair.
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	return(NULL);
}
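/*
 * Illustrative only (hypothetical caller, not part of this file):
 * lookups are performed with the object locked, e.g.
 *
 *	vm_object_lock(object);
 *	m = vm_page_lookup(object, trunc_page(offset));
 *	if (m == NULL)
 *		... page is not resident; fault it in ...
 *	vm_object_unlock(object);
 */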
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}
/*
 *	vm_page_init:
 *
 *	Initializes the given page structure, placing it on the
 *	appropriate hash table.
 */
void vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#define	vm_page_init(mem, object, offset)  {\
	vm_page_insert((mem), (object), (offset)); \
	(mem)->fictitious = FALSE; \
	(mem)->page_lock = VM_PROT_NONE; \
	(mem)->unlock_request = VM_PROT_NONE; \
	(mem)->laundry = FALSE; \
	(mem)->inactive = FALSE; \
	(mem)->copy_on_write = FALSE; \
	(mem)->pagerowned = FALSE; \
	}
	vm_page_init(mem, object, offset);
}
/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int			spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);
	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if ((cnt.v_free_count < cnt.v_free_min) ||
	    ((cnt.v_free_count < cnt.v_free_target) &&
	     (cnt.v_inactive_count < cnt.v_inactive_target)))
		thread_wakeup((int)&vm_pages_needed);

	return(mem);
}
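/*
 * Illustrative only: with sample tunables v_free_min = 64 and
 * v_free_target = 256, a drop to 60 free pages wakes the daemon
 * unconditionally; at 200 free pages it is woken only while
 * v_inactive_count is still below v_inactive_target, so the daemon
 * replenishes the inactive list ahead of demand instead of waiting
 * for a genuine shortage.
 */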
/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		cnt.v_active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		cnt.v_inactive_count--;
	}

	if (!mem->fictitious) {
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);
		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
	}
}
/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			cnt.v_active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			cnt.v_inactive_count--;
			mem->inactive = FALSE;
		}
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}
/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = TRUE;
		cnt.v_active_count++;
		cnt.v_wire_count--;
	}
}
/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->active) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->active = FALSE;
		m->inactive = TRUE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void vm_page_activate(m)
	register vm_page_t	m;
{
	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		cnt.v_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		cnt.v_active_count++;
	}
}
/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}
/*
 *	vm_page_copy:
 *
 *	Copy one page to another.
 */
void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
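/*
 * Illustrative only (hypothetical caller, not part of this file):
 * a zero-fill pagein path might combine the routines above roughly as
 *
 *	vm_object_lock(object);
 *	m = vm_page_alloc(object, offset);
 *	if (m == NULL)
 *		... wait for free pages and retry ...
 *	else if (vm_page_zero_fill(m)) {
 *		... enter the page in the pmap, then ...
 *		vm_page_lock_queues();
 *		vm_page_activate(m);
 *		vm_page_unlock_queues();
 *	}
 *	vm_object_unlock(object);
 */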