/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	7.10 (Berkeley) %G%
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t	vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
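
/*
 * A worked example, assuming a hypothetical 4K machine page size:
 * with cnt.v_page_size == 4096 the code above leaves page_mask ==
 * 0xfff and page_shift == 12, since 4096 == 1 << 12.  A
 * non-power-of-two size such as 3072 (0xc00) would panic, because
 * its mask 0xbff shares bit 11 with the size itself:
 * 0xbff & 0xc00 == 0x800 != 0.
 */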

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
void vm_page_startup(start, end)
	vm_offset_t	*start;
	vm_offset_t	*end;
{
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	int			i;
	vm_offset_t		pa;
	extern vm_offset_t	kentry_data;
	extern vm_size_t	kentry_data_size;

	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Calculate the number of hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the smallest power of 2 that is
	 *	at least as large as the number of physical pages in
	 *	the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(*end - *start))
			vm_page_bucket_count <<= 1;
	}
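
	/*
	 * For example (hypothetical figures): managing 2016 physical
	 * pages, the loop doubles vm_page_bucket_count through 1, 2,
	 * 4, ..., 1024, 2048 and stops at 2048, the first power of
	 * two >= 2016; the hash mask computed below is then 0x7ff.
	 */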

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 */
	vm_page_buckets = (queue_t) pmap_bootstrap_alloc(vm_page_bucket_count
		* sizeof(struct queue_entry));
	bucket = vm_page_buckets;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Truncate the remainder of physical memory to our page size.
	 */

	*end = trunc_page(*end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  This could also include pager maps, which
	 *	would need to be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	cnt.v_free_count = npages =
		(*end - *start)/(PAGE_SIZE + sizeof(struct vm_page));
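
	/*
	 * A worked example with hypothetical numbers: managing 8M of
	 * physical memory with 4K pages and a 64-byte struct vm_page
	 * gives npages == 0x800000 / (4096 + 64) == 2016, rather than
	 * the 2048 pages the raw memory would hold; the difference is
	 * the space consumed by the page structures themselves.
	 */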

	/*
	 *	Record the extent of physical memory that the
	 *	virtual memory system manages.
	 */

	first_page = *start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;
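
	/*
	 * Continuing the hypothetical example above: 2016 page
	 * structures of 64 bytes each occupy 126K starting at *start,
	 * so first_page is the page number (atop) of that boundary
	 * rounded up to a whole page, and last_phys_addr is the
	 * address of the last byte of the last managed page.
	 */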

	/*
	 *	Allocate and clear the mem entry structures.
	 */

	m = vm_page_array = (vm_page_t)
		pmap_bootstrap_alloc(npages * sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->wanted = FALSE;
		m->inactive = FALSE;
		m->active = FALSE;
		m->busy = FALSE;
		m->object = NULL;
		m->phys_addr = pa;
#ifdef i386
		if (pmap_isvalidphys(m->phys_addr)) {
			queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		} else {
			/* perhaps iomem needs its own type, or dev pager? */
			m->fictitious = 1;
			m->busy = TRUE;
			cnt.v_free_count--;
		}
#else /* i386 */
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
#endif /* i386 */
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	/* from now on, pmap_bootstrap_alloc can't be used */
	vm_page_startup_initialized = TRUE;
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
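
/*
 * Example, with hypothetical values: given vm_page_bucket_count ==
 * 2048 (vm_page_hash_mask == 0x7ff) and 4K pages, an object at
 * address 0x80021000 with offset 0x6000 hashes to
 * (0x80021000 + 6) & 0x7ff == 6, so the pair lives in bucket 6;
 * consecutive offsets within one object land in consecutive buckets.
 */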

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

static void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_init:
 *
 *	Initializes the given page structure to a known state and
 *	enters it into the given object at the given offset; the
 *	page is left busy so that its allocator can fill it in
 *	before exposing it.
 *
 *	The body is a macro, written out once per configuration
 *	because the preprocessor cannot conditionally include lines
 *	within a single definition; the DEBUG version additionally
 *	clears the pagerowned and ptpage fields.
 */
void vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#ifdef DEBUG
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
		(mem)->pagerowned = FALSE; \
		(mem)->ptpage = FALSE; \
	}
#else
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
	}
#endif

	vm_page_init(mem, object, offset);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int			spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
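
	/*
	 * For example, with hypothetical thresholds v_free_min == 64,
	 * v_free_target == 128 and v_inactive_target == 256, a state
	 * of 100 free and 200 inactive pages wakes the daemon (free
	 * is below target and inactive is below its target), while
	 * 100 free and 300 inactive pages does not.
	 */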

	if ((cnt.v_free_count < cnt.v_free_min) ||
			((cnt.v_free_count < cnt.v_free_target) &&
			(cnt.v_inactive_count < cnt.v_inactive_target)))
		thread_wakeup((int)&vm_pages_needed);
	return(mem);
}
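
/*
 * A typical caller cannot assume success: when the free list is
 * empty the allocation returns NULL and the caller must wait for
 * the pageout daemon to reclaim pages, as in this sketch (modeled
 * on the retry pattern in vm_fault, and assuming a VM_WAIT-style
 * sleep macro such as the one declared with vm_pages_needed):
 *
 *	while ((mem = vm_page_alloc(object, offset)) == NULL)
 *		VM_WAIT;
 */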

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		cnt.v_active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		cnt.v_inactive_count--;
	}

	if (!mem->fictitious) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			cnt.v_active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			cnt.v_inactive_count--;
			mem->inactive = FALSE;
		}
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		cnt.v_active_count++;
		mem->active = TRUE;
		cnt.v_wire_count--;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->active) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->active = FALSE;
		m->inactive = TRUE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		cnt.v_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		cnt.v_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	m->clean = 0;
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	dest_m->clean = 0;
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}