/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * PATCHES MAGIC		LEVEL	PATCH THAT GOT US HERE
 * --------------------	-----	----------------------
 * CURRENT PATCH LEVEL:	1	00074
 * --------------------	-----	----------------------
 *
 * 22 Jan 93	Paul Mackerras		Fixed bug where pages got lost
 *
 */

/*
 *	Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

vm_size_t	page_size  = 4096;
vm_size_t	page_mask  = 4095;
int		page_shift = 12;
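
/*
 * Note: the initializers above describe 4K pages; vm_set_page_size()
 * below recomputes page_mask and page_shift from whatever page_size
 * is in effect by the time it is called.
 */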

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;

int	vm_page_free_count;
int	vm_page_active_count;
int	vm_page_inactive_count;
int	vm_page_wire_count;
int	vm_page_laundry_count;

int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void vm_set_page_size()
{
	page_mask = page_size - 1;

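	/*
	 * A power of two has exactly one bit set, so page_size is a
	 * power of two precisely when (page_size - 1) & page_size == 0.
	 */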
	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
}


/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t vm_page_startup(start, end, vaddr)
	register vm_offset_t	start;
	vm_offset_t	end;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;

	extern vm_offset_t	kentry_data;
	extern vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than or equal to the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	vm_page_buckets = (queue_t) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(end - start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
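
	/*
	 * With a power-of-two bucket count, vm_page_hash() below can
	 * select a bucket by ANDing with this mask instead of taking
	 * a modulus.
	 */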

	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(((queue_t)start) + vm_page_bucket_count);
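	/*
	 * The queue_t cast above makes the addition advance by whole
	 * queue headers, i.e. this reserves vm_page_bucket_count *
	 * sizeof(struct queue_entry) bytes for the buckets.
	 */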
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Truncate the end address to our page size.
	 */

	end = trunc_page(end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map, which must be initialized before malloc() will
	 *	work (obviously).  This could also include pager maps, which
	 *	would be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty,
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	mapped = vaddr;
	start = new_start;

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	vm_page_free_count = npages =
		(end - start)/(PAGE_SIZE + sizeof(struct vm_page));
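	/*
	 * Each managed page costs PAGE_SIZE bytes of memory plus one
	 * struct vm_page of bookkeeping, hence the combined divisor.
	 */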

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	m = vm_page_array = (vm_page_t) vaddr;
	first_page = start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + page_mask;

	/*
	 *	Validate these addresses.
	 */

	new_start = start + (round_page(m + npages) - mapped);
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	/*
	 *	Clear all of the page structures
	 */
	bzero((caddr_t)m, npages * sizeof(*m));

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->wanted = FALSE;
		m->inactive = FALSE;
		m->active = FALSE;
		m->busy = FALSE;
		m->object = NULL;
		m->phys_addr = pa;
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)

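/*
 * For example, pages of a single object at consecutive page offsets
 * fall in consecutive buckets: the object term is fixed while
 * atop(offset) advances by one per page.
 */
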
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset hash
 *	table and the object's list of pages.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset hash
 *	table and the object's page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */
vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

void vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#ifdef DEBUG
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
		(mem)->pagerowned = FALSE; \
		(mem)->ptpage = FALSE; \
	}
#else
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
	}
#endif

	vm_page_init(mem, object, offset);
}
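
/*
 * Note: the vm_page_init() macro above is defined inside the function
 * of the same name, apparently so the list of initialized fields lives
 * in one place.  The call at the end of the function, and every later
 * use in this file (e.g. in vm_page_alloc() below), expands to the
 * macro; the DEBUG variant additionally clears the pagerowned and
 * ptpage fields.
 */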

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int			spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	vm_page_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if ((vm_page_free_count < vm_page_free_min) ||
			((vm_page_free_count < vm_page_free_target) &&
			(vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup(&vm_pages_needed);
	return(mem);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		vm_page_active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		vm_page_inactive_count--;
	}

	if (!mem->fictitious) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		vm_page_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			vm_page_active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			vm_page_inactive_count--;
			mem->inactive = FALSE;
		}
		vm_page_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		vm_page_wire_count--;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 *
	 *	XXX: sometimes we get pages which aren't wired down
	 *	or on any queue - we need to put them on the inactive
	 *	queue also, otherwise we lose track of them.
	 *	Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	if (!m->inactive && m->wire_count == 0) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		if (m->active) {
			queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
			m->active = FALSE;
			vm_page_active_count--;
		}
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->inactive = TRUE;
		vm_page_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}
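
/*
 * Note: the !inactive test above (rather than a test on active) is the
 * 22 Jan 93 fix recorded in the patch header: an unwired page on no
 * queue at all must still be moved to the inactive queue, or the
 * pageout daemon loses track of it.
 */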

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		vm_page_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}