/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_page.c   7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * PATCHES MAGIC                LEVEL   PATCH THAT GOT US HERE
 * --------------------         -----   ----------------------
 * CURRENT PATCH LEVEL:         3       00147
 * --------------------         -----   ----------------------
 *
 * 22 Jan 93    Paul Mackerras      Fixed bug where pages got lost
 * 08 Apr 93    Yuval Yarom         Several VM system fixes
 * 20 Apr 93    Paul Kranenburg     Detect and prevent kernel deadlocks in
 *                                  VM system
 *
 */

/*
 * Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

queue_head_t *vm_page_buckets;          /* Array of buckets */
int vm_page_bucket_count = 0;           /* How big is array? */
int vm_page_hash_mask;                  /* Mask for hash function */
simple_lock_data_t bucket_lock;         /* lock for all buckets XXX */

vm_size_t page_size  = 4096;
vm_size_t page_mask  = 4095;
int page_shift = 12;

queue_head_t vm_page_queue_free;
queue_head_t vm_page_queue_active;
queue_head_t vm_page_queue_inactive;
simple_lock_data_t vm_page_queue_lock;
simple_lock_data_t vm_page_queue_free_lock;

vm_page_t vm_page_array;
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;

int vm_page_free_count;
int vm_page_active_count;
int vm_page_inactive_count;
int vm_page_wire_count;
int vm_page_laundry_count;

int vm_page_free_target = 0;
int vm_page_free_min = 0;
int vm_page_inactive_target = 0;
int vm_page_free_reserved = 0;

/*
 * vm_set_page_size:
 *
 * Sets the page size, perhaps based upon the memory
 * size.  Must be called before any use of page-size
 * dependent functions.
 *
 * Sets page_shift and page_mask from page_size.
 */
void vm_set_page_size()
{
    page_mask = page_size - 1;

    if ((page_mask & page_size) != 0)
        panic("vm_set_page_size: page size not a power of two");

    for (page_shift = 0; ; page_shift++)
        if ((1 << page_shift) == page_size)
            break;
}
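
/*
 * For concreteness: with the default page_size of 4096, page_mask
 * becomes 4095 (0xfff), the power-of-two check passes since
 * 0xfff & 0x1000 == 0, and the loop above exits with page_shift = 12
 * because 1 << 12 == 4096.
 */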

/*
 * vm_page_startup:
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and
 * for the object/offset-to-page hash table headers.
 * Each page cell is initialized and placed on the free list.
 */
vm_offset_t vm_page_startup(start, end, vaddr)
    register vm_offset_t start;
    vm_offset_t end;
    register vm_offset_t vaddr;
{
    register vm_offset_t mapped;
    register vm_page_t   m;
    register queue_t     bucket;
    vm_size_t            npages;
    register vm_offset_t new_start;
    int                  i;
    vm_offset_t          pa;

    extern vm_offset_t kentry_data;
    extern vm_size_t   kentry_data_size;


    /*
     * Initialize the locks
     */

    simple_lock_init(&vm_page_queue_free_lock);
    simple_lock_init(&vm_page_queue_lock);

    /*
     * Initialize the queue headers for the free queue,
     * the active queue and the inactive queue.
     */

    queue_init(&vm_page_queue_free);
    queue_init(&vm_page_queue_active);
    queue_init(&vm_page_queue_inactive);

    /*
     * Allocate (and initialize) the hash table buckets.
     *
     * The number of buckets MUST BE a power of 2, and
     * the actual value is the smallest power of 2 greater
     * than or equal to the number of physical pages in
     * the system.
     *
     * Note:
     *     This computation can be tweaked if desired.
     */
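
    /*
     * For concreteness: managing 8 megabytes of 4096-byte pages makes
     * atop(end - start) equal 2048, so the loop below doubles
     * vm_page_bucket_count from 1 up to 2048, and vm_page_hash_mask
     * becomes 2047 (0x7ff).
     */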

    vm_page_buckets = (queue_t) vaddr;
    bucket = vm_page_buckets;
    if (vm_page_bucket_count == 0) {
        vm_page_bucket_count = 1;
        while (vm_page_bucket_count < atop(end - start))
            vm_page_bucket_count <<= 1;
    }

    vm_page_hash_mask = vm_page_bucket_count - 1;

    /*
     * Validate these addresses.  The queue_t cast makes the addition
     * below count hash-bucket headers rather than bytes, so new_start
     * covers one queue header per bucket.
     */

    new_start = round_page(((queue_t)start) + vm_page_bucket_count);
    mapped = vaddr;
    vaddr = pmap_map(mapped, start, new_start,
            VM_PROT_READ|VM_PROT_WRITE);
    start = new_start;
    bzero((caddr_t) mapped, vaddr - mapped);
    mapped = vaddr;

    for (i = vm_page_bucket_count; i--;) {
        queue_init(bucket);
        bucket++;
    }

    simple_lock_init(&bucket_lock);

    /*
     * round (or truncate) the addresses to our page size.
     */

    end = trunc_page(end);

    /*
     * Pre-allocate maps and map entries that cannot be dynamically
     * allocated via malloc().  The maps include the kernel_map and
     * kmem_map which must be initialized before malloc() will
     * work (obviously).  Also could include pager maps which would
     * be allocated before kmeminit.
     *
     * Allow some kernel map entries... this should be plenty
     * since people shouldn't be cluttering up the kernel
     * map (they should use their own maps).
     */

    kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
               MAX_KMAPENT * sizeof(struct vm_map_entry);
    kentry_data_size = round_page(kentry_data_size);
    kentry_data = (vm_offset_t) vaddr;
    vaddr += kentry_data_size;
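    /*
     * Sizing sketch (illustrative only; the real MAX_KMAP and
     * MAX_KMAPENT constants live in the VM headers): if MAX_KMAP
     * were 10 and MAX_KMAPENT 500, this would reserve room for ten
     * vm_map structures plus five hundred vm_map_entry structures,
     * rounded up to a whole number of pages by round_page().
     */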

    /*
     * Validate these zone addresses.
     */

    new_start = start + (vaddr - mapped);
    pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
    bzero((caddr_t) mapped, (vaddr - mapped));
    mapped = vaddr;
    start = new_start;

    /*
     * Compute the number of pages of memory that will be
     * available for use (taking into account the overhead
     * of a page structure per page).
     */

    vm_page_free_count = npages =
        (end - start + sizeof(struct vm_page))/(PAGE_SIZE + sizeof(struct vm_page));
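    /*
     * Equivalently: each usable page costs PAGE_SIZE bytes plus one
     * struct vm_page of bookkeeping, so npages is about
     * (end - start) / (PAGE_SIZE + sizeof(struct vm_page)).  With
     * 4096-byte pages and a (hypothetical) 64-byte page structure,
     * roughly 1.5% of the managed range goes to page structures.
     */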

    /*
     * Initialize the mem entry structures now, and
     * put them in the free queue.
     */

    m = vm_page_array = (vm_page_t) vaddr;
    first_page = start;
    first_page += npages*sizeof(struct vm_page);
    first_page = atop(round_page(first_page));
    last_page  = first_page + npages - 1;

    first_phys_addr = ptoa(first_page);
    last_phys_addr  = ptoa(last_page) + page_mask;

    /*
     * Validate these addresses.
     */

    new_start = start + (round_page(m + npages) - mapped);
    mapped = pmap_map(mapped, start, new_start,
            VM_PROT_READ|VM_PROT_WRITE);
    start = new_start;

    /*
     * Clear all of the page structures
     */
    bzero((caddr_t)m, npages * sizeof(*m));

    pa = first_phys_addr;
    while (npages--) {
        m->copy_on_write = FALSE;
        m->wanted = FALSE;
        m->inactive = FALSE;
        m->active = FALSE;
        m->busy = FALSE;
        m->object = NULL;
        m->phys_addr = pa;
        queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
        m++;
        pa += PAGE_SIZE;
    }

    /*
     * Initialize vm_pages_needed lock here - don't wait for pageout
     * daemon  XXX
     */
    simple_lock_init(&vm_pages_needed_lock);

    return(mapped);
}

/*
 * vm_page_hash:
 *
 * Distributes the object/offset key pair among hash buckets.
 *
 * NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
    (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
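
/*
 * Example (addresses purely illustrative): with 4096-byte pages and
 * vm_page_hash_mask = 0x7ff, an object pointer of 0xfe0000 and offset
 * 0x5000 hash to (0xfe0000 + 5) & 0x7ff == 5, i.e. bucket 5, and
 * consecutive offsets within one object land in consecutive buckets.
 */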

/*
 * vm_page_insert:  [ internal use only ]
 *
 * Inserts the given mem entry into the object/offset-page
 * table and object list.
 *
 * The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
    register vm_page_t   mem;
    register vm_object_t object;
    register vm_offset_t offset;
{
    register queue_t bucket;
    int              spl;

    VM_PAGE_CHECK(mem);

    if (mem->tabled)
        panic("vm_page_insert: already inserted");

    /*
     * Record the object/offset pair in this page
     */

    mem->object = object;
    mem->offset = offset;

    /*
     * Insert it into the object/offset hash table
     */

    bucket = &vm_page_buckets[vm_page_hash(object, offset)];
    spl = splimp();
    simple_lock(&bucket_lock);
    queue_enter(bucket, mem, vm_page_t, hashq);
    simple_unlock(&bucket_lock);
    (void) splx(spl);

    /*
     * Now link into the object's list of backed pages.
     */

    queue_enter(&object->memq, mem, vm_page_t, listq);
    mem->tabled = TRUE;

    /*
     * And show that the object has one more resident
     * page.
     */

    object->resident_page_count++;
}

/*
 * vm_page_remove:  [ internal use only ]
 *
 * Removes the given mem entry from the object/offset-page
 * table and the object page list.
 *
 * The object and page must be locked.
 */

void vm_page_remove(mem)
    register vm_page_t mem;
{
    register queue_t bucket;
    int              spl;

    VM_PAGE_CHECK(mem);

    if (!mem->tabled)
        return;

    /*
     * Remove from the object/offset hash table
     */

    bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
    spl = splimp();
    simple_lock(&bucket_lock);
    queue_remove(bucket, mem, vm_page_t, hashq);
    simple_unlock(&bucket_lock);
    (void) splx(spl);

    /*
     * Now remove from the object's list of backed pages.
     */

    queue_remove(&mem->object->memq, mem, vm_page_t, listq);

    /*
     * And show that the object has one fewer resident
     * page.
     */

    mem->object->resident_page_count--;

    mem->tabled = FALSE;
}

/*
 * vm_page_lookup:
 *
 * Returns the page associated with the object/offset
 * pair specified; if none is found, NULL is returned.
 *
 * The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
    register vm_object_t object;
    register vm_offset_t offset;
{
    register vm_page_t mem;
    register queue_t   bucket;
    int                spl;

    /*
     * Search the hash table for this object/offset pair
     */

    bucket = &vm_page_buckets[vm_page_hash(object, offset)];

    spl = splimp();
    simple_lock(&bucket_lock);
    mem = (vm_page_t) queue_first(bucket);
    while (!queue_end(bucket, (queue_entry_t) mem)) {
        VM_PAGE_CHECK(mem);
        if ((mem->object == object) && (mem->offset == offset)) {
            simple_unlock(&bucket_lock);
            splx(spl);
            return(mem);
        }
        mem = (vm_page_t) queue_next(&mem->hashq);
    }

    simple_unlock(&bucket_lock);
    splx(spl);
    return(NULL);
}

/*
 * vm_page_rename:
 *
 * Move the given memory entry from its
 * current object to the specified target object/offset.
 *
 * The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
    register vm_page_t   mem;
    register vm_object_t new_object;
    vm_offset_t          new_offset;
{
    if (mem->object == new_object)
        return;

    vm_page_lock_queues();  /* keep page from moving out from
                               under pageout daemon */
    vm_page_remove(mem);
    vm_page_insert(mem, new_object, new_offset);
    vm_page_unlock_queues();
}

/*
 * vm_page_init:
 *
 * Initializes the given page.  The body lives in a macro that is
 * defined here and expanded by the call at the bottom of the
 * function; the DEBUG variant additionally clears the pagerowned
 * and ptpage fields.
 */
void vm_page_init(mem, object, offset)
    vm_page_t   mem;
    vm_object_t object;
    vm_offset_t offset;
{
#ifdef DEBUG
#define vm_page_init(mem, object, offset)  {\
        (mem)->busy = TRUE; \
        (mem)->tabled = FALSE; \
        vm_page_insert((mem), (object), (offset)); \
        (mem)->absent = FALSE; \
        (mem)->fictitious = FALSE; \
        (mem)->page_lock = VM_PROT_NONE; \
        (mem)->unlock_request = VM_PROT_NONE; \
        (mem)->laundry = FALSE; \
        (mem)->active = FALSE; \
        (mem)->inactive = FALSE; \
        (mem)->wire_count = 0; \
        (mem)->clean = TRUE; \
        (mem)->copy_on_write = FALSE; \
        (mem)->fake = TRUE; \
        (mem)->pagerowned = FALSE; \
        (mem)->ptpage = FALSE; \
    }
#else
#define vm_page_init(mem, object, offset)  {\
        (mem)->busy = TRUE; \
        (mem)->tabled = FALSE; \
        vm_page_insert((mem), (object), (offset)); \
        (mem)->absent = FALSE; \
        (mem)->fictitious = FALSE; \
        (mem)->page_lock = VM_PROT_NONE; \
        (mem)->unlock_request = VM_PROT_NONE; \
        (mem)->laundry = FALSE; \
        (mem)->active = FALSE; \
        (mem)->inactive = FALSE; \
        (mem)->wire_count = 0; \
        (mem)->clean = TRUE; \
        (mem)->copy_on_write = FALSE; \
        (mem)->fake = TRUE; \
    }
#endif

    vm_page_init(mem, object, offset);
}

/*
 * vm_page_alloc:
 *
 * Allocate and return a memory cell associated
 * with this VM object/offset pair.
 *
 * Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
    vm_object_t object;
    vm_offset_t offset;
{
    register vm_page_t mem;
    int                spl;

    spl = splimp();             /* XXX */
    simple_lock(&vm_page_queue_free_lock);
    if (object != kernel_object &&
        object != kmem_object &&
        vm_page_free_count <= vm_page_free_reserved) {

        simple_unlock(&vm_page_queue_free_lock);
        splx(spl);
        return(NULL);
    }
    if (queue_empty(&vm_page_queue_free)) {
        simple_unlock(&vm_page_queue_free_lock);
        splx(spl);
        return(NULL);
    }

    queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

    vm_page_free_count--;
    simple_unlock(&vm_page_queue_free_lock);
    splx(spl);

    vm_page_init(mem, object, offset);

    /*
     * Decide if we should poke the pageout daemon.
     * We do this if the free count is less than the low
     * water mark, or if the free count is less than the high
     * water mark (but above the low water mark) and the inactive
     * count is less than its target.
     *
     * We don't have the counts locked ... if they change a little,
     * it doesn't really matter.
     */
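    /*
     * Illustration (thresholds purely hypothetical): with
     * vm_page_free_min = 16, vm_page_free_target = 64 and
     * vm_page_inactive_target = 128, dropping to 40 free pages wakes
     * the daemon only while fewer than 128 pages are inactive; below
     * 16 free pages the daemon is woken unconditionally.
     */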

    if ((vm_page_free_count < vm_page_free_min) ||
        ((vm_page_free_count < vm_page_free_target) &&
         (vm_page_inactive_count < vm_page_inactive_target)))
        thread_wakeup(&vm_pages_needed);
    return(mem);
}

/*
 * vm_page_free:
 *
 * Returns the given page to the free list,
 * disassociating it from any VM object.
 *
 * Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
    register vm_page_t mem;
{
    vm_page_remove(mem);
    if (mem->active) {
        queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
        mem->active = FALSE;
        vm_page_active_count--;
    }

    if (mem->inactive) {
        queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
        mem->inactive = FALSE;
        vm_page_inactive_count--;
    }

    if (!mem->fictitious) {
        int spl;

        spl = splimp();
        simple_lock(&vm_page_queue_free_lock);
        queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

        vm_page_free_count++;
        simple_unlock(&vm_page_queue_free_lock);
        splx(spl);
    }
}

/*
 * vm_page_wire:
 *
 * Mark this page as wired down by yet
 * another map, removing it from paging queues
 * as necessary.
 *
 * The page queues must be locked.
 */
void vm_page_wire(mem)
    register vm_page_t mem;
{
    VM_PAGE_CHECK(mem);

    if (mem->wire_count == 0) {
        if (mem->active) {
            queue_remove(&vm_page_queue_active, mem, vm_page_t,
                         pageq);
            vm_page_active_count--;
            mem->active = FALSE;
        }
        if (mem->inactive) {
            queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
                         pageq);
            vm_page_inactive_count--;
            mem->inactive = FALSE;
        }
        vm_page_wire_count++;
    }
    mem->wire_count++;
}

/*
 * vm_page_unwire:
 *
 * Release one wiring of this page, potentially
 * enabling it to be paged again.
 *
 * The page queues must be locked.
 */
void vm_page_unwire(mem)
    register vm_page_t mem;
{
    VM_PAGE_CHECK(mem);

    mem->wire_count--;
    if (mem->wire_count == 0) {
        queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
        vm_page_active_count++;
        mem->active = TRUE;
        vm_page_wire_count--;
    }
}
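
/*
 * Note that wirings nest: a page wired by two maps has wire_count == 2
 * and leaves the paging queues on the first vm_page_wire() call, but
 * returns to the active queue only on the second, matching
 * vm_page_unwire() call.
 */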

/*
 * vm_page_deactivate:
 *
 * Returns the given page to the inactive list,
 * indicating that no physical maps have access
 * to this page.  [Used by the physical mapping system.]
 *
 * The page queues must be locked.
 */
void vm_page_deactivate(m)
    register vm_page_t m;
{
    VM_PAGE_CHECK(m);

    /*
     * Only move active pages -- ignore locked or already
     * inactive ones.
     *
     * XXX: sometimes we get pages which aren't wired down
     * or on any queue - we need to put them on the inactive
     * queue also, otherwise we lose track of them.
     * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
     */

    if (!m->inactive && m->wire_count == 0) {
        pmap_clear_reference(VM_PAGE_TO_PHYS(m));
        if (m->active) {
            queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
            m->active = FALSE;
            vm_page_active_count--;
        }
        queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
        m->inactive = TRUE;
        vm_page_inactive_count++;
        if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
            m->clean = FALSE;
        m->laundry = !m->clean;
    }
}

/*
 * vm_page_activate:
 *
 * Put the specified page on the active list (if appropriate).
 *
 * The page queues must be locked.
 */

void vm_page_activate(m)
    register vm_page_t m;
{
    VM_PAGE_CHECK(m);

    if (m->inactive) {
        queue_remove(&vm_page_queue_inactive, m, vm_page_t,
                     pageq);
        vm_page_inactive_count--;
        m->inactive = FALSE;
    }
    if (m->wire_count == 0) {
        if (m->active)
            panic("vm_page_activate: already active");

        queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
        m->active = TRUE;
        vm_page_active_count++;
    }
}

/*
 * vm_page_zero_fill:
 *
 * Zero-fill the specified page.
 * Written as a standard pagein routine, to
 * be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
    vm_page_t m;
{
    VM_PAGE_CHECK(m);

    pmap_zero_page(VM_PAGE_TO_PHYS(m));
    return(TRUE);
}

/*
 * vm_page_copy:
 *
 * Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
    vm_page_t src_m;
    vm_page_t dest_m;
{
    VM_PAGE_CHECK(src_m);
    VM_PAGE_CHECK(dest_m);

    pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}