BSD 4_3_Net_2 development
[unix-history] / .ref-8c8a5b54e79564c14fc7a2823a21a8f048449bcf / usr / src / sys / vm / vm_page.c
/*
 * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_page.c	7.1 (Berkeley) %G%
 */

/*
 *	Resident memory management module.
 */

#include "types.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_page.h"
#include "../vm/vm_prot.h"
#include "../vm/vm_statistics.h"
#include "../vm/vm_pageout.h"
#include "../vm/pmap.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

vm_size_t	page_size  = 4096;
vm_size_t	page_mask  = 4095;
int		page_shift = 12;

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;

int	vm_page_free_count;
int	vm_page_active_count;
int	vm_page_inactive_count;
int	vm_page_wire_count;
int	vm_page_laundry_count;

int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void vm_set_page_size()
{
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
}
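
/*
 * Purely illustrative (not part of the original source): with the default
 * page_size of 4096 the loop above yields page_mask = 0xFFF and
 * page_shift = 12; a hypothetical 8192-byte page size would instead give
 * page_mask = 0x1FFF and page_shift = 13.
 */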


/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t vm_page_startup(start, end, vaddr)
	register vm_offset_t	start;
	vm_offset_t		end;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	vm_page_buckets = (queue_t) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(end - start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(((queue_t)start) + vm_page_bucket_count);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	blkclr((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	round (or truncate) the addresses to our page size.
	 */

	end = trunc_page(end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	blkclr((caddr_t) mapped, (vaddr - mapped));
	mapped = vaddr;
	start = new_start;

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	vm_page_free_count = npages =
		(end - start)/(PAGE_SIZE + sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	m = vm_page_array = (vm_page_t) vaddr;
	first_page = start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;

	/*
	 *	Validate these addresses.
	 */

	new_start = start + (round_page(m + npages) - mapped);
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	/*
	 *	Clear all of the page structures
	 */
	blkclr((caddr_t)m, npages * sizeof(*m));

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->wanted = FALSE;
		m->inactive = FALSE;
		m->active = FALSE;
		m->busy = FALSE;
		m->object = VM_OBJECT_NULL;
		m->phys_addr = pa;
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}
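
/*
 * Illustrative arithmetic only (not part of the original source): if
 * 8 megabytes (0x800000 bytes) remain between start and end, PAGE_SIZE is
 * 4096, and sizeof(struct vm_page) is hypothetically 64 bytes, then
 *
 *	npages = 0x800000 / (4096 + 64) = 2016
 *
 * pages go on the free list; the remaining space holds the vm_page
 * structures themselves (plus rounding).
 */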

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define	vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)

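/*
 * Illustrative only (hypothetical values, not from the original source):
 * with vm_page_bucket_count = 1024, vm_page_hash_mask is 0x3FF, so an
 * object at kernel address 0x80012000 and offset 0x5000 (atop(0x5000) == 5
 * with 4096-byte pages) hash to bucket (0x80012000 + 5) & 0x3FF == 5.
 */
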
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset hash
 *	table and the object's page list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(VM_PAGE_NULL);
}
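
/*
 * A minimal usage sketch (illustrative, not part of the original source);
 * the caller is assumed to hold the object lock, as required above:
 *
 *	vm_page_t	m;
 *
 *	m = vm_page_lookup(object, offset);
 *	if (m == VM_PAGE_NULL) {
 *		... no resident page; allocate one or start a pagein ...
 *	}
 */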

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_init:
 *
 *	Initializes the given page structure (marking it busy) and
 *	inserts it into the given object at the given offset.  The macro
 *	below does the real work and is also expanded by later routines
 *	such as vm_page_alloc.
 */
void vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
	}

	vm_page_init(mem, object, offset);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(VM_PAGE_NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	vm_page_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if ((vm_page_free_count < vm_page_free_min) ||
			((vm_page_free_count < vm_page_free_target) &&
			(vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup(&vm_pages_needed);
	return(mem);
}
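
/*
 * A minimal usage sketch (illustrative, not part of the original source);
 * the object is assumed to be locked by the caller, and waking up anyone
 * sleeping on the page is left to the caller's pagein logic:
 *
 *	vm_page_t	m;
 *
 *	m = vm_page_alloc(object, offset);
 *	if (m == VM_PAGE_NULL) {
 *		... free list exhausted; wait for the pageout daemon ...
 *	} else {
 *		vm_page_zero_fill(m);
 *		m->busy = FALSE;
 *	}
 */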

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		vm_page_active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		vm_page_inactive_count--;
	}

	if (!mem->fictitious) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		vm_page_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			vm_page_active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			vm_page_inactive_count--;
			mem->inactive = FALSE;
		}
		vm_page_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		vm_page_wire_count--;
	}
}
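
/*
 * A minimal usage sketch (illustrative, not part of the original source)
 * of the wiring interface; both routines require the page queue lock:
 *
 *	vm_page_lock_queues();
 *	vm_page_wire(m);	(pin the page; it leaves the paging queues)
 *	vm_page_unlock_queues();
 *	...
 *	vm_page_lock_queues();
 *	vm_page_unwire(m);	(last unwire puts it back on the active queue)
 *	vm_page_unlock_queues();
 */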

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->active) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->active = FALSE;
		m->inactive = TRUE;
		vm_page_active_count--;
		vm_page_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		vm_page_active_count++;
	}
}
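
/*
 * A minimal usage sketch (illustrative, not part of the original source):
 * a pageout-style caller removes the physical mappings first (the exact
 * pmap entry point is assumed here) and then moves the page to the
 * inactive queue under the page queue lock:
 *
 *	vm_page_lock_queues();
 *	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
 *	vm_page_deactivate(m);
 *	vm_page_unlock_queues();
 */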

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
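
/*
 * A minimal usage sketch (illustrative, not part of the original source)
 * of a copy-on-write style resolution: allocate a fresh page at the
 * destination and copy the contents across.  Both objects are assumed to
 * be locked by the caller; src_m, dst_object and dst_offset are
 * placeholder names:
 *
 *	vm_page_t	dest_m;
 *
 *	dest_m = vm_page_alloc(dst_object, dst_offset);
 *	if (dest_m != VM_PAGE_NULL)
 *		vm_page_copy(src_m, dest_m);
 */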