[unix-history] usr/src/sys/vm/vm_page.c
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	7.10 (Berkeley) %G%
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

queue_head_t    *vm_page_buckets;               /* Array of buckets */
int             vm_page_bucket_count = 0;       /* How big is array? */
int             vm_page_hash_mask;              /* Mask for hash function */
simple_lock_data_t      bucket_lock;            /* lock for all buckets XXX */

queue_head_t    vm_page_queue_free;
queue_head_t    vm_page_queue_active;
queue_head_t    vm_page_queue_inactive;
simple_lock_data_t      vm_page_queue_lock;
simple_lock_data_t      vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t       vm_page_startup_initialized;

vm_page_t       vm_page_array;
long            first_page;
long            last_page;
vm_offset_t     first_phys_addr;
vm_offset_t     last_phys_addr;
vm_size_t       page_mask;
int             page_shift;

/*
 * vm_set_page_size:
 *
 * Sets the page size, perhaps based upon the memory
 * size.  Must be called before any use of page-size
 * dependent functions.
 *
 * Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

        if (cnt.v_page_size == 0)
                cnt.v_page_size = DEFAULT_PAGE_SIZE;
        page_mask = cnt.v_page_size - 1;
        if ((page_mask & cnt.v_page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
        for (page_shift = 0; ; page_shift++)
                if ((1 << page_shift) == cnt.v_page_size)
                        break;
}
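
/*
 * Illustrative sketch (not part of the original routine): once
 * vm_set_page_size() has run, page_shift and page_mask support the usual
 * byte/page conversions without a divide.  For example, with a 4096-byte
 * page size:
 *
 *      page_shift == 12, page_mask == 0xfff
 *      byte offset within a page:      addr & page_mask
 *      byte address -> page number:    addr >> page_shift
 *      page number -> byte address:    pn << page_shift
 */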


/*
 * vm_page_startup:
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and
 * for the object/offset-to-page hash table headers.
 * Each page cell is initialized and placed on the free list.
 */
void vm_page_startup(start, end)
        vm_offset_t     *start;
        vm_offset_t     *end;
{
        register vm_page_t      m;
        register queue_t        bucket;
        vm_size_t               npages;
        int                     i;
        vm_offset_t             pa;
        extern  vm_offset_t     kentry_data;
        extern  vm_size_t       kentry_data_size;


        /*
         * Initialize the locks
         */

        simple_lock_init(&vm_page_queue_free_lock);
        simple_lock_init(&vm_page_queue_lock);

        /*
         * Initialize the queue headers for the free queue,
         * the active queue and the inactive queue.
         */

        queue_init(&vm_page_queue_free);
        queue_init(&vm_page_queue_active);
        queue_init(&vm_page_queue_inactive);

        /*
         * Calculate the number of hash table buckets.
         *
         * The number of buckets MUST BE a power of 2, and
         * the actual value is the next power of 2 greater
         * than the number of physical pages in the system.
         *
         * Note:
         *      This computation can be tweaked if desired.
         */

        if (vm_page_bucket_count == 0) {
                vm_page_bucket_count = 1;
                while (vm_page_bucket_count < atop(*end - *start))
                        vm_page_bucket_count <<= 1;
        }

        vm_page_hash_mask = vm_page_bucket_count - 1;

        /*
         * Allocate (and initialize) the hash table buckets.
         */
        vm_page_buckets = (queue_t) pmap_bootstrap_alloc(vm_page_bucket_count
                * sizeof(struct queue_entry));
        bucket = vm_page_buckets;

        for (i = vm_page_bucket_count; i--;) {
                queue_init(bucket);
                bucket++;
        }

        simple_lock_init(&bucket_lock);

        /*
         * Truncate the remainder of physical memory to our page size.
         */

        *end = trunc_page(*end);

        /*
         * Pre-allocate maps and map entries that cannot be dynamically
         * allocated via malloc().  The maps include the kernel_map and
         * kmem_map which must be initialized before malloc() will
         * work (obviously).  Also could include pager maps which would
         * be allocated before kmeminit.
         *
         * Allow some kernel map entries... this should be plenty
         * since people shouldn't be cluttering up the kernel
         * map (they should use their own maps).
         */

        kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
                           MAX_KMAPENT * sizeof(struct vm_map_entry);
        kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);

        /*
         * Compute the number of pages of memory that will be
         * available for use (taking into account the overhead
         * of a page structure per page).
         */

        cnt.v_free_count = npages =
                (*end - *start)/(PAGE_SIZE + sizeof(struct vm_page));
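
        /*
         * Illustrative sketch: each page handed to the VM system costs
         * PAGE_SIZE bytes of usable memory plus one struct vm_page of
         * bookkeeping, so the divisor above charges both to every page.
         * For example, with 4096-byte pages and a (hypothetical) 64-byte
         * struct vm_page, roughly 64/4160, or about 1.5%, of the managed
         * range goes to page structures.
         */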

        /*
         * Record the extent of physical memory that the
         * virtual memory system manages.
         */

        first_page = *start;
        first_page += npages*sizeof(struct vm_page);
        first_page = atop(round_page(first_page));
        last_page  = first_page + npages - 1;

        first_phys_addr = ptoa(first_page);
        last_phys_addr  = ptoa(last_page) + PAGE_MASK;


        /*
         * Allocate and clear the mem entry structures.
         */

        m = vm_page_array = (vm_page_t)
                pmap_bootstrap_alloc(npages * sizeof(struct vm_page));

        /*
         * Initialize the mem entry structures now, and
         * put them in the free queue.
         */

        pa = first_phys_addr;
        while (npages--) {
                m->copy_on_write = FALSE;
                m->wanted = FALSE;
                m->inactive = FALSE;
                m->active = FALSE;
                m->busy = FALSE;
                m->object = NULL;
                m->phys_addr = pa;
#ifdef i386
                if (pmap_isvalidphys(m->phys_addr)) {
                        queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
                } else {
                        /* perhaps iomem needs its own type, or dev pager? */
                        m->fictitious = 1;
                        m->busy = TRUE;
                        cnt.v_free_count--;
                }
#else /* i386 */
                queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
#endif /* i386 */
                m++;
                pa += PAGE_SIZE;
        }

        /*
         * Initialize vm_pages_needed lock here - don't wait for pageout
         * daemon XXX
         */
        simple_lock_init(&vm_pages_needed_lock);

        /* from now on, pmap_bootstrap_alloc can't be used */
        vm_page_startup_initialized = TRUE;
}

/*
 * vm_page_hash:
 *
 * Distributes the object/offset key pair among hash buckets.
 *
 * NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
        (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
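
/*
 * Illustrative sketch: because vm_page_bucket_count is a power of 2,
 * masking with vm_page_hash_mask (bucket count - 1) selects a bucket the
 * same way a modulo would, but without a divide.  For example, with 256
 * buckets the mask is 0xff, so
 *
 *      key = (unsigned)object + (unsigned)atop(offset);
 *      bucket index = key & 0xff;      (== key % 256)
 */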

/*
 * vm_page_insert:              [ internal use only ]
 *
 * Inserts the given mem entry into the object/object-page
 * table and object list.
 *
 * The object and page must be locked.
 */

static void vm_page_insert(mem, object, offset)
        register vm_page_t      mem;
        register vm_object_t    object;
        register vm_offset_t    offset;
{
        register queue_t        bucket;
        int                     spl;

        VM_PAGE_CHECK(mem);

        if (mem->tabled)
                panic("vm_page_insert: already inserted");

        /*
         * Record the object/offset pair in this page
         */

        mem->object = object;
        mem->offset = offset;

        /*
         * Insert it into the object_object/offset hash table
         */

        bucket = &vm_page_buckets[vm_page_hash(object, offset)];
        spl = splimp();
        simple_lock(&bucket_lock);
        queue_enter(bucket, mem, vm_page_t, hashq);
        simple_unlock(&bucket_lock);
        (void) splx(spl);

        /*
         * Now link into the object's list of backed pages.
         */

        queue_enter(&object->memq, mem, vm_page_t, listq);
        mem->tabled = TRUE;

        /*
         * And show that the object has one more resident
         * page.
         */

        object->resident_page_count++;
}

/*
 * vm_page_remove:              [ internal use only ]
 *                              NOTE: used by device pager as well -wfj
 *
 * Removes the given mem entry from the object/offset-page
 * table and the object page list.
 *
 * The object and page must be locked.
 */

void vm_page_remove(mem)
        register vm_page_t      mem;
{
        register queue_t        bucket;
        int                     spl;

        VM_PAGE_CHECK(mem);

        if (!mem->tabled)
                return;

        /*
         * Remove from the object_object/offset hash table
         */

        bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
        spl = splimp();
        simple_lock(&bucket_lock);
        queue_remove(bucket, mem, vm_page_t, hashq);
        simple_unlock(&bucket_lock);
        (void) splx(spl);

        /*
         * Now remove from the object's list of backed pages.
         */

        queue_remove(&mem->object->memq, mem, vm_page_t, listq);

        /*
         * And show that the object has one fewer resident
         * page.
         */

        mem->object->resident_page_count--;

        mem->tabled = FALSE;
}

/*
 * vm_page_lookup:
 *
 * Returns the page associated with the object/offset
 * pair specified; if none is found, NULL is returned.
 *
 * The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
        register vm_object_t    object;
        register vm_offset_t    offset;
{
        register vm_page_t      mem;
        register queue_t        bucket;
        int                     spl;

        /*
         * Search the hash table for this object/offset pair
         */

        bucket = &vm_page_buckets[vm_page_hash(object, offset)];

        spl = splimp();
        simple_lock(&bucket_lock);
        mem = (vm_page_t) queue_first(bucket);
        while (!queue_end(bucket, (queue_entry_t) mem)) {
                VM_PAGE_CHECK(mem);
                if ((mem->object == object) && (mem->offset == offset)) {
                        simple_unlock(&bucket_lock);
                        splx(spl);
                        return(mem);
                }
                mem = (vm_page_t) queue_next(&mem->hashq);
        }

        simple_unlock(&bucket_lock);
        splx(spl);
        return(NULL);
}

/*
 * vm_page_rename:
 *
 * Move the given memory entry from its
 * current object to the specified target object/offset.
 *
 * The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
        register vm_page_t      mem;
        register vm_object_t    new_object;
        vm_offset_t             new_offset;
{
        if (mem->object == new_object)
                return;

        vm_page_lock_queues();  /* keep page from moving out from
                                   under pageout daemon */
        vm_page_remove(mem);
        vm_page_insert(mem, new_object, new_offset);
        vm_page_unlock_queues();
}

void vm_page_init(mem, object, offset)
        vm_page_t       mem;
        vm_object_t     object;
        vm_offset_t     offset;
{
#ifdef DEBUG
#define vm_page_init(mem, object, offset)  {\
                (mem)->busy = TRUE; \
                (mem)->tabled = FALSE; \
                vm_page_insert((mem), (object), (offset)); \
                (mem)->absent = FALSE; \
                (mem)->fictitious = FALSE; \
                (mem)->page_lock = VM_PROT_NONE; \
                (mem)->unlock_request = VM_PROT_NONE; \
                (mem)->laundry = FALSE; \
                (mem)->active = FALSE; \
                (mem)->inactive = FALSE; \
                (mem)->wire_count = 0; \
                (mem)->clean = TRUE; \
                (mem)->copy_on_write = FALSE; \
                (mem)->fake = TRUE; \
                (mem)->pagerowned = FALSE; \
                (mem)->ptpage = FALSE; \
        }
#else
#define vm_page_init(mem, object, offset)  {\
                (mem)->busy = TRUE; \
                (mem)->tabled = FALSE; \
                vm_page_insert((mem), (object), (offset)); \
                (mem)->absent = FALSE; \
                (mem)->fictitious = FALSE; \
                (mem)->page_lock = VM_PROT_NONE; \
                (mem)->unlock_request = VM_PROT_NONE; \
                (mem)->laundry = FALSE; \
                (mem)->active = FALSE; \
                (mem)->inactive = FALSE; \
                (mem)->wire_count = 0; \
                (mem)->clean = TRUE; \
                (mem)->copy_on_write = FALSE; \
                (mem)->fake = TRUE; \
        }
#endif

        vm_page_init(mem, object, offset);
}

/*
 * vm_page_alloc:
 *
 * Allocate and return a memory cell associated
 * with this VM object/offset pair.
 *
 * Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
        vm_object_t     object;
        vm_offset_t     offset;
{
        register vm_page_t      mem;
        int                     spl;

        spl = splimp();                         /* XXX */
        simple_lock(&vm_page_queue_free_lock);
        if (queue_empty(&vm_page_queue_free)) {
                simple_unlock(&vm_page_queue_free_lock);
                splx(spl);
                return(NULL);
        }

        queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

        cnt.v_free_count--;
        simple_unlock(&vm_page_queue_free_lock);
        splx(spl);

        vm_page_init(mem, object, offset);

        /*
         * Decide if we should poke the pageout daemon.
         * We do this if the free count is less than the low
         * water mark, or if the free count is less than the high
         * water mark (but above the low water mark) and the inactive
         * count is less than its target.
         *
         * We don't have the counts locked ... if they change a little,
         * it doesn't really matter.
         */

        if ((cnt.v_free_count < cnt.v_free_min) ||
                        ((cnt.v_free_count < cnt.v_free_target) &&
                        (cnt.v_inactive_count < cnt.v_inactive_target)))
                thread_wakeup((int)&vm_pages_needed);
        return(mem);
}
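
/*
 * Illustrative sketch of a caller: since vm_page_alloc() returns NULL
 * when the free list is empty, callers generally retry after waiting
 * for the pageout daemon to replenish free memory, along the lines of
 * (the exact wait primitive varies by caller and is not shown here):
 *
 *      while ((mem = vm_page_alloc(object, offset)) == NULL)
 *              wait for free pages, then try again;
 */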

/*
 * vm_page_free:
 *
 * Returns the given page to the free list,
 * disassociating it from any VM object.
 *
 * Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
        register vm_page_t      mem;
{
        vm_page_remove(mem);
        if (mem->active) {
                queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
                mem->active = FALSE;
                cnt.v_active_count--;
        }

        if (mem->inactive) {
                queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
                mem->inactive = FALSE;
                cnt.v_inactive_count--;
        }

        if (!mem->fictitious) {
                int     spl;

                spl = splimp();
                simple_lock(&vm_page_queue_free_lock);
                queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

                cnt.v_free_count++;
                simple_unlock(&vm_page_queue_free_lock);
                splx(spl);
        }
}

/*
 * vm_page_wire:
 *
 * Mark this page as wired down by yet
 * another map, removing it from paging queues
 * as necessary.
 *
 * The page queues must be locked.
 */
void vm_page_wire(mem)
        register vm_page_t      mem;
{
        VM_PAGE_CHECK(mem);

        if (mem->wire_count == 0) {
                if (mem->active) {
                        queue_remove(&vm_page_queue_active, mem, vm_page_t,
                                                pageq);
                        cnt.v_active_count--;
                        mem->active = FALSE;
                }
                if (mem->inactive) {
                        queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
                                                pageq);
                        cnt.v_inactive_count--;
                        mem->inactive = FALSE;
                }
                cnt.v_wire_count++;
        }
        mem->wire_count++;
}

/*
 * vm_page_unwire:
 *
 * Release one wiring of this page, potentially
 * enabling it to be paged again.
 *
 * The page queues must be locked.
 */
void vm_page_unwire(mem)
        register vm_page_t      mem;
{
        VM_PAGE_CHECK(mem);

        mem->wire_count--;
        if (mem->wire_count == 0) {
                queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
                cnt.v_active_count++;
                mem->active = TRUE;
                cnt.v_wire_count--;
        }
}
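
/*
 * Illustrative sketch: wire_count is a reference count, so wirings nest.
 * Each caller brackets its use with a wire/unwire pair while holding the
 * page queue lock, and the page only returns to the active queue when
 * the last wiring is released, e.g.:
 *
 *      vm_page_lock_queues();
 *      vm_page_wire(mem);
 *      vm_page_unlock_queues();
 *      ... use the page; it cannot be paged out ...
 *      vm_page_lock_queues();
 *      vm_page_unwire(mem);
 *      vm_page_unlock_queues();
 */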

/*
 * vm_page_deactivate:
 *
 * Returns the given page to the inactive list,
 * indicating that no physical maps have access
 * to this page.  [Used by the physical mapping system.]
 *
 * The page queues must be locked.
 */
void vm_page_deactivate(m)
        register vm_page_t      m;
{
        VM_PAGE_CHECK(m);

        /*
         * Only move active pages -- ignore locked or already
         * inactive ones.
         */

        if (m->active) {
                pmap_clear_reference(VM_PAGE_TO_PHYS(m));
                queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
                queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
                m->active = FALSE;
                m->inactive = TRUE;
                cnt.v_active_count--;
                cnt.v_inactive_count++;
                if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
                        m->clean = FALSE;
                m->laundry = !m->clean;
        }
}

/*
 * vm_page_activate:
 *
 * Put the specified page on the active list (if appropriate).
 *
 * The page queues must be locked.
 */

void vm_page_activate(m)
        register vm_page_t      m;
{
        VM_PAGE_CHECK(m);

        if (m->inactive) {
                queue_remove(&vm_page_queue_inactive, m, vm_page_t,
                                        pageq);
                cnt.v_inactive_count--;
                m->inactive = FALSE;
        }
        if (m->wire_count == 0) {
                if (m->active)
                        panic("vm_page_activate: already active");

                queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
                m->active = TRUE;
                cnt.v_active_count++;
        }
}

/*
 * vm_page_zero_fill:
 *
 * Zero-fill the specified page.
 * Written as a standard pagein routine, to
 * be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
        vm_page_t       m;
{
        VM_PAGE_CHECK(m);

        m->clean = 0;
        pmap_zero_page(VM_PAGE_TO_PHYS(m));
        return(TRUE);
}

/*
 * vm_page_copy:
 *
 * Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
        vm_page_t       src_m;
        vm_page_t       dest_m;
{
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dest_m);

        dest_m->clean = 0;
        pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}