[unix-history] / sys / vm / vm_page.c
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
 * $Id: vm_page.c,v 1.11 1994/01/31 04:32:41 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */

#include "param.h"
#include "systm.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"
#include "proc.h"

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

queue_head_t    *vm_page_buckets;               /* Array of buckets */
int             vm_page_bucket_count = 0;       /* How big is array? */
int             vm_page_hash_mask;              /* Mask for hash function */
simple_lock_data_t      bucket_lock;            /* lock for all buckets XXX */

queue_head_t    vm_page_queue_free;
queue_head_t    vm_page_queue_active;
queue_head_t    vm_page_queue_inactive;
simple_lock_data_t      vm_page_queue_lock;
simple_lock_data_t      vm_page_queue_free_lock;

vm_page_t       vm_page_array;
long            first_page;
long            last_page;
vm_offset_t     first_phys_addr;
vm_offset_t     last_phys_addr;

int     vm_page_free_count;
int     vm_page_active_count;
int     vm_page_inactive_count;
int     vm_page_wire_count;
int     vm_page_laundry_count;
int     vm_page_count;
extern int      vm_pageout_pages_needed;

int     vm_page_free_target = 0;
int     vm_page_free_min = 0;
int     vm_page_inactive_target = 0;
int     vm_page_free_reserved = 0;

vm_size_t       page_size = PAGE_SIZE;

/*
 * vm_page_startup:
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and
 * for the object/offset-to-page hash table headers.
 * Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
        register vm_offset_t starta;
        vm_offset_t enda;
        register vm_offset_t vaddr;
{
        register vm_offset_t mapped;
        register vm_page_t m;
        register queue_t bucket;
        vm_size_t npages, page_range;
        register vm_offset_t new_start;
        int i;
        vm_offset_t pa;
        int nblocks;
        vm_offset_t first_managed_page;
        int size;

        extern vm_offset_t kentry_data;
        extern vm_size_t kentry_data_size;
        extern vm_offset_t phys_avail[];
        /* the biggest memory array is the second group of pages */
        vm_offset_t start;
        vm_offset_t biggestone, biggestsize;

        vm_offset_t total;

        total = 0;
        biggestsize = 0;
        biggestone = 0;
        nblocks = 0;
        vaddr = round_page(vaddr);

        for (i = 0; phys_avail[i + 1]; i += 2) {
                phys_avail[i] = round_page(phys_avail[i]);
                phys_avail[i+1] = trunc_page(phys_avail[i+1]);
        }

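        /*
         * Scan the phys_avail[] chunks to find the largest contiguous
         * region of physical memory; the hash buckets, kernel map
         * entries and page structures below are carved out of its start.
         */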
        for (i = 0; phys_avail[i + 1]; i += 2) {
                int size = phys_avail[i+1] - phys_avail[i];
                if (size > biggestsize) {
                        biggestone = i;
                        biggestsize = size;
                }
                ++nblocks;
                total += size;
        }

        start = phys_avail[biggestone];

        /*
         * Initialize the locks
         */

        simple_lock_init(&vm_page_queue_free_lock);
        simple_lock_init(&vm_page_queue_lock);

        /*
         * Initialize the queue headers for the free queue,
         * the active queue and the inactive queue.
         */

        queue_init(&vm_page_queue_free);
        queue_init(&vm_page_queue_active);
        queue_init(&vm_page_queue_inactive);

        /*
         * Allocate (and initialize) the hash table buckets.
         *
         * The number of buckets MUST BE a power of 2, and
         * the actual value is the next power of 2 greater
         * than the number of physical pages in the system.
         *
         * Note:
         *      This computation can be tweaked if desired.
         */
        vm_page_buckets = (queue_t) vaddr;
        bucket = vm_page_buckets;
        if (vm_page_bucket_count == 0) {
                vm_page_bucket_count = 1;
                while (vm_page_bucket_count < atop(total))
                        vm_page_bucket_count <<= 1;
        }

        vm_page_hash_mask = vm_page_bucket_count - 1;

        /*
         * Validate these addresses.
         */

        new_start = start + vm_page_bucket_count * sizeof(struct queue_entry);
        new_start = round_page(new_start);
        mapped = vaddr;
        vaddr = pmap_map(mapped, start, new_start,
                        VM_PROT_READ|VM_PROT_WRITE);
        start = new_start;
        bzero((caddr_t) mapped, vaddr - mapped);
        mapped = vaddr;

        for (i = 0; i < vm_page_bucket_count; i++) {
                queue_init(bucket);
                bucket++;
        }

        simple_lock_init(&bucket_lock);

        /*
         * round (or truncate) the addresses to our page size.
         */

        /*
         * Pre-allocate maps and map entries that cannot be dynamically
         * allocated via malloc().  The maps include the kernel_map and
         * kmem_map which must be initialized before malloc() will
         * work (obviously).  Also could include pager maps which would
         * be allocated before kmeminit.
         *
         * Allow some kernel map entries... this should be plenty
         * since people shouldn't be cluttering up the kernel
         * map (they should use their own maps).
         */

        kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
                           MAX_KMAPENT * sizeof(struct vm_map_entry);
        kentry_data_size = round_page(kentry_data_size);
        kentry_data = (vm_offset_t) vaddr;
        vaddr += kentry_data_size;

        /*
         * Validate these zone addresses.
         */

        new_start = start + (vaddr - mapped);
        pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
        bzero((caddr_t) mapped, (vaddr - mapped));
        start = round_page(new_start);

        /*
         * Compute the number of pages of memory that will be
         * available for use (taking into account the overhead
         * of a page structure per page).
         */

        npages = (total - (start - phys_avail[biggestone])) /
                 (PAGE_SIZE + sizeof(struct vm_page));
        first_page = phys_avail[0] / PAGE_SIZE;

        page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;
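        /*
         * page_range spans from the first to the last managed physical
         * address, including any holes between phys_avail[] chunks, so
         * the vm_page_array allocated below has one entry per page of
         * that entire span.
         */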

        /*
         * Initialize the mem entry structures now, and
         * put them in the free queue.
         */

        vm_page_array = (vm_page_t) vaddr;
        mapped = vaddr;

        /*
         * Validate these addresses.
         */

        new_start = round_page(start + page_range * sizeof(struct vm_page));
        mapped = pmap_map(mapped, start, new_start,
                        VM_PROT_READ|VM_PROT_WRITE);
        start = new_start;

        first_managed_page = start / PAGE_SIZE;

        /*
         * Clear all of the page structures
         */
        bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));

        vm_page_count = 0;
        vm_page_free_count = 0;
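        /*
         * Put the pages of every phys_avail[] chunk on the free queue.
         * For the biggest chunk, start at first_managed_page so that the
         * memory just consumed by the page structures and hash buckets
         * is not handed back out.
         */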
        for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
                if (i == biggestone)
                        pa = ptoa(first_managed_page);
                else
                        pa = phys_avail[i];
                while (pa < phys_avail[i + 1] && npages-- > 0) {
                        ++vm_page_count;
                        ++vm_page_free_count;
                        m = PHYS_TO_VM_PAGE(pa);
                        m->flags = 0;
                        m->object = 0;
                        m->phys_addr = pa;
                        queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
                        pa += PAGE_SIZE;
                }
        }

        /*
         * Initialize vm_pages_needed lock here - don't wait for pageout
         * daemon  XXX
         */
        simple_lock_init(&vm_pages_needed_lock);

        return(mapped);
}

/*
 * vm_page_hash:
 *
 * Distributes the object/offset key pair among hash buckets.
 *
 * NOTE: This hash depends on vm_page_bucket_count being a power of 2;
 * masking with vm_page_hash_mask is then equivalent to taking the key
 * modulo the bucket count.
 */
inline const int
vm_page_hash(object, offset)
        vm_object_t object;
        vm_offset_t offset;
{
        return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
}

/*
 * vm_page_insert:              [ internal use only ]
 *
 * Inserts the given mem entry into the object/offset hash
 * table and the object's list of pages.
 *
 * The object and page must be locked.
 * Interrupts must be disabled in this routine!
 */

void
vm_page_insert(mem, object, offset)
        register vm_page_t mem;
        register vm_object_t object;
        register vm_offset_t offset;
{
        register queue_t bucket;
        int spl;

        VM_PAGE_CHECK(mem);

        if (mem->flags & PG_TABLED)
                panic("vm_page_insert: already inserted");

        /*
         * Record the object/offset pair in this page
         */

        mem->object = object;
        mem->offset = offset;

        /*
         * Insert it into the object/offset hash table
         */

        bucket = &vm_page_buckets[vm_page_hash(object, offset)];
        simple_lock(&bucket_lock);
        queue_enter(bucket, mem, vm_page_t, hashq);
        simple_unlock(&bucket_lock);

        /*
         * Now link into the object's list of backed pages.
         */

        queue_enter(&object->memq, mem, vm_page_t, listq);
        mem->flags |= PG_TABLED;

        /*
         * And show that the object has one more resident
         * page.
         */

        object->resident_page_count++;
}

/*
 * vm_page_remove:              [ internal use only ]
 *
 * Removes the given mem entry from the object/offset-page
 * table and the object page list.
 *
 * The object and page must be locked.
 *
 * Interrupts must be disabled in this routine!
 */

void
vm_page_remove(mem)
        register vm_page_t mem;
{
        register queue_t bucket;
        int spl;

        VM_PAGE_CHECK(mem);

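        /*
         * A page that was never entered into the object/offset hash has
         * nothing to remove; complain and bail out.
         */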
        if (!(mem->flags & PG_TABLED)) {
                printf("page not tabled?????\n");
                return;
        }

        /*
         * Remove from the object/offset hash table
         */

        bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
        simple_lock(&bucket_lock);
        queue_remove(bucket, mem, vm_page_t, hashq);
        simple_unlock(&bucket_lock);

        /*
         * Now remove from the object's list of backed pages.
         */

        queue_remove(&mem->object->memq, mem, vm_page_t, listq);

        /*
         * And show that the object has one fewer resident
         * page.
         */

        mem->object->resident_page_count--;
        mem->object = 0;

        mem->flags &= ~PG_TABLED;
}

/*
 * vm_page_lookup:
 *
 * Returns the page associated with the object/offset
 * pair specified; if none is found, NULL is returned.
 *
 * The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, offset)
        register vm_object_t object;
        register vm_offset_t offset;
{
        register vm_page_t mem;
        register queue_t bucket;
        int spl;

        /*
         * Search the hash table for this object/offset pair
         */

        bucket = &vm_page_buckets[vm_page_hash(object, offset)];

        spl = vm_disable_intr();
        simple_lock(&bucket_lock);
        mem = (vm_page_t) queue_first(bucket);
        while (!queue_end(bucket, (queue_entry_t) mem)) {
                VM_PAGE_CHECK(mem);
                if ((mem->object == object) && (mem->offset == offset)) {
                        simple_unlock(&bucket_lock);
                        vm_set_intr(spl);
                        return(mem);
                }
                mem = (vm_page_t) queue_next(&mem->hashq);
        }

        simple_unlock(&bucket_lock);
        vm_set_intr(spl);
        return(NULL);
}

/*
 * vm_page_rename:
 *
 * Move the given memory entry from its
 * current object to the specified target object/offset.
 *
 * The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_offset)
        register vm_page_t mem;
        register vm_object_t new_object;
        vm_offset_t new_offset;
{
        int spl;

        if (mem->object == new_object)
                return;

        vm_page_lock_queues();  /* keep page from moving out from
                                   under pageout daemon */
        spl = vm_disable_intr();
        vm_page_remove(mem);
        vm_page_insert(mem, new_object, new_offset);
        vm_set_intr(spl);
        vm_page_unlock_queues();
}

/*
 * vm_page_alloc:
 *
 * Allocate and return a memory cell associated
 * with this VM object/offset pair.
 *
 * Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset)
        vm_object_t object;
        vm_offset_t offset;
{
        register vm_page_t mem;
        int spl;

        spl = vm_disable_intr();
        simple_lock(&vm_page_queue_free_lock);
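        /*
         * The last vm_page_free_reserved pages are held back for the
         * kernel and kmem objects, the pageout daemon and proc0; an
         * ordinary allocation fails here once the free list drops
         * below that reserve.
         */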
        if (object != kernel_object &&
            object != kmem_object &&
            curproc != pageproc && curproc != &proc0 &&
            vm_page_free_count < vm_page_free_reserved) {

                simple_unlock(&vm_page_queue_free_lock);
                vm_set_intr(spl);
                /*
                 * this wakeup seems unnecessary, but there is code that
                 * might just check to see if there are free pages, and
                 * punt if there aren't.  VM_WAIT does this too, but
                 * redundant wakeups aren't that bad...
                 */
                if (curproc != pageproc)
                        wakeup((caddr_t) &vm_pages_needed);
                return(NULL);
        }
        if (queue_empty(&vm_page_queue_free)) {
                simple_unlock(&vm_page_queue_free_lock);
                vm_set_intr(spl);
                /*
                 * comment above re: wakeups applies here too...
                 */
                if (curproc != pageproc)
                        wakeup((caddr_t) &vm_pages_needed);
                return(NULL);
        }

        queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

        vm_page_free_count--;
        simple_unlock(&vm_page_queue_free_lock);

        mem->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
        vm_page_insert(mem, object, offset);
        mem->wire_count = 0;
        mem->deact = 0;
        vm_set_intr(spl);

        /*
         * To avoid waking the pageout daemon too often, only wake it
         * when we are nearly out of memory.
         */
        if (curproc != pageproc &&
            (vm_page_free_count < vm_page_free_reserved))
                wakeup((caddr_t) &vm_pages_needed);

        return(mem);
}

/*
 * vm_page_free:
 *
 * Returns the given page to the free list,
 * disassociating it from any VM object.
 *
 * Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
        register vm_page_t mem;
{
        int spl;

        spl = vm_disable_intr();

        vm_page_remove(mem);
        mem->deact = 0;
        if (mem->flags & PG_ACTIVE) {
                queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
                mem->flags &= ~PG_ACTIVE;
                vm_page_active_count--;
        }

        if (mem->flags & PG_INACTIVE) {
                queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
                mem->flags &= ~PG_INACTIVE;
                vm_page_inactive_count--;
        }

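        /*
         * Only real (non-fictitious) pages are returned to the free
         * queue; in either case anyone sleeping on the page is woken
         * up below.
         */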
        if (!(mem->flags & PG_FICTITIOUS)) {
                simple_lock(&vm_page_queue_free_lock);
                if (mem->wire_count) {
                        vm_page_wire_count--;
                        mem->wire_count = 0;
                }
                queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

                vm_page_free_count++;
                simple_unlock(&vm_page_queue_free_lock);
                vm_set_intr(spl);

                /*
                 * if pageout daemon needs pages, then tell it that there
                 * are some free.
                 */
                if (vm_pageout_pages_needed)
                        wakeup((caddr_t)&vm_pageout_pages_needed);

                /*
                 * wakeup processes that are waiting on memory if we
                 * hit a high water mark.
                 */
                if (vm_page_free_count == vm_page_free_min) {
                        wakeup((caddr_t)&vm_page_free_count);
                }

                /*
                 * wakeup the scheduler process if we have lots of memory.
                 * this process will swap in processes.
                 */
                if (vm_page_free_count == vm_page_free_target) {
                        wakeup((caddr_t)&proc0);
                }

        } else {
                vm_set_intr(spl);
        }
        wakeup((caddr_t) mem);
}

/*
 * vm_page_wire:
 *
 * Mark this page as wired down by yet
 * another map, removing it from paging queues
 * as necessary.
 *
 * The page queues must be locked.
 */
void
vm_page_wire(mem)
        register vm_page_t mem;
{
        int spl;

        VM_PAGE_CHECK(mem);
        spl = vm_disable_intr();

        if (mem->wire_count == 0) {
                if (mem->flags & PG_ACTIVE) {
                        queue_remove(&vm_page_queue_active, mem, vm_page_t,
                                        pageq);
                        vm_page_active_count--;
                        mem->flags &= ~PG_ACTIVE;
                }
                if (mem->flags & PG_INACTIVE) {
                        queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
                                        pageq);
                        vm_page_inactive_count--;
                        mem->flags &= ~PG_INACTIVE;
                }
                vm_page_wire_count++;
        }
        mem->wire_count++;
        vm_set_intr(spl);
}

/*
 * vm_page_unwire:
 *
 * Release one wiring of this page, potentially
 * enabling it to be paged again.
 *
 * The page queues must be locked.
 */
void
vm_page_unwire(mem)
        register vm_page_t mem;
{
        int spl;

        VM_PAGE_CHECK(mem);

        spl = vm_disable_intr();
        if (mem->wire_count != 0)
                mem->wire_count--;
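        /* when the last wiring is released, make the page pageable again */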
        if (mem->wire_count == 0) {
                queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
                vm_page_active_count++;
                mem->flags |= PG_ACTIVE;
                vm_page_wire_count--;
                vm_pageout_deact_bump(mem);
        }
        vm_set_intr(spl);
}

/*
 * vm_page_deactivate:
 *
 * Returns the given page to the inactive list,
 * indicating that no physical maps have access
 * to this page.  [Used by the physical mapping system.]
 *
 * The page queues must be locked.
 */
void
vm_page_deactivate(m)
        register vm_page_t m;
{
        int spl;

        VM_PAGE_CHECK(m);

        /*
         * Only move active pages -- ignore locked or already
         * inactive ones.
         *
         * XXX: sometimes we get pages which aren't wired down
         * or on any queue - we need to put them on the inactive
         * queue also, otherwise we lose track of them.
         * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
         */

        spl = splhigh();
        m->deact = 0;
        if (!(m->flags & PG_INACTIVE) && m->wire_count == 0) {
                pmap_clear_reference(VM_PAGE_TO_PHYS(m));
                if (m->flags & PG_ACTIVE) {
                        queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
                        m->flags &= ~PG_ACTIVE;
                        vm_page_active_count--;
                }
                queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
                m->flags |= PG_INACTIVE;
                vm_page_inactive_count++;
                pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
                if ((m->flags & PG_CLEAN) == 0)
                        m->flags |= PG_LAUNDRY;
        }
        splx(spl);
}

/*
 * vm_page_makefault:
 *
 * Cause the next access of this page to fault.
 */
void
vm_page_makefault(m)
        vm_page_t m;
{
        pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
        if ((m->flags & PG_CLEAN) == 0)
                m->flags |= PG_LAUNDRY;
}

/*
 * vm_page_activate:
 *
 * Put the specified page on the active list (if appropriate).
 *
 * The page queues must be locked.
 */
void
vm_page_activate(m)
        register vm_page_t m;
{
        int spl;

        VM_PAGE_CHECK(m);

        vm_pageout_deact_bump(m);

        spl = vm_disable_intr();

        if (m->flags & PG_INACTIVE) {
                queue_remove(&vm_page_queue_inactive, m, vm_page_t,
                                pageq);
                vm_page_inactive_count--;
                m->flags &= ~PG_INACTIVE;
        }
        if (m->wire_count == 0) {
                if (m->flags & PG_ACTIVE)
                        panic("vm_page_activate: already active");

                m->flags |= PG_ACTIVE;
                queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
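                /* requeue the page at the tail of its object's memq */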
                queue_remove(&m->object->memq, m, vm_page_t, listq);
                queue_enter(&m->object->memq, m, vm_page_t, listq);
                vm_page_active_count++;
        }

        vm_set_intr(spl);
}

/*
 * vm_page_zero_fill:
 *
 * Zero-fill the specified page.
 * Written as a standard pagein routine, to
 * be used by the zero-fill object.
 */

boolean_t
vm_page_zero_fill(m)
        vm_page_t m;
{
        VM_PAGE_CHECK(m);

        pmap_zero_page(VM_PAGE_TO_PHYS(m));
        return(TRUE);
}

/*
 * vm_page_copy:
 *
 * Copy one page to another.
 */
void
vm_page_copy(src_m, dest_m)
        vm_page_t src_m;
        vm_page_t dest_m;
{
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dest_m);

        pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}