[unix-history] / sys / vm / vm_page.c
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
37 * $Id: vm_page.c,v 1.16 1994/04/14 07:50:22 davidg Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 * Resident memory management module.
69 */
70
71#include "param.h"
72#include "systm.h"
73
74#include "vm.h"
75#include "vm_map.h"
76#include "vm_page.h"
77#include "vm_pageout.h"
78#include "proc.h"
79
80/*
81 * Associated with each page of user-allocatable memory is a
82 * page structure.
83 */
84
85queue_head_t *vm_page_buckets; /* Array of buckets */
86int vm_page_bucket_count = 0; /* How big is array? */
87int vm_page_hash_mask; /* Mask for hash function */
88simple_lock_data_t bucket_lock; /* lock for all buckets XXX */
89
90queue_head_t vm_page_queue_free;
91queue_head_t vm_page_queue_active;
92queue_head_t vm_page_queue_inactive;
93simple_lock_data_t vm_page_queue_lock;
94simple_lock_data_t vm_page_queue_free_lock;
95
96vm_page_t vm_page_array;
97long first_page;
98long last_page;
99vm_offset_t first_phys_addr;
100vm_offset_t last_phys_addr;
101
102int vm_page_free_count;
103int vm_page_active_count;
104int vm_page_inactive_count;
105int vm_page_wire_count;
106int vm_page_laundry_count;
107int vm_page_count;
108extern int vm_pageout_pages_needed;
109
110int vm_page_free_target = 0;
111int vm_page_free_min = 0;
112int vm_page_inactive_target = 0;
113int vm_page_free_reserved = 0;
114
115vm_size_t page_size = PAGE_SIZE;
116
117/*
118 * vm_page_startup:
119 *
120 * Initializes the resident memory module.
121 *
122 * Allocates memory for the page cells, and
123 * for the object/offset-to-page hash table headers.
124 * Each page cell is initialized and placed on the free list.
125 */
126
127vm_offset_t
128vm_page_startup(starta, enda, vaddr)
129 register vm_offset_t starta;
130 vm_offset_t enda;
131 register vm_offset_t vaddr;
132{
133 register vm_offset_t mapped;
134 register vm_page_t m;
135 register queue_t bucket;
136 vm_size_t npages, page_range;
137 register vm_offset_t new_start;
138 int i;
139 vm_offset_t pa;
140 int nblocks;
141 vm_offset_t first_managed_page;
142 int size;
143
144 extern vm_offset_t kentry_data;
145 extern vm_size_t kentry_data_size;
146 extern vm_offset_t phys_avail[];
147/* the biggest memory array is the second group of pages */
148 vm_offset_t start;
149 vm_offset_t biggestone, biggestsize;
150
151 vm_offset_t total;
152
153 total = 0;
154 biggestsize = 0;
155 biggestone = 0;
156 nblocks = 0;
157 vaddr = round_page(vaddr);
158
159 for (i = 0; phys_avail[i + 1]; i += 2) {
160 phys_avail[i] = round_page(phys_avail[i]);
161 phys_avail[i+1] = trunc_page(phys_avail[i+1]);
162 }
163
164 for (i = 0; phys_avail[i + 1]; i += 2) {
165 int size = phys_avail[i+1] - phys_avail[i];
166 if (size > biggestsize) {
167 biggestone = i;
168 biggestsize = size;
169 }
170 ++nblocks;
171 total += size;
172 }
173
174 start = phys_avail[biggestone];
175
176
177 /*
178 * Initialize the locks
179 */
180
181 simple_lock_init(&vm_page_queue_free_lock);
182 simple_lock_init(&vm_page_queue_lock);
183
184 /*
185 * Initialize the queue headers for the free queue,
186 * the active queue and the inactive queue.
187 */
188
189 queue_init(&vm_page_queue_free);
190 queue_init(&vm_page_queue_active);
191 queue_init(&vm_page_queue_inactive);
192
193 /*
194 * Allocate (and initialize) the hash table buckets.
195 *
196 * The number of buckets MUST BE a power of 2, and
197 * the actual value is the next power of 2 greater
198 * than the number of physical pages in the system.
199 *
200 * Note:
201 * This computation can be tweaked if desired.
202 */
203 vm_page_buckets = (queue_t) vaddr;
204 bucket = vm_page_buckets;
205 if (vm_page_bucket_count == 0) {
206 vm_page_bucket_count = 1;
207 while (vm_page_bucket_count < atop(total))
208 vm_page_bucket_count <<= 1;
209 }
210
211
212 vm_page_hash_mask = vm_page_bucket_count - 1;
213
214 /*
215 * Validate these addresses.
216 */
217
218 new_start = start + vm_page_bucket_count * sizeof(struct queue_entry);
219 new_start = round_page(new_start);
220 mapped = vaddr;
221 vaddr = pmap_map(mapped, start, new_start,
222 VM_PROT_READ|VM_PROT_WRITE);
223 start = new_start;
224 bzero((caddr_t) mapped, vaddr - mapped);
225 mapped = vaddr;
226
227 for (i = 0; i < vm_page_bucket_count; i++) {
228 queue_init(bucket);
229 bucket++;
230 }
231
232 simple_lock_init(&bucket_lock);
233
234 /*
235 * round (or truncate) the addresses to our page size.
236 */
237
238 /*
239 * Pre-allocate maps and map entries that cannot be dynamically
240 * allocated via malloc(). The maps include the kernel_map and
241 * kmem_map which must be initialized before malloc() will
242 * work (obviously). Also could include pager maps which would
243 * be allocated before kmeminit.
244 *
245 * Allow some kernel map entries... this should be plenty
246 * since people shouldn't be cluttering up the kernel
247 * map (they should use their own maps).
248 */
249
250 kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
251 MAX_KMAPENT * sizeof(struct vm_map_entry);
252 kentry_data_size = round_page(kentry_data_size);
253 kentry_data = (vm_offset_t) vaddr;
254 vaddr += kentry_data_size;
255
256 /*
257 * Validate these zone addresses.
258 */
259
260 new_start = start + (vaddr - mapped);
261 pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
262 bzero((caddr_t) mapped, (vaddr - mapped));
263 start = round_page(new_start);
264
265 /*
266 * Compute the number of pages of memory that will be
267 * available for use (taking into account the overhead
268 * of a page structure per page).
269 */
270
271 npages = (total - (start - phys_avail[biggestone])) / (PAGE_SIZE + sizeof(struct vm_page));
272 first_page = phys_avail[0] / PAGE_SIZE;
273
274 page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;
275 /*
276 * Initialize the mem entry structures now, and
277 * put them in the free queue.
278 */
279
280 vm_page_array = (vm_page_t) vaddr;
281 mapped = vaddr;
282
283
284 /*
285 * Validate these addresses.
286 */
287
288 new_start = round_page(start + page_range * sizeof (struct vm_page));
289 mapped = pmap_map(mapped, start, new_start,
290 VM_PROT_READ|VM_PROT_WRITE);
291 start = new_start;
292
293 first_managed_page = start / PAGE_SIZE;
294
295 /*
296 * Clear all of the page structures
297 */
298 bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));
299
300 vm_page_count = 0;
301 vm_page_free_count = 0;
302 for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
303 if (i == biggestone)
304 pa = ptoa(first_managed_page);
305 else
306 pa = phys_avail[i];
307 while (pa < phys_avail[i + 1] && npages-- > 0) {
308 ++vm_page_count;
309 ++vm_page_free_count;
310 m = PHYS_TO_VM_PAGE(pa);
311 m->flags = 0;
312 m->object = 0;
313 m->phys_addr = pa;
314 m->hold_count = 0;
315 queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
316 pa += PAGE_SIZE;
317 }
318 }
319
320 /*
321 * Initialize vm_pages_needed lock here - don't wait for pageout
322 * daemon XXX
323 */
324 simple_lock_init(&vm_pages_needed_lock);
325
326 return(mapped);
327}
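/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * vm_page_startup() assumes phys_avail[] is a zero-terminated array of
 * physical-address pairs, each pair giving the start and end of one usable
 * chunk of RAM, for example (addresses are hypothetical):
 *
 *	vm_offset_t phys_avail[] = {
 *		0x00001000, 0x0009f000,		start/end of low memory
 *		0x00100000, 0x00800000,		start/end of memory above 1MB
 *		0, 0				terminator
 *	};
 *
 * The "for (i = 0; phys_avail[i + 1]; i += 2)" loops above walk these pairs,
 * and the largest chunk (biggestone) is the one from which the vm_page
 * structures and the hash buckets are carved.
 */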
328
329/*
330 * vm_page_hash:
331 *
332 * Distributes the object/offset key pair among hash buckets.
333 *
334 * NOTE: This function depends on vm_page_bucket_count being a power of 2.
335 */
336inline const int
337vm_page_hash(object, offset)
338 vm_object_t object;
339 vm_offset_t offset;
340{
341 return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
342}
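/*
 * Editor's note -- a worked example, not part of the original file: because
 * vm_page_bucket_count is a power of 2, vm_page_hash_mask is a block of
 * low-order one bits, so the '&' above acts as a cheap modulus by the bucket
 * count.  With 16MB of managed memory and 4KB pages, atop(total) is 4096, so
 * vm_page_startup() picks vm_page_bucket_count = 4096 and
 * vm_page_hash_mask = 0xfff, and every (object, offset) pair hashes to a
 * bucket index in the range 0..4095.
 */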
343
344/*
345 * vm_page_insert: [ internal use only ]
346 *
347 * Inserts the given mem entry into the object/offset-page
348 * table and object list.
349 *
350 * The object and page must be locked.
351 * Interrupts must be disabled in this routine!
352 */
353
354void
355vm_page_insert(mem, object, offset)
356 register vm_page_t mem;
357 register vm_object_t object;
358 register vm_offset_t offset;
359{
360 register queue_t bucket;
361 int spl;
362
363 VM_PAGE_CHECK(mem);
364
365 if (mem->flags & PG_TABLED)
366 panic("vm_page_insert: already inserted");
367
368 /*
369 * Record the object/offset pair in this page
370 */
371
372 mem->object = object;
373 mem->offset = offset;
374
375 /*
376 * Insert it into the object/offset hash table
377 */
378
379 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
380 simple_lock(&bucket_lock);
381 queue_enter(bucket, mem, vm_page_t, hashq);
382 simple_unlock(&bucket_lock);
383
384 /*
385 * Now link into the object's list of backed pages.
386 */
387
388 queue_enter(&object->memq, mem, vm_page_t, listq);
389 mem->flags |= PG_TABLED;
390
391 /*
392 * And show that the object has one more resident
393 * page.
394 */
395
396 object->resident_page_count++;
397}
398
399/*
400 * vm_page_remove: [ internal use only ]
401 *
402 * Removes the given mem entry from the object/offset-page
403 * table and the object page list.
404 *
405 * The object and page must be locked.
406 *
407 * Interrupts must be disabled in this routine!
408 */
409
410void
411vm_page_remove(mem)
412 register vm_page_t mem;
413{
414 register queue_t bucket;
415 int spl;
416
417 VM_PAGE_CHECK(mem);
418
419 if (!(mem->flags & PG_TABLED)) {
420 printf("page not tabled?????\n");
421 return;
422 }
423
424 /*
425 * Remove from the object/offset hash table
426 */
427
428 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
429 simple_lock(&bucket_lock);
430 queue_remove(bucket, mem, vm_page_t, hashq);
431 simple_unlock(&bucket_lock);
432
433 /*
434 * Now remove from the object's list of backed pages.
435 */
436
437 queue_remove(&mem->object->memq, mem, vm_page_t, listq);
438
439 /*
440 * And show that the object has one fewer resident
441 * page.
442 */
443
444 mem->object->resident_page_count--;
445 mem->object = 0;
446
447 mem->flags &= ~PG_TABLED;
448}
449
450/*
451 * vm_page_lookup:
452 *
453 * Returns the page associated with the object/offset
454 * pair specified; if none is found, NULL is returned.
455 *
456 * The object must be locked. No side effects.
457 */
458
459vm_page_t
460vm_page_lookup(object, offset)
461 register vm_object_t object;
462 register vm_offset_t offset;
463{
464 register vm_page_t mem;
465 register queue_t bucket;
466 int spl;
467
468 /*
469 * Search the hash table for this object/offset pair
470 */
471
472 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
473 spl = splimp();
474
475 simple_lock(&bucket_lock);
476 mem = (vm_page_t) queue_first(bucket);
477 while (!queue_end(bucket, (queue_entry_t) mem)) {
478 VM_PAGE_CHECK(mem);
479 if ((mem->object == object) && (mem->offset == offset)) {
480 simple_unlock(&bucket_lock);
481 splx(spl);
482 return(mem);
483 }
484 mem = (vm_page_t) queue_next(&mem->hashq);
485 }
486
487 simple_unlock(&bucket_lock);
488 splx(spl);
489 return(NULL);
490}
491
492/*
493 * vm_page_rename:
494 *
495 * Move the given memory entry from its
496 * current object to the specified target object/offset.
497 *
498 * The object must be locked.
499 */
500void
501vm_page_rename(mem, new_object, new_offset)
502 register vm_page_t mem;
503 register vm_object_t new_object;
504 vm_offset_t new_offset;
505{
506 int spl;
507 if (mem->object == new_object)
508 return;
509
510 vm_page_lock_queues(); /* keep page from moving out from
511 under pageout daemon */
512 spl = splimp();
513 vm_page_remove(mem);
514 vm_page_insert(mem, new_object, new_offset);
515 splx(spl);
516 vm_page_unlock_queues();
517}
518
519/*
520 * vm_page_alloc:
521 *
522 * Allocate and return a memory cell associated
523 * with this VM object/offset pair.
524 *
525 * Object must be locked.
526 */
527vm_page_t
528vm_page_alloc(object, offset)
529 vm_object_t object;
530 vm_offset_t offset;
531{
532 register vm_page_t mem;
533 int spl;
534
535 spl = splimp();
536 simple_lock(&vm_page_queue_free_lock);
537 if ( object != kernel_object &&
538 object != kmem_object &&
539 curproc != pageproc && curproc != &proc0 &&
540 vm_page_free_count < vm_page_free_reserved) {
541
542 simple_unlock(&vm_page_queue_free_lock);
543 splx(spl);
544 /*
545 * this wakeup seems unnecessary, but there is code that
546 * might just check to see if there are free pages, and
547 * punt if there aren't. VM_WAIT does this too, but
548 * redundant wakeups aren't that bad...
549 */
550 if (curproc != pageproc)
551 wakeup((caddr_t) &vm_pages_needed);
552 return(NULL);
553 }
554 if (queue_empty(&vm_page_queue_free)) {
555 simple_unlock(&vm_page_queue_free_lock);
556 splx(spl);
557 /*
558 * comment above re: wakeups applies here too...
559 */
560 if (curproc != pageproc)
561 wakeup((caddr_t) &vm_pages_needed);
562 return(NULL);
563 }
564
565 queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);
566
567 vm_page_free_count--;
568 simple_unlock(&vm_page_queue_free_lock);
569
570 mem->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
571 vm_page_insert(mem, object, offset);
572 mem->wire_count = 0;
573 mem->hold_count = 0;
574 mem->act_count = 0;
575 splx(spl);
576
577/*
578 * Don't wake up the pageout daemon too often; only wake it when
579 * we are nearly out of memory.
580 */
581 if (curproc != pageproc &&
582 (vm_page_free_count < vm_page_free_reserved))
583 wakeup((caddr_t) &vm_pages_needed);
584
585 return(mem);
586}
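/*
 * Editor's note -- a usage sketch, not part of the original file: callers
 * that cannot tolerate a NULL return from vm_page_alloc() typically retry in
 * a loop, sleeping in VM_WAIT (a macro expected to be provided by
 * vm_pageout.h) until the pageout daemon frees memory, roughly:
 *
 *	while ((m = vm_page_alloc(object, offset)) == NULL)
 *		VM_WAIT;
 */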
587
588/*
589 * vm_page_free:
590 *
591 * Returns the given page to the free list,
592 * disassociating it with any VM object.
593 *
594 * Object and page must be locked prior to entry.
595 */
596void
597vm_page_free(mem)
598 register vm_page_t mem;
599{
600 int spl;
601
602 spl = splimp();
603
604 vm_page_remove(mem);
605 if (mem->flags & PG_ACTIVE) {
606 queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
607 mem->flags &= ~PG_ACTIVE;
608 vm_page_active_count--;
609 }
610
611 if (mem->flags & PG_INACTIVE) {
612 queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
613 mem->flags &= ~PG_INACTIVE;
614 vm_page_inactive_count--;
615 }
616
617
618 if (!(mem->flags & PG_FICTITIOUS)) {
619 simple_lock(&vm_page_queue_free_lock);
620 if (mem->wire_count) {
621 vm_page_wire_count--;
622 mem->wire_count = 0;
623 }
624 queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);
625
626 vm_page_free_count++;
627 simple_unlock(&vm_page_queue_free_lock);
628 splx(spl);
629
630 /*
631 * if pageout daemon needs pages, then tell it that there
632 * are some free.
633 */
634 if (vm_pageout_pages_needed)
635 wakeup((caddr_t)&vm_pageout_pages_needed);
636
637 /*
638 * wakeup processes that are waiting on memory if we
639 * hit a high water mark.
640 */
641 if (vm_page_free_count == vm_page_free_min) {
642 wakeup((caddr_t)&vm_page_free_count);
643 }
644
645 /*
646 * wakeup scheduler process if we have lots of memory.
647 * this process will swapin processes.
648 */
649 if (vm_page_free_count == vm_page_free_target) {
650 wakeup((caddr_t)&proc0);
651 }
652
653 } else {
654 splx(spl);
655 }
656 wakeup((caddr_t) mem);
657}
658
659/*
660 * vm_page_wire:
661 *
662 * Mark this page as wired down by yet
663 * another map, removing it from paging queues
664 * as necessary.
665 *
666 * The page queues must be locked.
667 */
668void
669vm_page_wire(mem)
670 register vm_page_t mem;
671{
672 int spl;
673 VM_PAGE_CHECK(mem);
674 spl = splimp();
675
676 if (mem->wire_count == 0) {
677 if (mem->flags & PG_ACTIVE) {
678 queue_remove(&vm_page_queue_active, mem, vm_page_t,
679 pageq);
680 vm_page_active_count--;
681 mem->flags &= ~PG_ACTIVE;
682 }
683 if (mem->flags & PG_INACTIVE) {
684 queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
685 pageq);
686 vm_page_inactive_count--;
687 mem->flags &= ~PG_INACTIVE;
688 }
689 vm_page_wire_count++;
690 }
691 mem->wire_count++;
692 splx(spl);
693}
694
695/*
696 * vm_page_unwire:
697 *
698 * Release one wiring of this page, potentially
699 * enabling it to be paged again.
700 *
701 * The page queues must be locked.
702 */
703void
704vm_page_unwire(mem)
705 register vm_page_t mem;
706{
707 int spl;
708 VM_PAGE_CHECK(mem);
709
710 spl = splimp();
711 if (mem->wire_count != 0)
712 mem->wire_count--;
713 if (mem->wire_count == 0) {
714 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
715 vm_page_active_count++;
716 mem->flags |= PG_ACTIVE;
717 vm_page_wire_count--;
718 }
719 splx(spl);
720}
721
722/*
723 * vm_page_deactivate:
724 *
725 * Returns the given page to the inactive list,
726 * indicating that no physical maps have access
727 * to this page. [Used by the physical mapping system.]
728 *
729 * The page queues must be locked.
730 */
731void
732vm_page_deactivate(m)
733 register vm_page_t m;
734{
735 int spl;
736 VM_PAGE_CHECK(m);
737
738 /*
739 * Only move active pages -- ignore locked or already
740 * inactive ones.
741 *
742 * XXX: sometimes we get pages which aren't wired down
743 * or on any queue - we need to put them on the inactive
744 * queue also, otherwise we lose track of them.
745 * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
746 */
747
748 spl = splimp();
749 if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 && m->hold_count == 0) {
750 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
751 if (m->flags & PG_ACTIVE) {
752 queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
753 m->flags &= ~PG_ACTIVE;
754 vm_page_active_count--;
755 }
756 queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
757 m->flags |= PG_INACTIVE;
758 vm_page_inactive_count++;
759#define NOT_DEACTIVATE_PROTECTS
760#ifndef NOT_DEACTIVATE_PROTECTS
761 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
762#else
763 if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
764 m->flags &= ~PG_CLEAN;
765#endif
766 if ((m->flags & PG_CLEAN) == 0)
767 m->flags |= PG_LAUNDRY;
768 }
769 splx(spl);
770}
771
772/*
773 * vm_page_makefault
774 *
775 * Cause next access of this page to fault
776 */
777void
778vm_page_makefault(m)
779 vm_page_t m;
780{
a200ca2b 781 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
55768178
DG
782 if ((m->flags & PG_CLEAN) == 0)
783 m->flags |= PG_LAUNDRY;
15637ed4
RG
784}
785
786/*
787 * vm_page_activate:
788 *
789 * Put the specified page on the active list (if appropriate).
790 *
791 * The page queues must be locked.
792 */
793void
794vm_page_activate(m)
795 register vm_page_t m;
796{
797 int spl, target, shortage, maxscan;
798 vm_page_t actm, next;
799
800 VM_PAGE_CHECK(m);
801
802 spl = splimp();
803
804 if (m->wire_count) {
805 splx(spl);
806 return;
807 }
808
809 if ((m->flags & (PG_INACTIVE|PG_ACTIVE)) ==
810 (PG_INACTIVE|PG_ACTIVE)) {
811 panic("vm_page_activate: on both queues?");
812 }
813
814 if (m->flags & PG_INACTIVE) {
815 queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
816 vm_page_inactive_count--;
817 m->flags &= ~PG_INACTIVE;
818 vm_stat.reactivations++;
819 }
820
821 if (m->flags & PG_ACTIVE)
822 panic("vm_page_activate: already active");
823
824 m->flags |= PG_ACTIVE;
825 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
826 queue_remove(&m->object->memq, m, vm_page_t, listq);
827 queue_enter(&m->object->memq, m, vm_page_t, listq);
828 vm_page_active_count++;
829 /* m->act_count = 10; */
830 m->act_count = 10;
831
832 splx(spl);
833}
834
835/*
836 * vm_page_zero_fill:
837 *
838 * Zero-fill the specified page.
839 * Written as a standard pagein routine, to
840 * be used by the zero-fill object.
841 */
842
843boolean_t
844vm_page_zero_fill(m)
845 vm_page_t m;
846{
847 VM_PAGE_CHECK(m);
848
849 pmap_zero_page(VM_PAGE_TO_PHYS(m));
850 return(TRUE);
851}
852
853/*
854 * vm_page_copy:
855 *
856 * Copy one page to another
857 */
858void
859vm_page_copy(src_m, dest_m)
860 vm_page_t src_m;
861 vm_page_t dest_m;
862{
863 VM_PAGE_CHECK(src_m);
864 VM_PAGE_CHECK(dest_m);
865
866 pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
867}