Converted vm_page bit fields to flags to allow for some optimizations
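A minimal sketch of what that conversion looks like, assuming illustrative field names and flag values; only the PG_* names (PG_BUSY, PG_ACTIVE, PG_INACTIVE, PG_CLEAN, PG_FAKE, PG_ABSENT, PG_COPY_ON_WRITE) are taken from the tests in the file below, everything else is a guess.

/*
 * Sketch only -- not part of vm_fault.c.  Before the change, each piece
 * of page state was a separate one-bit bit field in struct vm_page
 * (field names here are illustrative):
 */
struct vm_page_before {
	unsigned int	busy:1,		/* page is being brought in */
			clean:1,	/* page not modified since pagein */
			active:1,	/* on the active queue */
			inactive:1,	/* on the inactive queue */
			fake:1,		/* allocated but not yet filled */
			absent:1,	/* data not yet available */
			copy_on_write:1;
};

/*
 * After the change the same state lives in a single flags word that is
 * tested and cleared with PG_* masks; the names match this file, the
 * values are illustrative:
 */
#define	PG_INACTIVE		0x0001
#define	PG_ACTIVE		0x0002
#define	PG_BUSY			0x0010
#define	PG_CLEAN		0x0020
#define	PG_FAKE			0x0040
#define	PG_ABSENT		0x0080
#define	PG_COPY_ON_WRITE	0x0100

struct vm_page_after {
	unsigned short	flags;		/* replaces the bit fields above */
};

The optimizations the log message alludes to show up in the fault handler below as compound operations on the flags word, e.g. the single test (m->flags & (PG_ABSENT|PG_ACTIVE|PG_INACTIVE)) or the single clear m->flags &= ~(PG_FAKE | PG_ABSENT), where the bit-field version needed one access per field.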
[unix-history] / sys / vm / vm_fault.c
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_fault.c 7.6 (Berkeley) 5/7/91
37 * $Id: vm_fault.c,v 1.9 1993/12/19 00:55:59 wollman Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 * Page fault handling module.
69 */
70
71#include "param.h"
72#include "systm.h"
73#include "proc.h" /* XXX - just to get curproc */
74
75#include "vm.h"
76#include "vm_page.h"
77#include "vm_pageout.h"
78#include "vm_user.h" /* make sure we match prototype */
79
80vm_statistics_data_t vm_stat;
81
82/*
83 * vm_fault:
84 *
85 * Handle a page fault occurring at the given address,
86 * requiring the given permissions, in the map specified.
87 * If successful, the page is inserted into the
88 * associated physical map.
89 *
90 * NOTE: the given address should be truncated to the
91 * proper page address.
92 *
93 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
94 * a standard error specifying why the fault is fatal is returned.
95 *
96 *
97 * The map in question must be referenced, and remains so.
98 * Caller may hold no locks.
99 */
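/*
 * (Sketch, not part of the original file.)  Typical callers: vm_fault_wire()
 * later in this file issues
 *	(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
 * for each page being wired, and the machine-dependent trap handler is
 * expected to call something like
 *	rv = vm_fault(map, trunc_page(va), fault_type, FALSE);
 * with the faulting address already truncated to a page boundary, as the
 * NOTE above requires.
 */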
100int
101vm_fault(map, vaddr, fault_type, change_wiring)
102 vm_map_t map;
103 vm_offset_t vaddr;
104 vm_prot_t fault_type;
105 boolean_t change_wiring;
106{
107 vm_object_t first_object;
108 vm_offset_t first_offset;
109 vm_map_entry_t entry;
110 register vm_object_t object;
111 register vm_offset_t offset;
112 register vm_page_t m;
113 vm_page_t first_m;
114 vm_prot_t prot;
115 int result;
116 boolean_t wired;
117 boolean_t su;
118 boolean_t lookup_still_valid;
119 boolean_t page_exists;
120 vm_page_t old_m;
121 vm_object_t next_object;
122
123 vm_stat.faults++; /* needs lock XXX */
124/*
125 * Recovery actions
126 */
127#define FREE_PAGE(m) { \
128 PAGE_WAKEUP(m); \
129 vm_page_lock_queues(); \
130 vm_page_free(m); \
131 vm_page_unlock_queues(); \
132}
133
134#define RELEASE_PAGE(m) { \
135 PAGE_WAKEUP(m); \
136 vm_page_lock_queues(); \
137 vm_page_activate(m); \
138 vm_page_unlock_queues(); \
139}
140
141#define UNLOCK_MAP { \
142 if (lookup_still_valid) { \
143 vm_map_lookup_done(map, entry); \
144 lookup_still_valid = FALSE; \
145 } \
146}
147
148#define UNLOCK_THINGS { \
149 object->paging_in_progress--; \
150 vm_object_unlock(object); \
151 if (object != first_object) { \
152 vm_object_lock(first_object); \
153 FREE_PAGE(first_m); \
154 first_object->paging_in_progress--; \
155 vm_object_unlock(first_object); \
156 } \
157 UNLOCK_MAP; \
158}
159
160#define UNLOCK_AND_DEALLOCATE { \
161 UNLOCK_THINGS; \
162 vm_object_deallocate(first_object); \
163}
164
165 RetryFault: ;
166
167 /*
168 * Find the backing store object and offset into
169 * it to begin the search.
170 */
171
172 if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
173 &first_object, &first_offset,
174 &prot, &wired, &su)) != KERN_SUCCESS) {
175 return(result);
176 }
177 lookup_still_valid = TRUE;
178
179 if (wired)
180 fault_type = prot;
181
182 first_m = NULL;
183
184 /*
185 * Make a reference to this object to
186 * prevent its disposal while we are messing with
187 * it. Once we have the reference, the map is free
188 * to be diddled. Since objects reference their
189 * shadows (and copies), they will stay around as well.
190 */
191
192 vm_object_lock(first_object);
193
194 first_object->ref_count++;
195 first_object->paging_in_progress++;
196
197 /*
198 * INVARIANTS (through entire routine):
199 *
200 * 1) At all times, we must either have the object
201 * lock or a busy page in some object to prevent
202 * some other thread from trying to bring in
203 * the same page.
204 *
205 * Note that we cannot hold any locks during the
206 * pager access or when waiting for memory, so
207 * we use a busy page then.
208 *
209 * Note also that we aren't as concerned about
210 * more than one thread attempting to pager_data_unlock
211 * the same page at once, so we don't hold the page
212 * as busy then, but do record the highest unlock
213 * value so far. [Unlock requests may also be delivered
214 * out of order.]
215 *
216 * 2) Once we have a busy page, we must remove it from
217 * the pageout queues, so that the pageout daemon
218 * will not grab it away.
219 *
220 * 3) To prevent another thread from racing us down the
221 * shadow chain and entering a new page in the top
222 * object before we do, we must keep a busy page in
223 * the top object while following the shadow chain.
224 *
225 * 4) We must increment paging_in_progress on any object
226 * for which we have a busy page, to prevent
227 * vm_object_collapse from removing the busy page
228 * without our noticing.
229 */
230
231 /*
232 * Search for the page at object/offset.
233 */
234
235 object = first_object;
236 offset = first_offset;
237
238 /*
239 * See whether this page is resident
240 */
241
242 while (TRUE) {
243 m = vm_page_lookup(object, offset);
244 if (m != NULL) {
245 /*
246 * If the page is being brought in,
247 * wait for it and then retry.
248 */
249 if (m->flags & PG_BUSY) {
250#ifdef DOTHREADS
251 int wait_result;
252
253 PAGE_ASSERT_WAIT(m, !change_wiring);
254 UNLOCK_THINGS;
255 thread_block("pagein");
256 wait_result = current_thread()->wait_result;
257 vm_object_deallocate(first_object);
258 if (wait_result != THREAD_AWAKENED)
259 return(KERN_SUCCESS);
260 goto RetryFault;
261#else
262 PAGE_ASSERT_WAIT(m, !change_wiring);
263 UNLOCK_THINGS;
264 thread_wakeup((int)&vm_pages_needed);/* XXX! */
265 thread_block("pagein");
266 vm_object_deallocate(first_object);
267 goto RetryFault;
268#endif
269 }
270
271 if (m->flags & PG_ABSENT)
272 panic("vm_fault: absent");
273
274 /*
275 * Remove the page from the pageout daemon's
276 * reach while we play with it.
277 */
278
279 vm_page_lock_queues();
280 if (m->flags & PG_INACTIVE) {
281 queue_remove(&vm_page_queue_inactive, m,
282 vm_page_t, pageq);
283 m->flags &= ~PG_INACTIVE;
284 vm_page_inactive_count--;
285 vm_stat.reactivations++;
286 }
287
288 if (m->flags & PG_ACTIVE) {
289 queue_remove(&vm_page_queue_active, m,
290 vm_page_t, pageq);
291 m->flags &= ~PG_ACTIVE;
292 vm_page_active_count--;
293 }
294 vm_page_unlock_queues();
295
296 /*
297 * Mark page busy for other threads.
298 */
299 m->flags |= PG_BUSY;
300 m->flags &= ~PG_ABSENT;
301 break;
302 }
303
304 if (((object->pager != NULL) &&
305 (!change_wiring || wired))
306 || (object == first_object)) {
307
308 /*
309 * Allocate a new page for this object/offset
310 * pair.
311 */
312
313 m = vm_page_alloc(object, offset);
314
315 if (m == NULL) {
316 UNLOCK_AND_DEALLOCATE;
317 VM_WAIT;
318 goto RetryFault;
319 }
320 }
321
322 if ((object->pager != NULL) &&
323 (!change_wiring || wired)) {
324 int rv;
325
326 /*
327 * Now that we have a busy page, we can
328 * release the object lock.
329 */
330 vm_object_unlock(object);
331
332 /*
333 * Call the pager to retrieve the data, if any,
334 * after releasing the lock on the map.
335 */
336 UNLOCK_MAP;
337
338 rv = vm_pager_get(object->pager, m, TRUE);
339 if (rv == VM_PAGER_OK) {
340 /*
341 * Found the page.
342 * Leave it busy while we play with it.
343 */
344 vm_object_lock(object);
345
346 /*
347 * Relookup in case pager changed page.
348 * Pager is responsible for disposition
349 * of old page if moved.
350 */
351 m = vm_page_lookup(object, offset);
352
353 vm_stat.pageins++;
354 m->flags &= ~PG_FAKE;
355 pmap_clear_modify(VM_PAGE_TO_PHYS(m));
356 break;
357 }
358
359 /*
360 * Remove the bogus page (which does not
361 * exist at this object/offset); before
362 * doing so, we must get back our object
363 * lock to preserve our invariant.
364 *
365 * Also wake up any other thread that may want
366 * to bring in this page.
367 *
368 * If this is the top-level object, we must
369 * leave the busy page to prevent another
370 * thread from rushing past us, and inserting
371 * the page in that object at the same time
372 * that we are.
373 */
374
375 vm_object_lock(object);
376 /*
377 * Data outside the range of the pager; an error
378 */
379 if (rv == VM_PAGER_BAD) {
380 FREE_PAGE(m);
381 UNLOCK_AND_DEALLOCATE;
382 return(KERN_PROTECTION_FAILURE); /* XXX */
383 }
384 if (object != first_object) {
385 FREE_PAGE(m);
386 /*
387 * XXX - we cannot just fall out at this
388 * point, m has been freed and is invalid!
389 */
390 }
391 }
392
393 /*
394 * We get here if the object has no pager (or unwiring)
395 * or the pager doesn't have the page.
396 */
397 if (object == first_object)
398 first_m = m;
399
400 /*
401 * Move on to the next object. Lock the next
402 * object before unlocking the current one.
403 */
404
405 offset += object->shadow_offset;
406 next_object = object->shadow;
407 if (next_object == NULL) {
408 /*
409 * If there's no object left, fill the page
410 * in the top object with zeros.
411 */
412 if (object != first_object) {
413 object->paging_in_progress--;
414 vm_object_unlock(object);
415
416 object = first_object;
417 offset = first_offset;
418 m = first_m;
419 vm_object_lock(object);
420 }
421 first_m = NULL;
422
423 vm_page_zero_fill(m);
424 vm_stat.zero_fill_count++;
425 m->flags &= ~(PG_FAKE | PG_ABSENT);
426 break;
427 }
428 else {
429 vm_object_lock(next_object);
430 if (object != first_object)
431 object->paging_in_progress--;
432 vm_object_unlock(object);
433 object = next_object;
434 object->paging_in_progress++;
435 }
436 }
437
438 if ((m->flags & (PG_ABSENT|PG_ACTIVE|PG_INACTIVE)) || !(m->flags & PG_BUSY))
439 panic("vm_fault: absent or active or inactive or not busy after main loop");
440
441 /*
442 * PAGE HAS BEEN FOUND.
443 * [Loop invariant still holds -- the object lock
444 * is held.]
445 */
446
447 old_m = m; /* save page that would be copied */
448
449 /*
450 * If the page is being written, but isn't
451 * already owned by the top-level object,
452 * we have to copy it into a new page owned
453 * by the top-level object.
454 */
455
456 if (object != first_object) {
457 /*
458 * We only really need to copy if we
459 * want to write it.
460 */
461
462 if (fault_type & VM_PROT_WRITE) {
463
464 /*
465 * If we try to collapse first_object at this
466 * point, we may deadlock when we try to get
467 * the lock on an intermediate object (since we
468 * have the bottom object locked). We can't
469 * unlock the bottom object, because the page
470 * we found may move (by collapse) if we do.
471 *
472 * Instead, we first copy the page. Then, when
473 * we have no more use for the bottom object,
474 * we unlock it and try to collapse.
475 *
476 * Note that we copy the page even if we didn't
477 * need to... that's the breaks.
478 */
479
480 /*
481 * We already have an empty page in
482 * first_object - use it.
483 */
484
485 vm_page_copy(m, first_m);
486 first_m->flags &= ~(PG_FAKE | PG_ABSENT);
487
488 /*
489 * If another map is truly sharing this
490 * page with us, we have to flush all
491 * uses of the original page, since we
492 * can't distinguish those which want the
493 * original from those which need the
494 * new copy.
495 *
496 * XXX If we know that only one map has
497 * access to this page, then we could
498 * avoid the pmap_page_protect() call.
499 */
500
501 vm_page_lock_queues();
502 vm_page_activate(m);
503 vm_page_deactivate(m);
504 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
505 vm_page_unlock_queues();
506
507 /*
508 * We no longer need the old page or object.
509 */
510 PAGE_WAKEUP(m);
511 object->paging_in_progress--;
512 vm_object_unlock(object);
513
514 /*
515 * Only use the new page below...
516 */
517
518 vm_stat.cow_faults++;
519 m = first_m;
520 object = first_object;
521 offset = first_offset;
522
523 /*
524 * Now that we've gotten the copy out of the
525 * way, let's try to collapse the top object.
526 */
527 vm_object_lock(object);
528 /*
529 * But we have to play ugly games with
530 * paging_in_progress to do that...
531 */
532 object->paging_in_progress--;
533 vm_object_collapse(object);
534 object->paging_in_progress++;
535 }
536 else {
537 prot &= (~VM_PROT_WRITE);
538 m->flags |= PG_COPY_ON_WRITE;
539 }
540 }
541
542 if (m->flags & (PG_ACTIVE|PG_INACTIVE))
543 panic("vm_fault: active or inactive before copy object handling");
544
545 /*
546 * If the page is being written, but hasn't been
547 * copied to the copy-object, we have to copy it there.
548 */
549 RetryCopy:
550 if (first_object->copy != NULL) {
551 vm_object_t copy_object = first_object->copy;
552 vm_offset_t copy_offset;
553 vm_page_t copy_m;
554
555 /*
556 * We only need to copy if we want to write it.
557 */
558 if ((fault_type & VM_PROT_WRITE) == 0) {
559 prot &= ~VM_PROT_WRITE;
560 m->flags |= PG_COPY_ON_WRITE;
561 }
562 else {
563 /*
564 * Try to get the lock on the copy_object.
565 */
566 if (!vm_object_lock_try(copy_object)) {
567 vm_object_unlock(object);
568 /* should spin a bit here... */
569 vm_object_lock(object);
570 goto RetryCopy;
571 }
572
573 /*
574 * Make another reference to the copy-object,
575 * to keep it from disappearing during the
576 * copy.
577 */
578 copy_object->ref_count++;
579
580 /*
581 * Does the page exist in the copy?
582 */
583 copy_offset = first_offset
584 - copy_object->shadow_offset;
585 copy_m = vm_page_lookup(copy_object, copy_offset);
586 if (page_exists = (copy_m != NULL)) {
587 if (copy_m->flags & PG_BUSY) {
588#ifdef DOTHREADS
589 int wait_result;
590
591 /*
592 * If the page is being brought
593 * in, wait for it and then retry.
594 */
595 PAGE_ASSERT_WAIT(copy_m, !change_wiring);
596 RELEASE_PAGE(m);
597 copy_object->ref_count--;
598 vm_object_unlock(copy_object);
599 UNLOCK_THINGS;
600 thread_block("pagein");
601 wait_result = current_thread()->wait_result;
602 vm_object_deallocate(first_object);
603 if (wait_result != THREAD_AWAKENED)
604 return(KERN_SUCCESS);
605 goto RetryFault;
606#else
607 /*
608 * If the page is being brought
609 * in, wait for it and then retry.
610 */
611 PAGE_ASSERT_WAIT(copy_m, !change_wiring);
612 RELEASE_PAGE(m);
613 copy_object->ref_count--;
614 vm_object_unlock(copy_object);
615 UNLOCK_THINGS;
616 thread_wakeup((int)&vm_pages_needed);
617 /* XXX ^^^^^*/
618 thread_block("pagein");
619 vm_object_deallocate(first_object);
620 goto RetryFault;
621#endif
622 }
623 }
624
625 /*
626 * If the page is not in memory (in the object)
627 * and the object has a pager, we have to check
628 * if the pager has the data in secondary
629 * storage.
630 */
631 if (!page_exists) {
632
633 /*
634 * If we don't allocate a (blank) page
635 * here... another thread could try
636 * to page it in, allocate a page, and
637 * then block on the busy page in its
638 * shadow (first_object). Then we'd
639 * trip over the busy page after we
640 * found that the copy_object's pager
641 * doesn't have the page...
642 */
643 copy_m = vm_page_alloc(copy_object,
644 copy_offset);
645 if (copy_m == NULL) {
646 /*
647 * Wait for a page, then retry.
648 */
649 RELEASE_PAGE(m);
650 copy_object->ref_count--;
651 vm_object_unlock(copy_object);
652 UNLOCK_AND_DEALLOCATE;
653 VM_WAIT;
654 goto RetryFault;
655 }
656
657 if (copy_object->pager != NULL) {
658 vm_object_unlock(object);
659 vm_object_unlock(copy_object);
660 UNLOCK_MAP;
661
662 page_exists = vm_pager_has_page(
663 copy_object->pager,
664 (copy_offset + copy_object->paging_offset));
665
666 vm_object_lock(copy_object);
667
668 /*
669 * Since the map is unlocked, someone
670 * else could have copied this object
671 * and put a different copy_object
672 * between the two. Or, the last
673 * reference to the copy-object (other
674 * than the one we have) may have
675 * disappeared - if that has happened,
676 * we don't need to make the copy.
677 */
678 if (copy_object->shadow != object ||
679 copy_object->ref_count == 1) {
680 /*
681 * Gaah... start over!
682 */
683 FREE_PAGE(copy_m);
684 vm_object_unlock(copy_object);
685 vm_object_deallocate(copy_object);
686 /* may block */
687 vm_object_lock(object);
688 goto RetryCopy;
689 }
690 vm_object_lock(object);
691
692 if (page_exists) {
693 /*
694 * We didn't need the page
695 */
696 FREE_PAGE(copy_m);
697 }
698 }
699 }
700 if (!page_exists) {
701 /*
702 * Must copy page into copy-object.
703 */
704 vm_page_copy(m, copy_m);
705 copy_m->flags &= ~(PG_FAKE|PG_ABSENT);
706
707 /*
708 * Things to remember:
709 * 1. The copied page must be marked 'dirty'
710 * so it will be paged out to the copy
711 * object.
712 * 2. If the old page was in use by any users
713 * of the copy-object, it must be removed
714 * from all pmaps. (We can't know which
715 * pmaps use it.)
716 */
717 vm_page_lock_queues();
718 pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
719 VM_PROT_NONE);
720 copy_m->flags &= ~PG_CLEAN;
721 vm_page_activate(copy_m); /* XXX */
722 vm_page_unlock_queues();
723
724 PAGE_WAKEUP(copy_m);
725 }
726 /*
727 * The reference count on copy_object must be
728 * at least 2: one for our extra reference,
729 * and at least one from the outside world
730 * (we checked that when we last locked
731 * copy_object).
732 */
733 copy_object->ref_count--;
734 vm_object_unlock(copy_object);
735 m->flags &= ~PG_COPY_ON_WRITE;
736 }
737 }
738
739 if (m->flags & (PG_ACTIVE|PG_INACTIVE))
740 panic("vm_fault: active or inactive before retrying lookup");
741
742 /*
743 * We must verify that the maps have not changed
744 * since our last lookup.
745 */
746
747 if (!lookup_still_valid) {
748 vm_object_t retry_object;
749 vm_offset_t retry_offset;
750 vm_prot_t retry_prot;
751
752 /*
753 * Since map entries may be pageable, make sure we can
754 * take a page fault on them.
755 */
756 vm_object_unlock(object);
757
758 /*
759 * To avoid trying to write_lock the map while another
760 * thread has it read_locked (in vm_map_pageable), we
761 * do not try for write permission. If the page is
762 * still writable, we will get write permission. If it
763 * is not, or has been marked needs_copy, we enter the
764 * mapping without write permission, and will merely
765 * take another fault.
766 */
767 result = vm_map_lookup(&map, vaddr,
768 fault_type & ~VM_PROT_WRITE, &entry,
769 &retry_object, &retry_offset, &retry_prot,
770 &wired, &su);
771
772 vm_object_lock(object);
773
774 /*
775 * If we don't need the page any longer, put it on the
776 * active list (the easiest thing to do here). If no
777 * one needs it, pageout will grab it eventually.
778 */
779
780 if (result != KERN_SUCCESS) {
781 RELEASE_PAGE(m);
782 UNLOCK_AND_DEALLOCATE;
783 return(result);
784 }
785
786 lookup_still_valid = TRUE;
787
788 if ((retry_object != first_object) ||
789 (retry_offset != first_offset)) {
790 RELEASE_PAGE(m);
791 UNLOCK_AND_DEALLOCATE;
792 goto RetryFault;
793 }
794
795 /*
796 * Check whether the protection has changed or the object
797 * has been copied while we left the map unlocked.
798 * Changing from read to write permission is OK - we leave
799 * the page write-protected, and catch the write fault.
800 * Changing from write to read permission means that we
801 * can't mark the page write-enabled after all.
802 */
803 prot &= retry_prot;
804 if (m->flags & PG_COPY_ON_WRITE)
805 prot &= ~VM_PROT_WRITE;
806 }
807
808 /*
809 * (the various bits we're fiddling with here are locked by
810 * the object's lock)
811 */
812
813 /* XXX This distorts the meaning of the copy_on_write bit */
814
815 if (prot & VM_PROT_WRITE)
816 m->flags &= ~PG_COPY_ON_WRITE;
817
818 /*
819 * It's critically important that a wired-down page be faulted
820 * only once in each map for which it is wired.
821 */
822
823 if (m->flags & (PG_ACTIVE|PG_INACTIVE))
824 panic("vm_fault: active or inactive before pmap_enter");
825
826 vm_object_unlock(object);
827
828 /*
829 * Put this page into the physical map.
830 * We had to do the unlock above because pmap_enter
831 * may cause other faults. We don't put the
832 * page back on the active queue until later so
833 * that the page-out daemon won't find us (yet).
834 */
835 pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
836
837 /*
838 * If the page is not wired down, then put it where the
839 * pageout daemon can find it.
840 */
841 vm_object_lock(object);
842 vm_page_lock_queues();
843 if (change_wiring) {
844 if (wired)
845 vm_page_wire(m);
846 else
847 vm_page_unwire(m);
848 }
849 else
850 vm_page_activate(m);
851 vm_page_unlock_queues();
852
853 /*
854 * Unlock everything, and return
855 */
856
857 PAGE_WAKEUP(m);
858 UNLOCK_AND_DEALLOCATE;
859
860 return(KERN_SUCCESS);
861
862}
863
864/*
865 * vm_fault_wire:
866 *
867 * Wire down a range of virtual addresses in a map.
868 */
869void vm_fault_wire(map, start, end)
870 vm_map_t map;
871 vm_offset_t start, end;
872{
873
874 register vm_offset_t va;
875 register pmap_t pmap;
876
877 pmap = vm_map_pmap(map);
878
879 /*
880 * Inform the physical mapping system that the
881 * range of addresses may not fault, so that
882 * page tables and such can be locked down as well.
883 */
884
885 pmap_pageable(pmap, start, end, FALSE);
886
887 /*
888 * We simulate a fault to get the page and enter it
889 * in the physical map.
890 */
891
892 for (va = start; va < end; va += PAGE_SIZE) {
893 (void) vm_fault(map, va, VM_PROT_NONE, TRUE);
894 }
895}
896
897
898/*
899 * vm_fault_unwire:
900 *
901 * Unwire a range of virtual addresses in a map.
902 */
903void vm_fault_unwire(map, start, end)
904 vm_map_t map;
905 vm_offset_t start, end;
906{
907
908 register vm_offset_t va, pa;
909 register pmap_t pmap;
910
911 pmap = vm_map_pmap(map);
912
913 /*
914 * Since the pages are wired down, we must be able to
915 * get their mappings from the physical map system.
916 */
917
918 vm_page_lock_queues();
919
920 for (va = start; va < end; va += PAGE_SIZE) {
921 pa = pmap_extract(pmap, va);
922 if (pa == (vm_offset_t) 0) {
923 panic("unwire: page not in pmap");
924 }
925 pmap_change_wiring(pmap, va, FALSE);
926 vm_page_unwire(PHYS_TO_VM_PAGE(pa));
927 }
928 vm_page_unlock_queues();
929
930 /*
931 * Inform the physical mapping system that the range
932 * of addresses may fault, so that page tables and
933 * such may be unwired themselves.
934 */
935
936 pmap_pageable(pmap, start, end, TRUE);
937
938}
939
940/*
941 * Routine:
942 * vm_fault_copy_entry
943 * Function:
944 * Copy all of the pages from a wired-down map entry to another.
945 *
946 * In/out conditions:
947 * The source and destination maps must be locked for write.
948 * The source map entry must be wired down (or be a sharing map
949 * entry corresponding to a main map entry that is wired down).
950 */
951
952void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
953 vm_map_t dst_map;
954 vm_map_t src_map;
955 vm_map_entry_t dst_entry;
956 vm_map_entry_t src_entry;
957{
958
959 vm_object_t dst_object;
960 vm_object_t src_object;
961 vm_offset_t dst_offset;
962 vm_offset_t src_offset;
963 vm_prot_t prot;
964 vm_offset_t vaddr;
965 vm_page_t dst_m;
966 vm_page_t src_m;
967
968#ifdef lint
969 src_map++;
970#endif lint
971
972 src_object = src_entry->object.vm_object;
973 src_offset = src_entry->offset;
974
975 /*
976 * Create the top-level object for the destination entry.
977 * (Doesn't actually shadow anything - we copy the pages
978 * directly.)
979 */
980 dst_object = vm_object_allocate(
981 (vm_size_t) (dst_entry->end - dst_entry->start));
982
983 dst_entry->object.vm_object = dst_object;
984 dst_entry->offset = 0;
985
986 prot = dst_entry->max_protection;
987
988 /*
989 * Loop through all of the pages in the entry's range, copying
990 * each one from the source object (it should be there) to the
991 * destination object.
992 */
993 for (vaddr = dst_entry->start, dst_offset = 0;
994 vaddr < dst_entry->end;
995 vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
996
997 /*
998 * Allocate a page in the destination object
999 */
1000 vm_object_lock(dst_object);
1001 do {
1002 dst_m = vm_page_alloc(dst_object, dst_offset);
1003 if (dst_m == NULL) {
1004 vm_object_unlock(dst_object);
1005 VM_WAIT;
1006 vm_object_lock(dst_object);
1007 }
1008 } while (dst_m == NULL);
1009
1010 /*
1011 * Find the page in the source object, and copy it in.
1012 * (Because the source is wired down, the page will be
1013 * in memory.)
1014 */
1015 vm_object_lock(src_object);
1016 src_m = vm_page_lookup(src_object, dst_offset + src_offset);
1017 if (src_m == NULL)
1018 panic("vm_fault_copy_wired: page missing");
1019
1020 vm_page_copy(src_m, dst_m);
1021
1022 /*
1023 * Enter it in the pmap...
1024 */
1025 vm_object_unlock(src_object);
1026 vm_object_unlock(dst_object);
1027
1028 pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
1029 prot, FALSE);
1030
1031 /*
1032 * Mark it no longer busy, and put it on the active list.
1033 */
1034 vm_object_lock(dst_object);
1035 vm_page_lock_queues();
1036 vm_page_activate(dst_m);
1037 vm_page_unlock_queues();
1038 PAGE_WAKEUP(dst_m);
1039 vm_object_unlock(dst_object);
1040 }
1041
1042}