/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_fault.c	7.6 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

static char rcsid[] = "$Header: /home/cvs/386BSD/src/sys.386bsd/vm/vm_fault.c,v 1.1.1.1 93/06/12 14:57:40 rgrimes Exp $";

/*
 *	Page fault handling module.
 */

#include "param.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
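/*
 *	change_wiring is TRUE when the fault is taken on behalf of
 *	vm_fault_wire() or vm_fault_unwire() rather than an actual access;
 *	in that case the page is wired or unwired at the end of the fault
 *	instead of being placed on the active queue.
 */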
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	vm_stat.faults++;		/* needs lock XXX */
/*
 *	Recovery actions
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

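/*
 *	Note on the cleanup macros above: UNLOCK_AND_DEALLOCATE is
 *	UNLOCK_THINGS plus dropping the extra reference on first_object;
 *	UNLOCK_THINGS releases the current object (and frees the busy
 *	first_m if we have chained down to a backing object) and then
 *	invokes UNLOCK_MAP to release the map lookup if it is still held.
 */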
	RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
	 */

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */

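	/*
	 *	This loop walks the shadow chain, one object per iteration.
	 *	It exits (via break) in one of three ways: the page is found
	 *	resident in some object, an object's pager supplies it from
	 *	backing storage, or the chain runs out and a zero-filled page
	 *	is installed in the top-level object.
	 */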
	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
			if (m->busy) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_wakeup(&vm_pages_needed); /* XXX! -- what does this do? */
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_wakeup(&vm_pages_needed); /* XXX! */
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			if (m->absent)
				panic("vm_fault: absent");

			/*
			 *	If the desired access to this page has
			 *	been locked out, request that it be unlocked.
			 */

			if (fault_type & m->page_lock) {
#ifdef DOTHREADS
				int	wait_result;

				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_wakeup(&vm_pages_needed); /* XXX! -- what does this do? */
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_wakeup(&vm_pages_needed); /* XXX */
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->inactive) {
				queue_remove(&vm_page_queue_inactive, m,
						vm_page_t, pageq);
				m->inactive = FALSE;
				vm_page_inactive_count--;
				vm_stat.reactivations++;
			}

			if (m->active) {
				queue_remove(&vm_page_queue_active, m,
						vm_page_t, pageq);
				m->active = FALSE;
				vm_page_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->busy = TRUE;
			m->absent = FALSE;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
		    || (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if ((object->pager != NULL) &&
				(!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;

			rv = vm_pager_get(object->pager, m, TRUE);
			if (rv == VM_PAGER_OK) {
				/*
				 *	Found the page.
				 *	Leave it busy while we play with it.
				 */
				vm_object_lock(object);

				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				vm_stat.pageins++;
				m->fake = FALSE;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 *	Remove the bogus page (which does not
			 *	exist at this object/offset); before
			 *	doing so, we must get back our object
			 *	lock to preserve our invariant.
			 *
			 *	Also wake up any other thread that may want
			 *	to bring in this page.
			 *
			 *	If this is the top-level object, we must
			 *	leave the busy page to prevent another
			 *	thread from rushing past us, and inserting
			 *	the page in that object at the same time
			 *	that we are.
			 */

			vm_object_lock(object);
			/*
			 *	Data outside the range of the pager; an error
			 */
			if (rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			if (object != first_object) {
				FREE_PAGE(m);
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			vm_stat.zero_fill_count++;
			m->fake = FALSE;
			m->absent = FALSE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if (m->absent || m->active || m->inactive || !m->busy)
		panic("vm_fault: absent or active or inactive or not busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

			/*
			 *	We already have an empty page in
			 *	first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->fake = FALSE;
			first_m->absent = FALSE;

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXX If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			vm_page_activate(m);
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			vm_stat.cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
			prot &= (~VM_PROT_WRITE);
			m->copy_on_write = TRUE;
		}
	}
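	/*
	 *	m is now the page that will be entered in the pmap: either
	 *	the page located (or paged in) somewhere on the shadow chain,
	 *	or the private copy just made in the top-level object.  It is
	 *	still marked busy and is not on the paging queues.
	 */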

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before copy object handling");

	/*
	 *	If the page is being written, but hasn't been
	 *	copied to the copy-object, we have to copy it there.
	 */
	RetryCopy:
	if (first_object->copy != NULL) {
		vm_object_t		copy_object = first_object->copy;
		vm_offset_t		copy_offset;
		vm_page_t		copy_m;

		/*
		 *	We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->copy_on_write = TRUE;
		}
		else {
			/*
			 *	Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 *	Make another reference to the copy-object,
			 *	to keep it from disappearing during the
			 *	copy.
			 */
			copy_object->ref_count++;

			/*
			 *	Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != NULL)) {
				if (copy_m->busy) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_wakeup(&vm_pages_needed); /* XXX! -- what does this do? */
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_wakeup(&vm_pages_needed); /* XXX */
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 *	If the page is not in memory (in the object)
			 *	and the object has a pager, we have to check
			 *	if the pager has the data in secondary
			 *	storage.
			 */
			if (!page_exists) {

				/*
				 *	If we don't allocate a (blank) page
				 *	here... another thread could try
				 *	to page it in, allocate a page, and
				 *	then block on the busy page in its
				 *	shadow (first_object).  Then we'd
				 *	trip over the busy page after we
				 *	found that the copy_object's pager
				 *	doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
							copy_offset);
				if (copy_m == NULL) {
					/*
					 *	Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

				if (copy_object->pager != NULL) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
							copy_object->pager,
							(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 * Since the map is unlocked, someone
					 * else could have copied this object
					 * and put a different copy_object
					 * between the two.  Or, the last
					 * reference to the copy-object (other
					 * than the one we have) may have
					 * disappeared - if that has happened,
					 * we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 *	Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
							/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 *	We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 *	Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->fake = FALSE;
				copy_m->absent = FALSE;

				/*
				 * Things to remember:
				 * 1. The copied page must be marked 'dirty'
				 *    so it will be paged out to the copy
				 *    object.
				 * 2. If the old page was in use by any users
				 *    of the copy-object, it must be removed
				 *    from all pmaps.  (We can't know which
				 *    pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
						  VM_PROT_NONE);
				copy_m->clean = FALSE;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 *	The reference count on copy_object must be
			 *	at least 2: one for our extra reference,
			 *	and at least one from the outside world
			 *	(we checked that when we last locked
			 *	copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->copy_on_write = FALSE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	Since map entries may be pageable, make sure we can
		 *	take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 *	If we don't need the page any longer, put it on the
		 *	active list (the easiest thing to do here).  If no
		 *	one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 *	Changing from read to write permission is OK - we leave
		 *	the page write-protected, and catch the write fault.
		 *	Changing from write to read permission means that we
		 *	can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->copy_on_write)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 * (the various bits we're fiddling with here are locked by
	 * the object's lock)
	 */

	/* XXX	This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->copy_on_write = FALSE;

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.   We don't put the
	 *	page back on the active queue until later so
	 *	that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
			prot & ~(m->page_lock), wired);

	/*
	 *	If the page is not wired down, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

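	/*
	 *	The fault is requested with VM_PROT_NONE and change_wiring
	 *	set; since the lookup reports the entry as wired, vm_fault()
	 *	substitutes the entry's protection for the fault type, so the
	 *	page ends up mapped with its full protection and its wire
	 *	count bumped.
	 */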
	for (va = start; va < end; va += PAGE_SIZE) {
		(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
	}
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{

	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 *	Create the top-level object for the destination entry.
	 *	(Doesn't actually shadow anything - we copy the pages
	 *	directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 *	Loop through all of the pages in the entry's range, copying
	 *	each one from the source object (it should be there) to the
	 *	destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 *	Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 *	Find the page in the source object, and copy it in.
		 *	(Because the source is wired down, the page will be
		 *	in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 *	Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
				prot, FALSE);

		/*
		 *	Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}

}