/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_object.c,v 1.21 1994/02/10 00:15:51 davidg Exp $
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include "ddb.h"
#include "param.h"
#include "malloc.h"
#include "systm.h"

#include "vm.h"
#include "vm_page.h"
#include "proc.h"

static void _vm_object_allocate(vm_size_t, vm_object_t);
void vm_object_deactivate_pages(vm_object_t);
static void vm_object_cache_trim(void);
static void vm_object_remove(vm_pager_t);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

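/*
 * Illustrative sketch (editor's addition, not in the original source):
 * the typical lifecycle of an object using the routines in this module.
 * The size below is arbitrary.
 */
#if 0
	vm_object_t obj;

	obj = vm_object_allocate(round_page(4 * PAGE_SIZE));	/* ref_count == 1 */
	vm_object_reference(obj);	/* ref_count == 2 */
	vm_object_deallocate(obj);	/* ref_count == 1 */
	vm_object_deallocate(obj);	/* last ref: object is cached if it
					   can persist, else terminated */
#endif
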
queue_head_t	vm_object_cached_list;	/* list of objects persisting */
int		vm_object_cached;	/* size of cached list */
simple_lock_data_t	vm_cache_lock;	/* lock for object cache */

queue_head_t	vm_object_list;		/* list of allocated objects */
long		vm_object_count;	/* count of all objects */
simple_lock_data_t	vm_object_list_lock;
					/* lock for object list and count */

vm_object_t	kernel_object;		/* the single kernel object */
vm_object_t	kmem_object;		/* the kernel malloc object */
struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

extern int vm_cache_max;
#define	VM_OBJECT_HASH_COUNT	127

queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long	object_collapses = 0;
long	object_bypasses = 0;

/*
 * internal version of vm_object_allocate
 */
static inline void
_vm_object_allocate(size, object)
	vm_size_t		size;
	register vm_object_t	object;
{
	queue_init(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->can_persist = FALSE;
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->internal = TRUE;	/* vm_allocate_with_pager will reset */
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	queue_enter(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count++;
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	register int	i;

	queue_init(&vm_object_cached_list);
	queue_init(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(size)
	vm_size_t	size;
{
	register vm_object_t	result;

	result = (vm_object_t)
		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return(result);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
inline void
vm_object_reference(object)
	register vm_object_t	object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t	object;
{
	vm_object_t	temp;

	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			vm_object_unlock(object);
			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->can_persist) {

			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			/* vm_object_deactivate_pages(object); */
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t	object;
{
	register vm_page_t	p;
	vm_object_t		shadow_object;
	int			s;

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
/*
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
*/
		vm_object_unlock(shadow_object);
	}

	/*
	 * optim: get rid of any pages that we can right now
	 * so the pageout daemon can't get any more to page
	 * out at rundown.
	 */
#if 0
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		vm_page_t next = (vm_page_t) queue_next(&p->listq);
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();

		if (p->flags & PG_BUSY) {
			p = next;
			vm_page_unlock_queues();
			continue;
		}
		if (!object->internal) {
			if ((p->flags & PG_CLEAN) == 0) {
				p = next;
				vm_page_unlock_queues();
				continue;
			}

			if (pmap_is_modified(VM_PAGE_TO_PHYS(p))) {
				p->flags &= ~PG_CLEAN;
				p = next;
				vm_page_unlock_queues();
				continue;
			}
		}

		vm_page_free(p);
		vm_page_unlock_queues();
		p = next;
	}
#endif

	/*
	 *	Wait until the pageout daemon is through
	 *	with the object.
	 */

	while (object->paging_in_progress != 0) {
		vm_object_sleep(object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 *	While the paging system is locked,
	 *	pull the object's pages off the active
	 *	and inactive queues.  This keeps the
	 *	pageout daemon from playing with them
	 *	during vm_pager_deallocate.
	 *
	 *	We can't free the pages yet, because the
	 *	object's pager may have to write them out
	 *	before deallocating the paging space.
	 */

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		s = vm_disable_intr();
		if (p->flags & PG_ACTIVE) {
			queue_remove(&vm_page_queue_active, p, vm_page_t,
				pageq);
			p->flags &= ~PG_ACTIVE;
			vm_page_active_count--;
		}

		if (p->flags & PG_INACTIVE) {
			queue_remove(&vm_page_queue_inactive, p, vm_page_t,
				pageq);
			p->flags &= ~PG_INACTIVE;
			vm_page_inactive_count--;
		}
		vm_set_intr(s);
		vm_page_unlock_queues();
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(object);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 *	Clean and free the pages, as appropriate.
	 *	All references to the object are gone,
	 *	so we don't need to lock it.
	 */

	if (!object->internal) {
		vm_object_lock(object);
		vm_object_page_clean(object, 0, 0);
		vm_object_unlock(object);
	}

	while (!queue_empty(&object->memq)) {
		p = (vm_page_t) queue_first(&object->memq);

		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		vm_page_free(p);
		vm_page_unlock_queues();
	}

	/*
	 *	Let the pager know object is dead.
	 */

	if (object->pager != NULL) {
		vm_pager_deallocate(object->pager);
	}

	simple_lock(&vm_object_list_lock);
	queue_remove(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Free the space for the object.
	 */

	free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;
	int size;

	if (object->pager == NULL)
		return;

	if (start != end) {
		start = trunc_page(start);
		end = round_page(end);
	}
	size = end - start;

again:
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p) && ((start == end) || (size != 0))) {
		if (start == end || (p->offset >= start && p->offset < end)) {
			if (p->flags & PG_BUSY)
				goto next;

			size -= PAGE_SIZE;

			if ((p->flags & PG_CLEAN)
			    && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;

			if (p->flags & PG_ACTIVE)
				vm_page_deactivate(p);

			if ((p->flags & PG_CLEAN) == 0) {
				p->flags |= PG_BUSY;
				object->paging_in_progress++;
				vm_object_unlock(object);
				(void) vm_pager_put(object->pager, p, TRUE);
				vm_object_lock(object);
				object->paging_in_progress--;
				if (object->paging_in_progress == 0)
					wakeup((caddr_t) object);
				PAGE_WAKEUP(p);
				goto again;
			}
		}
next:
		p = (vm_page_t) queue_next(&p->listq);
	}
	wakeup((caddr_t)object);
}
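
/*
 * Illustrative usage (editor's addition, not in the original source):
 * per the "odd semantics" above, passing start == end flushes every
 * dirty page in the object, as vm_object_terminate does for external
 * objects.
 */
#if 0
	vm_object_lock(object);
	vm_object_page_clean(object, 0, 0);	/* clean the whole object */
	vm_object_unlock(object);
#endif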

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t	object;
{
	register vm_page_t	p, next;

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);
		vm_page_lock_queues();
		if ((p->flags & (PG_INACTIVE|PG_BUSY)) == 0 &&
		    p->wire_count == 0)
			vm_page_deactivate(p);	/* optimisation from mach 3.0 -
						 * andrew@werple.apana.org.au,
						 * Feb '93
						 */
		vm_page_unlock_queues();
		p = next;
	}
}

/*
 *	Trim the object cache to size.
 */
void
vm_object_cache_trim()
{
	register vm_object_t	object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_cache_max) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/*
 *	vm_object_shutdown()
 *
 *	Shut down the object system.  Unfortunately, while we
 *	may be trying to do this, init is happily waiting for
 *	processes to exit, and therefore will be causing some objects
 *	to be deallocated.  To handle this, we gain a fake reference
 *	to all objects we release paging areas for.  This will prevent
 *	a duplicate deallocation.  This routine is probably full of
 *	race conditions!
 */

#if 0
void
vm_object_shutdown()
{
	register vm_object_t	object;

	/*
	 *	Clean up the object cache *before* we screw up the reference
	 *	counts on all of the objects.
	 */

	vm_object_cache_clear();

	/*
	 *	First we gain a reference to each object so that
	 *	no one else will deallocate them.
	 */

	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		vm_object_reference(object);
		object = (vm_object_t) queue_next(&object->object_list);
	}
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Now we deallocate all the paging areas.  We don't need
	 *	to lock anything because we've reduced to a single
	 *	processor while shutting down.  This also assumes that
	 *	no new objects are being created.
	 */

	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		if (object->pager != NULL)
			vm_pager_deallocate(object->pager);
		object = (vm_object_t) queue_next(&object->object_list);
		printf(".");
	}
	printf("done.\n");
}
#endif

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;
	vm_offset_t amount;

	if (object == NULL)
		return;

	start = trunc_page(start);
	end = round_page(end);

	amount = ((end - start) + PAGE_SIZE - 1) / PAGE_SIZE;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			p->flags |= PG_COPY_ON_WRITE;
			amount -= 1;
			if (amount <= 0)
				break;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;
	vm_offset_t size;

	if (object == NULL)
		return;

	vm_object_lock(object);
again:
	size = ((end - start) + PAGE_SIZE - 1) / PAGE_SIZE;
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			if (p->flags & PG_BUSY) {
				p->flags |= PG_WANTED;
				tsleep((caddr_t) p, PVM, "vmopmr", 0);
				goto again;
			}
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) == 0)
				p->flags |= PG_LAUNDRY;
			if (--size <= 0)
				break;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is not backed
 *	up by a non-default pager.
 */
void
vm_object_copy(src_object, src_offset, size, dst_object, dst_offset, src_needs_copy)
	register vm_object_t	src_object;
	vm_offset_t		src_offset;
	vm_size_t		size;
	vm_object_t		*dst_object;	/* OUT */
	vm_offset_t		*dst_offset;	/* OUT */
	boolean_t		*src_needs_copy;	/* OUT */
{
	register vm_object_t	new_copy;
	register vm_object_t	old_copy;
	vm_offset_t		new_start, new_end;
	vm_offset_t		src_offset_end;
	vm_offset_t		tmpsize;

	register vm_page_t	p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 *	DYSON: check for swap(default) pager too....
	 */

	vm_object_lock(src_object);

	/*
	 *	Try to collapse the object before copying it.
	 */

	vm_object_collapse(src_object);

	if (src_object->pager == NULL ||
	    src_object->pager->pg_type == PG_SWAP ||
	    src_object->internal) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		tmpsize = size;
		src_offset_end = src_offset + size;
		for (p = (vm_page_t) queue_first(&src_object->memq);
		     !queue_end(&src_object->memq, (queue_entry_t)p) && tmpsize > 0;
		     p = (vm_page_t) queue_next(&p->listq)) {
			if (src_offset <= p->offset &&
			    p->offset < src_offset_end) {
				p->flags |= PG_COPY_ON_WRITE;
				tmpsize -= PAGE_SIZE;
			}
		}
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end   = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	tmpsize = size;
	p = (vm_page_t) queue_first(&src_object->memq);
	while (!queue_end(&src_object->memq, (queue_entry_t) p) && tmpsize > 0) {
		if ((new_start <= p->offset) && (p->offset < new_end)) {
			p->flags |= PG_COPY_ON_WRITE;
			tmpsize -= PAGE_SIZE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t	*object;	/* IN/OUT */
	vm_offset_t	*offset;	/* IN/OUT */
	vm_size_t	length;
{
	register vm_object_t	source;
	register vm_object_t	result;

	source = *object;

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
}
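
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * how a caller such as the fork/fault path might interpose a shadow.
 * "entry_object", "entry_offset" and "entry_size" are hypothetical
 * stand-ins for a map entry's fields.
 */
#if 0
	vm_object_t obj = entry_object;
	vm_offset_t off = entry_offset;

	vm_object_shadow(&obj, &off, entry_size);
	/* obj is now a fresh shadow of entry_object and off is 0 within
	 * it; writes land in obj while reads fall through to its shadow. */
#endif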

/*
 *	Set the specified object's pager to the specified pager.
 */

void
vm_object_setpager(object, pager, paging_offset, read_only)
	vm_object_t	object;
	vm_pager_t	pager;
	vm_offset_t	paging_offset;
	boolean_t	read_only;
{
#ifdef	lint
	read_only++;	/* No longer used */
#endif	/* lint */

	vm_object_lock(object);			/* XXX ? */
	if (object->pager && object->pager != pager) {
		panic("vm_object_setpager: pager already allocated");
	}
	object->pager = pager;
	object->paging_offset = paging_offset;
	vm_object_unlock(object);		/* XXX ? */
}

/*
 *	vm_object_hash hashes the pager/id pair.
 */

#define vm_object_hash(pager) \
	((((unsigned)pager) >> 5) % VM_OBJECT_HASH_COUNT)
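
/*
 * Note (editor's addition, not in the original source): pager structures
 * come from malloc and are aligned, so the low bits of the pointer carry
 * little information; ">> 5" discards them before taking the remainder
 * mod VM_OBJECT_HASH_COUNT (127) to pick a bucket in [0, 126].
 */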

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t
vm_object_lookup(pager)
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	vm_object_t			object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	vm_object_cache_lock();

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				queue_remove(&vm_object_cached_list, object,
					vm_object_t, cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return(object);
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}

	vm_object_cache_unlock();
	return(NULL);
}

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void
vm_object_enter(object, pager)
	vm_object_t	object;
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;

	/*
	 *	We don't cache null objects, and we can't cache
	 *	objects with the null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;
	object->can_persist = TRUE;

	vm_object_cache_lock();
	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
	vm_object_cache_unlock();
}
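
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * a pager-backed object is registered once, and later mappings of the
 * same backing store recover it through vm_object_lookup rather than
 * creating a duplicate object.
 */
#if 0
	vm_object_enter(object, pager);		/* hash it; object can persist */
	...
	object = vm_object_lookup(pager);	/* later: same object back,
						   with a new reference */
#endif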

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
void
vm_object_remove(pager)
	register vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	register vm_object_t		object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			queue_remove(bucket, entry, vm_object_hash_entry_t,
					hash_links);
			free((caddr_t)entry, M_VMOBJHASH);
			break;
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}
}

/*
 *	vm_object_cache_clear removes all objects from the cache.
 */
void
vm_object_cache_clear()
{
	register vm_object_t	object;

	/*
	 *	Remove each object in the cache by scanning down the
	 *	list of cached objects.
	 */
	vm_object_cache_lock();
	while (!queue_empty(&vm_object_cached_list)) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		/*
		 * Note: it is important that we use vm_object_lookup
		 * to gain a reference, and not vm_object_reference, because
		 * the logic for removing an object from the cache lies in
		 * lookup.
		 */
		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_clear: I'm sooo confused.");
		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

boolean_t	vm_object_collapse_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 *	This routine has significant changes by John S. Dyson
 *	to fix some swap memory leaks.  18 Dec 93
 */
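
/*
 * Sketch of the two cases handled below (editor's addition, not in the
 * original source).  Given a chain  object -> backing_object -> X :
 *
 *	collapse (backing ref_count == 1): backing_object's pages and
 *	    pager are absorbed into object, leaving  object -> X ;
 *	bypass (all pages shadowed, no backing pager): object skips over
 *	    backing_object, which survives with its remaining references.
 */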
void
vm_object_collapse(object)
	register vm_object_t	object;
{
	register vm_object_t	backing_object;
	register vm_offset_t	backing_offset;
	register vm_size_t	size;
	register vm_offset_t	new_offset;
	register vm_page_t	p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 *	Verify that the conditions are right for collapse:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out.
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0)
			return;

		/*
		 *		There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 *	...
		 *		The backing object is not read_only,
		 *		and no pages in the backing object are
		 *		currently being paged out.
		 *		The backing object is internal.
		 */

		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy == backing_object) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * we can deal only with the swap pager
		 */
		if ((object->pager &&
		     object->pager->pg_type != PG_SWAP) ||
		    (backing_object->pager &&
		     backing_object->pager->pg_type != PG_SWAP)) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	We know that we can either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 *	If there is exactly one reference to the backing
		 *	object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 *	We can collapse the backing object.
			 *
			 *	Move all in-memory pages from backing_object
			 *	to the parent.  Pages that have been paged out
			 *	will be overwritten by any of the parent's
			 *	pages that shadow them.
			 */

			while (!queue_empty(&backing_object->memq)) {

				p = (vm_page_t)
					queue_first(&backing_object->memq);

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	dispose of it.
				 *
				 *	Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
					    object->paging_offset + new_offset))) {
						vm_page_lock_queues();
						vm_page_free(p);
						vm_page_unlock_queues();
					} else {
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 *	Move the pager from backing_object to object.
			 */

			if (backing_object->pager) {
				backing_object->paging_in_progress++;
				if (object->pager) {
					vm_pager_t bopager;
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in shadow object.
					 */
					bopager = backing_object->pager;
					vm_object_remove(backing_object->pager);
					backing_object->pager = NULL;
					swap_pager_copy(
						bopager, backing_object->paging_offset,
						object->pager, object->paging_offset,
						object->shadow_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				} else {
					object->paging_in_progress++;
					/*
					 * grab the shadow objects pager
					 */
					object->pager = backing_object->pager;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					vm_object_remove(backing_object->pager);
					backing_object->pager = NULL;
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object->pager, 0, object->paging_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				}
				backing_object->paging_in_progress--;
				if (backing_object->paging_in_progress == 0)
					wakeup((caddr_t)backing_object);
			}

			/*
			 *	Object now shadows whatever backing_object did.
			 *	Note that the reference to backing_object->shadow
			 *	moves from within backing_object to within object.
			 */

			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 *	Discard backing_object.
			 *
			 *	Since the backing object has no pages, no
			 *	pager left, and no object references within it,
			 *	all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			queue_remove(&vm_object_list, backing_object,
					vm_object_t, object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	The backing object must not be paged out - we'd
			 *	have to check all of the paged-out pages, as
			 *	well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Should have a check for a 'small' number
			 *	of pages here.
			 */

			p = (vm_page_t) queue_first(&backing_object->memq);
			while (!queue_end(&backing_object->memq,
					  (queue_entry_t) p)) {

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	keep going.
				 *
				 *	Otherwise, the backing_object must be
				 *	left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset)) == NULL || (pp->flags & PG_FAKE)) &&
				    (!object->pager || !vm_pager_has_page(object->pager, object->paging_offset + new_offset))) {
					/*
					 *	Page still needed.
					 *	Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
				p = (vm_page_t) queue_next(&p->listq);
			}

			/*
			 *	Make the parent shadow the next object
			 *	in the chain.  Deallocating backing_object
			 *	will not remove it, since its reference
			 *	count is at least 2.
			 */

			vm_object_reference(object->shadow = backing_object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

#if 1
			/* Mach 3.0 code */
			/* andrew@werple.apana.org.au, 12 Feb 1993 */

			/*
			 *	Backing object might have had a copy pointer
			 *	to us.  If it did, clear it.
			 */
			if (backing_object->copy == object)
				backing_object->copy = NULL;
#endif

			/*	Drop the reference count on backing_object.
			 *	Since its ref_count was at least 2, it
			 *	will not vanish; so we don't need to call
			 *	vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses++;

		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p, next;
	vm_offset_t size;

	if (object == NULL)
		return;

	start = trunc_page(start);
	end = round_page(end);
again:
	size = end - start;
	if (size > 4*PAGE_SIZE || size >= object->size/4) {
		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p) && size > 0) {
			next = (vm_page_t) queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				if (p->flags & PG_BUSY) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					goto again;
				}
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
				size -= PAGE_SIZE;
			}
			p = next;
		}
	} else {
		while (size > 0) {
			while ((p = vm_page_lookup(object, start)) != NULL) {
				if (p->flags & PG_BUSY) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					goto again;
				}
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			start += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, next_object, prev_offset, next_offset, prev_size, next_size)
	register vm_object_t	prev_object;
	vm_object_t	next_object;
	vm_offset_t	prev_offset, next_offset;
	vm_size_t	prev_size, next_size;
{
	vm_size_t	newsize;

#ifdef	lint
	next_offset++;
#endif	/* lint */

	if (next_object != NULL) {
		return(FALSE);
	}

	if (prev_object == NULL) {
		return(TRUE);
	}

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 *	Can't coalesce if:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	(any of which mean that the pages not mapped to
	 *	prev_entry may be in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
	    prev_object->pager != NULL ||
	    prev_object->shadow != NULL ||
	    prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */

	vm_object_page_remove(prev_object,
			prev_offset + prev_size,
			prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return(TRUE);
}
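
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * the map layer can grow an existing anonymous region in place when a
 * new allocation lands just after it, instead of creating a second
 * object.  Offsets and sizes below are hypothetical.
 */
#if 0
	if (vm_object_coalesce(prev_object, NULL,
	    prev_offset, (vm_offset_t) 0, prev_size, next_size)) {
		/* prev_object now covers [prev_offset, prev_offset +
		   prev_size + next_size); no new object was needed. */
	}
#endif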

/*
 *	returns page after looking up in shadow chain
 */

vm_page_t
vm_object_page_lookup(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	vm_page_t m;

	if (!(m = vm_page_lookup(object, offset))) {
		if (!object->shadow)
			return 0;
		else
			return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
	}
	return m;
}
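
/*
 * Worked example (editor's addition, not in the original source): with
 * a chain  object -> shadow  and object->shadow_offset == 2*PAGE_SIZE,
 * vm_object_page_lookup(object, 0) first tries object at offset 0 and,
 * failing that, recurses into the shadow at offset 2*PAGE_SIZE.
 */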

#if defined(DEBUG) || (NDDB > 0)
/*
 *	vm_object_print:	[ debug ]
 */
void
vm_object_print(object, full)
	vm_object_t	object;
	boolean_t	full;
{
	register vm_page_t	p;
	extern int		indent;

	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
		(int) object, (int) object->size,
		object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	       (int) object->pager, (int) object->paging_offset,
	       (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=0x%x, prev=0x%x\n",
	       object->cached_list.next, object->cached_list.prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
		p = (vm_page_t) queue_next(&p->listq);
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
#endif /* defined(DEBUG) || (NDDB > 0) */