change sleep->tsleep.
[unix-history] / sys / vm / vm_object.c
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_object.c	7.4 (Berkeley) 5/7/91
 * $Id: vm_object.c,v 1.13 1993/12/22 12:51:59 davidg Exp $
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include "ddb.h"
#include "param.h"
#include "malloc.h"
#include "systm.h"

#include "vm.h"
#include "vm_page.h"
#include "proc.h"


static void _vm_object_allocate(vm_size_t, vm_object_t);
void vm_object_deactivate_pages(vm_object_t);
static void vm_object_cache_trim(void);
static void vm_object_remove(vm_pager_t);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
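
/*
 * Added commentary: a minimal caller-side sketch of the reference
 * lifecycle described above, using functions defined later in this
 * file.  The wrapper function itself is hypothetical and compiled
 * out; the size used is illustrative only.
 */
#if 0
static void
example_object_lifecycle()
{
	vm_object_t obj;

	obj = vm_object_allocate(round_page(64 * 1024)); /* ref_count == 1 */
	vm_object_reference(obj);			 /* ref_count == 2 */
	vm_object_deallocate(obj);			 /* ref_count == 1 */
	vm_object_deallocate(obj);	/* last reference: the object is
					 * cached if it can persist,
					 * otherwise terminated */
}
#endif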

queue_head_t	vm_object_cached_list;	/* list of objects persisting */
int		vm_object_cached;	/* size of cached list */
simple_lock_data_t	vm_cache_lock;	/* lock for object cache */

queue_head_t	vm_object_list;		/* list of allocated objects */
long		vm_object_count;	/* count of all objects */
simple_lock_data_t	vm_object_list_lock;
					/* lock for object list and count */

vm_object_t	kernel_object;		/* the single kernel object */
vm_object_t	kmem_object;		/* the kernel malloc object */
struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

#ifdef VSMALL
#define	VM_OBJECT_HASH_COUNT	127
int	vm_cache_max = 256;	/* can patch if necessary */
#else
#define	VM_OBJECT_HASH_COUNT	521
int	vm_cache_max = 2048;	/* can patch if necessary */
#endif

queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long	object_collapses = 0;
long	object_bypasses = 0;

/*
 * internal version of vm_object_allocate
 */
static inline void
_vm_object_allocate(size, object)
	vm_size_t	size;
	register vm_object_t	object;
{
	queue_init(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->can_persist = FALSE;
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->internal = TRUE;	/* vm_allocate_with_pager will reset */
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	queue_enter(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count++;
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	register int	i;

	queue_init(&vm_object_cached_list);
	queue_init(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

static struct vm_object *objpool;
static int objpoolcnt;

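/*
 * Added commentary: objpool is a small free list used to recycle
 * vm_object structures and avoid malloc/free churn.  Free objects are
 * chained through their otherwise-unused "copy" field, and the list is
 * capped at 64 entries by vm_object_terminate() below.
 */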
vm_object_t
vm_object_allocate(size)
	vm_size_t	size;
{
	register vm_object_t	result;
	int s;

	if (objpool) {
		result = objpool;
		objpool = result->copy;
		--objpoolcnt;
	} else {
		result = (vm_object_t)
			malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);
	}

	_vm_object_allocate(size, result);

	return(result);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
inline void
vm_object_reference(object)
	register vm_object_t	object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t	object;
{
	vm_object_t	temp;

	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			vm_object_unlock(object);
			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->can_persist) {

			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			/* vm_object_deactivate_pages(object); */
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t	object;
{
	register vm_page_t	p;
	vm_object_t		shadow_object;
	int			s;

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
/*
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
*/
		vm_object_unlock(shadow_object);
	}

	/*
	 *	Wait until the pageout daemon is through
	 *	with the object.
	 */

	s = splhigh();
	while (object->paging_in_progress != 0) {
		vm_object_sleep(object, object, FALSE);
		vm_object_lock(object);
	}
	splx(s);


	/*
	 *	While the paging system is locked,
	 *	pull the object's pages off the active
	 *	and inactive queues.  This keeps the
	 *	pageout daemon from playing with them
	 *	during vm_pager_deallocate.
	 *
	 *	We can't free the pages yet, because the
	 *	object's pager may have to write them out
	 *	before deallocating the paging space.
	 */

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		s = vm_disable_intr();
		if (p->flags & PG_ACTIVE) {
			queue_remove(&vm_page_queue_active, p, vm_page_t,
						pageq);
			p->flags &= ~PG_ACTIVE;
			vm_page_active_count--;
		}

		if (p->flags & PG_INACTIVE) {
			queue_remove(&vm_page_queue_inactive, p, vm_page_t,
						pageq);
			p->flags &= ~PG_INACTIVE;
			vm_page_inactive_count--;
		}
		vm_set_intr(s);
		vm_page_unlock_queues();
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(object);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 *	Clean and free the pages, as appropriate.
	 *	All references to the object are gone,
	 *	so we don't need to lock it.
	 */

	if (!object->internal) {
		vm_object_lock(object);
		vm_object_page_clean(object, 0, 0);
		vm_object_unlock(object);
	}

	while (!queue_empty(&object->memq)) {
		p = (vm_page_t) queue_first(&object->memq);

		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		vm_page_free(p);
		vm_page_unlock_queues();
	}

	/*
	 *	Let the pager know object is dead.
	 */

	if (object->pager != NULL) {
		vm_pager_deallocate(object->pager);
	}


	simple_lock(&vm_object_list_lock);
	queue_remove(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Free the space for the object.
	 */

	if (objpoolcnt < 64) {
		object->copy = objpool;
		objpool = object;
		++objpoolcnt;
		return;
	} else
		free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;
	int s;
	int size;

	if (object->pager == NULL)
		return;

	if (start != end) {
		start = trunc_page(start);
		end = round_page(end);
	}
	size = end - start;

again:
	s = splimp();
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p) && ((start == end) || (size != 0))) {
		if (start == end || (p->offset >= start && p->offset < end)) {
			if (pmap_is_wired(VM_PAGE_TO_PHYS(p)) ||
			    p->flags & PG_BUSY)
				goto next;

			size -= PAGE_SIZE;

			if ((p->flags & PG_CLEAN)
			    && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;

			if (p->flags & PG_ACTIVE)
				vm_page_deactivate(p);
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) == 0) {
				p->flags |= PG_BUSY;
				object->paging_in_progress++;
				vm_object_unlock(object);
				(void) vm_pager_put(object->pager, p, TRUE);
				vm_object_lock(object);
				object->paging_in_progress--;
				if (object->paging_in_progress == 0)
					wakeup((caddr_t) object);
				PAGE_WAKEUP(p);
				splx(s);
				goto again;
			}
		}
next:
		p = (vm_page_t) queue_next(&p->listq);
	}
	splx(s);
	wakeup((caddr_t)object);
}

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t	object;
{
	register vm_page_t	p, next;

	int s = splimp();
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);
		vm_page_lock_queues();
		if ((p->flags & (PG_INACTIVE|PG_BUSY)) == 0 &&
		    p->wire_count == 0 &&
		    (object->can_persist || !pmap_is_wired(VM_PAGE_TO_PHYS(p))))
			vm_page_deactivate(p);	/* optimisation from mach 3.0 -
						 * andrew@werple.apana.org.au,
						 * Feb '93
						 */
		vm_page_unlock_queues();
		p = next;
	}
	splx(s);
}

/*
 *	Trim the object cache to size.
 */
void
vm_object_cache_trim()
{
	register vm_object_t	object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_cache_max) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/*
 *	vm_object_shutdown()
 *
 *	Shut down the object system.  Unfortunately, while we
 *	may be trying to do this, init is happily waiting for
 *	processes to exit, and therefore will be causing some objects
 *	to be deallocated.  To handle this, we gain a fake reference
 *	to all objects we release paging areas for.  This will prevent
 *	a duplicate deallocation.  This routine is probably full of
 *	race conditions!
 */

#if 0
void
vm_object_shutdown()
{
	register vm_object_t	object;

	/*
	 * Clean up the object cache *before* we screw up the reference
	 * counts on all of the objects.
	 */

	vm_object_cache_clear();


	/*
	 * First we gain a reference to each object so that
	 * no one else will deallocate them.
	 */

	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		vm_object_reference(object);
		object = (vm_object_t) queue_next(&object->object_list);
	}
	simple_unlock(&vm_object_list_lock);

	/*
	 * Now we deallocate all the paging areas.  We don't need
	 * to lock anything because we've reduced to a single
	 * processor while shutting down.  This also assumes that
	 * no new objects are being created.
	 */

	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		if (object->pager != NULL)
			vm_pager_deallocate(object->pager);
		object = (vm_object_t) queue_next(&object->object_list);
		printf(".");
	}
	printf("done.\n");
}
#endif
/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;
	vm_offset_t amount;

	start = trunc_page(start);
	end = round_page(end);

	amount = ((end - start) + PAGE_SIZE - 1) / PAGE_SIZE;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			p->flags |= PG_COPY_ON_WRITE;
			amount -= 1;
			if (amount <= 0)
				break;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;
	vm_offset_t size = ((end - start) + PAGE_SIZE - 1) / PAGE_SIZE;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			if ((p->flags & PG_CLEAN)
			    && pmap_is_modified(VM_PAGE_TO_PHYS(p))) {
				p->flags &= ~PG_CLEAN;
			}
			if ((p->flags & PG_CLEAN) == 0)
				p->flags |= PG_LAUNDRY;
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			if (--size <= 0) break;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

void
vm_object_save_pmap_attributes(vm_object_t object,
	vm_offset_t start, vm_offset_t end)
{
	vm_page_t p;

	if (!object)
		return;

	if (object->shadow) {
		vm_object_save_pmap_attributes(object->shadow,
			object->shadow_offset + start,
			object->shadow_offset + end);
	}

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			if ((p->flags & PG_CLEAN)
			    && pmap_is_modified(VM_PAGE_TO_PHYS(p))) {
				p->flags &= ~PG_CLEAN;
			}
			if ((p->flags & PG_CLEAN) == 0)
				p->flags |= PG_LAUNDRY;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
}

/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is not backed
 *	up by a non-default pager.
 */
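/*
 * Added commentary: two outcomes are possible below.  For internal or
 * swap-backed source objects, no new object is created; the source's
 * pages are simply marked PG_COPY_ON_WRITE and *src_needs_copy is set
 * TRUE so the caller makes a shadow on the first write.  Otherwise a
 * copy-object covering the whole source is created (or an existing
 * empty one reused) and returned in *dst_object.
 */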
void
vm_object_copy(src_object, src_offset, size, dst_object, dst_offset, src_needs_copy)
	register vm_object_t	src_object;
	vm_offset_t		src_offset;
	vm_size_t		size;
	vm_object_t		*dst_object;	/* OUT */
	vm_offset_t		*dst_offset;	/* OUT */
	boolean_t		*src_needs_copy;	/* OUT */
{
	register vm_object_t	new_copy;
	register vm_object_t	old_copy;
	vm_offset_t		new_start, new_end;
	vm_offset_t		src_offset_end;
	vm_offset_t		tmpsize;

	register vm_page_t	p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 *	DYSON: check for swap(default) pager too....
	 */

	vm_object_lock(src_object);

	/*
	 *	Try to collapse the object before copying it.
	 */

	vm_object_collapse(src_object);

	if (src_object->pager == NULL ||
	    src_object->pager->pg_type == PG_SWAP ||
	    src_object->internal) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		tmpsize = size;
		src_offset_end = src_offset + size;
		for (p = (vm_page_t) queue_first(&src_object->memq);
		     !queue_end(&src_object->memq, (queue_entry_t)p) && tmpsize > 0;
		     p = (vm_page_t) queue_next(&p->listq)) {
			if (src_offset <= p->offset &&
			    p->offset < src_offset_end) {
				p->flags |= PG_COPY_ON_WRITE;
				tmpsize -= PAGE_SIZE;
			}
		}
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end   = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	tmpsize = size;
	p = (vm_page_t) queue_first(&src_object->memq);
	while (!queue_end(&src_object->memq, (queue_entry_t) p) && tmpsize > 0) {
		if ((new_start <= p->offset) && (p->offset < new_end)) {
			p->flags |= PG_COPY_ON_WRITE;
			tmpsize -= PAGE_SIZE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t	*object;	/* IN/OUT */
	vm_offset_t	*offset;	/* IN/OUT */
	vm_size_t	length;
{
	register vm_object_t	source;
	register vm_object_t	result;

	source = *object;

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
}
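
/*
 * Added commentary: a typical caller-side pattern for the IN/OUT
 * parameters above (variable names hypothetical):
 *
 *	vm_object_t obj = ...;		holds one reference
 *	vm_offset_t off = ...;
 *	vm_object_shadow(&obj, &off, len);
 *
 * Afterwards obj names a new object shadowing the old one at offset
 * "off", off has been reset to 0, and the caller's old reference has
 * been transferred to the new object's shadow pointer.
 */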

/*
 *	Set the specified object's pager to the specified pager.
 */

void
vm_object_setpager(object, pager, paging_offset,
			read_only)
	vm_object_t	object;
	vm_pager_t	pager;
	vm_offset_t	paging_offset;
	boolean_t	read_only;
{
#ifdef	lint
	read_only++;	/* No longer used */
#endif /* lint */

	vm_object_lock(object);			/* XXX ? */
	if (object->pager && object->pager != pager) {
		panic("!!!pager already allocated!!!\n");
	}
	object->pager = pager;
	object->paging_offset = paging_offset;
	vm_object_unlock(object);		/* XXX ? */
}

/*
 *	vm_object_hash hashes the pager/id pair.
 */

#define vm_object_hash(pager) \
	((((unsigned)pager) >> 5) % VM_OBJECT_HASH_COUNT)
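
/*
 * Added commentary: the right shift by 5 discards the low-order bits
 * of the pointer, which carry little information since pager
 * structures are allocated on aligned boundaries; the remainder then
 * spreads pagers across the VM_OBJECT_HASH_COUNT buckets.
 */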

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t
vm_object_lookup(pager)
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	vm_object_t			object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	vm_object_cache_lock();

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				queue_remove(&vm_object_cached_list, object,
						vm_object_t, cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return(object);
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}

	vm_object_cache_unlock();
	return(NULL);
}

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void
vm_object_enter(object, pager)
	vm_object_t	object;
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;

	/*
	 *	We don't cache null objects, and we can't cache
	 *	objects with the null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;
	object->can_persist = TRUE;

	vm_object_cache_lock();
	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
	vm_object_cache_unlock();
}

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
void
vm_object_remove(pager)
	register vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	register vm_object_t		object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			queue_remove(bucket, entry, vm_object_hash_entry_t,
					hash_links);
			free((caddr_t)entry, M_VMOBJHASH);
			break;
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}
}

/*
 *	vm_object_cache_clear removes all objects from the cache.
 *
 */
void
vm_object_cache_clear()
{
	register vm_object_t	object;

	/*
	 *	Remove each object in the cache by scanning down the
	 *	list of cached objects.
	 */
	vm_object_cache_lock();
	while (!queue_empty(&vm_object_cached_list)) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		/*
		 * Note: it is important that we use vm_object_lookup
		 * to gain a reference, and not vm_object_reference, because
		 * the logic for removing an object from the cache lies in
		 * lookup.
		 */
		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_clear: I'm sooo confused.");
		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

boolean_t	vm_object_collapse_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 *	This routine has significant changes by John S. Dyson
 *	to fix some swap memory leaks.  18 Dec 93
 *
 */
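/*
 * Added commentary: shadow chains before and after a successful
 * collapse, for the single-reference case handled below:
 *
 *	before:	object -> backing_object -> X
 *	after:	object -> X
 *
 * Resident pages of backing_object are moved into object (or freed if
 * object already shadows them), and swap-pager blocks are migrated via
 * swap_pager_copy().  The bypass case leaves backing_object in place
 * but re-points object->shadow past it.
 */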
void
vm_object_collapse(object)
	register vm_object_t	object;

{
	register vm_object_t	backing_object;
	register vm_offset_t	backing_offset;
	register vm_size_t	size;
	register vm_offset_t	new_offset;
	register vm_page_t	p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 *	Verify that the conditions are right for collapse:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out.
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0)
			return;

		/*
		 *	There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 *	...
		 *	The backing object is not read_only,
		 *	and no pages in the backing object are
		 *	currently being paged out.
		 *	The backing object is internal.
		 */

		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy == backing_object) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * we can deal only with the swap pager
		 */
		if ((object->pager &&
		     object->pager->pg_type != PG_SWAP) ||
		    (backing_object->pager &&
		     backing_object->pager->pg_type != PG_SWAP)) {
			vm_object_unlock(backing_object);
			return;
		}


		/*
		 *	We know that we can either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 *	If there is exactly one reference to the backing
		 *	object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 *	We can collapse the backing object.
			 *
			 *	Move all in-memory pages from backing_object
			 *	to the parent.  Pages that have been paged out
			 *	will be overwritten by any of the parent's
			 *	pages that shadow them.
			 */

			while (!queue_empty(&backing_object->memq)) {

				p = (vm_page_t)
					queue_first(&backing_object->memq);

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	dispose of it.
				 *
				 *	Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
					    object->paging_offset + new_offset))) {
						vm_page_lock_queues();
						vm_page_free(p);
						vm_page_unlock_queues();
					} else {
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 *	Move the pager from backing_object to object.
			 */

			if (backing_object->pager) {
				backing_object->paging_in_progress++;
				if (object->pager) {
					vm_pager_t bopager;
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in shadow object.
					 */
					bopager = backing_object->pager;
					vm_object_remove(backing_object->pager);
					backing_object->pager = NULL;
					swap_pager_copy(
						bopager, backing_object->paging_offset,
						object->pager, object->paging_offset,
						object->shadow_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				} else {
					object->paging_in_progress++;
					/*
					 * grab the shadow objects pager
					 */
					object->pager = backing_object->pager;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					vm_object_remove(backing_object->pager);
					backing_object->pager = NULL;
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object->pager, 0, object->paging_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				}
				backing_object->paging_in_progress--;
				if (backing_object->paging_in_progress == 0)
					wakeup((caddr_t)backing_object);
			}

			/*
			 *	Object now shadows whatever backing_object did.
			 *	Note that the reference to backing_object->shadow
			 *	moves from within backing_object to within object.
			 */

			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 *	Discard backing_object.
			 *
			 *	Since the backing object has no pages, no
			 *	pager left, and no object references within it,
			 *	all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			queue_remove(&vm_object_list, backing_object,
					vm_object_t, object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	The backing object must not be paged out - we'd
			 *	have to check all of the paged-out pages, as
			 *	well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Should have a check for a 'small' number
			 *	of pages here.
			 */

			p = (vm_page_t) queue_first(&backing_object->memq);
			while (!queue_end(&backing_object->memq,
					  (queue_entry_t) p)) {

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	keep going.
				 *
				 *	Otherwise, the backing_object must be
				 *	left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset)) == NULL || (pp->flags & PG_FAKE)) &&
				    (!object->pager || !vm_pager_has_page(object->pager, object->paging_offset+new_offset))) {
					/*
					 *	Page still needed.
					 *	Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
				p = (vm_page_t) queue_next(&p->listq);
			}

			/*
			 *	Make the parent shadow the next object
			 *	in the chain.  Deallocating backing_object
			 *	will not remove it, since its reference
			 *	count is at least 2.
			 */

			vm_object_reference(object->shadow = backing_object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

#if 1
			/* Mach 3.0 code */
			/* andrew@werple.apana.org.au, 12 Feb 1993 */

			/*
			 *	Backing object might have had a copy pointer
			 *	to us.  If it did, clear it.
			 */
			if (backing_object->copy == object)
				backing_object->copy = NULL;
#endif

			/*	Drop the reference count on backing_object.
			 *	Since its ref_count was at least 2, it
			 *	will not vanish; so we don't need to call
			 *	vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses++;

		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p, next;
	vm_offset_t size;
	int cnt;
	int s;

	if (object == NULL)
		return;

	start = trunc_page(start);
	end = round_page(end);
	size = end - start;
	if (size > 4*PAGE_SIZE || size >= object->size/4) {
		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p) && size > 0) {
			next = (vm_page_t) queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
				size -= PAGE_SIZE;
			}
			p = next;
		}
	} else {
		while (size > 0) {
			while ((p = vm_page_lookup(object, start)) != NULL) {
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			start += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, next_object, prev_offset, next_offset, prev_size, next_size)
	register vm_object_t	prev_object;
	vm_object_t	next_object;
	vm_offset_t	prev_offset, next_offset;
	vm_size_t	prev_size, next_size;
{
	vm_size_t	newsize;

#ifdef	lint
	next_offset++;
#endif /* lint */

	if (next_object != NULL) {
		return(FALSE);
	}

	if (prev_object == NULL) {
		return(TRUE);
	}

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 *	Can't coalesce if:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	(any of which mean that the pages not mapped to
	 *	prev_entry may be in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
	    prev_object->pager != NULL ||
	    prev_object->shadow != NULL ||
	    prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */

/*
	vm_object_page_remove(prev_object,
			prev_offset + prev_size,
			prev_offset + prev_size + next_size);
*/

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return(TRUE);
}

/*
 *	returns page after looking up in shadow chain
 */

vm_page_t
vm_object_page_lookup(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	vm_page_t m;

	if (!(m = vm_page_lookup(object, offset))) {
		if (!object->shadow)
			return 0;
		else
			return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
	}
	return m;
}

#if defined(DEBUG) || (NDDB > 0)
/*
 *	vm_object_print:	[ debug ]
 */
void
vm_object_print(object, full)
	vm_object_t	object;
	boolean_t	full;
{
	register vm_page_t	p;
	extern int	indent;

	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
		(int) object, (int) object->size,
		object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	       (int) object->pager, (int) object->paging_offset,
	       (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=0x%x, prev=0x%x\n",
	       object->cached_list.next, object->cached_list.prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
		p = (vm_page_t) queue_next(&p->listq);
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
#endif /* defined(DEBUG) || (NDDB > 0) */