/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_object.c 7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *      Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *      School of Computer Science
 *      Carnegie Mellon University
 *      Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * PATCHES MAGIC                LEVEL   PATCH THAT GOT US HERE
 * --------------------         -----   ----------------------
 * CURRENT PATCH LEVEL:         1       00147
 * --------------------         -----   ----------------------
 *
 * 20 Apr 93    Paul Kranenburg         Detect and prevent kernel deadlocks in
 *                                      VM system
 */

/*
 * Virtual memory object module.
 */

#include "param.h"
#include "malloc.h"

#include "vm.h"
#include "vm_page.h"

/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *      reference count         locked by object's lock
 *      pager routine           locked by object's lock
 */

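/*
 * For orientation, the vm_object fields this module touches (the
 * authoritative definition lives in the VM headers, presumably
 * vm_object.h; only names actually used below are listed) are:
 * memq (resident page list), ref_count, resident_page_count, size,
 * can_persist, paging_in_progress, copy, pager, pager_ready, internal,
 * paging_offset, shadow, shadow_offset, the object_list and cached_list
 * queue links, and the per-object lock.
 */
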
struct vm_object        kernel_object_store;
struct vm_object        kmem_object_store;

#define VM_OBJECT_HASH_COUNT    157

int     vm_cache_max = 100;     /* can patch if necessary */
queue_head_t    vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long    object_collapses = 0;
long    object_bypasses = 0;

/*
 * vm_object_init:
 *
 * Initialize the VM objects module.
 */
void vm_object_init()
{
        register int    i;

        queue_init(&vm_object_cached_list);
        queue_init(&vm_object_list);
        vm_object_count = 0;
        simple_lock_init(&vm_cache_lock);
        simple_lock_init(&vm_object_list_lock);

        for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
                queue_init(&vm_object_hashtable[i]);

        kernel_object = &kernel_object_store;
        _vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
                        kernel_object);

        kmem_object = &kmem_object_store;
        _vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
}
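
/*
 * Note that vm_object_init must run once, early in VM startup and
 * before any vm_object_allocate call: it seeds the object list, the
 * cache, and the hash buckets that every routine below assumes exist,
 * and it hand-builds the two statically allocated kernel objects.
 */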

/*
 * vm_object_allocate:
 *
 * Returns a new object with the given size.
 */

vm_object_t vm_object_allocate(size)
        vm_size_t       size;
{
        register vm_object_t    result;

        result = (vm_object_t)
                malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

        _vm_object_allocate(size, result);

        return(result);
}
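
/*
 * A sketch (not live code) of the intended lifecycle: the allocating
 * caller owns the first reference, other holders take their own, and
 * the object is only destroyed (or cached, if it can persist) when the
 * last reference is released.  "len" here is a hypothetical size.
 */
#if 0
        vm_object_t obj;

        obj = vm_object_allocate(len); /* ref_count == 1 */
        vm_object_reference(obj);      /* ref_count == 2 */
        vm_object_deallocate(obj);     /* ref_count == 1 */
        vm_object_deallocate(obj);     /* destroyed, or cached */
#endif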

_vm_object_allocate(size, object)
        vm_size_t               size;
        register vm_object_t    object;
{
        queue_init(&object->memq);
        vm_object_lock_init(object);
        object->ref_count = 1;
        object->resident_page_count = 0;
        object->size = size;
        object->can_persist = FALSE;
        object->paging_in_progress = 0;
        object->copy = NULL;

        /*
         * Object starts out read-write, with no pager.
         */

        object->pager = NULL;
        object->pager_ready = FALSE;
        object->internal = TRUE;        /* vm_allocate_with_pager will reset */
        object->paging_offset = 0;
        object->shadow = NULL;
        object->shadow_offset = (vm_offset_t) 0;

        simple_lock(&vm_object_list_lock);
        queue_enter(&vm_object_list, object, vm_object_t, object_list);
        vm_object_count++;
        simple_unlock(&vm_object_list_lock);
}

/*
 * vm_object_reference:
 *
 * Gets another reference to the given object.
 */
void vm_object_reference(object)
        register vm_object_t    object;
{
        if (object == NULL)
                return;

        vm_object_lock(object);
        object->ref_count++;
        vm_object_unlock(object);
}

/*
 * vm_object_deallocate:
 *
 * Release a reference to the specified object,
 * gained either through a vm_object_allocate
 * or a vm_object_reference call.  When all references
 * are gone, storage associated with this object
 * may be relinquished.
 *
 * No object may be locked.
 */
void vm_object_deallocate(object)
        register vm_object_t    object;
{
        vm_object_t     temp;

        while (object != NULL) {

                /*
                 * The cache holds a reference (uncounted) to
                 * the object; we must lock it before removing
                 * the object.
                 */

                vm_object_cache_lock();

                /*
                 * Lose the reference
                 */
                vm_object_lock(object);
                if (--(object->ref_count) != 0) {

                        /*
                         * If there are still references, then
                         * we are done.
                         */
                        vm_object_unlock(object);
                        vm_object_cache_unlock();
                        return;
                }

                /*
                 * See if this object can persist.  If so, enter
                 * it in the cache, then deactivate all of its
                 * pages.
                 */

                if (object->can_persist) {
                        register vm_page_t      p;

                        /*
                         * Check for dirty pages in object
                         * Print warning as this may signify kernel bugs
                         * pk@cs.few.eur.nl - 4/15/93
                         */
                        p = (vm_page_t) queue_first(&object->memq);
                        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                                VM_PAGE_CHECK(p);

                                if (pmap_is_modified(VM_PAGE_TO_PHYS(p)) ||
                                    !p->clean) {
                                        printf("vm_object_dealloc: persistent object %x isn't clean\n", object);
                                        goto cant_persist;
                                }

                                p = (vm_page_t) queue_next(&p->listq);
                        }

                        queue_enter(&vm_object_cached_list, object,
                                vm_object_t, cached_list);
                        vm_object_cached++;
                        vm_object_cache_unlock();

                        vm_object_deactivate_pages(object);
                        vm_object_unlock(object);

                        vm_object_cache_trim();
                        return;
                }
        cant_persist:;

                /*
                 * Make sure no one can look us up now.
                 */
                vm_object_remove(object->pager);
                vm_object_cache_unlock();

                temp = object->shadow;
                vm_object_terminate(object);
                        /* unlocks and deallocates object */
                object = temp;
        }
}

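/*
 * Note that the loop above walks the shadow chain iteratively rather
 * than recursing: releasing the last reference to an object implicitly
 * releases the reference that object held on its shadow, so each
 * terminated object simply hands its shadow back for the next pass.
 */
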
/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 */
void vm_object_terminate(object)
        register vm_object_t    object;
{
        register vm_page_t      p;
        vm_object_t             shadow_object;

        /*
         * Detach the object from its shadow if we are the shadow's
         * copy.
         */
        if ((shadow_object = object->shadow) != NULL) {
                vm_object_lock(shadow_object);
                if (shadow_object->copy == object)
                        shadow_object->copy = NULL;
#if 0
                else if (shadow_object->copy != NULL)
                        panic("vm_object_terminate: copy/shadow inconsistency");
#endif
                vm_object_unlock(shadow_object);
        }

        /*
         * Wait until the pageout daemon is through
         * with the object.
         */

        while (object->paging_in_progress != 0) {
                vm_object_sleep(object, object, FALSE);
                vm_object_lock(object);
        }

        /*
         * While the paging system is locked,
         * pull the object's pages off the active
         * and inactive queues.  This keeps the
         * pageout daemon from playing with them
         * during vm_pager_deallocate.
         *
         * We can't free the pages yet, because the
         * object's pager may have to write them out
         * before deallocating the paging space.
         */

        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                VM_PAGE_CHECK(p);

                vm_page_lock_queues();
                if (p->active) {
                        queue_remove(&vm_page_queue_active, p, vm_page_t,
                                pageq);
                        p->active = FALSE;
                        vm_page_active_count--;
                }

                if (p->inactive) {
                        queue_remove(&vm_page_queue_inactive, p, vm_page_t,
                                pageq);
                        p->inactive = FALSE;
                        vm_page_inactive_count--;
                }
                vm_page_unlock_queues();
                p = (vm_page_t) queue_next(&p->listq);
        }

        vm_object_unlock(object);

        if (object->paging_in_progress != 0)
                panic("vm_object_terminate: pageout in progress");

        /*
         * Clean and free the pages, as appropriate.
         * All references to the object are gone,
         * so we don't need to lock it.
         */

        if (!object->internal) {
                vm_object_lock(object);
                vm_object_page_clean(object, 0, 0);
                vm_object_unlock(object);
        }
        while (!queue_empty(&object->memq)) {
                p = (vm_page_t) queue_first(&object->memq);

                VM_PAGE_CHECK(p);

                vm_page_lock_queues();
                vm_page_free(p);
                vm_page_unlock_queues();
        }

        /*
         * Let the pager know object is dead.
         */

        if (object->pager != NULL)
                vm_pager_deallocate(object->pager);

        simple_lock(&vm_object_list_lock);
        queue_remove(&vm_object_list, object, vm_object_t, object_list);
        vm_object_count--;
        simple_unlock(&vm_object_list_lock);

        /*
         * Free the space for the object.
         */

        free((caddr_t)object, M_VMOBJ);
}

/*
 * vm_object_page_clean
 *
 * Clean all dirty pages in the specified range of object.
 * Leaves page on whatever queue it is currently on.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked.
 */
vm_object_page_clean(object, start, end)
        register vm_object_t    object;
        register vm_offset_t    start;
        register vm_offset_t    end;
{
        register vm_page_t      p;

        if (object->pager == NULL)
                return;

again:
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if (start == end ||
                    (p->offset >= start && p->offset < end)) {
                        if (p->clean && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
                                p->clean = FALSE;
                        pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
                        if (!p->clean) {
                                p->busy = TRUE;
                                object->paging_in_progress++;
                                vm_object_unlock(object);
                                (void) vm_pager_put(object->pager, p, TRUE);
                                vm_object_lock(object);
                                object->paging_in_progress--;
                                p->busy = FALSE;
                                PAGE_WAKEUP(p);
                                goto again;
                        }
                }
                p = (vm_page_t) queue_next(&p->listq);
        }
}

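/*
 * For example, vm_object_page_clean(object, 0, 0) -- the call made by
 * vm_object_terminate above -- pushes every dirty page out through the
 * pager, while a nonzero range cleans only pages whose offsets fall in
 * [start, end).
 */
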
/*
 * vm_object_deactivate_pages
 *
 * Deactivate all pages in the specified object.  (Keep its pages
 * in memory even though it is no longer referenced.)
 *
 * The object must be locked.
 */
vm_object_deactivate_pages(object)
        register vm_object_t    object;
{
        register vm_page_t      p, next;

        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                next = (vm_page_t) queue_next(&p->listq);
                vm_page_lock_queues();
                vm_page_deactivate(p);
                vm_page_unlock_queues();
                p = next;
        }
}

/*
 * Trim the object cache to size.
 */
vm_object_cache_trim()
{
        register vm_object_t    object;

        vm_object_cache_lock();
        while (vm_object_cached > vm_cache_max) {
                object = (vm_object_t) queue_first(&vm_object_cached_list);
                vm_object_cache_unlock();

                if (object != vm_object_lookup(object->pager))
                        panic("vm_object_cache_trim: I'm sooo confused.");

                pager_cache(object, FALSE);

                vm_object_cache_lock();
        }
        vm_object_cache_unlock();
}

/*
 * vm_object_shutdown()
 *
 * Shut down the object system.  Unfortunately, while we
 * may be trying to do this, init is happily waiting for
 * processes to exit, and therefore will be causing some objects
 * to be deallocated.  To handle this, we gain a fake reference
 * to all objects we release paging areas for.  This will prevent
 * a duplicate deallocation.  This routine is probably full of
 * race conditions!
 */

void vm_object_shutdown()
{
        register vm_object_t    object;

        /*
         * Clean up the object cache *before* we screw up the reference
         * counts on all of the objects.
         */

        vm_object_cache_clear();

        printf("free paging spaces: ");

        /*
         * First we gain a reference to each object so that
         * no one else will deallocate them.
         */

        simple_lock(&vm_object_list_lock);
        object = (vm_object_t) queue_first(&vm_object_list);
        while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
                vm_object_reference(object);
                object = (vm_object_t) queue_next(&object->object_list);
        }
        simple_unlock(&vm_object_list_lock);

        /*
         * Now we deallocate all the paging areas.  We don't need
         * to lock anything because we've reduced to a single
         * processor while shutting down.  This also assumes that
         * no new objects are being created.
         */

        object = (vm_object_t) queue_first(&vm_object_list);
        while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
                if (object->pager != NULL)
                        vm_pager_deallocate(object->pager);
                object = (vm_object_t) queue_next(&object->object_list);
                printf(".");
        }
        printf("done.\n");
}

/*
 * vm_object_pmap_copy:
 *
 * Makes all physical pages in the specified
 * object range copy-on-write.  No writeable
 * references to these pages should remain.
 *
 * The object must *not* be locked.
 */
void vm_object_pmap_copy(object, start, end)
        register vm_object_t    object;
        register vm_offset_t    start;
        register vm_offset_t    end;
{
        register vm_page_t      p;

        if (object == NULL)
                return;

        vm_object_lock(object);
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if ((start <= p->offset) && (p->offset < end)) {
                        pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
                        p->copy_on_write = TRUE;
                }
                p = (vm_page_t) queue_next(&p->listq);
        }
        vm_object_unlock(object);
}

/*
 * vm_object_pmap_remove:
 *
 * Removes all physical pages in the specified
 * object range from all physical maps.
 *
 * The object must *not* be locked.
 */
void vm_object_pmap_remove(object, start, end)
        register vm_object_t    object;
        register vm_offset_t    start;
        register vm_offset_t    end;
{
        register vm_page_t      p;

        if (object == NULL)
                return;

        vm_object_lock(object);
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if ((start <= p->offset) && (p->offset < end))
                        pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
                p = (vm_page_t) queue_next(&p->listq);
        }
        vm_object_unlock(object);
}

/*
 * vm_object_copy:
 *
 * Create a new object which is a copy of an existing
 * object, and mark all of the pages in the existing
 * object 'copy-on-write'.  The new object has one reference.
 * Returns the new object.
 *
 * May defer the copy until later if the source object is internal
 * or has no pager yet (i.e., is backed only by the default pager).
 */
void vm_object_copy(src_object, src_offset, size,
                    dst_object, dst_offset, src_needs_copy)
        register vm_object_t    src_object;
        vm_offset_t             src_offset;
        vm_size_t               size;
        vm_object_t             *dst_object;    /* OUT */
        vm_offset_t             *dst_offset;    /* OUT */
        boolean_t               *src_needs_copy; /* OUT */
{
        register vm_object_t    new_copy;
        register vm_object_t    old_copy;
        vm_offset_t             new_start, new_end;

        register vm_page_t      p;

        if (src_object == NULL) {
                /*
                 * Nothing to copy
                 */
                *dst_object = NULL;
                *dst_offset = 0;
                *src_needs_copy = FALSE;
                return;
        }

        /*
         * If the object's pager is null_pager or the
         * default pager, we don't have to make a copy
         * of it.  Instead, we set the needs copy flag and
         * make a shadow later.
         */

        vm_object_lock(src_object);
        if (src_object->pager == NULL ||
            src_object->internal) {

                /*
                 * Make another reference to the object
                 */
                src_object->ref_count++;

                /*
                 * Mark all of the pages copy-on-write.
                 */
                for (p = (vm_page_t) queue_first(&src_object->memq);
                     !queue_end(&src_object->memq, (queue_entry_t)p);
                     p = (vm_page_t) queue_next(&p->listq)) {
                        if (src_offset <= p->offset &&
                            p->offset < src_offset + size)
                                p->copy_on_write = TRUE;
                }
                vm_object_unlock(src_object);

                *dst_object = src_object;
                *dst_offset = src_offset;

                /*
                 * Must make a shadow when write is desired
                 */
                *src_needs_copy = TRUE;
                return;
        }

        /*
         * Try to collapse the object before copying it.
         */
        vm_object_collapse(src_object);

        /*
         * If the object has a pager, the pager wants to
         * see all of the changes.  We need a copy-object
         * for the changed pages.
         *
         * If there is a copy-object, and it is empty,
         * no changes have been made to the object since the
         * copy-object was made.  We can use the same copy-
         * object.
         */

Retry1:
        old_copy = src_object->copy;
        if (old_copy != NULL) {
                /*
                 * Try to get the locks (out of order)
                 */
                if (!vm_object_lock_try(old_copy)) {
                        vm_object_unlock(src_object);

                        /* should spin a bit here... */
                        vm_object_lock(src_object);
                        goto Retry1;
                }

                if (old_copy->resident_page_count == 0 &&
                    old_copy->pager == NULL) {
                        /*
                         * Return another reference to
                         * the existing copy-object.
                         */
                        old_copy->ref_count++;
                        vm_object_unlock(old_copy);
                        vm_object_unlock(src_object);
                        *dst_object = old_copy;
                        *dst_offset = src_offset;
                        *src_needs_copy = FALSE;
                        return;
                }
                vm_object_unlock(old_copy);
        }
        vm_object_unlock(src_object);

        /*
         * If the object has a pager, the pager wants
         * to see all of the changes.  We must make
         * a copy-object and put the changed pages there.
         *
         * The copy-object is always made large enough to
         * completely shadow the original object, since
         * it may have several users who want to shadow
         * the original object at different points.
         */

        new_copy = vm_object_allocate(src_object->size);

Retry2:
        vm_object_lock(src_object);
        /*
         * Copy object may have changed while we were unlocked
         */
        old_copy = src_object->copy;
        if (old_copy != NULL) {
                /*
                 * Try to get the locks (out of order)
                 */
                if (!vm_object_lock_try(old_copy)) {
                        vm_object_unlock(src_object);
                        goto Retry2;
                }

                /*
                 * Consistency check
                 */
                if (old_copy->shadow != src_object ||
                    old_copy->shadow_offset != (vm_offset_t) 0)
                        panic("vm_object_copy: copy/shadow inconsistency");

                /*
                 * Make the old copy-object shadow the new one.
                 * It will receive no more pages from the original
                 * object.
                 */

                src_object->ref_count--;        /* remove ref. from old_copy */
                old_copy->shadow = new_copy;
                new_copy->ref_count++;          /* locking not needed - we
                                                   have the only pointer */
                vm_object_unlock(old_copy);     /* done with old_copy */
        }

        new_start = (vm_offset_t) 0;    /* always shadow original at 0 */
        new_end = (vm_offset_t) new_copy->size;  /* for the whole object */

        /*
         * Point the new copy at the existing object.
         */

        new_copy->shadow = src_object;
        new_copy->shadow_offset = new_start;
        src_object->ref_count++;
        src_object->copy = new_copy;

        /*
         * Mark all the affected pages of the existing object
         * copy-on-write.
         */
        p = (vm_page_t) queue_first(&src_object->memq);
        while (!queue_end(&src_object->memq, (queue_entry_t) p)) {
                if ((new_start <= p->offset) && (p->offset < new_end))
                        p->copy_on_write = TRUE;
                p = (vm_page_t) queue_next(&p->listq);
        }

        vm_object_unlock(src_object);

        *dst_object = new_copy;
        *dst_offset = src_offset - new_start;
        *src_needs_copy = FALSE;
}

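/*
 * To summarize the three exits above: (1) an internal or pagerless
 * source is shared directly with *src_needs_copy set, deferring the
 * shadow to the caller; (2) an existing copy-object that is still
 * empty (no resident pages, no pager) is handed back with a fresh
 * reference; (3) otherwise a new copy-object is allocated and spliced
 * in between the source object and any old copy-object.
 */
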
/*
 * vm_object_shadow:
 *
 * Create a new object which is backed by the
 * specified existing object range.  The source
 * object reference is deallocated.
 *
 * The new object and offset into that object
 * are returned in the source parameters.
 */

void vm_object_shadow(object, offset, length)
        vm_object_t     *object;        /* IN/OUT */
        vm_offset_t     *offset;        /* IN/OUT */
        vm_size_t       length;
{
        register vm_object_t    source;
        register vm_object_t    result;

        source = *object;

        /*
         * Allocate a new object with the given length
         */

        if ((result = vm_object_allocate(length)) == NULL)
                panic("vm_object_shadow: no object for shadowing");

        /*
         * The new object shadows the source object, adding
         * a reference to it.  Our caller changes his reference
         * to point to the new object, removing a reference to
         * the source object.  Net result: no change of reference
         * count.
         */
        result->shadow = source;

        /*
         * Store the offset into the source object,
         * and fix up the offset into the new object.
         */

        result->shadow_offset = *offset;

        /*
         * Return the new things
         */

        *offset = 0;
        *object = result;
}

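/*
 * A sketch of the caller's side (the "entry" names here are
 * hypothetical stand-ins for whatever map entry holds the reference):
 * shadowing replaces the caller's object/offset pair in place.
 */
#if 0
        vm_object_t     obj = entry_object;     /* hypothetical */
        vm_offset_t     off = entry_offset;     /* hypothetical */

        vm_object_shadow(&obj, &off, entry_size);
        entry_object = obj;     /* now the new front object */
        entry_offset = off;     /* now relative to the new object */
#endif
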
/*
 * Set the specified object's pager to the specified pager.
 */

void vm_object_setpager(object, pager, paging_offset,
                        read_only)
        vm_object_t     object;
        vm_pager_t      pager;
        vm_offset_t     paging_offset;
        boolean_t       read_only;
{
#ifdef lint
        read_only++;    /* No longer used */
#endif /* lint */

        vm_object_lock(object);                 /* XXX ? */
        object->pager = pager;
        object->paging_offset = paging_offset;
        vm_object_unlock(object);               /* XXX ? */
}

/*
 * vm_object_hash hashes the pager/id pair.
 */

#define vm_object_hash(pager) \
        (((unsigned)pager)%VM_OBJECT_HASH_COUNT)

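/*
 * In other words, the pager address is treated as an unsigned integer
 * and reduced modulo the bucket count, e.g.
 *
 *      bucket = &vm_object_hashtable[((unsigned) pager) % 157];
 *
 * VM_OBJECT_HASH_COUNT is prime (157), which helps spread pointer
 * values -- whose low-order bits cluster due to allocator alignment --
 * more evenly across the buckets.
 */
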
/*
 * vm_object_lookup looks in the object cache for an object with the
 * specified pager and paging id.
 */

vm_object_t vm_object_lookup(pager)
        vm_pager_t      pager;
{
        register queue_t                bucket;
        register vm_object_hash_entry_t entry;
        vm_object_t                     object;

        bucket = &vm_object_hashtable[vm_object_hash(pager)];

        vm_object_cache_lock();

        entry = (vm_object_hash_entry_t) queue_first(bucket);
        while (!queue_end(bucket, (queue_entry_t) entry)) {
                object = entry->object;
                if (object->pager == pager) {
                        vm_object_lock(object);
                        if (object->ref_count == 0) {
                                queue_remove(&vm_object_cached_list, object,
                                        vm_object_t, cached_list);
                                vm_object_cached--;
                        }
                        object->ref_count++;
                        vm_object_unlock(object);
                        vm_object_cache_unlock();
                        return(object);
                }
                entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
        }

        vm_object_cache_unlock();
        return(NULL);
}

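/*
 * Note that a successful lookup does two things at once: it takes out
 * a new reference, and, if the object was sitting unreferenced on the
 * cached list, pulls it back off that list.  This is why routines such
 * as vm_object_cache_trim above and vm_object_cache_clear below go
 * through vm_object_lookup rather than vm_object_reference.
 */
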
/*
 * vm_object_enter enters the specified object/pager/id into
 * the hash table.
 */

void vm_object_enter(object, pager)
        vm_object_t     object;
        vm_pager_t      pager;
{
        register queue_t                bucket;
        register vm_object_hash_entry_t entry;

        /*
         * We don't cache null objects, and we can't cache
         * objects with the null pager.
         */

        if (object == NULL)
                return;
        if (pager == NULL)
                return;

        bucket = &vm_object_hashtable[vm_object_hash(pager)];
        entry = (vm_object_hash_entry_t)
                malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
        entry->object = object;
        object->can_persist = TRUE;

        vm_object_cache_lock();
        queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
        vm_object_cache_unlock();
}

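/*
 * Pairing note: vm_object_enter is also what makes an object eligible
 * for the persistence path in vm_object_deallocate (it sets
 * can_persist), while vm_object_remove below tears the hash entry back
 * out when the object is finally terminated.
 */
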
/*
 * vm_object_remove:
 *
 * Remove the pager from the hash table.
 * Note: This assumes that the object cache
 * is locked.  XXX this should be fixed
 * by reorganizing vm_object_deallocate.
 */
vm_object_remove(pager)
        register vm_pager_t     pager;
{
        register queue_t                bucket;
        register vm_object_hash_entry_t entry;
        register vm_object_t            object;

        bucket = &vm_object_hashtable[vm_object_hash(pager)];

        entry = (vm_object_hash_entry_t) queue_first(bucket);
        while (!queue_end(bucket, (queue_entry_t) entry)) {
                object = entry->object;
                if (object->pager == pager) {
                        queue_remove(bucket, entry, vm_object_hash_entry_t,
                                hash_links);
                        free((caddr_t)entry, M_VMOBJHASH);
                        break;
                }
                entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
        }
}

/*
 * vm_object_cache_clear removes all objects from the cache.
 */

void vm_object_cache_clear()
{
        register vm_object_t    object;

        /*
         * Remove each object in the cache by scanning down the
         * list of cached objects.
         */
        vm_object_cache_lock();
        while (!queue_empty(&vm_object_cached_list)) {
                object = (vm_object_t) queue_first(&vm_object_cached_list);
                vm_object_cache_unlock();

                /*
                 * Note: it is important that we use vm_object_lookup
                 * to gain a reference, and not vm_object_reference, because
                 * the logic for removing an object from the cache lies in
                 * lookup.
                 */
                if (object != vm_object_lookup(object->pager))
                        panic("vm_object_cache_clear: I'm sooo confused.");
                pager_cache(object, FALSE);

                vm_object_cache_lock();
        }
        vm_object_cache_unlock();
}

boolean_t vm_object_collapse_allowed = TRUE;
/*
 * vm_object_collapse:
 *
 * Collapse an object with the object backing it.
 * Pages in the backing object are moved into the
 * parent, and the backing object is deallocated.
 *
 * Requires that the object be locked and the page
 * queues be unlocked.
 */
void vm_object_collapse(object)
        register vm_object_t    object;
{
        register vm_object_t    backing_object;
        register vm_offset_t    backing_offset;
        register vm_size_t      size;
        register vm_offset_t    new_offset;
        register vm_page_t      p, pp;

        if (!vm_object_collapse_allowed)
                return;

        while (TRUE) {
                /*
                 * Verify that the conditions are right for collapse:
                 *
                 * The object exists and no pages in it are currently
                 * being paged out (or have ever been paged out).
                 */
                if (object == NULL ||
                    object->paging_in_progress != 0 ||
                    object->pager != NULL)
                        return;

                /*
                 * There is a backing object, and
                 */

                if ((backing_object = object->shadow) == NULL)
                        return;

                vm_object_lock(backing_object);
                /*
                 * ...
                 * The backing object is not read_only,
                 * and no pages in the backing object are
                 * currently being paged out.
                 * The backing object is internal.
                 */

                if (!backing_object->internal ||
                    backing_object->paging_in_progress != 0) {
                        vm_object_unlock(backing_object);
                        return;
                }

                /*
                 * The backing object can't be a copy-object:
                 * the shadow_offset for the copy-object must stay
                 * as 0.  Furthermore (for the 'we have all the
                 * pages' case), if we bypass backing_object and
                 * just shadow the next object in the chain, old
                 * pages from that object would then have to be copied
                 * BOTH into the (former) backing_object and into the
                 * parent object.
                 */
                if (backing_object->shadow != NULL &&
                    backing_object->shadow->copy != NULL) {
                        vm_object_unlock(backing_object);
                        return;
                }

                /*
                 * We know that we can either collapse the backing
                 * object (if the parent is the only reference to
                 * it) or (perhaps) remove the parent's reference
                 * to it.
                 */

                backing_offset = object->shadow_offset;
                size = object->size;

                /*
                 * If there is exactly one reference to the backing
                 * object, we can collapse it into the parent.
                 */

                if (backing_object->ref_count == 1) {

                        /*
                         * We can collapse the backing object.
                         *
                         * Move all in-memory pages from backing_object
                         * to the parent.  Pages that have been paged out
                         * will be overwritten by any of the parent's
                         * pages that shadow them.
                         */

                        while (!queue_empty(&backing_object->memq)) {

                                p = (vm_page_t)
                                        queue_first(&backing_object->memq);

                                new_offset = (p->offset - backing_offset);

                                /*
                                 * If the parent has a page here, or if
                                 * this page falls outside the parent,
                                 * dispose of it.
                                 *
                                 * Otherwise, move it as planned.
                                 */

                                if (p->offset < backing_offset ||
                                    new_offset >= size) {
                                        vm_page_lock_queues();
                                        vm_page_free(p);
                                        vm_page_unlock_queues();
                                } else {
                                        pp = vm_page_lookup(object, new_offset);
                                        if (pp != NULL && !pp->fake) {
                                                vm_page_lock_queues();
                                                vm_page_free(p);
                                                vm_page_unlock_queues();
                                        }
                                        else {
                                                if (pp) {
                                                        /* may be someone waiting for it */
                                                        PAGE_WAKEUP(pp);
                                                        vm_page_lock_queues();
                                                        vm_page_free(pp);
                                                        vm_page_unlock_queues();
                                                }
                                                vm_page_rename(p, object, new_offset);
                                        }
                                }
                        }

                        /*
                         * Move the pager from backing_object to object.
                         *
                         * XXX We're only using part of the paging space
                         * for keeps now... we ought to discard the
                         * unused portion.
                         */

                        object->pager = backing_object->pager;
                        object->paging_offset += backing_offset;

                        backing_object->pager = NULL;

                        /*
                         * Object now shadows whatever backing_object did.
                         * Note that the reference to backing_object->shadow
                         * moves from within backing_object to within object.
                         */

                        object->shadow = backing_object->shadow;
                        object->shadow_offset += backing_object->shadow_offset;
                        if (object->shadow != NULL &&
                            object->shadow->copy != NULL) {
                                panic("vm_object_collapse: we collapsed a copy-object!");
                        }
                        /*
                         * Discard backing_object.
                         *
                         * Since the backing object has no pages, no
                         * pager left, and no object references within it,
                         * all that is necessary is to dispose of it.
                         */

                        vm_object_unlock(backing_object);

                        simple_lock(&vm_object_list_lock);
                        queue_remove(&vm_object_list, backing_object,
                                vm_object_t, object_list);
                        vm_object_count--;
                        simple_unlock(&vm_object_list_lock);

                        free((caddr_t)backing_object, M_VMOBJ);

                        object_collapses++;
                }
                else {
                        /*
                         * If all of the pages in the backing object are
                         * shadowed by the parent object, the parent
                         * object no longer has to shadow the backing
                         * object; it can shadow the next one in the
                         * chain.
                         *
                         * The backing object must not be paged out - we'd
                         * have to check all of the paged-out pages, as
                         * well.
                         */

                        if (backing_object->pager != NULL) {
                                vm_object_unlock(backing_object);
                                return;
                        }

                        /*
                         * Should have a check for a 'small' number
                         * of pages here.
                         */

                        p = (vm_page_t) queue_first(&backing_object->memq);
                        while (!queue_end(&backing_object->memq,
                                          (queue_entry_t) p)) {

                                new_offset = (p->offset - backing_offset);

                                /*
                                 * If the parent has a page here, or if
                                 * this page falls outside the parent,
                                 * keep going.
                                 *
                                 * Otherwise, the backing_object must be
                                 * left in the chain.
                                 */

                                if (p->offset >= backing_offset &&
                                    new_offset < size &&
                                    ((pp = vm_page_lookup(object, new_offset))
                                      == NULL ||
                                     pp->fake)) {
                                        /*
                                         * Page still needed.
                                         * Can't go any further.
                                         */
                                        vm_object_unlock(backing_object);
                                        return;
                                }
                                p = (vm_page_t) queue_next(&p->listq);
                        }

                        /*
                         * Make the parent shadow the next object
                         * in the chain.  Deallocating backing_object
                         * will not remove it, since its reference
                         * count is at least 2.
                         */

                        vm_object_reference(object->shadow = backing_object->shadow);
                        object->shadow_offset += backing_object->shadow_offset;

                        /*
                         * Drop the reference count on backing_object.
                         * Since its ref_count was at least 2, it
                         * will not vanish; so we don't need to call
                         * vm_object_deallocate.
                         */
                        backing_object->ref_count--;
                        vm_object_unlock(backing_object);

                        object_bypasses++;
                }

                /*
                 * Try again with this object's new backing object.
                 */
        }
}
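
/*
 * The two counters kept near the top of this file record which branch
 * the loop above took: object_collapses counts full merges (a
 * single-reference backing object absorbed into its parent), while
 * object_bypasses counts the cheaper case where the parent merely
 * re-aims its shadow pointer past a fully shadowed backing object.
 */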

/*
 * vm_object_page_remove: [internal]
 *
 * Removes all physical pages in the specified
 * object range from the object's list of pages.
 *
 * The object must be locked.
 */
void vm_object_page_remove(object, start, end)
        register vm_object_t    object;
        register vm_offset_t    start;
        register vm_offset_t    end;
{
        register vm_page_t      p, next;

        if (object == NULL)
                return;

        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                next = (vm_page_t) queue_next(&p->listq);
                if ((start <= p->offset) && (p->offset < end)) {
                        pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
                        vm_page_lock_queues();
                        vm_page_free(p);
                        vm_page_unlock_queues();
                }
                p = next;
        }
}

/*
 * Routine:     vm_object_coalesce
 * Function:    Coalesces two objects backing up adjoining
 *              regions of memory into a single object.
 *
 * Returns TRUE if the objects were combined.
 *
 * NOTE: Only works at the moment if the second object is NULL -
 *       if it's not, which object do we lock first?
 *
 * Parameters:
 *      prev_object     First object to coalesce
 *      prev_offset     Offset into prev_object
 *      next_object     Second object to coalesce
 *      next_offset     Offset into next_object
 *
 *      prev_size       Size of reference to prev_object
 *      next_size       Size of reference to next_object
 *
 * Conditions:
 *      The object must *not* be locked.
 */
boolean_t vm_object_coalesce(prev_object, next_object,
                             prev_offset, next_offset,
                             prev_size, next_size)
        register vm_object_t    prev_object;
        vm_object_t             next_object;
        vm_offset_t             prev_offset, next_offset;
        vm_size_t               prev_size, next_size;
{
        vm_size_t       newsize;

#ifdef lint
        next_offset++;
#endif /* lint */

        if (next_object != NULL) {
                return(FALSE);
        }

        if (prev_object == NULL) {
                return(TRUE);
        }

        vm_object_lock(prev_object);

        /*
         * Try to collapse the object first
         */
        vm_object_collapse(prev_object);

        /*
         * Can't coalesce if:
         * . more than one reference
         * . paged out
         * . shadows another object
         * . has a copy elsewhere
         * (any of which mean that the pages not mapped to
         * prev_entry may be in use anyway)
         */

        if (prev_object->ref_count > 1 ||
            prev_object->pager != NULL ||
            prev_object->shadow != NULL ||
            prev_object->copy != NULL) {
                vm_object_unlock(prev_object);
                return(FALSE);
        }

        /*
         * Remove any pages that may still be in the object from
         * a previous deallocation.
         */

        vm_object_page_remove(prev_object,
                        prev_offset + prev_size,
                        prev_offset + prev_size + next_size);

        /*
         * Extend the object if necessary.
         */
        newsize = prev_offset + prev_size + next_size;
        if (newsize > prev_object->size)
                prev_object->size = newsize;

        vm_object_unlock(prev_object);
        return(TRUE);
}

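/*
 * A sketch of the intended use (caller names are hypothetical): when a
 * new allocation lands immediately after an existing entry, the new
 * range can sometimes be folded into the previous entry's object
 * rather than allocating a second object.
 */
#if 0
        if (vm_object_coalesce(prev_entry_object, NULL,
                        prev_entry_offset, (vm_offset_t) 0,
                        prev_entry_size, new_size)) {
                /* grow the previous entry to cover the new range */
        }
#endif
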
/*
 * vm_object_print:     [ debug ]
 */
void vm_object_print(object, full)
        vm_object_t     object;
        boolean_t       full;
{
        register vm_page_t      p;
        extern int              indent;

        register int    count;

        if (object == NULL)
                return;

        iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
                (int) object, (int) object->size,
                object->resident_page_count, object->ref_count);
        printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
                (int) object->pager, (int) object->paging_offset,
                (int) object->shadow, (int) object->shadow_offset);
        printf("cache: next=0x%x, prev=0x%x\n",
                object->cached_list.next, object->cached_list.prev);

        if (!full)
                return;

        indent += 2;
        count = 0;
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if (count == 0)
                        iprintf("memory:=");
                else if (count == 6) {
                        printf("\n");
                        iprintf(" ...");
                        count = 0;
                } else
                        printf(",");
                count++;

                printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
                p = (vm_page_t) queue_next(&p->listq);
        }
        if (count != 0)
                printf("\n");
        indent -= 2;
}