Commit | Line | Data |
---|---|---|
b688fc87 WJ |
1 | /* |
2 | * Copyright (c) 1991 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * This code is derived from software contributed to Berkeley by | |
6 | * The Mach Operating System project at Carnegie-Mellon University. | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | |
11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. All advertising materials mentioning features or use of this software | |
17 | * must display the following acknowledgement: | |
18 | * This product includes software developed by the University of | |
19 | * California, Berkeley and its contributors. | |
20 | * 4. Neither the name of the University nor the names of its contributors | |
21 | * may be used to endorse or promote products derived from this software | |
22 | * without specific prior written permission. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
25 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
28 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
29 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
30 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
31 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
32 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
33 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
34 | * SUCH DAMAGE. | |
35 | * | |
36 | * @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 | |
37 | * | |
38 | * | |
39 | * Copyright (c) 1987, 1990 Carnegie-Mellon University. | |
40 | * All rights reserved. | |
41 | * | |
42 | * Authors: Avadis Tevanian, Jr., Michael Wayne Young | |
43 | * | |
44 | * Permission to use, copy, modify and distribute this software and | |
45 | * its documentation is hereby granted, provided that both the copyright | |
46 | * notice and this permission notice appear in all copies of the | |
47 | * software, derivative works or modified versions, and any portions | |
48 | * thereof, and that both notices appear in supporting documentation. | |
49 | * | |
50 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
51 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND | |
52 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
53 | * | |
54 | * Carnegie Mellon requests users of this software to return to | |
55 | * | |
56 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
57 | * School of Computer Science | |
58 | * Carnegie Mellon University | |
59 | * Pittsburgh PA 15213-3890 | |
60 | * | |
61 | * any improvements or extensions that they make and grant Carnegie the | |
62 | * rights to redistribute these changes. | |
63 | */ | |
64 | ||
65 | /* | |
66 | * The proverbial page-out daemon. | |
67 | */ | |
68 | ||
69 | #include "param.h" | |
70 | ||
71 | #include "vm.h" | |
72 | #include "vm_page.h" | |
73 | #include "vm_pageout.h" | |
74 | ||
int	vm_pages_needed;	/* Event on which pageout daemon sleeps */
int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */

/*
 * Upper bound applied to the computed vm_page_free_min in vm_pageout();
 * keeps the "minimum free pages" threshold from growing without limit
 * on machines with lots of memory.
 */
int	vm_page_free_min_sanity = 40;
79 | ||
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 *
 * Two phases:
 *   1. Walk the inactive queue, freeing clean unreferenced pages and
 *      starting pageout ("laundering") of dirty ones, until the free
 *      page count reaches vm_page_free_target or the queue is exhausted.
 *   2. Deactivate pages from the active queue to refill the inactive
 *      queue up to vm_page_inactive_target.
 *
 * Called from the pageout daemon with no locks held; takes and releases
 * the page-queue lock internally, and samples the free count under
 * splimp + the free-queue lock.
 */
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 * Only continue when we want more pages to be "free"
	 */

	/*
	 * Snapshot the free count.  The free queue is manipulated at
	 * interrupt level, hence the splimp around the lock.
	 */
	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = vm_page_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < vm_page_free_target) {
		swapout_threads();

		/*
		 * Be sure the pmap system is updated so
		 * we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 * Acquire the resident page system lock,
	 * as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or
	 * we have scanned through the entire queue.  If we
	 * encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;

		/* Re-sample the free count each iteration; freeing and
		 * pager completions may have raised it past the target. */
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = vm_page_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= vm_page_free_target)
			break;

		if (m->clean) {
			/* Record the successor now; m may be freed below. */
			next = (vm_page_t) queue_next(&m->pageq);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				/* Page was touched since deactivation --
				 * give it another trip through the active
				 * queue instead of freeing it. */
				vm_page_activate(m);
				vm_stat.reactivations++;
			}
			else {
				register vm_object_t	object;
				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 * Can't lock object -
					 * skip page.
					 */
					m = next;
					continue;
				}
				/* Remove all mappings before freeing so no
				 * one can touch the page afterwards. */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);	/* will dequeue */
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
		}
		else {
			/*
			 * If a page is dirty, then it is either
			 * being washed (but not yet cleaned)
			 * or it is still in the laundry.  If it is
			 * still in the laundry, then we start the
			 * cleaning operation.
			 */

			if (m->laundry) {
				/*
				 * Clean the page and remove it from the
				 * laundry.
				 *
				 * We set the busy bit to cause
				 * potential page faults on this page to
				 * block.
				 *
				 * And we set pageout-in-progress to keep
				 * the object from disappearing during
				 * pageout.  This guarantees that the
				 * page won't move from the inactive
				 * queue.  (However, any other page on
				 * the inactive queue may move!)
				 */

				register vm_object_t	object;
				register vm_pager_t	pager;
				int			pageout_status;

				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 * Skip page if we can't lock
					 * its object
					 */
					m = (vm_page_t) queue_next(&m->pageq);
					continue;
				}

				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				m->busy = TRUE;
				vm_stat.pageouts++;

				/*
				 * Try to collapse the object before
				 * making a pager for it.  We must
				 * unlock the page queues first.
				 */
				vm_page_unlock_queues();

				vm_object_collapse(object);

				object->paging_in_progress++;
				vm_object_unlock(object);

				/*
				 * Do a wakeup here in case the following
				 * operations block.
				 */
				thread_wakeup((int) &vm_page_free_count);

				/*
				 * If there is no pager for the page,
				 * use the default pager.  If there's
				 * no place to put the page at the
				 * moment, leave it in the laundry and
				 * hope that there will be paging space
				 * later.
				 */

				if ((pager = object->pager) == NULL) {
					pager = vm_pager_allocate(PG_DFLT,
								  (caddr_t)0,
								  object->size,
								  VM_PROT_ALL);
					if (pager != NULL) {
						vm_object_setpager(object,
							pager, 0, FALSE);
					}
				}
				/* The put may sleep doing I/O; the page is
				 * protected by its busy bit meanwhile. */
				pageout_status = pager ?
					vm_pager_put(pager, m, FALSE) :
					VM_PAGER_FAIL;
				vm_object_lock(object);
				vm_page_lock_queues();
				/* Recompute the successor only after the
				 * queues are locked again -- other pages may
				 * have moved while they were unlocked. */
				next = (vm_page_t) queue_next(&m->pageq);

				switch (pageout_status) {
				case VM_PAGER_OK:
				case VM_PAGER_PEND:
					m->laundry = FALSE;
					break;
				case VM_PAGER_BAD:
					/*
					 * Page outside of range of object.
					 * Right now we essentially lose the
					 * changes by pretending it worked.
					 * XXX dubious, what should we do?
					 */
					m->laundry = FALSE;
					m->clean = TRUE;
					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
					break;
				case VM_PAGER_FAIL:
					/*
					 * If page couldn't be paged out, then
					 * reactivate the page so it doesn't
					 * clog the inactive list.  (We will
					 * try paging out it again later).
					 */
					vm_page_activate(m);
					break;
				}

				pmap_clear_reference(VM_PAGE_TO_PHYS(m));

				/*
				 * If the operation is still going, leave
				 * the page busy to block all other accesses.
				 * Also, leave the paging in progress
				 * indicator set so that we don't attempt an
				 * object collapse.
				 * (For VM_PAGER_PEND, the pager is presumably
				 * expected to clear busy and drop
				 * paging_in_progress when the I/O completes
				 * -- TODO confirm against the pager code.)
				 */
				if (pageout_status != VM_PAGER_PEND) {
					m->busy = FALSE;
					PAGE_WAKEUP(m);
					object->paging_in_progress--;
				}
				thread_wakeup((int) object);
				vm_object_unlock(object);
				m = next;
			}
			else
				/* Dirty but already being washed -- leave it
				 * alone and move on. */
				m = (vm_page_t) queue_next(&m->pageq);
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */

	page_shortage = vm_page_inactive_target - vm_page_inactive_count;
	page_shortage -= vm_page_free_count;

	/* If nothing was freed above, force at least one deactivation so
	 * the daemon always makes forward progress. */
	if ((page_shortage <= 0) && (pages_freed == 0))
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 * Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}
330 | ||
331 | /* | |
332 | * vm_pageout is the high level pageout daemon. | |
333 | */ | |
334 | ||
335 | void vm_pageout() | |
336 | { | |
337 | (void) spl0(); | |
338 | ||
339 | /* | |
340 | * Initialize some paging parameters. | |
341 | */ | |
342 | ||
343 | if (vm_page_free_min == 0) { | |
344 | vm_page_free_min = vm_page_free_count / 20; | |
345 | if (vm_page_free_min < 3) | |
346 | vm_page_free_min = 3; | |
347 | ||
348 | if (vm_page_free_min > vm_page_free_min_sanity) | |
349 | vm_page_free_min = vm_page_free_min_sanity; | |
350 | } | |
351 | ||
352 | if (vm_page_free_reserved == 0) { | |
353 | if ((vm_page_free_reserved = vm_page_free_min / 2) < 10) | |
354 | vm_page_free_reserved = 10; | |
355 | } | |
356 | if (vm_pageout_free_min == 0) { | |
357 | if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10) | |
358 | vm_pageout_free_min = 10; | |
359 | } | |
360 | ||
361 | if (vm_page_free_target == 0) | |
362 | vm_page_free_target = (vm_page_free_min * 4) / 3; | |
363 | ||
364 | if (vm_page_inactive_target == 0) | |
365 | vm_page_inactive_target = vm_page_free_min * 2; | |
366 | ||
367 | if (vm_page_free_target <= vm_page_free_min) | |
368 | vm_page_free_target = vm_page_free_min + 1; | |
369 | ||
370 | if (vm_page_inactive_target <= vm_page_free_target) | |
371 | vm_page_inactive_target = vm_page_free_target + 1; | |
372 | ||
373 | /* | |
374 | * The pageout daemon is never done, so loop | |
375 | * forever. | |
376 | */ | |
377 | ||
378 | simple_lock(&vm_pages_needed_lock); | |
379 | while (TRUE) { | |
380 | thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock, | |
381 | FALSE); | |
382 | vm_pageout_scan(); | |
383 | vm_pager_sync(); | |
384 | simple_lock(&vm_pages_needed_lock); | |
385 | thread_wakeup((int) &vm_page_free_count); | |
386 | } | |
387 | } |