/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	7.6 (Berkeley) %G%
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */
ffe0d082 MK |
43 | #include "param.h" |
44 | ||
45 | #include "vm.h" | |
46 | #include "vm_page.h" | |
47 | #include "vm_pageout.h" | |
175f072e | 48 | |
/*
 * Event on which the pageout daemon sleeps; pageout is kicked by a
 * thread_wakeup() on this address when free memory runs low.
 */
int	vm_pages_needed;

/*
 * Hard ceiling on the auto-computed free-page minimum; keeps the
 * "5% of free pages" heuristic in vm_pageout() from growing unbounded
 * on large-memory machines.
 */
int	vm_page_free_min_sanity = 40;
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon:
 *	sweep the inactive queue, freeing clean unreferenced pages and
 *	starting pageout on dirty ones, until cnt.v_free_target free
 *	pages are available (or the queue is exhausted); then refill
 *	the inactive queue from the active queue to cover any shortage.
 */
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 *	Only continue when we want more pages to be "free"
	 */

	/* Sample the free count under splimp + the free-queue lock. */
	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;

		/* Re-sample the free count each iteration; pageouts
		 * completing elsewhere may have satisfied the target. */
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= cnt.v_free_target)
			break;

		if (m->clean) {
			next = (vm_page_t) queue_next(&m->pageq);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				/* Recently referenced: give it another
				 * trip through the active queue. */
				vm_page_activate(m);
				cnt.v_reactivated++;
			}
			else {
				register vm_object_t	object;
				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Can't lock object -
					 *	skip page.
					 */
					m = next;
					continue;
				}
				/* Remove all mappings before freeing so no
				 * one can touch the page afterwards. */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);	/* will dequeue */
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
		}
		else {
			/*
			 *	If a page is dirty, then it is either
			 *	being washed (but not yet cleaned)
			 *	or it is still in the laundry.  If it is
			 *	still in the laundry, then we start the
			 *	cleaning operation.
			 */

			if (m->laundry) {
				/*
				 *	Clean the page and remove it from the
				 *	laundry.
				 *
				 *	We set the busy bit to cause
				 *	potential page faults on this page to
				 *	block.
				 *
				 *	And we set pageout-in-progress to keep
				 *	the object from disappearing during
				 *	pageout.  This guarantees that the
				 *	page won't move from the inactive
				 *	queue.  (However, any other page on
				 *	the inactive queue may move!)
				 */

				register vm_object_t	object;
				register vm_pager_t	pager;
				int			pageout_status;

				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Skip page if we can't lock
					 *	its object
					 */
					m = (vm_page_t) queue_next(&m->pageq);
					continue;
				}

				/* Unmap everywhere so new modifications
				 * fault and block on the busy bit. */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				m->busy = TRUE;
				cnt.v_pageouts++;

				/*
				 *	Try to collapse the object before
				 *	making a pager for it.  We must
				 *	unlock the page queues first.
				 */
				vm_page_unlock_queues();

				vm_object_collapse(object);

				object->paging_in_progress++;
				vm_object_unlock(object);

				/*
				 *	Do a wakeup here in case the following
				 *	operations block.
				 */
				thread_wakeup((int) &cnt.v_free_count);

				/*
				 *	If there is no pager for the page,
				 *	use the default pager.  If there's
				 *	no place to put the page at the
				 *	moment, leave it in the laundry and
				 *	hope that there will be paging space
				 *	later.
				 */

				if ((pager = object->pager) == NULL) {
					pager = vm_pager_allocate(PG_DFLT,
								  (caddr_t)0,
								  object->size,
								  VM_PROT_ALL);
					if (pager != NULL) {
						vm_object_setpager(object,
							pager, 0, FALSE);
					}
				}
				/* No pager available: treat as a failed
				 * pageout and fall into the switch below. */
				pageout_status = pager ?
					vm_pager_put(pager, m, FALSE) :
					VM_PAGER_FAIL;
				vm_object_lock(object);
				vm_page_lock_queues();
				/* NOTE(review): queues were unlocked across
				 * vm_pager_put; m itself stayed put (busy +
				 * paging_in_progress), but its neighbors may
				 * have moved, so fetch next only now. */
				next = (vm_page_t) queue_next(&m->pageq);

				switch (pageout_status) {
				case VM_PAGER_OK:
				case VM_PAGER_PEND:
					m->laundry = FALSE;
					break;
				case VM_PAGER_BAD:
					/*
					 *	Page outside of range of object.
					 *	Right now we essentially lose the
					 *	changes by pretending it worked.
					 *	XXX dubious, what should we do?
					 */
					m->laundry = FALSE;
					m->clean = TRUE;
					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
					break;
				case VM_PAGER_FAIL:
					/*
					 *	If page couldn't be paged out, then
					 *	reactivate the page so it doesn't
					 *	clog the inactive list.  (We will
					 *	try paging out it again later).
					 */
					vm_page_activate(m);
					break;
				}

				pmap_clear_reference(VM_PAGE_TO_PHYS(m));

				/*
				 *	If the operation is still going, leave
				 *	the page busy to block all other accesses.
				 *	Also, leave the paging in progress
				 *	indicator set so that we don't attempt an
				 *	object collapse.
				 */
				if (pageout_status != VM_PAGER_PEND) {
					m->busy = FALSE;
					PAGE_WAKEUP(m);
					object->paging_in_progress--;
				}
				thread_wakeup((int) object);
				vm_object_unlock(object);
				m = next;
			}
			else
				/* Already being washed; leave it alone. */
				m = (vm_page_t) queue_next(&m->pageq);
		}
	}

	/*
	 *	Compute the page shortage.  If we are still very low on memory
	 *	be sure that we will move a minimal amount of pages from active
	 *	to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage -= cnt.v_free_count;

	/* If nothing was freed this pass, force at least one
	 * deactivation so the scan always makes forward progress. */
	if ((page_shortage <= 0) && (pages_freed == 0))
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}
303 | ||
/*
 *	vm_pageout is the high level pageout daemon: it computes the
 *	paging thresholds once at startup, then sleeps on
 *	vm_pages_needed and runs vm_pageout_scan() each time it is
 *	woken.  Never returns.
 */

void vm_pageout()
{
	(void) spl0();

	/*
	 *	Initialize some paging parameters.
	 */

	/* Default free-page minimum: 5% of currently free pages,
	 * clamped to [3, vm_page_free_min_sanity].  Each "if (x == 0)"
	 * below only fills in values not already configured. */
	if (cnt.v_free_min == 0) {
		cnt.v_free_min = cnt.v_free_count / 20;
		if (cnt.v_free_min < 3)
			cnt.v_free_min = 3;

		if (cnt.v_free_min > vm_page_free_min_sanity)
			cnt.v_free_min = vm_page_free_min_sanity;
	}

	if (cnt.v_free_target == 0)
		cnt.v_free_target = (cnt.v_free_min * 4) / 3;

	if (cnt.v_inactive_target == 0)
		cnt.v_inactive_target = cnt.v_free_min * 2;

	/* Enforce the ordering free_min < free_target < inactive_target
	 * even if the configured values violated it. */
	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;

	if (cnt.v_inactive_target <= cnt.v_free_target)
		cnt.v_inactive_target = cnt.v_free_target + 1;

	/*
	 *	The pageout daemon is never done, so loop
	 *	forever.
	 */

	/* NOTE(review): thread_sleep presumably releases
	 * vm_pages_needed_lock while asleep, so it is re-taken before
	 * each iteration's sleep -- confirm against the thread package. */
	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		/* Tell anyone waiting for free memory that a scan and
		 * pager flush have just completed. */
		thread_wakeup((int) &cnt.v_free_count);
	}
}