/*
 * sys/vm/vm_pageout.c
 * From the unix-history mirror of NetBSD/386BSD sources, commit 15637ed4 (RG).
 */
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
65 * -------------------- ----- ----------------------
66 * CURRENT PATCH LEVEL: 1 00137
67 * -------------------- ----- ----------------------
68 *
69 * 20 Aug 92 David Greenman Removed un-necessary call to
70 * swapout_thread
71 * 08 Aug 93 Paul Kranenburg Add counters for vmstat
72 */
73
74/*
75 * The proverbial page-out daemon.
76 */
77
78#include "param.h"
79
80#include "vm.h"
81#include "vm_page.h"
82#include "vm_pageout.h"
83#include "vmmeter.h"
84
int	vm_pages_needed;	/* Event (sleep/wakeup channel) on which the pageout daemon sleeps */
int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */

int	vm_page_free_min_sanity = 40;	/* Upper bound applied to the auto-computed vm_page_free_min */

int	vm_page_pagesfreed;	/* Cumulative count of pages freed by the page daemon */
91
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 *
 * One pass of the daemon: free clean inactive pages (reactivating any
 * that have been referenced), start pageout ("laundering") of dirty
 * inactive pages, and finally deactivate enough active pages to keep
 * the inactive queue populated.  Called with no locks held; acquires
 * and releases the page-queue and object locks internally.
 */
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;	/* saved spl for free-count snapshots */
	register int		pages_freed;
	int			free;

	/*
	 *	Only continue when we want more pages to be "free"
	 */

	/*
	 * Snapshot the free count under splimp + the free-queue lock
	 * so we read a consistent value.
	 */
	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = vm_page_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < vm_page_free_target) {
#ifdef OMIT
		/* per the 20 Aug 92 change note, swapping threads out
		 * here was deemed unnecessary and is compiled out */
		swapout_threads();
#endif	/* OMIT*/

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;

		/* re-snapshot the free count each iteration; stop as
		 * soon as the target has been reached */
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = vm_page_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= vm_page_free_target)
			break;

		if (m->clean) {
			/* remember our successor before we free/move m */
			next = (vm_page_t) queue_next(&m->pageq);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				/* page was touched while inactive: give it
				 * another chance on the active queue */
				vm_page_activate(m);
				vm_stat.reactivations++;
			}
			else {
				register vm_object_t	object;
				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Can't lock object -
					 *	skip page.
					 */
					m = next;
					continue;
				}
				/* remove all mappings, then free the page */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);	/* will dequeue */
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
		}
		else {
			/*
			 *	If a page is dirty, then it is either
			 *	being washed (but not yet cleaned)
			 *	or it is still in the laundry.  If it is
			 *	still in the laundry, then we start the
			 *	cleaning operation.
			 */

			if (m->laundry) {
				/*
				 *	Clean the page and remove it from the
				 *	laundry.
				 *
				 *	We set the busy bit to cause
				 *	potential page faults on this page to
				 *	block.
				 *
				 *	And we set pageout-in-progress to keep
				 *	the object from disappearing during
				 *	pageout.  This guarantees that the
				 *	page won't move from the inactive
				 *	queue.  (However, any other page on
				 *	the inactive queue may move!)
				 */

				register vm_object_t	object;
				register vm_pager_t	pager;
				int			pageout_status;

				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Skip page if we can't lock
					 *	its object
					 */
					m = (vm_page_t) queue_next(&m->pageq);
					continue;
				}

				/* unmap everywhere so no one dirties it
				 * further while the pageout is in flight */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				m->busy = TRUE;
				vm_stat.pageouts++;

				/*
				 *	Try to collapse the object before
				 *	making a pager for it.  We must
				 *	unlock the page queues first.
				 */
				vm_page_unlock_queues();

				vm_object_collapse(object);

				object->paging_in_progress++;
				vm_object_unlock(object);

				/*
				 *	Do a wakeup here in case the following
				 *	operations block.
				 */
				thread_wakeup((int) &vm_page_free_count);

				/*
				 *	If there is no pager for the page,
				 *	use the default pager.  If there's
				 *	no place to put the page at the
				 *	moment, leave it in the laundry and
				 *	hope that there will be paging space
				 *	later.
				 */

				if ((pager = object->pager) == NULL) {
					pager = vm_pager_allocate(PG_DFLT,
								  (caddr_t)0,
								  object->size,
								  VM_PROT_ALL);
					if (pager != NULL) {
						vm_object_setpager(object,
							pager, 0, FALSE);
					}
				}
				/* push the page out; FAIL if we still
				 * have no pager to write it to */
				pageout_status = pager ?
					vm_pager_put(pager, m, FALSE) :
					VM_PAGER_FAIL;
				vm_object_lock(object);
				vm_page_lock_queues();
				/* queues were unlocked above, so compute
				 * our successor only now */
				next = (vm_page_t) queue_next(&m->pageq);

				switch (pageout_status) {
				case VM_PAGER_OK:
				case VM_PAGER_PEND:
					/* write done (or in flight): page is
					 * no longer waiting in the laundry */
					m->laundry = FALSE;
					break;
				case VM_PAGER_BAD:
					/*
					 *	Page outside of range of object.
					 *	Right now we essentially lose the
					 *	changes by pretending it worked.
					 *	XXX dubious, what should we do?
					 */
					m->laundry = FALSE;
					m->clean = TRUE;
					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
					break;
				case VM_PAGER_FAIL:
					/*
					 *	If page couldn't be paged out, then
					 *	reactivate the page so it doesn't
					 *	clog the inactive list.  (We will
					 *	try paging out it again later).
					 */
					vm_page_activate(m);
					break;
				}

				pmap_clear_reference(VM_PAGE_TO_PHYS(m));

				/*
				 *	If the operation is still going, leave
				 *	the page busy to block all other accesses.
				 *	Also, leave the paging in progress
				 *	indicator set so that we don't attempt an
				 *	object collapse.
				 */
				if (pageout_status != VM_PAGER_PEND) {
					m->busy = FALSE;
					PAGE_WAKEUP(m);	/* unblock faulters waiting on the busy page */
					object->paging_in_progress--;
				}
				thread_wakeup((int) object);
				vm_object_unlock(object);
				m = next;
			}
			else
				/* dirty but already being washed: skip it */
				m = (vm_page_t) queue_next(&m->pageq);
		}
	}

	/*
	 *	Compute the page shortage.  If we are still very low on memory
	 *	be sure that we will move a minimal amount of pages from active
	 *	to inactive.
	 */

	page_shortage = vm_page_inactive_target - vm_page_inactive_count;
	page_shortage -= vm_page_free_count;

	/* if nothing was freed this pass, force at least one deactivation */
	if ((page_shortage <= 0) && (pages_freed == 0))
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	/* vmstat accounting (08 Aug 93, Paul Kranenburg) */
	vm_page_pagesfreed += pages_freed;
	vm_page_unlock_queues();
}
345
346/*
347 * vm_pageout is the high level pageout daemon.
348 */
349
350void vm_pageout()
351{
352 (void) spl0();
353
354 /*
355 * Initialize some paging parameters.
356 */
357
358 if (vm_page_free_min == 0) {
359 vm_page_free_min = vm_page_free_count / 20;
360 if (vm_page_free_min < 3)
361 vm_page_free_min = 3;
362
363 if (vm_page_free_min > vm_page_free_min_sanity)
364 vm_page_free_min = vm_page_free_min_sanity;
365 }
366
367 if (vm_page_free_reserved == 0) {
368 if ((vm_page_free_reserved = vm_page_free_min / 2) < 10)
369 vm_page_free_reserved = 10;
370 }
371 if (vm_pageout_free_min == 0) {
372 if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10)
373 vm_pageout_free_min = 10;
374 }
375
376 if (vm_page_free_target == 0)
377 vm_page_free_target = (vm_page_free_min * 4) / 3;
378
379 if (vm_page_inactive_target == 0)
380 vm_page_inactive_target = vm_page_free_min * 2;
381
382 if (vm_page_free_target <= vm_page_free_min)
383 vm_page_free_target = vm_page_free_min + 1;
384
385 if (vm_page_inactive_target <= vm_page_free_target)
386 vm_page_inactive_target = vm_page_free_target + 1;
387
388 /*
389 * The pageout daemon is never done, so loop
390 * forever.
391 */
392
393 simple_lock(&vm_pages_needed_lock);
394 while (TRUE) {
395 thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
396 FALSE);
397 cnt.v_scan++;
398 vm_pageout_scan();
399 vm_pager_sync();
400 simple_lock(&vm_pages_needed_lock);
401 thread_wakeup((int) &vm_page_free_count);
402 }
403}