/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	8.1 (Berkeley) %G%
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

int	vm_pages_needed;	/* Event on which pageout daemon sleeps */

int	vm_page_free_min_sanity = 40;

int	vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
void
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 *	Only continue when we want more pages to be "free"
	 */

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);
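	/*
	 * Note that "free" is only a snapshot: it is sampled at splimp
	 * with the free-queue lock held, presumably because the free
	 * list may also be manipulated at interrupt level, and it may
	 * already be stale when tested below.  That is acceptable for
	 * a heuristic check like this one.
	 */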

	if (free < cnt.v_free_target) {
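		/*
		 * Below target: have the swapper push out eligible
		 * (long-idle) processes so their pages can be reclaimed.
		 */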
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;
		vm_object_t	object;
		vm_pager_t	pager;
		int		pageout_status;

		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= cnt.v_free_target)
			break;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			next = (vm_page_t) queue_next(&m->pageq);
			vm_page_activate(m);
			cnt.v_reactivated++;
			m = next;
			continue;
		}

		/*
		 * If the page is clean, free it up.
		 */
		if (m->flags & PG_CLEAN) {
			next = (vm_page_t) queue_next(&m->pageq);
			object = m->object;
			if (vm_object_lock_try(object)) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 *
		 * We set the busy bit to cause potential page faults on
		 * this page to block.
		 *
		 * We also set pageout-in-progress to keep the object from
		 * disappearing during pageout.  This guarantees that the
		 * page won't move from the inactive queue.  (However, any
		 * other page on the inactive queue may move!)
		 */
		object = m->object;
		if (!vm_object_lock_try(object)) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}
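		/*
		 * Remove all mappings of the page so no process can touch
		 * it while it is being cleaned: a new reference will now
		 * fault and block on PG_BUSY, and the modify state cannot
		 * change under us during the write-back.
		 */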
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
		m->flags |= PG_BUSY;
		cnt.v_pageouts++;

		/*
		 * Try to collapse the object before making a pager for it.
		 * We must unlock the page queues first.
		 */
		vm_page_unlock_queues();
		vm_object_collapse(object);

		object->paging_in_progress++;
		vm_object_unlock(object);

		/*
		 * Do a wakeup here in case the following operations block.
		 */
		thread_wakeup((int) &cnt.v_free_count);

		/*
		 * If there is no pager for the page, use the default pager.
		 * If there is no place to put the page at the moment,
		 * leave it in the laundry and hope that there will be
		 * paging space later.
		 */
		if ((pager = object->pager) == NULL) {
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
						  object->size, VM_PROT_ALL);
			if (pager != NULL)
				vm_object_setpager(object, pager, 0, FALSE);
		}
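		/*
		 * Push the page out.  The FALSE argument asks for an
		 * asynchronous write where the pager supports one, in
		 * which case the put returns VM_PAGER_PEND and the I/O
		 * completes later.
		 */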
		pageout_status = pager ?
		    vm_pager_put(pager, m, FALSE) : VM_PAGER_FAIL;
		vm_object_lock(object);
		vm_page_lock_queues();
		next = (vm_page_t) queue_next(&m->pageq);

		switch (pageout_status) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			m->flags &= ~PG_LAUNDRY;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 *
			 * XXX dubious, what should we do?
			 */
			m->flags &= ~PG_LAUNDRY;
			m->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			break;
		case VM_PAGER_FAIL:
		case VM_PAGER_ERROR:
			/*
			 * If the page couldn't be paged out, reactivate it
			 * so it doesn't clog the inactive list.  (We will
			 * try paging it out again later.)
			 */
			vm_page_activate(m);
			break;
		}

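		/*
		 * Clear the reference bit so that, if the page stays on
		 * the inactive queue, a later scan reactivates it only
		 * on a genuinely new reference rather than on activity
		 * caused by the pageout itself.
		 */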
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * If the operation is still going, leave the page busy
		 * to block all other accesses.  Also, leave the paging
		 * in progress indicator set so that we don't attempt an
		 * object collapse.
		 */
		if (pageout_status != VM_PAGER_PEND) {
			m->flags &= ~PG_BUSY;
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
		}
		thread_wakeup((int) object);
		vm_object_unlock(object);
		m = next;
	}

	/*
	 *	Compute the page shortage.  If we are still very low on
	 *	memory be sure that we will move a minimal number of pages
	 *	from active to inactive.
	 */
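	/*
	 * For example, if the inactive target is 60 pages and only 58
	 * are inactive, two pages are deactivated below.  If the target
	 * is already met but the scan above freed nothing, one page is
	 * still deactivated so the queues keep turning over.
	 */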

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}

/*
 *	vm_pageout is the high level pageout daemon.
 */

void
vm_pageout()
{
	(void) spl0();

	/*
	 *	Initialize some paging parameters.
	 */

	if (cnt.v_free_min == 0) {
		cnt.v_free_min = cnt.v_free_count / 20;
		if (cnt.v_free_min < 3)
			cnt.v_free_min = 3;

		if (cnt.v_free_min > vm_page_free_min_sanity)
			cnt.v_free_min = vm_page_free_min_sanity;
	}

	if (cnt.v_free_target == 0)
		cnt.v_free_target = (cnt.v_free_min * 4) / 3;

	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;
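
	/*
	 * To illustrate with made-up numbers: on a machine that boots
	 * with 1024 free pages, v_free_min starts as 1024 / 20 = 51,
	 * is clamped to vm_page_free_min_sanity (40), and v_free_target
	 * then becomes (40 * 4) / 3 = 53 pages.
	 */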

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	/*
	 *	The pageout daemon is never done, so loop
	 *	forever.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		/*
		 * Compute the inactive target for this scan.
		 * We need to keep a reasonable amount of memory in the
		 * inactive list to better simulate LRU behavior.
		 */
		cnt.v_inactive_target =
		    (cnt.v_active_count + cnt.v_inactive_count) / 3;
		if (cnt.v_inactive_target <= cnt.v_free_target)
			cnt.v_inactive_target = cnt.v_free_target + 1;
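		/*
		 * That is, aim to keep about one third of the pageable
		 * pages inactive: with, say, 600 active and 300 inactive
		 * pages the target is (600 + 300) / 3 = 300, which the
		 * inactive queue already meets.
		 */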

		vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		thread_wakeup((int) &cnt.v_free_count);
	}
}