initialize swpa buffers to use proc0 credential (from Leres)
[unix-history] usr/src/sys/vm/vm_pageout.c
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	7.7 (Berkeley) %G%
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

int	vm_pages_needed;	/* Event on which pageout daemon sleeps */

int	vm_page_free_min_sanity = 40;

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
void
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 * Only continue when we want more pages to be "free"
	 */

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
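		/*
		 * Below the free target: swap out idle processes so
		 * that their pages become reclaimable.
		 */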
		swapout_threads();

		/*
		 * Be sure the pmap system is updated so
		 * we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 * Acquire the resident page system lock,
	 * as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or
	 * we have scanned through the entire queue.  If we
	 * encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;

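		/*
		 * Recheck the free page count; once we have reached
		 * the free target we can stop scanning.
		 */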
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= cnt.v_free_target)
			break;

		if (m->clean) {
			next = (vm_page_t) queue_next(&m->pageq);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				vm_page_activate(m);
				cnt.v_reactivated++;
			}
			else {
				register vm_object_t	object;
				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 * Can't lock object -
					 * skip page.
					 */
					m = next;
					continue;
				}
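				/*
				 * The page is clean and unreferenced:
				 * remove all of its mappings and free it.
				 */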
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);	/* will dequeue */
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
		}
		else {
			/*
			 * If a page is dirty, then it is either
			 * being washed (but not yet cleaned)
			 * or it is still in the laundry.  If it is
			 * still in the laundry, then we start the
			 * cleaning operation.
			 */

			if (m->laundry) {
				/*
				 * Clean the page and remove it from the
				 * laundry.
				 *
				 * We set the busy bit to cause
				 * potential page faults on this page to
				 * block.
				 *
				 * And we set pageout-in-progress to keep
				 * the object from disappearing during
				 * pageout.  This guarantees that the
				 * page won't move from the inactive
				 * queue.  (However, any other page on
				 * the inactive queue may move!)
				 */

				register vm_object_t	object;
				register vm_pager_t	pager;
				int			pageout_status;

				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 * Skip page if we can't lock
					 * its object
					 */
					m = (vm_page_t) queue_next(&m->pageq);
					continue;
				}

				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				m->busy = TRUE;
				cnt.v_pageouts++;

				/*
				 * Try to collapse the object before
				 * making a pager for it.  We must
				 * unlock the page queues first.
				 */
				vm_page_unlock_queues();

				vm_object_collapse(object);

				object->paging_in_progress++;
				vm_object_unlock(object);

				/*
				 * Do a wakeup here in case the following
				 * operations block.
				 */
				thread_wakeup((int) &cnt.v_free_count);

				/*
				 * If there is no pager for the page,
				 * use the default pager.  If there's
				 * no place to put the page at the
				 * moment, leave it in the laundry and
				 * hope that there will be paging space
				 * later.
				 */

				if ((pager = object->pager) == NULL) {
					pager = vm_pager_allocate(PG_DFLT,
								  (caddr_t)0,
								  object->size,
								  VM_PROT_ALL);
					if (pager != NULL) {
						vm_object_setpager(object,
							pager, 0, FALSE);
					}
				}
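				/*
				 * Hand the page to the pager; if we still
				 * have no pager, count it as a failed
				 * pageout.
				 */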
				pageout_status = pager ?
					vm_pager_put(pager, m, FALSE) :
					VM_PAGER_FAIL;
				vm_object_lock(object);
				vm_page_lock_queues();
				next = (vm_page_t) queue_next(&m->pageq);

				switch (pageout_status) {
				case VM_PAGER_OK:
				case VM_PAGER_PEND:
					m->laundry = FALSE;
					break;
				case VM_PAGER_BAD:
					/*
					 * Page outside of range of object.
					 * Right now we essentially lose the
					 * changes by pretending it worked.
					 * XXX dubious, what should we do?
					 */
					m->laundry = FALSE;
					m->clean = TRUE;
					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
					break;
				case VM_PAGER_FAIL:
					/*
					 * If the page couldn't be paged out,
					 * then reactivate it so it doesn't
					 * clog the inactive list.  (We will
					 * try paging it out again later.)
					 */
					vm_page_activate(m);
					break;
				}

				pmap_clear_reference(VM_PAGE_TO_PHYS(m));

				/*
				 * If the operation is still going, leave
				 * the page busy to block all other accesses.
				 * Also, leave the paging in progress
				 * indicator set so that we don't attempt an
				 * object collapse.
				 */
				if (pageout_status != VM_PAGER_PEND) {
					m->busy = FALSE;
					PAGE_WAKEUP(m);
					object->paging_in_progress--;
				}
				thread_wakeup((int) object);
				vm_object_unlock(object);
				m = next;
			}
			else
				m = (vm_page_t) queue_next(&m->pageq);
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage -= cnt.v_free_count;

	if ((page_shortage <= 0) && (pages_freed == 0))
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 * Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}

/*
 * vm_pageout is the high level pageout daemon.
 */

void vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	if (cnt.v_free_min == 0) {
		cnt.v_free_min = cnt.v_free_count / 20;
		if (cnt.v_free_min < 3)
			cnt.v_free_min = 3;

		if (cnt.v_free_min > vm_page_free_min_sanity)
			cnt.v_free_min = vm_page_free_min_sanity;
	}

	if (cnt.v_free_target == 0)
		cnt.v_free_target = (cnt.v_free_min * 4) / 3;

	if (cnt.v_inactive_target == 0)
		cnt.v_inactive_target = cnt.v_free_min * 2;

	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;

	if (cnt.v_inactive_target <= cnt.v_free_target)
		cnt.v_inactive_target = cnt.v_free_target + 1;
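
	/*
	 * For example, starting from 1000 free pages the defaults work
	 * out to: v_free_min = 1000/20 = 50, capped at the sanity value
	 * of 40; v_free_target = (40 * 4) / 3 = 53; v_inactive_target
	 * = 40 * 2 = 80.
	 */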

	/*
	 * The pageout daemon is never done, so loop
	 * forever.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
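		/*
		 * Wake up anybody waiting for free memory; the scan and
		 * pager sync above may have freed some pages.
		 */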
		thread_wakeup((int) &cnt.v_free_count);
	}
}