[unix-history] usr/src/sys/vm/vm_pageout.c — commit subject: "hack chgkprot to attempt to allow writing ktext without forcing"
175f072e 1/*
175f072e
KM
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
0e24ad83 8 * %sccs.include.redist.c%
175f072e 9 *
8faf81db 10 * @(#)vm_pageout.c 7.10 (Berkeley) %G%
0e24ad83
KM
11 *
12 *
13 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14 * All rights reserved.
15 *
16 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17 *
18 * Permission to use, copy, modify and distribute this software and
19 * its documentation is hereby granted, provided that both the copyright
20 * notice and this permission notice appear in all copies of the
21 * software, derivative works or modified versions, and any portions
22 * thereof, and that both notices appear in supporting documentation.
23 *
24 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27 *
28 * Carnegie Mellon requests users of this software to return to
29 *
30 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
31 * School of Computer Science
32 * Carnegie Mellon University
33 * Pittsburgh PA 15213-3890
34 *
35 * any improvements or extensions that they make and grant Carnegie the
36 * rights to redistribute these changes.
175f072e
KM
37 */
38
39/*
40 * The proverbial page-out daemon.
41 */
42
1692aa9f 43#include <sys/param.h>
ffe0d082 44
1692aa9f
KB
45#include <vm/vm.h>
46#include <vm/vm_page.h>
47#include <vm/vm_pageout.h>
175f072e 48
/*
 * Event address on which the pageout daemon sleeps; kicked (via
 * thread_wakeup) when the system wants pages reclaimed.
 */
int	vm_pages_needed;

/*
 * Upper bound applied to the auto-computed cnt.v_free_min in
 * vm_pageout() below, so the free-page floor never grows unreasonably
 * large on big-memory machines.
 */
int	vm_page_free_min_sanity = 40;
52
53/*
54 * vm_pageout_scan does the dirty work for the pageout daemon.
55 */
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 *
 * Pass 1: walk the inactive queue freeing clean pages and starting
 * pageouts on dirty ones until cnt.v_free_target free pages exist.
 * Pass 2: deactivate active pages to refill the inactive queue up to
 * cnt.v_inactive_target.
 *
 * Called from vm_pageout() with no locks held; acquires and releases
 * the page-queue and per-object locks internally.
 */
void
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 * Only continue when we want more pages to be "free".
	 * Sample the free count under splimp + the free-queue simple
	 * lock; the value may be stale by the time we act on it, which
	 * is acceptable for a heuristic.
	 */

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 * Be sure the pmap system is updated so
		 * we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 * Acquire the resident page system lock,
	 * as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or
	 * we have scanned through the entire queue.  If we
	 * encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;
		vm_object_t	object;
		vm_pager_t	pager;
		int		pageout_status;

		/* Re-sample the free count on every iteration so we
		 * stop as soon as the target is met. */
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= cnt.v_free_target)
			break;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue rather than reclaiming it.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			next = (vm_page_t) queue_next(&m->pageq);
			vm_page_activate(m);
			cnt.v_reactivated++;
			m = next;
			continue;
		}

		/*
		 * If the page is clean, free it up.  The object lock is
		 * only tried, never waited for, to preserve lock
		 * ordering (queues lock is already held); on failure the
		 * page is simply skipped this scan.
		 */
		if (m->flags & PG_CLEAN) {
			next = (vm_page_t) queue_next(&m->pageq);
			object = m->object;
			if (vm_object_lock_try(object)) {
				/* Remove all mappings first so no one
				 * can touch the page after it is freed. */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 *
		 * We set the busy bit to cause potential page faults on
		 * this page to block.
		 *
		 * We also set pageout-in-progress to keep the object from
		 * disappearing during pageout.  This guarantees that the
		 * page won't move from the inactive queue.  (However, any
		 * other page on the inactive queue may move!)
		 */
		object = m->object;
		if (!vm_object_lock_try(object)) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
		m->flags |= PG_BUSY;
		cnt.v_pageouts++;

		/*
		 * Try to collapse the object before making a pager for it.
		 * We must unlock the page queues first.
		 */
		vm_page_unlock_queues();
		vm_object_collapse(object);

		object->paging_in_progress++;
		vm_object_unlock(object);

		/*
		 * Do a wakeup here in case the following operations block.
		 */
		thread_wakeup((int) &cnt.v_free_count);

		/*
		 * If there is no pager for the page, use the default pager.
		 * If there is no place to put the page at the moment,
		 * leave it in the laundry and hope that there will be
		 * paging space later.
		 */
		if ((pager = object->pager) == NULL) {
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
						  object->size, VM_PROT_ALL);
			if (pager != NULL)
				vm_object_setpager(object, pager, 0, FALSE);
		}
		pageout_status = pager ?
			vm_pager_put(pager, m, FALSE) : VM_PAGER_FAIL;
		vm_object_lock(object);
		vm_page_lock_queues();
		/* NOTE(review): next is computed only after the locks are
		 * re-taken; the page itself was pinned by PG_BUSY and
		 * paging_in_progress, but neighbors may have moved. */
		next = (vm_page_t) queue_next(&m->pageq);

		switch (pageout_status) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			/* Pageout done (or queued): no longer dirty laundry. */
			m->flags &= ~PG_LAUNDRY;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 *
			 * XXX dubious, what should we do?
			 */
			m->flags &= ~PG_LAUNDRY;
			m->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			break;
		case VM_PAGER_FAIL:
		case VM_PAGER_ERROR:
			/*
			 * If page couldn't be paged out, then reactivate
			 * the page so it doesn't clog the inactive list.
			 * (We will try paging it out again later.)
			 */
			vm_page_activate(m);
			break;
		}

		pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * If the operation is still going, leave the page busy
		 * to block all other accesses.  Also, leave the paging
		 * in progress indicator set so that we don't attempt an
		 * object collapse.
		 */
		if (pageout_status != VM_PAGER_PEND) {
			m->flags &= ~PG_BUSY;
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
		}
		thread_wakeup((int) object);
		vm_object_unlock(object);
		m = next;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 * Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}
279
/*
 * vm_pageout is the high level pageout daemon.
 *
 * Runs forever as a kernel thread: it computes the paging thresholds
 * once at startup (only for those not already configured, i.e. still
 * zero), then loops sleeping on vm_pages_needed and running
 * vm_pageout_scan() each time it is woken.  Never returns.
 */

void vm_pageout()
{
	/* Daemon runs with interrupts enabled. */
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 * v_free_min defaults to 5% of free memory, clamped to
	 * [3, vm_page_free_min_sanity].
	 */

	if (cnt.v_free_min == 0) {
		cnt.v_free_min = cnt.v_free_count / 20;
		if (cnt.v_free_min < 3)
			cnt.v_free_min = 3;

		if (cnt.v_free_min > vm_page_free_min_sanity)
			cnt.v_free_min = vm_page_free_min_sanity;
	}

	/* Target defaults to 4/3 of the minimum, and must exceed it. */
	if (cnt.v_free_target == 0)
		cnt.v_free_target = (cnt.v_free_min * 4) / 3;

	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;

	/*
	 * The pageout daemon is never done, so loop
	 * forever.
	 *
	 * The simple lock is held entering thread_sleep (which releases
	 * it) and re-acquired after each scan before sleeping again.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		/*
		 * Compute the inactive target for this scan.
		 * We need to keep a reasonable amount of memory in the
		 * inactive list to better simulate LRU behavior.
		 */
		cnt.v_inactive_target =
			(cnt.v_active_count + cnt.v_inactive_count) / 3;
		if (cnt.v_inactive_target <= cnt.v_free_target)
			cnt.v_inactive_target = cnt.v_free_target + 1;

		vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		/* Tell anyone waiting for free pages that a scan finished. */
		thread_wakeup((int) &cnt.v_free_count);
	}
}