Commit a whole cluster of last minute critical (and one cosmetic) fixes
[unix-history] / sys / vm / vm_pager.c
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_pager.c,v 1.10 1994/01/31 04:21:43 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include "param.h"
#include "systm.h"
#include "malloc.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_kern.h"

extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

struct pagerops *dfltpagerops = NULL;	/* default pager */
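/*
 * Illustrative sketch only (not from the original source): a client of this
 * matchmaker interface allocates a pager of some type and then calls the
 * vm_pager_* stubs below, which dispatch through pagertab/dfltpagerops:
 *
 *	vm_pager_t pager;
 *
 *	pager = vm_pager_allocate(PG_DFLT, (caddr_t)0, size, VM_PROT_DEFAULT, 0);
 *	if (vm_pager_get(pager, m, TRUE) != VM_PAGER_OK)
 *		...
 *	vm_pager_deallocate(pager);
 *
 * The particular arguments shown are assumptions made for illustration.
 */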

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(1024 * PAGE_SIZE)
vm_map_t pager_map;
vm_offset_t pager_sva, pager_eva;
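/*
 * (pager_map is carved out of kernel_map by vm_pager_init() below; pages
 * are mapped into it one at a time by vm_pager_map_page() and released by
 * vm_pager_unmap_page().)
 */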

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Allocate a kernel submap for tracking get/put page mappings
	 */
	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
				  PAGER_MAP_SIZE, FALSE);
	/*
	 * Initialize known pagers
	 * If pgops is a null pointer skip over it.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (*pgops)
			(*(*pgops)->pgo_init)();
	if (dfltpagerops == NULL)
		panic("no default pager");
}

/*
 * Allocate an instance of a pager of the given type.
 */
vm_pager_t
vm_pager_allocate(type, handle, size, prot, off)
	int type;
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t off;
{
	vm_pager_t pager;
	struct pagerops *ops;

	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
	return((*ops->pgo_alloc)(handle, size, prot, off));
}

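/*
 * Release a pager instance; a null pager is a fatal error.
 */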
void
vm_pager_deallocate(pager)
	vm_pager_t pager;
{
	if (pager == NULL)
		panic("vm_pager_deallocate: null pager");

	VM_PAGER_DEALLOC(pager);
}

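/*
 * Read a run of `count' pages into `m' through the pager; `reqpage' and
 * `sync' are passed through to the pager.  With no pager backing the
 * object, the pages are simply zero-filled.
 */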
int
vm_pager_getmulti(pager, m, count, reqpage, sync)
	vm_pager_t pager;
	vm_page_t m;
	int count;
	int reqpage;
	boolean_t sync;
{
	extern boolean_t vm_page_zero_fill();
	extern int vm_pageout_count;
	int i;

	if (pager == NULL) {
		for (i = 0; i < count; i++)
			vm_page_zero_fill(m + i);
		return VM_PAGER_OK;
	}
	return(VM_PAGER_GET_MULTI(pager, m, count, reqpage, sync));
}

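/*
 * Write out a run of `count' pages.  If the pager has no pgo_putmulti
 * entry point, fall back to one VM_PAGER_PUT call per page, collecting
 * the per-page status in `rtvals'.
 */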
int
vm_pager_putmulti(pager, m, count, sync, rtvals)
	vm_pager_t pager;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;

	if (pager->pg_ops->pgo_putmulti)
		return(VM_PAGER_PUT_MULTI(pager, m, count, sync, rtvals));
	else {
		for (i = 0; i < count; i++) {
			rtvals[i] = VM_PAGER_PUT(pager, m[i], sync);
		}
		return 1;
	}
}

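/*
 * Read a single page through the pager; with no pager the page is
 * zero-filled instead.
 */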
int
vm_pager_get(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
	extern boolean_t vm_page_zero_fill();

	if (pager == NULL)
		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
	return(VM_PAGER_GET(pager, m, sync));
}

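/*
 * Write a single page back through its pager; a null pager is a
 * fatal error.
 */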
int
vm_pager_put(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
	if (pager == NULL)
		panic("vm_pager_put: null pager");
	return(VM_PAGER_PUT(pager, m, sync));
}

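/*
 * Ask the pager whether it has backing store for the given offset.
 */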
boolean_t
vm_pager_has_page(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	if (pager == NULL)
		panic("vm_pager_has_page");
	return(VM_PAGER_HASPAGE(pager, offset));
}

/*
 * Called by pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging operations.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

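	/*
	 * The null page and null pager arguments presumably act as a
	 * "drain completed requests" signal to each pager's putpage
	 * routine rather than a request to page anything out.
	 */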
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
}

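/*
 * Map a single page into the pager_map submap and return its kernel
 * virtual address, for pagers that need a KVA to do IO.
 */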
vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

#ifdef DEBUG
	if (!(m->flags & PG_BUSY) || (m->flags & PG_ACTIVE))
		panic("vm_pager_map_page: page active or not busy");
	if (m->flags & PG_PAGEROWNED)
		printf("vm_pager_map_page: page %x already in pager\n", m);
#endif
	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef DEBUG
	m->flags |= PG_PAGEROWNED;
#endif
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
	return(kva);
}

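/*
 * Release a kernel virtual address previously obtained from
 * vm_pager_map_page().
 */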
void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
#ifdef DEBUG
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva));
#endif
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
#ifdef DEBUG
	if (m->flags & PG_PAGEROWNED)
		m->flags &= ~PG_PAGEROWNED;
	else
		printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n",
		       m, kva, VM_PAGE_TO_PHYS(m));
#endif
}

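/*
 * Scan a pager list for the pager whose handle matches `handle';
 * returns NULL if no match is found.
 */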
vm_pager_t
vm_pager_lookup(list, handle)
	register queue_head_t *list;
	caddr_t handle;
{
	register vm_pager_t pager;

	pager = (vm_pager_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t)pager)) {
		if (pager->pg_handle == handle)
			return(pager);
		pager = (vm_pager_t) queue_next(&pager->pg_list);
	}
	return(NULL);
}

/*
 * This routine gains a reference to the object.
 * Explicit deallocation is necessary.
 */
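/*
 * (That is: the caller is presumably expected to hold a reference, which
 * the vm_object_deallocate() call below releases.)
 */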
int
pager_cache(object, should_cache)
	vm_object_t object;
	boolean_t should_cache;
{
	if (object == NULL)
		return(KERN_INVALID_ARGUMENT);

	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = should_cache;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}