386BSD 0.1 development
[unix-history] usr/src/sys.386bsd/vm/vm_pager.c
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pager.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include "param.h"
#include "malloc.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_kern.h"

#include "swappager.h"

#if NSWAPPAGER > 0
extern struct pagerops swappagerops;
#else
#define	swappagerops	NULL
#endif
#include "vnodepager.h"
#if NVNODEPAGER > 0
extern struct pagerops vnodepagerops;
#else
#define	vnodepagerops	NULL
#endif
#include "devpager.h"
#if NDEVPAGER > 0
extern struct pagerops devicepagerops;
#else
#define	devicepagerops	NULL
#endif

struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;
vm_offset_t pager_sva, pager_eva;

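/*
 * Initialize the pager system: allocate the pager_map submap used for
 * get/put page mappings and run the init routine of each known pager.
 */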
void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Allocate a kernel submap for tracking get/put page mappings
	 */
	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
				  PAGER_MAP_SIZE, FALSE);
	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		(*(*pgops)->pgo_init)();
	if (dfltpagerops == NULL)
		panic("no default pager");
}

/*
 * Allocate an instance of a pager of the given type.
 */
vm_pager_t
vm_pager_allocate(type, handle, size, prot)
	int type;
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
{
	vm_pager_t pager;
	struct pagerops *ops;

	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
	return((*ops->pgo_alloc)(handle, size, prot));
}

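/*
 * Release a pager instance; a null pager is a fatal error.
 */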
void
vm_pager_deallocate(pager)
	vm_pager_t	pager;
{
	if (pager == NULL)
		panic("vm_pager_deallocate: null pager");

	VM_PAGER_DEALLOC(pager);
}

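/*
 * Read a page in via the pager.  A null pager means there is no backing
 * store, so the request is satisfied by zero-filling the page.
 */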
vm_pager_get(pager, m, sync)
	vm_pager_t	pager;
	vm_page_t	m;
	boolean_t	sync;
{
	extern boolean_t vm_page_zero_fill();

	if (pager == NULL)
		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
	return(VM_PAGER_GET(pager, m, sync));
}

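/*
 * Write a page out via the pager, synchronously if requested.
 */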
vm_pager_put(pager, m, sync)
	vm_pager_t	pager;
	vm_page_t	m;
	boolean_t	sync;
{
	if (pager == NULL)
		panic("vm_pager_put: null pager");
	return(VM_PAGER_PUT(pager, m, sync));
}

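/*
 * Return whether the pager has a copy of the page at the given offset.
 */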
boolean_t
vm_pager_has_page(pager, offset)
	vm_pager_t	pager;
	vm_offset_t	offset;
{
	if (pager == NULL)
		panic("vm_pager_has_page");
	return(VM_PAGER_HASPAGE(pager, offset));
}

/*
 * Called by the pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging operations.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
}

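/*
 * Map a page into the pager_map submap so a pager can do IO on it.
 * The mapping must later be released with vm_pager_unmap_page().
 */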
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

#ifdef DEBUG
	if (!m->busy || m->active)
		panic("vm_pager_map_page: page active or not busy");
	if (m->pagerowned)
		printf("vm_pager_map_page: page %x already in pager\n", m);
#endif
	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef DEBUG
	m->pagerowned = 1;
#endif
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
	return(kva);
}

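/*
 * Release a kernel mapping obtained from vm_pager_map_page().
 */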
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#ifdef DEBUG
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva));
#endif
	pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
#ifdef DEBUG
	if (m->pagerowned)
		m->pagerowned = 0;
	else
		printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n",
		       m, kva, VM_PAGE_TO_PHYS(m));
#endif
}

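/*
 * Look up a pager on the given list by its handle.
 */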
vm_pager_t
vm_pager_lookup(list, handle)
	register queue_head_t *list;
	caddr_t handle;
{
	register vm_pager_t pager;

	pager = (vm_pager_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t)pager)) {
		if (pager->pg_handle == handle)
			return(pager);
		pager = (vm_pager_t) queue_next(&pager->pg_list);
	}
	return(NULL);
}

/*
 * This routine gains a reference to the object.
 * Explicit deallocation is necessary.
 */
pager_cache(object, should_cache)
	vm_object_t	object;
	boolean_t	should_cache;
{
	if (object == NULL)
		return(KERN_INVALID_ARGUMENT);

	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = should_cache;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}