additions to allow single stepping (from ralph)
[unix-history] / usr / src / sys / vm / vm_pager.c
CommitLineData
175f072e 1/*
ad0f93d2
KB
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
175f072e
KM
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
0e24ad83 8 * %sccs.include.redist.c%
175f072e 9 *
ad0f93d2 10 * @(#)vm_pager.c 8.1 (Berkeley) %G%
0e24ad83
KM
11 *
12 *
13 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14 * All rights reserved.
15 *
16 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17 *
18 * Permission to use, copy, modify and distribute this software and
19 * its documentation is hereby granted, provided that both the copyright
20 * notice and this permission notice appear in all copies of the
21 * software, derivative works or modified versions, and any portions
22 * thereof, and that both notices appear in supporting documentation.
23 *
24 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27 *
28 * Carnegie Mellon requests users of this software to return to
29 *
30 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
31 * School of Computer Science
32 * Carnegie Mellon University
33 * Pittsburgh PA 15213-3890
34 *
35 * any improvements or extensions that they make and grant Carnegie the
36 * rights to redistribute these changes.
175f072e
KM
37 */
38
39/*
40 * Paging space routine stubs. Emulates a matchmaker-like interface
41 * for builtin pagers.
42 */
43
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

/*
 * Operation vectors for each pager type compiled into this kernel;
 * each is defined by the corresponding pager implementation.
 */
#ifdef SWAPPAGER
extern struct pagerops swappagerops;
#endif

#ifdef VNODEPAGER
extern struct pagerops vnodepagerops;
#endif

#ifdef DEVPAGER
extern struct pagerops devicepagerops;
#endif

/*
 * Table of configured pagers, indexed by pager type (PG_*).
 * NB: entries are conditionally compiled, so the PG_* index noted in
 * each comment only holds when every earlier pager is configured too.
 */
struct pagerops *pagertab[] = {
#ifdef SWAPPAGER
	&swappagerops,		/* PG_SWAP */
#endif
#ifdef VNODEPAGER
	&vnodepagerops,		/* PG_VNODE */
#endif
#ifdef DEVPAGER
	&devicepagerops,	/* PG_DEV */
#endif
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/*
 * Must be non-NULL by the end of vm_pager_init() (which panics
 * otherwise) — presumably set by one of the pgo_init routines;
 * confirm against the configured pagers.
 */
struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;
vm_offset_t pager_sva, pager_eva;	/* bounds of the pager submap */
86
87void
88vm_pager_init()
89{
175f072e
KM
90 struct pagerops **pgops;
91
92 /*
93 * Allocate a kernel submap for tracking get/put page mappings
94 */
11371b93 95 pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
175f072e
KM
96 PAGER_MAP_SIZE, FALSE);
97 /*
98 * Initialize known pagers
99 */
100 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
101 (*(*pgops)->pgo_init)();
ffe0d082 102 if (dfltpagerops == NULL)
175f072e
KM
103 panic("no default pager");
104}
105
106/*
107 * Allocate an instance of a pager of the given type.
108 */
109vm_pager_t
110vm_pager_allocate(type, handle, size, prot)
111 int type;
112 caddr_t handle;
113 vm_size_t size;
114 vm_prot_t prot;
115{
116 vm_pager_t pager;
117 struct pagerops *ops;
118
119 ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
120 return((*ops->pgo_alloc)(handle, size, prot));
121}
122
123void
124vm_pager_deallocate(pager)
125 vm_pager_t pager;
126{
ffe0d082 127 if (pager == NULL)
175f072e
KM
128 panic("vm_pager_deallocate: null pager");
129
130 VM_PAGER_DEALLOC(pager);
131}
132
1692aa9f 133int
175f072e
KM
134vm_pager_get(pager, m, sync)
135 vm_pager_t pager;
136 vm_page_t m;
137 boolean_t sync;
138{
139 extern boolean_t vm_page_zero_fill();
140
ffe0d082 141 if (pager == NULL)
175f072e
KM
142 return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
143 return(VM_PAGER_GET(pager, m, sync));
144}
145
1692aa9f 146int
175f072e
KM
147vm_pager_put(pager, m, sync)
148 vm_pager_t pager;
149 vm_page_t m;
150 boolean_t sync;
151{
ffe0d082 152 if (pager == NULL)
175f072e
KM
153 panic("vm_pager_put: null pager");
154 return(VM_PAGER_PUT(pager, m, sync));
155}
156
157boolean_t
158vm_pager_has_page(pager, offset)
159 vm_pager_t pager;
160 vm_offset_t offset;
161{
ffe0d082 162 if (pager == NULL)
175f072e
KM
163 panic("vm_pager_has_page");
164 return(VM_PAGER_HASPAGE(pager, offset));
165}
166
167/*
168 * Called by pageout daemon before going back to sleep.
169 * Gives pagers a chance to clean up any completed async pageing operations.
170 */
171void
172vm_pager_sync()
173{
174 struct pagerops **pgops;
175
176 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
ffe0d082 177 (*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
175f072e
KM
178}
179
/*
 * Map a single page into the pager submap and return the kernel
 * virtual address at which it is mapped.  May sleep in
 * kmem_alloc_wait() until submap space is available.
 * Undone by vm_pager_unmap_page().
 *
 * DEBUG kernels insist the page is busy and warn if it already
 * appears to be owned by a pager mapping.
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

#ifdef DEBUG
	if ((m->flags & PG_BUSY) == 0)
		panic("vm_pager_map_page: page not busy");
	if (m->flags & PG_PAGEROWNED)
		printf("vm_pager_map_page: page %x already in pager\n", m);
#endif
	/* May block until PAGE_SIZE bytes of the submap free up. */
	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef DEBUG
	/* Mark ownership so unmap can sanity-check it later. */
	m->flags |= PG_PAGEROWNED;
#endif
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
	return(kva);
}
200
/*
 * Tear down a mapping made by vm_pager_map_page(): remove the
 * translation and return the KVA to the pager submap, waking any
 * sleepers in kmem_alloc_wait().
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
#ifdef DEBUG
	vm_page_t m;

	/* Must look up the page before the translation is removed below. */
	m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva));
#endif
	pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
#ifdef DEBUG
	/* Clear the ownership mark set in vm_pager_map_page(). */
	if (m->flags & PG_PAGEROWNED)
		m->flags &= ~PG_PAGEROWNED;
	else
		printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n",
		       m, kva, VM_PAGE_TO_PHYS(m));
#endif
}
220
221vm_pager_t
222vm_pager_lookup(list, handle)
223 register queue_head_t *list;
224 caddr_t handle;
225{
226 register vm_pager_t pager;
227
228 pager = (vm_pager_t) queue_first(list);
229 while (!queue_end(list, (queue_entry_t)pager)) {
230 if (pager->pg_handle == handle)
231 return(pager);
232 pager = (vm_pager_t) queue_next(&pager->pg_list);
233 }
ffe0d082 234 return(NULL);
175f072e
KM
235}
236
237/*
238 * This routine gains a reference to the object.
239 * Explicit deallocation is necessary.
240 */
1692aa9f 241int
175f072e
KM
242pager_cache(object, should_cache)
243 vm_object_t object;
244 boolean_t should_cache;
245{
ffe0d082 246 if (object == NULL)
175f072e
KM
247 return(KERN_INVALID_ARGUMENT);
248
249 vm_object_cache_lock();
250 vm_object_lock(object);
a9e495d8
KM
251 if (should_cache)
252 object->flags |= OBJ_CANPERSIST;
253 else
254 object->flags &= ~OBJ_CANPERSIST;
175f072e
KM
255 vm_object_unlock(object);
256 vm_object_cache_unlock();
257
258 vm_object_deallocate(object);
259
260 return(KERN_SUCCESS);
261}