Commit | Line | Data |
---|---|---|
b688fc87 WJ |
1 | /* |
2 | * Copyright (c) 1990 University of Utah. | |
3 | * Copyright (c) 1991 The Regents of the University of California. | |
4 | * All rights reserved. | |
5 | * | |
6 | * This code is derived from software contributed to Berkeley by | |
7 | * the Systems Programming Group of the University of Utah Computer | |
8 | * Science Department. | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | |
18 | * 3. All advertising materials mentioning features or use of this software | |
19 | * must display the following acknowledgement: | |
20 | * This product includes software developed by the University of | |
21 | * California, Berkeley and its contributors. | |
22 | * 4. Neither the name of the University nor the names of its contributors | |
23 | * may be used to endorse or promote products derived from this software | |
24 | * without specific prior written permission. | |
25 | * | |
26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
36 | * SUCH DAMAGE. | |
37 | * | |
38 | * @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91 | |
39 | */ | |
40 | ||
41 | /* | |
42 | * Page to/from files (vnodes). | |
43 | * | |
44 | * TODO: | |
45 | * pageouts | |
46 | * fix credential use (uses current process credentials now) | |
47 | */ | |
48 | #include "vnodepager.h" | |
49 | #if NVNODEPAGER > 0 | |
50 | ||
51 | #include "param.h" | |
52 | #include "proc.h" | |
53 | #include "malloc.h" | |
54 | #include "vnode.h" | |
55 | #include "uio.h" | |
56 | #include "mount.h" | |
57 | ||
58 | #include "vm_param.h" | |
59 | #include "lock.h" | |
60 | #include "queue.h" | |
61 | #include "vm_prot.h" | |
62 | #include "vm_object.h" | |
63 | #include "vm_page.h" | |
64 | #include "vnode_pager.h" | |
65 | ||
queue_head_t	vnode_pager_list;	/* list of managed vnodes (one entry per active vnode pager) */

#ifdef DEBUG
/* Bitmask of debug trace categories; intended to be set from a debugger. */
int	vpagerdebug = 0x00;
#define VDB_FOLLOW	0x01	/* trace function entry */
#define VDB_INIT	0x02	/* initialization (unused in this file) */
#define VDB_IO		0x04	/* actual page I/O */
#define VDB_FAIL	0x08	/* failure paths */
#define VDB_ALLOC	0x10	/* pager allocation/lookup */
#define VDB_SIZE	0x20	/* EOF / size-change checks */
#endif
77 | ||
78 | void | |
79 | vnode_pager_init() | |
80 | { | |
81 | #ifdef DEBUG | |
82 | if (vpagerdebug & VDB_FOLLOW) | |
83 | printf("vnode_pager_init()\n"); | |
84 | #endif | |
85 | queue_init(&vnode_pager_list); | |
86 | } | |
87 | ||
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * Returns the vnode's existing pager if it already has one, otherwise
 * allocates a new pager plus a backing VM object sized from the file's
 * current length.  Returns NULL if handle is NULL (anonymous pageout
 * pager, unsupported) or on allocation/getattr failure.
 */
vm_pager_t
vnode_pager_alloc(handle, size, prot)
	caddr_t handle;		/* really a struct vnode * */
	vm_size_t size;		/* unused: object sized from file length */
	vm_prot_t prot;		/* unused */
{
	register vm_pager_t pager;
	register vn_pager_t vnp;
	vm_object_t object;
	struct vattr vattr;
	struct vnode *vp;
	struct proc *p = curproc;	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_ALLOC))
		printf("vnode_pager_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return(NULL);

	/*
	 * Vnodes keep a pointer to any associated pager so no need to
	 * lookup with vm_pager_lookup.
	 */
	vp = (struct vnode *)handle;
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL) {
		/*
		 * Allocate pager structures
		 */
		pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
		if (pager == NULL)
			return(NULL);
		vnp = (vn_pager_t)malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK);
		if (vnp == NULL) {
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * And an object of the appropriate size,
		 * rounded up from the file's current length.
		 */
		if (VOP_GETATTR(vp, &vattr, p->p_ucred, p) == 0) {
			object = vm_object_allocate(round_page(vattr.va_size));
			vm_object_enter(object, pager);
			vm_object_setpager(object, pager, 0, TRUE);
		} else {
			/* could not stat the file; undo both allocations */
			free((caddr_t)vnp, M_VMPGDATA);
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * Hold a reference to the vnode and initialize pager data.
		 * The reference is released in vnode_pager_dealloc().
		 */
		VREF(vp);
		vnp->vnp_flags = 0;
		vnp->vnp_vp = vp;
		vnp->vnp_size = vattr.va_size;
		queue_enter(&vnode_pager_list, pager, vm_pager_t, pg_list);
		pager->pg_handle = handle;
		pager->pg_type = PG_VNODE;
		pager->pg_ops = &vnodepagerops;
		pager->pg_data = (caddr_t)vnp;
		vp->v_vmdata = (caddr_t)pager;
	} else {
		/*
		 * vm_object_lookup() will remove the object from the
		 * cache if found and also gain a reference to the object.
		 */
		object = vm_object_lookup(pager);
#ifdef DEBUG
		vnp = (vn_pager_t)pager->pg_data;
#endif
	}
#ifdef DEBUG
	if (vpagerdebug & VDB_ALLOC)
		printf("vnode_pager_setup: vp %x sz %x pager %x object %x\n",
		       vp, vnp->vnp_size, pager, object);
#endif
	return(pager);
}
175 | ||
/*
 * Tear down a vnode pager: break the vnode<->pager association, drop
 * the vnode reference taken in vnode_pager_alloc(), and free the
 * pager structures.
 */
void
vnode_pager_dealloc(pager)
	vm_pager_t pager;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	register struct vnode *vp;
	struct proc *p = curproc;	/* XXX; unused while the FSYNC below is disabled */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_dealloc(%x)\n", pager);
#endif
	if (vp = vnp->vnp_vp) {
		/* detach pager and clear the "is an executable image" flag */
		vp->v_vmdata = NULL;
		vp->v_flag &= ~VTEXT;
#if 0
		/* can hang if done at reboot on NFS FS */
		(void) VOP_FSYNC(vp, p->p_ucred, p);
#endif
		vrele(vp);
	}
	queue_remove(&vnode_pager_list, pager, vm_pager_t, pg_list);
	free((caddr_t)vnp, M_VMPGDATA);
	free((caddr_t)pager, M_VMPAGER);
}
201 | ||
202 | vnode_pager_getpage(pager, m, sync) | |
203 | vm_pager_t pager; | |
204 | vm_page_t m; | |
205 | boolean_t sync; | |
206 | { | |
207 | ||
208 | #ifdef DEBUG | |
209 | if (vpagerdebug & VDB_FOLLOW) | |
210 | printf("vnode_pager_getpage(%x, %x)\n", pager, m); | |
211 | #endif | |
212 | return(vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_READ)); | |
213 | } | |
214 | ||
215 | boolean_t | |
216 | vnode_pager_putpage(pager, m, sync) | |
217 | vm_pager_t pager; | |
218 | vm_page_t m; | |
219 | boolean_t sync; | |
220 | { | |
221 | int err; | |
222 | ||
223 | #ifdef DEBUG | |
224 | if (vpagerdebug & VDB_FOLLOW) | |
225 | printf("vnode_pager_putpage(%x, %x)\n", pager, m); | |
226 | #endif | |
227 | if (pager == NULL) | |
228 | return; | |
229 | err = vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_WRITE); | |
230 | if (err == VM_PAGER_OK) { | |
231 | m->clean = TRUE; /* XXX - wrong place */ | |
232 | pmap_clear_modify(VM_PAGE_TO_PHYS(m)); /* XXX - wrong place */ | |
233 | } | |
234 | return(err); | |
235 | } | |
236 | ||
/*
 * Report whether the backing file can supply data for the page at
 * the given offset: FALSE when the offset is past EOF or the file
 * has a hole there, TRUE otherwise.  On a bmap error we return TRUE
 * so the real error surfaces on the subsequent pagein attempt.
 */
boolean_t
vnode_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	daddr_t bn;
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_haspage(%x, %x)\n", pager, offset);
#endif

	/*
	 * Offset beyond end of file, do not have the page
	 */
	if (offset >= vnp->vnp_size) {
#ifdef DEBUG
		if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
			printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
			       pager, offset, vnp->vnp_size);
#endif
		return(FALSE);
	}

	/*
	 * Read the index to find the disk block to read
	 * from.  If there is no block, report that we don't
	 * have this data.
	 *
	 * Assumes that the vnode has whole page or nothing.
	 */
	err = VOP_BMAP(vnp->vnp_vp,
		       offset / vnp->vnp_vp->v_mount->mnt_stat.f_bsize,
		       (struct vnode **)0, &bn);
	if (err) {
#ifdef DEBUG
		if (vpagerdebug & VDB_FAIL)
			printf("vnode_pager_haspage: BMAP err %d, pg %x, off %x\n",
			       err, pager, offset);
#endif
		/* bmap failed: claim the page exists so pagein reports it */
		return(TRUE);
	}
	/* a negative block number marks a hole (no backing disk block) */
	return((long)bn < 0 ? FALSE : TRUE);
}
283 | ||
/*
 * (XXX)
 * Lets the VM system know about a change in size for a file.
 * If this vnode is mapped into some address space (i.e. we have a pager
 * for it) we adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long nsize;		/* new file size in bytes */
{
	register vn_pager_t vnp;
	register vm_object_t object;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	if (vp == NULL || vp->v_type != VREG || vp->v_vmdata == NULL)
		return;
	/*
	 * Hasn't changed size
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	vnp = (vn_pager_t)pager->pg_data;
	if (nsize == vnp->vnp_size)
		return;
	/*
	 * No object.
	 * This can happen during object termination since
	 * vm_object_page_clean is called after the object
	 * has been removed from the hash table, and clean
	 * may cause vnode write operations which can wind
	 * up back here.
	 */
	object = vm_object_lookup(pager);
	if (object == NULL)
		return;

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_SIZE))
		printf("vnode_pager_setsize: vp %x obj %x osz %d nsz %d\n",
		       vp, object, vnp->vnp_size, nsize);
#endif
	/*
	 * File has shrunk.
	 * Toss any cached pages beyond the new EOF.
	 */
	if (nsize < vnp->vnp_size) {
		vm_object_lock(object);
		vm_object_page_remove(object,
				      (vm_offset_t)nsize, vnp->vnp_size);
		vm_object_unlock(object);
	}
	vnp->vnp_size = (vm_offset_t)nsize;
	/* drop the reference gained by vm_object_lookup() above */
	vm_object_deallocate(object);
}
344 | ||
/*
 * Uncache every vnode pager belonging to the given mount point
 * (or every pager at all when mp is NULL, e.g. at reboot).
 */
vnode_pager_umount(mp)
	register struct mount *mp;
{
	register vm_pager_t pager, npager;
	struct vnode *vp;

	pager = (vm_pager_t) queue_first(&vnode_pager_list);
	while (!queue_end(&vnode_pager_list, (queue_entry_t)pager)) {
		/*
		 * Save the next pointer now since uncaching may
		 * terminate the object and render pager invalid
		 */
		vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
		npager = (vm_pager_t) queue_next(&pager->pg_list);
		if (mp == (struct mount *)0 || vp->v_mount == mp)
			(void) vnode_pager_uncache(vp);
		pager = npager;
	}
}
364 | ||
/*
 * Remove vnode associated object from the object cache.
 *
 * Returns TRUE if the object is no longer cached (no other references
 * remained, or the vnode had no pager/object at all), FALSE if some
 * other reference still holds the object alive.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
boolean_t
vnode_pager_uncache(vp)
	register struct vnode *vp;
{
	register vm_object_t object;
	boolean_t uncached, locked;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL)
		return (TRUE);
	/*
	 * Unlock the vnode if it is currently locked.
	 * We do this since uncaching the object may result
	 * in its destruction which may initiate paging
	 * activity which may necessitate locking the vnode.
	 */
	locked = VOP_ISLOCKED(vp);
	if (locked)
		VOP_UNLOCK(vp);
	/*
	 * Must use vm_object_lookup() as it actually removes
	 * the object from the cache list.
	 */
	object = vm_object_lookup(pager);
	if (object) {
		/* lookup itself gained one reference, hence <= 1 */
		uncached = (object->ref_count <= 1);
		pager_cache(object, FALSE);
	} else
		uncached = TRUE;
	/* restore the vnode lock state we found on entry */
	if (locked)
		VOP_LOCK(vp);
	return(uncached);
}
408 | ||
/*
 * Perform the actual page I/O: read or write one page of the vnode
 * through a temporary kernel mapping of the page.
 *
 * Returns VM_PAGER_OK on success, VM_PAGER_BAD when the page lies
 * entirely beyond the current EOF, or VM_PAGER_FAIL on an I/O error.
 * Uses the current process's credentials for the transfer (see the
 * "fix credential use" TODO at the top of this file).
 */
vnode_pager_io(vnp, m, rw)
	register vn_pager_t vnp;
	vm_page_t m;
	enum uio_rw rw;		/* UIO_READ (pagein) or UIO_WRITE (pageout) */
{
	struct uio auio;
	struct iovec aiov;
	vm_offset_t kva, foff;
	int error, size;
	struct proc *p = curproc;	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_io(%x, %x, %c): vnode %x\n",
		       vnp, m, rw == UIO_READ ? 'R' : 'W', vnp->vnp_vp);
#endif
	/* byte offset of this page within the backing file */
	foff = m->offset + m->object->paging_offset;
	/*
	 * Return failure if beyond current EOF
	 */
	if (foff >= vnp->vnp_size) {
#ifdef DEBUG
		if (vpagerdebug & VDB_SIZE)
			printf("vnode_pager_io: vp %x, off %d size %d\n",
			       vnp->vnp_vp, foff, vnp->vnp_size);
#endif
		return(VM_PAGER_BAD);
	}
	/* clip the transfer length to EOF for a partial last page */
	if (foff + PAGE_SIZE > vnp->vnp_size)
		size = vnp->vnp_size - foff;
	else
		size = PAGE_SIZE;
	/*
	 * Allocate a kernel virtual address and initialize so that
	 * we can use VOP_READ/WRITE routines.
	 */
	kva = vm_pager_map_page(m);
	aiov.iov_base = (caddr_t)kva;
	aiov.iov_len = size;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = foff;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = rw;
	auio.uio_resid = size;
	auio.uio_procp = (struct proc *)0;
#ifdef DEBUG
	if (vpagerdebug & VDB_IO)
		printf("vnode_pager_io: vp %x kva %x foff %x size %x",
		       vnp->vnp_vp, kva, foff, size);
#endif
	if (rw == UIO_READ)
		error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
	else
		error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
#ifdef DEBUG
	if (vpagerdebug & VDB_IO) {
		if (error || auio.uio_resid)
			printf(" returns error %x, resid %x",
			       error, auio.uio_resid);
		printf("\n");
	}
#endif
	if (!error) {
		register int count = size - auio.uio_resid;

		/* a zero-length transfer below EOF should not happen */
		if (count == 0)
			error = EINVAL;
		else if (count != PAGE_SIZE && rw == UIO_READ)
			/* zero-fill the tail of a short read */
			bzero(kva + count, PAGE_SIZE - count);
	}
	vm_pager_unmap_page(kva);
	return (error ? VM_PAGER_FAIL : VM_PAGER_OK);
}
483 | #endif |