| 1 | /* |
| 2 | * Copyright (c) 1990 University of Utah. |
| 3 | * Copyright (c) 1991 The Regents of the University of California. |
| 4 | * All rights reserved. |
| 5 | * |
| 6 | * This code is derived from software contributed to Berkeley by |
| 7 | * the Systems Programming Group of the University of Utah Computer |
| 8 | * Science Department. |
| 9 | * |
| 10 | * %sccs.include.redist.c% |
| 11 | * |
| 12 | * @(#)device_pager.c 7.6 (Berkeley) %G% |
| 13 | */ |
| 14 | |
| 15 | /* |
| 16 | * Page to/from special files. |
| 17 | */ |
| 18 | |
| 19 | #include "devpager.h" |
| 20 | #if NDEVPAGER > 0 |
| 21 | |
| 22 | #include <sys/param.h> |
| 23 | #include <sys/systm.h> |
| 24 | #include <sys/conf.h> |
| 25 | #include <sys/mman.h> |
| 26 | #include <sys/malloc.h> |
| 27 | |
| 28 | #include <vm/vm.h> |
| 29 | #include <vm/vm_kern.h> |
| 30 | #include <vm/vm_page.h> |
| 31 | #include <vm/device_pager.h> |
| 32 | |
/* All device pagers created so far, chained through devp_list. */
queue_head_t	dev_pager_list;	/* list of managed devices */

#ifdef DEBUG
int	dpagerdebug = 0;	/* bitmask of DDB_* trace flags below */
#define DDB_FOLLOW	0x01	/* trace entry into each pager operation */
#define DDB_INIT	0x02	/* trace initialization */
#define DDB_ALLOC	0x04	/* trace pager allocation/deallocation */
#define DDB_FAIL	0x08	/* trace failure paths */
#endif

/* Forward declarations for the pager-operations vector below. */
static vm_pager_t	 dev_pager_alloc __P((caddr_t, vm_size_t, vm_prot_t));
static void		 dev_pager_dealloc __P((vm_pager_t));
static int		 dev_pager_getpage
			    __P((vm_pager_t, vm_page_t, boolean_t));
static boolean_t	 dev_pager_haspage __P((vm_pager_t, vm_offset_t));
static void		 dev_pager_init __P((void));
static int		 dev_pager_putpage
			    __P((vm_pager_t, vm_page_t, boolean_t));

/*
 * Operations vector exported to the generic pager layer; slot order
 * must match struct pagerops (init, alloc, dealloc, get, put, haspage).
 */
struct pagerops devicepagerops = {
	dev_pager_init,
	dev_pager_alloc,
	dev_pager_dealloc,
	dev_pager_getpage,
	dev_pager_putpage,
	dev_pager_haspage
};
| 60 | |
| 61 | static void |
| 62 | dev_pager_init() |
| 63 | { |
| 64 | #ifdef DEBUG |
| 65 | if (dpagerdebug & DDB_FOLLOW) |
| 66 | printf("dev_pager_init()\n"); |
| 67 | #endif |
| 68 | queue_init(&dev_pager_list); |
| 69 | } |
| 70 | |
| 71 | static vm_pager_t |
| 72 | dev_pager_alloc(handle, size, prot) |
| 73 | caddr_t handle; |
| 74 | vm_size_t size; |
| 75 | vm_prot_t prot; |
| 76 | { |
| 77 | dev_t dev; |
| 78 | vm_pager_t pager; |
| 79 | int (*mapfunc)(), nprot; |
| 80 | register vm_object_t object; |
| 81 | register vm_page_t page; |
| 82 | register dev_pager_t devp; |
| 83 | register int npages, off; |
| 84 | extern int nullop(), enodev(); |
| 85 | |
| 86 | |
| 87 | #ifdef DEBUG |
| 88 | if (dpagerdebug & DDB_FOLLOW) |
| 89 | printf("dev_pager_alloc(%x, %x, %x)\n", handle, size, prot); |
| 90 | #endif |
| 91 | /* |
| 92 | * Pageout to device, should never happen. |
| 93 | */ |
| 94 | if (handle == NULL) |
| 95 | panic("dev_pager_alloc called"); |
| 96 | |
| 97 | /* |
| 98 | * Look it up, creating as necessary |
| 99 | */ |
| 100 | pager = vm_pager_lookup(&dev_pager_list, handle); |
| 101 | if (pager == NULL) { |
| 102 | /* |
| 103 | * Validation. Make sure this device can be mapped |
| 104 | * and that range to map is acceptible to device. |
| 105 | */ |
| 106 | dev = (dev_t)handle; |
| 107 | mapfunc = cdevsw[major(dev)].d_mmap; |
| 108 | if (!mapfunc || mapfunc == enodev || mapfunc == nullop) |
| 109 | return(NULL); |
| 110 | nprot = 0; |
| 111 | if (prot & VM_PROT_READ) |
| 112 | nprot |= PROT_READ; |
| 113 | if (prot & VM_PROT_WRITE) |
| 114 | nprot |= PROT_WRITE; |
| 115 | if (prot & VM_PROT_EXECUTE) |
| 116 | nprot |= PROT_EXEC; |
| 117 | npages = atop(round_page(size)); |
| 118 | for (off = 0; npages--; off += PAGE_SIZE) |
| 119 | if ((*mapfunc)(dev, off, nprot) == -1) |
| 120 | return(NULL); |
| 121 | /* |
| 122 | * Allocate and initialize pager structs |
| 123 | */ |
| 124 | pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK); |
| 125 | if (pager == NULL) |
| 126 | return(NULL); |
| 127 | devp = (dev_pager_t)malloc(sizeof *devp, M_VMPGDATA, M_WAITOK); |
| 128 | if (devp == NULL) { |
| 129 | free((caddr_t)pager, M_VMPAGER); |
| 130 | return(NULL); |
| 131 | } |
| 132 | devp->devp_dev = dev; |
| 133 | devp->devp_npages = atop(round_page(size)); |
| 134 | pager->pg_handle = handle; |
| 135 | pager->pg_ops = &devicepagerops; |
| 136 | pager->pg_type = PG_DEVICE; |
| 137 | pager->pg_data = (caddr_t)devp; |
| 138 | /* |
| 139 | * Allocate object and vm_page structures to describe memory |
| 140 | */ |
| 141 | npages = devp->devp_npages; |
| 142 | object = devp->devp_object = vm_object_allocate(ptoa(npages)); |
| 143 | vm_object_enter(object, pager); |
| 144 | vm_object_setpager(object, pager, (vm_offset_t)0, FALSE); |
| 145 | devp->devp_pages = (vm_page_t) |
| 146 | kmem_alloc(kernel_map, npages*sizeof(struct vm_page)); |
| 147 | off = 0; |
| 148 | for (page = devp->devp_pages; |
| 149 | page < &devp->devp_pages[npages]; page++) { |
| 150 | vm_object_lock(object); |
| 151 | VM_PAGE_INIT(page, object, off); |
| 152 | page->phys_addr = |
| 153 | pmap_phys_address((*mapfunc)(dev, off, nprot)); |
| 154 | page->wire_count = 1; |
| 155 | page->fictitious = TRUE; |
| 156 | PAGE_WAKEUP(page); |
| 157 | vm_object_unlock(object); |
| 158 | off += PAGE_SIZE; |
| 159 | } |
| 160 | /* |
| 161 | * Finally, put it on the managed list so other can find it. |
| 162 | */ |
| 163 | queue_enter(&dev_pager_list, devp, dev_pager_t, devp_list); |
| 164 | #ifdef DEBUG |
| 165 | if (dpagerdebug & DDB_ALLOC) { |
| 166 | printf("dev_pager_alloc: pages %d@%x\n", |
| 167 | devp->devp_npages, devp->devp_pages); |
| 168 | printf("dev_pager_alloc: pager %x devp %x object %x\n", |
| 169 | pager, devp, object); |
| 170 | vm_object_print(object, FALSE); |
| 171 | } |
| 172 | #endif |
| 173 | } else { |
| 174 | /* |
| 175 | * vm_object_lookup() gains a reference and also |
| 176 | * removes the object from the cache. |
| 177 | */ |
| 178 | devp = (dev_pager_t)pager->pg_data; |
| 179 | if (vm_object_lookup(pager) != devp->devp_object) |
| 180 | panic("dev_pager_setup: bad object"); |
| 181 | } |
| 182 | return(pager); |
| 183 | |
| 184 | } |
| 185 | |
/*
 * Tear down a device pager: unlink it from the managed list, strip the
 * fictitious pages from its object, and release the page array and
 * private data.  The pager struct itself is freed by the caller
 * (generic pager layer); pg_data is cleared so it cannot be reused.
 */
static void
dev_pager_dealloc(pager)
	vm_pager_t pager;
{
	dev_pager_t devp = (dev_pager_t)pager->pg_data;
	register vm_object_t object;

#ifdef DEBUG
	if (dpagerdebug & DDB_FOLLOW)
		printf("dev_pager_dealloc(%x)\n", pager);
#endif
	/* Unlink first so no one else can find this pager. */
	queue_remove(&dev_pager_list, devp, dev_pager_t, devp_list);
	object = devp->devp_object;
#ifdef DEBUG
	if (dpagerdebug & DDB_ALLOC)
		printf("dev_pager_dealloc: devp %x object %x pages %d@%x\n",
		       devp, object, devp->devp_npages, devp->devp_pages);
#endif
	/*
	 * Detach every (fictitious) page from the object before freeing
	 * the array they live in; the object must not reference freed
	 * vm_page structs.
	 */
	while (!queue_empty(&object->memq))
		vm_page_remove((vm_page_t)queue_first(&object->memq));
	kmem_free(kernel_map, (vm_offset_t)devp->devp_pages,
		  devp->devp_npages * sizeof(struct vm_page));
	free((caddr_t)devp, M_VMPGDATA);
	pager->pg_data = 0;
}
| 211 | |
| 212 | static int |
| 213 | dev_pager_getpage(pager, m, sync) |
| 214 | vm_pager_t pager; |
| 215 | vm_page_t m; |
| 216 | boolean_t sync; |
| 217 | { |
| 218 | #ifdef DEBUG |
| 219 | if (dpagerdebug & DDB_FOLLOW) |
| 220 | printf("dev_pager_getpage(%x, %x)\n", pager, m); |
| 221 | #endif |
| 222 | return(VM_PAGER_BAD); |
| 223 | } |
| 224 | |
| 225 | static int |
| 226 | dev_pager_putpage(pager, m, sync) |
| 227 | vm_pager_t pager; |
| 228 | vm_page_t m; |
| 229 | boolean_t sync; |
| 230 | { |
| 231 | #ifdef DEBUG |
| 232 | if (dpagerdebug & DDB_FOLLOW) |
| 233 | printf("dev_pager_putpage(%x, %x)\n", pager, m); |
| 234 | #endif |
| 235 | if (pager == NULL) |
| 236 | return; |
| 237 | panic("dev_pager_putpage called"); |
| 238 | } |
| 239 | |
| 240 | static boolean_t |
| 241 | dev_pager_haspage(pager, offset) |
| 242 | vm_pager_t pager; |
| 243 | vm_offset_t offset; |
| 244 | { |
| 245 | #ifdef DEBUG |
| 246 | if (dpagerdebug & DDB_FOLLOW) |
| 247 | printf("dev_pager_haspage(%x, %x)\n", pager, offset); |
| 248 | #endif |
| 249 | return(TRUE); |
| 250 | } |
| 251 | #endif |