/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$
 *
 *	@(#)vm_mmap.c	7.5 (Berkeley) 6/28/91
 *
 * PATCHES MAGIC		LEVEL	PATCH THAT GOT US HERE
 * --------------------	-----	----------------------
 * CURRENT PATCH LEVEL:	1	00137
 * --------------------	-----	----------------------
 *
 * 08 Apr 93	Yuval Yarom	Several VM system fixes
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "param.h"
#include "systm.h"
#include "filedesc.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "vm.h"
#include "vm_pager.h"
#include "vm_prot.h"
#include "vm_statistics.h"

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{

	*retval = NBPG * CLSIZE;
	return (0);
}

/* ARGSUSED */
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return(EINVAL);
	}
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if ((uap->flags & MAP_FIXED) && (addr & page_mask) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if ((uap->flags & MAP_FIXED) && (addr + size > VM_MAXUSER_ADDRESS))
		return(EINVAL);
	/*
	 * XXX if no hint provided for a non-fixed mapping place it after
	 * the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (uap->flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return(EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if mapping is shared.
		 */
		if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}
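
/*
 * For orientation only, a hedged user-level sketch of how this syscall
 * is reached through the mmap() stub (the file name and values are
 * illustrative assumptions, not part of this file):
 *
 *	int fd = open("/etc/motd", O_RDONLY);
 *	caddr_t va = mmap((caddr_t)0, 100, PROT_READ,
 *	    MAP_FILE|MAP_PRIVATE, fd, (off_t)0);
 *
 * An addr of 0 without MAP_FIXED lets the kernel choose a location,
 * and the unaligned length is rounded up to a page by the code above.
 */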

msync(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & page_mask) || uap->len < 0)
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync objects that are not backed by a vnode.
	 */
	if (object->internal || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}
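
/*
 * Hedged usage note (values illustrative): a length of 0 means "the
 * rest of the containing region", because osize above is replaced by
 * the region size returned from vm_region().  So:
 *
 *	msync(va, 0);		sync the whole region containing va
 *	msync(va, NBPG);	sync just the first page
 */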

munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & page_mask) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return(0);
	if (addr + size >= VM_MAXUSER_ADDRESS)
		return(EINVAL);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
	    FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return(0);
}

munmapfd(p, fd)
	register struct proc *p;
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", p->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & page_mask) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) uap->len;
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

/* ARGSUSED */
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * The object of unnamed anonymous regions was just created;
		 * find it for pager_cache.
		 */
		if (handle == NULL)
			object = vm_object_lookup(pager);

		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->internal = TRUE;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->internal = FALSE;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
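
/*
 * Hedged in-kernel sketch of the handle convention documented above
 * (the callers and values are illustrative assumptions, patterned on
 * the mmap/exec/SysV-shm users the comment mentions):
 *
 *	// file mapping: handle is the vnode
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size,
 *	    VM_PROT_READ|VM_PROT_EXECUTE, MAP_FILE|MAP_PRIVATE,
 *	    (caddr_t)vp, (vm_offset_t)off);
 *
 *	// unnamed anonymous memory: handle is NULL
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size,
 *	    VM_PROT_ALL, MAP_ANON, (caddr_t)NULL, (vm_offset_t)0);
 */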

/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t map;
	vm_offset_t *addr;		/* IN/OUT */
	vm_size_t *size;		/* OUT */
	vm_prot_t *prot;		/* OUT */
	vm_prot_t *max_prot;		/* OUT */
	vm_inherit_t *inheritance;	/* OUT */
	boolean_t *shared;		/* OUT */
	vm_object_t *object;		/* OUT */
	vm_offset_t *objoff;		/* OUT */
{
	vm_map_entry_t tmp_entry;
	register
	vm_map_entry_t entry;
	register
	vm_offset_t tmp_offset;
	vm_offset_t start;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}

/*
 * Yet another bastard routine.
 */
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t fitit;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Lookup the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	vm_stat.lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		vm_stat.hits++;
	object->internal = internal;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}
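
/*
 * Hedged illustration of the contract above (addresses are made up):
 * given map entries covering [A, B) and [B, C),
 *
 *	vm_map_is_allocated(map, A, C, FALSE) == TRUE	(contiguous run)
 *	vm_map_is_allocated(map, A, C, TRUE)  == FALSE	(spans two entries)
 *	vm_map_is_allocated(map, A, B, TRUE)  == TRUE	(single entry)
 */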