vn_open now returns locked, so must unlock when done
[unix-history] /usr/src/sys/vm/vm_mmap.c

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$
 *
 *	@(#)vm_mmap.c	7.4 (Berkeley) %G%
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "param.h"
#include "systm.h"
#include "filedesc.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "vm.h"
#include "vm_pager.h"
#include "vm_prot.h"
#include "vm_statistics.h"

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
        struct proc *p;
        void *uap;
        int *retval;
{

        *retval = NBPG * CLSIZE;
        return (0);
}
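
/*
 * Added note (not in the original source): the page size reported here,
 * NBPG * CLSIZE, is the same granularity enforced by the page_mask
 * alignment checks in smmap, msync, munmap, and mprotect below, so a
 * caller would typically round a length up with something like
 *
 *	len = (len + getpagesize() - 1) & ~(getpagesize() - 1);
 *
 * (valid for the power-of-two page sizes used here) before passing it
 * to mmap and friends.
 */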

/* ARGSUSED */
sbrk(p, uap, retval)
        struct proc *p;
        struct args {
                int incr;
        } *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
        struct proc *p;
        struct args {
                int incr;
        } *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

smmap(p, uap, retval)
        struct proc *p;
        register struct args {
                caddr_t addr;
                int len;
                int prot;
                int flags;
                int fd;
                off_t pos;
        } *uap;
        int *retval;
{
        register struct filedesc *fdp = p->p_fd;
        register struct file *fp;
        struct vnode *vp;
        vm_offset_t addr;
        vm_size_t size;
        vm_prot_t prot;
        caddr_t handle;
        int mtype, error;

#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
                    p->p_pid, uap->addr, uap->len, uap->prot,
                    uap->flags, uap->fd, uap->pos);
#endif
        /*
         * Make sure one of the sharing types is specified
         */
        mtype = uap->flags & MAP_TYPE;
        switch (mtype) {
        case MAP_FILE:
        case MAP_ANON:
                break;
        default:
                return(EINVAL);
        }
        /*
         * Address (if FIXED) and size must be page aligned
         */
        size = (vm_size_t)uap->len;
        addr = (vm_offset_t)uap->addr;
        if ((size & page_mask) ||
            (uap->flags & MAP_FIXED) && (addr & page_mask))
                return(EINVAL);
        /*
         * Mapping file or named anonymous, get fp for validation
         */
        if (mtype == MAP_FILE || uap->fd != -1) {
                if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
                    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
                        return(EBADF);
        }
        /*
         * If we are mapping a file we need to check various
         * file/vnode related things.
         */
        if (mtype == MAP_FILE) {
                /*
                 * Obtain vnode and make sure it is of appropriate type
                 */
                if (fp->f_type != DTYPE_VNODE)
                        return(EINVAL);
                vp = (struct vnode *)fp->f_data;
                if (vp->v_type != VREG && vp->v_type != VCHR)
                        return(EINVAL);
                /*
                 * Ensure that file protection and desired protection
                 * are compatible.  Note that we only worry about writability
                 * if mapping is shared.
                 */
                if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
                    ((uap->flags & MAP_SHARED) &&
                    (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
                        return(EACCES);
                handle = (caddr_t)vp;
        } else if (uap->fd != -1)
                handle = (caddr_t)fp;
        else
                handle = NULL;
        /*
         * Map protections to MACH style
         */
        prot = VM_PROT_NONE;
        if (uap->prot & PROT_READ)
                prot |= VM_PROT_READ;
        if (uap->prot & PROT_WRITE)
                prot |= VM_PROT_WRITE;
        if (uap->prot & PROT_EXEC)
                prot |= VM_PROT_EXECUTE;

        error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
            uap->flags, handle, (vm_offset_t)uap->pos);
        if (error == 0)
                *retval = (int) addr;
        return(error);
}
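
/*
 * Usage sketch (illustrative only, not part of the original source): a
 * user-level call that passes the checks above might look roughly like
 *
 *	addr = mmap((caddr_t)0, len, PROT_READ|PROT_WRITE,
 *	    MAP_FILE|MAP_SHARED, fd, (off_t)0);
 *
 * where len is a multiple of the page size and fd was opened for both
 * reading and writing so the FREAD/FWRITE checks on the file succeed.
 */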

msync(p, uap, retval)
        struct proc *p;
        struct args {
                char *addr;
                int len;
        } *uap;
        int *retval;
{
        vm_offset_t addr, objoff, oaddr;
        vm_size_t size, osize;
        vm_prot_t prot, mprot;
        vm_inherit_t inherit;
        vm_object_t object;
        boolean_t shared;
        int rv;

#ifdef DEBUG
        if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
                printf("msync(%d): addr %x len %x\n",
                    p->p_pid, uap->addr, uap->len);
#endif
        if (((int)uap->addr & page_mask) || (uap->len & page_mask))
                return(EINVAL);
        addr = oaddr = (vm_offset_t)uap->addr;
        osize = (vm_size_t)uap->len;
        /*
         * Region must be entirely contained in a single entry
         */
        if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
            TRUE))
                return(EINVAL);
        /*
         * Determine the object associated with that entry
         * (object is returned locked on KERN_SUCCESS)
         */
        rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
            &inherit, &shared, &object, &objoff);
        if (rv != KERN_SUCCESS)
                return(EINVAL);
#ifdef DEBUG
        if (mmapdebug & MDB_SYNC)
                printf("msync: region: object %x addr %x size %d objoff %d\n",
                    object, addr, size, objoff);
#endif
        /*
         * Do not msync non-vnode-backed objects.
         */
        if (object->internal || object->pager == NULL ||
            object->pager->pg_type != PG_VNODE) {
                vm_object_unlock(object);
                return(EINVAL);
        }
        objoff += oaddr - addr;
        if (osize == 0)
                osize = size;
#ifdef DEBUG
        if (mmapdebug & MDB_SYNC)
                printf("msync: cleaning/flushing object range [%x-%x)\n",
                    objoff, objoff+osize);
#endif
        if (prot & VM_PROT_WRITE)
                vm_object_page_clean(object, objoff, objoff+osize);
        /*
         * (XXX)
         * Bummer, gotta flush all cached pages to ensure
         * consistency with the file system cache.
         */
        vm_object_page_remove(object, objoff, objoff+osize);
        vm_object_unlock(object);
        return(0);
}
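
/*
 * Added note (not in the original source): the range handed to msync
 * must lie within a single map entry and that entry must be backed by
 * the vnode pager, so callers normally flush exactly the range they
 * mapped, e.g. msync(addr, len) with both page aligned; as the
 * osize == 0 case above shows, a zero length flushes from addr to the
 * end of the containing entry.
 */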

munmap(p, uap, retval)
        register struct proc *p;
        register struct args {
                caddr_t addr;
                int len;
        } *uap;
        int *retval;
{
        vm_offset_t addr;
        vm_size_t size;

#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("munmap(%d): addr %x len %x\n",
                    p->p_pid, uap->addr, uap->len);
#endif

        addr = (vm_offset_t) uap->addr;
        size = (vm_size_t) uap->len;
        if ((addr & page_mask) || (size & page_mask))
                return(EINVAL);
        if (size == 0)
                return(0);
        if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
            FALSE))
                return(EINVAL);
        /* returns nothing but KERN_SUCCESS anyway */
        (void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
        return(0);
}
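
/*
 * Added note (not in the original source): unlike msync, munmap accepts
 * a range spanning several map entries as long as they are contiguous
 * (vm_map_is_allocated is called with single_entry == FALSE), so one
 * call can remove two adjacent mappings, while a range containing a
 * hole fails with EINVAL.
 */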

munmapfd(fd)
{
#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

        /*
         * XXX -- should vm_deallocate any regions mapped to this file
         */
        curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}
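
/*
 * Added note (an assumption about the caller, not stated in this file):
 * munmapfd is presumably invoked when a descriptor with UF_MAPPED set
 * is being closed; as the XXX above says, it currently only clears the
 * flag and does not deallocate any regions still mapped from the file.
 */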

mprotect(p, uap, retval)
        struct proc *p;
        struct args {
                char *addr;
                int len;
                int prot;
        } *uap;
        int *retval;
{
        vm_offset_t addr;
        vm_size_t size;
        register vm_prot_t prot;

#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("mprotect(%d): addr %x len %x prot %d\n",
                    p->p_pid, uap->addr, uap->len, uap->prot);
#endif

        addr = (vm_offset_t) uap->addr;
        size = (vm_size_t) uap->len;
        if ((addr & page_mask) || (size & page_mask))
                return(EINVAL);
        /*
         * Map protections
         */
        prot = VM_PROT_NONE;
        if (uap->prot & PROT_READ)
                prot |= VM_PROT_READ;
        if (uap->prot & PROT_WRITE)
                prot |= VM_PROT_WRITE;
        if (uap->prot & PROT_EXEC)
                prot |= VM_PROT_EXECUTE;

        switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
            FALSE)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        }
        return (EINVAL);
}
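
/*
 * Usage sketch (illustrative only): to make a page-aligned, previously
 * writable region read-only a caller would issue
 *
 *	mprotect(addr, len, PROT_READ);
 *
 * which maps to VM_PROT_READ above; a request that vm_map_protect
 * rejects as exceeding the entry's maximum protection comes back as
 * KERN_PROTECTION_FAILURE and hence EACCES.
 */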

/* ARGSUSED */
madvise(p, uap, retval)
        struct proc *p;
        struct args {
                char *addr;
                int len;
                int behav;
        } *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
        struct proc *p;
        struct args {
                char *addr;
                int len;
                char *vec;
        } *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
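/*
 * Call sketch (illustrative, restating the smmap path above): the mmap
 * system call invokes this as
 *
 *	vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, flags,
 *	    handle, (vm_offset_t)pos);
 *
 * with handle a vnode pointer for MAP_FILE, a file pointer for named
 * anonymous memory, or NULL for unnamed anonymous memory.
 */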
vm_mmap(map, addr, size, prot, flags, handle, foff)
        register vm_map_t map;
        register vm_offset_t *addr;
        register vm_size_t size;
        vm_prot_t prot;
        register int flags;
        caddr_t handle;         /* XXX should be vp */
        vm_offset_t foff;
{
        register vm_pager_t pager;
        boolean_t fitit;
        vm_object_t object;
        struct vnode *vp;
        int type;
        int rv = KERN_SUCCESS;

        if (size == 0)
                return (0);

        if ((flags & MAP_FIXED) == 0) {
                fitit = TRUE;
                *addr = round_page(*addr);
        } else {
                fitit = FALSE;
                (void) vm_deallocate(map, *addr, size);
        }

        /*
         * Lookup/allocate pager.  All except an unnamed anonymous lookup
         * gain a reference to ensure continued existence of the object.
         * (XXX the exception is to appease the pageout daemon)
         */
        if ((flags & MAP_TYPE) == MAP_ANON)
                type = PG_DFLT;
        else {
                vp = (struct vnode *)handle;
                if (vp->v_type == VCHR) {
                        type = PG_DEVICE;
                        handle = (caddr_t)vp->v_rdev;
                } else
                        type = PG_VNODE;
        }
        pager = vm_pager_allocate(type, handle, size, prot);
        if (pager == NULL)
                return (type == PG_DEVICE ? EINVAL : ENOMEM);
        /*
         * Find object and release extra reference gained by lookup
         */
        object = vm_object_lookup(pager);
        vm_object_deallocate(object);

        /*
         * Anonymous memory.
         */
        if ((flags & MAP_TYPE) == MAP_ANON) {
                rv = vm_allocate_with_pager(map, addr, size, fitit,
                    pager, (vm_offset_t)foff, TRUE);
                if (rv != KERN_SUCCESS) {
                        if (handle == NULL)
                                vm_pager_deallocate(pager);
                        else
                                vm_object_deallocate(object);
                        goto out;
                }
                /*
                 * Don't cache anonymous objects.
                 * Loses the reference gained by vm_pager_allocate.
                 */
                (void) pager_cache(object, FALSE);
#ifdef DEBUG
                if (mmapdebug & MDB_MAPIT)
                        printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
                            curproc->p_pid, *addr, size, pager);
#endif
        }
        /*
         * Must be type MAP_FILE.
         * Distinguish between character special and regular files.
         */
        else if (vp->v_type == VCHR) {
                rv = vm_allocate_with_pager(map, addr, size, fitit,
                    pager, (vm_offset_t)foff, FALSE);
                /*
                 * Uncache the object and lose the reference gained
                 * by vm_pager_allocate().  If the call to
                 * vm_allocate_with_pager() was successful, then we
                 * gained an additional reference ensuring the object
                 * will continue to exist.  If the call failed then
                 * the deallocate call below will terminate the
                 * object which is fine.
                 */
                (void) pager_cache(object, FALSE);
                if (rv != KERN_SUCCESS)
                        goto out;
        }
        /*
         * A regular file
         */
        else {
#ifdef DEBUG
                if (object == NULL)
                        printf("vm_mmap: no object: vp %x, pager %x\n",
                            vp, pager);
#endif
                /*
                 * Map it directly.
                 * Allows modifications to go out to the vnode.
                 */
                if (flags & MAP_SHARED) {
                        rv = vm_allocate_with_pager(map, addr, size,
                            fitit, pager, (vm_offset_t)foff, FALSE);
                        if (rv != KERN_SUCCESS) {
                                vm_object_deallocate(object);
                                goto out;
                        }
                        /*
                         * Don't cache the object.  This is the easiest way
                         * of ensuring that data gets back to the filesystem
                         * because vnode_pager_deallocate() will fsync the
                         * vnode.  pager_cache() will lose the extra ref.
                         */
                        if (prot & VM_PROT_WRITE)
                                pager_cache(object, FALSE);
                        else
                                vm_object_deallocate(object);
                }
                /*
                 * Copy-on-write of file.  Two flavors.
                 * MAP_COPY is true COW, you essentially get a snapshot of
                 * the region at the time of mapping.  MAP_PRIVATE means only
                 * that your changes are not reflected back to the object.
                 * Changes made by others will be seen.
                 */
                else {
                        vm_map_t tmap;
                        vm_offset_t off;

                        /* locate and allocate the target address space */
                        rv = vm_map_find(map, NULL, (vm_offset_t)0,
                            addr, size, fitit);
                        if (rv != KERN_SUCCESS) {
                                vm_object_deallocate(object);
                                goto out;
                        }
                        tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
                            VM_MIN_ADDRESS+size, TRUE);
                        off = VM_MIN_ADDRESS;
                        rv = vm_allocate_with_pager(tmap, &off, size,
                            TRUE, pager, (vm_offset_t)foff, FALSE);
                        if (rv != KERN_SUCCESS) {
                                vm_object_deallocate(object);
                                vm_map_deallocate(tmap);
                                goto out;
                        }
                        /*
                         * (XXX)
                         * MAP_PRIVATE implies that we see changes made by
                         * others.  To ensure that, we need to guarantee that
                         * no copy object is created (otherwise original
                         * pages would be pushed to the copy object and we
                         * would never see changes made by others).  We
                         * totally sleaze it right now by marking the object
                         * internal temporarily.
                         */
                        if ((flags & MAP_COPY) == 0)
                                object->internal = TRUE;
                        rv = vm_map_copy(map, tmap, *addr, size, off,
                            FALSE, FALSE);
                        object->internal = FALSE;
                        /*
                         * (XXX)
                         * My oh my, this only gets worse...
                         * Force creation of a shadow object so that
                         * vm_map_fork will do the right thing.
                         */
                        if ((flags & MAP_COPY) == 0) {
                                vm_map_t tmap;
                                vm_map_entry_t tentry;
                                vm_object_t tobject;
                                vm_offset_t toffset;
                                vm_prot_t tprot;
                                boolean_t twired, tsu;

                                tmap = map;
                                vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
                                    &tentry, &tobject, &toffset,
                                    &tprot, &twired, &tsu);
                                vm_map_lookup_done(tmap, tentry);
                        }
                        /*
                         * (XXX)
                         * Map copy code cannot detect sharing unless a
                         * sharing map is involved.  So we cheat and write
                         * protect everything ourselves.
                         */
                        vm_object_pmap_copy(object, (vm_offset_t)foff,
                            (vm_offset_t)foff+size);
                        vm_object_deallocate(object);
                        vm_map_deallocate(tmap);
                        if (rv != KERN_SUCCESS)
                                goto out;
                }
#ifdef DEBUG
                if (mmapdebug & MDB_MAPIT)
                        printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
                            curproc->p_pid, *addr, size, pager);
#endif
        }
        /*
         * Correct protection (default is VM_PROT_ALL).
         * Note that we set the maximum protection.  This may not be
         * entirely correct.  Maybe the maximum protection should be based
         * on the object permissions where it makes sense (e.g. a vnode).
         *
         * Changed my mind: leave max prot at VM_PROT_ALL.
         */
        if (prot != VM_PROT_ALL) {
                rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
                if (rv != KERN_SUCCESS) {
                        (void) vm_deallocate(map, *addr, size);
                        goto out;
                }
        }
        /*
         * Shared memory is also shared with children.
         */
        if (flags & MAP_SHARED) {
                rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
                if (rv != KERN_SUCCESS) {
                        (void) vm_deallocate(map, *addr, size);
                        goto out;
                }
        }
out:
#ifdef DEBUG
        if (mmapdebug & MDB_MAPIT)
                printf("vm_mmap: rv %d\n", rv);
#endif
        switch (rv) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}
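
/*
 * Summary comment (added for clarity): vm_mmap takes one of three paths.
 * Anonymous memory and character devices are mapped directly with
 * vm_allocate_with_pager; shared regular-file mappings are also mapped
 * directly so stores reach the vnode; private and copy mappings go
 * through a temporary map plus vm_map_copy to get copy-on-write
 * semantics.
 */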

/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
        vm_map_t        map;
        vm_offset_t     *addr;          /* IN/OUT */
        vm_size_t       *size;          /* OUT */
        vm_prot_t       *prot;          /* OUT */
        vm_prot_t       *max_prot;      /* OUT */
        vm_inherit_t    *inheritance;   /* OUT */
        boolean_t       *shared;        /* OUT */
        vm_object_t     *object;        /* OUT */
        vm_offset_t     *objoff;        /* OUT */
{
        vm_map_entry_t  tmp_entry;
        register
        vm_map_entry_t  entry;
        register
        vm_offset_t     tmp_offset;
        vm_offset_t     start;

        if (map == NULL)
                return(KERN_INVALID_ARGUMENT);

        start = *addr;

        vm_map_lock_read(map);
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                if ((entry = tmp_entry->next) == &map->header) {
                        vm_map_unlock_read(map);
                        return(KERN_NO_SPACE);
                }
                start = entry->start;
                *addr = start;
        } else
                entry = tmp_entry;

        *prot = entry->protection;
        *max_prot = entry->max_protection;
        *inheritance = entry->inheritance;

        tmp_offset = entry->offset + (start - entry->start);
        *size = (entry->end - start);

        if (entry->is_a_map) {
                register vm_map_t share_map;
                vm_size_t share_size;

                share_map = entry->object.share_map;

                vm_map_lock_read(share_map);
                (void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

                if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
                        *size = share_size;

                vm_object_lock(tmp_entry->object);
                *object = tmp_entry->object.vm_object;
                *objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

                *shared = (share_map->ref_count != 1);
                vm_map_unlock_read(share_map);
        } else {
                vm_object_lock(entry->object);
                *object = entry->object.vm_object;
                *objoff = tmp_offset;

                *shared = FALSE;
        }

        vm_map_unlock_read(map);

        return(KERN_SUCCESS);
}
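
/*
 * Added note (not in the original source): the object is returned with
 * its lock held in both branches above, which is why msync() must
 * vm_object_unlock() it on every path; for share-map entries the
 * reported size is clipped to the portion of the share map entry that
 * lies beyond the computed offset.
 */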

/*
 * Yet another bastard routine.
 */
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
        register vm_map_t map;
        register vm_offset_t *addr;
        register vm_size_t size;
        boolean_t fitit;
        vm_pager_t pager;
        vm_offset_t poffset;
        boolean_t internal;
{
        register vm_object_t object;
        register int result;

        if (map == NULL)
                return(KERN_INVALID_ARGUMENT);

        *addr = trunc_page(*addr);
        size = round_page(size);

        /*
         * Lookup the pager/paging-space in the object cache.
         * If it's not there, then create a new object and cache
         * it.
         */
        object = vm_object_lookup(pager);
        vm_stat.lookups++;
        if (object == NULL) {
                object = vm_object_allocate(size);
                vm_object_enter(object, pager);
        } else
                vm_stat.hits++;
        object->internal = internal;

        result = vm_map_find(map, object, poffset, addr, size, fitit);
        if (result != KERN_SUCCESS)
                vm_object_deallocate(object);
        else if (pager != NULL)
                vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
        return(result);
}
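
/*
 * Usage sketch (restating callers in this file): the MAP_ANON path in
 * vm_mmap calls
 *
 *	vm_allocate_with_pager(map, addr, size, fitit, pager,
 *	    (vm_offset_t)foff, TRUE);
 *
 * with internal == TRUE, while the device and file paths pass FALSE,
 * leaving object->internal clear for externally backed objects.
 */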

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start - end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
        vm_map_t map;
        vm_offset_t start, end;
        boolean_t single_entry;
{
        vm_map_entry_t mapent;
        register vm_offset_t nend;

        vm_map_lock_read(map);

        /*
         * Start address not in any entry
         */
        if (!vm_map_lookup_entry(map, start, &mapent)) {
                vm_map_unlock_read(map);
                return (FALSE);
        }
        /*
         * Find the maximum stretch of contiguously allocated space
         */
        nend = mapent->end;
        if (!single_entry) {
                mapent = mapent->next;
                while (mapent != &map->header && mapent->start == nend) {
                        nend = mapent->end;
                        mapent = mapent->next;
                }
        }

        vm_map_unlock_read(map);
        return (end <= nend);
}
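
/*
 * Worked example (added for clarity): with entries covering [A,B) and
 * [B,C) and a request for [A,C), single_entry == FALSE walks forward
 * while each successive entry starts exactly where the previous one
 * ended, so nend reaches C and end <= nend holds; any gap stops the
 * walk early and the range is reported as not allocated.
 */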