-
-/*
- * Internal bastardized version of MACHs vm_region system call.
- * Given address and size it returns map attributes as well
- * as the (locked) object mapped at that location.
- *
- * On success the returned *object is LOCKED -- the caller is
- * responsible for unlocking it.  If *addr falls in a hole, *addr
- * is advanced to the start of the next allocated entry.
- */
-int
-vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
- vm_map_t map;
- vm_offset_t *addr; /* IN/OUT */
- vm_size_t *size; /* OUT */
- vm_prot_t *prot; /* OUT */
- vm_prot_t *max_prot; /* OUT */
- vm_inherit_t *inheritance; /* OUT */
- boolean_t *shared; /* OUT */
- vm_object_t *object; /* OUT */
- vm_offset_t *objoff; /* OUT */
-{
- vm_map_entry_t tmp_entry;
- register
- vm_map_entry_t entry;
- register
- vm_offset_t tmp_offset;
- vm_offset_t start;
-
- if (map == NULL)
- return(KERN_INVALID_ARGUMENT);
-
- start = *addr;
-
- vm_map_lock_read(map);
- /*
-  * Address not inside any entry: slide forward to the next
-  * allocated entry, failing if nothing lies beyond it.
-  */
- if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
- if ((entry = tmp_entry->next) == &map->header) {
- vm_map_unlock_read(map);
- return(KERN_NO_SPACE);
- }
- start = entry->start;
- *addr = start;
- } else
- entry = tmp_entry;
-
- /* Report the attributes of the entry found. */
- *prot = entry->protection;
- *max_prot = entry->max_protection;
- *inheritance = entry->inheritance;
-
- /* Offset of `start' within the entry's backing object/map. */
- tmp_offset = entry->offset + (start - entry->start);
- *size = (entry->end - start);
-
- if (entry->is_a_map) {
- register vm_map_t share_map;
- vm_size_t share_size;
-
- /*
-  * Entry references a share map: look up the underlying
-  * entry there and clip the reported size to what that
-  * share-map entry actually covers.
-  */
- share_map = entry->object.share_map;
-
- vm_map_lock_read(share_map);
- (void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);
-
- if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
- *size = share_size;
-
- /*
-  * NOTE(review): the lock is taken on the `object' union
-  * itself rather than the .vm_object member -- presumably
-  * equivalent here; confirm against the union layout.
-  */
- vm_object_lock(tmp_entry->object);
- *object = tmp_entry->object.vm_object;
- *objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);
-
- /* Shared iff someone else still references the share map. */
- *shared = (share_map->ref_count != 1);
- vm_map_unlock_read(share_map);
- } else {
- vm_object_lock(entry->object);
- *object = entry->object.vm_object;
- *objoff = tmp_offset;
-
- *shared = FALSE;
- }
-
- vm_map_unlock_read(map);
-
- return(KERN_SUCCESS);
-}
-
-/*
- * Yet another bastard routine.
- *
- * Allocate [*addr, *addr + size) in `map', backed by `pager' at
- * offset `poffset'.  The pager is looked up in the object cache;
- * on a miss a new object is created (and, for non-internal
- * objects, entered into the cache).  On failure the object
- * reference obtained here is dropped; on success the object is
- * bound to the pager.
- */
-int
-vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
- register vm_map_t map;
- register vm_offset_t *addr; /* IN/OUT: truncated to a page boundary */
- register vm_size_t size; /* rounded up to a whole page */
- boolean_t fitit; /* let vm_map_find choose the address? */
- vm_pager_t pager;
- vm_offset_t poffset; /* offset within the pager */
- boolean_t internal; /* kernel-internal (unnamed) object? */
-{
- register vm_object_t object;
- register int result;
-
- if (map == NULL)
- return(KERN_INVALID_ARGUMENT);
-
- *addr = trunc_page(*addr);
- size = round_page(size);
-
- /*
- * Lookup the pager/paging-space in the object cache.
- * If it's not there, then create a new object and cache
- * it.
- */
- object = vm_object_lookup(pager);
- cnt.v_lookups++;
- if (object == NULL) {
- object = vm_object_allocate(size);
- /*
- * From Mike Hibler: "unnamed anonymous objects should never
- * be on the hash list ... For now you can just change
- * vm_allocate_with_pager to not do vm_object_enter if this
- * is an internal object ..."
- */
- if (!internal)
- vm_object_enter(object, pager);
- } else
- cnt.v_hits++;
- /* Keep OBJ_INTERNAL in sync with what this caller requested. */
- if (internal)
- object->flags |= OBJ_INTERNAL;
- else
- object->flags &= ~OBJ_INTERNAL;
-
- result = vm_map_find(map, object, poffset, addr, size, fitit);
- if (result != KERN_SUCCESS)
- vm_object_deallocate(object); /* drop the reference taken above */
- else if (pager != NULL)
- vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
- return(result);
-}
-
-/*
- * XXX: this routine belongs in vm_map.c.
- *
- * Returns TRUE if the range [start - end) is allocated in either
- * a single entry (single_entry == TRUE) or multiple contiguous
- * entries (single_entry == FALSE).
- *
- * start and end should be page aligned.
- */
-boolean_t
-vm_map_is_allocated(map, start, end, single_entry)
- vm_map_t map;
- vm_offset_t start, end;
- boolean_t single_entry;
-{
- vm_map_entry_t mapent;
- register vm_offset_t nend;
-
- vm_map_lock_read(map);
-
- /*
- * Start address not in any entry
- */
- if (!vm_map_lookup_entry(map, start, &mapent)) {
- vm_map_unlock_read(map);
- return (FALSE);
- }
- /*
- * Find the maximum stretch of contiguously allocated space
- */
- nend = mapent->end;
- if (!single_entry) {
- mapent = mapent->next;
- /* Extend across successor entries that abut exactly. */
- while (mapent != &map->header && mapent->start == nend) {
- nend = mapent->end;
- mapent = mapent->next;
- }
- }
-
- vm_map_unlock_read(map);
- /* Allocated iff the contiguous stretch reaches `end'. */
- return (end <= nend);
-}