The can_persist, internal, and pager_ready bit fields were replaced with a single flags word (OBJ_CANPERSIST / OBJ_INTERNAL).
SCCS-vsn: sys/vm/vm_object.c 7.7
*
* %sccs.include.redist.c%
*
*
* %sccs.include.redist.c%
*
- * @(#)vm_object.c 7.6 (Berkeley) %G%
+ * @(#)vm_object.c 7.7 (Berkeley) %G%
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
object->ref_count = 1;
object->resident_page_count = 0;
object->size = size;
object->ref_count = 1;
object->resident_page_count = 0;
object->size = size;
- object->can_persist = FALSE;
+ object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
object->paging_in_progress = 0;
object->copy = NULL;
object->paging_in_progress = 0;
object->copy = NULL;
- object->pager_ready = FALSE;
- object->internal = TRUE; /* vm_allocate_with_pager will reset */
object->paging_offset = 0;
object->shadow = NULL;
object->shadow_offset = (vm_offset_t) 0;
object->paging_offset = 0;
object->shadow = NULL;
object->shadow_offset = (vm_offset_t) 0;
- if (object->can_persist) {
+ if (object->flags & OBJ_CANPERSIST) {
queue_enter(&vm_object_cached_list, object,
vm_object_t, cached_list);
queue_enter(&vm_object_cached_list, object,
vm_object_t, cached_list);
queue_remove(&vm_page_queue_active, p, vm_page_t,
pageq);
p->active = FALSE;
queue_remove(&vm_page_queue_active, p, vm_page_t,
pageq);
p->active = FALSE;
- vm_stat.active_count--;
}
if (p->inactive) {
queue_remove(&vm_page_queue_inactive, p, vm_page_t,
pageq);
p->inactive = FALSE;
}
if (p->inactive) {
queue_remove(&vm_page_queue_inactive, p, vm_page_t,
pageq);
p->inactive = FALSE;
- vm_stat.inactive_count--;
+ cnt.v_inactive_count--;
}
vm_page_unlock_queues();
p = (vm_page_t) queue_next(&p->listq);
}
vm_page_unlock_queues();
p = (vm_page_t) queue_next(&p->listq);
* so we don't need to lock it.
*/
* so we don't need to lock it.
*/
- if (!object->internal) {
+ if ((object->flags & OBJ_INTERNAL) == 0) {
vm_object_lock(object);
vm_object_page_clean(object, 0, 0);
vm_object_unlock(object);
vm_object_lock(object);
vm_object_page_clean(object, 0, 0);
vm_object_unlock(object);
vm_object_lock(src_object);
if (src_object->pager == NULL ||
vm_object_lock(src_object);
if (src_object->pager == NULL ||
- src_object->internal) {
+ (src_object->flags & OBJ_INTERNAL)) {
/*
* Make another reference to the object
/*
* Make another reference to the object
entry = (vm_object_hash_entry_t)
malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
entry->object = object;
entry = (vm_object_hash_entry_t)
malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
entry->object = object;
- object->can_persist = TRUE;
+ object->flags |= OBJ_CANPERSIST;
vm_object_cache_lock();
queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
vm_object_cache_lock();
queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
* The backing object is internal.
*/
* The backing object is internal.
*/
- if (!backing_object->internal ||
+ if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
backing_object->paging_in_progress != 0) {
vm_object_unlock(backing_object);
return;
backing_object->paging_in_progress != 0) {
vm_object_unlock(backing_object);
return;