/*
 * [unix-history] sys/vm/vm_page.h
 * Commit 15637ed4 (RG); log message (truncated in extraction):
 * "Fixed broken pte bit definitions. I fixed this long ago in pte.h, but ..."
 */
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
1284e777 36 * from: @(#)vm_page.h 7.3 (Berkeley) 4/21/91
4014f930 37 * $Id: vm_page.h,v 1.13 1994/04/05 03:23:50 davidg Exp $
1284e777
RG
38 */
39
40/*
15637ed4
RG
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 * Resident memory system definitions.
69 */
70
71#ifndef _VM_PAGE_
72#define _VM_PAGE_
73
1834a73c 74#ifdef KERNEL
41aefbec 75#include <systm.h>
1834a73c 76#endif
15637ed4
RG
77/*
78 * Management of resident (logical) pages.
79 *
80 * A small structure is kept for each resident
81 * page, indexed by page number. Each structure
82 * is an element of several lists:
83 *
84 * A hash table bucket used to quickly
85 * perform object/offset lookups
86 *
87 * A list of all pages for a given object,
88 * so they can be quickly deactivated at
89 * time of deallocation.
90 *
91 * An ordered list of pages due for pageout.
92 *
93 * In addition, the structure contains the object
94 * and offset to which this page belongs (for pageout),
95 * and sundry status bits.
96 *
97 * Fields in this structure are locked either by the lock on the
98 * object that the page belongs to (O) or by the lock on the page
99 * queues (P).
100 */
101
fd76afd7
DG
/*
 * Bit-encoded page flags, kept in vm_page.flags.  Locking follows the
 * field they describe: queue-related bits are protected by the page
 * queue lock (P), object-related bits by the object lock (O).
 */
#define	PG_INACTIVE	0x0001	/* page is on the inactive queue */
#define	PG_ACTIVE	0x0002	/* page is on the active queue */
#define	PG_LAUNDRY	0x0004	/* page is being laundered (written out) */
#define	PG_CLEAN	0x0008	/* page has not been modified
				   (cleared by vm_page_set_modified()) */
#define	PG_BUSY		0x0010	/* page is in transit; sleep via
				   PAGE_ASSERT_WAIT(), cleared by PAGE_WAKEUP() */
#define	PG_WANTED	0x0020	/* someone is sleeping on this page */
#define	PG_TABLED	0x0040	/* page is entered in the object/offset table */
#define	PG_COPY_ON_WRITE 0x0080	/* page must be copied before first write */
#define	PG_FICTITIOUS	0x0100	/* page has no backing physical page frame
				   -- presumably device memory; verify */
#define	PG_ABSENT	0x0200	/* data not yet resident */
#define	PG_FAKE		0x0400	/* placeholder page -- NOTE(review): exact
				   semantics not visible in this header */
#define	PG_PAGEROWNED	0x0800	/* page is owned by the pageout daemon */
#define	PG_PTPAGE	0x1000	/* page is used as a page-table page */
115
15637ed4 116struct vm_page {
fd76afd7
DG
117 queue_chain_t pageq; /* queue info for FIFO */
118 /* queue or free list (P) */
15637ed4
RG
119 queue_chain_t hashq; /* hash table links (O)*/
120 queue_chain_t listq; /* all pages in same object (O)*/
121
122 vm_object_t object; /* which object am I in (O,P)*/
123 vm_offset_t offset; /* offset into that object (O,P) */
124
fd76afd7 125 unsigned int wire_count; /* how many wired down maps use me? */
55768178 126 unsigned short flags; /* bit encoded flags */
4014f930 127 unsigned short act_count; /* active count */
41aefbec 128 int hold_count; /* page hold count -- don't pageout */
15637ed4
RG
129
130 vm_offset_t phys_addr; /* physical address of page */
15637ed4
RG
131};
132
133typedef struct vm_page *vm_page_t;
134
#if VM_PAGE_DEBUG
/*
 * Sanity check a vm_page pointer: it must lie within vm_page_array
 * and must not be on both the active and inactive queues at once.
 */
#define VM_PAGE_CHECK(mem) { \
		if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
		    (((unsigned int) mem) > ((unsigned int) &vm_page_array[last_page-first_page])) || \
		    ((mem->flags & PG_ACTIVE) && (mem->flags & PG_INACTIVE)) \
		) panic("vm_page_check: not valid!"); \
	}
#else /* VM_PAGE_DEBUG */
#define VM_PAGE_CHECK(mem)
#endif /* VM_PAGE_DEBUG */
15637ed4
RG
145
146#ifdef KERNEL
147/*
148 * Each pageable resident page falls into one of three lists:
149 *
150 * free
151 * Available for allocation now.
152 * inactive
153 * Not referenced in any map, but still has an
154 * object/offset-page mapping, and may be dirty.
155 * This is the list of pages that should be
156 * paged out next.
157 * active
158 * A list of pages which have been placed in
159 * at least one physical map. This list is
160 * ordered, in LRU-like fashion.
161 */
162
163extern
164queue_head_t vm_page_queue_free; /* memory free queue */
165extern
166queue_head_t vm_page_queue_active; /* active memory queue */
167extern
168queue_head_t vm_page_queue_inactive; /* inactive memory queue */
169
170extern
171vm_page_t vm_page_array; /* First resident page in table */
172extern
173long first_page; /* first physical page number */
174 /* ... represented in vm_page_array */
175extern
176long last_page; /* last physical page number */
177 /* ... represented in vm_page_array */
178 /* [INCLUSIVE] */
179extern
180vm_offset_t first_phys_addr; /* physical address for first_page */
181extern
182vm_offset_t last_phys_addr; /* physical address for last_page */
183
184extern
185int vm_page_free_count; /* How many pages are free? */
186extern
187int vm_page_active_count; /* How many pages are active? */
188extern
189int vm_page_inactive_count; /* How many pages are inactive? */
190extern
191int vm_page_wire_count; /* How many pages are wired? */
192extern
193int vm_page_free_target; /* How many do we want free? */
194extern
195int vm_page_free_min; /* When to wakeup pageout */
196extern
197int vm_page_inactive_target;/* How many do we want inactive? */
198extern
199int vm_page_free_reserved; /* How many pages reserved to do pageout */
200extern
201int vm_page_laundry_count; /* How many pages being laundered? */
202
/* Physical address of a resident page. */
#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/* Is this physical address managed by the vm_page_array? */
#define IS_VM_PHYSADDR(pa) \
		((pa) >= first_phys_addr && (pa) <= last_phys_addr)

/* Map a managed physical address to its vm_page structure. */
#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page ])
210
211extern
212simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
213 page queues */
214extern
215simple_lock_data_t vm_page_queue_free_lock;
216 /* lock on free page queue */
217vm_offset_t vm_page_startup();
218vm_page_t vm_page_lookup();
219vm_page_t vm_page_alloc();
15637ed4
RG
220void vm_page_free();
221void vm_page_activate();
222void vm_page_deactivate();
223void vm_page_rename();
224void vm_page_replace();
225
226boolean_t vm_page_zero_fill();
227void vm_page_copy();
41aefbec 228#if 0
15637ed4
RG
229void vm_page_wire();
230void vm_page_unwire();
41aefbec 231#endif
15637ed4
RG
232
/*
 *	Functions implemented as macros
 */

/*
 * Mark the page wanted and put the caller to sleep on it
 * (woken by PAGE_WAKEUP below).
 */
#define PAGE_ASSERT_WAIT(m, interruptible)	{ \
		(m)->flags |= PG_WANTED; \
		assert_wait((int) (m), (interruptible)); \
	}

/*
 * Clear the busy bit and, if anyone was waiting for the page,
 * wake them up.
 */
#define PAGE_WAKEUP(m)	{ \
		(m)->flags &= ~PG_BUSY; \
		if ((m)->flags & PG_WANTED) { \
			(m)->flags &= ~PG_WANTED; \
			thread_wakeup((int) (m)); \
		} \
	}

#define vm_page_lock_queues()	simple_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()	simple_unlock(&vm_page_queue_lock)

/* Clearing PG_CLEAN marks the page as modified (dirty). */
#define vm_page_set_modified(m)	{ (m)->flags &= ~PG_CLEAN; }
fde1aeb2
GW
254
255/* Some pmap things are declared here for the convenience of other bits of
256 code. */
257extern void pmap_bootstrap(vm_offset_t, vm_offset_t);
258extern void pmap_init(vm_offset_t, vm_offset_t);
259extern vm_offset_t pmap_map(vm_offset_t, vm_offset_t, vm_offset_t, int);
260extern void pmap_remove_all(vm_offset_t);
261extern void pmap_copy_on_write(vm_offset_t);
262extern void pmap_page_protect(vm_offset_t, vm_prot_t);
263extern void pmap_update(void);
264extern void pmap_zero_page(vm_offset_t);
265extern void pmap_copy_page(vm_offset_t, vm_offset_t);
266extern void pmap_clear_modify(vm_offset_t);
267extern void pmap_clear_reference(vm_offset_t);
268extern boolean_t pmap_is_referenced(vm_offset_t);
269extern boolean_t pmap_is_modified(vm_offset_t);
270extern vm_offset_t pmap_phys_ddress(int);
fde1aeb2 271
55768178 272
41aefbec
DG
273/*
274 * Keep page from being freed by the page daemon
275 * much of the same effect as wiring, except much lower
276 * overhead and should be used only for *very* temporary
277 * holding ("wiring").
278 */
279static inline void
280vm_page_hold(mem)
281 vm_page_t mem;
282{
283 mem->hold_count++;
284}
285
286static inline void
287vm_page_unhold(mem)
288 vm_page_t mem;
289{
290 if( --mem->hold_count < 0)
291 panic("vm_page_unhold: hold count < 0!!!");
292}
293
bbc3f849
GW
294#endif /* KERNEL */
295#endif /* _VM_PAGE_ */