Commit | Line | Data |
---|---|---|
15637ed4 RG |
1 | /* |
2 | * Copyright (c) 1991 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * This code is derived from software contributed to Berkeley by | |
6 | * the Systems Programming Group of the University of Utah Computer | |
7 | * Science Department and William Jolitz of UUNET Technologies Inc. | |
8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | |
14 | * 2. Redistributions in binary form must reproduce the above copyright | |
15 | * notice, this list of conditions and the following disclaimer in the | |
16 | * documentation and/or other materials provided with the distribution. | |
17 | * 3. All advertising materials mentioning features or use of this software | |
18 | * must display the following acknowledgement: | |
19 | * This product includes software developed by the University of | |
20 | * California, Berkeley and its contributors. | |
21 | * 4. Neither the name of the University nor the names of its contributors | |
22 | * may be used to endorse or promote products derived from this software | |
23 | * without specific prior written permission. | |
24 | * | |
25 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
27 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
28 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
29 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
30 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
31 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
32 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
33 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
34 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
35 | * SUCH DAMAGE. | |
36 | * | |
15637ed4 RG |
37 | * Derived from hp300 version by Mike Hibler, this version by William |
38 | * Jolitz uses a recursive map [a pde points to the page directory] to | |
39 | * map the page tables using the pagetables themselves. This is done to | |
40 | * reduce the impact on kernel virtual memory for lots of sparse address | |
41 | * space, and to reduce the cost of memory to each process. | |
42 | * | |
adc13511 RG |
43 | * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 |
44 | * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 | |
fde1aeb2 | 45 | * $Id: pmap.h,v 1.6 1993/11/13 02:25:16 davidg Exp $ |
15637ed4 RG |
46 | */ |
47 | ||
48 | #ifndef _PMAP_MACHINE_ | |
49 | #define _PMAP_MACHINE_ 1 | |
50 | ||
51 | /* | |
52 | * 386 page table entry and page table directory | |
53 | * W.Jolitz, 8/89 | |
54 | */ | |
15637ed4 RG |
/*
 * i386 page directory entry (PDE).  The bitfields are declared
 * low-order-bit first to match the hardware format: bit 0 is the
 * valid (present) bit and bits 12-31 hold the physical frame number
 * of the page table this entry maps.
 */
struct pde
{
unsigned int
	pd_v:1,			/* valid (present) bit */
	pd_prot:2,		/* access control (write enable, user/supervisor) */
	pd_mbz1:2,		/* reserved, must be zero */
	pd_u:1,			/* hardware maintained 'used' (accessed) bit */
	:1,			/* not used */
	pd_mbz2:2,		/* reserved, must be zero */
	:3,			/* reserved for software */
	pd_pfnum:20;		/* physical page frame number of pte's */
};
67 | ||
fde1aeb2 GW |
/* Masks/shifts selecting the two index fields of a 32-bit virtual address. */
#define PD_MASK		0xffc00000UL	/* page directory address bits (VA 22-31) */
#define PT_MASK		0x003ff000UL	/* page table address bits (VA 12-21) */
#define PD_SHIFT	22		/* page directory address shift */
#define PG_SHIFT	12		/* page table address shift */
72 | ||
/*
 * i386 page table entry (PTE).  Same low-bit-first hardware layout as
 * struct pde; bits 9-11 are ignored by the MMU and used by software
 * here for the wired (pg_w) and non-cacheable (pg_nc) attributes.
 */
struct pte
{
unsigned int
	pg_v:1,			/* valid (present) bit */
	pg_prot:2,		/* access control (write enable, user/supervisor) */
	pg_mbz1:2,		/* reserved, must be zero */
	pg_u:1,			/* hardware maintained 'used' (accessed) bit */
	pg_m:1,			/* hardware maintained modified (dirty) bit */
	pg_mbz2:2,		/* reserved, must be zero */
	pg_w:1,			/* software, wired down page */
	:1,			/* software (unused) */
	pg_nc:1,		/* software 'uncacheable page' bit */
	pg_pfnum:20;		/* physical page frame number */
};
87 | ||
/* Bit values for the fields of struct pte/pde above. */
#define PG_V		0x00000001	/* valid (present) */
#define PG_RO		0x00000000	/* read-only (write bit clear) */
#define PG_RW		0x00000002	/* read/write */
#define PG_u		0x00000004	/* user-mode accessible */
#define PG_PROT		0x00000006	/* all protection bits */
#define PG_W		0x00000200	/* software: page wired down (pg_w) */
#define PG_N		0x00000800	/* software: non-cacheable (pg_nc) */
#define PG_M		0x00000040	/* hardware: modified (dirty, pg_m) */
#define PG_U		0x00000020	/* hardware: used (accessed, pg_u) */
#define PG_FRAME	0xfffff000UL	/* mask of the page frame number bits */

/* Composite protections: kernel/user read/write combinations. */
#define PG_NOACC	0		/* no access (not present) */
#define PG_KR		0x00000000	/* kernel read-only */
#define PG_KW		0x00000002	/* kernel read/write */
#define PG_URKR		0x00000004	/* user read, kernel read */
#define PG_URKW		0x00000004	/* user read, kernel read/write (same bits: no such i386 mode) */
#define PG_UW		0x00000006	/* user and kernel read/write */

/* Garbage for current bastardized pager that assumes a hp300 */
#define PG_NV	0
#define PG_CI	0
adc13511 | 109 | |
15637ed4 RG |
110 | /* |
111 | * Page Protection Exception bits | |
112 | */ | |
0d1f4bfa RG |
113 | #define PGEX_P 0x01 /* Protection violation vs. not present */ |
114 | #define PGEX_W 0x02 /* during a Write cycle */ | |
115 | #define PGEX_U 0x04 /* access from User mode (UPL) */ | |
15637ed4 RG |
116 | |
117 | typedef struct pde pd_entry_t; /* page directory entry */ | |
118 | typedef struct pte pt_entry_t; /* Mach page table entry */ | |
119 | ||
120 | /* | |
3fc0a319 DG |
121 | * NKPDE controls the virtual address space of the kernel; whatever is left, |
122 | * minus the alternate page table area, is given to the user (NUPDE) | |
15637ed4 | 123 | */ |
adc13511 | 124 | #define NKPDE 7 /* number of kernel pde's */ |
3fc0a319 | 125 | #define NUPDE (NPTEPG-NKPDE-1)/* number of user pde's */ |
0d1f4bfa RG |
126 | /* |
127 | * The *PTDI values control the layout of virtual memory | |
128 | * | |
129 | * XXX This works for now, but I am not real happy with it, I'll fix it | |
130 | * right after I fix locore.s and the magic 28K hole | |
131 | */ | |
adc13511 RG |
132 | #define APTDPTDI (NPTEPG-1) /* alt ptd entry that points to APTD */ |
133 | #define KPTDI (APTDPTDI-NKPDE)/* start of kernel virtual pde's */ | |
42811493 | 134 | #define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */ |
3fc0a319 DG |
135 | #define KSTKPTDI (PTDPTDI-1) /* ptd entry for u./kernel&user stack */ |
136 | #define KSTKPTEOFF (NBPG/sizeof(struct pde)-UPAGES) /* pte entry for kernel stack */ | |
137 | ||
138 | #define PDESIZE sizeof(struct pde) /* for assembly files */ | |
139 | #define PTESIZE sizeof(struct pte) /* for assembly files */ | |
15637ed4 RG |
140 | |
141 | /* | |
142 | * Address of current and alternate address space page table maps | |
143 | * and directories. | |
144 | */ | |
145 | #ifdef KERNEL | |
146 | extern struct pte PTmap[], APTmap[], Upte; | |
147 | extern struct pde PTD[], APTD[], PTDpde, APTDpde, Upde; | |
adc13511 | 148 | extern pt_entry_t *Sysmap; |
15637ed4 RG |
149 | |
150 | extern int IdlePTD; /* physical address of "Idle" state directory */ | |
151 | #endif | |
152 | ||
/*
 * Virtual address to page table entry and to physical address.
 * Likewise for the alternate address space (the APTmap window used
 * when operating on a pmap other than the current one).
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define vtopte(va)	(PTmap + i386_btop(va))
#define kvtopte(va)	vtopte(va)
#define ptetov(pt)	(i386_ptob(pt - PTmap))
#define vtophys(va)	(i386_ptob(vtopte(va)->pg_pfnum) | ((int)(va) & PGOFSET))
#define ispt(va)	((va) >= UPT_MIN_ADDRESS && (va) <= KPT_MAX_ADDRESS)

#define avtopte(va)	(APTmap + i386_btop(va))
#define ptetoav(pt)	(i386_ptob(pt - APTmap))
#define avtophys(va)	(i386_ptob(avtopte(va)->pg_pfnum) | ((int)(va) & PGOFSET))
15637ed4 RG |
168 | |
/*
 * Macros to generate page directory/table indices from a virtual
 * address: pdei() extracts the page directory index (top 10 bits of
 * the VA), ptei() the page table index (the next 10 bits).
 */

#define pdei(va)	(((va)&PD_MASK)>>PD_SHIFT)
#define ptei(va)	(((va)&PT_MASK)>>PG_SHIFT)
175 | ||
/*
 * Machine-dependent pmap structure: the i386 state needed to describe
 * one virtual address space (its page directory plus bookkeeping).
 */

struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	boolean_t		pm_pdchanged;	/* pdir changed (needs reload) */
	short			pm_dref;	/* page directory ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};
189 | ||
190 | typedef struct pmap *pmap_t; | |
191 | ||
192 | #ifdef KERNEL | |
193 | extern pmap_t kernel_pmap; | |
194 | #endif | |
195 | ||
196 | /* | |
197 | * Macros for speed | |
198 | */ | |
/*
 * Make `pmapp' the active address space for the process owning pcb
 * `pcbp': record the physical address of its page directory in the
 * pcb, and if it belongs to the currently running process load it
 * into %cr3 immediately.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement; the previous definition expanded to a bare `if', which
 * mis-binds a following `else' and breaks unbraced if/else call sites.
 */
#define PMAP_ACTIVATE(pmapp, pcbp) \
do { \
	if ((pmapp) != NULL /*&& (pmapp)->pm_pdchanged */) { \
		(pcbp)->pcb_cr3 = \
		    pmap_extract(kernel_pmap, (vm_offset_t)(pmapp)->pm_pdir); \
		if ((pmapp) == &curproc->p_vmspace->vm_pmap) \
			load_cr3((pcbp)->pcb_cr3); \
		(pmapp)->pm_pdchanged = FALSE; \
	} \
} while (0)
207 | ||
0d1f4bfa | 208 | #define PMAP_DEACTIVATE(pmapp, pcbp) |
15637ed4 RG |
209 | |
/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table
 * (indexed by physical page, see pa_to_pvh below).
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry on this page's chain */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags (PV_CI, PV_PTPAGE) */
} *pv_entry_t;
220 | ||
221 | #define PV_ENTRY_NULL ((pv_entry_t) 0) | |
222 | ||
223 | #define PV_CI 0x01 /* all entries must be cache inhibited */ | |
0d1f4bfa | 224 | #define PV_PTPAGE 0x02 /* entry maps a page table page */ |
15637ed4 RG |
225 | |
226 | #ifdef KERNEL | |
227 | ||
228 | pv_entry_t pv_table; /* array of entries, one per page */ | |
229 | ||
0d1f4bfa RG |
230 | #define pa_index(pa) atop(pa - vm_first_phys) |
231 | #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) | |
15637ed4 RG |
232 | |
233 | #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) | |
234 | ||
fde1aeb2 GW |
/*
 * Machine-dependent pmap interface, implemented in the corresponding
 * pmap.c.  See the VM system's pmap(9)-style contract for semantics.
 */
extern pmap_t pmap_create(vm_size_t);
extern void pmap_pinit(struct pmap *);
extern void pmap_destroy(pmap_t);
extern void pmap_release(struct pmap *);
extern void pmap_reference(pmap_t);
extern void pmap_remove(struct pmap *, vm_offset_t, vm_offset_t);
extern void pmap_protect(struct pmap *, vm_offset_t, vm_offset_t, vm_prot_t);
extern void pmap_enter(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
extern void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
extern struct pte *pmap_pte(pmap_t, vm_offset_t);
extern vm_offset_t pmap_extract(pmap_t, vm_offset_t);
extern void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
extern void pmap_collect(pmap_t);
struct pcb; extern void pmap_activate(pmap_t, struct pcb *);
extern pmap_t pmap_kernel(void);
extern void pmap_pageable(pmap_t, vm_offset_t, vm_offset_t, boolean_t);
251 | ||
252 | ||
126bd309 | 253 | #endif /* KERNEL */ |
15637ed4 | 254 | |
126bd309 | 255 | #endif /* _PMAP_MACHINE_ */ |