/* BSD 4.4-Lite1 release: usr/src/sys/sparc/include/pmap.h */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/11/93
 *
 * from: $Header: pmap.h,v 1.11 93/05/25 10:36:09 torek Exp $
 */

#ifndef _SPARC_PMAP_H_
#define _SPARC_PMAP_H_

/* pte.h supplies pmeg_t and the PG_* PTE bits referenced below. */
#include <machine/pte.h>

/*
 * Pmap structure.
 *
 * The pmap structure really comes in two variants, one---a single
 * instance---for kernel virtual memory and the other---up to nproc
 * instances---for user virtual memory.  Unfortunately, we have to mash
 * both into the same structure.  Fortunately, they are almost the same.
 *
 * The kernel begins at 0xf8000000 and runs to 0xffffffff (although
 * some of this is not actually used).  Kernel space, including DVMA
 * space (for now?), is mapped identically into all user contexts.
 * There is no point in duplicating this mapping in each user process
 * so they do not appear in the user structures.
 *
 * User space begins at 0x00000000 and runs through 0x1fffffff,
 * then has a `hole', then resumes at 0xe0000000 and runs until it
 * hits the kernel space at 0xf8000000.  This can be mapped
 * contiguously by ignoring the top two bits and pretending the
 * space goes from 0 to 37ffffff.  Typically the lower range is
 * used for text+data and the upper for stack, but the code here
 * makes no such distinction.
 *
 * Since each virtual segment covers 256 kbytes, the user space
 * requires 3584 segments, while the kernel (including DVMA) requires
 * only 512 segments.
 *
 * The segment map entry for virtual segment vseg is offset in
 * pmap->pm_rsegmap by 0 if pmap is not the kernel pmap, or by
 * NUSEG if it is.  We keep a pointer called pmap->pm_segmap
 * pre-offset by this value.  pmap->pm_segmap thus contains the
 * values to be loaded into the user portion of the hardware segment
 * map so as to reach the proper PMEGs within the MMU.  The kernel
 * mappings are `set early' and are always valid in every context
 * (every change is always propagated immediately).
 *
 * The PMEGs within the MMU are loaded `on demand'; when a PMEG is
 * taken away from context `c', the pmap for context c has its
 * corresponding pm_segmap[vseg] entry marked invalid (the MMU segment
 * map entry is also made invalid at the same time).  Thus
 * pm_segmap[vseg] is the `invalid pmeg' number (127 or 511) whenever
 * the corresponding PTEs are not actually in the MMU.  On the other
 * hand, pm_pte[vseg] is NULL only if no pages in that virtual segment
 * are in core; otherwise it points to a copy of the 32 or 64 PTEs that
 * must be loaded in the MMU in order to reach those pages.
 * pm_npte[vseg] counts the number of valid pages in each vseg.
 *
 * XXX performance: faster to count valid bits?
 *
 * The kernel pmap cannot malloc() PTEs since malloc() will sometimes
 * allocate a new virtual segment.  Since kernel mappings are never
 * `stolen' out of the MMU, we just keep all its PTEs there, and
 * have no software copies.  Its mmu entries are nonetheless kept on lists
 * so that the code that fiddles with mmu lists has something to fiddle.
 */
/*
 * Segment-map sizing: 4096 hardware segments total, split between the
 * kernel (from KERNBASE up) and user space.
 */
#define NKSEG	((int)((-(unsigned)KERNBASE) / NBPSG))	/* i.e., 512 */
#define NUSEG	(4096 - NKSEG)				/* i.e., 3584 */

/* data appearing in both user and kernel pmaps */
struct pmap_common {
	union	ctxinfo *pmc_ctx;	/* current context, if any */
	int	pmc_ctxnum;		/* current context's number */
#if NCPUS > 1
	simple_lock_data_t pmc_lock;	/* spinlock */
#endif
	int	pmc_refcount;		/* just what it says */
	struct	mmuentry *pmc_mmuforw;	/* pmap pmeg chain */
	struct	mmuentry **pmc_mmuback;	/* (two way street) */
	pmeg_t	*pmc_segmap;		/* points to pm_rsegmap per above */
	u_char	*pmc_npte;		/* points to pm_rnpte */
	int	**pmc_pte;		/* points to pm_rpte */
};

/* data appearing only in user pmaps */
struct pmap {
	struct	pmap_common pmc;	/* shared head; see pm_* macros below */
	pmeg_t	pm_rsegmap[NUSEG];	/* segment map */
	u_char	pm_rnpte[NUSEG];	/* number of valid PTEs per seg */
	int	*pm_rpte[NUSEG];	/* points to PTEs for valid segments */
};

/* data appearing only in the kernel pmap */
struct kpmap {
	struct	pmap_common pmc;	/* shared head; see pm_* macros below */
	pmeg_t	pm_rsegmap[NKSEG];	/* segment map */
	u_char	pm_rnpte[NKSEG];	/* number of valid PTEs per kseg */
	int	*pm_rpte[NKSEG];	/* always NULL */
};

/*
 * Accessors for the fields of struct pmap_common, so that both struct
 * pmap and struct kpmap can be referenced through the same pm_* names.
 */
#define pm_ctx		pmc.pmc_ctx
#define pm_ctxnum	pmc.pmc_ctxnum
#define pm_lock		pmc.pmc_lock
#define pm_refcount	pmc.pmc_refcount
#define pm_mmuforw	pmc.pmc_mmuforw
#define pm_mmuback	pmc.pmc_mmuback
#define pm_segmap	pmc.pmc_segmap
#define pm_npte		pmc.pmc_npte
#define pm_pte		pmc.pmc_pte

#ifdef KERNEL

typedef struct pmap *pmap_t;
#define PMAP_NULL	((pmap_t)0)

/*
 * The single kernel pmap; the cast lets machine-independent code treat
 * the struct kpmap as a struct pmap (they share the pmap_common head).
 */
extern struct kpmap kernel_pmap_store;
#define kernel_pmap	((struct pmap *)(&kernel_pmap_store))

/* These expand to nothing on this port. */
#define PMAP_ACTIVATE(pmap, pcb, iscurproc)
#define PMAP_DEACTIVATE(pmap, pcb)

/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'.  Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The ordering below is important: PMAP_PGTYPE << PG_TNC must give
 * exactly the PG_NC and PG_TYPE bits.
 */
#define PMAP_OBIO	1	/* tells pmap_enter to use PG_OBIO */
#define PMAP_VME16	2	/* etc */
#define PMAP_VME32	3	/* etc */
#define PMAP_NC		4	/* tells pmap_enter to set PG_NC */
#define PMAP_TNC	7	/* mask to get PG_TYPE & PG_NC */

#endif /* KERNEL */

#endif /* _SPARC_PMAP_H_ */