/*
 * [unix-history] usr/src/lib/libkvm/kvm_sparc.c
 * (blame note: "use standard C declarations, use strerror")
 */
1/*-
2 * Copyright (c) 1992 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software developed by the Computer Systems
c9c64c8b
KB
6 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
7 * BG 91-66 and contributed to Berkeley.
b8b13822
KM
8 *
9 * %sccs.include.redist.c%
10 */
11
12#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_sparc.c	5.2 (Berkeley) %G%";
#endif /* LIBC_SCCS and not lint */
15
16/*
17 * Sparc machine dependent routines for kvm. Hopefully, the forthcoming
18 * vm code will one day obsolete this module.
19 */
20
21#include <sys/param.h>
22#include <sys/user.h>
23#include <sys/proc.h>
24#include <sys/stat.h>
25#include <nlist.h>
26#include <kvm.h>
27
28#include <vm/vm.h>
29#include <vm/vm_param.h>
30
31#include <limits.h>
#include <db.h>

34#include "kvm_private.h"
35
36#define NPMEG 128
37
38/* XXX from sparc/pmap.c */
39#define MAXMEM (128 * 1024 * 1024) /* no more than 128 MB phys mem */
40#define NPGBANK 16 /* 2^4 pages per bank (64K / bank) */
41#define BSHIFT 4 /* log2(NPGBANK) */
42#define BOFFSET (NPGBANK - 1)
43#define BTSIZE (MAXMEM / NBPG / NPGBANK)
44#define HWTOSW(pmap_stod, pg) (pmap_stod[(pg) >> BSHIFT] | ((pg) & BOFFSET))
45
46struct vmstate {
47 pmeg_t segmap[NKSEG];
48 int pmeg[NPMEG][NPTESG];
49 int pmap_stod[BTSIZE]; /* dense to sparse */
50};
51
52void
53_kvm_freevtop(kd)
54 kvm_t *kd;
55{
56 if (kd->vmst != 0)
57 free(kd->vmst);
58}
59
60int
61_kvm_initvtop(kd)
62 kvm_t *kd;
63{
64 register int i;
65 register int off;
66 register struct vmstate *vm;
67 struct stat st;
68 struct nlist nlist[2];
69
70 vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
71 if (vm == 0)
72 return (-1);
73
74 kd->vmst = vm;
75
76 if (fstat(kd->pmfd, &st) < 0)
77 return (-1);
78 /*
79 * Read segment table.
80 */
81 off = st.st_size - ctob(btoc(sizeof(vm->segmap)));
82 errno = 0;
83 if (lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0 ||
84 read(kd->pmfd, (char *)vm->segmap, sizeof(vm->segmap)) < 0) {
85 _kvm_err(kd, kd->program, "cannot read segment map");
86 return (-1);
87 }
88 /*
89 * Read PMEGs.
90 */
91 off = st.st_size - ctob(btoc(sizeof(vm->pmeg)) +
92 btoc(sizeof(vm->segmap)));
93 errno = 0;
94 if (lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0 ||
95 read(kd->pmfd, (char *)vm->pmeg, sizeof(vm->pmeg)) < 0) {
96 _kvm_err(kd, kd->program, "cannot read PMEG table");
97 return (-1);
98 }
99 /*
100 * Make pmap_stod be an identity map so we can bootstrap it in.
101 * We assume it's in the first contiguous chunk of physical memory.
102 */
103 for (i = 0; i < BTSIZE; ++i)
104 vm->pmap_stod[i] = i << 4;
105
106 /*
107 * It's okay to do this nlist separately from the one kvm_getprocs()
108 * does, since the only time we could gain anything by combining
109 * them is if we do a kvm_getprocs() on a dead kernel, which is
110 * not too common.
111 */
112 nlist[0].n_name = "_pmap_stod";
113 nlist[1].n_name = 0;
114 if (kvm_nlist(kd, nlist) != 0) {
115 _kvm_err(kd, kd->program, "pmap_stod: no such symbol");
116 return (-1);
117 }
118 if (kvm_read(kd, (u_long)nlist[0].n_value,
119 (char *)vm->pmap_stod, sizeof(vm->pmap_stod))
120 != sizeof(vm->pmap_stod)) {
121 _kvm_err(kd, kd->program, "cannot read pmap_stod");
122 return (-1);
123 }
124 return (0);
125}
126
127#define VA_OFF(va) (va & (NBPG - 1))
128
129/*
130 * Translate a user virtual address to a physical address.
131 */
/*
 * Translate a user virtual address to a physical address for process p.
 * On success, stores the physical address in *pa and returns the number
 * of bytes contiguously valid from that address (NBPG - page offset).
 * Returns 0 on failure (corrupt proc, kernel address, or invalid PTE).
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	int kva, pte;
	register int off, frame;
	register struct vmspace *vms = p->p_vmspace;

	/* The vmspace pointer itself must be a kernel address. */
	if ((u_long)vms < KERNBASE) {
		_kvm_err(kd, kd->program, "_kvm_uvatop: corrupt proc");
		return (0);
	}
	/* User addresses only; kernel VAs are handled by _kvm_kvatop. */
	if (va >= KERNBASE)
		return (0);
	/*
	 * Get the PTE.  This takes two steps.  We read the
	 * base address of the table, then we index it.
	 * Note that the index pte table is indexed by
	 * virtual segment rather than physical segment.
	 */
	/* kva first holds the address of the segment's pte-table slot,
	 * then is overwritten with the table base read from the kernel. */
	kva = (u_long)&vms->vm_pmap.pm_rpte[VA_VSEG(va)];
	if (kvm_read(kd, kva, (char *)&kva, 4) != 4 || kva == 0)
		goto invalid;
	/* Index into the per-segment table by virtual page number. */
	kva += sizeof(vms->vm_pmap.pm_rpte[0]) * VA_VPG(va);
	if (kvm_read(kd, kva, (char *)&pte, 4) == 4 && (pte & PG_V)) {
		off = VA_OFF(va);
		/*
		 * /dev/mem adheres to the hardware model of physical memory
		 * (with holes in the address space), while crashdumps
		 * adhere to the contiguous software model.
		 */
		if (ISALIVE(kd))
			frame = pte & PG_PFNUM;
		else
			frame = HWTOSW(kd->vmst->pmap_stod, pte & PG_PFNUM);
		*pa = (frame << PGSHIFT) | off;
		return (NBPG - off);
	}
invalid:
	_kvm_err(kd, 0, "invalid address (%x)", va);
	return (0);
}
177
178/*
179 * Translate a kernel virtual address to a physical address using the
180 * mapping information in kd->vm. Returns the result in pa, and returns
181 * the number of bytes that are contiguously available from this
182 * physical address. This routine is used only for crashdumps.
183 */
184int
185_kvm_kvatop(kd, va, pa)
186 kvm_t *kd;
187 u_long va;
188 u_long *pa;
189{
190 register struct vmstate *vm;
191 register int s;
192 register int pte;
193 register int off;
194
195 if (va >= KERNBASE) {
196 vm = kd->vmst;
197 s = vm->segmap[VA_VSEG(va) - NUSEG];
198 pte = vm->pmeg[s][VA_VPG(va)];
199 if ((pte & PG_V) != 0) {
200 off = VA_OFF(va);
201 *pa = (HWTOSW(vm->pmap_stod, pte & PG_PFNUM)
202 << PGSHIFT) | off;
203
204 return (NBPG - off);
205 }
206 }
207 _kvm_err(kd, 0, "invalid address (%x)", va);
208 return (0);
209}