/*
 * Historical source: lib/libutil/kvm.c from the unix-history archive
 * (commit 15637ed4, RG — "Remove duplicate specification of shared
 * lib version number").  CVS annotation residue removed.
 */
1/*-
2 * Copyright (c) 1989 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
34 * -------------------- ----- ----------------------
35 * CURRENT PATCH LEVEL: 3 00136
36 * -------------------- ----- ----------------------
37 *
38 * 08 Sep 92 Greenman & Kranenburg Change vaddr calc, move bogus #endif
39 * 05 Aug 92 David Greenman Fix kernel namelist db create/use
40 * 08 Aug 93 Paul Kranenburg Fix for command line args from ps and w
41 */
42
43#if defined(LIBC_SCCS) && !defined(lint)
44static char sccsid[] = "@(#)kvm.c 5.18 (Berkeley) 5/7/91";
45#endif /* LIBC_SCCS and not lint */
46
47/*
48 * Updated for 386BSD 0.1 by David Greenman (davidg%implode@percy.rain.com)
49 * and Paul Kranenburg (pk@cs.few.eur.nl)
50 * 20-Aug-1992
51 * And again by same on 04-Aug-1993
52 */
53
54
55#include <sys/param.h>
56#include <sys/user.h>
57#include <sys/proc.h>
58#include <sys/ioctl.h>
59#include <sys/kinfo.h>
60#include <sys/tty.h>
61#include <machine/vmparam.h>
62#include <fcntl.h>
63#include <nlist.h>
64#include <kvm.h>
65#include <ndbm.h>
66#include <limits.h>
67#include <paths.h>
68#include <stdio.h>
69#include <string.h>
70
71#ifdef SPPWAIT
72#define NEWVM
73#endif
74
75#ifdef NEWVM
76#define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
77#define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
78#include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
79#include <vm/vm_page.h>
80#include <vm/swap_pager.h>
81#include <sys/kinfo_proc.h>
82#ifdef hp300
83#include <hp300/hp300/pte.h>
84#endif
85#else /* NEWVM */
86#include <machine/pte.h>
87#include <sys/vmmac.h>
88#include <sys/text.h>
89#endif /* NEWVM */
90
/*
 * files: pathnames and descriptors for the kernel image, physical
 * memory, kernel virtual memory, and swap.  For a live kernel,
 * kmem is /dev/kmem; for a dead one it aliases the core file (mem).
 */
static	const char *unixf, *memf, *kmemf, *swapf;
static	int unixx, mem, kmem, swap;
static	DBM *db;		/* cached kvm_ symbol database, if open */
/*
 * flags
 */
static	int deadkernel;		/* nonzero: post-mortem on a core file */
static	int kvminit = 0;	/* getkvars() has run */
static	int kvmfilesopen = 0;	/* kvm_openfiles() has run */
/*
 * state: snapshot of the process table and a roving pointer into it
 */
static	struct kinfo_proc *kvmprocbase, *kvmprocptr;
static	int kvmnprocs;
/*
 * u. buffer: the current process's user area, viewed either as a
 * struct user or as raw pages (they occupy the same storage).
 */
static union {
	struct	user user;
	char	upages[UPAGES][NBPG];
} user;

#ifdef NEWVM
/* One physically contiguous run of a process's backing store in swap. */
struct swapblk {
	long	offset;		/* offset in swap device */
	long	size;		/* remaining size of block in swap device */
};
#endif
/*
 * random other stuff
 */
#ifndef NEWVM
static	struct pte *Usrptmap, *usrpt;
static	struct pte *Sysmap;
static	int	Syssize;
#endif
static	int	dmmin, dmmax;	/* swap interleave parameters */
static	int	pcbpf;
static	int	argaddr0;	/* XXX */
static	int	argaddr1;	/* physical addr of the argument page(s) */
static	int	swaddr;		/* swap offset of args when not resident */
static	int	nswap;
static	char	*tmp;		/* scratch for the basename() macro */
#if defined(hp300)
static	int	lowram;
static	struct ste *Sysseg;
#endif
#if defined(i386)
static	struct pde *PTD;	/* page directory of a dead kernel */
#endif

#define basename(cp)	((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
#define	MAXSYMSIZE	256	/* longest symbol name we handle */

#if defined(hp300)
#define pftoc(f)	((f) - lowram)
#define iskva(v)	(1)
#endif

/* Default page-frame/kernel-address helpers for other architectures. */
#ifndef pftoc
#define pftoc(f)	(f)
#endif
#ifndef iskva
#define iskva(v)	((u_long)(v) & KERNBASE)
#endif
159
/*
 * Kernel symbols looked up by getkvars().  The X_* index #defines
 * below MUST stay in step with the array order; entries from
 * X_DEADKERNEL on are only resolved for a dead kernel (the live-
 * kernel path truncates the list there in kvm_openfiles()).
 */
static struct nlist nl[] = {
	{ "_Usrptmap" },
#define	X_USRPTMAP	0
	{ "_usrpt" },
#define	X_USRPT		1
	{ "_nswap" },
#define	X_NSWAP		2
	{ "_dmmin" },
#define	X_DMMIN		3
	{ "_dmmax" },
#define	X_DMMAX		4
	{ "_vm_page_buckets" },
#define X_VM_PAGE_BUCKETS	5
	{ "_vm_page_hash_mask" },
#define X_VM_PAGE_HASH_MASK	6
	{ "_page_shift" },
#define X_PAGE_SHIFT	7
	/*
	 * everything here and down, only if a dead kernel
	 */
	{ "_Sysmap" },
#define	X_SYSMAP	8
#define	X_DEADKERNEL	X_SYSMAP
	{ "_Syssize" },
#define	X_SYSSIZE	9
	{ "_allproc" },
#define X_ALLPROC	10
	{ "_zombproc" },
#define X_ZOMBPROC	11
	{ "_nproc" },
#define	X_NPROC		12
#define	X_LAST		12
#if defined(hp300)
	{ "_Sysseg" },
#define	X_SYSSEG	(X_LAST+1)
	{ "_lowram" },
#define	X_LOWRAM	(X_LAST+2)
#endif
#if defined(i386)
	{ "_IdlePTD" },
#define	X_IdlePTD	(X_LAST+1)
#endif
	{ "" },
};

/* Forward declarations for the file-local helpers below. */
static off_t Vtophys();
static void klseek(), seterr(), setsyserr(), vstodb();
static int getkvars(), kvm_doprocs(), kvm_init();
#ifdef NEWVM
static int vatosw();
static int findpage();
#endif
212
/*
 * Open the kernel image (uf), memory (mf) and swap (sf) files,
 * defaulting each NULL argument to the running system's path.
 * Passing a non-NULL mf selects post-mortem mode: the core file
 * doubles as kmem and the dead-kernel symbols become required.
 *
 * returns 0 if files were opened now,
 * 1 if files were already opened,
 * -1 if files could not be opened.
 */
kvm_openfiles(uf, mf, sf)
	const char *uf, *mf, *sf;
{
	if (kvmfilesopen)
		return (1);
	unixx = mem = kmem = swap = -1;
	unixf = (uf == NULL) ? _PATH_UNIX : uf;
	memf = (mf == NULL) ? _PATH_MEM : mf;

	if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", unixf);
		goto failed;
	}
	if ((mem = open(memf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", memf);
		goto failed;
	}
	if (sf != NULL)
		swapf = sf;
	if (mf != NULL) {
		/* dead kernel: the core file stands in for /dev/kmem */
		deadkernel++;
		kmemf = mf;
		kmem = mem;
		swap = -1;
	} else {
		kmemf = _PATH_KMEM;
		if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
			setsyserr("can't open %s", kmemf);
			goto failed;
		}
		swapf = (sf == NULL) ? _PATH_DRUM : sf;
		/*
		 * live kernel - avoid looking up nlist entries
		 * past X_DEADKERNEL.
		 */
		nl[X_DEADKERNEL].n_name = "";
	}
	if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
		seterr("can't open %s", swapf);
		goto failed;
	}
	kvmfilesopen++;
	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
		return (-1);
	return (0);
failed:
	kvm_close();
	return (-1);
}
267
268static
269kvm_init(uf, mf, sf)
270 char *uf, *mf, *sf;
271{
272 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
273 return (-1);
274 if (getkvars() == -1)
275 return (-1);
276 kvminit = 1;
277
278 return (0);
279}
280
/*
 * Close every file descriptor and database handle and reset all
 * module state so kvm_openfiles() can be called again.  Note the
 * ordering: in post-mortem mode kmem aliases mem, so kmem is only
 * closed here when it is a distinct descriptor.
 */
kvm_close()
{
	if (unixx != -1) {
		close(unixx);
		unixx = -1;
	}
	if (kmem != -1) {
		if (kmem != mem)
			close(kmem);
		/* otherwise kmem is a copy of mem, and will be closed below */
		kmem = -1;
	}
	if (mem != -1) {
		close(mem);
		mem = -1;
	}
	if (swap != -1) {
		close(swap);
		swap = -1;
	}
	if (db != NULL) {
		dbm_close(db);
		db = NULL;
	}
	kvminit = 0;
	kvmfilesopen = 0;
	deadkernel = 0;
#ifndef NEWVM
	if (Sysmap) {
		free(Sysmap);
		Sysmap = NULL;
	}
#endif
}
315
316kvm_nlist(nl)
317 struct nlist *nl;
318{
319 datum key, data;
320 char dbname[MAXPATHLEN];
321 char dbversion[_POSIX2_LINE_MAX];
322 char kversion[_POSIX2_LINE_MAX];
323 int dbversionlen;
324 char symbuf[MAXSYMSIZE];
325 struct nlist nbuf, *n;
326 int num, did;
327
328 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
329 return (-1);
330 if (deadkernel)
331 goto hard2;
332 /*
333 * initialize key datum
334 */
335 key.dptr = symbuf;
336
337 if (db != NULL)
338 goto win; /* off to the races */
339 /*
340 * open database
341 */
342 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
343 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
344 goto hard2;
345 /*
346 * read version out of database
347 */
348 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
349 key.dsize = (sizeof ("VERSION") - 1);
350 data = dbm_fetch(db, key);
351 if (data.dptr == NULL)
352 goto hard1;
353 bcopy(data.dptr, dbversion, data.dsize);
354 dbversionlen = data.dsize;
355 /*
356 * read version string from kernel memory
357 */
358 bcopy("_version", symbuf, sizeof ("_version")-1);
359 key.dsize = (sizeof ("_version")-1);
360 data = dbm_fetch(db, key);
361 if (data.dptr == NULL)
362 goto hard1;
363 if (data.dsize != sizeof (struct nlist))
364 goto hard1;
365 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
366 lseek(kmem, nbuf.n_value, 0);
367 if (read(kmem, kversion, dbversionlen) != dbversionlen)
368 goto hard1;
369 /*
370 * if they match, we win - otherwise do it the hard way
371 */
372 if (bcmp(dbversion, kversion, dbversionlen) != 0)
373 goto hard1;
374 /*
375 * getem from the database.
376 */
377win:
378 num = did = 0;
379 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
380 int len;
381 /*
382 * clear out fields from users buffer
383 */
384 n->n_type = 0;
385 n->n_other = 0;
386 n->n_desc = 0;
387 n->n_value = 0;
388 /*
389 * query db
390 */
391 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
392 seterr("symbol too large");
393 return (-1);
394 }
395 (void)strcpy(symbuf, n->n_name);
396 key.dsize = len;
397 data = dbm_fetch(db, key);
398 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
399 continue;
400 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
401 n->n_value = nbuf.n_value;
402 n->n_type = nbuf.n_type;
403 n->n_desc = nbuf.n_desc;
404 n->n_other = nbuf.n_other;
405 did++;
406 }
407 return (num - did);
408hard1:
409 dbm_close(db);
410 db = NULL;
411hard2:
412 num = nlist(unixf, nl);
413 if (num == -1)
414 seterr("nlist (hard way) failed");
415 return (num);
416}
417
418kvm_getprocs(what, arg)
419 int what, arg;
420{
421 static int ocopysize = -1;
422
423 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
424 return (NULL);
425 if (!deadkernel) {
426 int ret, copysize;
427
428 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
429 setsyserr("can't get estimate for kerninfo");
430 return (-1);
431 }
432 copysize = ret;
433 if (copysize > ocopysize &&
170dc86a
DG
434 (kvmprocbase = (struct kinfo_proc *)
435 realloc(kvmprocbase, copysize)) == NULL) {
15637ed4
RG
436 seterr("out of memory");
437 return (-1);
438 }
439 ocopysize = copysize;
440 if ((ret = getkerninfo(what, kvmprocbase, &copysize,
441 arg)) == -1) {
442 setsyserr("can't get proc list");
443 return (-1);
444 }
445 if (copysize % sizeof (struct kinfo_proc)) {
446 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
447 copysize, sizeof (struct kinfo_proc));
448 return (-1);
449 }
450 kvmnprocs = copysize / sizeof (struct kinfo_proc);
451 } else {
452 int nproc;
453
454 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
455 sizeof (int)) != sizeof (int)) {
456 seterr("can't read nproc");
457 return (-1);
458 }
459 if ((kvmprocbase = (struct kinfo_proc *)
460 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
461 seterr("out of memory (addr: %x nproc = %d)",
462 nl[X_NPROC].n_value, nproc);
463 return (-1);
464 }
465 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
466 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
467 }
468 kvmprocptr = kvmprocbase;
469
470 return (kvmnprocs);
471}
472
473/*
474 * XXX - should NOT give up so easily - especially since the kernel
475 * may be corrupt (it died). Should gather as much information as possible.
476 * Follows proc ptrs instead of reading table since table may go
477 * away soon.
478 */
479static
480kvm_doprocs(what, arg, buff)
481 int what, arg;
482 char *buff;
483{
484 struct proc *p, proc;
485 register char *bp = buff;
486 int i = 0;
487 int doingzomb = 0;
488 struct eproc eproc;
489 struct pgrp pgrp;
490 struct session sess;
491 struct tty tty;
492#ifndef NEWVM
493 struct text text;
494#endif
495
496 /* allproc */
497 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
498 sizeof (struct proc *)) != sizeof (struct proc *)) {
499 seterr("can't read allproc");
500 return (-1);
501 }
502
503again:
504 for (; p; p = proc.p_nxt) {
505 if (kvm_read(p, &proc, sizeof (struct proc)) !=
506 sizeof (struct proc)) {
507 seterr("can't read proc at %x", p);
508 return (-1);
509 }
510#ifdef NEWVM
511 if (kvm_read(proc.p_cred, &eproc.e_pcred,
512 sizeof (struct pcred)) == sizeof (struct pcred))
513 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
514 sizeof (struct ucred));
515 switch(ki_op(what)) {
516
517 case KINFO_PROC_PID:
518 if (proc.p_pid != (pid_t)arg)
519 continue;
520 break;
521
522
523 case KINFO_PROC_UID:
524 if (eproc.e_ucred.cr_uid != (uid_t)arg)
525 continue;
526 break;
527
528 case KINFO_PROC_RUID:
529 if (eproc.e_pcred.p_ruid != (uid_t)arg)
530 continue;
531 break;
532 }
533#else
534 switch(ki_op(what)) {
535
536 case KINFO_PROC_PID:
537 if (proc.p_pid != (pid_t)arg)
538 continue;
539 break;
540
541
542 case KINFO_PROC_UID:
543 if (proc.p_uid != (uid_t)arg)
544 continue;
545 break;
546
547 case KINFO_PROC_RUID:
548 if (proc.p_ruid != (uid_t)arg)
549 continue;
550 break;
551 }
552#endif
553 /*
554 * gather eproc
555 */
556 eproc.e_paddr = p;
557 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
558 sizeof (struct pgrp)) {
559 seterr("can't read pgrp at %x", proc.p_pgrp);
560 return (-1);
561 }
562 eproc.e_sess = pgrp.pg_session;
563 eproc.e_pgid = pgrp.pg_id;
564 eproc.e_jobc = pgrp.pg_jobc;
565 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
566 != sizeof (struct session)) {
567 seterr("can't read session at %x", pgrp.pg_session);
568 return (-1);
569 }
570 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
571 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
572 != sizeof (struct tty)) {
573 seterr("can't read tty at %x", sess.s_ttyp);
574 return (-1);
575 }
576 eproc.e_tdev = tty.t_dev;
577 eproc.e_tsess = tty.t_session;
578 if (tty.t_pgrp != NULL) {
579 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
580 pgrp)) != sizeof (struct pgrp)) {
581 seterr("can't read tpgrp at &x",
582 tty.t_pgrp);
583 return (-1);
584 }
585 eproc.e_tpgid = pgrp.pg_id;
586 } else
587 eproc.e_tpgid = -1;
588 } else
589 eproc.e_tdev = NODEV;
590 if (proc.p_wmesg)
591 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
592#ifdef NEWVM
593 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
594 sizeof (struct vmspace));
595 eproc.e_xsize = eproc.e_xrssize =
596 eproc.e_xccount = eproc.e_xswrss = 0;
597#else
598 if (proc.p_textp) {
599 kvm_read(proc.p_textp, &text, sizeof (text));
600 eproc.e_xsize = text.x_size;
601 eproc.e_xrssize = text.x_rssize;
602 eproc.e_xccount = text.x_ccount;
603 eproc.e_xswrss = text.x_swrss;
604 } else {
605 eproc.e_xsize = eproc.e_xrssize =
606 eproc.e_xccount = eproc.e_xswrss = 0;
607 }
608#endif
609
610 switch(ki_op(what)) {
611
612 case KINFO_PROC_PGRP:
613 if (eproc.e_pgid != (pid_t)arg)
614 continue;
615 break;
616
617 case KINFO_PROC_TTY:
618 if ((proc.p_flag&SCTTY) == 0 ||
619 eproc.e_tdev != (dev_t)arg)
620 continue;
621 break;
622 }
623
624 i++;
625 bcopy(&proc, bp, sizeof (struct proc));
626 bp += sizeof (struct proc);
627 bcopy(&eproc, bp, sizeof (struct eproc));
628 bp+= sizeof (struct eproc);
629 }
630 if (!doingzomb) {
631 /* zombproc */
632 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
633 sizeof (struct proc *)) != sizeof (struct proc *)) {
634 seterr("can't read zombproc");
635 return (-1);
636 }
637 doingzomb = 1;
638 goto again;
639 }
640
641 return (i);
642}
643
644struct proc *
645kvm_nextproc()
646{
647
648 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
649 return (NULL);
650 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
651 seterr("end of proc list");
652 return (NULL);
653 }
654 return((struct proc *)(kvmprocptr++));
655}
656
657struct eproc *
658kvm_geteproc(p)
659 const struct proc *p;
660{
661 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
662}
663
/*
 * Rewind the traversal pointer so kvm_nextproc() restarts at the
 * beginning of the snapshot taken by kvm_getprocs().
 */
kvm_setproc()
{
	kvmprocptr = kvmprocbase;
}
668
669kvm_freeprocs()
670{
671
672 if (kvmprocbase) {
673 free(kvmprocbase);
674 kvmprocbase = NULL;
675 }
676}
677
678#ifdef i386
679/* See also ./sys/kern/kern_execve.c */
680#define ARGSIZE (roundup(ARG_MAX, NBPG))
681#endif
682
683#ifdef NEWVM
/*
 * Fetch the u. (user) area of process p into the static `user'
 * buffer and return a pointer to it (NULL on error).  As a side
 * effect, primes argaddr0/argaddr1/swaddr, the argument-location
 * hints consumed later by kvm_getargs().  NEWVM version.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	register int i;
	register char *up;
	u_int vaddr;
	struct swapblk swb;

	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}

	argaddr0 = argaddr1 = swaddr = 0;
	if ((p->p_flag & SLOAD) == 0) {
		/* Process not resident: pull the u. pages from core/swap. */
		vm_offset_t	maddr;

		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		/*
		 * Costly operation, better set enable_swap to zero
		 * in vm/vm_glue.c, since paging of user pages isn't
		 * done yet anyway.
		 */
		/*
		 * NOTE(review): `i' is read uninitialized in this call --
		 * nothing assigns it before this point (the loop below is
		 * what initializes it).  Presumably the first u. page was
		 * intended; confirm against the kernel's u-area layout
		 * before relying on this path.
		 */
		if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
			return NULL;

		if (maddr == 0 && swb.size < UPAGES * NBPG)
			return NULL;

		for (i = 0; i < UPAGES; i++) {
			if (maddr) {
				(void) lseek(mem, maddr + i * NBPG, 0);
				if (read(mem,
				    (char *)user.upages[i], NBPG) != NBPG) {
					seterr(
					    "can't read u for pid %d from %s",
					    p->p_pid, swapf);
					return NULL;
				}
			} else {
				(void) lseek(swap, swb.offset + i * NBPG, 0);
				if (read(swap,
				    (char *)user.upages[i], NBPG) != NBPG) {
					seterr(
					    "can't read u for pid %d from %s",
					    p->p_pid, swapf);
					return NULL;
				}
			}
		}
		return(&user.user);
	}
	/*
	 * Read u-area one page at a time for the benefit of post-mortems
	 */
	up = (char *) p->p_addr;
	for (i = 0; i < UPAGES; i++) {
		klseek(kmem, (long)up, 0);
		if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("cant read page %x of u of pid %d from %s",
			    up, p->p_pid, kmemf);
			return(NULL);
		}
		up += CLBYTES;
	}
	pcbpf = (int) btop(p->p_addr);	/* what should this be really? */
	/*
	 * Conjure up a physical address for the arguments.
	 */
#ifdef hp300
	if (kp->kp_eproc.e_vm.vm_pmap.pm_ptab) {
		struct pte pte[CLSIZE*2];

		klseek(kmem,
		    (long)&kp->kp_eproc.e_vm.vm_pmap.pm_ptab
		    [btoc(USRSTACK-CLBYTES*2)], 0);
		if (read(kmem, (char *)&pte, sizeof(pte)) == sizeof(pte)) {
#if CLBYTES < 2048
			argaddr0 = ctob(pftoc(pte[CLSIZE*0].pg_pfnum));
#endif
			argaddr1 = ctob(pftoc(pte[CLSIZE*1].pg_pfnum));
		}
	}
#endif
	kp->kp_eproc.e_vm.vm_rssize =
	    kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */

	/* Arguments live just below the top of the stack region. */
	vaddr = (u_int)kp->kp_eproc.e_vm.vm_maxsaddr + MAXSSIZ - ARGSIZE;

#ifdef i386
	/* Walk pm_pdir by hand: page directory entry, then the pte. */
	if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
		struct pde pde;

		klseek(kmem,
		(long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(vaddr)]), 0);

		if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
			&& pde.pd_v) {

			struct pte pte;

			if (lseek(mem, (long)ctob(pde.pd_pfnum) +
			(ptei(vaddr) * sizeof pte), 0) == -1)
				seterr("kvm_getu: lseek");
			if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
				if (pte.pg_v) {
					argaddr1 = (long)ctob(pte.pg_pfnum);
				} else {
					goto hard;
				}
			} else {
				seterr("kvm_getu: read");
			}
		} else {
			goto hard;
		}
	}
#endif	/* i386 */

	/*
	 * NOTE(review): on i386 the success path above also falls
	 * through into this label, so vatosw() runs even when argaddr1
	 * was already resolved -- looks harmless, but verify before
	 * restructuring.
	 */
hard:
	if (vatosw(p, vaddr, &argaddr1, &swb)) {
		if (argaddr1 == 0 && swb.size >= ARGSIZE)
			swaddr = swb.offset;
	}

	return(&user.user);
}
818#else
/*
 * Fetch the u. (user) area of process p into the static `user'
 * buffer and return a pointer to it (NULL on error), priming the
 * argaddr0/argaddr1 hints for kvm_getargs().  Pre-NEWVM version:
 * locates the u. pages through the per-process page table reached
 * via Usrptmap.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	struct pte *pteaddr, apte;
	/* HIGHPAGES utl ptes plus two clusters' worth of argument ptes */
	struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
	register int i;
	int ncl;

	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}
	if ((p->p_flag & SLOAD) == 0) {
		/* Swapped out: the whole u. is contiguous at p_swaddr. */
		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		(void) lseek(swap, (long)dtob(p->p_swaddr), 0);
		if (read(swap, (char *)&user.user, sizeof (struct user)) !=
		    sizeof (struct user)) {
			seterr("can't read u for pid %d from %s",
			    p->p_pid, swapf);
			return (NULL);
		}
		pcbpf = 0;
		argaddr0 = 0;
		argaddr1 = 0;
		return (&user.user);
	}
	/* Indirect pte: last entry of the process's page-table map. */
	pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
	klseek(kmem, (long)pteaddr, 0);
	if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
		seterr("can't read indir pte to get u for pid %d from %s",
		    p->p_pid, kmemf);
		return (NULL);
	}
	/*
	 * Read the block of ptes ending at the top of the page table
	 * page: argument-page ptes followed by the u. ptes.
	 */
	lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
	if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
		seterr("can't read page table for u of pid %d from %s",
		    p->p_pid, memf);
		return (NULL);
	}
	if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
		argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
	else
		argaddr0 = 0;
	if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
		argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
	else
		argaddr1 = 0;
	pcbpf = arguutl[CLSIZE*2].pg_pfnum;
	/* Copy the u. in, one cluster at a time, via its ptes. */
	ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
	while (--ncl >= 0) {
		i = ncl * CLSIZE;
		lseek(mem,
		    (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
		if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("can't read page %d of u of pid %d from %s",
			    arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
			return(NULL);
		}
	}
	return (&user.user);
}
886#endif
887
888char *
889kvm_getargs(p, up)
890 const struct proc *p;
891 const struct user *up;
892{
893#ifdef i386
894 /* See also ./sys/kern/kern_execve.c */
895 static char cmdbuf[ARGSIZE];
896 static union {
897 char argc[ARGSIZE];
898 int argi[ARGSIZE/sizeof (int)];
899 } argspac;
900#else
901 static char cmdbuf[CLBYTES*2];
902 static union {
903 char argc[CLBYTES*2];
904 int argi[CLBYTES*2/sizeof (int)];
905 } argspac;
906#endif
907 register char *cp;
908 register int *ip;
909 char c;
910 int nbad;
911#ifndef NEWVM
912 struct dblock db;
913#endif
914 const char *file;
915 int stkoff = 0;
916
917#if defined(NEWVM) && defined(hp300)
918 stkoff = 20; /* XXX for sigcode */
919#endif
920 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
921 goto retucomm;
922 if ((p->p_flag & SLOAD) == 0 || argaddr1 == 0) {
923#ifdef NEWVM
924 if (swaddr == 0)
925 goto retucomm; /* XXX for now */
926#ifdef i386
927 (void) lseek(swap, swaddr, 0);
928 if (read(swap, &argspac.argc[0], ARGSIZE) != ARGSIZE)
929 goto bad;
930#else
931 if (argaddr0) {
932 lseek(swap, (long)argaddr0, 0);
933 if (read(swap, (char *)&argspac, CLBYTES) != CLBYTES)
934 goto bad;
935 } else
936 bzero(&argspac, CLBYTES);
937 lseek(swap, (long)argaddr1, 0);
938 if (read(swap, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
939 goto bad;
940#endif
941#else
942 if (swap < 0 || p->p_ssize == 0)
943 goto retucomm;
944 vstodb(0, CLSIZE, &up->u_smap, &db, 1);
945 (void) lseek(swap, (long)dtob(db.db_base), 0);
946 if (read(swap, (char *)&argspac.argc[CLBYTES], CLBYTES)
947 != CLBYTES)
948 goto bad;
949 vstodb(1, CLSIZE, &up->u_smap, &db, 1);
950 (void) lseek(swap, (long)dtob(db.db_base), 0);
951 if (read(swap, (char *)&argspac.argc[0], CLBYTES) != CLBYTES)
952 goto bad;
953 file = swapf;
954#endif
955 } else {
956#ifdef i386
957 lseek(mem, (long)argaddr1, 0);
958 if (read(mem, &argspac.argc[0], ARGSIZE) != ARGSIZE)
959 goto bad;
960#else
961 if (argaddr0) {
962 lseek(mem, (long)argaddr0, 0);
963 if (read(mem, (char *)&argspac, CLBYTES) != CLBYTES)
964 goto bad;
965 } else
966 bzero(&argspac, CLBYTES);
967 lseek(mem, (long)argaddr1, 0);
968 if (read(mem, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
969 goto bad;
970#endif
971 file = (char *) memf;
972 }
973
974 nbad = 0;
975#ifdef i386
976 ip = &argspac.argi[(ARGSIZE-ARG_MAX)/sizeof (int)];
977
978 for (cp = (char *)ip; cp < &argspac.argc[ARGSIZE-stkoff]; cp++) {
979#else
980 ip = &argspac.argi[CLBYTES*2/sizeof (int)];
981 ip -= 2; /* last arg word and .long 0 */
982 ip -= stkoff / sizeof (int);
983 while (*--ip) {
984 if (ip == argspac.argi)
985 goto retucomm;
986 }
987 *(char *)ip = ' ';
988 ip++;
989
990 for (cp = (char *)ip; cp < &argspac.argc[CLBYTES*2-stkoff]; cp++) {
991#endif
992 c = *cp;
993 if (c == 0) { /* convert null between arguments to space */
994 *cp = ' ';
995 if (*(cp+1) == 0) break; /* if null argument follows then no more args */
996 }
997 else if (c < ' ' || c > 0176) {
998 if (++nbad >= 5*(0+1)) { /* eflg -> 0 XXX */ /* limit number of bad chars to 5 */
999 *cp++ = '?';
1000 break;
1001 }
1002 *cp = '?';
1003 }
1004 else if (0 == 0 && c == '=') { /* eflg -> 0 XXX */
1005 while (*--cp != ' ')
1006 if (cp <= (char *)ip)
1007 break;
1008 break;
1009 }
1010 }
1011 *cp = 0;
1012 while (*--cp == ' ')
1013 *cp = 0;
1014 cp = (char *)ip;
1015 (void) strcpy(cmdbuf, cp);
1016 if (cp[0] == '-' || cp[0] == '?' || cp[0] <= ' ') {
1017 (void) strcat(cmdbuf, " (");
1018 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
1019 (void) strcat(cmdbuf, ")");
1020 }
1021 return (cmdbuf);
1022
1023bad:
1024 seterr("error locating command name for pid %d from %s",
1025 p->p_pid, file);
1026retucomm:
1027 (void) strcpy(cmdbuf, " (");
1028 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
1029 (void) strcat(cmdbuf, ")");
1030 return (cmdbuf);
1031}
1032
1033
/*
 * Resolve the nl[] name list against the kernel and cache the
 * variables this library needs (swap geometry; for a dead kernel
 * also the page tables required by Vtophys()/klseek()).
 * Returns 0 on success, -1 on failure.
 */
static
getkvars()
{
	if (kvm_nlist(nl) == -1)
		return (-1);
	if (deadkernel) {
		/* We must do the sys map first because klseek uses it */
		long	addr;

#ifndef NEWVM
		Syssize = nl[X_SYSSIZE].n_value;
		Sysmap = (struct pte *)
			calloc((unsigned) Syssize, sizeof (struct pte));
		if (Sysmap == NULL) {
			seterr("out of space for Sysmap");
			return (-1);
		}
		addr = (long) nl[X_SYSMAP].n_value;
		addr &= ~KERNBASE;
		(void) lseek(kmem, addr, 0);
		if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
		    != Syssize * sizeof (struct pte)) {
			seterr("can't read Sysmap");
			return (-1);
		}
#endif
#if defined(hp300)
		addr = (long) nl[X_LOWRAM].n_value;
		(void) lseek(kmem, addr, 0);
		if (read(kmem, (char *) &lowram, sizeof (lowram))
		    != sizeof (lowram)) {
			seterr("can't read lowram");
			return (-1);
		}
		lowram = btop(lowram);
		Sysseg = (struct ste *) malloc(NBPG);
		if (Sysseg == NULL) {
			seterr("out of space for Sysseg");
			return (-1);
		}
		addr = (long) nl[X_SYSSEG].n_value;
		(void) lseek(kmem, addr, 0);
		read(kmem, (char *)&addr, sizeof(addr));
		(void) lseek(kmem, (long)addr, 0);
		if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
			seterr("can't read Sysseg");
			return (-1);
		}
#endif
#if defined(i386)
		/* IdlePTD holds a pointer; follow it to the directory page. */
		PTD = (struct pde *) malloc(NBPG);
		if (PTD == NULL) {
			seterr("out of space for PTD");
			return (-1);
		}
		addr = (long) nl[X_IdlePTD].n_value;
		(void) lseek(kmem, addr, 0);
		read(kmem, (char *)&addr, sizeof(addr));
		(void) lseek(kmem, (long)addr, 0);
		if (read(kmem, (char *) PTD, NBPG) != NBPG) {
			seterr("can't read PTD");
			return (-1);
		}
#endif
	}
#ifndef NEWVM
	usrpt = (struct pte *)nl[X_USRPT].n_value;
	Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
#endif
	/*
	 * NOTE(review): nswap/dmmin/dmmax are declared int but read
	 * with sizeof (long); identical on i386, not portable --
	 * confirm before building for a platform where they differ.
	 */
	if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
	    sizeof (long)) {
		seterr("can't read nswap");
		return (-1);
	}
	if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
	    sizeof (long)) {
		seterr("can't read dmmin");
		return (-1);
	}
	if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
	    sizeof (long)) {
		seterr("can't read dmmax");
		return (-1);
	}
	return (0);
}
1120
1121kvm_read(loc, buf, len)
1122 void *loc;
1123 void *buf;
1124{
1125 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1126 return (-1);
1127 if (iskva(loc)) {
1128 klseek(kmem, (off_t) loc, 0);
1129 if (read(kmem, buf, len) != len) {
1130 seterr("error reading kmem at %x", loc);
1131 return (-1);
1132 }
1133 } else {
1134 lseek(mem, (off_t) loc, 0);
1135 if (read(mem, buf, len) != len) {
1136 seterr("error reading mem at %x", loc);
1137 return (-1);
1138 }
1139 }
1140 return (len);
1141}
1142
1143static void
1144klseek(fd, loc, off)
1145 int fd;
1146 off_t loc;
1147 int off;
1148{
1149
1150 if (deadkernel) {
1151 if ((loc = Vtophys(loc)) == -1)
1152 return;
1153 }
1154 (void) lseek(fd, (off_t)loc, off);
1155}
1156
1157#ifndef NEWVM
1158/*
1159 * Given a base/size pair in virtual swap area,
1160 * return a physical base/size pair which is the
1161 * (largest) initial, physically contiguous block.
1162 */
/*
 * Given a base/size pair (in clicks) within a process's virtual
 * swap map dmp, compute into *dbp the largest initial physically
 * contiguous disk block; rev reverses the offset within the block.
 * NOTE(review): `rev' has no declaration in the K&R parameter list
 * and so defaults to int -- presumably intentional, but confirm.
 */
static void
vstodb(vsbase, vssize, dmp, dbp, rev)
	register int vsbase;
	int vssize;
	struct dmap *dmp;
	register struct dblock *dbp;
{
	register int blk = dmmin;
	register swblk_t *ip = dmp->dm_map;

	vsbase = ctod(vsbase);
	vssize = ctod(vssize);
	/* range checks are advisory only in this library (no panic) */
	if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
		/*panic("vstodb")*/;
	/* Skip whole interleave blocks, doubling up to dmmax. */
	while (vsbase >= blk) {
		vsbase -= blk;
		if (blk < dmmax)
			blk *= 2;
		ip++;
	}
	if (*ip <= 0 || *ip + blk > nswap)
		/*panic("vstodb")*/;
	dbp->db_size = MIN(vssize, blk - vsbase);
	dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
}
1188#endif
1189
1190#ifdef NEWVM
/*
 * Translate kernel virtual address loc to a physical (core-file)
 * offset by hand-walking the dead kernel's MMU structures: the
 * segment table on hp300, the page directory + page table on i386.
 * Returns (off_t)-1 on any invalid mapping.
 */
static off_t
Vtophys(loc)
	u_long	loc;
{
	off_t newloc = (off_t) -1;
#ifdef hp300
	int p, ste, pte;

	ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
	if ((ste & SG_V) == 0) {
		seterr("vtophys: segment not valid");
		return((off_t) -1);
	}
	p = btop(loc & SG_PMASK);
	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot locate pte");
		return((off_t) -1);
	}
	newloc = pte & PG_FRAME;
	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
	/* core file starts at lowram, so rebase before adding the offset */
	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
#endif
#ifdef i386
	struct pde pde;
	struct pte pte;
	int p;

	pde = PTD[loc >> PD_SHIFT];
	if (pde.pd_v == 0) {
		seterr("vtophys: page directory entry not valid");
		return((off_t) -1);
	}
	p = btop(loc & PT_MASK);
	newloc = pde.pd_pfnum + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)newloc, 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot obtain desired pte");
		return((off_t) -1);
	}
	newloc = pte.pg_pfnum;
	if (pte.pg_v == 0) {
		seterr("vtophys: page table entry not valid");
		return((off_t) -1);
	}
	newloc += (loc & PGOFSET);
#endif
	return((off_t) newloc);
}
1244#else
/*
 * vtophys -- pre-NEWVM kernel virtual-to-physical translation using
 * the in-core system page table (Sysmap/Syssize).
 *
 * Returns the physical address, or (off_t)-1 after calling seterr()
 * if the address is out of range or its pte is not usable.
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	/* strip the kernel base to get an offset into system space */
	newloc = loc & ~KERNBASE;
	p = btop(newloc);
#if defined(vax) || defined(tahoe)
	/* on these machines only kernel-space addresses can be mapped */
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	pte = &Sysmap[p];
	/*
	 * An invalid pte is still acceptable if it is neither
	 * fill-on-demand nor frame 0 -- presumably a transiently
	 * invalidated but resident page (NOTE(review): inferred from
	 * the condition; confirm against the VM system's pte usage).
	 */
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300)
	/* frames below lowram are not RAM on the hp300 */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	/* convert frame number to byte address, re-add in-page offset */
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
1279#endif
1280
1281
1282#ifdef NEWVM
1283/*
1284 * locate address of unwired or swapped page
1285 */
1286
/* non-zero DEBUG enables the fprintf(stderr, ...) trace points below */
#define DEBUG 0

/* true iff a kvm_read of `len' bytes at kernel address `off' succeeded */
#define KREAD(off, addr, len) \
	(kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1291
1292
/*
 * vatosw -- locate the backing store for virtual address `vaddr' of
 * process `p' (actually a struct kinfo_proc in disguise; see the cast
 * below).  On success returns 1 and either:
 *   - fills in *maddr with the physical address, if the page was found
 *     resident on the VM page queues (via findpage()), or
 *   - fills in swb->offset/swb->size with its location in swap space.
 * On failure returns 0 after recording a message with
 * seterr()/setsyserr().
 *
 * All kernel data structures (map entries, objects, pagers, swap
 * block arrays) are copied in through the KREAD macro.
 */
static int
vatosw(p, vaddr, maddr, swb)
struct proc *p ;
vm_offset_t vaddr;
vm_offset_t *maddr;
struct swapblk *swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object vm_object;
	struct vm_map_entry vm_entry;
	struct pager_struct pager;
	struct swpager swpager;
	struct swblock swblock;
	long addr, off;
	int i;

	/* pids 0 and 2 are special system processes -- do not translate */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
				(vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	/* share/submaps would need another level of lookup -- punt */
	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/*
	 * Walk the shadow-object chain: try each object's resident
	 * pages first, then fall back to its shadow, accumulating the
	 * shadow offsets as we descend.
	 */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
				p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
				p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	/* not resident anywhere; it must be paged out -- need a pager */
	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	/* index = byte offset divided by the swap block size (in bytes) */
	if (!KREAD((long)swpager.sw_blocks +
			(off/dbtob(swpager.sw_bsize)) * sizeof swblock,
			&swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* byte offset in swap, and bytes remaining in this swap block */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1400
1401
/*
 * NB: these two macros reference `page_shift' and `vm_page_hash_mask',
 * which are static locals of findpage() below -- they are only usable
 * from inside that function.
 */
#define atop(x) (((unsigned)(x)) >> page_shift)
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)

/*
 * findpage -- search the kernel's vm_page hash buckets for the page
 * belonging to (object, offset).  If found, store its physical address
 * in *maddr and return 1; otherwise return 0 (with seterr() set only
 * on a read failure, not on a simple miss).
 */
static int
findpage(object, offset, maddr)
long object;
long offset;
vm_offset_t *maddr;
{
/* hashing parameters, read from the kernel once and then cached */
static long vm_page_hash_mask;
static long vm_page_buckets;
static long page_shift;
	queue_head_t bucket;
	struct vm_page mem;
	long addr, baddr;

	/* lazily fetch the kernel's hash parameters via the namelist */
	if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
			&vm_page_hash_mask, sizeof (long))) {
		seterr("can't read vm_page_hash_mask");
		return 0;
	}
	if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
			&page_shift, sizeof (long))) {
		seterr("can't read page_shift");
		return 0;
	}
	if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
			&vm_page_buckets, sizeof (long))) {
		seterr("can't read vm_page_buckets");
		return 0;
	}

	/* kernel address of the hash bucket for this (object, offset) */
	baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
	if (!KREAD(baddr, &bucket, sizeof (bucket))) {
		seterr("can't read vm_page_bucket");
		return 0;
	}

	/* walk the circular hash chain until we come back to the head */
	addr = (long)bucket.next;
	while (addr != baddr) {
		if (!KREAD(addr, &mem, sizeof (mem))) {
			seterr("can't read vm_page");
			return 0;
		}
		if ((long)mem.object == object && mem.offset == offset) {
			*maddr = (long)mem.phys_addr;
			return 1;
		}
		addr = (long)mem.hashq.next;
	}
	return 0;
}
1455#endif /* NEWVM */
1456
1457#include <varargs.h>
1458static char errbuf[_POSIX2_LINE_MAX];
1459
1460static void
1461seterr(va_alist)
1462 va_dcl
1463{
1464 char *fmt;
1465 va_list ap;
1466
1467 va_start(ap);
1468 fmt = va_arg(ap, char *);
1469 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1470#if DEBUG
1471 (void) vfprintf(stderr, fmt, ap);
1472#endif
1473 va_end(ap);
1474}
1475
1476static void
1477setsyserr(va_alist)
1478 va_dcl
1479{
1480 char *fmt, *cp;
1481 va_list ap;
1482 extern int errno;
1483
1484 va_start(ap);
1485 fmt = va_arg(ap, char *);
1486 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1487 for (cp=errbuf; *cp; cp++)
1488 ;
1489 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1490 va_end(ap);
1491}
1492
1493char *
1494kvm_geterr()
1495{
1496 return (errbuf);
1497}