+/*
+ * Copyright (c) 1992 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * %sccs.include.redist.c%
+ *
+ * @(#)machdep.c 7.1 (Berkeley) %G%
+ *
+ * from: $Header: machdep.c,v 1.32 92/07/13 01:41:14 torek Exp $
+ */
+
+#include "param.h"
+#include "proc.h"
+#include "user.h"
+#include "map.h"
+#include "buf.h"
+#include "device.h"
+#include "reboot.h"
+#include "systm.h"
+#include "conf.h"
+#include "file.h"
+#include "clist.h"
+#include "callout.h"
+#include "malloc.h"
+#include "mbuf.h"
+#include "mount.h"
+#include "msgbuf.h"
+#ifdef SYSVSHM
+#include "shm.h"
+#endif
+#include "exec.h"
+
+#include "machine/autoconf.h"
+#include "machine/frame.h"
+#include "machine/cpu.h"
+
+#include "vm/vm_kern.h"
+#include "vm/vm_page.h"
+
+#include "asm.h"
+#include "cache.h"
+#include "vaddrs.h"
+
+vm_map_t buffer_map;		/* VM submap holding the buffer cache */
+extern vm_offset_t avail_end;	/* top of physical memory (set by pmap) */
+
+/*
+ * Declare these as initialized data so we can patch them.
+ */
+int nswbuf = 0;			/* # swap I/O buffer headers; 0 => autosize */
+#ifdef NBUF
+int nbuf = NBUF;
+#else
+int nbuf = 0;			/* # buffer headers; 0 => sized in allocsys() */
+#endif
+#ifdef BUFPAGES
+int bufpages = BUFPAGES;
+#else
+int bufpages = 0;		/* # pages of buffer memory; 0 => autosize */
+#endif
+
+int physmem;			/* physical memory size in clicks (pages) */
+
+extern struct msgbuf msgbuf;
+struct msgbuf *msgbufp = &msgbuf;
+int msgbufmapped = 1;	/* message buffer is always mapped */
+
+/*
+ * safepri is a safe priority for sleep to set for a spin-wait
+ * during autoconfiguration or after a panic.
+ */
+int safepri = 0;
+
+caddr_t allocsys();		/* forward: sizes and carves system tables */
+
+/*
+ * Machine-dependent startup code.
+ *
+ * Called once early in boot.  Reports memory, sizes and allocates the
+ * system tables (via allocsys()), carves the buffer cache and the
+ * exec/phys/mbuf VM submaps, threads the callout free list, runs
+ * autoconfiguration, and finally enables the cache.  The ordering of
+ * these steps matters and must not be rearranged.
+ */
+cpu_startup()
+{
+	register unsigned i;
+	register caddr_t v;
+	register int sz;
+	int base, residual;
+#ifdef DEBUG
+	extern int pmapdebug;
+	int opmapdebug = pmapdebug;
+#endif
+	vm_offset_t minaddr, maxaddr;
+	vm_size_t size;
+
+#ifdef DEBUG
+	/* silence pmap tracing during startup; restored below */
+	pmapdebug = 0;
+#endif
+
+	/*
+	 * Good {morning,afternoon,evening,night}.
+	 */
+	printf(version);
+	/*identifycpu();*/
+	physmem = btoc(avail_end);
+	printf("real mem = %d\n", avail_end);
+
+	/*
+	 * Find out how much space we need, allocate it,
+	 * and then give everything true virtual addresses.
+	 * allocsys(0) only sizes; the second call assigns pointers.
+	 */
+	sz = (int)allocsys((caddr_t)0);
+	if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
+		panic("startup: no room for tables");
+	if (allocsys(v) - v != sz)
+		panic("startup: table size inconsistency");
+
+	/*
+	 * Now allocate buffers proper.  They are different than the above
+	 * in that they usually occupy more virtual memory than physical.
+	 */
+	size = MAXBSIZE * nbuf;
+	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
+	    &maxaddr, size, FALSE);
+	minaddr = (vm_offset_t)buffers;
+	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
+	    &minaddr, size, FALSE) != KERN_SUCCESS)
+		panic("startup: cannot allocate buffers");
+	base = bufpages / nbuf;
+	residual = bufpages % nbuf;
+	for (i = 0; i < nbuf; i++) {
+		vm_size_t curbufsize;
+		vm_offset_t curbuf;
+
+		/*
+		 * First <residual> buffers get (base+1) physical pages
+		 * allocated for them.  The rest get (base) physical pages.
+		 *
+		 * The rest of each buffer occupies virtual space,
+		 * but has no physical memory allocated for it.
+		 */
+		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
+		curbufsize = CLBYTES * (i < residual ? base+1 : base);
+		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
+		vm_map_simplify(buffer_map, curbuf);
+	}
+	/*
+	 * Allocate a submap for exec arguments.  This map effectively
+	 * limits the number of processes exec'ing at any time.
+	 */
+	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+	    16*NCARGS, TRUE);
+	/*
+	 * Allocate a map for physio.  Others use a submap of the kernel
+	 * map, but we want one completely separate, even though it uses
+	 * the same pmap.
+	 */
+	phys_map = vm_map_create(kernel_pmap, DVMA_BASE, DVMA_END, 1);
+	if (phys_map == NULL)
+		panic("unable to create DVMA map");
+
+	/*
+	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
+	 * we use the more space efficient malloc in place of kmem_alloc.
+	 * M_NOWAIT allocation can fail, so check before bzero'ing.
+	 */
+	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
+	    M_MBUF, M_NOWAIT);
+	if (mclrefcnt == NULL)
+		panic("startup: no space for mclrefcnt");
+	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
+	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
+	    VM_MBUF_SIZE, FALSE);
+	/*
+	 * Initialize callouts: chain all entries onto the free list.
+	 */
+	callfree = callout;
+	for (i = 1; i < ncallout; i++)
+		callout[i-1].c_next = &callout[i];
+	callout[i-1].c_next = NULL;
+
+#ifdef DEBUG
+	pmapdebug = opmapdebug;
+#endif
+	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
+	printf("using %d buffers containing %d bytes of memory\n",
+	    nbuf, bufpages * CLBYTES);
+
+	/*
+	 * Set up buffers, so they can be used to read disk labels.
+	 */
+	bufinit();
+
+	/*
+	 * Configure the system.
+	 */
+	configure();
+
+	/*
+	 * Turn on the cache (do after configuration due to a bug in
+	 * some versions of the SPARC chips -- this info from Gilmore).
+	 */
+	cache_enable();
+}
+
+/*
+ * Allocate space for system data structures.  We are given
+ * a starting virtual address and we return a final virtual
+ * address; along the way we set each data structure pointer.
+ *
+ * Call allocsys(0) first to learn how much space is needed,
+ * allocate and zero that much, then call again with the real
+ * base virtual address to assign the table pointers.
+ */
+caddr_t
+allocsys(v)
+	register caddr_t v;
+{
+
+	/* clist blocks */
+	cfree = (struct cblock *)v;
+	v = (caddr_t)(cfree + nclist);
+	/* callout (timeout) table */
+	callout = (struct callout *)v;
+	v = (caddr_t)(callout + ncallout);
+	/* swap resource map: two entries per process */
+	nswapmap = maxproc * 2;
+	swapmap = (struct map *)v;
+	v = (caddr_t)(swapmap + nswapmap);
+#ifdef SYSVSHM
+	/* System V shared memory segment descriptors */
+	shmsegs = (struct shmid_ds *)v;
+	v = (caddr_t)(shmsegs + shminfo.shmmni);
+#endif
+
+	/*
+	 * Determine how many buffers to allocate (enough to
+	 * hold 5% of total physical memory, but at least 16).
+	 * Allocate 1/2 as many swap buffer headers as file i/o buffers.
+	 */
+	if (bufpages == 0)
+		bufpages = (physmem / 20) / CLSIZE;
+	if (nbuf == 0) {
+		nbuf = bufpages;
+		if (nbuf < 16)
+			nbuf = 16;
+	}
+	if (nswbuf == 0) {
+		nswbuf = (nbuf / 2) &~ 1;	/* force even */
+		if (nswbuf > 256)
+			nswbuf = 256;		/* sanity */
+	}
+	/* swap and file I/O buffer headers */
+	swbuf = (struct buf *)v;
+	v = (caddr_t)(swbuf + nswbuf);
+	buf = (struct buf *)v;
+	v = (caddr_t)(buf + nbuf);
+	return (v);
+}
+
+/*
+ * Set up registers on exec.
+ *
+ * Leaves the process with a nearly empty trap frame: the entry point
+ * (forced to 4-byte alignment) in npc, %g2 and %g7; the exec()-built
+ * stack pointer preserved in %o6; and only the CWP and PSR_S bits of
+ * the old %psr retained.  Any FPU state held for the process is freed.
+ *
+ * XXX this entire mess must be fixed
+ */
+/* ARGSUSED */
+setregs(p, entry, retval)
+	register struct proc *p;
+	u_long entry;
+	int retval[2];
+{
+	register struct trapframe *tf = p->p_md.md_tf;
+	register struct fpstate *fs;
+	register int psr, sp;
+
+	/*
+	 * The syscall will ``return'' to npc or %g7 or %g2; set them all.
+	 * Set the rest of the registers to 0 except for %o6 (stack pointer,
+	 * built in exec()) and psr (retain CWP and PSR_S bits).
+	 */
+	psr = tf->tf_psr & (PSR_S | PSR_CWP);
+	sp = tf->tf_out[6];
+	if ((fs = p->p_md.md_fpstate) != NULL) {
+		/*
+		 * We hold an FPU state.  If we own *the* FPU chip state
+		 * we must get rid of it, and the only way to do that is
+		 * to save it.  In any case, get rid of our FPU state.
+		 */
+		if (p == fpproc) {
+			savefpstate(fs);
+			fpproc = NULL;
+		}
+		free((void *)fs, M_SUBPROC);
+		p->p_md.md_fpstate = NULL;
+	}
+	/* wipe the whole frame, then restore the few bits we kept */
+	bzero((caddr_t)tf, sizeof *tf);
+	tf->tf_psr = psr;
+	/* & ~3: SPARC instructions must be word aligned */
+	tf->tf_global[2] = tf->tf_global[7] = tf->tf_npc = entry & ~3;
+	tf->tf_out[6] = sp;
+	retval[1] = 0;
+}
+
+#ifdef DEBUG
+/* signal-delivery tracing knobs, tested in sendsig()/sigreturn() */
+int sigdebug = 0;		/* bitmask of SDB_* flags below */
+int sigpid = 0;			/* restrict SDB_KSTACK output to this pid */
+#define SDB_FOLLOW 0x01		/* trace entry/exit */
+#define SDB_KSTACK 0x02		/* trace stack frame setup */
+#define SDB_FPSTATE 0x04	/* trace FPU state handling */
+#endif
+
+/*
+ * Signal frame pushed onto the user stack by sendsig() and later
+ * consumed by the signal trampoline and sigreturn().
+ */
+struct sigframe {
+	int sf_signo;		/* signal number */
+	int sf_code;		/* code */
+#ifdef COMPAT_SUNOS
+	struct sigcontext *sf_scp;	/* points to user addr of sigcontext */
+#else
+	int sf_xxx;			/* placeholder */
+#endif
+	int sf_addr;		/* SunOS compat, always 0 for now */
+	struct sigcontext sf_sc;	/* actual sigcontext */
+};
+
+/*
+ * Send an interrupt to process.
+ *
+ * Builds a struct sigframe in kernel space, copies it out to the
+ * user stack (or the alternate signal stack, if requested and not
+ * already in use), and rewrites the trap frame so the process resumes
+ * in the signal trampoline (or directly in the handler for SunOS
+ * user-trampoline compatibility).  If the user stack is unusable the
+ * process is killed with SIGILL.
+ */
+void
+sendsig(catcher, sig, mask, code)
+	sig_t catcher;
+	int sig, mask;
+	unsigned code;
+{
+	register struct proc *p = curproc;
+	register struct sigacts *psp = p->p_sigacts;
+	register struct sigframe *fp;
+	register struct trapframe *tf;
+	register int addr, oonstack;
+	struct sigframe sf;
+	extern char sigcode[], esigcode[];
+#define szsigcode (esigcode - sigcode)
+
+	tf = p->p_md.md_tf;
+	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
+	/*
+	 * Compute new user stack addresses, subtract off
+	 * one signal frame, and align.
+	 */
+	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
+	    (psp->ps_sigonstack & sigmask(sig))) {
+		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
+		    psp->ps_sigstk.ss_size);
+		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
+	} else
+		fp = (struct sigframe *)tf->tf_out[6];
+	/* & ~7: SPARC stack frames must be 8-byte aligned */
+	fp = (struct sigframe *)((int)(fp - 1) & ~7);
+
+#ifdef DEBUG
+	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+		printf("sendsig: %s[%d] sig %d newusp %x scp %x\n",
+		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc);
+#endif
+	/*
+	 * Now set up the signal frame.  We build it in kernel space
+	 * and then copy it out.  We probably ought to just build it
+	 * directly in user space....
+	 */
+	sf.sf_signo = sig;
+	sf.sf_code = code;
+#ifdef COMPAT_SUNOS
+	sf.sf_scp = &fp->sf_sc;
+#endif
+	sf.sf_addr = 0;			/* XXX */
+
+	/*
+	 * Build the signal context to be used by sigreturn.
+	 */
+	sf.sf_sc.sc_onstack = oonstack;
+	sf.sf_sc.sc_mask = mask;
+	sf.sf_sc.sc_sp = tf->tf_out[6];
+	sf.sf_sc.sc_pc = tf->tf_pc;
+	sf.sf_sc.sc_npc = tf->tf_npc;
+	sf.sf_sc.sc_psr = tf->tf_psr;
+	sf.sf_sc.sc_g1 = tf->tf_global[1];
+	sf.sf_sc.sc_o0 = tf->tf_out[0];
+
+	/*
+	 * Put the stack in a consistent state before we whack away
+	 * at it.  Note that write_user_windows may just dump the
+	 * registers into the pcb; we need them in the process's memory.
+	 */
+	write_user_windows();
+	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf)) {
+		/*
+		 * Process has trashed its stack; give it an illegal
+		 * instruction to halt it in its tracks.
+		 */
+#ifdef DEBUG
+		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+			printf("sendsig: window save or copyout error\n");
+#endif
+		sigexit(p, SIGILL);
+		/* NOTREACHED */
+	}
+#ifdef DEBUG
+	if (sigdebug & SDB_FOLLOW)
+		printf("sendsig: %s[%d] sig %d scp %x\n",
+		    p->p_comm, p->p_pid, sig, &fp->sf_sc);
+#endif
+	/*
+	 * Arrange to continue execution at the code copied out in exec().
+	 * It needs the function to call in %g1, and a new stack pointer.
+	 */
+#ifdef COMPAT_SUNOS
+	if (psp->ps_usertramp & sigmask(sig)) {
+		addr = (int)catcher;	/* user does his own trampolining */
+	} else
+#endif
+	{
+		/* trampoline lives just below ps_strings at top of stack */
+		addr = USRSTACK - sizeof(struct ps_strings) - szsigcode;
+		tf->tf_global[1] = (int)catcher;
+	}
+	tf->tf_pc = addr;
+	tf->tf_npc = addr + 4;
+	/* leave room for a register window save area below the frame */
+	tf->tf_out[6] = (int)fp - sizeof(struct rwindow);
+#ifdef DEBUG
+	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+		printf("sendsig: about to return to catcher\n");
+#endif
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken.  Reset signal mask and
+ * stack state from context left by sendsig (above),
+ * and return to the given trap frame (if there is one).
+ * Check carefully to make sure that the user has not
+ * modified the state to gain improper privileges or to cause
+ * a machine fault.
+ */
+/* ARGSUSED */
+struct sigreturn_args {
+	struct sigcontext *scp;		/* user address of saved context */
+};
+sigreturn(p, uap, retval)
+	register struct proc *p;
+	struct sigreturn_args *uap;
+	int *retval;
+{
+	register struct sigcontext *scp;
+	register struct trapframe *tf;
+
+	/* First ensure consistent stack state (see sendsig). */
+	write_user_windows();
+	if (rwindow_save(p))
+		sigexit(p, SIGILL);
+#ifdef DEBUG
+	if (sigdebug & SDB_FOLLOW)
+		printf("sigreturn: %s[%d], scp %x\n",
+		    p->p_comm, p->p_pid, uap->scp);
+#endif
+	scp = uap->scp;
+	/* context pointer must be word aligned and writable by the user */
+	if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0)
+		return (EINVAL);
+	tf = p->p_md.md_tf;
+	/*
+	 * Only the icc bits in the psr are used, so it need not be
+	 * verified.  pc and npc must be multiples of 4.  This is all
+	 * that is required; if it holds, just do it.
+	 */
+	if (((scp->sc_pc | scp->sc_npc) & 3) != 0)
+		return (EINVAL);
+	/* take only psr ICC field */
+	tf->tf_psr = (tf->tf_psr & ~PSR_ICC) | (scp->sc_psr & PSR_ICC);
+	tf->tf_pc = scp->sc_pc;
+	tf->tf_npc = scp->sc_npc;
+	tf->tf_global[1] = scp->sc_g1;
+	tf->tf_out[0] = scp->sc_o0;
+	tf->tf_out[6] = scp->sc_sp;
+	/* restore the on-signal-stack flag saved by sendsig() */
+	if (scp->sc_onstack & 1)
+		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+	else
+		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
+	/* never allow SIGKILL/SIGSTOP to be masked */
+	p->p_sigmask = scp->sc_mask & ~sigcantmask;
+	return (EJUSTRETURN);
+}
+
+int waittime = -1;	/* <0 until the disks have been synced once */
+
+/*
+ * Halt or reboot the machine.  Unless RB_NOSYNC is set (or we already
+ * synced), flush the disks first, waiting a bounded time for busy
+ * buffers.  Then either halt via the PROM, optionally take a crash
+ * dump, or reboot via the PROM with flags built from howto.
+ */
+boot(howto)
+	register int howto;
+{
+	int i;
+	static char str[4];	/* room for "-sd\0" */
+	extern volatile void romhalt(void);
+	extern volatile void romboot(char *);
+
+	fb_unblank();
+	boothowto = howto;
+	if ((howto & RB_NOSYNC) == 0 && waittime < 0 && rootfs) {
+		register struct buf *bp;
+		int iter, nbusy;
+#if 1
+		extern struct proc proc0;
+
+		/* protect against curproc->p_stats.foo refs in sync() XXX */
+		if (curproc == NULL)
+			curproc = &proc0;
+#endif
+		waittime = 0;
+		(void) spl0();
+		printf("syncing disks... ");
+		/*
+		 * Release vnodes held by texts before sync.
+		 */
+		if (panicstr == 0)
+			vnode_pager_umount((struct mount *)NULL);
+/* XXX mid-function #include for the NFD config constant */
+#include "fd.h"
+#if NFD > 0
+		fdshutdown();
+#endif
+		sync((struct proc *)NULL, (void *)NULL, (int *)NULL);
+
+		/* wait (with increasing delays) for busy buffers to drain */
+		for (iter = 0; iter < 20; iter++) {
+			nbusy = 0;
+			for (bp = &buf[nbuf]; --bp >= buf; )
+				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
+					nbusy++;
+			if (nbusy == 0)
+				break;
+			printf("%d ", nbusy);
+			DELAY(40000 * iter);
+		}
+		if (nbusy)
+			printf("giving up\n");
+		else
+			printf("done\n");
+		/*
+		 * If we've been adjusting the clock, the todr
+		 * will be out of synch; adjust it now.
+		 */
+		resettodr();
+	}
+	(void) splhigh();	/* ??? */
+	if (howto & RB_HALT) {
+		printf("halted\n\n");
+		romhalt();
+	}
+	if (howto & RB_DUMP)
+		dumpsys();
+	printf("rebooting\n\n");
+	/* build the PROM boot-flag string, e.g. "-s", "-d" or "-sd" */
+	i = 1;
+	if (howto & RB_SINGLE)
+		str[i++] = 's';
+	if (howto & RB_KDB)
+		str[i++] = 'd';
+	if (i > 1) {
+		str[0] = '-';
+		str[i] = 0;
+	} else
+		str[0] = 0;
+	romboot(str);
+	/*NOTREACHED*/
+}
+
+int dumpmag = 0x8fca0101;	/* magic number for savecore */
+int dumpsize = 0;		/* pages to dump; also for savecore */
+long dumplo = 0;		/* dump start offset on device, in disk blocks */
+
+/*
+ * Compute dumpsize and dumplo for the configured dump device.
+ * Called from dumpsys() if a dump is attempted before configuration.
+ */
+dumpconf()
+{
+	int nblks;
+
+	dumpsize = physmem;
+#define DUMPMMU
+#ifdef DUMPMMU
+#define NPMEG 128
+	/*
+	 * savecore views the image in units of pages (i.e., dumpsize is in
+	 * pages) so we round the two mmu entities into page-sized chunks.
+	 * The PMEGs (32kB) and the segment table (512 bytes plus padding)
+	 * are appended to the end of the crash dump.
+	 */
+	dumpsize += btoc(sizeof(((struct kpmap *)0)->pm_rsegmap)) +
+	    btoc(NPMEG * NPTESG * sizeof(int));
+#endif
+	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
+		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
+		/*
+		 * Don't dump on the first CLBYTES (why CLBYTES?)
+		 * in case the dump device includes a disk label.
+		 */
+		if (dumplo < btodb(CLBYTES))
+			dumplo = btodb(CLBYTES);
+
+		/*
+		 * If dumpsize is too big for the partition, truncate it.
+		 * Otherwise, put the dump at the end of the partition
+		 * by making dumplo as large as possible.
+		 */
+		if (dumpsize > btoc(dbtob(nblks - dumplo)))
+			dumpsize = btoc(dbtob(nblks - dumplo));
+		else if (dumplo + ctod(dumpsize) > nblks)
+			dumplo = nblks - ctod(dumpsize);
+	}
+}
+
+#ifdef DUMPMMU
+/* XXX */
+#include "ctlreg.h"
+#define getpte(va) lda(va, ASI_PTE)
+#define setsegmap(va, pmeg) stba(va, ASI_SEGMAP, pmeg)
+
+/*
+ * Write the mmu contents to the dump device.
+ * This gets appended to the end of a crash dump since
+ * there is no in-core copy of kernel memory mappings.
+ *
+ * Returns 0 on success, -1 if no spare virtual segment could be
+ * found, or the error from the device dump routine.
+ */
+int
+dumpmmu(blkno)
+	register daddr_t blkno;
+{
+	register int (*dump)(/*dev_t, daddr_t, caddr_t, int*/);
+	register int pmeg;
+	register int addr;	/* unused kernel virtual address */
+	register int i;
+	register int *pte, *ptend;
+	register int error;
+	register struct kpmap *kpmap = &kernel_pmap_store;
+	int buffer[dbtob(1) / sizeof(int)];
+	extern int seginval;	/* from pmap.c */
+
+
+	dump = bdevsw[major(dumpdev)].d_dump;
+
+	/*
+	 * dump page table entries
+	 *
+	 * We dump each pmeg in order (by segment number).  Since the MMU
+	 * automatically maps the given virtual segment to a pmeg we must
+	 * iterate over the segments by incrementing an unused segment slot
+	 * in the MMU.  This fixed segment number is used in the virtual
+	 * address argument to getpte().
+	 */
+
+	/* First find an unused virtual segment. */
+	i = NKSEG;
+	while (kpmap->pm_rsegmap[--i] != seginval)
+		if (i <= 0)
+			return (-1);
+	/*
+	 * Compute the base address corresponding to the unused segment.
+	 * Note that the kernel segments start after all the user segments
+	 * so we must account for this offset.
+	 */
+	addr = VSTOVA(i + NUSEG);
+	/*
+	 * Go through the pmegs and dump each one.
+	 */
+	pte = buffer;
+	ptend = &buffer[sizeof(buffer) / sizeof(buffer[0])];
+	for (pmeg = 0; pmeg < NPMEG; ++pmeg) {
+		register int va = addr;
+
+		/* point the spare segment at this pmeg, then read its PTEs */
+		setsegmap(addr, pmeg);
+		i = NPTESG;
+		do {
+			*pte++ = getpte(va);
+			if (pte >= ptend) {
+				/*
+				 * Note that we'll dump the last block
+				 * the last time through the loops because
+				 * all the PMEGs occupy 32KB which is
+				 * a multiple of the block size.
+				 */
+				error = (*dump)(dumpdev, blkno,
+				    (caddr_t)buffer,
+				    dbtob(1));
+				if (error != 0)
+					return (error);
+				++blkno;
+				pte = buffer;
+			}
+			va += NBPG;
+		} while (--i > 0);
+	}
+	/* restore the spare segment to its invalid mapping */
+	setsegmap(addr, seginval);
+
+	/*
+	 * dump (512 byte) segment map
+	 * XXX assume it's a multiple of the block size
+	 *
+	 * (A stray fifth argument was previously passed here; d_dump
+	 * takes four arguments, as in the calls above and in dumpsys().)
+	 */
+	error = (*dump)(dumpdev, blkno, (caddr_t)kpmap->pm_rsegmap,
+	    sizeof(kpmap->pm_rsegmap));
+	return (error);
+}
+#endif
+
+#define BYTES_PER_DUMP (32 * 1024)	/* must be a multiple of pagesize */
+static vm_offset_t dumpspace;		/* scratch VA window used by dumpsys() */
+
+/*
+ * Claim BYTES_PER_DUMP bytes of kernel virtual space beginning at p
+ * for the crash-dump I/O window, and return the first address past it.
+ */
+caddr_t
+reserve_dumppages(p)
+	caddr_t p;
+{
+	register caddr_t next = p + BYTES_PER_DUMP;
+
+	dumpspace = (vm_offset_t)p;
+	return (next);
+}
+
+/*
+ * Write a crash dump.
+ *
+ * Dumps all of physical memory to the configured dump device in
+ * BYTES_PER_DUMP chunks, mapping each chunk through the dumpspace
+ * window, then (if DUMPMMU) appends the MMU state via dumpmmu().
+ */
+dumpsys()
+{
+	register unsigned bytes, i, n;
+	register int maddr, psize;
+	register daddr_t blkno;
+	register int (*dump)(/*dev_t, daddr_t, caddr_t, int, int*/);
+	int error = 0;
+
+	if (dumpdev == NODEV)
+		return;
+	/* copy registers to memory */
+	snapshot(cpcb);
+	/*
+	 * For dumps during autoconfiguration,
+	 * if dump device has already configured...
+	 */
+	if (dumpsize == 0)
+		dumpconf();
+	if (dumplo < 0)
+		return;
+	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
+
+	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
+	printf("dump ");
+	if (psize == -1) {
+		printf("area unavailable\n");
+		return;
+	}
+	bytes = physmem << PGSHIFT;
+	maddr = 0;
+	blkno = dumplo;
+	dump = bdevsw[major(dumpdev)].d_dump;
+	for (i = 0; i < bytes; i += n) {
+		n = bytes - i;
+		if (n > BYTES_PER_DUMP)
+			n = BYTES_PER_DUMP;
+#ifdef DEBUG
+		/* print out how many MBs we have dumped */
+		if (i && (i % (1024*1024)) == 0)
+			printf("%d ", i / (1024*1024));
+#endif
+		/* map the next physical chunk at dumpspace, then write it */
+		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
+		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, (int)n);
+		if (error)
+			break;
+		maddr += n;
+		blkno += btodb(n);
+	}
+#ifdef DUMPMMU
+	if (!error)
+		error = dumpmmu(blkno);
+#endif
+	switch (error) {
+
+	case ENXIO:
+		printf("device bad\n");
+		break;
+
+	case EFAULT:
+		printf("device not ready\n");
+		break;
+
+	case EINVAL:
+		printf("area improper\n");
+		break;
+
+	case EIO:
+		printf("i/o error\n");
+		break;
+
+	case 0:
+		printf("succeeded\n");
+		break;
+
+	default:
+		printf("error %d\n", error);
+		break;
+	}
+}
+
+/*
+ * Map an I/O device given physical address and size in bytes, e.g.,
+ *
+ * mydev = (struct mydev *)mapdev(myioaddr, 0, sizeof(struct mydev));
+ *
+ * If virt is 0, a virtual range is carved out of the IODEV region;
+ * otherwise virt names the virtual address to use.  Pages are entered
+ * uncached (PMAP_NC) in OBIO space.
+ *
+ * See also machine/autoconf.h.
+ */
+void *
+mapdev(phys, virt, size)
+	register void *phys;
+	register int virt, size;
+{
+	register vm_offset_t v, pa;
+	register void *ret;
+	static vm_offset_t iobase = IODEV_BASE;
+
+	size = round_page(size);
+	if (virt)
+		v = virt;
+	else {
+		v = iobase;
+		iobase += size;
+		if (iobase > IODEV_END)	/* unlikely */
+			panic("mapiodev");
+	}
+	ret = (void *)v;
+	/*
+	 * Advance an integer cursor rather than doing arithmetic on
+	 * the void * itself (not valid in standard C).
+	 */
+	pa = (vm_offset_t)phys;
+	do {
+		pmap_enter(kernel_pmap, v,
+		    pa | PMAP_OBIO | PMAP_NC,
+		    VM_PROT_READ | VM_PROT_WRITE, 1);
+		v += PAGE_SIZE;
+		pa += PAGE_SIZE;
+	} while ((size -= PAGE_SIZE) > 0);
+	return (ret);
+}