/*-
* Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* %sccs.include.386.c%
*
* @(#)machdep.c 5.4 (Berkeley) %G%
*/
#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "map.h"
#include "vm.h"
#include "proc.h"
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "inode.h"
#include "file.h"
#include "text.h"
#include "clist.h"
#include "callout.h"
#include "cmap.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "quota.h"
#include "../net/netisr.h"
#include "../i386/frame.h"
#include "../i386/reg.h"
#include "../i386/segments.h"
#include "../i386/pte.h"
#include "../i386/psl.h"
#include "../i386/isa/rtc.h"
/*
* Declare these as initialized data so we can patch them.
*/
int nswbuf = 0;
#ifdef NBUF
int nbuf = NBUF;
#else
int nbuf = 0;
#endif
#ifdef BUFPAGES
int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
int kernmem;
/*
* Machine-dependent startup code
*/
/*extern char Sysbase;
caddr_t sbase = { &Sysbase };*/
extern char Sysbase[];
/* extern struct pte EMCmap[];
extern char EMCbase[]; */
int boothowto = 0, Maxmem = 0;
extern int bootdev;
#ifdef SMALL
extern int forcemaxmem;
#endif
int biosmem;
extern cyloffset;
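/*
 * The ISA architecture leaves a hole in physical memory between 640K
 * (0xa0000) and 1Mb (0x100000) for display adapters and expansion ROM.
 * If an allocation running from b to t would straddle the start of the
 * hole, push it up so that it resumes at 1Mb instead.
 */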
caddr_t bypasshole(b,t) caddr_t b,t; {
if (b <= Sysbase + 0xa0000 && t > Sysbase + 0xa0000)
return(Sysbase + 0x100000);
return(b);
}
startup(firstaddr)
int firstaddr;
{
register int unixsize;
register unsigned i;
register struct pte *pte;
int mapaddr, j;
register caddr_t v;
int maxbufs, base, residual;
extern struct map *useriomap;
/*
* Initialize the console before we print anything out.
*/
/*cninit();*/
/*
* Bounds-check the memory size information against the BIOS values;
* use the lesser of the two.
*/
biosmem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
printf("Maxmem %x howto %x bootdev %x cyloff %x firstaddr %x bios %d %d\n",
Maxmem, boothowto, bootdev, cyloffset, firstaddr,
biosmem,
rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI)<<8)
);
maxmem = Maxmem-1;
#ifdef notdef
if(biosmem != 640)
panic("does not have 640K of base memory");
biosmem = 1024;
biosmem += rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI)<<8);
biosmem = biosmem/4 - 1 ;
if (biosmem < maxmem) maxmem=biosmem;
#ifdef SMALL
if(forcemaxmem && maxmem > forcemaxmem)
maxmem = forcemaxmem-1;
#endif
#endif
/*
maxmem = 0xA00;*/
/*
* Initialize error message buffer (at end of core).
*/
/* Problem to resolve. AT's have memory that is not contiguous, as
I/O address space for video adapters and network cards falls into
the range 0xa0000 - 0x100000 . Note that the cmap really expects
contiguous memory. For the moment, use the bottom of memory for
kernel and run-time configured storage (e.g. valloc), using memory
above 0x100000 for the cmap, and wasting the stuff left over after
valloc-end up to 0xa0000 (640K). Will have to fix this before beta,
and will have to somehow move this out into a per-bus-adapter directory
(e.g. configurable). For now, punt.
How about starting cmap normally following valloc space, and then
writing a routine that allocs only phys pages in the 0xa0000-0x100000
hole?
Temporary fix for beta: if we only have 640K, then cmap follows valloc
up to 640K.
*/
maxmem -= btoc(sizeof (struct msgbuf));
pte = msgbufmap;
for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
*(int *)pte++ = PG_V | PG_KW | ctob(maxmem + i);
#ifdef notdef
/* XXX EMC */
pte = EMCmap;
*(int *)pte = PG_V | PG_UW | 0xc0000000;
printf("EMC at %x\n", EMCbase);
#endif
freemem = physmem = maxmem;
load_cr3(_cr3());
#ifdef KDB
kdb_init(); /* startup kernel debugger */
#endif
/*
* Good {morning,afternoon,evening,night}.
*/
printf(version);
printf("real mem = %d\n", ctob(physmem));
/*
* Allocate space for system data structures.
* The first available real memory address is in "firstaddr".
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
v = (caddr_t)(Sysbase + (firstaddr * NBPG));
/*v = sbase + (firstaddr * NBPG);*/
#define valloc(name, type, num) \
v = bypasshole (v, v + (int) ((name)+(num))) ; \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
v = bypasshole (v, v + (int) ((name)+(num))) ; \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
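/*
 * These macros run before (name) has been assigned; the kernel pointers
 * allocated here start out zero, so (int)((name)+(num)) is simply
 * num*sizeof(*name), the byte size of the request.  For example,
 * valloc(buf, struct buf, nbuf) is roughly:
 *	v = bypasshole(v, v + nbuf*sizeof(struct buf));
 *	buf = (struct buf *)v;
 *	v = (caddr_t)(buf + nbuf);
 * carving nbuf buffer headers out of the arena at v, and skipping over
 * the ISA hole if the request would straddle it.
 */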
valloclim(inode, struct inode, ninode, inodeNINODE);
valloclim(file, struct file, nfile, fileNFILE);
valloclim(proc, struct proc, nproc, procNPROC);
valloclim(text, struct text, ntext, textNTEXT);
valloc(cfree, struct cblock, nclist);
valloc(callout, struct callout, ncallout);
valloc(swapmap, struct map, nswapmap = nproc * 2);
valloc(argmap, struct map, ARGMAPSIZE);
valloc(kernelmap, struct map, nproc);
valloc(useriomap, struct map, nproc);
valloc(mbmap, struct map, nmbclusters/4);
valloc(namecache, struct namecache, nchsize);
#ifdef QUOTA
valloclim(quota, struct quota, nquota, quotaNQUOTA);
valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif
/*
* Determine how many buffers to allocate.
* Use 10% of memory for the first 2 Meg, 5% of the remaining
* memory. Ensure a minimum of 16 buffers.
* We allocate 1/2 as many swap buffer headers as file i/o buffers.
*/
if (bufpages == 0)
if (physmem < (2 * 1024 * 1024))
bufpages = physmem / 10 / CLSIZE;
else
bufpages = ((2 * 1024 * 1024 + physmem) / 20) / CLSIZE;
if (nbuf == 0) {
nbuf = bufpages / 2;
if (nbuf < 16)
nbuf = 16;
}
if (nswbuf == 0) {
nswbuf = (nbuf / 2) &~ 1; /* force even */
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
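/*
 * For example, on a 4Mb machine with 4Kb pages (and assuming CLSIZE is 1,
 * one page per cluster), physmem is about 1024 clicks, giving
 * bufpages = 1024/10 = 102 clusters of buffer cache, nbuf = 51 buffer
 * headers, and nswbuf = 24 swap headers.  Note that physmem is counted
 * in clicks, so on any machine of this era the first (10%) branch above
 * is the one taken.
 */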
valloc(swbuf, struct buf, nswbuf);
/*
* Now the amount of virtual memory remaining for buffers
* can be calculated, estimating needs for the cmap.
*/
ncmap = (maxmem*NBPG - ((int)(v - Sysbase))) /
(CLBYTES + sizeof(struct cmap)) + 2;
maxbufs = ((SYSPTSIZE * NBPG) -
((int)(v - Sysbase + ncmap * sizeof(struct cmap)))) /
(MAXBSIZE + sizeof(struct buf));
if (maxbufs < 16)
panic("sys pt too small");
if (nbuf > maxbufs) {
printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
nbuf = maxbufs;
}
if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
bufpages = nbuf * (MAXBSIZE / CLBYTES);
valloc(buf, struct buf, nbuf);
/*
* Allocate space for core map.
* Allow space for all of physical memory minus the amount
* dedicated to the system. The amount of physical memory
* dedicated to the system is the total virtual memory of
* the system thus far, plus core map, buffer pages,
* and buffer headers not yet allocated.
* Add 2: 1 because the 0th entry is unused, 1 for rounding.
*/
/*ncmap = (maxmem*NBPG - ((int)((v - sbase) + bufpages*CLBYTES))) /*/
ncmap = (maxmem*NBPG - ((int)((v - Sysbase) + bufpages*CLBYTES))) /
(CLBYTES + sizeof(struct cmap)) + 2;
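/*
 * Each cluster of managed memory costs CLBYTES of physical memory plus
 * one struct cmap entry, so n entries fit when
 * n * (CLBYTES + sizeof(struct cmap)) <= bytes remaining; the +2 covers
 * the unused 0th entry and rounding, as noted above.
 */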
valloclim(cmap, struct cmap, ncmap, ecmap);
/*
* Clear space allocated thus far, and make r/w entries
* for the space in the kernel map.
*/
unixsize = btoc((int)(v - Sysbase));
while (firstaddr < unixsize) {
*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | ctob(firstaddr);
clearseg((unsigned)firstaddr);
firstaddr++;
}
/*
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
*/
v = bypasshole (v, (caddr_t) (((int)(v + PGOFSET) &~ PGOFSET) +
MAXBSIZE*nbuf));
v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
valloc(buffers, char, MAXBSIZE * nbuf);
base = bufpages / nbuf;
residual = bufpages % nbuf;
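/*
 * Distribute the bufpages clusters of real memory among the nbuf
 * buffers: the first "residual" buffers get base+1 clusters apiece,
 * the rest get base.  Each buffer still owns MAXBSIZE of virtual
 * space, so mapaddr advances by MAXBSIZE/NBPG ptes per buffer while
 * firstaddr advances only by the pages actually handed out.
 */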
mapaddr = firstaddr = btoc((unsigned) buffers - (unsigned)Sysbase);
for (i = 0; i < residual; i++) {
for (j = 0; j < (base + 1) * CLSIZE; j++) {
*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | ctob(firstaddr);
clearseg((unsigned)firstaddr);
firstaddr++;
}
mapaddr += MAXBSIZE / NBPG;
}
for (i = residual; i < nbuf; i++) {
for (j = 0; j < base * CLSIZE; j++) {
*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | ctob(firstaddr);
clearseg((unsigned)firstaddr);
firstaddr++;
}
mapaddr += MAXBSIZE / NBPG;
}
unixsize = btoc((int)(v - Sysbase));
if (firstaddr >= physmem - 8*UPAGES)
panic("no memory");
/*
* Initialize callouts
*/
callfree = callout;
for (i = 1; i < ncallout; i++)
callout[i-1].c_next = &callout[i];
/*
* Initialize memory allocator and swap
* and user page table maps.
*
* THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
* WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
*/
/*
* cmap must not allocate the hole, so toss memory
*/
if(firstaddr < 640/4 && maxmem > 1024/4){
printf("[not using %dK due to hole]\n", 4*(640/4 - firstaddr));
firstaddr = 0x100;
}
if(maxmem < 2048/4-10)
printf("WARNING: NOT ENOUGH RAM MEMORY - RUNNING IN DEGRADED MODE\n");
meminit(firstaddr, maxmem);
maxmem = freemem;
printf("avail mem = %d\n", ctob(maxmem));
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
rminit(kernelmap, (long)USRPTSIZE, (long)1,
"usrpt", nproc);
/*
* PTEs for mapping user space into kernel for physio operations.
* One page is enough to handle 4Mb of simultaneous raw IO operations.
*/
rminit(useriomap, (long)USRIOSIZE, (long)1, "usrio", nproc);
rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
"mbclusters", nmbclusters/4);
/* intenable = 1; */		/* Enable interrupts from now on */
/*
* Set up CPU-specific registers, cache, etc.
*/
initcpu();
/*
* Set up buffers, so they can be used to read disk labels.
*/
bhinit();
binit();
/*
* Configure the system.
*/
configure();
}
#ifdef PGINPROF
/*
* Return the difference (in microseconds)
* between the current time and a previous
* time as represented by the arguments.
* If there is a pending clock interrupt
* which has not been serviced due to high
* ipl, return error code.
*/
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
register int otime, olbolt, oicr;
{
return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
}
#endif
struct sigframe {
int sf_signum;
int sf_code;
struct sigcontext *sf_scp;
int (*sf_handler)();
int sf_eax;
int sf_edx;
int sf_ecx;
struct save87 sf_fsave; /* fpu coproc */
} ;
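/*
 * sendsig (below) builds one of these frames on the handler's stack:
 * sf_signum, sf_code and sf_scp are the arguments the handler sees,
 * sf_handler is the routine the sigcode trampoline in the u. area
 * transfers to, and the scratch registers are stashed here so that
 * sigreturn can put back whatever the handler clobbers.  Space for
 * FPU state is reserved but not yet filled in.
 */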
/*
* Send an interrupt to process.
*
* Stack is set up to allow sigcode stored
* in u. to call routine, followed by kcall
* to sigreturn routine below. After sigreturn
* resets the signal mask, the stack, and the
* frame pointer, it returns to the user
* specified pc, psl.
*/
sendsig(p, sig, mask, frmtrp)
int (*p)(), sig, mask;
{
register struct sigcontext *scp;
register int *regs;
register struct sigframe *fp;
int oonstack;
#include "dbg.h"
dprintf(DSIGNAL,"sendsig %d code %d to pid %d frmtrp %d to locn %x\n",
sig, u.u_code, u.u_procp->p_pid, frmtrp, p);
regs = u.u_ar0;
oonstack = u.u_onstack;
/*
* Allocate and validate space for the signal handler
* context. Note that if the stack is in P0 space, the
* call to grow() is a nop, and the useracc() check
* will fail if the process has not already allocated
* the space with a `brk'.
*/
if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
scp = (struct sigcontext *)u.u_sigsp - 1;
u.u_onstack = 1;
} else {
if (frmtrp)
scp = (struct sigcontext *)regs[tESP] - 1;
else
scp = (struct sigcontext *)regs[sESP] - 1;
}
fp = (struct sigframe *)scp - 1;
if ((int)fp <= USRSTACK - ctob(u.u_ssize))
(void) grow((unsigned)fp);
if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
/*
* Process has trashed its stack; give it an illegal
* instruction to halt it in its tracks.
*/
printf("sendsig: failed to grow stack down to %x\n", fp);
u.u_signal[SIGILL] = SIG_DFL;
sig = sigmask(SIGILL);
u.u_procp->p_sigignore &= ~sig;
u.u_procp->p_sigcatch &= ~sig;
u.u_procp->p_sigmask &= ~sig;
psignal(u.u_procp, SIGILL);
return;
}
/*
* Build the argument list for the signal handler.
*/
fp->sf_signum = sig;
if (sig == SIGILL || sig == SIGFPE) {
fp->sf_code = u.u_code;
u.u_code = 0;
} else
fp->sf_code = 0;
/* indicate the signal was posted from a system call rather than a trap */
if(!frmtrp) fp->sf_code |= 0x80;
fp->sf_scp = scp;
fp->sf_handler = p;
/* save scratch registers */
if(frmtrp) {
fp->sf_eax = regs[tEAX];
fp->sf_edx = regs[tEDX];
fp->sf_ecx = regs[tECX];
} else {
fp->sf_eax = regs[sEAX];
fp->sf_edx = regs[sEDX];
fp->sf_ecx = regs[sECX];
}
#ifdef notyet
/* XXX FPU state? */
#endif
/*
* Build the signal context to be used by sigreturn.
*/
scp->sc_onstack = oonstack;
scp->sc_mask = mask;
if(frmtrp) {
scp->sc_sp = regs[tESP];
scp->sc_fp = regs[tEBP];
scp->sc_pc = regs[tEIP];
scp->sc_ps = regs[tEFLAGS];
regs[tESP] = (int)fp;
regs[tEIP] = (int)u.u_pcb.pcb_sigc;
} else {
scp->sc_sp = regs[sESP];
scp->sc_fp = regs[sEBP];
scp->sc_pc = regs[sEIP];
scp->sc_ps = regs[sEFLAGS];
regs[sESP] = (int)fp;
regs[sEIP] = (int)u.u_pcb.pcb_sigc;
}
}
/*
* System call to cleanup state after a signal
* has been taken. Reset signal mask and
* stack state from context left by sendsig (above).
* Return to previous pc and psl as specified by
* context left by sendsig. Check carefully to
* make sure that the user has not modified the
* psl to gain improper privileges or to cause
* a machine fault.
*/
sigreturn()
{
register struct sigframe *fp;
register struct sigcontext *scp;
register int *regs = u.u_ar0;
fp = (struct sigframe *) regs[sESP] ;
if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0) {
u.u_error = EINVAL;
return;
}
/* restore scratch registers */
regs[sEAX] = fp->sf_eax ;
regs[sEDX] = fp->sf_edx ;
regs[sECX] = fp->sf_ecx ;
#ifdef notyet
/* XXX FPU state? */
#endif
scp = fp->sf_scp;
if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0) {
u.u_error = EINVAL;
return;
}
#ifdef notyet
if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
u.u_error = EINVAL;
return;
}
#endif
u.u_eosys = JUSTRETURN;
u.u_onstack = scp->sc_onstack & 01;
u.u_procp->p_sigmask = scp->sc_mask &~
(sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
regs[sEBP] = scp->sc_fp;
regs[sESP] = scp->sc_sp;
regs[sEIP] = scp->sc_pc;
regs[sEFLAGS] = scp->sc_ps;
}
int waittime = -1;
boot(arghowto)
int arghowto;
{
register long dummy; /* r12 is reserved */
register int howto; /* r11 == how to boot */
register int devtype; /* r10 == major of root dev */
extern char *panicstr;
howto = arghowto;
if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
register struct buf *bp;
int iter, nbusy;
waittime = 0;
(void) splnet();
printf("syncing disks... ");
/*
* Release inodes held by texts before update.
*/
if (panicstr == 0)
xumount(NODEV);
update();
for (iter = 0; iter < 20; iter++) {
nbusy = 0;
for (bp = &buf[nbuf]; --bp >= buf; )
if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
nbusy++;
if (nbusy == 0)
break;
printf("%d ", nbusy);
DELAY(40000 * iter);
}
if (nbusy)
printf("giving up\n");
else
printf("done\n");
DELAY(10000); /* wait for printf to finish */
}
splhigh();
devtype = major(rootdev);
if (howto&RB_HALT) {
printf("halting (in tight loop); hit reset\n\n");
reset_cpu();
for (;;) ;
} else {
if (howto & RB_DUMP) {
doadump(); /* CPBOOT's itself */
/*NOTREACHED*/
}
}
#ifdef lint
dummy = 0; dummy = dummy;
printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
reset_cpu();
for(;;) ;
/*NOTREACHED*/
}
int dumpmag = 0x8fca0101; /* magic number for savecore */
int dumpsize = 0; /* also for savecore */
/*
* Doadump comes here after turning off memory management and
* getting on the dump stack, either when called above, or by
* the auto-restart code.
*/
dumpsys()
{
if (dumpdev == NODEV)
return;
#ifdef notdef
if ((minor(dumpdev)&07) != 1)
return;
#endif
dumpsize = physmem;
printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
printf("dump ");
switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
case ENXIO:
printf("device bad\n");
break;
case EFAULT:
printf("device not ready\n");
break;
case EINVAL:
printf("area improper\n");
break;
case EIO:
printf("i/o error\n");
break;
default:
printf("succeeded\n");
break;
}
printf("\n\n");
DELAY(1000);
}
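/*
 * Return the current time of day.  No sub-tick hardware counter is
 * consulted here; the time is simply padded by one tick's worth of
 * microseconds and then normalized.
 */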
microtime(tvp)
register struct timeval *tvp;
{
int s = splhigh();
*tvp = time;
tvp->tv_usec += tick;
while (tvp->tv_usec > 1000000) {
tvp->tv_sec++;
tvp->tv_usec -= 1000000;
}
splx(s);
}
physstrat(bp, strat, prio)
struct buf *bp;
int (*strat)(), prio;
{
register int s;
caddr_t baddr;
/*
* vmapbuf clobbers b_addr so we must remember it so that it
* can be restored after vunmapbuf. This is truly rude, we
* should really be storing this in a field in the buf struct
* but none are available and I didn't want to add one at
* this time. Note that b_addr for dirty page pushes is
* restored in vunmapbuf. (ugh!)
*/
baddr = bp->b_un.b_addr;
vmapbuf(bp);
(*strat)(bp);
/* pageout daemon doesn't wait for pushed pages */
if (bp->b_flags & B_DIRTY)
return;
s = splbio();
while ((bp->b_flags & B_DONE) == 0)
sleep((caddr_t)bp, prio);
splx(s);
vunmapbuf(bp);
bp->b_un.b_addr = baddr;
}
initcpu()
{
register struct proc *p;
p = &proc[0];
}
/*
* Clear registers on exec
*/
setregs(entry)
u_long entry;
{
#ifdef notdef
/* should pass args to init on the stack */
for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
*rp++ = 0;
#endif
u.u_ar0[sEBP] = 0; /* bottom of the fp chain */
u.u_ar0[sEIP] = entry;
}
/*
* Initialize 386 and configure to run kernel
*/
/*
* Initialize segments & interrupt table
*/
#define GNULL_SEL 0 /* Null Descriptor */
#define GCODE_SEL 1 /* Kernel Code Descriptor */
#define GDATA_SEL 2 /* Kernel Data Descriptor */
#define GLDT_SEL 3 /* LDT - eventually one per process */
#define GTGATE_SEL 4 /* Process task switch gate */
#define GPANIC_SEL 5 /* Task state to consider panic from */
#define GPROC0_SEL 6 /* Task state process slot zero and up */
union descriptor gdt[GPROC0_SEL];
/* interrupt descriptor table */
struct gate_descriptor idt[32+16];
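/* (32 processor exception vectors plus 16 ISA interrupt vectors) */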
/* local descriptor table */
union descriptor ldt[5];
#define LSYS5CALLS_SEL 0 /* forced by intel BCS */
#define LSYS5SIGR_SEL 1
#define L43BSDCALLS_SEL 2 /* notyet */
#define LUCODE_SEL 3
#define LUDATA_SEL 4
/* separate stack, es,fs,gs sels ? */
/* #define LPOSIXCALLS_SEL	5 */	/* notyet */
struct i386tss tss;
/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* Null Descriptor */
{ 0x0, /* segment base address */
0x0, /* length - all address space */
0, /* segment type */
0, /* segment descriptor priority level */
0, /* segment descriptor present */
0,0,
0, /* default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
/* Code Descriptor for kernel */
{ 0x0, /* segment base address */
0xfffff, /* length - all address space */
SDT_MEMERA, /* segment type */
0, /* segment descriptor priority level */
1, /* segment descriptor present */
0,0,
1, /* default 32 vs 16 bit size */
1 /* limit granularity (byte/page units)*/ },
/* Data Descriptor for kernel */
{ 0x0, /* segment base address */
0xfffff, /* length - all address space */
SDT_MEMRWA, /* segment type */
0, /* segment descriptor priority level */
1, /* segment descriptor present */
0,0,
1, /* default 32 vs 16 bit size */
1 /* limit granularity (byte/page units)*/ },
/* LDT Descriptor */
{ (int) ldt, /* segment base address */
sizeof(ldt)-1, /* length - all address space */
SDT_SYSLDT, /* segment type */
0, /* segment descriptor priority level */
1, /* segment descriptor present */
0,0,
0, /* unused - default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
/* Null Descriptor - Placeholder */
{ 0x0, /* segment base address */
0x0, /* length - all address space */
0, /* segment type */
0, /* segment descriptor priority level */
0, /* segment descriptor present */
0,0,
0, /* default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
/* Panic Tss Descriptor */
{ (int) &u, /* segment base address */
sizeof(tss)-1, /* length - all address space */
SDT_SYS386TSS, /* segment type */
0, /* segment descriptor priority level */
1, /* segment descriptor present */
0,0,
0, /* unused - default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ }};
struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
{ 0x0, /* segment base address */
0x0, /* length - all address space */
0, /* segment type */
0, /* segment descriptor priority level */
0, /* segment descriptor present */
0,0,
0, /* default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
/* Null Descriptor - overwritten by call gate */
{ 0x0, /* segment base address */
0x0, /* length - all address space */
0, /* segment type */
0, /* segment descriptor priority level */
0, /* segment descriptor present */
0,0,
0, /* default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
/* Null Descriptor - overwritten by call gate */
{ 0x0, /* segment base address */
0x0, /* length - all address space */
0, /* segment type */
0, /* segment descriptor priority level */
0, /* segment descriptor present */
0,0,
0, /* default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
/* Code Descriptor for user */
{ 0x0, /* segment base address */
0xfffff, /* length - all address space */
SDT_MEMERA, /* segment type */
SEL_UPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0,0,
1, /* default 32 vs 16 bit size */
1 /* limit granularity (byte/page units)*/ },
/* Data Descriptor for user */
{ 0x0, /* segment base address */
0xfffff, /* length - all address space */
SDT_MEMRWA, /* segment type */
SEL_UPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0,0,
1, /* default 32 vs 16 bit size */
1 /* limit granularity (byte/page units)*/ } };
/* table descriptors - used to load tables by microprocessor */
struct region_descriptor r_gdt = {
sizeof(gdt)-1,(char *)gdt
};
struct region_descriptor r_idt = {
sizeof(idt)-1,(char *)idt
};
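/*
 * Install a gate for interrupt vector idx: the handler offset is split
 * across the low and high halves of the descriptor, the selector is the
 * kernel code segment (GDT slot GCODE_SEL, i.e. selector 8), and dpl
 * determines whether user mode may reach the vector directly (e.g. the
 * breakpoint gate at vector 3 uses SEL_UPL).
 */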
setidt(idx, func, typ, dpl) char *func; {
struct gate_descriptor *ip = idt + idx;
ip->gd_looffset = (int)func;
ip->gd_selector = 8;
ip->gd_stkcpy = 0;
ip->gd_xx = 0;
ip->gd_type = typ;
ip->gd_dpl = dpl;
ip->gd_p = 1;
ip->gd_hioffset = ((int)func)>>16 ;
}
#define IDTVEC(name) X/**/name
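/*
 * Old-style token pasting via an empty comment: IDTVEC(div) expands to
 * Xdiv, the name of the corresponding assembler entry point.
 */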
extern IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(rsvd14), IDTVEC(syscall);
int lcr0(), lcr3(), rcr0(), rcr2();
int _udatasel, _ucodesel, _gsel_tss;
init386(first) { extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
int x, *pi;
struct gate_descriptor *gdp;
/* make gdt memory segments */
gdt_segs[GCODE_SEL].ssd_limit = btoc((int) &etext + NBPG);
for (x=0; x < 6; x++) ssdtosd(gdt_segs+x, gdt+x);
/* make ldt memory segments */
ldt_segs[LUCODE_SEL].ssd_limit = btoc((int) Sysbase);
/*ldt_segs[LUDATA_SEL].ssd_limit = btoc((int) Sysbase); */
ldt_segs[LUDATA_SEL].ssd_limit = btoc(0xfffff000);
/* Note. eventually want private ldts per process */
for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);
/* exceptions */
setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL);
setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL);
setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL);
setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL);
setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_KPL);
setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL);
setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL);
setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL);
setidt(8, &IDTVEC(dble), SDT_SYS386TGT, SEL_KPL);
setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL);
setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL);
setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL);
setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL);
setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL);
setidt(14, &IDTVEC(page), SDT_SYS386TGT, SEL_KPL);
setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL);
setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL);
setidt(17, &IDTVEC(rsvd0), SDT_SYS386TGT, SEL_KPL);
setidt(18, &IDTVEC(rsvd1), SDT_SYS386TGT, SEL_KPL);
setidt(19, &IDTVEC(rsvd2), SDT_SYS386TGT, SEL_KPL);
setidt(20, &IDTVEC(rsvd3), SDT_SYS386TGT, SEL_KPL);
setidt(21, &IDTVEC(rsvd4), SDT_SYS386TGT, SEL_KPL);
setidt(22, &IDTVEC(rsvd5), SDT_SYS386TGT, SEL_KPL);
setidt(23, &IDTVEC(rsvd6), SDT_SYS386TGT, SEL_KPL);
setidt(24, &IDTVEC(rsvd7), SDT_SYS386TGT, SEL_KPL);
setidt(25, &IDTVEC(rsvd8), SDT_SYS386TGT, SEL_KPL);
setidt(26, &IDTVEC(rsvd9), SDT_SYS386TGT, SEL_KPL);
setidt(27, &IDTVEC(rsvd10), SDT_SYS386TGT, SEL_KPL);
setidt(28, &IDTVEC(rsvd11), SDT_SYS386TGT, SEL_KPL);
setidt(29, &IDTVEC(rsvd12), SDT_SYS386TGT, SEL_KPL);
setidt(30, &IDTVEC(rsvd13), SDT_SYS386TGT, SEL_KPL);
setidt(31, &IDTVEC(rsvd14), SDT_SYS386TGT, SEL_KPL);
#include "isa.h"
#if NISA >0
isa_defaultirq();
#endif
lgdt(gdt, sizeof(gdt)-1);
lidt(idt, sizeof(idt)-1);
lldt(GSEL(GLDT_SEL, SEL_KPL));
/* make an initial tss so the microprocessor can get an interrupt stack on syscall! */
u.u_pcb.pcbtss.tss_esp0 = (int) &u + UPAGES*NBPG;
u.u_pcb.pcbtss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
_gsel_tss = GSEL(GPANIC_SEL, SEL_KPL);
ltr(_gsel_tss);
/* make a call gate to reenter kernel with */
gdp = &ldt[LSYS5CALLS_SEL].gd;
x = (int) &IDTVEC(syscall);
gdp->gd_looffset = x++;
gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
gdp->gd_stkcpy = 0;
gdp->gd_type = SDT_SYS386CGT;
gdp->gd_dpl = SEL_UPL;
gdp->gd_p = 1;
gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
/* transfer to user mode */
_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
_udatasel = LSEL(LUDATA_SEL, SEL_UPL);
}
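/*
 * CMAP1/CADDR1 and CMAP2/CADDR2 below are a reserved pte and its
 * corresponding virtual address, used as a temporary window onto an
 * arbitrary physical page.  Reloading cr3 after rewriting the pte
 * flushes the stale translation from the TLB before the window is used.
 */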
/*
* zero out physical memory
* specified in relocation units (NBPG bytes)
*/
clearseg(n) {
extern CMAP1, CADDR1;
CMAP1 = PG_V | PG_KW | ctob(n);
load_cr3(_cr3());
bzero(&CADDR1,NBPG);
}
/*
* copy a page of physical memory
* specified in relocation units (NBPG bytes)
*/
copyseg(frm, n) {
extern CMAP2, CADDR2;
CMAP2 = PG_V | PG_KW | ctob(n);
load_cr3(_cr3());
bcopy(frm, &CADDR2,NBPG);
}
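/*
 * Post an AST: the pending AST is scheduled through the netisr
 * soft-interrupt machinery, which appears to be how this port arranges
 * for it to be noticed on the way back toward user mode.
 */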
aston() {
schednetisr(NETISR_AST);
}
/*
* insert an element into a queue
*/
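/*
 * (insque and remque are single instructions on the VAX; here they are
 * written out in C, operating on the prochd/proc link fields.)
 */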
#undef insque
_insque(element, head)
register struct prochd *element, *head;
{
element->ph_link = head->ph_link;
head->ph_link = (struct proc *)element;
element->ph_rlink = (struct proc *)head;
((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
}
/*
* remove an element from a queue
*/
#undef remque
_remque(element)
register struct prochd *element;
{
((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
element->ph_rlink = (struct proc *)0;
}
vmunaccess() {}
/*
* Below written in C to allow access to debugging code
*/
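/*
 * The string copy routines below move a null-terminated string of at
 * most maxlength bytes, returning 0 on success, ENOENT if maxlength is
 * exhausted before the null byte, or EFAULT on a user-space fault.
 * A non-null lencopied is filled in with the number of bytes processed.
 */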
copyinstr(fromaddr, toaddr, maxlength, lencopied) int *lencopied;
char *toaddr; {
int c,tally;
tally = 0;
while (maxlength--) {
c = fubyte(fromaddr++);
if (c == -1) {
if(lencopied) *lencopied = tally;
return(EFAULT);
}
tally++;
*toaddr++ = (char) c;
if (c == 0){
if(lencopied) *lencopied = tally;
return(0);
}
}
if(lencopied) *lencopied = tally;
return(ENOENT);
}
copyoutstr(fromaddr, toaddr, maxlength, lencopied) int *lencopied;
u_char *fromaddr; {
int c;
int tally;
tally = 0;
while (maxlength--) {
c = subyte(toaddr++,*fromaddr);
if (c == -1) return(EFAULT);
tally++;
if (*fromaddr++ == 0){
if(lencopied) *lencopied = tally;
return(0);
}
}
if(lencopied) *lencopied = tally;
return(ENOENT);
}
copystr(fromaddr, toaddr, maxlength, lencopied) int *lencopied;
u_char *fromaddr, *toaddr; {
int tally;
tally = 0;
while (maxlength--) {
*toaddr = *fromaddr++;
tally++;
if (*toaddr++ == 0) {
if(lencopied) *lencopied = tally;
return(0);
}
}
if(lencopied) *lencopied = tally;
return(ENOENT);
}
/*
* ovbcopy - like bcopy, but recognizes overlapping ranges and handles
* them correctly.
*/
ovbcopy(from, to, bytes)
char *from, *to;
int bytes; /* num bytes to copy */
{
/* Assume that bcopy copies left-to-right (low addr first). */
if (from + bytes <= to || to + bytes <= from || to == from)
bcopy(from, to, bytes); /* non-overlapping or no-op */
else if (from > to)
bcopy(from, to, bytes); /* overlapping but OK */
else {
/* to > from: overlapping, and must copy right-to-left. */
from += bytes - 1;
to += bytes - 1;
while (bytes-- > 0)
*to-- = *from--;
}
}