* Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
* This code is derived from software contributed to Berkeley by
* @(#)machdep.c 5.4 (Berkeley) %G%
#include "../net/netisr.h"
#include "../i386/frame.h"
#include "../i386/segments.h"
#include "../i386/isa/rtc.h"
* Declare these as initialized data so we can patch them.
* Machine-dependent startup code
caddr_t sbase = { &Sysbase };*/
/* extern struct pte EMCmap[];
extern char EMCbase[]; */
/*
 * Boot parameters.  Explicitly initialized so they land in the data
 * segment, where a binary patch can change them (per the note above).
 */
int	boothowto = 0;	/* boot flags (tested against RB_* below) */
int	Maxmem = 0;	/* patchable memory-size override, reported by startup */
/*
 * Return an allocation end address adjusted for the ISA adapter hole:
 * if the span [b, t) would cross Sysbase + 0xa0000 (the start of the
 * 640K-1M I/O region), push the result up to Sysbase + 0x100000 so the
 * allocation never lands in the hole; otherwise return t unchanged.
 * (This copy was missing the fall-through return and closing brace.)
 */
caddr_t
bypasshole(b, t)
	caddr_t b, t;
{
	if (b <= Sysbase + 0xa0000 && t > Sysbase + 0xa0000)
		return (Sysbase + 0x100000);
	return (t);
}
register struct pte
*pte
;
int maxbufs
, base
, residual
;
extern struct map
*useriomap
;
* Initialize the console before we print anything out.
* Bounds check memory size information against bios values
* use the lesser of the two
biosmem
= rtcin(RTC_BASELO
)+ (rtcin(RTC_BASEHI
)<<8);
printf("Maxmem %x howto %x bootdev %x cyloff %x firstaddr %x bios %d %d\n",
Maxmem
, boothowto
, bootdev
, cyloffset
, firstaddr
,
rtcin(RTC_EXTLO
) + (rtcin(RTC_EXTHI
)<<8)
panic("does not have 640K of base memory");
biosmem
+= rtcin(RTC_EXTLO
) + (rtcin(RTC_EXTHI
)<<8);
biosmem
= biosmem
/4 - 1 ;
if (biosmem
< maxmem
) maxmem
=biosmem
;
if(forcemaxmem
&& maxmem
> forcemaxmem
)
* Initialize error message buffer (at end of core).
/* Problem to resolve. AT's have memory that is not contiguous, as
I/O address space for video adapters and network cards fall into
a range of 0xa0000 - 0x100000 . Note that the cmap really expects
contiguous memory. For the moment, use the bottom of memory for
kernel and run-time configured storage (e.g. valloc), using memory
above 0x100000 for the cmap, and wasting the stuff left over after
valloc-end up to 0xa0000 (640K). Will have to fix this before beta,
and will have to somehow move this out into per bus adapter directory
(e.g. configurable). For now, punt
How about starting cmap normally following valloc space, and then
write a routine that allocates only phys pages in the 0xa0000-0x100000
Temporary fix for beta, if we only have 640K, then cmap follows valloc
maxmem
-= btoc(sizeof (struct msgbuf
));
for (i
= 0; i
< btoc(sizeof (struct msgbuf
)); i
++)
*(int *)pte
++ = PG_V
| PG_KW
| ctob(maxmem
+ i
);
*(int *)pte
= PG_V
| PG_UW
| 0xc0000000;
printf("EMC at %x\n", EMCbase
);
freemem
= physmem
= maxmem
;
kdb_init(); /* startup kernel debugger */
* Good {morning,afternoon,evening,night}.
printf("real mem = %d\n", ctob(physmem
));
* Allocate space for system data structures.
* The first available real memory address is in "firstaddr".
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
v
= (caddr_t
)(Sysbase
+ (firstaddr
* NBPG
));
/*v = sbase + (firstaddr * NBPG);*/
/*
 * Carve space for a run-time sized structure out of kernel virtual
 * memory: first nudge v past the 0xa0000-0x100000 I/O hole if the
 * allocation would straddle it (see bypasshole() above), then point
 * `name' at the space and advance v past `num' elements of `type'.
 */
#define valloc(name, type, num) \
v = bypasshole (v, v + (int) ((name)+(num))) ; \
(name) = (type *)v; v = (caddr_t)((name)+(num))
/*
 * As valloc(), but additionally record the first address past the
 * allocation in `lim'.
 */
#define valloclim(name, type, num, lim) \
v = bypasshole (v, v + (int) ((name)+(num))) ; \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloclim(inode
, struct inode
, ninode
, inodeNINODE
);
valloclim(file
, struct file
, nfile
, fileNFILE
);
valloclim(proc
, struct proc
, nproc
, procNPROC
);
valloclim(text
, struct text
, ntext
, textNTEXT
);
valloc(cfree
, struct cblock
, nclist
);
valloc(callout
, struct callout
, ncallout
);
valloc(swapmap
, struct map
, nswapmap
= nproc
* 2);
valloc(argmap
, struct map
, ARGMAPSIZE
);
valloc(kernelmap
, struct map
, nproc
);
valloc(useriomap
, struct map
, nproc
);
valloc(mbmap
, struct map
, nmbclusters
/4);
valloc(namecache
, struct namecache
, nchsize
);
valloclim(quota
, struct quota
, nquota
, quotaNQUOTA
);
valloclim(dquot
, struct dquot
, ndquot
, dquotNDQUOT
);
* Determine how many buffers to allocate.
* Use 10% of memory for the first 2 Meg, 5% of the remaining
* memory. Insure a minimum of 16 buffers.
* We allocate 1/2 as many swap buffer headers as file i/o buffers.
if (physmem
< (2 * 1024 * 1024))
bufpages
= physmem
/ 10 / CLSIZE
;
bufpages
= ((2 * 1024 * 1024 + physmem
) / 20) / CLSIZE
;
nswbuf
= (nbuf
/ 2) &~ 1; /* force even */
nswbuf
= 256; /* sanity */
valloc(swbuf
, struct buf
, nswbuf
);
* Now the amount of virtual memory remaining for buffers
* can be calculated, estimating needs for the cmap.
ncmap
= (maxmem
*NBPG
- ((int)(v
- Sysbase
))) /
(CLBYTES
+ sizeof(struct cmap
)) + 2;
maxbufs
= ((SYSPTSIZE
* NBPG
) -
((int)(v
- Sysbase
+ ncmap
* sizeof(struct cmap
)))) /
(MAXBSIZE
+ sizeof(struct buf
));
panic("sys pt too small");
printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs
);
if (bufpages
> nbuf
* (MAXBSIZE
/ CLBYTES
))
bufpages
= nbuf
* (MAXBSIZE
/ CLBYTES
);
valloc(buf
, struct buf
, nbuf
);
* Allocate space for core map.
* Allow space for all of physical memory minus the amount
* dedicated to the system. The amount of physical memory
* dedicated to the system is the total virtual memory of
* the system thus far, plus core map, buffer pages,
* and buffer headers not yet allocated.
* Add 2: 1 because the 0th entry is unused, 1 for rounding.
/*ncmap = (maxmem*NBPG - ((int)((v - sbase) + bufpages*CLBYTES))) /*/
ncmap = (maxmem*NBPG - ((int)((v - Sysbase) + bufpages*CLBYTES))) /
(CLBYTES + sizeof(struct cmap)) + 2;
valloclim(cmap, struct cmap, ncmap, ecmap);
* Clear space allocated thus far, and make r/w entries
* for the space in the kernel map.
unixsize
= btoc((int)(v
- Sysbase
));
while (firstaddr
< unixsize
) {
*(int *)(&Sysmap
[firstaddr
]) = PG_V
| PG_KW
| ctob(firstaddr
);
clearseg((unsigned)firstaddr
);
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
v
= bypasshole (v
, (caddr_t
) ((int)(v
+ PGOFSET
) &~ PGOFSET
+
v
= (caddr_t
) ((int)(v
+ PGOFSET
) &~ PGOFSET
);
valloc(buffers
, char, MAXBSIZE
* nbuf
);
residual
= bufpages
% nbuf
;
mapaddr
= firstaddr
= btoc((unsigned) buffers
- (unsigned)Sysbase
);
for (i
= 0; i
< residual
; i
++) {
for (j
= 0; j
< (base
+ 1) * CLSIZE
; j
++) {
*(int *)(&Sysmap
[mapaddr
+j
]) = PG_V
| PG_KW
| ctob(firstaddr
);
clearseg((unsigned)firstaddr
);
mapaddr
+= MAXBSIZE
/ NBPG
;
for (i
= residual
; i
< nbuf
; i
++) {
for (j
= 0; j
< base
* CLSIZE
; j
++) {
*(int *)(&Sysmap
[mapaddr
+j
]) = PG_V
| PG_KW
| ctob(firstaddr
);
clearseg((unsigned)firstaddr
);
mapaddr
+= MAXBSIZE
/ NBPG
;
unixsize
= btoc((int)(v
- Sysbase
));
if (firstaddr
>= physmem
- 8*UPAGES
)
for (i
= 1; i
< ncallout
; i
++)
callout
[i
-1].c_next
= &callout
[i
];
* Initialize memory allocator and swap
* and user page table maps.
* THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
* WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
* cmap must not allocate the hole, so toss memory
if(firstaddr
< 640/4 && maxmem
> 1024/4){
printf("[not using %dK due to hole]\n", 4*(640/4 - firstaddr
));
printf("WARNING: NOT ENOUGH RAM MEMORY - RUNNING IN DEGRADED MODE\n");
meminit(firstaddr
, maxmem
);
printf("avail mem = %d\n", ctob(maxmem
));
printf("using %d buffers containing %d bytes of memory\n",
nbuf
, bufpages
* CLBYTES
);
rminit(kernelmap
, (long)USRPTSIZE
, (long)1,
* PTEs for mapping user space into kernel for phyio operations.
* One page is enough to handle 4Mb of simultaneous raw IO operations.
rminit(useriomap
, (long)USRIOSIZE
, (long)1, "usrio", nproc
);
rminit(mbmap
, (long)(nmbclusters
* CLSIZE
), (long)CLSIZE
,
"mbclusters", nmbclusters
/4);
/*intenable = 1; /* Enable interrupts from now on */
* Set up CPU-specific registers, cache, etc.
* Set up buffers, so they can be used to read disk labels.
* Return the difference (in microseconds)
* between the current time and a previous
* time as represented by the arguments.
* If there is a pending clock interrupt
* which has not been serviced due to high
* ipl, return error code.
vmtime(otime
, olbolt
, oicr
)
register int otime
, olbolt
, oicr
;
return (((time
.tv_sec
-otime
)*60 + lbolt
-olbolt
)*16667);
struct sigcontext
*sf_scp
;
struct save87 sf_fsave
; /* fpu coproc */
* Send an interrupt to process.
* Stack is set up to allow sigcode stored
* in u. to call routine, followed by kcall
* to sigreturn routine below. After sigreturn
* resets the signal mask, the stack, and the
* frame pointer, it returns to the user
sendsig(p
, sig
, mask
, frmtrp
)
register struct sigcontext
*scp
;
register struct sigframe
*fp
;
dprintf(DSIGNAL
,"sendsig %d code %d to pid %d frmtrp %d to locn %x\n",
sig
, u
.u_code
, u
.u_procp
->p_pid
, frmtrp
, p
);
* Allocate and validate space for the signal handler
* context. Note that if the stack is in P0 space, the
* call to grow() is a nop, and the useracc() check
* will fail if the process has not already allocated
* the space with a `brk'.
if (!u
.u_onstack
&& (u
.u_sigonstack
& sigmask(sig
))) {
scp
= (struct sigcontext
*)u
.u_sigsp
- 1;
scp
= (struct sigcontext
*)regs
[tESP
] - 1;
scp
= (struct sigcontext
*)regs
[sESP
] - 1;
fp
= (struct sigframe
*)scp
- 1;
if ((int)fp
<= USRSTACK
- ctob(u
.u_ssize
))
(void) grow((unsigned)fp
);
if (useracc((caddr_t
)fp
, sizeof (*fp
) + sizeof (*scp
), B_WRITE
) == 0) {
* Process has trashed its stack; give it an illegal
* instruction to halt it in its tracks.
printf("sendsig: failed to grow stack down to %x\n", fp
);
u
.u_signal
[SIGILL
] = SIG_DFL
;
u
.u_procp
->p_sigignore
&= ~sig
;
u
.u_procp
->p_sigcatch
&= ~sig
;
u
.u_procp
->p_sigmask
&= ~sig
;
psignal(u
.u_procp
, SIGILL
);
* Build the argument list for the signal handler.
if (sig
== SIGILL
|| sig
== SIGFPE
) {
/* indicate trap occurred from system call */
if(!frmtrp
) fp
->sf_code
|= 0x80;
/* save scratch registers */
* Build the signal context to be used by sigreturn.
scp
->sc_onstack
= oonstack
;
scp
->sc_ps
= regs
[tEFLAGS
];
regs
[tEIP
] = (int)u
.u_pcb
.pcb_sigc
;
scp
->sc_ps
= regs
[sEFLAGS
];
regs
[sEIP
] = (int)u
.u_pcb
.pcb_sigc
;
* System call to cleanup state after a signal
* has been taken. Reset signal mask and
* stack state from context left by sendsig (above).
* Return to previous pc and psl as specified by
* context left by sendsig. Check carefully to
* make sure that the user has not modified the
* psl to gain improper privileges or to cause
register struct sigframe
*fp
;
register struct sigcontext
*scp
;
register int *regs
= u
.u_ar0
;
fp
= (struct sigframe
*) regs
[sESP
] ;
if (useracc((caddr_t
)fp
, sizeof (*fp
), 0) == 0) {
/* restore scratch registers */
regs
[sEAX
] = fp
->sf_eax
;
regs
[sEDX
] = fp
->sf_edx
;
regs
[sECX
] = fp
->sf_ecx
;
if (useracc((caddr_t
)scp
, sizeof (*scp
), 0) == 0) {
if ((scp
->sc_ps
& PSL_MBZ
) != 0 || (scp
->sc_ps
& PSL_MBO
) != PSL_MBO
) {
u
.u_onstack
= scp
->sc_onstack
& 01;
u
.u_procp
->p_sigmask
= scp
->sc_mask
&~
(sigmask(SIGKILL
)|sigmask(SIGCONT
)|sigmask(SIGSTOP
));
regs
[sEFLAGS
] = scp
->sc_ps
;
register long dummy
; /* r12 is reserved */
register int howto
; /* r11 == how to boot */
register int devtype
; /* r10 == major of root dev */
if ((howto
&RB_NOSYNC
) == 0 && waittime
< 0 && bfreelist
[0].b_forw
) {
printf("syncing disks... ");
* Release inodes held by texts before update.
for (iter
= 0; iter
< 20; iter
++) {
for (bp
= &buf
[nbuf
]; --bp
>= buf
; )
if ((bp
->b_flags
& (B_BUSY
|B_INVAL
)) == B_BUSY
)
DELAY(10000); /* wait for printf to finish */
devtype
= major(rootdev
);
printf("halting (in tight loop); hit reset\n\n");
doadump(); /* CPBOOT's itsself */
dummy
= 0; dummy
= dummy
;
printf("howto %d, devtype %d\n", arghowto
, devtype
);
/* Values consumed by savecore when it recovers a kernel core dump. */
int	dumpmag = 0x8fca0101;	/* magic number identifying a valid dump */
int	dumpsize = 0;		/* dump size, also read by savecore */
* Doadump comes here after turning off memory management and
* getting on the dump stack, either when called above, or by
if ((minor(dumpdev
)&07) != 1)
printf("\ndumping to dev %x, offset %d\n", dumpdev
, dumplo
);
switch ((*bdevsw
[major(dumpdev
)].d_dump
)(dumpdev
)) {
printf("device not ready\n");
printf("area improper\n");
register struct timeval
*tvp
;
while (tvp
->tv_usec
> 1000000) {
physstrat(bp
, strat
, prio
)
* vmapbuf clobbers b_addr so we must remember it so that it
* can be restored after vunmapbuf. This is truly rude, we
* should really be storing this in a field in the buf struct
* but none are available and I didn't want to add one at
* this time. Note that b_addr for dirty page pushes is
* restored in vunmapbuf. (ugh!)
/* pageout daemon doesn't wait for pushed pages */
if (bp
->b_flags
& B_DIRTY
)
while ((bp
->b_flags
& B_DONE
) == 0)
sleep((caddr_t
)bp
, prio
);
* Clear registers on exec
/* should pass args to init on the stack */
for (rp
= &u
.u_ar0
[0]; rp
< &u
.u_ar0
[16];)
u
.u_ar0
[sEBP
] = 0; /* bottom of the fp chain */
* Initialize 386 and configure to run kernel
* Initialize segments & interrupt table
/*
 * Fixed slot numbers within the global descriptor table gdt[] below.
 */
#define GNULL_SEL 0 /* Null Descriptor */
#define GCODE_SEL 1 /* Kernel Code Descriptor */
#define GDATA_SEL 2 /* Kernel Data Descriptor */
#define GLDT_SEL 3 /* LDT - eventually one per process */
#define GTGATE_SEL 4 /* Process task switch gate */
#define GPANIC_SEL 5 /* Task state to consider panic from */
#define GPROC0_SEL 6 /* Task state process slot zero and up */
/*
 * The global descriptor table proper, loaded by lgdt() in init386().
 * NOTE(review): sized gdt[GPROC0_SEL] == 6 entries (valid indices 0-5),
 * so the GPROC0_SEL slot itself is out of bounds if ever indexed; the
 * init386() loop below copies only 6 descriptors, but confirm the
 * intended size (likely GPROC0_SEL + 1) against the master source.
 */
union descriptor gdt[GPROC0_SEL];
/* interrupt descriptor table: slots 0-31 are filled by setidt() in
 * init386(); the extra 16 are presumably for device vectors -- confirm */
struct gate_descriptor idt[32+16];
/* local descriptor table */
/*
 * Fixed slot numbers within the local descriptor table.
 * NOTE(review): LUCODE_SEL and LUDATA_SEL are referenced by init386()
 * below but not defined here -- presumably elided from this copy;
 * confirm their values (the three leading null entries of ldt_segs[]
 * suggest 3 and 4) against the master source.
 */
#define LSYS5CALLS_SEL 0 /* forced by intel BCS */
#define L43BSDCALLS_SEL 2 /* notyet */
/* separate stack, es,fs,gs sels ? */
/* #define LPOSIXCALLS_SEL 5 -- notyet */
/* software prototypes -- in more palatable form */
/*
 * Software prototypes for the gdt[] entries; init386() converts each
 * into a hardware descriptor via ssdtosd().
 * (This copy had dropped the "segment type" initializer from the two
 * all-zero entries, misaligning the remaining values against their
 * comments; the zero type fields are restored here -- no change to the
 * resulting all-zero descriptors.)
 */
struct soft_segment_descriptor gdt_segs[] = {
	/* GNULL_SEL: null descriptor */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Code Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* Data Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* GLDT_SEL: descriptor covering the LDT itself */
{	(int) ldt,		/* segment base address */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Null Descriptor - Placeholder */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Panic Tss Descriptor */
{	(int) &u,		/* segment base address */
	sizeof(tss)-1,		/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ }};
/*
 * Software prototypes for the local descriptor table; init386() copies
 * them into ldt via ssdtosd(), then overwrites slot LSYS5CALLS_SEL
 * with the system-call gate.
 * (This copy had dropped the "segment type" initializer from the three
 * all-zero entries; the zero type fields are restored here -- no change
 * to the resulting all-zero descriptors.)
 */
struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* Data Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ } };
/* table descriptors - used to load tables by microp */
struct region_descriptor r_gdt
= {
sizeof(gdt
)-1,(char *)gdt
struct region_descriptor r_idt
= {
sizeof(idt
)-1,(char *)idt
/*
 * Install interrupt-descriptor-table slot `idx' so that vector `idx'
 * transfers control to handler `func'; called from init386() below
 * with a gate type (`typ', e.g. SDT_SYS386TGT) and a privilege level
 * (`dpl', SEL_KPL or SEL_UPL).
 * NOTE(review): only the two offset halves of the gate are written
 * here -- `typ' and `dpl' are never stored, the selector and present
 * bits are never set, and no closing brace is visible.  The body
 * appears truncated in this copy; restore the remaining field
 * assignments from the master source rather than trusting this text.
 */
setidt(idx, func, typ, dpl)
	char *func;
{
	struct gate_descriptor *ip = idt + idx;

	/* split the 32-bit handler address across the gate's offset fields */
	ip->gd_looffset = (int)func;
	ip->gd_hioffset = ((int)func)>>16 ;
/*
 * Build the name of an interrupt-vector entry point by pasting an `X'
 * prefix onto `name' (IDTVEC(div) -> Xdiv); used by the extern
 * declarations and the setidt() calls below.
 * NOTE(review): the empty comment is the pre-ANSI token-pasting trick;
 * an ANSI preprocessor would need `X ## name' instead.
 */
#define IDTVEC(name) X/**/name
extern IDTVEC(div
), IDTVEC(dbg
), IDTVEC(nmi
), IDTVEC(bpt
), IDTVEC(ofl
),
IDTVEC(bnd
), IDTVEC(ill
), IDTVEC(dna
), IDTVEC(dble
), IDTVEC(fpusegm
),
IDTVEC(tss
), IDTVEC(missing
), IDTVEC(stk
), IDTVEC(prot
),
IDTVEC(page
), IDTVEC(rsvd
), IDTVEC(fpu
), IDTVEC(rsvd0
),
IDTVEC(rsvd1
), IDTVEC(rsvd2
), IDTVEC(rsvd3
), IDTVEC(rsvd4
),
IDTVEC(rsvd5
), IDTVEC(rsvd6
), IDTVEC(rsvd7
), IDTVEC(rsvd8
),
IDTVEC(rsvd9
), IDTVEC(rsvd10
), IDTVEC(rsvd11
), IDTVEC(rsvd12
),
IDTVEC(rsvd13
), IDTVEC(rsvd14
), IDTVEC(rsvd14
), IDTVEC(syscall
);
/* Control-register access routines, defined outside this file. */
int	lcr0(), lcr3(), rcr0(), rcr2();

/* Segment selector values filled in by init386() below. */
int	_udatasel;	/* user data selector: LSEL(LUDATA_SEL, SEL_UPL) */
int	_ucodesel;	/* user code selector: LSEL(LUCODE_SEL, SEL_UPL) */
int	_gsel_tss;	/* task state selector: GSEL(GPANIC_SEL, SEL_KPL) */
init386(first
) { extern ssdtosd(), lgdt(), lidt(), lldt(), etext
;
struct gate_descriptor
*gdp
;
/* make gdt memory segments */
gdt_segs
[GCODE_SEL
].ssd_limit
= btoc((int) &etext
+ NBPG
);
for (x
=0; x
< 6; x
++) ssdtosd(gdt_segs
+x
, gdt
+x
);
/* make ldt memory segments */
ldt_segs
[LUCODE_SEL
].ssd_limit
= btoc((int) Sysbase
);
/*ldt_segs[LUDATA_SEL].ssd_limit = btoc((int) Sysbase); */
ldt_segs
[LUDATA_SEL
].ssd_limit
= btoc(0xfffff000);
/* Note. eventually want private ldts per process */
for (x
=0; x
< 5; x
++) ssdtosd(ldt_segs
+x
, ldt
+x
);
setidt(0, &IDTVEC(div
), SDT_SYS386TGT
, SEL_KPL
);
setidt(1, &IDTVEC(dbg
), SDT_SYS386TGT
, SEL_KPL
);
setidt(2, &IDTVEC(nmi
), SDT_SYS386TGT
, SEL_KPL
);
setidt(3, &IDTVEC(bpt
), SDT_SYS386TGT
, SEL_UPL
);
setidt(4, &IDTVEC(ofl
), SDT_SYS386TGT
, SEL_KPL
);
setidt(5, &IDTVEC(bnd
), SDT_SYS386TGT
, SEL_KPL
);
setidt(6, &IDTVEC(ill
), SDT_SYS386TGT
, SEL_KPL
);
setidt(7, &IDTVEC(dna
), SDT_SYS386TGT
, SEL_KPL
);
setidt(8, &IDTVEC(dble
), SDT_SYS386TGT
, SEL_KPL
);
setidt(9, &IDTVEC(fpusegm
), SDT_SYS386TGT
, SEL_KPL
);
setidt(10, &IDTVEC(tss
), SDT_SYS386TGT
, SEL_KPL
);
setidt(11, &IDTVEC(missing
), SDT_SYS386TGT
, SEL_KPL
);
setidt(12, &IDTVEC(stk
), SDT_SYS386TGT
, SEL_KPL
);
setidt(13, &IDTVEC(prot
), SDT_SYS386TGT
, SEL_KPL
);
setidt(14, &IDTVEC(page
), SDT_SYS386TGT
, SEL_KPL
);
setidt(15, &IDTVEC(rsvd
), SDT_SYS386TGT
, SEL_KPL
);
setidt(16, &IDTVEC(fpu
), SDT_SYS386TGT
, SEL_KPL
);
setidt(17, &IDTVEC(rsvd0
), SDT_SYS386TGT
, SEL_KPL
);
setidt(18, &IDTVEC(rsvd1
), SDT_SYS386TGT
, SEL_KPL
);
setidt(19, &IDTVEC(rsvd2
), SDT_SYS386TGT
, SEL_KPL
);
setidt(20, &IDTVEC(rsvd3
), SDT_SYS386TGT
, SEL_KPL
);
setidt(21, &IDTVEC(rsvd4
), SDT_SYS386TGT
, SEL_KPL
);
setidt(22, &IDTVEC(rsvd5
), SDT_SYS386TGT
, SEL_KPL
);
setidt(23, &IDTVEC(rsvd6
), SDT_SYS386TGT
, SEL_KPL
);
setidt(24, &IDTVEC(rsvd7
), SDT_SYS386TGT
, SEL_KPL
);
setidt(25, &IDTVEC(rsvd8
), SDT_SYS386TGT
, SEL_KPL
);
setidt(26, &IDTVEC(rsvd9
), SDT_SYS386TGT
, SEL_KPL
);
setidt(27, &IDTVEC(rsvd10
), SDT_SYS386TGT
, SEL_KPL
);
setidt(28, &IDTVEC(rsvd11
), SDT_SYS386TGT
, SEL_KPL
);
setidt(29, &IDTVEC(rsvd12
), SDT_SYS386TGT
, SEL_KPL
);
setidt(30, &IDTVEC(rsvd13
), SDT_SYS386TGT
, SEL_KPL
);
setidt(31, &IDTVEC(rsvd14
), SDT_SYS386TGT
, SEL_KPL
);
lgdt(gdt
, sizeof(gdt
)-1);
lidt(idt
, sizeof(idt
)-1);
lldt(GSEL(GLDT_SEL
, SEL_KPL
));
/* make a initial tss so microp can get interrupt stack on syscall! */
u
.u_pcb
.pcbtss
.tss_esp0
= (int) &u
+ UPAGES
*NBPG
;
u
.u_pcb
.pcbtss
.tss_ss0
= GSEL(GDATA_SEL
, SEL_KPL
) ;
_gsel_tss
= GSEL(GPANIC_SEL
, SEL_KPL
);
/* make a call gate to reenter kernel with */
gdp
= &ldt
[LSYS5CALLS_SEL
].gd
;
x
= (int) &IDTVEC(syscall
);
gdp
->gd_selector
= GSEL(GCODE_SEL
,SEL_KPL
);
gdp
->gd_type
= SDT_SYS386CGT
;
gdp
->gd_hioffset
= ((int) &IDTVEC(syscall
)) >>16;
/* transfer to user mode */
_ucodesel
= LSEL(LUCODE_SEL
, SEL_UPL
);
_udatasel
= LSEL(LUDATA_SEL
, SEL_UPL
);
* zero out physical memory
* specified in relocation units (NBPG bytes)
CMAP1
= PG_V
| PG_KW
| ctob(n
);
* copy a page of physical memory
* specified in relocation units (NBPG bytes)
CMAP2
= PG_V
| PG_KW
| ctob(n
);
bcopy(frm
, &CADDR2
,NBPG
);
* insert an element into a queue
register struct prochd
*element
, *head
;
element
->ph_link
= head
->ph_link
;
head
->ph_link
= (struct proc
*)element
;
element
->ph_rlink
= (struct proc
*)head
;
((struct prochd
*)(element
->ph_link
))->ph_rlink
=(struct proc
*)element
;
* remove an element from a queue
register struct prochd
*element
;
((struct prochd
*)(element
->ph_link
))->ph_rlink
= element
->ph_rlink
;
((struct prochd
*)(element
->ph_rlink
))->ph_link
= element
->ph_link
;
element
->ph_rlink
= (struct proc
*)0;
* Below written in C to allow access to debugging code
copyinstr(fromaddr
, toaddr
, maxlength
, lencopied
) int *lencopied
;
if(lencopied
) *lencopied
= tally
;
if(lencopied
) *lencopied
= tally
;
if(lencopied
) *lencopied
= tally
;
copyoutstr(fromaddr
, toaddr
, maxlength
, lencopied
) int *lencopied
;
c
= subyte(toaddr
++,*fromaddr
);
if (c
== -1) return(EFAULT
);
if(lencopied
) *lencopied
= tally
;
if(lencopied
) *lencopied
= tally
;
copystr(fromaddr
, toaddr
, maxlength
, lencopied
) int *lencopied
;
u_char
*fromaddr
, *toaddr
; {
if(lencopied
) *lencopied
= tally
;
if(lencopied
) *lencopied
= tally
;
* ovbcopy - like bcopy, but recognizes overlapping ranges and handles
int bytes
; /* num bytes to copy */
/* Assume that bcopy copies left-to-right (low addr first). */
if (from
+ bytes
<= to
|| to
+ bytes
<= from
|| to
== from
)
bcopy(from
, to
, bytes
); /* non-overlapping or no-op*/
bcopy(from
, to
, bytes
); /* overlapping but OK */
/* to > from: overlapping, and must copy right-to-left. */