/*
 * Copyright (c) 1982,1986,1988 Regents of the University of California.
 * All rights reserved. The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 * @(#)machdep.c 7.14 (Berkeley) 5/26/88
 */
#include "../vaxuba/ubavar.h"
#include "../vaxuba/ubareg.h"
/*
 * Declare these as initialized data so we can patch them.
 */
int	msgbufmapped;		/* set when safe to use msgbuf */
/*
 * Machine-dependent startup code
 */
/*
 * NOTE(review): fragmentary extraction of the machine-dependent
 * startup() routine.  The function header, braces and many interior
 * statements are missing from this chunk; the surviving code lines are
 * preserved byte-for-byte below.  Do not compile as-is.
 */
register struct pte
*pte
;
int maxbufs
, base
, residual
;
/*
* Leave last 5k of phys. memory as console work area.
* Initialize error message buffer (at end of core).
 */
maxmem
-= btoc(sizeof (struct msgbuf
));
/* map each message-buffer page valid, kernel read/write */
for (i
= 0; i
< btoc(sizeof (struct msgbuf
)); i
++)
*(int *)pte
++ = PG_V
| PG_KW
| (maxmem
+ i
);
/*
* redirect console to qvss if it exists
* redirect console to qdss if it exists
* Good {morning,afternoon,evening,night}.
 */
printf("real mem = %d\n", ctob(physmem
));
/*
* Allocate space for system data structures.
* The first available real memory address is in "firstaddr".
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
 */
v
= (caddr_t
)(KERNBASE
| (firstaddr
* NBPG
));
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloclim(inode
, struct inode
, ninode
, inodeNINODE
);
valloclim(file
, struct file
, nfile
, fileNFILE
);
valloclim(proc
, struct proc
, nproc
, procNPROC
);
valloclim(text
, struct text
, ntext
, textNTEXT
);
valloc(cfree
, struct cblock
, nclist
);
valloc(callout
, struct callout
, ncallout
);
valloc(swapmap
, struct map
, nswapmap
= nproc
* 2);
valloc(argmap
, struct map
, ARGMAPSIZE
);
valloc(kernelmap
, struct map
, nproc
);
valloc(mbmap
, struct map
, nmbclusters
/4);
valloc(namecache
, struct namecache
, nchsize
);
valloc(kmemmap
, struct map
, ekmempt
- kmempt
);
valloc(kmemusage
, struct kmemusage
, ekmempt
- kmempt
);
valloclim(quota
, struct quota
, nquota
, quotaNQUOTA
);
valloclim(dquot
, struct dquot
, ndquot
, dquotNDQUOT
);
/*
* Determine how many buffers to allocate.
* Use 10% of memory for the first 2 Meg, 5% of the remaining
* memory. Insure a minimum of 16 buffers.
* We allocate 1/2 as many swap buffer headers as file i/o buffers.
 */
if (physmem
< (2 * 1024 * CLSIZE
))
bufpages
= physmem
/ 10 / CLSIZE
;
/* NOTE(review): intervening `else' lost in extraction */
bufpages
= ((2 * 1024 * CLSIZE
+ physmem
) / 20) / CLSIZE
;
nswbuf
= (nbuf
/ 2) &~ 1; /* force even */
/* NOTE(review): guard (nswbuf > 256) lost before the clamp below */
nswbuf
= 256; /* sanity */
valloc(swbuf
, struct buf
, nswbuf
);
/*
* Now the amount of virtual memory remaining for buffers
* can be calculated, estimating needs for the cmap.
 */
ncmap
= (maxmem
*NBPG
- ((int)v
&~ KERNBASE
)) /
(CLBYTES
+ sizeof(struct cmap
)) + 2;
maxbufs
= ((SYSPTSIZE
* NBPG
) -
((int)(v
+ ncmap
* sizeof(struct cmap
)) - KERNBASE
)) /
(MAXBSIZE
+ sizeof(struct buf
));
/* NOTE(review): the `if (maxbufs < 16)' guard is lost here */
panic("sys pt too small");
printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs
);
if (bufpages
> nbuf
* (MAXBSIZE
/ CLBYTES
))
bufpages
= nbuf
* (MAXBSIZE
/ CLBYTES
);
valloc(buf
, struct buf
, nbuf
);
/*
* Allocate space for core map.
* Allow space for all of phsical memory minus the amount
* dedicated to the system. The amount of physical memory
* dedicated to the system is the total virtual memory of
* the system thus far, plus core map, buffer pages,
* and buffer headers not yet allocated.
* Add 2: 1 because the 0th entry is unused, 1 for rounding.
 */
ncmap
= (maxmem
*NBPG
- ((int)(v
+ bufpages
*CLBYTES
) &~ KERNBASE
)) /
(CLBYTES
+ sizeof(struct cmap
)) + 2;
valloclim(cmap
, struct cmap
, ncmap
, ecmap
);
/*
* Clear space allocated thus far, and make r/w entries
* for the space in the kernel map.
 */
unixsize
= btoc((int)v
&~ KERNBASE
);
while (firstaddr
< unixsize
) {
*(int *)(&Sysmap
[firstaddr
]) = PG_V
| PG_KW
| firstaddr
;
clearseg((unsigned)firstaddr
);
/*
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
 */
v
= (caddr_t
) ((int)(v
+ PGOFSET
) &~ PGOFSET
);
valloc(buffers
, char, MAXBSIZE
* nbuf
);
/* NOTE(review): the computation of `base' (bufpages/nbuf) is lost */
residual
= bufpages
% nbuf
;
for (i
= 0; i
< nbuf
; i
++) {
n
= (i
< residual
? base
+ 1 : base
) * CLSIZE
;
for (j
= 0; j
< n
; j
++) {
*(int *)(&Sysmap
[mapaddr
+j
]) = PG_V
| PG_KW
| firstaddr
;
clearseg((unsigned)firstaddr
);
mapaddr
+= MAXBSIZE
/ NBPG
;
unixsize
= btoc((int)v
&~ KERNBASE
);
if (firstaddr
>= physmem
- 8*UPAGES
)
/* NOTE(review): panic for out-of-memory presumably lost here */
mtpr(TBIA
, 0); /* After we just cleared it all! */
/* chain the callout structures into a free list */
for (i
= 1; i
< ncallout
; i
++)
callout
[i
-1].c_next
= &callout
[i
];
/*
* Initialize memory allocator and swap
* and user page table maps.
* THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
* WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
 */
meminit(firstaddr
, maxmem
);
printf("avail mem = %d\n", ctob(maxmem
));
printf("using %d buffers containing %d bytes of memory\n",
nbuf
, bufpages
* CLBYTES
);
/* NOTE(review): trailing arguments of this rminit() call are lost */
rminit(kernelmap
, (long)USRPTSIZE
, (long)1,
rminit(mbmap
, (long)(nmbclusters
* CLSIZE
), (long)CLSIZE
,
"mbclusters", nmbclusters
/4);
kmeminit(); /* now safe to do malloc/free */
/*
 * Set up CPU-specific registers, cache, etc.
 * Set up buffers, so they can be used to read disk labels.
 * Clear restart inhibit flags.
 */
* Return the difference (in microseconds)
* between the current time and a previous
* time as represented by the arguments.
* If there is a pending clock interrupt
* which has not been serviced due to high
* ipl, return error code.
vmtime(otime
, olbolt
, oicr
)
register int otime
, olbolt
, oicr
;
return(((time
.tv_sec
-otime
)*60 + lbolt
-olbolt
)*16667 + mfpr(ICR
)-oicr
);
/*
 * Clear registers on exec
 *
 * NOTE(review): fragment of the exec register-setup code (apparently
 * setregs()); the function header and the loop body are missing from
 * this extraction.  Code lines preserved byte-for-byte.
 */
/* should pass args to init on the stack */
/* should also fix this code before using it, it's wrong */
/* wanna clear the scb? */
for (rp
= &u
.u_ar0
[0]; rp
< &u
.u_ar0
[16];)
/*
 * Send an interrupt to process.
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below. After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 *
 * NOTE(review): fragment of sendsig(); the function header, several
 * statements and closing braces are missing from this extraction.
 * Code lines preserved byte-for-byte.
 */
register struct sigcontext
*scp
;
register struct sigframe
{
struct sigcontext
*sf_scp
;
struct sigcontext
*sf_scpcopy
;
/*
 * Allocate and validate space for the signal handler
 * context. Note that if the stack is in P0 space, the
 * call to grow() is a nop, and the useracc() check
 * will fail if the process has not already allocated
 * the space with a `brk'.
 */
if (!u
.u_onstack
&& (u
.u_sigonstack
& sigmask(sig
))) {
scp
= (struct sigcontext
*)u
.u_sigsp
- 1;
/* NOTE(review): the `else' arm (no signal stack) is lost here */
scp
= (struct sigcontext
*)regs
[SP
] - 1;
fp
= (struct sigframe
*)scp
- 1;
if ((int)fp
<= USRSTACK
- ctob(u
.u_ssize
))
(void)grow((unsigned)fp
);
if (useracc((caddr_t
)fp
, sizeof (*fp
) + sizeof (*scp
), B_WRITE
) == 0) {
/*
 * Process has trashed its stack; give it an illegal
 * instruction to halt it in its tracks.
 */
u
.u_signal
[SIGILL
] = SIG_DFL
;
u
.u_procp
->p_sigignore
&= ~sig
;
u
.u_procp
->p_sigcatch
&= ~sig
;
u
.u_procp
->p_sigmask
&= ~sig
;
psignal(u
.u_procp
, SIGILL
);
/*
 * Build the argument list for the signal handler.
 */
if (sig
== SIGILL
|| sig
== SIGFPE
) {
/*
 * Build the calls argument frame to be used to call sigreturn
 * Build the signal context to be used by sigreturn.
 */
scp
->sc_onstack
= oonstack
;
/* clear compatibility mode and first-part-done before dispatch */
regs
[PS
] &= ~(PSL_CM
|PSL_FPD
);
regs
[PC
] = (int)u
.u_pcb
.pcb_sigc
;
/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper priviledges or to cause
 * (NOTE(review): comment truncated in extraction)
 *
 * NOTE(review): fragment of sigreturn(); function header, error
 * returns and trailing statements are missing.  Code preserved
 * byte-for-byte.
 */
struct sigcontext
*sigcntxp
;
register struct sigcontext
*scp
;
register int *regs
= u
.u_ar0
;
scp
= ((struct a
*)(u
.u_ap
))->sigcntxp
;
if (useracc((caddr_t
)scp
, sizeof (*scp
), B_WRITE
) == 0)
/* NOTE(review): error return for inaccessible context lost here */
/*
 * Reject a saved PSL with must-be-zero, IPL or interrupt-stack bits
 * set, without both mode fields set (presumably user mode), or with
 * compatibility mode set together with any FP exception bits.
 */
if ((scp
->sc_ps
& (PSL_MBZ
|PSL_IPL
|PSL_IS
)) != 0 ||
(scp
->sc_ps
& (PSL_PRVMOD
|PSL_CURMOD
)) != (PSL_PRVMOD
|PSL_CURMOD
) ||
((scp
->sc_ps
& PSL_CM
) &&
(scp
->sc_ps
& (PSL_FPD
|PSL_DV
|PSL_FU
|PSL_IV
)) != 0)) {
u
.u_onstack
= scp
->sc_onstack
& 01;
/* restore the blocked-signal mask, never allowing KILL/CONT/STOP */
u
.u_procp
->p_sigmask
= scp
->sc_mask
&~
(sigmask(SIGKILL
)|sigmask(SIGCONT
)|sigmask(SIGSTOP
));
/* XXX - BEGIN 4.2 COMPATIBILITY */
/*
 * Compatibility with 4.2 chmk $139 used by longjmp()
 *
 * NOTE(review): fragments of the 4.2-compatibility sigcleanup and
 * longjmp frame-restore code; function headers, error returns and
 * braces are missing from this extraction.  Code preserved
 * byte-for-byte.
 */
register struct sigcontext
*scp
;
register int *regs
= u
.u_ar0
;
scp
= (struct sigcontext
*)fuword((caddr_t
)regs
[SP
]);
if (useracc((caddr_t
)scp
, 3 * sizeof (int), B_WRITE
) == 0)
/* NOTE(review): error return lost here */
u
.u_onstack
= scp
->sc_onstack
& 01;
u
.u_procp
->p_sigmask
= scp
->sc_mask
&~
(sigmask(SIGKILL
)|sigmask(SIGCONT
)|sigmask(SIGSTOP
));
/* XXX - END 4.2 COMPATIBILITY */
/* copy in the user call frame pointed to by the saved FP */
(void) copyin((caddr_t
)u
.u_ar0
[FP
], (caddr_t
)&frame
, sizeof (frame
));
sp
= u
.u_ar0
[FP
] + sizeof (frame
);
u
.u_ar0
[PC
] = frame
.fr_savpc
;
u
.u_ar0
[FP
] = frame
.fr_savfp
;
u
.u_ar0
[AP
] = frame
.fr_savap
;
/*
 * Restore r0..r11 slots via the ipcreg map; the register-mask test
 * that guards each restore is lost in this extraction.
 */
for (reg
= 0; reg
<= 11; reg
++) {
u
.u_ar0
[ipcreg
[reg
]] = fuword((caddr_t
)sp
);
u
.u_ar0
[PS
] = (u
.u_ar0
[PS
] & 0xffff0000) | frame
.fr_psw
;
/* skip past the argument list (count in low byte at *sp) */
sp
+= 4 + 4 * (fuword((caddr_t
)sp
) & 0xff);
u
.u_ar0
[PC
] = fuword((caddr_t
)sp
);
u
.u_ar0
[PS
] = fuword((caddr_t
)sp
);
/* force required user-mode PSL bits on, privileged bits off */
u
.u_ar0
[PS
] |= PSL_USERSET
;
u
.u_ar0
[PS
] &= ~PSL_USERCLR
;
if (u
.u_ar0
[PS
] & PSL_CM
)
u
.u_ar0
[PS
] &= ~PSL_CM_CLR
;
/*
 * Memenable enables memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs. Thus we report at most once per memintvl.
 *
 * NOTE(review): fragment of memenable(); header and braces missing.
 */
(*cpuops
->cpu_memenable
)();
/* re-arm ourselves to run again after memintvl seconds */
timeout(memenable
, (caddr_t
)0, memintvl
*hz
);
/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts. It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controller.
 */
/*
 * Invalidate single all pte's in a cluster
 *
 * NOTE(review): fragment; the enclosing function header and the loop
 * body are missing from this extraction.
 */
register caddr_t addr
; /* must be first reg var */
for (i
= 0; i
< CLSIZE
; i
++) {
/*
 * NOTE(review): fragment of boot(); the function header, many
 * statements and all closing braces are missing from this extraction.
 * Code preserved byte-for-byte.
 */
register int howto
; /* r11 == how to boot */
register int devtype
; /* r10 == major of root dev */
if ((howto
&RB_NOSYNC
)==0 && waittime
< 0 && bfreelist
[0].b_forw
) {
printf("syncing disks... ");
/*
 * Release inodes held by texts before update.
 */
for (iter
= 0; iter
< 20; iter
++) {
/* scan the buffer pool for busy buffers; loop body lost */
for (bp
= &buf
[nbuf
]; --bp
>= buf
; )
if ((bp
->b_flags
& (B_BUSY
|B_INVAL
)) == B_BUSY
)
/*
 * If we've been adjusting the clock, the todr
 * will be out of synch; adjust it now.
 */
splx(0x1f); /* extreme priority */
devtype
= major(rootdev
);
/* 630 can be told to halt, but how? */
printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
/*
 * Reboot after panic or via reboot system call. Note that r11
 * and r10 must already have the proper boot values (`call by voodoo').
 */
/*
 * TXDB_BOOT erases memory! Instead we set the `did
 * a dump' flag in the rpb.
 *
 * NOTE(review): fragments of the reboot/console-transmit (tocons)
 * code; function headers and many statements are missing.  Code
 * preserved byte-for-byte.
 */
/* make kernel page 0 writable so the rpb flag can be set */
*(int *)&Sysmap
[0] &= ~PG_PROT
;
*(int *)&Sysmap
[0] |= PG_KW
;
/*
 * Except on 780s and 8600s, boot flags go in r5. SBI
 * VAXen do not care, so copy boot flags to r5 always.
 */
/* busy-wait for the console transmitter to come ready */
while (((oldmask
= mfpr(TXCS
)) & TXCS_RDY
) == 0)
#if VAX8200 || VAX780 || VAX750 || VAX730 || VAX630
mtpr(TXCS
, TXCS_LCONS
| TXCS_WMASK
);
while ((mfpr(TXCS
) & TXCS_RDY
) == 0)
while ((mfpr(TXCS
) & TXCS_RDY
) == 0)
/* restore the previous console mask */
mtpr(TXCS
, oldmask
| TXCS_WMASK
);
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * (NOTE(review): comment truncated in extraction)
 */
/*
 * For dumps during autoconfiguration,
 * if dump device has already configured...
 *
 * NOTE(review): fragment of dumpsys(); function header, guards and
 * switch case labels are missing.  Code preserved byte-for-byte.
 */
if (dumplo
== 0 && bdevsw
[major(dumpdev
)].d_psize
)
/* place the dump at the end of the device, physmem pages back */
dumplo
= (*bdevsw
[major(dumpdev
)].d_psize
)(dumpdev
) - physmem
;
printf("\ndumping to dev %x, offset %d\n", dumpdev
, dumplo
);
switch ((*bdevsw
[major(dumpdev
)].d_dump
)(dumpdev
)) {
/* NOTE(review): case labels lost between these messages */
printf("device not ready\n");
printf("area improper\n");
/*
 * Machine check error recovery code.
 *
 * NOTE(review): fragment of machinecheck(); surrounding statements
 * are missing from this extraction.
 */
if ((*cpuops
->cpu_mchk
)(cmcf
) == MCHK_RECOVERED
)
#if defined(VAX780) || defined(VAX750)
/*
 * These strings are shared between the 780 and 750 machine check code
 * in ka780.c and ka730.c.
 * NOTE(review): the array declaration these initializers belong to is
 * lost in this extraction.
 */
"cp read", "ctrl str par", "cp tbuf par", "cp cache par",
"cp rdtimo", "cp rds", "ucode lost", 0,
"ib rds", "ib rd timo", 0, "ib cache par"
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points. We do this by reading the interval count
 * register to determine the time remaining to the next clock tick.
 * We must compensate for wraparound which is not yet reflected in the time
 * (which happens when the ICR hits 0 and wraps after the splhigh(),
 * but before the mfpr(ICR)). Also check that this time is no less than
 * any previously-reported time, which could happen around the time
 * of a clock adjustment. Just for fun, we guarantee that the time
 * will be greater than the value obtained by a previous call.
 *
 * NOTE(review): fragment of microtime(); function header, locals and
 * several statements are missing.  Code preserved byte-for-byte.
 */
register struct timeval
*tvp
;
static struct timeval lasttime
;
if (t
< -tick
/ 2 && (mfpr(ICCS
) & ICCS_INT
))
/* NOTE(review): statements lost between this check and the add */
tvp
->tv_usec
+= tick
+ t
;
if (tvp
->tv_usec
> 1000000) {
/* NOTE(review): carry into tv_sec lost in extraction */
if (tvp
->tv_sec
== lasttime
.tv_sec
&&
tvp
->tv_usec
<= lasttime
.tv_usec
&&
(tvp
->tv_usec
= lasttime
.tv_usec
+ 1) > 1000000) {
/*
 * Enable floating point accelerator if it exists
 * and has control register.
 *
 * NOTE(review): fragment; enclosing function header and trailing
 * statements are missing from this extraction.
 */
if ((mfpr(ACCS
) & 0xff) != 0) {
printf("Enabling FPA\n");
/*
 * Return a reasonable approximation of the time of day register.
 * More precisely, return a number that increases by one about
 * once every ten milliseconds.
 *
 * NOTE(review): the function header naming this routine is missing
 * from this extraction; body preserved byte-for-byte.
 */
#if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730
{ static int t
; DELAY(10000); return (++t
); }