Add support for MicroVAX 3000.
[unix-history] / usr / src / sys / vax / vax / machdep.c
/*
 * Copyright (c) 1982,1986,1988 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *      @(#)machdep.c   7.15 (Berkeley) %G%
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "malloc.h"
#include "map.h"
#include "vm.h"
#include "proc.h"
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "inode.h"
#include "file.h"
#include "text.h"
#include "clist.h"
#include "callout.h"
#include "cmap.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "quota.h"

#include "reg.h"
#include "pte.h"
#include "psl.h"
#include "frame.h"
#include "clock.h"
#include "cons.h"
#include "cpu.h"
#include "mem.h"
#include "mtpr.h"
#include "rpb.h"
#include "ka630.h"

#include "../vaxuba/ubavar.h"
#include "../vaxuba/ubareg.h"

/*
 * Declare these as initialized data so we can patch them.
 */
int nswbuf = 0;
#ifdef NBUF
int nbuf = NBUF;
#else
int nbuf = 0;
#endif
#ifdef BUFPAGES
int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
int msgbufmapped;               /* set when safe to use msgbuf */

/*
 * Machine-dependent startup code
 */
startup(firstaddr)
        int firstaddr;
{
        register int unixsize;
        register unsigned i;
        register struct pte *pte;
        int mapaddr, j, n;
        register caddr_t v;
        int maxbufs, base, residual;

#if VAX630 || VAX650
        /*
         * Leave last 5k of phys. memory as console work area.
         */
        if (cpu == VAX_630 || cpu == VAX_650)
                maxmem -= 10;
#endif
        /*
         * Initialize error message buffer (at end of core).
         */
        maxmem -= btoc(sizeof (struct msgbuf));
        pte = msgbufmap;
        for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
                *(int *)pte++ = PG_V | PG_KW | (maxmem + i);
        mtpr(TBIA, 0);
        msgbufmapped = 1;

#ifdef QBA
#include "qv.h"
#if NQV > 0
        /*
         * redirect console to qvss if it exists
         */
        qvcons_init();
#endif
#include "qd.h"
#if NQD > 0
        /*
         * redirect console to qdss if it exists
         */
        qdcons_init();
#endif
#endif

#ifdef KADB
        kdb_init();
#endif
        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf(version);
        printf("real mem = %d\n", ctob(physmem));

        /*
         * Allocate space for system data structures.
         * The first available real memory address is in "firstaddr".
         * The first available kernel virtual address is in "v".
         * As pages of kernel virtual memory are allocated, "v" is incremented.
         * As pages of memory are allocated and cleared,
         * "firstaddr" is incremented.
         * An index into the kernel page table corresponding to the
         * virtual memory address maintained in "v" is kept in "mapaddr".
         */
        v = (caddr_t)(KERNBASE | (firstaddr * NBPG));
#define valloc(name, type, num) \
        (name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
        (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
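/*
 * Editorial illustration (not part of the original file): each
 * valloc()/valloclim() call simply carves a region out of kernel
 * virtual space starting at "v".  For example,
 *
 *      valloc(callout, struct callout, ncallout);
 *
 * expands to
 *
 *      (callout) = (struct callout *)v;
 *      v = (caddr_t)((callout)+(ncallout));
 *
 * so "callout" points at the old "v" and "v" advances past the entries
 * just reserved.  No memory is touched here; the backing pages are
 * mapped and cleared further below.
 */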
        valloclim(inode, struct inode, ninode, inodeNINODE);
        valloclim(file, struct file, nfile, fileNFILE);
        valloclim(proc, struct proc, nproc, procNPROC);
        valloclim(text, struct text, ntext, textNTEXT);
        valloc(cfree, struct cblock, nclist);
        valloc(callout, struct callout, ncallout);
        valloc(swapmap, struct map, nswapmap = nproc * 2);
        valloc(argmap, struct map, ARGMAPSIZE);
        valloc(kernelmap, struct map, nproc);
        valloc(mbmap, struct map, nmbclusters/4);
        valloc(namecache, struct namecache, nchsize);
        valloc(kmemmap, struct map, ekmempt - kmempt);
        valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef QUOTA
        valloclim(quota, struct quota, nquota, quotaNQUOTA);
        valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

        /*
         * Determine how many buffers to allocate.
         * Use 10% of memory for the first 2 Meg, 5% of the remaining
         * memory.  Insure a minimum of 16 buffers.
         * We allocate 1/2 as many swap buffer headers as file i/o buffers.
         */
        if (bufpages == 0)
                if (physmem < (2 * 1024 * CLSIZE))
                        bufpages = physmem / 10 / CLSIZE;
                else
                        bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
        if (nbuf == 0) {
                nbuf = bufpages / 2;
                if (nbuf < 16)
                        nbuf = 16;
        }
        if (nswbuf == 0) {
                nswbuf = (nbuf / 2) &~ 1;       /* force even */
                if (nswbuf > 256)
                        nswbuf = 256;           /* sanity */
        }
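        /*
         * Worked example (an editorial sketch, assuming the usual VAX
         * constants NBPG = 512, CLSIZE = 2, CLBYTES = 1024): on an 8 Mb
         * machine physmem is 16384 pages, so the else branch gives
         * bufpages = ((4096 + 16384) / 20) / 2 = 512 clusters, i.e.
         * 512 Kb of buffer cache (10% of the first 2 Mb plus 5% of the
         * rest).  Then nbuf = 512 / 2 = 256 headers and nswbuf = 128.
         */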
        valloc(swbuf, struct buf, nswbuf);

        /*
         * Now the amount of virtual memory remaining for buffers
         * can be calculated, estimating needs for the cmap.
         */
        ncmap = (maxmem*NBPG - ((int)v &~ KERNBASE)) /
                (CLBYTES + sizeof(struct cmap)) + 2;
        maxbufs = ((SYSPTSIZE * NBPG) -
            ((int)(v + ncmap * sizeof(struct cmap)) - KERNBASE)) /
                (MAXBSIZE + sizeof(struct buf));
        if (maxbufs < 16)
                panic("sys pt too small");
        if (nbuf > maxbufs) {
                printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
                nbuf = maxbufs;
        }
        if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
                bufpages = nbuf * (MAXBSIZE / CLBYTES);
        valloc(buf, struct buf, nbuf);

        /*
         * Allocate space for core map.
         * Allow space for all of physical memory minus the amount
         * dedicated to the system.  The amount of physical memory
         * dedicated to the system is the total virtual memory of
         * the system thus far, plus core map, buffer pages,
         * and buffer headers not yet allocated.
         * Add 2: 1 because the 0th entry is unused, 1 for rounding.
         */
        ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ KERNBASE)) /
                (CLBYTES + sizeof(struct cmap)) + 2;
        valloclim(cmap, struct cmap, ncmap, ecmap);

        /*
         * Clear space allocated thus far, and make r/w entries
         * for the space in the kernel map.
         */
        unixsize = btoc((int)v &~ KERNBASE);
        while (firstaddr < unixsize) {
                *(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
                clearseg((unsigned)firstaddr);
                firstaddr++;
        }

        /*
         * Now allocate buffers proper.  They are different than the above
         * in that they usually occupy more virtual memory than physical.
         */
        v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
        valloc(buffers, char, MAXBSIZE * nbuf);
        base = bufpages / nbuf;
        residual = bufpages % nbuf;
        mapaddr = firstaddr;
        for (i = 0; i < nbuf; i++) {
                n = (i < residual ? base + 1 : base) * CLSIZE;
                for (j = 0; j < n; j++) {
                        *(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
                        clearseg((unsigned)firstaddr);
                        firstaddr++;
                }
                mapaddr += MAXBSIZE / NBPG;
        }
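        /*
         * Editorial illustration (assuming MAXBSIZE = 8192, NBPG = 512,
         * CLSIZE = 2): each buffer is given MAXBSIZE/NBPG = 16 page-table
         * slots of kernel virtual space above, but only "base" (or
         * base + 1 for the first "residual" buffers) clusters of real
         * memory behind them, so a buffer's virtual window is usually
         * only partly backed by physical pages.
         */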

        unixsize = btoc((int)v &~ KERNBASE);
        if (firstaddr >= physmem - 8*UPAGES)
                panic("no memory");
        mtpr(TBIA, 0);                  /* After we just cleared it all! */

        /*
         * Initialize callouts
         */
        callfree = callout;
        for (i = 1; i < ncallout; i++)
                callout[i-1].c_next = &callout[i];

        /*
         * Initialize memory allocator and swap
         * and user page table maps.
         *
         * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
         * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
         */
        meminit(firstaddr, maxmem);
        maxmem = freemem;
        printf("avail mem = %d\n", ctob(maxmem));
        printf("using %d buffers containing %d bytes of memory\n",
                nbuf, bufpages * CLBYTES);
        rminit(kernelmap, (long)USRPTSIZE, (long)1,
            "usrpt", nproc);
        rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
            "mbclusters", nmbclusters/4);
        kmeminit();     /* now safe to do malloc/free */

        /*
         * Set up CPU-specific registers, cache, etc.
         */
        initcpu();

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bhinit();
        binit();

        /*
         * Configure the system.
         */
        configure();

        /*
         * Clear restart inhibit flags.
         */
        tocons(TXDB_CWSI);
        tocons(TXDB_CCSI);
}

#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the current time and a previous
 * time as represented by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return error code.
 */
vmtime(otime, olbolt, oicr)
        register int otime, olbolt, oicr;
{

        if (mfpr(ICCS)&ICCS_INT)
                return(-1);
        else
                return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 + mfpr(ICR)-oicr);
}
#endif

/*
 * Clear registers on exec
 */
setregs(entry)
        u_long entry;
{
#ifdef notdef
        register int *rp;

        /* should pass args to init on the stack */
        /* should also fix this code before using it, it's wrong */
        /* wanna clear the scb? */
        for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
                *rp++ = 0;
#endif
        u.u_ar0[PC] = entry + 2;
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
sendsig(p, sig, mask)
        int (*p)(), sig, mask;
{
        register struct sigcontext *scp;
        register int *regs;
        register struct sigframe {
                int     sf_signum;
                int     sf_code;
                struct  sigcontext *sf_scp;
                int     (*sf_handler)();
                int     sf_argcount;
                struct  sigcontext *sf_scpcopy;
        } *fp;
        int oonstack;

        regs = u.u_ar0;
        oonstack = u.u_onstack;
        /*
         * Allocate and validate space for the signal handler
         * context.  Note that if the stack is in P0 space, the
         * call to grow() is a nop, and the useracc() check
         * will fail if the process has not already allocated
         * the space with a `brk'.
         */
        if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
                scp = (struct sigcontext *)u.u_sigsp - 1;
                u.u_onstack = 1;
        } else
                scp = (struct sigcontext *)regs[SP] - 1;
        fp = (struct sigframe *)scp - 1;
        if ((int)fp <= USRSTACK - ctob(u.u_ssize))
                (void)grow((unsigned)fp);
        if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
                /*
                 * Process has trashed its stack; give it an illegal
                 * instruction to halt it in its tracks.
                 */
                u.u_signal[SIGILL] = SIG_DFL;
                sig = sigmask(SIGILL);
                u.u_procp->p_sigignore &= ~sig;
                u.u_procp->p_sigcatch &= ~sig;
                u.u_procp->p_sigmask &= ~sig;
                psignal(u.u_procp, SIGILL);
                return;
        }
        /*
         * Build the argument list for the signal handler.
         */
        fp->sf_signum = sig;
        if (sig == SIGILL || sig == SIGFPE) {
                fp->sf_code = u.u_code;
                u.u_code = 0;
        } else
                fp->sf_code = 0;
        fp->sf_scp = scp;
        fp->sf_handler = p;
        /*
         * Build the calls argument frame to be used to call sigreturn
         */
        fp->sf_argcount = 1;
        fp->sf_scpcopy = scp;
        /*
         * Build the signal context to be used by sigreturn.
         */
        scp->sc_onstack = oonstack;
        scp->sc_mask = mask;
        scp->sc_sp = regs[SP];
        scp->sc_fp = regs[FP];
        scp->sc_ap = regs[AP];
        scp->sc_pc = regs[PC];
        scp->sc_ps = regs[PS];
        regs[SP] = (int)fp;
        regs[PS] &= ~(PSL_CM|PSL_FPD);
        regs[PC] = (int)u.u_pcb.pcb_sigc;
        return;
}
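
/*
 * Editorial sketch of what sendsig() leaves on the user stack (derived
 * from the code above, not from the original comments; stack grows
 * toward lower addresses):
 *
 *      new regs[SP] -> struct sigframe {
 *                              sf_signum, sf_code, sf_scp, sf_handler,
 *                              sf_argcount, sf_scpcopy
 *                      }
 *                      struct sigcontext {
 *                              sc_onstack, sc_mask, sc_sp, sc_fp,
 *                              sc_ap, sc_pc, sc_ps
 *                      }
 *                      (prior stack, or base of the signal stack)
 *
 * The sigcode in the u. area presumably calls sf_handler with
 * sf_signum/sf_code/sf_scp as its arguments, then uses the
 * sf_argcount/sf_scpcopy calls frame for the chmk into sigreturn().
 */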

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
        struct a {
                struct sigcontext *sigcntxp;
        };
        register struct sigcontext *scp;
        register int *regs = u.u_ar0;

        scp = ((struct a *)(u.u_ap))->sigcntxp;
        if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
                return;
        if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
            (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
            ((scp->sc_ps & PSL_CM) &&
             (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0)) {
                u.u_error = EINVAL;
                return;
        }
        u.u_eosys = JUSTRETURN;
        u.u_onstack = scp->sc_onstack & 01;
        u.u_procp->p_sigmask = scp->sc_mask &~
                (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
        regs[FP] = scp->sc_fp;
        regs[AP] = scp->sc_ap;
        regs[SP] = scp->sc_sp;
        regs[PC] = scp->sc_pc;
        regs[PS] = scp->sc_ps;
}

/* XXX - BEGIN 4.2 COMPATIBILITY */
/*
 * Compatibility with 4.2 chmk $139 used by longjmp()
 */
osigcleanup()
{
        register struct sigcontext *scp;
        register int *regs = u.u_ar0;

        scp = (struct sigcontext *)fuword((caddr_t)regs[SP]);
        if ((int)scp == -1)
                return;
        if (useracc((caddr_t)scp, 3 * sizeof (int), B_WRITE) == 0)
                return;
        u.u_onstack = scp->sc_onstack & 01;
        u.u_procp->p_sigmask = scp->sc_mask &~
                (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
        regs[SP] = scp->sc_sp;
}
/* XXX - END 4.2 COMPATIBILITY */

#ifdef notdef
dorti()
{
        struct frame frame;
        register int sp;
        register int reg, mask;
        extern int ipcreg[];

        (void) copyin((caddr_t)u.u_ar0[FP], (caddr_t)&frame, sizeof (frame));
        sp = u.u_ar0[FP] + sizeof (frame);
        u.u_ar0[PC] = frame.fr_savpc;
        u.u_ar0[FP] = frame.fr_savfp;
        u.u_ar0[AP] = frame.fr_savap;
        mask = frame.fr_mask;
        for (reg = 0; reg <= 11; reg++) {
                if (mask&1) {
                        u.u_ar0[ipcreg[reg]] = fuword((caddr_t)sp);
                        sp += 4;
                }
                mask >>= 1;
        }
        sp += frame.fr_spa;
        u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
        if (frame.fr_s)
                sp += 4 + 4 * (fuword((caddr_t)sp) & 0xff);
        /* phew, now the rei */
        u.u_ar0[PC] = fuword((caddr_t)sp);
        sp += 4;
        u.u_ar0[PS] = fuword((caddr_t)sp);
        sp += 4;
        u.u_ar0[PS] |= PSL_USERSET;
        u.u_ar0[PS] &= ~PSL_USERCLR;
        if (u.u_ar0[PS] & PSL_CM)
                u.u_ar0[PS] &= ~PSL_CM_CLR;
        u.u_ar0[SP] = (int)sp;
}
#endif

/*
 * Memenable enables memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 */
int memintvl = MEMINTVL;

memenable()
{

        (*cpuops->cpu_memenable)();
        if (memintvl > 0)
                timeout(memenable, (caddr_t)0, memintvl*hz);
}

/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controllers.
 */
memerr()
{

        (*cpuops->cpu_memerr)();
}

/*
 * Invalidate all pte's in a cluster, using single-page (TBIS)
 * invalidates.
 */
tbiscl(v)
        unsigned v;
{
        register caddr_t addr;          /* must be first reg var */
        register int i;

        asm(".set TBIS,58");
        addr = ptob(v);
        for (i = 0; i < CLSIZE; i++) {
#ifdef lint
                mtpr(TBIS, addr);
#else
                asm("mtpr r11,$TBIS");
#endif
                addr += NBPG;
        }
}

int waittime = -1;

boot(howto)
        register int howto;             /* r11 == how to boot */
{
        register int devtype;           /* r10 == major of root dev */
        extern char *panicstr;

        if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
                register struct buf *bp;
                int iter, nbusy;

                waittime = 0;
                (void) splnet();
                printf("syncing disks... ");
                /*
                 * Release inodes held by texts before update.
                 */
                if (panicstr == 0)
                        xumount(NODEV);
                update();

                for (iter = 0; iter < 20; iter++) {
                        nbusy = 0;
                        for (bp = &buf[nbuf]; --bp >= buf; )
                                if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
                                        nbusy++;
                        if (nbusy == 0)
                                break;
                        printf("%d ", nbusy);
                        DELAY(40000 * iter);
                }
                if (nbusy)
                        printf("giving up\n");
                else
                        printf("done\n");
                /*
                 * If we've been adjusting the clock, the todr
                 * will be out of synch; adjust it now.
                 */
                resettodr();
        }
        splx(0x1f);                     /* extreme priority */
        devtype = major(rootdev);
        if (howto&RB_HALT) {
                /* 630 can be told to halt, but how? */
                printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
                for (;;)
                        ;
        } else {
                if (howto & RB_DUMP)
                        doadump();
                vaxboot();
        }
#ifdef lint
        devtype = devtype;
#endif
        /*NOTREACHED*/
}

/*
 * Reboot after panic or via reboot system call.  Note that r11
 * and r10 must already have the proper boot values (`call by voodoo').
 */
vaxboot()
{

        switch (cpu) {

#ifdef VAX8200
        case VAX_8200:
                /*
                 * TXDB_BOOT erases memory!  Instead we set the `did
                 * a dump' flag in the rpb.
                 */
                *(int *)&Sysmap[0] &= ~PG_PROT;
                *(int *)&Sysmap[0] |= PG_KW;
                mtpr(TBIS, &rpb);
                rpb.rp_flag = 1;
                break;
#endif

        default:
                tocons(TXDB_BOOT);
        }

        /*
         * Except on 780s and 8600s, boot flags go in r5.  SBI
         * VAXen do not care, so copy boot flags to r5 always.
         */
        asm("movl r11,r5");
        for (;;) {
                asm("halt");
        }
}

tocons(c)
{
        register int oldmask;

        while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
                continue;

        switch (cpu) {

#if VAX8200 || VAX780 || VAX750 || VAX730 || VAX630 || VAX650
        case VAX_8200:
        case VAX_780:
        case VAX_750:
        case VAX_730:
        case VAX_630:
        case VAX_650:
                c |= TXDB_CONS;
                break;
#endif

#if VAX8600
        case VAX_8600:
                mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
                while ((mfpr(TXCS) & TXCS_RDY) == 0)
                        continue;
                break;
#endif
        }

        mtpr(TXDB, c);

#if VAX8600
        switch (cpu) {

        case VAX_8600:
                while ((mfpr(TXCS) & TXCS_RDY) == 0)
                        continue;
                mtpr(TXCS, oldmask | TXCS_WMASK);
                break;
        }
#endif
#ifdef lint
        oldmask = oldmask;
#endif
}

int dumpmag = 0x8fca0101;       /* magic number for savecore */
int dumpsize = 0;               /* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{

        rpb.rp_flag = 1;
        msgbufmapped = 0;
        if (dumpdev == NODEV)
                return;
        /*
         * For dumps during autoconfiguration,
         * if dump device has already configured...
         */
        if (dumplo == 0 && bdevsw[major(dumpdev)].d_psize)
                dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) - physmem;
        if (dumplo < 0)
                dumplo = 0;
        dumpsize = physmem;
        printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
        printf("dump ");
        switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

        case ENXIO:
                printf("device bad\n");
                break;

        case EFAULT:
                printf("device not ready\n");
                break;

        case EINVAL:
                printf("area improper\n");
                break;

        case EIO:
                printf("i/o error");
                break;

        default:
                printf("succeeded");
                break;
        }
}

/*
 * Machine check error recovery code.
 */
machinecheck(cmcf)
        caddr_t cmcf;
{

        if ((*cpuops->cpu_mchk)(cmcf) == MCHK_RECOVERED)
                return;
        (*cpuops->cpu_memerr)();
        panic("mchk");
}

#if defined(VAX780) || defined(VAX750)
/*
 * These strings are shared between the 780 and 750 machine check code
 * in ka780.c and ka750.c.
 */
char *mc780750[16] = {
        "cp read",      "ctrl str par", "cp tbuf par",  "cp cache par",
        "cp rdtimo",    "cp rds",       "ucode lost",   0,
        0,              0,              "ib tbuf par",  0,
        "ib rds",       "ib rd timo",   0,              "ib cache par"
};
#endif

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.  We do this by reading the interval count
 * register to determine the time remaining to the next clock tick.
 * We must compensate for wraparound which is not yet reflected in the time
 * (which happens when the ICR hits 0 and wraps after the splhigh(),
 * but before the mfpr(ICR)).  Also check that this time is no less than
 * any previously-reported time, which could happen around the time
 * of a clock adjustment.  Just for fun, we guarantee that the time
 * will be greater than the value obtained by a previous call.
 */
microtime(tvp)
        register struct timeval *tvp;
{
        int s = splhigh();
        static struct timeval lasttime;
        register long t;

        *tvp = time;
        t = mfpr(ICR);
        if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
                t += tick;
        tvp->tv_usec += tick + t;
        if (tvp->tv_usec > 1000000) {
                tvp->tv_sec++;
                tvp->tv_usec -= 1000000;
        }
        if (tvp->tv_sec == lasttime.tv_sec &&
            tvp->tv_usec <= lasttime.tv_usec &&
            (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
                tvp->tv_sec++;
                tvp->tv_usec -= 1000000;
        }
        lasttime = *tvp;
        splx(s);
}
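
/*
 * Worked example (an editorial sketch, assuming hz = 100 so tick is
 * 10000 usec, and that the interval count register runs from -tick up
 * to 0): if "time" reads 12:00:00.030000 and mfpr(ICR) returns -2500,
 * we are 7500 usec into the current tick and report .037500.  If the
 * ICR has just wrapped (been reloaded to about -tick) before we read
 * it but the clock interrupt has not yet advanced "time", ICCS_INT is
 * set and the t += tick adjustment credits the full tick that "time"
 * is still missing.
 */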

initcpu()
{
        /*
         * Enable cache.
         */
        switch (cpu) {

#if VAX8600
        case VAX_8600:
                mtpr(CSWP, 3);
                break;
#endif
#if VAX8200
        case VAX_8200:
                mtpr(CADR, 0);
                break;
#endif
#if VAX780
        case VAX_780:
                mtpr(SBIMT, 0x200000);
                break;
#endif
#if VAX750
        case VAX_750:
                mtpr(CADR, 0);
                break;
#endif
        default:
                break;
        }

        /*
         * Enable floating point accelerator if it exists
         * and has control register.
         */
        switch(cpu) {

#if VAX8600 || VAX780
        case VAX_8600:
        case VAX_780:
                if ((mfpr(ACCS) & 0xff) != 0) {
                        printf("Enabling FPA\n");
                        mtpr(ACCS, 0x8000);
                }
#endif
        default:
                break;
        }
}

/*
 * Return a reasonable approximation of the time of day register.
 * More precisely, return a number that increases by one about
 * once every ten milliseconds.
 */
todr()
{

        switch (cpu) {

#if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730 || VAX650
        case VAX_8600:
        case VAX_8200:
        case VAX_780:
        case VAX_750:
        case VAX_730:
        case VAX_650:
                return (mfpr(TODR));
#endif

#if VAX630
        case VAX_630:
                /* XXX crude */
                { static int t; DELAY(10000); return (++t); }
#endif

        default:
                panic("todr");
        }
        /* NOTREACHED */
}