BSD 4.3 release
[unix-history] usr/src/sys/vax/locore.s
index 0315e04..76a6b82 100644
@@ -1,23 +1,32 @@
-/*     locore.s        6.3     83/08/12        */
+/*
+ * Copyright (c) 1980, 1986 Regents of the University of California.
+ * All rights reserved.  The Berkeley software License Agreement
+ * specifies the terms and conditions for redistribution.
+ *
+ *     @(#)locore.s    7.1 (Berkeley) 6/5/86
+ */
 
-#include "../machine/psl.h"
-#include "../machine/pte.h"
+#include "psl.h"
+#include "pte.h"
 
-#include "../h/errno.h"
+#include "errno.h"
+#include "cmap.h"
 
-#include "../vax/mtpr.h"
-#include "../vax/trap.h"
-#include "../vax/cpu.h"
-#include "../vax/nexus.h"
-#include "../vax/cons.h"
-#include "../vax/clock.h"
+#include "mtpr.h"
+#include "trap.h"
+#include "cpu.h"
+#include "nexus.h"
+#include "cons.h"
+#include "clock.h"
+#include "ioa.h"
+#include "ka630.h"
 #include "../vaxuba/ubareg.h"
 
-#include "dh.h"
 #include "dz.h"
 #include "uu.h"
 #include "ps.h"
 #include "mba.h"
+#include "uba.h"
 
        .set    HIGH,0x1f       # mask for total disable
        .set    MCKVEC,4        # offset into scb of machine check vector
@@ -49,10 +58,10 @@ _doadump:
 #define        _rpbmap _Sysmap                         # rpb, scb, UNI*vec, istack*4
        bicl2   $PG_PROT,_rpbmap
        bisl2   $PG_KW,_rpbmap
+       mtpr    $0,$TBIA
        tstl    _rpb+RP_FLAG                    # dump only once!
        bneq    1f
        incl    _rpb+RP_FLAG
-       mtpr    $0,$TBIA
        movl    sp,erpb
        movab   erpb,sp
        mfpr    $PCBB,-(sp)
@@ -63,10 +72,8 @@ _doadump:
        pushr   $0x3fff
        calls   $0,_dumpsys
 1:
-       mfpr    $TXCS,r0
-       bitl    $TXCS_RDY,r0
-       beql    1b
-       mtpr    $TXDB_BOOT,$TXDB
+       pushl   $TXDB_BOOT
+       calls   $1,_tocons
        halt
 
 /*
@@ -82,9 +89,44 @@ _doadump:
 #define        PUSHR           pushr $0x3f
 #define        POPR            popr $0x3f
 
+       .data
+nofault: .long 0       # where to go on predicted machcheck
+       .text
 SCBVEC(machcheck):
+       tstl    nofault
+       bneq    1f
        PUSHR; pushab 6*4(sp); calls $1,_machinecheck; POPR;
        addl2 (sp)+,sp; rei
+       .align  2
+1:
+       casel   _cpu,$1,$VAX_MAX
+0:
+       .word   8f-0b           # 1 is 780
+       .word   5f-0b           # 2 is 750
+       .word   5f-0b           # 3 is 730
+       .word   7f-0b           # 4 is 8600
+       .word   1f-0b           # ???
+       .word   1f-0b           # ???
+       .word   1f-0b           # ???
+       .word   1f-0b           # 8 is 630
+5:
+#if defined(VAX750) || defined(VAX730)
+       mtpr    $0xf,$MCESR
+#endif
+       brb     1f
+7:
+#if VAX8600
+       mtpr    $0,$EHSR
+#endif
+       brb     1f
+8:
+#if VAX780
+       mtpr    $0,$SBIFS
+#endif
+1:
+       addl2   (sp)+,sp                # discard mchchk trash
+       movl    nofault,(sp)
+       rei
 SCBVEC(kspnotval):
        PUSHR; PANIC("KSP not valid");
 SCBVEC(powfail):
@@ -104,20 +146,20 @@ SCBVEC(wtime):
 
 #if NMBA > 0
 SCBVEC(mba3int):
-       PUSHR; pushl $3; brb 1f
+       PUSHR; incl _intrcnt+I_MBA3; pushl $3; brb 1f
 SCBVEC(mba2int):
-       PUSHR; pushl $2; brb 1f
+       PUSHR; incl _intrcnt+I_MBA2; pushl $2; brb 1f
 SCBVEC(mba1int):
-       PUSHR; pushl $1; brb 1f
+       PUSHR; incl _intrcnt+I_MBA1; pushl $1; brb 1f
 SCBVEC(mba0int):
-       PUSHR; pushl $0
+       PUSHR; incl _intrcnt+I_MBA0; pushl $0
 1:     calls $1,_mbintr
        POPR
        incl    _cnt+V_INTR
        rei
 #endif
 
-#if VAX780
+#if defined(VAX780) || defined(VAX8600)
 /*
  * Registers for the uba handling code
  */
@@ -127,6 +169,18 @@ SCBVEC(mba0int):
 #define        rUBA    r4
 /* r2,r5 are scratch */
 
+#define        I_UBA   I_UBA0          /* base of UBA interrupt counters */
+
+#if NUBA > 4
+SCBVEC(ua7int):
+       PUSHR; movl $7,rUBANUM; moval _uba_hd+(7*UH_SIZE),rUBAHD; brb 1f
+SCBVEC(ua6int):
+       PUSHR; movl $6,rUBANUM; moval _uba_hd+(6*UH_SIZE),rUBAHD; brb 1f
+SCBVEC(ua5int):
+       PUSHR; movl $5,rUBANUM; moval _uba_hd+(5*UH_SIZE),rUBAHD; brb 1f
+SCBVEC(ua4int):
+       PUSHR; movl $4,rUBANUM; moval _uba_hd+(4*UH_SIZE),rUBAHD; brb 1f
+#endif
 SCBVEC(ua3int):
        PUSHR; movl $3,rUBANUM; moval _uba_hd+(3*UH_SIZE),rUBAHD; brb 1f
 SCBVEC(ua2int):
@@ -136,7 +190,6 @@ SCBVEC(ua1int):
 SCBVEC(ua0int):
        PUSHR; movl $0,rUBANUM; moval _uba_hd+(0*UH_SIZE),rUBAHD;
 1:
-       incl    _cnt+V_INTR
        mfpr    $IPL,r2                         /* r2 = mfpr(IPL); */
        movl    UH_UBA(rUBAHD),rUBA             /* uba = uhp->uh_uba; */
        movl    UBA_BRRVR-0x14*4(rUBA)[r2],rUVEC
@@ -149,66 +202,104 @@ ubanorm:
 ubaerror:
        PUSHR; calls $0,_ubaerror; POPR         /* ubaerror r/w's r0-r5 */
        tstl rUVEC; jneq ubanorm                /* rUVEC contains result */
+       incl _intrcnt+I_UBA[rUBANUM]
+       incl    _cnt+V_INTR
        POPR
        rei
 #endif
 SCBVEC(cnrint):
-       PUSHR; calls $0,_cnrint; POPR; incl _cnt+V_INTR; rei
+       PUSHR; calls $0,_cnrint; POPR
+       incl _cnt+V_INTR
+       incl _intrcnt+I_CNR
+       rei
 SCBVEC(cnxint):
-       PUSHR; calls $0,_cnxint; POPR; incl _cnt+V_INTR; rei
+       PUSHR; calls $0,_cnxint; POPR
+       incl _cnt+V_INTR
+       incl _intrcnt+I_CNX
+       rei
 SCBVEC(hardclock):
        PUSHR
        mtpr $ICCS_RUN|ICCS_IE|ICCS_INT|ICCS_ERR,$ICCS
-       pushl 4+6*4(sp); pushl 4+6*4(sp);
-       calls $2,_hardclock                     # hardclock(pc,psl)
 #if NPS > 0
        pushl   4+6*4(sp); pushl 4+6*4(sp);
        calls   $2,_psextsync
 #endif
+       pushl 4+6*4(sp); pushl 4+6*4(sp);
+       calls $2,_hardclock                     # hardclock(pc,psl)
        POPR;
-       incl    _cnt+V_INTR             ## temp so not to break vmstat -= HZ
+       incl    _cnt+V_INTR
+       incl    _intrcnt+I_CLOCK
        rei
 SCBVEC(softclock):
        PUSHR
-#if NDZ > 0
-       calls   $0,_dztimer
-#endif
-#if NDH > 0
-       calls   $0,_dhtimer
-#endif
        pushl   4+6*4(sp); pushl 4+6*4(sp);
        calls   $2,_softclock                   # softclock(pc,psl)
        POPR; 
+       incl    _cnt+V_SOFT
        rei
+
 #include "../net/netisr.h"
        .globl  _netisr
 SCBVEC(netintr):
        PUSHR
-       bbcc    $NETISR_RAW,_netisr,1f; calls $0,_rawintr; 1:
+#include "imp.h"
+#if NIMP > 0
+       bbcc    $NETISR_IMP,_netisr,1f; calls $0,_impintr; 1:
+#endif
 #ifdef INET
-#include "../netinet/in_systm.h"
        bbcc    $NETISR_IP,_netisr,1f; calls $0,_ipintr; 1:
 #endif
 #ifdef NS
        bbcc    $NETISR_NS,_netisr,1f; calls $0,_nsintr; 1:
 #endif
+       bbcc    $NETISR_RAW,_netisr,1f; calls $0,_rawintr; 1:
        POPR
+       incl    _cnt+V_SOFT
        rei
-#if defined(VAX750) || defined(VAX730)
+
+#if defined(VAX750) || defined(VAX730) || defined(VAX8600)
 SCBVEC(consdin):
        PUSHR;
-#if defined(VAX750) && !defined(VAX730) && !defined(MRSP)
+       incl _intrcnt+I_TUR
+       casel   _cpu,$VAX_750,$VAX_8600
+0:
+       .word   5f-0b           # 2 is VAX_750
+       .word   3f-0b           # 3 is VAX_730
+       .word   6f-0b           # 4 is VAX_8600
+       halt
+5:
+#if defined(VAX750) && !defined(MRSP)
        jsb     tudma
 #endif
-       calls $0,_turintr;
+3:
+#if defined(VAX750) || defined(VAX730)
+       calls $0,_turintr
+       brb 2f
+#else
+       halt
+#endif
+6:
+#if VAX8600
+       calls $0, _crlintr
+#else
+       halt
+#endif
+2:
        POPR;
        incl _cnt+V_INTR;
        rei
-SCBVEC(consdout):
-       PUSHR; calls $0,_tuxintr; POPR; incl _cnt+V_INTR; rei
 #else
 SCBVEC(consdin):
        halt
+#endif
+
+#if defined(VAX750) || defined(VAX730)
+SCBVEC(consdout):
+       PUSHR; calls $0,_tuxintr; POPR
+       incl _cnt+V_INTR
+       incl _intrcnt+I_TUX
+       rei
+#else
 SCBVEC(consdout):
        halt
 #endif
@@ -334,7 +425,7 @@ uudma:
        rsb                             # continue processing in uurintr
 #endif
 
-#if defined(VAX750) && !defined(VAX730) && !defined(MRSP)
+#if defined(VAX750) && !defined(MRSP)
 /*
  * Pseudo DMA routine for VAX-11/750 console tu58 
  *         (without MRSP)
@@ -457,6 +548,35 @@ SCBVEC(ustray):
        POPR
        rei
 
+#ifdef VAX630
+/*
+ * Emulation OpCode jump table:
+ *     ONLY GOES FROM 0xf8 (-8) TO 0x3B (59)
+ */
+#define EMUTABLE       0x43
+#define NOEMULATE      .long noemulate
+#define        EMULATE(a)      .long _EM/**/a
+       .globl  _emJUMPtable
+_emJUMPtable:
+/* f8 */       EMULATE(ashp);  EMULATE(cvtlp); NOEMULATE;      NOEMULATE
+/* fc */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 00 */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 04 */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 08 */       EMULATE(cvtps); EMULATE(cvtsp); NOEMULATE;      EMULATE(crc)
+/* 0c */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 10 */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 14 */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 18 */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 1c */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 20 */       EMULATE(addp4); EMULATE(addp6); EMULATE(subp4); EMULATE(subp6)
+/* 24 */       EMULATE(cvtpt); EMULATE(mulp);  EMULATE(cvttp); EMULATE(divp)
+/* 28 */       NOEMULATE;      EMULATE(cmpc3); EMULATE(scanc); EMULATE(spanc)
+/* 2c */       NOEMULATE;      EMULATE(cmpc5); EMULATE(movtc); EMULATE(movtuc)
+/* 30 */       NOEMULATE;      NOEMULATE;      NOEMULATE;      NOEMULATE
+/* 34 */       EMULATE(movp);  EMULATE(cmpp3); EMULATE(cvtpl); EMULATE(cmpp4)
+/* 38 */       EMULATE(editpc); EMULATE(matchc); EMULATE(locc); EMULATE(skpc)
+#endif
+
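The table above maps emulated opcodes 0xf8 through 0x3b (offset by 8 so the negative opcodes index from zero) to their emulation routines. A hedged C sketch of that dispatch, with hypothetical handler names standing in for the assembly entry points:

/* Sketch only: the kernel table holds assembly entry points, not C functions. */
typedef void (*emfunc)(void);

#define EMUTABLE 0x43                   /* slots for opcodes 0xf8 .. 0x3b */

static void noemulate(void) { /* deliver a reserved-instruction fault */ }
static void EMashp(void) { /* hypothetical stand-in for _EMashp */ }

static emfunc emJUMPtable[EMUTABLE] = {
        EMashp,                         /* 0xf8 */
        /* ... remaining slots; an empty slot means "not emulated" */
};

static void
dispatch(signed char opcode)
{
        int i = opcode + 8;             /* shift negative opcodes */

        if (i < 0 || i >= EMUTABLE || emJUMPtable[i] == 0)
                noemulate();            /* out of range: fault */
        else
                (*emJUMPtable[i])();    /* jsb through the table */
}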
 /*
  * Trap and fault vector routines
  */ 
@@ -488,10 +608,67 @@ SCBVEC(protflt):
        TRAP(PROTFLT)
 segflt:
        TRAP(SEGFLT)
+
+/*
+ * The following is called with the stack set up as follows:
+ *
+ *       (sp): Opcode
+ *      4(sp): Instruction PC
+ *      8(sp): Operand 1
+ *     12(sp): Operand 2
+ *     16(sp): Operand 3
+ *     20(sp): Operand 4
+ *     24(sp): Operand 5
+ *     28(sp): Operand 6
+ *     32(sp): Operand 7 (unused)
+ *     36(sp): Operand 8 (unused)
+ *     40(sp): Return PC
+ *     44(sp): Return PSL
+ *     48(sp): TOS before instruction
+ *
+ * Each individual routine is called with the stack set up as follows:
+ *
+ *       (sp): Return address of trap handler
+ *      4(sp): Opcode (will get return PSL)
+ *      8(sp): Instruction PC
+ *     12(sp): Operand 1
+ *     16(sp): Operand 2
+ *     20(sp): Operand 3
+ *     24(sp): Operand 4
+ *     28(sp): Operand 5
+ *     32(sp): Operand 6
+ *     36(sp): saved register 11
+ *     40(sp): saved register 10
+ *     44(sp): Return PC
+ *     48(sp): Return PSL
+ *     52(sp): TOS before instruction
+ */
+
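For reading the assembly that follows, the frame described above can be pictured as a C struct (hypothetical, for illustration only; the real layout is fixed by the hardware and the code that builds the frame):

struct emulate_frame {
        long    ef_opcode;      /*   (sp): opcode of the faulting instruction */
        long    ef_pc;          /*  4(sp): PC of that instruction */
        long    ef_operand[8];  /*  8(sp): operands 1-8 (7 and 8 unused) */
        long    ef_retpc;       /* 40(sp): return PC */
        long    ef_retpsl;      /* 44(sp): return PSL */
        /* 48(sp): top of stack before the instruction */
};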
+SCBVEC(emulate):
+#ifdef VAX630
+       movl    r11,32(sp)              # save register r11 in unused operand
+       movl    r10,36(sp)              # save register r10 in unused operand
+       cvtbl   (sp),r10                # get opcode
+       addl2   $8,r10                  # shift negative opcodes
+       subl3   r10,$EMUTABLE,r11       # forget it if opcode is out of range
+       bcs     noemulate
+       movl    _emJUMPtable[r10],r10   # call appropriate emulation routine
+       jsb     (r10)           # routines put return values into regs 0-5
+       movl    32(sp),r11              # restore register r11
+       movl    36(sp),r10              # restore register r10
+       insv    (sp),$0,$4,44(sp)       # and condition codes in Opcode spot
+       addl2   $40,sp                  # adjust stack for return
+       rei
+noemulate:
+       addl2   $48,sp                  # adjust stack for
+#endif VAX630
+       .word   0xffff                  # "reserved instruction fault"
+SCBVEC(emulateFPD):
+       .word   0xffff                  # "reserved instruction fault"
 SCBVEC(transflt):
        bitl    $2,(sp)+
        bnequ   tableflt
-       jsb     Fastreclaim             # try and avoid pagein
+       jsb     Fastreclaim             # try and avoid pagein
        TRAP(PAGEFLT)
 tableflt: 
        TRAP(TABLEFLT)
@@ -513,22 +690,19 @@ SCBVEC(syscall):
 
 /*
  * System page table
+ * Mbmap and Usrptmap are enlarged by CLSIZE entries
+ * as they are managed by resource maps starting with index 1 or CLSIZE.
  */ 
 #define        vaddr(x)        ((((x)-_Sysmap)/4)*NBPG+0x80000000)
 #define        SYSMAP(mname, vname, npte)                      \
 _/**/mname:    .globl  _/**/mname;             \
-       .space  npte*4;                         \
+       .space  (npte)*4;                               \
        .globl  _/**/vname;                     \
        .set    _/**/vname,vaddr(_/**/mname)
 
        .data
        .align  2
        SYSMAP(Sysmap   ,Sysbase        ,SYSPTSIZE      )
-       SYSMAP(UMBAbeg  ,umbabeg        ,0              )
-       SYSMAP(Nexmap   ,nexus          ,16*MAXNNEXUS   )
-       SYSMAP(UMEMmap  ,umem           ,512*MAXNUBA    )
-       SYSMAP(UMBAend  ,umbaend        ,0              )
-       SYSMAP(Usrptmap ,usrpt          ,USRPTSIZE      )
        SYSMAP(Forkmap  ,forkutl        ,UPAGES         )
        SYSMAP(Xswapmap ,xswaputl       ,UPAGES         )
        SYSMAP(Xswap2map,xswap2utl      ,UPAGES         )
@@ -537,12 +711,27 @@ _/**/mname:       .globl  _/**/mname;             \
        SYSMAP(Vfmap    ,vfutl          ,UPAGES         )
        SYSMAP(CMAP1    ,CADDR1         ,1              )
        SYSMAP(CMAP2    ,CADDR2         ,1              )
-       SYSMAP(mcrmap   ,mcr            ,1              )
        SYSMAP(mmap     ,vmmap          ,1              )
+       SYSMAP(alignmap ,alignutl       ,1              )       /* XXX */
        SYSMAP(msgbufmap,msgbuf         ,MSGBUFPTECNT   )
+       SYSMAP(Mbmap    ,mbutl          ,NMBCLUSTERS*CLSIZE+CLSIZE )
        SYSMAP(camap    ,cabase         ,16*CLSIZE      )
+#ifdef GPROF
+       SYSMAP(profmap  ,profbase       ,600*CLSIZE     )
+#endif
        SYSMAP(ecamap   ,calimit        ,0              )
-       SYSMAP(Mbmap    ,mbutl          ,NMBCLUSTERS*CLSIZE)
+
+       SYSMAP(UMBAbeg  ,umbabeg        ,0              )
+       SYSMAP(Nexmap   ,nexus          ,16*MAXNNEXUS   )
+       SYSMAP(UMEMmap  ,umem           ,UBAPAGES*NUBA  )
+       SYSMAP(Ioamap   ,ioa            ,MAXNIOA*IOAMAPSIZ/NBPG )
+       SYSMAP(UMBAend  ,umbaend        ,0              )
+#if VAX630
+       SYSMAP(Clockmap ,cldevice       ,1              )
+       SYSMAP(Ka630map ,ka630cpu       ,1              )
+#endif
+
+       SYSMAP(Usrptmap ,usrpt          ,USRPTSIZE+CLSIZE )
 
 eSysmap:
        .globl  _Syssize
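Each SYSMAP entry reserves npte page-table slots and binds the matching kernel virtual address through vaddr(). In C terms the computation is roughly the sketch below (the page size is passed in for illustration; 0x80000000 is the start of system space):

/* Sketch of vaddr(): a pte's index in the system page table, times the
 * page size, offset into system space. */
unsigned long
sysmap_vaddr(char *mname, char *Sysmap, unsigned long nbpg)
{
        unsigned long index = (unsigned long)(mname - Sysmap) / 4;      /* 4-byte ptes */

        return (index * nbpg + 0x80000000UL);
}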
@@ -561,6 +750,7 @@ _cpu:       .long   0
        .globl  start
 start:
        .word   0
+       mtpr    $0,$ICCS
 /* set system control block base and system page table params */
        mtpr    $_scb-0x80000000,$SCBB
        mtpr    $_Sysmap-0x80000000,$SBR
@@ -585,8 +775,15 @@ start:
 /* count up memory */
        clrl    r7
 1:     pushl   $4; pushl r7; calls $2,_badaddr; tstl r0; bneq 9f
-       acbl    $8192*1024-1,$64*1024,r7,1b
+       acbl    $MAXMEM*1024-1,$64*1024,r7,1b
 9:
+#ifdef  VAX630
+/* leave an area for uVAX II console scratch pad at the top */
+       cmpb    _cpu,$VAX_630
+       bneq    1f
+       subl2   $4096,r7
+1:
+#endif
 /* clear memory from kernel bss and pages for proc 0 u. and page table */
        movab   _edata,r6
        movab   _end,r5
@@ -594,24 +791,26 @@ start:
        addl2   $(UPAGES*NBPG)+NBPG+NBPG,r5
 1:     clrq    (r6); acbl r5,$8,r6,1b
 /* trap() and syscall() save r0-r11 in the entry mask (per ../h/reg.h) */
+/* panic() is convenient place to save all for debugging */
        bisw2   $0x0fff,_trap
        bisw2   $0x0fff,_syscall
+       bisw2   $0x0fff,_panic
        calls   $0,_fixctlrmask
-/* initialize system page table: scb and int stack writeable */
+/* initialize system page table: uba vectors and int stack writeable */
        clrl    r2
        movab   eintstack,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
 1:     bisl3   $PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
-/* make rpb read-only as red zone for interrupt stack */
+/* make rpb, scb read-only as red zone for interrupt stack */
        bicl2   $PG_PROT,_rpbmap
        bisl2   $PG_KR,_rpbmap
 /* make kernel text space read-only */
        movab   _etext+NBPG-1,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
-1:     bisl3   $PG_V|PG_KR,r2,_Sysmap[r2]; aoblss r1,r2,1b
+1:     bisl3   $PG_V|PG_URKR,r2,_Sysmap[r2]; aoblss r1,r2,1b
 /* make kernel data, bss, read-write */
        movab   _end+NBPG-1,r1; bbcc $31,r1,0f; 0:; ashl $-PGSHIFT,r1,r1
 1:     bisl3   $PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
 /* now go to mapped mode */
-       mtpr    $1,$TBIA; mtpr $1,$MAPEN; jmp *$0f; 0:
+       mtpr    $0,$TBIA; mtpr $1,$MAPEN; jmp *$0f; 0:
 /* init mem sizes */
        ashl    $-PGSHIFT,r7,_maxmem
        movl    _maxmem,_physmem
@@ -654,6 +853,7 @@ start:
        mfpr    $P1BR,PCB_P1BR(r1)
        mfpr    $P1LR,PCB_P1LR(r1)
        movl    $CLSIZE,PCB_SZPT(r1)            # init u.u_pcb.pcb_szpt
+       movl    r10,PCB_R10(r1)
        movl    r11,PCB_R11(r1)
        movab   1f,PCB_PC(r1)                   # initial pc
        clrl    PCB_PSL(r1)                     # mode(k,k), ipl=0
@@ -664,7 +864,9 @@ start:
        rei
 /* put signal trampoline code in u. area */
 1:     movab   _u,r0
-       movc3   $16,sigcode,PCB_SIGC(r0)
+       movc3   $19,sigcode,PCB_SIGC(r0)
+/* save boot device in global _bootdev */
+       movl    r10,_bootdev
 /* save reboot flags in global _boothowto */
        movl    r11,_boothowto
 /* calculate firstaddr, and call main() */
@@ -673,20 +875,69 @@ start:
 /* proc[1] == /etc/init now running here; run icode */
        pushl   $PSL_CURMOD|PSL_PRVMOD; pushl $0; rei
 
-/* signal trampoline code: it is known that this code takes exactly 16 bytes */
+/* signal trampoline code: it is known that this code takes exactly 19 bytes */
 /* in ../vax/pcb.h and in the movc3 above */
 sigcode:
-       calls   $4,5(pc)                        # params pushed by sendsig
-       chmk    $139                            # cleanup mask and onsigstack
-       rei
-       .word   0x7f                            # registers 0-6 (6==sp/compat)
-       callg   (ap),*16(ap)
-       ret
+       calls   $4,8(pc)        # params pushed by sendsig
+       movl    sp,ap           # calls frame built by sendsig
+       chmk    $103            # cleanup mask and onsigstack
+       halt                    # sigreturn() does not return!
+       .word   0x3f            # registers 0-5
+       callg   (ap),*16(ap)    # call the signal handler
+       ret                     # return to code above
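At the C level the 19-byte trampoline behaves roughly like the sketch below (illustrative only; sigreturn is the chmk $103 service and does not return):

struct sigcontext;                      /* opaque here */
extern int sigreturn(struct sigcontext *);

static void
trampoline(int sig, int code, struct sigcontext *scp,
    void (*handler)(int, int, struct sigcontext *))
{
        (*handler)(sig, code, scp);     /* callg (ap),*16(ap) */
        sigreturn(scp);                 /* restore signal mask and context */
        /* the halt above guards against sigreturn() ever returning */
}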
+
+       .set    exec,11
+       .set    exit,1
+       .globl  _icode
+       .globl  _initflags
+       .globl  _szicode
+/*
+ * Icode is copied out to process 1 to exec /etc/init.
+ * If the exec fails, process 1 exits.
+ */
+_icode:
+       pushab  b`argv-l0(pc)
+l0:    pushab  b`init-l1(pc)
+l1:    pushl   $2
+       movl    sp,ap
+       chmk    $exec
+       chmk    $exit
+
+init:  .asciz  "/etc/init"
+       .align  2
+_initflags:
+       .long   0
+argv:  .long   init+5-_icode
+       .long   _initflags-_icode
+       .long   0
+_szicode:
+       .long   _szicode-_icode
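In C, the icode fragment above amounts to the small program below (a sketch; the real code issues the chmk traps directly, and the empty string stands in for _initflags):

#include <unistd.h>

static char *argv[] = { "init", "", 0 };        /* "" stands in for _initflags */

int
main(void)
{
        execv("/etc/init", argv);       /* chmk $exec (syscall 11) */
        _exit(1);                       /* chmk $exit if the exec fails */
}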
 
 /*
  * Primitives
  */ 
 
+#ifdef GPROF
+#define        ENTRY(name, regs) \
+       .globl _/**/name; .align 1; _/**/name: .word regs; jsb mcount
+#define        JSBENTRY(name, regs) \
+       .globl _/**/name; _/**/name: \
+       movl fp,-(sp); movab -12(sp),fp; pushr $(regs); jsb mcount; \
+       popr $(regs); movl (sp)+,fp
+#else
+#define        ENTRY(name, regs) \
+       .globl _/**/name; .align 1; _/**/name: .word regs
+#define        JSBENTRY(name, regs) \
+       .globl _/**/name; _/**/name:
+#endif GPROF
+#define R0 0x01
+#define R1 0x02
+#define R2 0x04
+#define R3 0x08
+#define R4 0x10
+#define R5 0x20
+#define R6 0x40
+
 /*
  * badaddr(addr, len)
  *     see if access addr with a len type instruction causes a machine check
 /*
  * badaddr(addr, len)
  *     see if access addr with a len type instruction causes a machine check
@@ -698,40 +949,22 @@ _badaddr:
        movl    $1,r0
        mfpr    $IPL,r1
        mtpr    $HIGH,$IPL
        movl    $1,r0
        mfpr    $IPL,r1
        mtpr    $HIGH,$IPL
-       movl    _scb+MCKVEC,r2
        movl    4(ap),r3
        movl    8(ap),r4
        movl    4(ap),r3
+       movab   2f,nofault              # jump to 2f on machcheck
        bbc     $0,r4,1f; tstb  (r3)
 1:     bbc     $1,r4,1f; tstw  (r3)
 1:     bbc     $2,r4,1f; tstl  (r3)
 1:     clrl    r0                      # made it w/o machine checks
        bbc     $0,r4,1f; tstb  (r3)
+2:     clrl    nofault
        mtpr    r1,$IPL
        ret
        mtpr    r1,$IPL
        ret
-       .align  2
-9:
-       casel   _cpu,$1,$VAX_MAX
-0:
-       .word   8f-0b           # 1 is 780
-       .word   5f-0b           # 2 is 750
-       .word   5f-0b           # 3 is 730
-5:
-#if defined(VAX750) || defined(VAX730)
-       mtpr    $0xf,$MCESR
-#endif
-       brb     1f
-8:
-#if VAX780
-       mtpr    $0,$SBIFS
-#endif
-1:
-       addl2   (sp)+,sp                # discard mchchk trash
-       movab   2b,(sp)
-       rei
 
 
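The rewritten badaddr() no longer swaps the machine-check SCB vector; it arms the nofault cell that SCBVEC(machcheck) tests near the top of this file. A portable C sketch of that protocol, with setjmp/longjmp standing in for the saved resume address and the access-width selection elided:

#include <setjmp.h>

static jmp_buf nofault_ctx;
static volatile int nofault;            /* nonzero: a machine check is expected */

void
machine_check(void)                     /* what the machcheck vector does first */
{
        if (nofault)
                longjmp(nofault_ctx, 1);        /* discard the fault and resume */
        /* ... otherwise report the machine check ... */
}

int
badaddr(volatile char *addr, int len)
{
        int bad;

        (void)len;                      /* access width not modeled here */
        nofault = 1;                    /* movab 2f,nofault */
        if (setjmp(nofault_ctx) == 0) {
                (void)*addr;            /* touch the address */
                bad = 0;                /* made it w/o machine checks */
        } else
                bad = 1;
        nofault = 0;                    /* clrl nofault */
        return (bad);
}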
-       .word   0x0
+/*
+ * update profiling information for the user
+ * addupc(pc, &u.u_prof, ticks)
+ */
+ENTRY(addupc, 0)
        movl    8(ap),r2                # &u.u_prof
        subl3   8(r2),4(ap),r0          # corrected pc
        blss    9f
        movl    8(ap),r2                # &u.u_prof
        subl3   8(r2),4(ap),r0          # corrected pc
        blss    9f
@@ -754,96 +987,302 @@ _addupc: .globl  _addupc
        clrl    12(r2)
        ret
 
        clrl    12(r2)
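Between these fragments (the hunk elides the body of the routine) addupc scales the corrected pc into a profile-buffer slot in the style of profil(2). Roughly, in C, with field names following struct uprof and the exact fixed-point arithmetic simplified:

struct uprof {
        unsigned short *pr_base;        /* profile buffer */
        unsigned long   pr_size;        /* buffer size in bytes */
        unsigned long   pr_off;         /* pc offset */
        unsigned long   pr_scale;       /* scale, 16-bit fixed point */
};

void
addupc(unsigned long pc, struct uprof *up, int ticks)
{
        unsigned long slot;

        if (pc < up->pr_off)
                return;                                 /* below the profiled range */
        pc -= up->pr_off;                               /* corrected pc */
        slot = ((pc >> 1) * up->pr_scale) >> 16;        /* scale to a counter index */
        if (slot * sizeof(short) < up->pr_size)
                up->pr_base[slot] += ticks;             /* bump the 16-bit counter */
        /* on a fault while storing, the real code clears pr_scale (clrl 12(r2)) */
}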
        ret
 
-_Copyin:       .globl  _Copyin         # <<<massaged for jsb by asm.sed>>>
-       movl    12(sp),r0               # copy length
-       blss    ersb
-       movl    4(sp),r1                # copy user address
-       cmpl    $NBPG,r0                # probing one page or less ?
-       bgeq    cishort                 # yes
-ciloop:
-       prober  $3,$NBPG,(r1)           # bytes accessible ?
-       beql    ersb                    # no
-       addl2   $NBPG,r1                # incr user address ptr
-       acbl    $NBPG+1,$-NBPG,r0,ciloop        # reduce count and loop
-cishort:
-       prober  $3,r0,(r1)              # bytes accessible ?
+/*
+ * Copy a null terminated string from the user address space into
+ * the kernel address space.
+ *
+ * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
+ */
+ENTRY(copyinstr, R6)
+       movl    12(ap),r6               # r6 = max length
+       jlss    8f
+       movl    4(ap),r1                # r1 = user address
+       bicl3   $~(NBPG*CLSIZE-1),r1,r2 # r2 = bytes on first page
+       subl3   r2,$NBPG*CLSIZE,r2
+       movl    8(ap),r3                # r3 = kernel address
+1:
+       cmpl    r6,r2                   # r2 = min(bytes on page, length left);
+       jgeq    2f
+       movl    r6,r2
+2:
+       prober  $3,r2,(r1)              # bytes accessible?
+       jeql    8f
+       subl2   r2,r6                   # update bytes left count
+#ifdef NOSUBSINST
+       # fake the locc instr. for processors that don't have it
+       movl    r2,r0
+6:
+       tstb    (r1)+
+       jeql    5f
+       sobgtr  r0,6b
+       jbr     7f
+5:
+       decl    r1
+       jbr     3f
+7:
+#else
+       locc    $0,r2,(r1)              # null byte found?
+       jneq    3f
+#endif
+       subl2   r2,r1                   # back up pointer updated by `locc'
+       movc3   r2,(r1),(r3)            # copy in next piece
+       movl    $(NBPG*CLSIZE),r2       # check next page
+       tstl    r6                      # run out of space?
+       jneq    1b
+       movl    $ENOENT,r0              # set error code and return
+       jbr     9f
+3:
+       tstl    16(ap)                  # return length?
+       beql    4f
+       subl3   r6,12(ap),r6            # actual len = maxlen - unused pages
+       subl2   r0,r6                   #       - unused on this page
+       addl3   $1,r6,*16(ap)           #       + the null byte
+4:
+       subl2   r0,r2                   # r2 = number of bytes to move
+       subl2   r2,r1                   # back up pointer updated by `locc'
+       incl    r2                      # copy null byte as well
+       movc3   r2,(r1),(r3)            # copy in last piece
+       clrl    r0                      # redundant
+       ret
+8:
+       movl    $EFAULT,r0
+9:
+       tstl    16(ap)
+       beql    1f
+       subl3   r6,12(ap),*16(ap)
+1:
+       ret
+
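copyoutstr and copystr below follow the same shape as copyinstr above: validate and scan at most one page cluster per iteration until a NUL is found or maxlen runs out. A C sketch of that loop, with a trivial stand-in for the prober instruction and the locc scan done by hand:

#include <errno.h>

#define CLBYTES 1024            /* stands in for NBPG*CLSIZE */

/* Stand-in for prober: pretend every chunk is accessible. */
static int useracc(const char *p, unsigned n) { (void)p; (void)n; return (1); }

int
copyinstr(const char *uaddr, char *kaddr, unsigned maxlen, unsigned *lencopied)
{
        unsigned left = maxlen, copied = 0, chunk, i;

        while (left != 0) {
                chunk = CLBYTES - ((unsigned long)uaddr & (CLBYTES - 1));
                if (chunk > left)
                        chunk = left;
                if (!useracc(uaddr, chunk))
                        return (EFAULT);        /* chunk not readable */
                for (i = 0; i < chunk; i++) {
                        if ((kaddr[copied + i] = uaddr[i]) == '\0') {
                                if (lencopied)
                                        *lencopied = copied + i + 1;
                                return (0);     /* found the terminating NUL */
                        }
                }
                uaddr += chunk;
                copied += chunk;
                left -= chunk;
        }
        if (lencopied)
                *lencopied = copied;
        return (ENOENT);                        /* ran out of room */
}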
+/*
+ * Copy a null terminated string from the kernel
+ * address space to the user address space.
+ *
+ * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
+ */
+ENTRY(copyoutstr, R6)
+       movl    12(ap),r6               # r6 = max length
+       jlss    8b
+       movl    4(ap),r1                # r1 = kernel address
+       movl    8(ap),r3                # r3 = user address
+       bicl3   $~(NBPG*CLSIZE-1),r3,r2 # r2 = bytes on first page
+       subl3   r2,$NBPG*CLSIZE,r2
+1:
+       cmpl    r6,r2                   # r2 = min(bytes on page, length left);
+       jgeq    2f
+       movl    r6,r2
+2:
+       probew  $3,r2,(r3)              # bytes accessible?
+       jeql    8b
+       subl2   r2,r6                   # update bytes left count
+#ifdef NOSUBSINST
+       # fake the locc instr. for processors that don't have it
+       movl    r2,r0
+6:
+       tstb    (r1)+
+       jeql    5f
+       sobgtr  r0,6b
+       jbr     7f
+5:
+       decl    r1
+       jbr     3b
+7:
+#else
+       locc    $0,r2,(r1)              # null byte found?
+       jneq    3b
+#endif
+       subl2   r2,r1                   # back up pointer updated by `locc'
+       movc3   r2,(r1),(r3)            # copy in next piece
+       movl    $(NBPG*CLSIZE),r2       # check next page
+       tstl    r6                      # run out of space?
+       jneq    1b
+       movl    $ENOENT,r0              # set error code and return
+       jbr     9b
+
+/*
+ * Copy a null terminated string from one point to another in
+ * the kernel address space.
+ *
+ * copystr(fromaddr, toaddr, maxlength, &lencopied)
+ */
+ENTRY(copystr, R6)
+       movl    12(ap),r6               # r6 = max length
+       jlss    8b
+       movl    4(ap),r1                # r1 = src address
+       movl    8(ap),r3                # r3 = dest address
+1:
+       movzwl  $65535,r2               # r2 = bytes in first chunk
+       cmpl    r6,r2                   # r2 = min(bytes in chunk, length left);
+       jgeq    2f
+       movl    r6,r2
+2:
+       subl2   r2,r6                   # update bytes left count
+#ifdef NOSUBSINST
+       # fake the locc instr. for processors that don't have it
+       movl    r2,r0
+6:
+       tstb    (r1)+
+       jeql    5f
+       sobgtr  r0,6b
+       jbr     7f
+5:
+       decl    r1
+       jbr     3b
+7:
+#else
+       locc    $0,r2,(r1)              # null byte found?
+       jneq    3b
+#endif
+       subl2   r2,r1                   # back up pointer updated by `locc'
+       movc3   r2,(r1),(r3)            # copy in next piece
+       tstl    r6                      # run out of space?
+       jneq    1b
+       movl    $ENOENT,r0              # set error code and return
+       jbr     9b
+
+/* 
+ * Copy specified amount of data from user space into the kernel
+ * Copyin(from, to, len)
+ *     r1 == from (user source address)
+ *     r3 == to (kernel destination address)
+ *     r5 == length
+ */
+       .align  1
+JSBENTRY(Copyin, R1|R3|R5)
+       cmpl    r5,$(NBPG*CLSIZE)       # probing one page or less ?
+       bgtru   1f                      # no
+       prober  $3,r5,(r1)              # bytes accessible ?
        beql    ersb                    # no
-       movl    4(sp),r1
-       movl    8(sp),r3
-       jbr     2f
+       movc3   r5,(r1),(r3)
+/*     clrl    r0                      # redundant */
+       rsb
 1:
-       subl2   r0,12(sp)
+       blss    ersb                    # negative length?
+       pushl   r6                      # r6 = length
+       movl    r5,r6
+       bicl3   $~(NBPG*CLSIZE-1),r1,r0 # r0 = bytes on first page
+       subl3   r0,$(NBPG*CLSIZE),r0
+       addl2   $(NBPG*CLSIZE),r0       # plus one additional full page
+       jbr     2f
+
+ciloop:
        movc3   r0,(r1),(r3)
+       movl    $(2*NBPG*CLSIZE),r0     # next amount to move
 2:
-       movzwl  $65535,r0
-       cmpl    12(sp),r0
-       jgtr    1b
-       movc3   12(sp),(r1),(r3)
-       clrl    r0                      #redundant
+       cmpl    r0,r6
+       bleq    3f
+       movl    r6,r0
+3:
+       prober  $3,r0,(r1)              # bytes accessible ?
+       beql    ersb1                   # no
+       subl2   r0,r6                   # last move?
+       bneq    ciloop                  # no
+
+       movc3   r0,(r1),(r3)
+/*     clrl    r0                      # redundant */
+       movl    (sp)+,r6                # restore r6
        rsb
 
+ersb1:
+       movl    (sp)+,r6                # restore r6
 ersb:
        movl    $EFAULT,r0
        rsb
 
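Copyin above and Copyout below share this structure: the first probe covers the partial leading cluster plus one full cluster, after which the steady state probes and moves two clusters at a time. A simplified C rendering (a sketch; the real routines take their arguments in r1, r3 and r5 via jsb):

#include <errno.h>
#include <string.h>

#define CLBYTES 1024            /* stands in for NBPG*CLSIZE */

/* Stand-in for prober/probew: pretend every chunk is accessible. */
static int useracc(const char *p, unsigned n) { (void)p; (void)n; return (1); }

int
copyin(const char *from, char *to, unsigned len)
{
        unsigned chunk;

        if (len <= CLBYTES) {                   /* short case: one probe */
                if (!useracc(from, len))
                        return (EFAULT);
                memcpy(to, from, len);
                return (0);
        }
        chunk = CLBYTES - ((unsigned long)from & (CLBYTES - 1)) + CLBYTES;
        while (len != 0) {
                if (chunk > len)
                        chunk = len;
                if (!useracc(from, chunk))
                        return (EFAULT);
                memcpy(to, from, chunk);
                from += chunk;
                to += chunk;
                len -= chunk;
                chunk = 2 * CLBYTES;            /* two clusters at a time */
        }
        return (0);
}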
-_Copyout:      .globl  _Copyout        # <<<massaged for jsb by asm.sed >>>
-       movl    12(sp),r0               # get count
-       blss    ersb
-       movl    8(sp),r1                # get user address
-       cmpl    $NBPG,r0                # can do in one probew?
-       bgeq    coshort                 # yes
-coloop:
-       probew  $3,$NBPG,(r1)           # bytes accessible?
-       beql    ersb                    # no 
-       addl2   $NBPG,r1                # increment user address
-       acbl    $NBPG+1,$-NBPG,r0,coloop        # reduce count and loop
-coshort:
-       probew  $3,r0,(r1)              # bytes accessible?
+/* 
+ * Copy specified amount of data from kernel to the user space
+ * Copyout(from, to, len)
+ *     r1 == from (kernel source address)
+ *     r3 == to (user destination address)
+ *     r5 == length
+ */
+       .align  1
+JSBENTRY(Copyout, R1|R3|R5)
+       cmpl    r5,$(NBPG*CLSIZE)       # moving one page or less ?
+       bgtru   1f                      # no
+       probew  $3,r5,(r3)              # bytes writeable?
        beql    ersb                    # no
        beql    ersb                    # no
-       movl    4(sp),r1
-       movl    8(sp),r3
-       jbr     2f
+       movc3   r5,(r1),(r3)
+/*     clrl    r0                      # redundant */
+       rsb
 1:
 1:
-       subl2   r0,12(sp)
+       blss    ersb                    # negative length?
+       pushl   r6                      # r6 = length
+       movl    r5,r6
+       bicl3   $~(NBPG*CLSIZE-1),r3,r0 # r0 = bytes on first page
+       subl3   r0,$(NBPG*CLSIZE),r0
+       addl2   $(NBPG*CLSIZE),r0       # plus one additional full page
+       jbr     2f
+
+coloop:
        movc3   r0,(r1),(r3)
        movc3   r0,(r1),(r3)
+       movl    $(2*NBPG*CLSIZE),r0     # next amount to move
 2:
 2:
-       movzwl  $65535,r0
-       cmpl    12(sp),r0
-       jgtr    1b
-       movc3   12(sp),(r1),(r3)
-       clrl    r0                              #redundant
+       cmpl    r0,r6
+       bleq    3f
+       movl    r6,r0
+3:
+       probew  $3,r0,(r3)              # bytes writeable?
+       beql    ersb1                   # no
+       subl2   r0,r6                   # last move?
+       bneq    coloop                  # no
+
+       movc3   r0,(r1),(r3)
+/*     clrl    r0                      # redundant */
+       movl    (sp)+,r6                # restore r6
        rsb
 
 /*
  * non-local goto's
  */
-       .globl  _Setjmp
-_Setjmp:
-       movq    r6,(r0)+
-       movq    r8,(r0)+
-       movq    r10,(r0)+
-       movq    r12,(r0)+
-       addl3   $4,sp,(r0)+
-       movl    (sp),(r0)
+#ifdef notdef          /* this is now expanded completely inline */
+       .align  1
+JSBENTRY(Setjmp, R0)
+       movl    fp,(r0)+        # current stack frame
+       movl    (sp),(r0)       # resuming pc
        clrl    r0
        rsb
+#endif
 
-       .globl  _Longjmp
-_Longjmp:
-       movq    (r0)+,r6
-       movq    (r0)+,r8
-       movq    (r0)+,r10
-       movq    (r0)+,r12
-       movl    (r0)+,r1
-       cmpl    r1,sp                           # must be a pop
-       bgequ   lj2
-       pushab  lj1
+#define PCLOC 16       /* location of pc in calls frame */
+#define APLOC 8                /* location of ap,fp in calls frame */
+       .align  1
+JSBENTRY(Longjmp, R0)
+       movl    (r0)+,newfp     # must save parameters in memory as all
+       movl    (r0),newpc      # registers may be clobbered.
+1:
+       cmpl    fp,newfp        # are we there yet?
+       bgequ   2f              # yes
+       moval   1b,PCLOC(fp)    # redirect return pc to us!
+       ret                     # pop next frame
+2:
+       beql    3f              # did we miss our frame?
+       pushab  4f              # yep ?!?
        calls   $1,_panic
        calls   $1,_panic
-lj2:
-       movl    r1,sp
-       jmp     *(r0)                           # ``rsb''
+3:
+       movl    newpc,r0        # all done, just return to the `setjmp'
+       jmp     (r0)            # ``rsb''
 
-lj1:   .asciz  "longjmp"
+       .data
+newpc: .space  4
+newfp: .space  4
+4:     .asciz  "longjmp"
+       .text
+/*
+ * setjmp that saves all registers as the call frame may not
+ * be available to recover them in the usual manner by longjmp.
+ * Called before swapping out the u. area, restored by resume()
+ * below.
+ */
+ENTRY(savectx, 0)
+       movl    4(ap),r0
+       movq    r6,(r0)+
+       movq    r8,(r0)+
+       movq    r10,(r0)+
+       movq    APLOC(fp),(r0)+ # save ap, fp
+       addl3   $8,ap,(r0)+     # save sp
+       movl    PCLOC(fp),(r0)  # save pc
+       clrl    r0
+       ret
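Longjmp above unwinds by rewriting each frame's saved PC and returning through it until the recorded frame pointer is reached, and savectx snapshots every register so the context can be resumed after the frames are gone. The contract they implement is the familiar setjmp/longjmp one, shown here as a user-level example:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf ctx;

static void
deep(void)
{
        longjmp(ctx, 1);                /* analogous to the kernel's Longjmp */
}

int
main(void)
{
        if (setjmp(ctx) == 0) {         /* analogous to the inline Setjmp */
                deep();
                printf("not reached\n");
        } else
                printf("resumed at the setjmp\n");
        return (0);
}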
 
        .globl  _whichqs
        .globl  _qs
@@ -867,10 +1306,10 @@ lj1:     .asciz  "longjmp"
 /*
  * Setrq(p), using fancy VAX instructions.
  *
- * Call should be made at spl6(), and p->p_stat should be SRUN
+ * Call should be made at splclock(), and p->p_stat should be SRUN
  */
-       .globl  _Setrq          # <<<massaged to jsb by "asm.sed">>>
-_Setrq:
+       .align  1
+ JSBENTRY(Setrq, R0)
        tstl    P_RLINK(r0)             ## firewall: p->p_rlink must be 0
        beql    set1                    ##
        pushab  set3                    ##
@@ -889,10 +1328,10 @@ set3:    .asciz  "setrq"
 /*
  * Remrq(p), using fancy VAX instructions
  *
- * Call should be made at spl6().
+ * Call should be made at splclock().
  */
-       .globl  _Remrq          # <<<massaged to jsb by "asm.sed">>>
-_Remrq:
+       .align  1
+ JSBENTRY(Remrq, R0)
        movzbl  P_PRI(r0),r1
        ashl    $-2,r1,r1
        bbsc    r1,_whichqs,rem1
@@ -918,51 +1357,63 @@ rem3:    .asciz  "remrq"
 _masterpaddr:
        .long   0
 
+       .set    ASTLVL_NONE,4
        .text
 sw0:   .asciz  "swtch"
+
+/*
+ * When no processes are on the runq, Swtch branches to idle
+ * to wait for something to come ready.
+ */
+       .globl  Idle
+Idle: idle:
+       mtpr    $0,$IPL                 # must allow interrupts here
+       tstl    _whichqs                # look for non-empty queue
+       bneq    sw1
+       brb     idle
+
+badsw: pushab  sw0
+       calls   $1,_panic
+       /*NOTREACHED*/
+
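Swtch below picks the highest-priority non-empty run queue with ffs on the _whichqs bit mask and dequeues the head of the matching _qs entry with remque. A C sketch of that selection (illustrative; as in the kernel, the queue header doubles as a list node):

struct qnode {
        struct qnode *q_link;           /* forward link */
        struct qnode *q_rlink;          /* backward link */
};

static unsigned whichqs;                /* bit i set => qs[i] is non-empty */
static struct qnode qs[32];             /* circular run queues */

static struct qnode *
select_runnable(void)
{
        struct qnode *q, *p;
        int bit;

        for (bit = 0; bit < 32; bit++) {        /* ffs $0,$32,_whichqs,r0 */
                if ((whichqs & (1u << bit)) == 0)
                        continue;
                q = &qs[bit];
                p = q->q_link;                  /* head of the queue */
                q->q_link = p->q_link;          /* remque *(r1),r2 */
                p->q_link->q_rlink = q;
                if (q->q_link == q)             /* queue empty again? */
                        whichqs &= ~(1u << bit);
                return (p);                     /* highest-priority process */
        }
        return (0);                             /* nothing runnable: idle */
}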
 /*
  * Swtch(), using fancy VAX instructions
  */
-       .globl  _Swtch
-_Swtch:                                # <<<massaged to jsb by "asm.sed">>>
+       .align  1
+JSBENTRY(Swtch, 0)
        movl    $1,_noproc
-       clrl    _runrun
+       incl    _cnt+V_SWTCH
 sw1:   ffs     $0,$32,_whichqs,r0      # look for non-empty queue
-       bneq    sw1a
-       mtpr    $0,$IPL                 # must allow interrupts here
-       jbr     sw1                     # this is an idle loop!
-sw1a:  mtpr    $0x18,$IPL              # lock out all so _whichqs==_qs
+       beql    idle                    # if none, idle
+       mtpr    $0x18,$IPL              # lock out all so _whichqs==_qs
        bbcc    r0,_whichqs,sw1         # proc moved via lbolt interrupt
        movaq   _qs[r0],r1
        remque  *(r1),r2                # r2 = p = highest pri process
-       bvc     sw2                     # make sure something was there
-sw1b:  pushab  sw0
-       calls   $1,_panic
+       bvs     badsw                   # make sure something was there
 sw2:   beql    sw3
        insv    $1,r0,$1,_whichqs       # still more procs in this queue
 sw3:
        clrl    _noproc
+       clrl    _runrun
        tstl    P_WCHAN(r2)             ## firewalls
-       bneq    sw1b                    ##
-       movzbl  P_STAT(r2),r3           ##
-       cmpl    $SRUN,r3                ##
-       bneq    sw1b                    ##
+       bneq    badsw                   ##
+       cmpb    P_STAT(r2),$SRUN        ##
+       bneq    badsw                   ##
        clrl    P_RLINK(r2)             ##
        movl    *P_ADDR(r2),r0
+#ifdef notdef
+       cmpl    r0,_masterpaddr         # resume of current proc is easy
+       beql    res0
+#endif
        movl    r0,_masterpaddr
        ashl    $PGSHIFT,r0,r0          # r0 = pcbb(p)
-/*     mfpr    $PCBB,r1                # resume of current proc is easy
- *     cmpl    r0,r1
- */    beql    res0
-       incl    _cnt+V_SWTCH
 /* fall into... */
 
 /*
  * Resume(pf)
  */
-       .globl  _Resume         # <<<massaged to jsb by "asm.sed">>>
-_Resume:
-       mtpr    $0x18,$IPL                      # no interrupts, please
+JSBENTRY(Resume, R0)
+       mtpr    $HIGH,$IPL                      # no interrupts, please
        movl    _CMAP2,_u+PCB_CMAP2     # yech
        svpctx
        mtpr    r0,$PCBB
@@ -971,19 +1422,34 @@ _Resume:
        mtpr    $_CADDR2,$TBIS
 res0:
        tstl    _u+PCB_SSWAP
-       beql    res1
-       movl    _u+PCB_SSWAP,r0
+       bneq    res1
+       rei
+res1:
+       movl    _u+PCB_SSWAP,r0                 # longjmp to saved context
        clrl    _u+PCB_SSWAP
        clrl    _u+PCB_SSWAP
-       movab   _Longjmp,(sp)
+       movq    (r0)+,r6
+       movq    (r0)+,r8
+       movq    (r0)+,r10
+       movq    (r0)+,r12
+       movl    (r0)+,r1
+       cmpl    r1,sp                           # must be a pop
+       bgequ   1f
+       pushab  2f
+       calls   $1,_panic
+       /* NOTREACHED */
+1:
+       movl    r1,sp
+       movl    (r0),(sp)                       # address to return to
        movl    $PSL_PRVMOD,4(sp)               # ``cheating'' (jfr)
        movl    $PSL_PRVMOD,4(sp)               # ``cheating'' (jfr)
-res1:
        rei
 
        rei
 
+2:     .asciz  "ldctx"
+
 /*
  * {fu,su},{byte,word}, all massaged by asm.sed to jsb's
  */
 /*
  * {fu,su},{byte,word}, all massaged by asm.sed to jsb's
  */
-       .globl  _Fuword
-_Fuword:
+       .align  1
+JSBENTRY(Fuword, R0)
        prober  $3,$4,(r0)
        beql    fserr
        movl    (r0),r0
        prober  $3,$4,(r0)
        beql    fserr
        movl    (r0),r0
@@ -992,23 +1458,23 @@ fserr:
        mnegl   $1,r0
        rsb
 
        mnegl   $1,r0
        rsb
 
-       .globl  _Fubyte
-_Fubyte:
+       .align  1
+JSBENTRY(Fubyte, R0)
        prober  $3,$1,(r0)
        beql    fserr
        movzbl  (r0),r0
        rsb
 
        prober  $3,$1,(r0)
        beql    fserr
        movzbl  (r0),r0
        rsb
 
-       .globl  _Suword
-_Suword:
+       .align  1
+JSBENTRY(Suword, R0|R1)
        probew  $3,$4,(r0)
        beql    fserr
        movl    r1,(r0)
        clrl    r0
        rsb
 
        probew  $3,$4,(r0)
        beql    fserr
        movl    r1,(r0)
        clrl    r0
        rsb
 
-       .globl  _Subyte
-_Subyte:
+       .align  1
+JSBENTRY(Subyte, R0|R1)
        probew  $3,$1,(r0)
        beql    fserr
        movb    r1,(r0)
        probew  $3,$1,(r0)
        beql    fserr
        movb    r1,(r0)
@@ -1019,8 +1485,7 @@ _Subyte:
  * Copy 1 relocation unit (NBPG bytes)
  * from user virtual address to physical address
  */
  * Copy 1 relocation unit (NBPG bytes)
  * from user virtual address to physical address
  */
-_copyseg:      .globl  _copyseg
-       .word   0x0
+ENTRY(copyseg, 0)
        bisl3   $PG_V|PG_KW,8(ap),_CMAP2
        mtpr    $_CADDR2,$TBIS  # invalidate entry for copy 
        movc3   $NBPG,*4(ap),_CADDR2
        bisl3   $PG_V|PG_KW,8(ap),_CMAP2
        mtpr    $_CADDR2,$TBIS  # invalidate entry for copy 
        movc3   $NBPG,*4(ap),_CADDR2
@@ -1030,8 +1495,7 @@ _copyseg:         .globl  _copyseg
  * zero out physical memory
  * specified in relocation units (NBPG bytes)
  */
  * zero out physical memory
  * specified in relocation units (NBPG bytes)
  */
-_clearseg:     .globl  _clearseg
-       .word   0x0
+ENTRY(clearseg, 0)
        bisl3   $PG_V|PG_KW,4(ap),_CMAP1
        mtpr    $_CADDR1,$TBIS
        movc5   $0,(sp),$0,$NBPG,_CADDR1
        bisl3   $PG_V|PG_KW,4(ap),_CMAP1
        mtpr    $_CADDR1,$TBIS
        movc5   $0,(sp),$0,$NBPG,_CADDR1
@@ -1042,8 +1506,7 @@ _clearseg:        .globl  _clearseg
  * Given virtual address, byte count, and rw flag
  * returns 0 on no access.
  */
  * Given virtual address, byte count, and rw flag
  * returns 0 on no access.
  */
-_useracc:      .globl  _useracc
-       .word   0x0
+ENTRY(useracc, 0)
        movl    4(ap),r0                # get va
        movl    8(ap),r1                # count
        tstl    12(ap)                  # test for read access ?
        movl    4(ap),r0                # get va
  * We can't use the probe instruction directly because
  * it ors together current and previous mode.
  */
  * We can't use the probe instruction directly because
  * it ors together current and previous mode.
  */
-       .globl  _kernacc
-_kernacc:
-       .word   0x0
+ ENTRY(kernacc, 0)
        movl    4(ap),r0        # virtual address
        bbcc    $31,r0,kacc1
        bbs     $30,r0,kacerr
        movl    4(ap),r0        # virtual address
        bbcc    $31,r0,kacc1
        bbs     $30,r0,kacerr
@@ -1139,35 +1600,37 @@ kacerr:
  *     for the purpose of simulating a reference bit)
  *
  * Built in constants:
  *     for the purpose of simulating a reference bit)
  *
  * Built in constants:
- *     CLSIZE of 2, USRSTACK of 0x7ffff000, any bit fields
- *     in pte's or the core map
+ *     CLSIZE of 2, any bit fields in pte's
  */
        .text
        .globl  Fastreclaim
 Fastreclaim:
        PUSHR
  */
        .text
        .globl  Fastreclaim
 Fastreclaim:
        PUSHR
+#ifdef GPROF
+       movl    fp,-(sp)
+       movab   12(sp),fp
+       jsb     mcount
+       movl    (sp)+,fp
+#endif GPROF
        extzv   $9,$23,28(sp),r3        # virtual address
        bicl2   $1,r3                   # v = clbase(btop(virtaddr)); 
        movl    _u+U_PROCP,r5           # p = u.u_procp 
                                        # from vtopte(p, v) ...
+       movl    $1,r2                   # type = CTEXT;
        cmpl    r3,P_TSIZE(r5)
-       jgequ   2f                      # if (isatsv(p, v)) {
-       ashl    $2,r3,r4
-       addl2   P_P0BR(r5),r4           #       tptopte(p, vtotp(p, v));
-       movl    $1,r2                   #       type = CTEXT;
-       jbr     3f
-2:
-       subl3   P_SSIZE(r5),$0x3ffff8,r0
+       jlssu   1f                      # if (isatsv(p, v)) {
+       addl3   P_TSIZE(r5),P_DSIZE(r5),r0
        cmpl    r3,r0
-       jgequ   2f                      # } else if (isadsv(p, v)) {
-       ashl    $2,r3,r4
-       addl2   P_P0BR(r5),r4           #       dptopte(p, vtodp(p, v));
+       jgequ   2f
        clrl    r2                      #       type = !CTEXT;
+1:
+       ashl    $2,r3,r4
+       addl2   P_P0BR(r5),r4           #       tptopte(p, vtotp(p, v));
        jbr     3f
 2:
        cvtwl   P_SZPT(r5),r4           # } else (isassv(p, v)) {
        ashl    $7,r4,r4
-       subl2   $(0x3ffff8+UPAGES),r4
+       subl2   $0x400000,r4
        addl2   r3,r4
        ashl    $2,r4,r4
        addl2   P_P0BR(r5),r4           #       sptopte(p, vtosp(p, v));
@@ -1184,14 +1647,14 @@ Fastreclaim:
        subl2   _firstfree,r0
        ashl    $-1,r0,r0       
        incl    r0                      # pgtocm(pte->pg_pfnum) 
-       mull2   $12,r0
+       mull2   $SZ_CMAP,r0
        addl2   _cmap,r0                # &cmap[pgtocm(pte->pg_pfnum)] 
        tstl    r2
        jeql    2f                      # if (type == CTEXT &&
-       jbc     $29,4(r0),2f            #     c_intrans)
+       jbc     $C_INTRANS,(r0),2f      #     c_intrans)
        POPR; rsb                       #       let pagein handle it
 2:
-       jbc     $30,4(r0),2f            # if (c_free)
+       jbc     $C_FREE,(r0),2f         # if (c_free)
        POPR; rsb                       #       let pagein handle it 
 2:
        bisb2   $0x80,3(r4)             # pte->pg_v = 1;