* Copyright (c) 1980 Regents of the University of California.
* All rights reserved. The Berkeley software License Agreement
* specifies the terms and conditions for redistribution.
* @(#)locore.s 6.37 (Berkeley) %G%
#include "../vaxuba/ubareg.h"
/* Assembler constants: interrupt-priority mask, SCB offset, stack sizing. */
.set HIGH,0x1f # mask for total disable (IPL 0x1f blocks all interrupts)
.set MCKVEC,4 # offset into scb of machine check vector
.set NISP,3 # number of interrupt stack pages
* User structure is UPAGES at top of user space.
.set _u,0x80000000 - UPAGES*NBPG
* Called by auto-restart.
* May be called manually.
#define _rpbmap _Sysmap # rpb, scb, UNI*vec, istack*4
tstl _rpb+RP_FLAG # dump only once!
* Interrupt vector routines
/* SCBVEC(name): emit the aligned, global entry label _Xname for a
   System Control Block vector (the empty-comment splice is K&R
   token pasting). */
#define SCBVEC(name) .align 2; .globl _X/**/name; _X/**/name
/* PANIC(msg): clear _waittime (so panic will not attempt a sync),
   then call panic(msg) with the string assembled inline. */
#define PANIC(msg) clrl _waittime; pushab 1f; \
calls $1,_panic; 1: .asciz msg
/* PRINTF(n,msg): call printf(msg, <n args already pushed>). */
#define PRINTF(n,msg) pushab 1f; calls $n+1,_printf; MSG(msg)
/* MSG(msg): assemble the format string into .data, resume .text. */
#define MSG(msg) .data; 1: .asciz msg; .text
/* PUSHR: save r0-r5 (register mask 0x3f); undone by POPR. */
#define PUSHR pushr $0x3f
nofault: .long 0 # where to go on predicted machcheck (0 = none expected)
/* Machine-check and miscellaneous SCB error vectors.  Each saves
   r0-r5 (PUSHR), then either calls a C handler or panics.  Entry
   labels are supplied by SCBVEC() expansions not visible in this
   excerpt. */
PUSHR; pushab 6*4(sp); calls $1,_machinecheck; POPR;
#if defined(VAX750) || defined(VAX730)
addl2 (sp)+,sp # discard mchchk trash (byte count is on top of stack)
PUSHR; PANIC("KSP not valid");
SCBVEC(chme): SCBVEC(chms): SCBVEC(chmu):
PUSHR; PANIC("CHM? in kernel");
PUSHR; PRINTF(0, "stray scb interrupt\n"); POPR;
PUSHR; mfpr $IPL,-(sp); PRINTF(1, "nexus stray intr ipl%x\n"); POPR; rei
PUSHR; calls $0,_memerr; POPR; rei
PUSHR; pushl 6*4(sp); PRINTF(1,"write timeout %x\n"); POPR;
/* MBA interrupt stubs: bump the per-adapter counter, push the MBA
   number as the argument, and join common code at 1f (not visible). */
PUSHR; incl _intrcnt+I_MBA3; pushl $3; brb 1f
PUSHR; incl _intrcnt+I_MBA2; pushl $2; brb 1f
PUSHR; incl _intrcnt+I_MBA1; pushl $1; brb 1f
PUSHR; incl _intrcnt+I_MBA0; pushl $0
#if defined(VAX780) || defined(VAX8600)
* Registers for the uba handling code
#define I_UBA I_UBA0 /* base of UBA interrupt counters */
/* Per-UBA interrupt stubs: save r0-r5, load the UBA number and the
   matching uba_hd pointer into the agreed registers (rUBANUM/rUBAHD),
   then fall into the common dispatch at 1f. */
PUSHR; movl $7,rUBANUM; moval _uba_hd+(7*UH_SIZE),rUBAHD; brb 1f
PUSHR; movl $6,rUBANUM; moval _uba_hd+(6*UH_SIZE),rUBAHD; brb 1f
PUSHR; movl $5,rUBANUM; moval _uba_hd+(5*UH_SIZE),rUBAHD; brb 1f
PUSHR; movl $4,rUBANUM; moval _uba_hd+(4*UH_SIZE),rUBAHD; brb 1f
PUSHR; movl $3,rUBANUM; moval _uba_hd+(3*UH_SIZE),rUBAHD; brb 1f
PUSHR; movl $2,rUBANUM; moval _uba_hd+(2*UH_SIZE),rUBAHD; brb 1f
PUSHR; movl $1,rUBANUM; moval _uba_hd+(1*UH_SIZE),rUBAHD; brb 1f
PUSHR; movl $0,rUBANUM; moval _uba_hd+(0*UH_SIZE),rUBAHD;
/* Common dispatch: read the BRRVR for the current IPL to get the
   device vector, bias it by this UBA's vector base, and jump to the
   device routine (skipping its leading pushr, already done above). */
mfpr $IPL,r2 /* r2 = mfpr(IPL); */
movl UH_UBA(rUBAHD),rUBA /* uba = uhp->uh_uba; */
movl UBA_BRRVR-0x14*4(rUBA)[r2],rUVEC
/* uvec = uba->uba_brrvr[r2-0x14] */
addl2 UH_VEC(rUBAHD),rUVEC /* uvec += uh->uh_vec */
jmp 2(r1) /* 2 skips ``pushr $0x3f'' */
/* Error path: let ubaerror decide; rUVEC returns its verdict. */
PUSHR; calls $0,_ubaerror; POPR /* ubaerror r/w's r0-r5 */
tstl rUVEC; jneq ubanorm /* rUVEC contains result */
incl _intrcnt+I_UBA[rUBANUM]
PUSHR; calls $0,_cnrint; POPR
PUSHR; calls $0,_cnxint; POPR
/* Clock interrupt vectors: restart the interval clock, then pass the
   interrupted pc and psl (copied out of the exception frame; the
   offset expression is re-evaluated after each push as sp moves) to
   hardclock()/softclock(). */
mtpr $ICCS_RUN|ICCS_IE|ICCS_INT|ICCS_ERR,$ICCS
pushl 4+6*4(sp); pushl 4+6*4(sp); # saved psl, pc from the frame
pushl 4+6*4(sp); pushl 4+6*4(sp); # saved psl, pc from the frame
calls $2,_hardclock # hardclock(pc,psl)
pushl 4+6*4(sp); pushl 4+6*4(sp); # saved psl, pc from the frame
calls $2,_softclock # softclock(pc,psl)
#include "../net/netisr.h"
/* Software network interrupt dispatch: for each protocol, bbcc tests
   and clears its bit in _netisr; if the bit was set, fall through and
   call that protocol's interrupt routine, else skip to 1f. */
bbcc $NETISR_IMP,_netisr,1f; calls $0,_impintr; 1:
bbcc $NETISR_IP,_netisr,1f; calls $0,_ipintr; 1:
bbcc $NETISR_NS,_netisr,1f; calls $0,_nsintr; 1:
bbcc $NETISR_RAW,_netisr,1f; calls $0,_rawintr; 1:
#if defined(VAX750) || defined(VAX730) || defined(VAX8600)
/* Dispatch on CPU type: casel indexes the .word displacement table
   that follows (targets 5f/3f/6f are outside this excerpt). */
casel _cpu,$VAX_750,$VAX_8600
.word 5f-0b # 2 is VAX_750
.word 3f-0b # 3 is VAX_730
.word 6f-0b # 4 is VAX_8600
#if defined(VAX750) && !defined(MRSP)
#if defined(VAX750) || defined(VAX730)
#if defined(VAX750) || defined(VAX730)
PUSHR; calls $0,_tuxintr; POPR
/* DZ-11 pseudo-DMA output: walk each line's pdma structure, feeding
   the next output byte to the transmit buffer; when a line's buffer
   is exhausted (p_mem >= p_end) call its interrupt routine (dzxint)
   via the pdma entry.  Loop labels dzploop/dzpcall are defined
   outside this excerpt. */
movab _dzpdma(r0),r3 # pdma structure base
movl (r0)+,r1 # device register address
movzbl 1(r1),r2 # get line number from the device csr
bicb2 $0xf8,r2 # clear garbage bits (keep line 0-7)
addl2 r2,r0 # point at line's pdma structure
cmpl r2,(r0)+ # p_mem < p_end ?
bgequ dzpcall # no, go call dzxint
movb (r2)+,6(r1) # dztbuf = *p_mem++
brb dzploop # check for another line
pushl (r0)+ # push tty address
calls $1,*(r0) # call interrupt rtn (dzxint)
brb dzploop # check for another line
#if NUU > 0 && defined(UUDMA)
* Pseudo DMA routine for tu58 (on DL11)
/* Pseudo-DMA receive path for a TU58 on a DL11 (UUDMA): with the
   device IPL raised to keep disk interrupts out, poll data bytes
   straight out of the receiver into the packet buffer, accumulating
   the word checksum on the fly.  Anything unusual (error bit, wrong
   state, command packet) is handed back to uurintr via rsb. */
movl 16(r2),r2 # r2 = uuaddr
movab _uu_softc(r3),r5 # r5 = uuc
cvtwl 2(r2),r1 # c = uuaddr->rdb
bbc $15,r1,1f # if (c & UUDB_ERROR)
movl $13,16(r5) # uuc->tu_state = TUC_RCVERR;
rsb # let uurintr handle it
tstl 4(r5) # if (uuc->tu_rcnt) {
movb r1,*0(r5) # *uuc->tu_rbptr++ = r1
decl 4(r5) # if (--uuc->tu_rcnt)
POPR # registers saved in ubglue.s
cmpl 16(r5),$8 # if (uuc->tu_state != TUS_GETH)
beql 2f # let uurintr handle it
mull2 $14,r0 # sizeof(uudata[ctlr]) = 14
movab _uudata(r0),r4 # data = &uudata[ctlr];
cmpb $1,(r4) # if (data->pk_flag != TUF_DATA)
/* this is for command packets */
beql 1f # r0 = uuc->tu_rbptr
movl 24(r5),r0 # r0 = uuc->tu_addr
movzbl 1(r4),r3 # counter to r3 (data->pk_count)
movzwl (r4),r1 # first word of checksum (=header)
mfpr $IPL,-(sp) # s = spl5();
mtpr $0x15,$IPL # to keep disk interrupts out
clrw (r2) # disable receiver interrupts
3: bbc $7,(r2),3b # while ((uuaddr->rcs & UUCS_READY)==0);
cvtwb 2(r2),(r0)+ # *buffer = uuaddr->rdb & 0xff
sobgtr r3,1f # continue with next byte ...
addw2 2(r2),r1 # unless this was the last (odd count)
1: bbc $7,(r2),1b # while ((uuaddr->rcs & UUCS_READY)==0);
cvtwb 2(r2),(r0)+ # *buffer = uuaddr->rdb & 0xff
addw2 -2(r0),r1 # add completed word to checksum..
adwc $0,r1 # ..propagating the carry (end-around)
sobgtr r3,3b # loop while r3 > 0
* We're ready to get the checksum
1: bbc $7,(r2),1b # while ((uuaddr->rcs & UUCS_READY)==0);
cvtwb 2(r2),12(r4) # get first (lower) byte
cvtwb 2(r2),13(r4) # ..and second
cmpw 12(r4),r1 # is checksum ok?
movl $14,16(r5) # uuc->tu_state = TUS_CHKERR
movl $11,16(r5) # uuc->tu_state = TUS_GET (ok)
movw $0x40,(r2) # enable receiver interrupts
mtpr (sp)+,$IPL # splx(s);
rsb # continue processing in uurintr
#if defined(VAX750) && !defined(MRSP)
* Pseudo DMA routine for VAX-11/750 console tu58
/* Pseudo-DMA receive path for the VAX-11/750 console TU58: same
   structure as the UUDMA path above, but the device is reached via
   the CSRS/CSRD internal processor registers rather than UNIBUS
   addresses.  The byte-wait subroutine at 5f apparently returns the
   received byte in r5 (see "movb r5,(r1)+") and also uses r5 as its
   timeout counter at its head -- register roles here depend on code
   not visible in this excerpt; confirm against the full file. */
tstl 4(r5) # if (tu.tu_rcnt) {
mfpr $CSRD,r1 # get data from tu58
movb r1,*0(r5) # *tu.tu_rbptr++ = r1
decl 4(r5) # if (--tu.tu_rcnt)
POPR # registers saved in ubglue.s
cmpl 16(r5),$8 # if (tu.tu_state != TUS_GETH)
beql 2f # let turintr handle it
movab _tudata,r4 # r4 = tudata
cmpb $1,(r4) # if (tudata.pk_flag != TUF_DATA)
bneq 3b # let turintr handle it
movl 24(r5),r1 # get buffer pointer to r1
movzbl 1(r4),r3 # counter to r3
movzwl (r4),r0 # first word of checksum (=header)
mtpr $0,$CSRS # disable receiver interrupts
bsbw 5f # wait for next byte
movb r5,(r1)+ # *buffer = rdb
sobgtr r3,1f # continue with next byte ...
mfpr $CSRD,r2 # unless this was the last (odd count)
1: bsbw 5f # wait for next byte
movb r5,(r1)+ # *buffer = rdb
movzwl -2(r1),r2 # get the last word back from memory
addw2 r2,r0 # add to checksum..
adwc $0,r0 # get the carry
sobgtr r3,3b # loop while r3 > 0
* We're ready to get the checksum.
movb r5,12(r4) # get first (lower) byte
movb r5,13(r4) # ..and second
cmpw 12(r4),r0 # is checksum ok?
movl $14,16(r5) # tu.tu_state = TUS_CHKERR
movl $11,16(r5) # tu.tu_state = TUS_GET
mtpr $0x40,$CSRS # enable receiver interrupts
rsb # continue processing in turintr
* Loop until a new byte is ready from
* the tu58, make sure we don't loop forever
movl $5000,r5 # loop max 5000 times
movl $13,16(r5) # timed out: return TUS_RCVERR
tstl (sp)+ # pop 5f's return address; let turintr handle it
* Stray UNIBUS interrupt catch routines
/* Stray-UNIBUS-interrupt catcher: 128 identical PJ entries.  Each
   entry saves r0-r5 and jsb's to _Xustray; the handler recovers the
   vector by subtracting the entry's address from the jsb return pc
   (the +8 bias implies each PJ entry occupies 8 bytes). */
#define PJ PUSHR;jsb _Xustray
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
subl3 $_catcher+8,(sp)+,r10 # r10 = byte offset of entry -> encodes vector
subl3 $_catcher+8,(sp)+,r0 # same recovery on the alternate path
PRINTF(2, "uba?: stray intr ipl %x vec %o\n")
* Emulation OpCode jump table:
* ONLY GOES FROM 0xf8 (-8) TO 0x3B (59)
/* Jump table for emulated (packed-decimal and character-string)
   opcodes, indexed by opcode+8 so it covers 0xf8 (-8) through 0x3b.
   Unimplemented slots point at noemulate. */
#define NOEMULATE .long noemulate
#define EMULATE(a) .long _EM/**/a
/* f8 */ EMULATE(ashp); EMULATE(cvtlp); NOEMULATE; NOEMULATE
/* fc */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 00 */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 04 */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 08 */ EMULATE(cvtps); EMULATE(cvtsp); NOEMULATE; EMULATE(crc)
/* 0c */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 10 */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 14 */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 18 */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 1c */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 20 */ EMULATE(addp4); EMULATE(addp6); EMULATE(subp4); EMULATE(subp6)
/* 24 */ EMULATE(cvtpt); EMULATE(mulp); EMULATE(cvttp); EMULATE(divp)
/* 28 */ NOEMULATE; EMULATE(cmpc3); EMULATE(scanc); EMULATE(spanc)
/* 2c */ NOEMULATE; EMULATE(cmpc5); EMULATE(movtc); EMULATE(movtuc)
/* 30 */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 34 */ EMULATE(movp); EMULATE(cmpp3); EMULATE(cvtpl); EMULATE(cmpp4)
/* 38 */ EMULATE(editpc); EMULATE(matchc); EMULATE(locc); EMULATE(skpc)
* Trap and fault vector routines
#define TRAP(a) pushl $T_/**/a; jbr alltraps
* Ast delivery (profiling and/or reschedule)
pushl $0; TRAP(PRIVINFLT)
* The following is called with the stack set up as follows:
* 32(sp): Operand 7 (unused)
* 36(sp): Operand 8 (unused)
* 48(sp): TOS before instruction
* Each individual routine is called with the stack set up as follows:
* (sp): Return address of trap handler
* 4(sp): Opcode (will get return PSL)
* 36(sp): saved register 11
* 40(sp): saved register 10
* 52(sp): TOS before instruction
/* Emulated-instruction dispatch: stash r10/r11 in the unused operand
   slots of the emulation stack frame, index _emJUMPtable by the
   (biased) opcode, and jsb to the emulation routine.  The
   out-of-range branch target for the subl3 range check is outside
   this excerpt. */
movl r11,32(sp) # save register r11 in unused operand slot 7
movl r10,36(sp) # save register r10 in unused operand slot 8
cvtbl (sp),r10 # get opcode (sign-extended byte)
addl2 $8,r10 # shift negative opcodes (0xf8.. map to 0..)
subl3 r10,$EMUTABLE,r11 # forget it if opcode is out of range
movl _emJUMPtable[r10],r10 # call appropriate emulation routine
jsb (r10) # routines put return values into regs 0-5
movl 32(sp),r11 # restore register r11
movl 36(sp),r10 # restore register r10
insv (sp),$0,$4,44(sp) # and condition codes in Opcode spot
addl2 $40,sp # adjust stack for return
addl2 $48,sp # adjust stack for return (alternate path)
.word 0xffff # "reserved instruction fault"
.word 0xffff # "reserved instruction fault"
jsb Fastreclaim # try and avoid pagein
/* Common trap/syscall C-call sequence: pass the user stack pointer
   in and out around trap()/syscall(), pop the type and code words,
   and clamp IPL before returning. */
mfpr $USP,-(sp); calls $0,_trap; mtpr (sp)+,$USP
addl2 $8,sp # pop type, code
mtpr $HIGH,$IPL ## don't go to a higher IPL (GROT)
mfpr $USP,-(sp); calls $0,_syscall; mtpr (sp)+,$USP
addl2 $8,sp # pop type, code
mtpr $HIGH,$IPL ## don't go to a higher IPL (GROT)
* Mbmap and Usrptmap are enlarged by CLSIZE entries
* as they are managed by resource maps starting with index 1 or CLSIZE.
/* System page-table layout.  vaddr(x) converts a pte's address
   within _Sysmap to the kernel virtual address it maps.  SYSMAP
   defines the global pte label _mname and sets vname to that virtual
   address.  NOTE(review): the npte argument is not consumed by the
   visible macro body -- presumably a .space allocation line is
   elided from this excerpt; confirm against the full file. */
#define vaddr(x) ((((x)-_Sysmap)/4)*NBPG+0x80000000)
#define SYSMAP(mname, vname, npte) \
_/**/mname: .globl _/**/mname; \
.set _/**/vname,vaddr(_/**/mname)
SYSMAP(Sysmap ,Sysbase ,SYSPTSIZE )
SYSMAP(Forkmap ,forkutl ,UPAGES )
SYSMAP(Xswapmap ,xswaputl ,UPAGES )
SYSMAP(Xswap2map,xswap2utl ,UPAGES )
SYSMAP(Swapmap ,swaputl ,UPAGES )
SYSMAP(Pushmap ,pushutl ,UPAGES )
SYSMAP(Vfmap ,vfutl ,UPAGES )
SYSMAP(CMAP1 ,CADDR1 ,1 )
SYSMAP(CMAP2 ,CADDR2 ,1 )
SYSMAP(alignmap ,alignutl ,1 ) /* XXX */
SYSMAP(msgbufmap,msgbuf ,MSGBUFPTECNT )
SYSMAP(Mbmap ,mbutl ,NMBCLUSTERS*CLSIZE+CLSIZE )
SYSMAP(camap ,cabase ,16*CLSIZE )
SYSMAP(profmap ,profbase ,600*CLSIZE )
SYSMAP(ecamap ,calimit ,0 )
SYSMAP(UMBAbeg ,umbabeg ,0 )
SYSMAP(Nexmap ,nexus ,16*MAXNNEXUS )
SYSMAP(UMEMmap ,umem ,UBAPAGES*NUBA )
SYSMAP(Ioamap ,ioa ,MAXNIOA*IOAMAPSIZ/NBPG )
SYSMAP(UMBAend ,umbaend ,0 )
SYSMAP(Clockmap ,cldevice ,1 )
SYSMAP(Ka630map ,ka630cpu ,1 )
SYSMAP(Usrptmap ,usrpt ,USRPTSIZE+CLSIZE )
.set _Syssize,(eSysmap-_Sysmap)/4
* ipl 0x1f; mapen 0; scbb, pcbb, sbr, slr, isp, ksp not set
/* set system control block base and system page table params */
mtpr $_scb-0x80000000,$SCBB
mtpr $_Sysmap-0x80000000,$SBR
/* double map the kernel into the virtual user addresses of phys mem */
/* set ISP and get cpu type */
movl $_intstack+NISP*NBPG,sp
movl r0,(r0)+ # rp_selfref
movl r1,(r0)+ # rp_dumprout
1: addl2 (r1)+,r3; sobgtr r2,1b
movl r3,(r0)+ # rp_chksum
1: pushl $4; pushl r7; calls $2,_badaddr; tstl r0; bneq 9f
acbl $MAXMEM*1024-1,$64*1024,r7,1b
/* clear memory from kernel bss and pages for proc 0 u. and page table */
addl2 $(UPAGES*NBPG)+NBPG+NBPG,r5
1: clrq (r6); acbl r5,$8,r6,1b
/* trap() and syscall() save r0-r11 in the entry mask (per ../h/reg.h) */
/* panic() is convenient place to save all for debugging */
/* initialize system page table: uba vectors and int stack writeable */
/* Initialize the system page table.  Each pass: take an end address,
   strip the kernel-virtual bit (bbcc $31 clears bit 31), convert to
   a page-frame number (ashl $-PGSHIFT), then fill ptes up to it with
   the given protection; finally flush the TB and turn on mapping. */
movab eintstack,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
1: bisl3 $PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
/* make rpb, scb read-only as red zone for interrupt stack */
/* make kernel text space read-only */
movab _etext+NBPG-1,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
1: bisl3 $PG_V|PG_URKR,r2,_Sysmap[r2]; aoblss r1,r2,1b
/* make kernel data, bss, read-write */
movab _end+NBPG-1,r1; bbcc $31,r1,0f; 0:; ashl $-PGSHIFT,r1,r1
1: bisl3 $PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
/* now go to mapped mode */
mtpr $0,$TBIA; mtpr $1,$MAPEN; jmp *$0f; 0:
ashl $-PGSHIFT,r7,_maxmem
/* setup context for proc[0] == Scheduler */
/* Set up proc[0]'s page table and (minimally) its pcb.  This excerpt
   is fragmentary -- several lines between these fragments are not
   visible, so register contents at each step should be confirmed
   against the full file. */
bicl2 $NBPG-1,r6 # make page boundary
/* setup page table for proc[0] */
ashl $-PGSHIFT,r6,r3 # r3 = btoc(r6)
bisl3 $PG_V|PG_KW,r3,_Usrptmap # init first upt entry
moval -4*UPAGES(r0)[r1],r2
/* setup mapping for UPAGES of _u */
movl $UPAGES,r2; movab _u+NBPG*UPAGES,r1; addl2 $UPAGES,r3; jbr 2f
bisl3 $PG_V|PG_URKW,r3,-(r0) # u. pages: user-read, kernel-write
/* initialize (slightly) the pcb */
movab UPAGES*NBPG(r1),PCB_KSP(r1) # kernel sp = top of u. area
movb $4,PCB_P0LR+3(r1) # disable ast (astlvl = 4)
movl $CLSIZE,PCB_SZPT(r1) # init u.u_pcb.pcb_szpt
movab 1f,PCB_PC(r1) # initial pc
clrl PCB_PSL(r1) # mode(k,k), ipl=0
mtpr r3,$PCBB # first pcbb
/* set regs, p0br, p0lr, p1br, p1lr, astlvl, ksp and change to kernel mode */
/* put signal trampoline code in u. area */
movc3 $19,sigcode,PCB_SIGC(r0)
/* save boot device in global _bootdev */
/* save reboot flags in global _boothowto */
/* calculate firstaddr, and call main() */
movab _end+NBPG-1,r0; bbcc $31,r0,0f; 0:; ashl $-PGSHIFT,r0,-(sp)
addl2 $UPAGES+1,(sp); calls $1,_main
/* proc[1] == /etc/init now running here; run icode */
pushl $PSL_CURMOD|PSL_PRVMOD; pushl $0; rei
/* signal trampoline code: it is known that this code takes exactly 19 bytes */
/* in ../vax/pcb.h and in the movc3 above */
/* Signal trampoline, copied into the u. area (exactly 19 bytes; see
   the movc3 and ../vax/pcb.h).  The inner code calls the user's
   handler via callg; on return, chmk $103 enters sigreturn, which
   never returns -- hence the halt firewall. */
calls $4,8(pc) # params pushed by sendsig
movl sp,ap # calls frame built by sendsig
chmk $103 # sigreturn: cleanup mask and onsigstack
halt # sigreturn() does not return!
.word 0x3f # entry mask: save registers 0-5
callg (ap),*16(ap) # call the signal handler
ret # return to code above
* Icode is copied out to process 1 to exec /etc/init.
* If the exec fails, process 1 exits.
argv: .long init+5-_icode
/* Profiled entry macros: hook mcount on entry (JSBENTRY builds a
   fake calls-style frame so mcount can walk it, preserving the
   caller's registers around the call). */
#define ENTRY(name, regs) \
.globl _/**/name; .align 1; _/**/name: .word regs; jsb mcount
#define JSBENTRY(name, regs) \
.globl _/**/name; _/**/name: \
movl fp,-(sp); movab -12(sp),fp; pushr $(regs); jsb mcount; \
popr $(regs); movl (sp)+,fp
/* Non-profiling variants -- presumably the other arm of a GPROF
   conditional not visible in this excerpt. */
#define ENTRY(name, regs) \
.globl _/**/name; .align 1; _/**/name: .word regs
#define JSBENTRY(name, regs) \
.globl _/**/name; _/**/name:
* see if access addr with a len type instruction causes a machine check
* len is length of access (1=byte, 2=short, 4=long)
/* Probe addr with a len-sized access (1=byte, 2=word, 4=long),
   catching any machine check via the nofault hook: on a machine
   check control resumes at 2f (outside this excerpt). */
movab 2f,nofault # jump to 2f on machcheck
1: bbc $1,r4,1f; tstw (r3) # len bit 1 set: word access
1: bbc $2,r4,1f; tstl (r3) # len bit 2 set: longword access
1: clrl r0 # made it w/o machine checks
* update profiling information for the user
* addupc(pc, &u.u_prof, ticks)
/* addupc(pc, &u.u_prof, ticks): offset the pc by the profile base,
   then halve pc and scale (extzv $1,$31 is an unsigned shift right
   by one) for the bucket computation. */
movl 8(ap),r2 # &u.u_prof
subl3 8(r2),4(ap),r0 # corrected pc = pc - pr_off
extzv $1,$31,r0,r0 # logical right shift by 1
extzv $1,$31,12(r2),r1 # ditto for scale
* Copy a null terminated string from the user address space into
* the kernel address space.
* copyinstr(fromaddr, toaddr, maxlength, &lencopied)
/* copyinstr(fromaddr, toaddr, maxlength, &lencopied): copy a
   NUL-terminated string from user to kernel space, one user page at
   a time, probing each page readable before touching it.  Returns
   ENOENT if maxlength is exhausted before a NUL; optionally stores
   the copied length (including the NUL) through &lencopied. */
movl 12(ap),r6 # r6 = max length
movl 4(ap),r1 # r1 = user address
bicl3 $~(NBPG*CLSIZE-1),r1,r2 # r2 = bytes on first page
movl 8(ap),r3 # r3 = kernel address
cmpl r6,r2 # r2 = min(bytes on page, length left);
prober $3,r2,(r1) # bytes accessible from user mode?
subl2 r2,r6 # update bytes left count
# fake the locc instr. for processors that don't have it
locc $0,r2,(r1) # null byte found?
subl2 r2,r1 # back up pointer updated by `locc'
movc3 r2,(r1),(r3) # copy in next piece
movl $(NBPG*CLSIZE),r2 # check next page
tstl r6 # run out of space?
movl $ENOENT,r0 # set error code and return
tstl 16(ap) # return length?
subl3 r6,12(ap),r6 # actual len = maxlen - unused pages
subl2 r0,r6 # - unused on this page
addl3 $1,r6,*16(ap) # + the null byte
subl2 r0,r2 # r2 = number of bytes to move
subl2 r2,r1 # back up pointer updated by `locc'
incl r2 # copy null byte as well
movc3 r2,(r1),(r3) # copy in last piece
* Copy a null terminated string from the kernel
* address space to the user address space.
* copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
/* copyoutstr(fromaddr, toaddr, maxlength, &lencopied): mirror of
   copyinstr -- copy a NUL-terminated string from kernel to user
   space, probing each destination user page writable first. */
movl 12(ap),r6 # r6 = max length
movl 4(ap),r1 # r1 = kernel address
movl 8(ap),r3 # r3 = user address
bicl3 $~(NBPG*CLSIZE-1),r3,r2 # r2 = bytes on first page
cmpl r6,r2 # r2 = min(bytes on page, length left);
probew $3,r2,(r3) # destination bytes writable from user mode?
subl2 r2,r6 # update bytes left count
# fake the locc instr. for processors that don't have it
locc $0,r2,(r1) # null byte found?
subl2 r2,r1 # back up pointer updated by `locc'
movc3 r2,(r1),(r3) # copy out next piece
movl $(NBPG*CLSIZE),r2 # check next page
tstl r6 # run out of space?
movl $ENOENT,r0 # set error code and return
* Copy a null terminated string from one point to another in
* the kernel address space.
* copystr(fromaddr, toaddr, maxlength, &lencopied)
/* copystr(fromaddr, toaddr, maxlength, &lencopied): kernel-to-kernel
   string copy; no probing needed, so it works in 65535-byte chunks
   (the largest count locc/movc3 handle). */
movl 12(ap),r6 # r6 = max length
movl 4(ap),r1 # r1 = src address
movl 8(ap),r3 # r3 = dest address
movzwl $65535,r2 # r2 = bytes in first chunk
cmpl r6,r2 # r2 = min(bytes in chunk, length left);
subl2 r2,r6 # update bytes left count
# fake the locc instr. for processors that don't have it
locc $0,r2,(r1) # null byte found?
subl2 r2,r1 # back up pointer updated by `locc'
movc3 r2,(r1),(r3) # copy next piece
tstl r6 # run out of space?
movl $ENOENT,r0 # set error code and return
* Copy specified amount of data from user space into the kernel
* r1 == from (user source address)
* r3 == to (kernel destination address)
/* Copyin: jsb-called with r1 = user source, r3 = kernel destination,
   r5 = byte count (see block comment above).  Short moves are probed
   in one go; longer ones probe the partial first page, then work in
   two-page chunks. */
JSBENTRY(Copyin, R1|R3|R5)
cmpl r5,$(NBPG*CLSIZE) # probing one page or less ?
prober $3,r5,(r1) # bytes accessible ?
/* clrl r0 # redundant */
blss ersb # negative length?
bicl3 $~(NBPG*CLSIZE-1),r1,r0 # r0 = offset within first page
subl3 r0,$(NBPG*CLSIZE),r0 # r0 = bytes left on first page
addl2 $(NBPG*CLSIZE),r0 # plus one additional full page
movl $(2*NBPG*CLSIZE),r0 # next amount to move
prober $3,r0,(r1) # bytes accessible ?
/* clrl r0 # redundant */
movl (sp)+,r6 # restore r6 (success path)
movl (sp)+,r6 # restore r6 (error path)
* Copy specified amount of data from kernel to the user space
* r1 == from (kernel source address)
* r3 == to (user destination address)
/* Copyout: jsb-called with r1 = kernel source, r3 = user destination,
   r5 = byte count; mirror of Copyin using write probes on the
   destination. */
JSBENTRY(Copyout, R1|R3|R5)
cmpl r5,$(NBPG*CLSIZE) # moving one page or less ?
probew $3,r5,(r3) # bytes writeable?
/* clrl r0 # redundant */
blss ersb # negative length?
bicl3 $~(NBPG*CLSIZE-1),r3,r0 # r0 = offset within first page
subl3 r0,$(NBPG*CLSIZE),r0 # r0 = bytes left on first page
addl2 $(NBPG*CLSIZE),r0 # plus one additional full page
movl $(2*NBPG*CLSIZE),r0 # next amount to move
probew $3,r0,(r3) # bytes writeable?
/* clrl r0 # redundant */
movl (sp)+,r6 # restore r6
#ifdef notdef /* this is now expanded completely inline */
/* setjmp/longjmp fragments.  longjmp walks the calls-frame chain,
   redirecting each frame's return pc to itself until it reaches the
   saved frame; the all-registers setjmp variant saves ap/fp/sp/pc
   for resume() (see the comments at the fragment boundaries).
   Fragmentary excerpt -- control flow between pieces not visible. */
movl fp,(r0)+ # current stack frame
movl (sp),(r0) # resuming pc
#define PCLOC 16 /* location of pc in calls frame */
#define APLOC 8 /* location of ap,fp in calls frame */
movl (r0)+,newfp # must save parameters in memory as all
movl (r0),newpc # registers may be clobbered.
cmpl fp,newfp # are we there yet?
moval 1b,PCLOC(fp) # redirect return pc to us!
beql 3f # did we miss our frame?
movl newpc,r0 # all done, just return to the `setjmp'
* setjmp that saves all registers as the call frame may not
* be available to recover them in the usual manner by longjmp.
* Called before swapping out the u. area, restored by resume()
movq APLOC(fp),(r0)+ # save ap, fp
addl3 $8,ap,(r0)+ # save sp
movl PCLOC(fp),(r0) # save pc
* The following primitives use the fancy VAX instructions
* much like VMS does. _whichqs tells which of the 32 queues _qs
* have processes in them. Setrq puts processes into queues, Remrq
* removes them from queues. The running process is on no queue,
* other processes are on a queue related to p->p_pri, divided by 4
* actually to shrink the 0-127 range of priorities into the 32 available
* Setrq(p), using fancy VAX instructions.
* Call should be made at splclock(), and p->p_stat should be SRUN
/* Setrq(p): insert p at the tail of run queue p->p_pri/4 and mark
   that queue non-empty in the _whichqs bitmask. */
tstl P_RLINK(r0) ## firewall: p->p_rlink must be 0
movzbl P_PRI(r0),r1 # put on queue which is p->p_pri / 4
insque (r0),*4(r2) # at end of queue
bbss r1,_whichqs,set2 # mark queue non-empty
/* Remrq(p) fragment: panic path and rlink reset for the firewall. */
pushab rem3 # panic msg: it wasn't recorded to be on its q
clrl P_RLINK(r0) ## for firewall checking
* Masterpaddr is the p->p_addr of the running process on the master
* processor. When a multiprocessor system, the slave processors will have
* an array of slavepaddr's.
* When no processes are on the runq, Swtch branches to idle
* to wait for something to come ready.
mtpr $0,$IPL # must allow interrupts here
tstl _whichqs # look for non-empty queue
* Swtch(), using fancy VAX instructions
/* Swtch() core: find the highest-priority non-empty run queue via
   ffs on _whichqs, dequeue its head under IPL 0x18, and resume it.
   The trailing fragments belong to resume()'s CMAP2 save/restore and
   the pcb_sswap longjmp path; intervening lines are not visible. */
sw1: ffs $0,$32,_whichqs,r0 # look for non-empty queue
beql idle # if none, idle
mtpr $0x18,$IPL # lock out all so _whichqs==_qs
bbcc r0,_whichqs,sw1 # proc moved via lbolt interrupt
remque *(r1),r2 # r2 = p = highest pri process
bvs badsw # make sure something was there
insv $1,r0,$1,_whichqs # still more procs in this queue
tstl P_WCHAN(r2) ## firewalls
cmpl r0,_masterpaddr # resume of current proc is easy
ashl $PGSHIFT,r0,r0 # r0 = pcbb(p)
mtpr $HIGH,$IPL # no interrupts, please
movl _CMAP2,_u+PCB_CMAP2 # save CMAP2 in the pcb ("yech")
movl _u+PCB_CMAP2,_CMAP2 # restore CMAP2 from the pcb ("yech")
movl _u+PCB_SSWAP,r0 # longjmp to saved context
cmpl r1,sp # must be a pop
movl (r0),(sp) # address to return to
movl $PSL_PRVMOD,4(sp) # ``cheating'' (jfr)
* {fu,su},{byte,word}, all massaged by asm.sed to jsb's
* Copy 1 relocation unit (NBPG bytes)
* from user virtual address to physical address
/* copyseg: map the given physical page through CMAP2/CADDR2 and
   movc3 one page from the user virtual source. */
bisl3 $PG_V|PG_KW,8(ap),_CMAP2
mtpr $_CADDR2,$TBIS # invalidate old CADDR2 translation
movc3 $NBPG,*4(ap),_CADDR2
* zero out physical memory
* specified in relocation units (NBPG bytes)
/* clearseg: map the page through CMAP1/CADDR1 and movc5-zero it. */
bisl3 $PG_V|PG_KW,4(ap),_CMAP1
movc5 $0,(sp),$0,$NBPG,_CADDR1
* Given virtual address, byte count, and rw flag
* returns 0 on no access.
/* useracc fragments: pick read or write probe, then loop a page
   (plus the partial first page) at a time via acbl. */
tstl 12(ap) # test for read access ?
cmpl $NBPG,r1 # can we do it in one probe ?
acbl $NBPG+1,$-NBPG,r1,uaw1 # write-probe loop, page at a time
acbl $NBPG+1,$-NBPG,r1,uar1 # read-probe loop, page at a time
* kernacc - check for kernel access privileges
* We can't use the probe instruction directly because
* it ors together current and previous mode.
/* kernacc: check kernel access rights page by page by inspecting the
   ptes directly -- the probe instruction can't be used because it
   ors together current and previous mode (see comment above). */
movl 4(ap),r0 # virtual address
mfpr $SBR,r2 # address and length of page table (system)
mfpr $P1BR,r2 # user P1 (stack) page table instead
addl3 8(ap),r0,r1 # ending virtual address
blss kacerr # address too low
cmpl r1,r3 # compare last page to P0LR or SLR
bgtr kacerr # address too high
bbc $31,r3,kacerr # pte valid bit is off
cmpzv $27,$4,r3,$1 # check 4-bit protection code against 1
bleq kacerr # no access allowed
bneq kacc5 # only check read access
cmpzv $27,$2,r3,$3 # check low 2 bits of prot code
beql kacerr # 3 means no write access
aoblss r1,r0,kacc4 # next page
* Extracted and unrolled most common case of pagein (hopefully):
* resident and not on free list (reclaim of page is purely
* for the purpose of simulating a reference bit)
* CLSIZE of 2, any bit fields in pte's
/* Fastreclaim: unrolled fast path of pagein for the common case --
   the page is resident and not on the free list, so reclaiming it
   merely simulates a reference bit (see block comment above).  Any
   condition outside the fast path pops saved registers and rsb's so
   the full C pagein handles it.  Assumes CLSIZE == 2 (the bicl2 $1
   cluster rounding and the two-pte copy). */
extzv $9,$23,28(sp),r3 # virtual page number from the fault frame
bicl2 $1,r3 # v = clbase(btop(virtaddr));
movl _u+U_PROCP,r5 # p = u.u_procp
movl $1,r2 # type = CTEXT;
jlssu 1f # if (isatsv(p, v)) {
addl3 P_TSIZE(r5),P_DSIZE(r5),r0
addl2 P_P0BR(r5),r4 # tptopte(p, vtotp(p, v));
cvtwl P_SZPT(r5),r4 # } else (isassv(p, v)) {
addl2 P_P0BR(r5),r4 # sptopte(p, vtosp(p, v));
beql 2f # if (pte->pg_v || pte->pg_fod)
POPR; rsb # let pagein handle it
bicl3 $0xffe00000,(r4),r0 # r0 = pte->pg_pfnum
jneq 2f # if (pte->pg_pfnum == 0)
POPR; rsb # let pagein handle it
incl r0 # pgtocm(pte->pg_pfnum)
addl2 _cmap,r0 # &cmap[pgtocm(pte->pg_pfnum)]
jeql 2f # if (type == CTEXT &&
jbc $C_INTRANS,(r0),2f # c_intrans)
POPR; rsb # let pagein handle it
jbc $C_FREE,(r0),2f # if (c_free)
POPR; rsb # let pagein handle it
bisb2 $0x80,3(r4) # pte->pg_v = 1;
jbc $26,4(r4),2f # if (anycl(pte, pg_m)
bisb2 $0x04,3(r4) # pte->pg_m = 1;
bisw3 r0,r1,6(r4) # distcl(pte);
mtpr r0,$TBIS # tbiscl(v);
jeql 2f # if (type == CTEXT)
movl X_CADDR(r0),r5 # for (p = p->p_textp->x_caddr; p; ) {
addl3 P_P0BR(r5),r3,r0 # tpte = tptopte(p, tp);
bisb2 $1,P_FLAG+3(r5) # p->p_flag |= SPTECHG;
movl (r4),(r0)+ # for (i = 0; i < CLSIZE; i++)
movl 4(r4),(r0) # tpte[i] = pte[i];
movl P_XLINK(r5),r5 # p = p->p_xlink;
2: # collect a few statistics...
incl _u+U_RU+RU_MINFLT # u.u_ru.ru_minflt++;
incl V_FAULTS(r0) # cnt.v_faults++;
incl V_PGREC(r0) # cnt.v_pgrec++;
incl V_FASTPGREC(r0) # cnt.v_fastpgrec++;
incl V_TRAP(r0) # cnt.v_trap++;
addl2 $8,sp # pop pc, code
mtpr $HIGH,$IPL ## dont go to a higher IPL (GROT)