/*
 * NOTE(review): fragmentary extract of a BSD locore.s; block-comment
 * delimiters and macro bodies are truncated in this view.
 */
* Copyright (c) 1980, 1986 Regents of the University of California.
* All rights reserved. The Berkeley software License Agreement
* specifies the terms and conditions for redistribution.
* @(#)locore.s 1.1 (Berkeley) %G%
#include "../i386/trap.h"
.set SYSTEM,0xFE000000 # virtual address of system start
/*note: gas copies sign bit (e.g. arithmetic >>), can't do SYSTEM>>22! */
.set SYSPDROFF,0x3F8 # Page dir
* User structure is UPAGES at top of user space.
* I/O Memory Map is 0xFFFA0000-0xFFFFFFFF (virtual == real)
.set _IOmembase,0xFFFA0000
* Mbmap and Usrptmap are enlarged by CLSIZE entries
* as they are managed by resource maps starting with index 1 or CLSIZE.
/*
 * SYSMAP carves out a region of the system page table: it defines the
 * map label _mname and the virtual address _vname the region maps.
 * NOTE(review): the body references `ptes', not defined in this extract;
 * presumably a running pte counter advanced by npte -- confirm against
 * the full file.  ZSYSMAP is the zero-size variant marking a region end.
 */
#define SYSMAP(mname, vname, npte) \
_##mname: .globl _##mname; \
.set _##vname,ptes*4096+SYSTEM; \
#define ZSYSMAP(mname, vname, npte) \
_##mname: .globl _##mname; \
.set _##vname,ptes*4096+SYSTEM; \
# assumed to start at data mod 4096
SYSMAP(Sysmap,Sysbase,SYSPTSIZE)
SYSMAP(Forkmap,forkutl,UPAGES)
SYSMAP(Xswapmap,xswaputl,UPAGES)
SYSMAP(Xswap2map,xswap2utl,UPAGES)
SYSMAP(Swapmap,swaputl,UPAGES)
SYSMAP(Pushmap,pushutl,UPAGES)
SYSMAP(Vfmap,vfutl,UPAGES)
SYSMAP(alignmap,alignutl,1) /* XXX */
SYSMAP(msgbufmap,msgbuf,MSGBUFPTECNT)
/* size of the mbuf cluster map: pages for NMBCLUSTERS clusters plus
   CLSIZE slack for the resource-map index origin (see note above) */
.set mbxxx,(NMBCLUSTERS*MCLBYTES)
.set mbyyy,(mbxxx>>PGSHIFT)
.set mbpgs,(mbyyy+CLSIZE)
SYSMAP(Mbmap,mbutl,mbpgs)
* XXX: NEED way to compute kmem size from maxusers,
SYSMAP(kmempt,kmembase,300*CLSIZE)
SYSMAP(profmap,profbase,600*CLSIZE)
/* AT device memory: the 0xA0000-0xFFFFF hole (384K) */
.set atmemsz,0x100000-0xa0000
.set atpgs,(atmemsz>>PGSHIFT)
SYSMAP(ATDevmem,atdevbase,atpgs)
ZSYSMAP(ekmempt,kmemlimit,0)
SYSMAP(Usrptmap,usrpt,USRPTSIZE+CLSIZE)
# .set _Syssize,(eSysmap-_Sysmap)/4
/* align on next page boundary */
# . = . + NBPG - 1 & -NBPG /* align to page boundary-does not work*/
# .space (PGSIZE - ((eSysmap-_Sysmap) % PGSIZE)) % PGSIZE
_cpu: .long 0 # are we 386, 386sx, or 486
# XXX pass parameters on stack
/*
 * Size physical memory by writing a test pattern to one location per
 * page and reading it back.  NOTE(review): the compare branches and the
 * loop-control instructions (1b/2f targets) are missing from this
 * extract; only the probe bodies remain.  Paging is not yet enabled,
 * so globals are addressed physically via the -SYSTEM offset.
 */
xorl %eax,%eax # start with base memory at 0x0
movl $(0xA0000/NBPG),%ecx # look every 4K up to 640K
1: movl 0(%eax),%ebx # save location to check
movl $0xa55a5aa5,0(%eax) # write test pattern
cmpl $0xa55a5aa5,0(%eax) # does not check yet for rollover
movl %ebx,0(%eax) # restore memory
2: movl %eax,_basemem-SYSTEM
movl $0x100000,%eax # next, tally remaining memory
movl $((0xFA0000-0x100000)/NBPG),%ecx
1: movl 0(%eax),%ebx # save location to check
movl $0xa55a5aa5,0(%eax) # write test pattern
cmpl $0xa55a5aa5,0(%eax) # does not check yet for rollover
movl %ebx,0(%eax) # restore memory
2: movl %eax,_abovemem-SYSTEM
/* clear memory. is this redundant ? */
addl $(UPAGES*NBPG)+NBPG+NBPG+NBPG,%ecx
# txt+data+proc zero pt+u.
# shrl $2,%ecx # convert to long word count
* N.B. don't bother with making kernel text RO, as 386
* ignores R/W bit on kernel access!
# movl $_Syssize,%ecx # for this many pte s,
/*
 * Fill the boot page tables.  Each fill below sets up %ecx (pte count),
 * %eax (initial pte bits/physical address) and %ebx (table address);
 * NOTE(review): the store/loop instructions of each fill loop are
 * absent from this extract -- only the setup and the address-increment
 * lines remain.
 */
movl %esi,%ecx # this much memory,
shrl $PGSHIFT,%ecx # for this many pte s
movl $PG_V,%eax # having these bits set,
movl $_Sysmap-SYSTEM,%ebx # in the kernel page table,
# fill in kernel page table.
addl $NBPG,%eax # increment physical address
/* temporary double map virt == real */
movl $1024,%ecx # for this many pte s,
movl $PG_V,%eax # having these bits set,
movl $_tMap-SYSTEM,%ebx # in the temporary page table,
# fill in kernel page table.
addl $NBPG,%eax # increment physical address
/* map I/O memory virt == real */
movl $(1024-IOPTEOFF),%ecx # for this many pte s,
movl $(_IOmembase|PG_V),%eax # having these bits set, (perhaps URW?)
movl $_IOMap-SYSTEM,%ebx # in the temporary page table,
# fill in kernel page table.
addl $NBPG,%eax # increment physical address
/* map the AT memory hole (atpgs pages) for device access */
movl $atpgs,%ecx # for this many pte s,
movl $(IOPHYSmem|PG_V),%eax # having these bits set, (perhaps URW?)
movl $_ATDevmem-SYSTEM,%ebx # in the temporary page table,
# fill in kernel page table.
addl $NBPG,%eax # increment physical address
/*# map proc 0's page table*/
movl $_Usrptmap-SYSTEM,%ebx # get pt map address
lea (0*NBPG)(%esi),%eax # physical address of pt in proc 0
orl $PG_V,%eax # having these bits set,
/* map proc 0's u. pages; %esi presumably holds the first free
   physical address after the kernel -- TODO confirm in full file */
movl $UPAGES,%ecx # for this many pte s,
lea (1*NBPG)(%esi),%eax # physical address of _u in proc 0
orl $PG_V|PG_URKW,%eax # having these bits set,
lea (0*NBPG)(%esi),%ebx # physical address of stack pt in proc 0
# fill in proc 0 stack page table.
addl $NBPG,%eax # increment physical address
* Construct a page table directory
* (of page directory elements - pde's)
movl $_Sysmap-SYSTEM,%eax # physical address of kernel page table
orl $PG_V,%eax # pde entry is valid
movl $Npdes,%ecx # for this many pde s,
movl $_PDR-SYSTEM,%ebx # address of start of ptd
# lea (2*NBPG)(%esi),%ebx # address of ptd in proc 0 pt
addl $(SYSPDROFF*4), %ebx # offset of pde for kernel
addl $NBPG,%eax # increment physical address
# install a pde for temporary double map
movl $_tMap-SYSTEM,%eax # physical address of temp page table
orl $PG_V,%eax # pde entry is valid
movl $_PDR-SYSTEM,%ebx # address of start of ptd
# lea (2*NBPG)(%esi),%ebx # address of ptd in proc 0 pt
movl %eax,0(%ebx) # which is where temp maps!
# install a pde for IO memory
movl $_IOMap-SYSTEM,%eax # physical address of temp page table
orl $PG_V,%eax # pde entry is valid
movl $_PDR-SYSTEM,%ebx # address of start of ptd
# lea (2*NBPG)(%esi),%ebx # address of ptd in proc 0 pt
addl $(IOPDROFF*4), %ebx # offset of pde for kernel
movl %eax,0(%ebx) # which is where temp maps!
# install a pde to map _u for proc 0
lea (0*NBPG)(%esi),%eax # physical address of pt in proc 0
orl $PG_V,%eax # pde entry is valid
movl $_PDR-SYSTEM,%ebx # address of start of ptd
# lea (2*NBPG)(%esi),%ebx # address of ptd in proc 0 pt
addl $(UPDROFF*4), %ebx # offset of pde for kernel
movl %eax,0(%ebx) # which is where _u maps!
# movl %eax,_PDR-SYSTEM+(1024-16-1)*4
# shrl $PGSHIFT+IDXSHF,%eax
/* load the page directory base and turn on paging:
   0x80000001 = CR0.PG | CR0.PE.  NOTE(review): the `movl %eax,%cr0'
   that actually writes CR0 is missing from this extract. */
movl $_PDR-SYSTEM,%eax # address of start of ptd
# lea (2*NBPG)(%esi),%eax # address o ptd in proc 0 pt
movl %eax,%cr3 # load ptd addr into mmu
movl $0x80000001,%eax # and let s page!
pushl $begin # jump to high mem!
movl $_u+UPAGES*NBPG-4,%eax
movw %ax,0x472 # warm boot
movl $0,%esp # segment violation
/*
 * NOTE(review): the macro definitions the following backslash-continued
 * lines belong to (pte/pde builders and PTD) are truncated in this
 * extract, and the continuation chains are broken -- compare against
 * the full file before editing.
 */
# build a pte pointing to physical p; leave it at loc b+o \
# build a pde at virtual addr v, pointing to physical pte p \
shrl $PGSHIFT+IDXSHFT,%edx \
movl $Syssize,%ecx # this many pte s
xorl %ebx,%ebx # starting at physical 0
xorl %edx,%edx # starting at virtual XX000000
/* Initialize Proc 0 page table map */
/* Initialize Udot map */
movl $UPAGES,%ecx # this many pte s
xorl %ebx,%ebx # starting at physical 0
movl $_u,%edx # starting at virtual _u
/* Initialize page table directory */
PTD(P(_ptd), 0, P(SysMap)) # bottom double mapped to system
PTD(P(_ptd), SYSTEM, P(SysMap)) # system location
PTD(P(_ptd), _u, P(_end)) # udot&ptd
/*
 * NOTE(review): from here the code is VAX assembly (4.3BSD locore.s
 * style); this extract interleaves i386 and VAX startup fragments.
 * Register roles (r5/r6/r9 = physical/virtual end-of-kernel cursors)
 * are presumed from the comments -- confirm against the full file.
 */
/* clear memory from kernel bss and pages for proc 0 u. and page table */
bisl3 $SYSTEM,r5,r9 # convert to virtual address
addl2 $NBPG-1,r9 # roundup to next page
addl2 $(UPAGES*NBPG)+NBPG+NBPG,r5
1: clrq (r6); acbl r5,$8,r6,1b
/* initialize system page table: uba vectors and int stack writeable */
movab eintstack,r1; bbcc $31,r1,1f;
1: bisl3 $PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
/* make kernel text space read-only */
movab _etext+NBPG-1,r1; bbcc $31,r1,1f;
1: bisl3 $PG_V|PG_URKR,r2,_Sysmap[r2]; aoblss r1,r2,1b
/* make kernel data, bss, read-write */
bicl3 $SYSTEM,r9,r1; ashl $-PGSHIFT,r1,r1
1: bisl3 $PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
/* now go to mapped mode */
mtpr $0,$TBIA; mtpr $1,$MAPEN; jmp *$0f; 0:
ashl $-PGSHIFT,r7,_maxmem
/* setup context for proc[0] == Scheduler */
bicl3 $SYSTEM|(NBPG-1),r9,r6 # make phys, page boundary
/* setup page table for proc[0] */
ashl $-PGSHIFT,r6,r3 # r3 = btoc(r6)
bisl3 $PG_V|PG_KW,r3,_Usrptmap # init first upt entry
/* double map the kernel into the virtual user addresses of phys mem */
moval -4*UPAGES(r0)[r1],r2
/* setup mapping for UPAGES of _u */
movl $UPAGES,r2; movab _u+NBPG*UPAGES,r1; addl2 $UPAGES,r3; jbr 2f
bisl3 $PG_V|PG_URKW,r3,-(r0)
/* initialize (slightly) the pcb */
movab UPAGES*NBPG(r1),PCB_KSP(r1)
movb $4,PCB_P0LR+3(r1) # disable ast
movl $CLSIZE,PCB_SZPT(r1) # init u.u_pcb.pcb_szpt
movab 1f,PCB_PC(r1) # initial pc
clrl PCB_PSL(r1) # mode(k,k), ipl=0
mtpr r3,$PCBB # first pcbb
/* set regs, p0br, p0lr, p1br, p1lr, astlvl, ksp and change to kernel mode */
/* put signal trampoline code in u. area */
movc3 $19,sigcode,PCB_SIGC(r0)
/* save boot device in global _bootdev */
/* save reboot flags in global _boothowto */
/* save end of symbol & string table in global _bootesym */
subl3 $NBPG-1,r9,_bootesym
/* calculate firstaddr, and call main() */
bicl3 $SYSTEM,r9,r0; ashl $-PGSHIFT,r0,-(sp)
addl2 $UPAGES+1,(sp); calls $1,_main
/* proc[1] == /etc/init now running here; run icode */
pushl $PSL_CURMOD|PSL_PRVMOD; pushl $0; rei
/* signal trampoline code: it is known that this code takes exactly 19 bytes */
/* in ../vax/pcb.h and in the movc3 above */
calls $4,8(pc) # params pushed by sendsig
movl sp,ap # calls frame built by sendsig
chmk $103 # cleanup mask and onsigstack
halt # sigreturn() does not return!
.word 0x3f # registers 0-5
callg (ap),*16(ap) # call the signal handler
ret # return to code above
* Icode is copied out to process 1 to exec /etc/init.
* If the exec fails, process 1 exits.
/* NOTE(review): the string below is "/sbin/init" although the comments
   above say /etc/init; argv points at "init" inside the path (offset 6) */
init: .asciz "/sbin/init"
argv: .long init+6-_icode
/* ENTRY: declare a global VAX entry point with its register save mask;
   the first (profiling) variant also calls mcount.  NOTE(review): the
   second definition redefines ENTRY -- presumably the pair is selected
   by an #ifdef (e.g. GPROF) not visible in this extract. */
#define ENTRY(name, regs) \
.globl _##name; .align 1; _##name: .word regs; jsb mcount
#define ENTRY(name, regs) \
.globl _##name; .align 1; _##name: .word regs
* see if access addr with a len type instruction causes a machine check
* len is length of access (1=byte, 2=short, 4=long)
movab 2f,nofault # jump to 2f on machcheck
1: bbc $1,r4,1f; tstw (r3)
1: bbc $2,r4,1f; tstl (r3)
1: clrl r0 # made it w/o machine checks
* update profiling information for the user
* addupc(pc, &u.u_prof, ticks)
movl 8(ap),r2 # &u.u_prof
subl3 8(r2),4(ap),r0 # corrected pc
extzv $1,$31,r0,r0 # logical right shift
extzv $1,$31,12(r2),r1 # ditto for scale
* Copy a null terminated string from the user address space into
* the kernel address space.
* copyinstr(fromaddr, toaddr, maxlength, &lencopied)
/* NOTE(review): the branch instructions tying these pieces together
   (page-loop, error, and done paths) are elided in this extract. */
movl 12(ap),r6 # r6 = max length
movl 4(ap),r1 # r1 = user address
bicl3 $~(NBPG*CLSIZE-1),r1,r2 # r2 = bytes on first page
movl 8(ap),r3 # r3 = kernel address
cmpl r6,r2 # r2 = min(bytes on page, length left);
prober $3,r2,(r1) # bytes accessible?
subl2 r2,r6 # update bytes left count
# fake the locc instr. for processors that don t have it
locc $0,r2,(r1) # null byte found?
subl2 r2,r1 # back up pointer updated by `locc
movc3 r2,(r1),(r3) # copy in next piece
movl $(NBPG*CLSIZE),r2 # check next page
tstl r6 # run out of space?
movl $ENOENT,r0 # set error code and return
tstl 16(ap) # return length?
subl3 r6,12(ap),r6 # actual len = maxlen - unused pages
subl2 r0,r6 # - unused on this page
addl3 $1,r6,*16(ap) # + the null byte
subl2 r0,r2 # r2 = number of bytes to move
subl2 r2,r1 # back up pointer updated by `locc
incl r2 # copy null byte as well
movc3 r2,(r1),(r3) # copy in last piece
* Copy a null terminated string from the kernel
* address space to the user address space.
* copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
/* NOTE(review): mirror of copyinstr, but the writability probe is on
   the destination; loop/branch instructions elided in this extract. */
movl 12(ap),r6 # r6 = max length
movl 4(ap),r1 # r1 = kernel address
movl 8(ap),r3 # r3 = user address
bicl3 $~(NBPG*CLSIZE-1),r3,r2 # r2 = bytes on first page
cmpl r6,r2 # r2 = min(bytes on page, length left);
probew $3,r2,(r3) # bytes accessible?
subl2 r2,r6 # update bytes left count
# fake the locc instr. for processors that don t have it
locc $0,r2,(r1) # null byte found?
subl2 r2,r1 # back up pointer updated by `locc
movc3 r2,(r1),(r3) # copy in next piece
movl $(NBPG*CLSIZE),r2 # check next page
tstl r6 # run out of space?
movl $ENOENT,r0 # set error code and return
* Copy a null terminated string from one point to another in
* the kernel address space.
* copystr(fromaddr, toaddr, maxlength, &lencopied)
/* kernel-to-kernel variant: no access probes needed, chunks of 65535
   bytes (max movc3/locc length) instead of page-sized pieces */
movl 12(ap),r6 # r6 = max length
movl 4(ap),r1 # r1 = src address
movl 8(ap),r3 # r3 = dest address
movzwl $65535,r2 # r2 = bytes in first chunk
cmpl r6,r2 # r2 = min(bytes in chunk, length left);
subl2 r2,r6 # update bytes left count
# fake the locc instr. for processors that don t have it
locc $0,r2,(r1) # null byte found?
subl2 r2,r1 # back up pointer updated by `locc
movc3 r2,(r1),(r3) # copy in next piece
tstl r6 # run out of space?
movl $ENOENT,r0 # set error code and return
* Copy specified amount of data from user space into the kernel
* r1 == from (user source address)
* r3 == to (kernel destination address)
/* NOTE(review): r5 presumably carries the byte count (JSBENTRY mask
   saves R1|R3|R5); the actual movc3 copies and branch glue between
   the probe steps are elided in this extract. */
JSBENTRY(Copyin, R1|R3|R5)
cmpl r5,$(NBPG*CLSIZE) # probing one page or less ?
prober $3,r5,(r1) # bytes accessible ?
/* clrl r0 # redundant */
blss ersb # negative length?
bicl3 $~(NBPG*CLSIZE-1),r1,r0 # r0 = bytes on first page
subl3 r0,$(NBPG*CLSIZE),r0
addl2 $(NBPG*CLSIZE),r0 # plus one additional full page
movl $(2*NBPG*CLSIZE),r0 # next amount to move
prober $3,r0,(r1) # bytes accessible ?
/* clrl r0 # redundant */
movl (sp)+,r6 # restore r6
movl (sp)+,r6 # restore r6
* Copy specified amount of data from kernel to the user space
* r1 == from (kernel source address)
* r3 == to (user destination address)
/* mirror of Copyin: probes the user destination for write access;
   copy/branch glue elided in this extract */
JSBENTRY(Copyout, R1|R3|R5)
cmpl r5,$(NBPG*CLSIZE) # moving one page or less ?
probew $3,r5,(r3) # bytes writeable?
/* clrl r0 # redundant */
blss ersb # negative length?
bicl3 $~(NBPG*CLSIZE-1),r3,r0 # r0 = bytes on first page
subl3 r0,$(NBPG*CLSIZE),r0
addl2 $(NBPG*CLSIZE),r0 # plus one additional full page
movl $(2*NBPG*CLSIZE),r0 # next amount to move
probew $3,r0,(r3) # bytes writeable?
/* clrl r0 # redundant */
movl (sp)+,r6 # restore r6
#ifdef notdef /* this is now expanded completely inline */
movl fp,(r0)+ # current stack frame
movl (sp),(r0) # resuming pc
#define PCLOC 16 /* location of pc in calls frame */
#define APLOC 8 /* location of ap,fp in calls frame */
/* NOTE(review): longjmp-style frame unwinder; labels 1b/3f and the
   surrounding loop are elided in this extract */
movl (r0)+,newfp # must save parameters in memory as all
movl (r0),newpc # registers may be clobbered.
cmpl fp,newfp # are we there yet?
moval 1b,PCLOC(fp) # redirect return pc to us!
beql 3f # did we miss our frame?
movl newpc,r0 # all done, just return to the `setjmp
* setjmp that saves all registers as the call frame may not
* be available to recover them in the usual manner by longjmp.
* Called before swapping out the u. area, restored by resume()
movq APLOC(fp),(r0)+ # save ap, fp
addl3 $8,ap,(r0)+ # save sp
movl PCLOC(fp),(r0) # save pc
* C library -- reset, setexit
* will generate a "return" from
* by restoring r6 - r12, ap, fp
* The returned value is x; on the original
* call the returned value is 0.
movq 8(fp),(r0)+ # ap, fp
movl 4(ap),r0 # returned value
* The following primitives use the fancy VAX instructions
* much like VMS does. _whichqs tells which of the 32 queues _qs
* have processes in them. Setrq puts processes into queues, Remrq
* removes them from queues. The running process is on no queue,
* other processes are on a queue related to p->p_pri, divided by 4
* actually to shrink the 0-127 range of priorities into the 32 available
* Setrq(p), using fancy VAX instructions.
* Call should be made at splclock(), and p->p_stat should be SRUN
/* NOTE(review): entry sequences, the r1->r2 queue-address computation,
   and the error (panic) paths are elided in this extract. */
tstl P_RLINK(r0) ## firewall: p->p_rlink must be 0
movzbl P_PRI(r0),r1 # put on queue which is p->p_pri / 4
insque (r0),*4(r2) # at end of queue
bbss r1,_whichqs,set2 # mark queue non-empty
* Remrq(p), using fancy VAX instructions
* Call should be made at splclock().
pushab rem3 # it wasn t recorded to be on its q
clrl P_RLINK(r0) ## for firewall checking
* Masterpaddr is the p->p_addr of the running process on the master
* processor. When a multiprocessor system, the slave processors will have
* an array of slavepaddr s.
* When no processes are on the runq, Swtch branches to idle
* to wait for something to come ready.
mtpr $0,$IPL # must allow interrupts here
tstl _whichqs # look for non-empty queue
* Swtch(), using fancy VAX instructions
/* pick the highest-priority non-empty run queue (ffs over the
   _whichqs bitmask), dequeue its head, and resume it */
sw1: ffs $0,$32,_whichqs,r0 # look for non-empty queue
beql idle # if none, idle
mtpr $0x18,$IPL # lock out all so _whichqs==_qs
bbcc r0,_whichqs,sw1 # proc moved via interrupt
remque *(r1),r2 # r2 = p = highest pri process
bvs badsw # make sure something was there
insv $1,r0,$1,_whichqs # still more procs in this queue
tstl P_WCHAN(r2) ## firewalls
cmpl r0,_masterpaddr # resume of current proc is easy
ashl $PGSHIFT,r0,r0 # r0 = pcbb(p)
mtpr $HIGH,$IPL # no interrupts, please
movl _CMAP2,_u+PCB_CMAP2 # yech
movl _u+PCB_CMAP2,_CMAP2 # yech
/* longjmp path: u.u_pcb.pcb_sswap points at a saved context to resume */
movl _u+PCB_SSWAP,r0 # longjmp to saved context
movq (r0)+,r6 # restore r6, r7
movq (r0)+,r8 # restore r8, r9
movq (r0)+,r10 # restore r10, r11
movq (r0)+,r12 # restore ap, fp
cmpl r1,sp # must be a pop
pushl $PSL_PRVMOD # return psl
pushl (r0) # address to return to
* {fu,su},{byte,word}, all massaged by asm.sed to jsb s
* Copy 1 relocation unit (NBPG bytes)
* from user virtual address to physical address
/* copyseg: temporarily map the physical page via the _CMAP2/_CADDR2
   window, then block-copy into it */
bisl3 $PG_V|PG_KW,8(ap),_CMAP2
mtpr $_CADDR2,$TBIS # invalidate entry for copy
movc3 $NBPG,*4(ap),_CADDR2
* zero out physical memory
* specified in relocation units (NBPG bytes)
bisl3 $PG_V|PG_KW,4(ap),_CMAP1
movc5 $0,(sp),$0,$NBPG,_CADDR1
* Given virtual address, byte count, and rw flag
* returns 0 on no access.
/* useracc: probe each page of the range; loop bodies elided here */
tstl 12(ap) # test for read access ?
cmpl $NBPG,r1 # can we do it in one probe ?
acbl $NBPG+1,$-NBPG,r1,uaw1
acbl $NBPG+1,$-NBPG,r1,uar1
* kernacc - check for kernel access privileges
* We can t use the probe instruction directly because
* it ors together current and previous mode.
/* walk the relevant page table (system/P0/P1) by hand and inspect
   each pte's valid bit and protection field */
movl 4(ap),r0 # virtual address
mfpr $SBR,r2 # address and length of page table (system)
mfpr $P1BR,r2 # user P1 (stack)
addl3 8(ap),r0,r1 # ending virtual address
blss kacerr # address too low
cmpl r1,r3 # compare last page to P0LR or SLR
bgtr kacerr # address too high
bbc $31,r3,kacerr # valid bit is off
cmpzv $27,$4,r3,$1 # check protection code
bleq kacerr # no access allowed
bneq kacc5 # only check read access
cmpzv $27,$2,r3,$3 # check low 2 bits of prot code
beql kacerr # no write access
aoblss r1,r0,kacc4 # next page
* Extracted and unrolled most common case of pagein (hopefully):
* resident and not on free list (reclaim of page is purely
* for the purpose of simulating a reference bit)
* CLSIZE of 2, any bit fields in pte s
/* NOTE(review): several conditional branches and the text/stack pte
   selection glue are elided in this extract; each `POPR; rsb' bails
   out to the full C pagein path. */
extzv $9,$23,28(sp),r3 # virtual address
bicl2 $1,r3 # v = clbase(btop(virtaddr));
movl _u+U_PROCP,r5 # p = u.u_procp
movl $1,r2 # type = CTEXT;
jlssu 1f # if (isatsv(p, v)) {
addl3 P_TSIZE(r5),P_DSIZE(r5),r0
addl2 P_P0BR(r5),r4 # tptopte(p, vtotp(p, v));
cvtwl P_SZPT(r5),r4 # } else (isassv(p, v)) {
addl2 P_P0BR(r5),r4 # sptopte(p, vtosp(p, v));
beql 2f # if (pte->pg_v || pte->pg_fod)
POPR; rsb # let pagein handle it
bicl3 $0xffe00000,(r4),r0
jneq 2f # if (pte->pg_pfnum == 0)
POPR; rsb # let pagein handle it
incl r0 # pgtocm(pte->pg_pfnum)
addl2 _cmap,r0 # &cmap[pgtocm(pte->pg_pfnum)]
jeql 2f # if (type == CTEXT &&
jbc $C_INTRANS,(r0),2f # c_intrans)
POPR; rsb # let pagein handle it
jbc $C_FREE,(r0),2f # if (c_free)
POPR; rsb # let pagein handle it
bisb2 $0x80,3(r4) # pte->pg_v = 1;
jbc $26,4(r4),2f # if (anycl(pte, pg_m)
bisb2 $0x04,3(r4) # pte->pg_m = 1;
bisw3 r0,r1,6(r4) # distcl(pte);
mtpr r0,$TBIS # tbiscl(v);
jeql 2f # if (type == CTEXT)
/* propagate the reclaimed pte to every process sharing the text */
movl X_CADDR(r0),r5 # for (p = p->p_textp->x_caddr; p; ) {
addl3 P_P0BR(r5),r3,r0 # tpte = tptopte(p, tp);
bisb2 $1,P_FLAG+3(r5) # p->p_flag |= SPTECHG;
movl (r4),(r0)+ # for (i = 0; i < CLSIZE; i++)
movl 4(r4),(r0) # tpte[i] = pte[i];
movl P_XLINK(r5),r5 # p = p->p_xlink;
2: # collect a few statistics...
incl _u+U_RU+RU_MINFLT # u.u_ru.ru_minflt++;
incl V_FAULTS(r0) # cnt.v_faults++;
incl V_PGREC(r0) # cnt.v_pgrec++;
incl V_FASTPGREC(r0) # cnt.v_fastpgrec++;
incl V_TRAP(r0) # cnt.v_trap++;
addl2 $8,sp # pop pc, code
mtpr $HIGH,$IPL ## dont go to a higher IPL (GROT)