LUNA-II (68040-based LUNA) support
author	Akito Fujita <akito@ucbvax.Berkeley.EDU>
Wed, 12 May 1993 19:13:41 +0000 (11:13 -0800)
committer	Akito Fujita <akito@ucbvax.Berkeley.EDU>
Wed, 12 May 1993 19:13:41 +0000 (11:13 -0800)
SCCS-vsn: sys/luna68k/dev/if_le.c 7.6
SCCS-vsn: sys/luna68k/include/cpu.h 7.4
SCCS-vsn: sys/luna68k/include/pmap.h 7.4
SCCS-vsn: sys/luna68k/luna68k/clock.c 7.8
SCCS-vsn: sys/luna68k/luna68k/clockreg.h 7.4
SCCS-vsn: sys/luna68k/luna68k/locore.s 7.11
SCCS-vsn: sys/luna68k/luna68k/machdep.c 7.10
SCCS-vsn: sys/luna68k/luna68k/pmap.c 7.5
SCCS-vsn: sys/luna68k/luna68k/pmap_bootstrap.c 7.3
SCCS-vsn: sys/luna68k/luna68k/sys_machdep.c 7.4
SCCS-vsn: sys/luna68k/luna68k/trap.c 7.5
SCCS-vsn: sys/luna68k/luna68k/vectors.s 7.3
SCCS-vsn: sys/luna68k/luna68k/vm_machdep.c 7.5

13 files changed:
usr/src/sys/luna68k/dev/if_le.c
usr/src/sys/luna68k/include/cpu.h
usr/src/sys/luna68k/include/pmap.h
usr/src/sys/luna68k/luna68k/clock.c
usr/src/sys/luna68k/luna68k/clockreg.h
usr/src/sys/luna68k/luna68k/locore.s
usr/src/sys/luna68k/luna68k/machdep.c
usr/src/sys/luna68k/luna68k/pmap.c
usr/src/sys/luna68k/luna68k/pmap_bootstrap.c
usr/src/sys/luna68k/luna68k/sys_machdep.c
usr/src/sys/luna68k/luna68k/trap.c
usr/src/sys/luna68k/luna68k/vectors.s
usr/src/sys/luna68k/luna68k/vm_machdep.c

index 50840cd..f1b41d2 100644 (file)
@@ -6,7 +6,7 @@
  *
  * from: hp300/dev/if_le.c     7.16 (Berkeley) 3/11/93
  *
- *     @(#)if_le.c     7.5 (Berkeley) %G%
+ *     @(#)if_le.c     7.6 (Berkeley) %G%
  */
 
 #include "le.h"
@@ -142,7 +142,23 @@ leattach(hd)
 #ifdef NOROM
        cp = "00000a02456c";
 #else
-       cp = (char *) 0x4101FFE0;
+#if defined(LUNA2)
+       if (machineid == LUNA_II) {
+               static char rom_data[128];
+               volatile u_int *from = (u_int *)0xf1000004;
+               for (i = 0; i < 128; i++) {
+                       *from = (i * 2) << 16;
+                       rom_data[i] |= (*from >> 12) & 0xf0;
+                       *from = (i * 2 + 1) << 16;
+                       rom_data[i] |= (*from >> 16) & 0xf;
+               }
+               cp = &rom_data[6];      /* ETHER0 must be here */
+               /* one port only now  XXX */
+       } else
+#endif
+       {
+               cp = (char *) 0x4101FFE0;
+       }
 #endif
        for (i = 0; i < sizeof(le->sc_addr); i++) {
                le->sc_addr[i]  = (*cp < 'A' ? (*cp & 0xF) : (*cp & 0xF) + 9) << 4;
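The LUNA-II branch above assembles each ROM byte from two 4-bit reads and then relies on the existing loop to turn the resulting ASCII hex string into the six-byte station address. A minimal stand-alone sketch of that ASCII-to-binary step (the hexval() helper and the sample string are illustrative, not driver code):

    #include <stdio.h>

    /* Same trick as the driver: '0'-'9' and 'A'-'F'/'a'-'f' map to 0-15
     * because the low nibble of 'A' is 1, so adding 9 gives 10. */
    static unsigned char hexval(char c)
    {
            return c < 'A' ? (c & 0xF) : (c & 0xF) + 9;
    }

    int main(void)
    {
            const char *cp = "00000a02456c";        /* sample ROM string */
            unsigned char addr[6];
            int i;

            for (i = 0; i < 6; i++) {
                    addr[i] = hexval(*cp++) << 4;   /* high nibble */
                    addr[i] |= hexval(*cp++);       /* low nibble */
            }
            for (i = 0; i < 6; i++)
                    printf("%02x%c", addr[i], i == 5 ? '\n' : ':');
            return 0;
    }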
index f7ec850..2033018 100644 (file)
@@ -13,7 +13,7 @@
  * from: Utah $Hdr: cpu.h 1.16 91/03/25$
  * from: hp300/include/cpu.h   7.13 (Berkeley) 12/27/92
  *
- *     @(#)cpu.h       7.3 (Berkeley) %G%
+ *     @(#)cpu.h       7.4 (Berkeley) %G%
  */
 
 /*
@@ -93,6 +93,14 @@ extern unsigned char ssir;
 #define setsoftnet()   ssir |= SIR_NET
 #define setsoftclock() ssir |= SIR_CLOCK
 
+#ifdef KERNEL
+extern int mmutype, machineid;
+#endif
+
+/* values for machineid */
+#define        LUNA_I          1       /* 20MHz 68030 */
+#define        LUNA_II         2       /* 25MHz 68040 */
+
 /* values for mmutype (assigned for quick testing) */
 #define        MMU_68040       -2      /* 68040 on-chip MMU */
 #define        MMU_68030       -1      /* 68030 on-chip subset of 68851 */
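These constants are meant for run-time dispatch rather than compile-time selection, which is the pattern the rest of this change follows. A small user-level sketch of that pattern (the globals here merely stand in for the kernel variables set up in locore.s):

    #include <stdio.h>

    /* Values mirrored from the header above. */
    #define LUNA_I    1             /* 20MHz 68030 */
    #define LUNA_II   2             /* 25MHz 68040 */
    #define MMU_68040 (-2)
    #define MMU_68030 (-1)

    /* Stand-ins for the globals locore.s initializes at boot. */
    int machineid = LUNA_II;
    int mmutype = MMU_68040;

    int main(void)
    {
            /* Branch once per machine-dependent operation. */
            if (machineid == LUNA_II && mmutype == MMU_68040)
                    printf("use 68040 MMU/cache operations\n");
            else
                    printf("use 68030 PMMU operations\n");
            return 0;
    }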
index 91be3c7..5c8aafc 100644 (file)
  *
  * from: hp300/include/pmap.h  7.11 (Berkeley) 12/27/92
  *
- *     @(#)pmap.h      7.3 (Berkeley) %G%
+ *     @(#)pmap.h      7.4 (Berkeley) %G%
  */
 
 #ifndef        _PMAP_MACHINE_
 #define        _PMAP_MACHINE_
 
 #define LUNA_PAGE_SIZE NBPG
+#if defined(LUNA2)
+#define LUNA_SEG_SIZE  (mmutype == MMU_68040 ? 0x40000 : NBSEG)
+#else
 #define LUNA_SEG_SIZE  NBSEG
+#endif
 
 #define luna_trunc_seg(x)      (((unsigned)(x)) & ~(LUNA_SEG_SIZE-1))
 #define luna_round_seg(x)      luna_trunc_seg((unsigned)(x) + LUNA_SEG_SIZE-1)
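On the 68040 each second-level (pointer-table) entry maps 256 KB, which is why LUNA_SEG_SIZE becomes 0x40000 at run time instead of the fixed NBSEG used by the 68030 scheme; the rounding macros then work on that granularity. A worked user-level example with the 68040 value filled in by hand:

    #include <stdio.h>

    /* 68040 value of LUNA_SEG_SIZE, hard-coded for illustration. */
    #define LUNA_SEG_SIZE 0x40000
    #define luna_trunc_seg(x) (((unsigned)(x)) & ~(LUNA_SEG_SIZE - 1))
    #define luna_round_seg(x) luna_trunc_seg((unsigned)(x) + LUNA_SEG_SIZE - 1)

    int main(void)
    {
            unsigned va = 0x12345678;

            printf("trunc: 0x%08x\n", luna_trunc_seg(va));   /* 0x12340000 */
            printf("round: 0x%08x\n", luna_round_seg(va));   /* 0x12380000 */
            return 0;
    }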
index b4145c7..8d78158 100644 (file)
@@ -13,7 +13,7 @@
  * from: Utah $Hdr: clock.c 1.18 91/01/21$
  * from: hp300/hp300/clock.c   7.19 (Berkeley) 2/18/93
  *
- *     @(#)clock.c     7.7 (Berkeley) %G%
+ *     @(#)clock.c     7.8 (Berkeley) %G%
  */
 
 #include <sys/param.h>
 
 #include <luna68k/luna68k/clockreg.h>
 
+#ifdef LUNA2
+#include <machine/cpu.h>
+#endif
+
 extern int clock_on;
 
 static int month_days[12] = {
@@ -29,6 +33,9 @@ static int month_days[12] = {
 struct bbc_tm *gmt_to_bbc();
 
 volatile struct bbc *bbc = (struct bbc *)BBC_ADDR;
+#ifdef LUNA2
+volatile struct bbc2 *bbc2 = (struct bbc2 *)BBC_ADDR;
+#endif
 
 int battery_clock;
 int battery_chkfg;
@@ -58,6 +65,15 @@ cpu_initclocks()
        /* set flag for clockintr. */
        clock_on = 1;
 
+#ifdef LUNA2
+       if (machineid == LUNA_II) {
+               /* not yet */
+               battery_chkfg = 1;
+               battery_clock = 1;
+               return;
+       }
+#endif
+
        batterychk();
        if (!battery_clock)
          return;
@@ -137,14 +153,34 @@ resettodr()
        s = splimp();
 
        /* set bb-clock */
-       bbc->cal_ctl |= BBC_WRT;
-       bbc->cal_sec = binary_to_bcd(tmptr->tm_sec);
-       bbc->cal_min = binary_to_bcd(tmptr->tm_min);
-       bbc->cal_hour = binary_to_bcd(tmptr->tm_hour);
-       bbc->cal_day = binary_to_bcd(tmptr->tm_mday);
-       bbc->cal_mon = binary_to_bcd(tmptr->tm_mon);
-       bbc->cal_year = binary_to_bcd(tmptr->tm_year);
-       bbc->cal_ctl &= ~BBC_WRT;
+#ifdef LUNA2
+       if (machineid == LUNA_II) {
+#if 1
+               /* not yet */
+               printf("WARNING: not supported resettodr() at LUNA2 yet.\n");
+               return;
+#else
+               /* bbc2->cal_ctl_? |= BBC_WRT; */
+               bbc2->cal_sec = tmptr->tm_sec;
+               bbc2->cal_min = tmptr->tm_min;
+               bbc2->cal_hour = tmptr->tm_hour;
+               bbc2->cal_day = tmptr->tm_mday;
+               bbc2->cal_mon = tmptr->tm_mon;
+               bbc2->cal_year = tmptr->tm_year;
+               /* bbc2->cal_ctl_? &= ~BBC_WRT; */
+#endif
+       } else 
+#endif
+       {
+               bbc->cal_ctl |= BBC_WRT;
+               bbc->cal_sec = binary_to_bcd(tmptr->tm_sec);
+               bbc->cal_min = binary_to_bcd(tmptr->tm_min);
+               bbc->cal_hour = binary_to_bcd(tmptr->tm_hour);
+               bbc->cal_day = binary_to_bcd(tmptr->tm_mday);
+               bbc->cal_mon = binary_to_bcd(tmptr->tm_mon);
+               bbc->cal_year = binary_to_bcd(tmptr->tm_year);
+               bbc->cal_ctl &= ~BBC_WRT;
+       }
 
        splx(s);
 }
@@ -197,21 +233,39 @@ bbc_to_gmt(timbuf)
        s = splimp();
 
        /* read bb-clock */
-       bbc->cal_ctl |= BBC_RD;
-       sec = bcd_to_binary(bbc->cal_sec);
-       min = bcd_to_binary(bbc->cal_min);
-       hour = bcd_to_binary(bbc->cal_hour);
-       day = bcd_to_binary(bbc->cal_day);
-       month = bcd_to_binary(bbc->cal_mon);
-       year = bcd_to_binary(bbc->cal_year) + 1900;
-       bbc->cal_ctl &= ~BBC_RD;
-       
+#ifdef LUNA2
+       if (machineid == LUNA_II) {
+               sec = bbc2->cal_sec;
+               min = bbc2->cal_min;
+               hour = bbc2->cal_hour;
+               day = bbc2->cal_day;
+               month = bbc2->cal_mon;
+               year = bbc2->cal_year + 1900;
+       } else
+#endif
+       {
+               bbc->cal_ctl |= BBC_RD;
+               sec = bcd_to_binary(bbc->cal_sec);
+               min = bcd_to_binary(bbc->cal_min);
+               hour = bcd_to_binary(bbc->cal_hour);
+               day = bcd_to_binary(bbc->cal_day);
+               month = bcd_to_binary(bbc->cal_mon);
+               year = bcd_to_binary(bbc->cal_year) + 1900;
+               bbc->cal_ctl &= ~BBC_RD;
+       }
+
        splx(s);
 
        range_test(hour, 0, 23);
        range_test(day, 1, 31);
        range_test(month, 1, 12);
+#if 1  /* limited to year 2000 for now ... */
        range_test(year, STARTOFTIME, 2000);
+#else
+       if (year < 1970) {
+               year += 100;
+       }
+#endif
        
        tmp = 0;
        
@@ -234,6 +288,14 @@ batterychk()
 {
        static char btchkdata[] = "chk";
 
+#ifdef LUNA2
+       if (machineid == LUNA_II) {
+               /* not yet */
+               battery_chkfg = 1;
+               battery_clock = 1;
+               return;
+       }
+#endif
        /* if already checked, return */
        if (battery_chkfg)
                return;
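The LUNA-I battery-backed clock keeps each field in BCD, so the original paths go through binary_to_bcd()/bcd_to_binary(), while the new LUNA-II path reads and writes plain binary values. A stand-alone sketch of the BCD round trip using the macros from clockreg.h:

    #include <stdio.h>

    /* Copied from clockreg.h for illustration. */
    #define binary_to_bcd(i)        (((i) / 10) << 4 | ((i) % 10))
    #define bcd_to_binary(i)        (((i) >> 4) * 10 + ((i) & 0x0F))

    int main(void)
    {
            int sec = 59;
            int bcd = binary_to_bcd(sec);

            /* 59 becomes 0x59 in BCD and converts back unchanged. */
            printf("%d -> 0x%02x -> %d\n", sec, bcd, bcd_to_binary(bcd));
            return 0;
    }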
index d61a6c1..af5b74d 100644 (file)
@@ -13,7 +13,7 @@
  * from: Utah $Hdr: clockreg.h 1.14 91/01/18$
  * from: hp300/hp300/clockreg.h        7.4 (Berkeley) 12/27/92
  *
- *     @(#)clockreg.h  7.3 (Berkeley) %G%
+ *     @(#)clockreg.h  7.4 (Berkeley) %G%
  */
 
 /*
@@ -78,3 +78,23 @@ struct bbc {
 
 #define        binary_to_bcd(i)        (((i) / 10) << 4 | ((i) % 10))
 #define        bcd_to_binary(i)        (((i) >> 4) *10 + ((i) & 0x0F))
+
+#ifdef LUNA2
+struct bbc2 {
+    unsigned char      cal_sec;        /* seconds register */
+    unsigned char      cal_sec_alarm;  /* seconds alarm register */
+    unsigned char      cal_min;        /* minutes register */
+    unsigned char      cal_min_alarm;  /* minutes alarm register */
+    unsigned char      cal_hour;       /* hours register */
+    unsigned char      cal_hour_alarm; /* hours alarm register */
+    unsigned char      cal_dow;        /* day of the week */
+    unsigned char      cal_day;        /* day register */
+    unsigned char      cal_mon;        /* month register */
+    unsigned char      cal_year;       /* year register */
+    unsigned char      cal_ctl_a;      /* calendar control register A */
+    unsigned char      cal_ctl_b;      /* calendar control register B */
+    unsigned char      cal_ctl_c;      /* calendar control register C */
+    unsigned char      cal_ctl_d;      /* calendar control register D */
+    unsigned char      nvram[50];      /* non-volatile RAM area */
+};
+#endif
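struct bbc2 is an overlay for a memory-mapped calendar chip, so the member offsets have to line up with the hardware register map: fourteen one-byte registers followed by 50 bytes of non-volatile RAM, 64 bytes in all. A small user-level check of that layout (illustrative only, not kernel code):

    #include <stdio.h>
    #include <stddef.h>

    struct bbc2 {                   /* mirrored from clockreg.h */
            unsigned char cal_sec, cal_sec_alarm;
            unsigned char cal_min, cal_min_alarm;
            unsigned char cal_hour, cal_hour_alarm;
            unsigned char cal_dow, cal_day, cal_mon, cal_year;
            unsigned char cal_ctl_a, cal_ctl_b, cal_ctl_c, cal_ctl_d;
            unsigned char nvram[50];
    };

    int main(void)
    {
            /* All members are chars, so no padding is expected. */
            printf("cal_year at offset %zu, total size %zu\n",
                offsetof(struct bbc2, cal_year),        /* 9 */
                sizeof(struct bbc2));                   /* 64 */
            return 0;
    }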
index bf92bb0..666076e 100644 (file)
@@ -13,7 +13,7 @@
  * from: Utah $Hdr: locore.s 1.62 92/01/20$
  * from: hp300/hp300/locore.s  7.22 (Berkeley) 2/18/93
  *
- *     @(#)locore.s    7.10 (Berkeley) %G%
+ *     @(#)locore.s    7.11 (Berkeley) %G%
  */
 
 /*
@@ -74,15 +74,53 @@ _doadump:
        .globl  _trap, _nofault, _longjmp
 _buserr:
        tstl    _nofault                | device probe?
-       jeq     _addrerr                | no, handle as usual
+       jeq     Lberr                   | no, handle as usual
        movl    _nofault,sp@-           | yes,
        jbsr    _longjmp                |  longjmp(nofault)
+Lberr:
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     _addrerr                | no, skip
+       clrl    sp@-                    | stack adjust count
+       moveml  #0xFFFF,sp@-            | save user registers
+       movl    usp,a0                  | save the user SP
+       movl    a0,sp@(FR_SP)           |   in the savearea
+       lea     sp@(FR_HW),a1           | grab base of HW berr frame
+       moveq   #0,d0
+       movw    a1@(12),d0              | grab SSW
+       movl    a1@(20),d1              | and fault VA
+       btst    #11,d0                  | check for mis-aligned access
+       jeq     Lberr2                  | no, skip
+       addl    #3,d1                   | yes, get into next page
+       andl    #PG_FRAME,d1            | and truncate
+Lberr2:
+       movl    d1,sp@-                 | push fault VA
+       movl    d0,sp@-                 | and padded SSW
+       btst    #10,d0                  | ATC bit set?
+       jeq     Lisberr                 | no, must be a real bus error
+       movc    dfc,d1                  | yes, get MMU fault
+       movc    d0,dfc                  | store faulting function code
+       movl    sp@(4),a0               | get faulting address
+       .word   0xf568                  | ptestr a0@
+       movc    d1,dfc
+       .long   0x4e7a0805              | movc mmusr,d0
+       movw    d0,sp@                  | save (ONLY LOW 16 BITS!)
+       jra     Lismerr
+#endif
 _addrerr:
        clrl    sp@-                    | stack adjust count
        moveml  #0xFFFF,sp@-            | save user registers
        movl    usp,a0                  | save the user SP
        movl    a0,sp@(FR_SP)           |   in the savearea
        lea     sp@(FR_HW),a1           | grab base of HW berr frame
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lbenot040               | no, skip
+       movl    a1@(8),sp@-             | yes, push fault address
+       clrl    sp@-                    | no SSW for address fault
+       jra     Lisaerr                 | go deal with it
+Lbenot040:
+#endif
        moveq   #0,d0
        movw    a1@(10),d0              | grab SSW for fault processing
        btst    #12,d0                  | RB set?
@@ -165,10 +203,38 @@ Lstkadj:
  * FP exceptions.
  */
 _fpfline:
+#if defined(LUNA2)
+       cmpw    #0x202c,sp@(6)          | format type 2?
+       jne     _illinst                | no, not an FP emulation
+#ifdef HPFPLIB
+       .globl fpsp_unimp
+       jmp     fpsp_unimp              | yes, go handle it
+#else
+       clrl    sp@-                    | stack adjust count
+       moveml  #0xFFFF,sp@-            | save registers
+       moveq   #T_FPEMULI,d0           | denote as FP emulation trap
+       jra     fault                   | do it
+#endif
+#else
        jra     _illinst
+#endif
 
 _fpunsupp:
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     _illinst                | no, treat as illinst
+#ifdef HPFPLIB
+       .globl  fpsp_unsupp
+       jmp     fpsp_unsupp             | yes, go handle it
+#else
+       clrl    sp@-                    | stack adjust count
+       moveml  #0xFFFF,sp@-            | save registers
+       moveq   #T_FPEMULD,d0           | denote as FP emulation trap
+       jra     fault                   | do it
+#endif
+#else
        jra     _illinst
+#endif
 
 /*
  * Handles all other FP coprocessor exceptions.
@@ -200,6 +266,37 @@ Lfptnull:
        jra     _badtrap        | treat as an unexpected trap
 #endif
 
+#ifdef HPFPLIB
+/*
+ * We wind up here from the 040 FP emulation library after
+ * the exception has been processed.
+ */
+       .globl  _fault
+_fault:
+       subql   #4,sp           | space for rts addr
+       movl    d0,sp@-         | scratch register
+       movw    sp@(14),d0      | get vector offset
+       andl    #0xFFF,d0       | mask out frame type and clear high word
+       cmpl    #0x100,d0       | HP-UX style reschedule trap?
+       jne     Lfault1         | no, skip
+       movl    sp@+,d0         | restore scratch register
+       addql   #4,sp           | pop space
+       jra     Lrei1           | go do AST
+Lfault1:
+       cmpl    #0xC0,d0        | FP exception?
+       jlt     Lfault2         | no, skip
+       movl    sp@+,d0         | yes, backoff
+       addql   #4,sp           |  and prepare for normal trap frame
+       jra     _fpfault        | go to it
+Lfault2:
+       addl    #Lvectab,d0     | convert to vector table offset
+       exg     d0,a0
+       movl    a0@,sp@(4)      | get exception vector and save for rts
+       exg     d0,a0
+       movl    sp@+,d0         |   scratch registers
+       rts                     | return to handler from vectab
+#endif
+
 /*
  * Coprocessor and format errors can generate mid-instruction stack
  * frames and cause signal delivery hence we need to check for potential
@@ -688,6 +785,25 @@ start:
        movl    #CACHE_OFF,d0
        movc    d0,cacr                 | clear and disable on-chip cache(s)
 
+#if defined(LUNA2)
+/* determine our CPU/MMU combo - check for all regardless of kernel config */
+       movl    #0x200,d0               | data freeze bit
+       movc    d0,cacr                 |   only exists on 68030
+       movc    cacr,d0                 | read it back
+       tstl    d0                      | zero?
+       jeq     Lnot68030               | yes, we have 68040(LUNA2)
+       movl    #1,_machineid           | no, must be a LUNA-I
+       movl    #-1,_mmutype            | set to reflect 68030 PMMU
+       jra     Lstart1
+Lnot68030:
+       movl    #2,_machineid           | must be a LUNA-II
+       movl    #-2,_mmutype            | set to reflect 68040 MMU
+#ifdef HPFPLIB
+       movl    #3,_processor           | HP-UX style processor id
+#endif
+Lstart1:
+#endif
+
 /* initialize source/destination control registers for movs */
        moveq   #FC_USERD,d0            | user space
        movc    d0,sfc                  |   as source
@@ -713,25 +829,48 @@ start:
 /*
  * Prepare to enable MMU.
  */
-       movl    _Sysseg,a1              | system segment table addr read value (a KVA)
+       movl    _Sysseg,d1              | system segment table addr read value (a KVA)
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lmotommu1               | no, skip
+       
+       .long   0x4e7b1807              | movc d1,srp
+/* we must set tt-registers here */
+       movl    #0x403FA040,d0          | tt0 for LUNA2 0x40000000-0x7fffffff
+       .long   0x4e7b0004              | movc d0,itt0
+       .long   0x4e7b0006              | movc d0,dtt0
+       movl    #0x807FA040,d0          | tt1 for LUNA2 0x80000000-0xffffffff
+       .long   0x4e7b0005              | movc d0,itt1
+       .long   0x4e7b0007              | movc d0,dtt1
+       .word   0xf4d8                  | cinva bc
+       .word   0xf518                  | pflusha
+       movl    #0x8000,d0
+       .long   0x4e7b0003              | movc d0,tc
+       movl    #0x80008000,d0
+       movc    d0,cacr                 | turn on both caches
+       jmp     Lenab1
+Lmotommu1:
+#endif
        lea     _protorp,a0
        movl    #0x80000202,a0@         | nolimit + share global + 4 byte PTEs
-       movl    a1,a0@(4)               | + segtable address
+       movl    d1,a0@(4)               | + segtable address
        pmove   a0@,srp                 | load the supervisor root pointer
        movl    #0x80000002,a0@         | reinit upper half for CRP loads
 /* we must set tt-registers here */
-       lea     _protott0,a0
+       lea     _protott0,a0            | tt0 for LUNA1 0x40000000-0x7fffffff
        .word   0xf010                  | pmove a0@,mmutt0
        .word   0x0800
-       lea     _protott1,a0
+       lea     _protott1,a0            | tt1 for LUNA1 0x80000000-0xffffffff
        .word   0xf010                  | pmove a0@,mmutt1
        .word   0x0c00
+       lea     _mapping_tc,a2
        movl    #0x82c0aa00,a2@         | value to load TC with
        pmove   a2@,tc                  | load it
 
 /*
  * Should be running mapped from this point on
  */
+Lenab1:
 #ifdef FPCOPROC
 /* fpp check */
        movl    a1,sp@-
@@ -757,8 +896,13 @@ start:
 #endif
 /* flush TLB and turn on caches */
        jbsr    _TBIA                   | invalidate TLB
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jeq     Lnocache0               | yes, cache already on
+#endif
        movl    #CACHE_ON,d0
        movc    d0,cacr                 | clear cache(s)
+Lnocache0:
 /* final setup for C code */
        movw    #PSL_LOWIPL,sr          | lower SPL
        movl    d7,_boothowto           | save reboot flags
@@ -768,6 +912,13 @@ start:
 /* proc[1] == init now running here;
  * create a null exception frame and return to user mode in icode
  */
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lnoflush                | no, skip
+       .word   0xf478                  | cpusha dc
+       .word   0xf498                  | cinva ic
+Lnoflush:
+#endif
        clrw    sp@-                    | vector offset/frame type
        clrl    sp@-                    | return to icode location 0
        movw    #PSL_USER,sp@-          | in user mode
@@ -832,16 +983,21 @@ _szicode:
  * Primitives
  */ 
 
+#ifdef __STDC__
+#define EXPORT(name)   .globl _ ## name; _ ## name:
+#else
+#define EXPORT(name)   .globl _/**/name; _/**/name:
+#endif
 #ifdef GPROF
 #define        ENTRY(name) \
-       .globl _/**/name; _/**/name: link a6,#0; jbsr mcount; unlk a6
+       EXPORT(name) link a6,\#0; jbsr mcount; unlk a6
 #define ALTENTRY(name, rname) \
        ENTRY(name); jra rname+12
 #else
 #define        ENTRY(name) \
-       .globl _/**/name; _/**/name:
+       EXPORT(name)
 #define ALTENTRY(name, rname) \
-       .globl _/**/name; _/**/name:
+       ENTRY(name)
 #endif
 
 /*
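EXPORT() exists because an ANSI preprocessor pastes tokens with ## while the traditional one relied on the /**/ trick; both are meant to produce the same assembler text (e.g. ".globl _TBIA; _TBIA:"). A short C illustration of the same mechanism, with a hypothetical symbol and a C declaration standing in for the assembler label:

    #include <stdio.h>

    #ifdef __STDC__
    #define EXPORT(name)   int _ ## name          /* ANSI token pasting */
    #else
    #define EXPORT(name)   int _/**/name          /* traditional cpp trick */
    #endif

    EXPORT(TBIA) = 42;      /* expands to: int _TBIA = 42; */

    int main(void)
    {
            printf("%d\n", _TBIA);
            return 0;
    }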
@@ -1359,6 +1515,17 @@ Lres1:
        orl     #PG_RW+PG_V,d1          | ensure valid and writable
        movl    d1,a2@+                 | load it up
        dbf     d0,Lres1                | til done
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lres1a                  | no, skip
+       .word   0xf518                  | yes, pflusha
+       movl    a1@(PCB_USTP),d0        | get USTP
+       moveq   #PGSHIFT,d1
+       lsll    d1,d0                   | convert to addr
+       .long   0x4e7b0806              | movc d0,urp
+       jra     Lcxswdone
+Lres1a:
+#endif
        movl    #CACHE_CLR,d0
        movc    d0,cacr                 | invalidate cache(s)
        pflusha                         | flush entire TLB
@@ -1368,6 +1535,7 @@ Lres1:
        lea     _protorp,a0             | CRP prototype
        movl    d0,a0@(4)               | stash USTP
        pmove   a0@,crp                 | load new user root pointer
+Lcxswdone:
        moveml  a1@(PCB_REGS),#0xFCFC   | and registers
        movl    a1@(PCB_USP),a0
        movl    a0,usp                  | and USP
@@ -1375,6 +1543,13 @@ Lres1:
        lea     a1@(PCB_FPCTX),a0       | pointer to FP save area
        tstb    a0@                     | null state frame?
        jeq     Lresfprest              | yes, easy
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lresnot040              | no, skip
+       clrl    sp@-                    | yes...
+       frestore sp@+                   | ...magic!
+Lresnot040:
+#endif
        fmovem  a0@(312),fpcr/fpsr/fpi  | restore FP control registers
        fmovem  a0@(216),fp0-fp7        | restore FP general registers
 Lresfprest:
@@ -1481,6 +1656,13 @@ ENTRY(suiword)
        movsl   d0,a0@                  | do write to user space
        nop
        moveq   #0,d0                   | indicate no fault
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lsuicpurge              | no, skip
+       .word   0xf498                  | cinva ic (XXX overkill)
+       jra     Lfsdone
+Lsuicpurge:
+#endif
        movl    #IC_CLEAR,d1
        movc    d1,cacr                 | invalidate i-cache
        jra     Lfsdone
@@ -1526,11 +1708,46 @@ ENTRY(subyte)
        moveq   #0,d0                   | indicate no fault
        jra     Lfsdone
 
+#if defined(LUNA2)
+ENTRY(suline)
+       movl    sp@(4),a0               | address to write
+       movl    _curpcb,a1              | current pcb
+       movl    #Lslerr,a1@(PCB_ONFAULT) | where to return to on a fault
+       movl    sp@(8),a1               | address of line
+       movl    a1@+,d0                 | get lword
+       movsl   d0,a0@+                 | put lword
+       nop                             | sync
+       movl    a1@+,d0                 | get lword
+       movsl   d0,a0@+                 | put lword
+       nop                             | sync
+       movl    a1@+,d0                 | get lword
+       movsl   d0,a0@+                 | put lword
+       nop                             | sync
+       movl    a1@+,d0                 | get lword
+       movsl   d0,a0@+                 | put lword
+       nop                             | sync
+       moveq   #0,d0                   | indicate no fault
+       jra     Lsldone
+Lslerr:
+       moveq   #-1,d0
+Lsldone:
+       movl    _curpcb,a1              | current pcb
+       clrl    a1@(PCB_ONFAULT)        | clear fault address
+       rts
+#endif
+
 /*
  * Invalidate entire TLB.
  */
 ENTRY(TBIA)
 __TBIA:
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lmotommu3               | no, skip
+       .word   0xf518                  | yes, pflusha
+       rts
+Lmotommu3:
+#endif
        pflusha                         | flush entire TLB
        movl    #DC_CLEAR,d0
        movc    d0,cacr                 | invalidate on-chip d-cache
@@ -1543,6 +1760,21 @@ ENTRY(TBIS)
 #ifdef DEBUG
        tstl    fulltflush              | being conservative?
        jne     __TBIA                  | yes, flush entire TLB
+#endif
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lmotommu4               | no, skip
+       movl    sp@(4),a0
+       movc    dfc,d1
+       moveq   #1,d0                   | user space
+       movc    d0,dfc
+       .word   0xf508                  | pflush a0@
+       moveq   #5,d0                   | super space
+       movc    d0,dfc
+       .word   0xf508                  | pflush a0@
+       movc    d1,dfc
+       rts
+Lmotommu4:
 #endif
 
        movl    sp@(4),a0               | get addr to flush
@@ -1558,6 +1790,13 @@ ENTRY(TBIAS)
 #ifdef DEBUG
        tstl    fulltflush              | being conservative?
        jne     __TBIA                  | yes, flush everything
+#endif
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lmotommu5               | no, skip
+       .word   0xf518                  | yes, pflusha (for now) XXX
+       rts
+Lmotommu5:
 #endif
        pflush #4,#4                    | flush supervisor TLB entries
        movl    #DC_CLEAR,d0
@@ -1571,6 +1810,13 @@ ENTRY(TBIAU)
 #ifdef DEBUG
        tstl    fulltflush              | being conservative?
        jne     __TBIA                  | yes, flush everything
+#endif
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     Lmotommu6               | no, skip
+       .word   0xf518                  | yes, pflusha (for now) XXX
+       rts
+Lmotommu6:
 #endif
        pflush  #0,#4                   | flush user TLB entries
        movl    #DC_CLEAR,d0
@@ -1581,18 +1827,114 @@ ENTRY(TBIAU)
  * Invalidate instruction cache
  */
 ENTRY(ICIA)
+#if defined(LUNA2)
+ENTRY(ICPA)
+       cmpl    #-2,_mmutype            | 68040
+       jne     Lmotommu7               | no, skip
+       .word   0xf498                  | cinva ic
+       rts
+Lmotommu7:
+#endif
        movl    #IC_CLEAR,d0
        movc    d0,cacr                 | invalidate i-cache
        rts
 
+/*
+ * Invalidate data cache.
+ * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
+ * problems with DC_WA.  The only cases we have to worry about are context
+ * switch and TLB changes, both of which are handled "in-line" in resume
+ * and TBI*.
+ */
+ENTRY(DCIA)
+__DCIA:
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040
+       jne     Lmotommu8               | no, skip
+       /* XXX implement */
+       rts
+Lmotommu8:
+#endif
+       rts
+
+ENTRY(DCIS)
+__DCIS:
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040
+       jne     Lmotommu9               | no, skip
+       /* XXX implement */
+       rts
+Lmotommu9:
+#endif
+       rts
+
+ENTRY(DCIU)
+__DCIU:
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040
+       jne     LmotommuA               | no, skip
+       /* XXX implement */
+       rts
+LmotommuA:
+#endif
+       rts
+
+#if defined(LUNA2)
+ENTRY(ICPL)
+       movl    sp@(4),a0               | address
+       .word   0xf488                  | cinvl ic,a0@
+       rts
+ENTRY(ICPP)
+       movl    sp@(4),a0               | address
+       .word   0xf490                  | cinvp ic,a0@
+       rts
+ENTRY(DCPL)
+       movl    sp@(4),a0               | address
+       .word   0xf448                  | cinvl dc,a0@
+       rts
+ENTRY(DCPP)
+       movl    sp@(4),a0               | address
+       .word   0xf450                  | cinvp dc,a0@
+       rts
+ENTRY(DCPA)
+       .word   0xf458                  | cinva dc
+       rts
+ENTRY(DCFL)
+       movl    sp@(4),a0               | address
+       .word   0xf468                  | cpushl dc,a0@
+       rts
+ENTRY(DCFP)
+       movl    sp@(4),a0               | address
+       .word   0xf470                  | cpushp dc,a0@
+       rts
+#endif
+
 ENTRY(PCIA)
+#if defined(LUNA2)
+ENTRY(DCFA)
+       cmpl    #-2,_mmutype            | 68040
+       jne     LmotommuB               | no, skip
+       .word   0xf478                  | cpusha dc
+       rts
+LmotommuB:
+#endif
        movl    #DC_CLEAR,d0
        movc    d0,cacr                 | invalidate on-chip d-cache
        rts
 
+#if 0 /****************************************************************/
+/* external cache control */
+ENTRY(ecacheon)
+       rts
+
+ENTRY(ecacheoff)
+       rts
+#endif /****************************************************************/
+
 /*
  * Get callers current SP value.
  * Note that simply taking the address of a local variable in a C function
  * doesn't work because callee saved registers may be outside the stack frame
  * defined by A6 (e.g. GCC generated code).
  */
@@ -1617,6 +1959,13 @@ ENTRY(loadustp)
        movl    sp@(4),d0               | new USTP
        moveq   #PGSHIFT,d1
        lsll    d1,d0                   | convert to addr
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     LmotommuC               | no, skip
+       .long   0x4e7b0806              | movc d0,urp
+       rts
+LmotommuC:
+#endif
        lea     _protorp,a0             | CRP prototype
        movl    d0,a0@(4)               | stash USTP
        pmove   a0@,crp                 | load root pointer
@@ -1965,6 +2314,23 @@ _fpvec:  movl    a0,a2@(FLINE_VEC)       | restore vectors
 _doboot:
        movl    #0x41000004,a0
        movl    a0@,a1                  | get PROM restart entry address
+#if defined(LUNA2)
+       cmpl    #-2,_mmutype            | 68040?
+       jne     LmotommuF               | no, skip
+       .word   0xf4f8                  | cpusha bc
+       movl    #0,d0
+       movc    d0,cacr                 | caches off
+       movql   #0,d0
+       .long   0x4e7b0004              | movc d0,itt0
+       .long   0x4e7b0005              | movc d0,itt1
+       .long   0x4e7b0006              | movc d0,dtt0
+       .long   0x4e7b0007              | movc d0,dtt1
+
+       .long   0x4e7b0003              | movc d0,tc
+
+       jmp     a1@                     | goto REBOOT
+LmotommuF:
+#endif
        movl    #CACHE_OFF,d0
        movc    d0,cacr                 | disable on-chip cache(s)
        movl    #_tcroff,a0             | value for pmove to TC (turn off MMU)
@@ -1972,13 +2338,20 @@ _doboot:
        jmp     a1@                     | goto REBOOT
 
        .data
+       .globl  _machineid,_mmutype
+_machineid:
+       .long   1               | default to LUNA-I
+_mmutype:
+       .long   -1              | default to 68030 PMMU
        .globl  _protorp,_protott0,_protott1
 _protorp:
        .long   0,0             | prototype root pointer
 _protott0:
-       .long   0x807F8543      | prototype tt0 register (for kernel)
+       .long   0x403f8543      | tt0 (for LUNA1 kernel 0x40000000-0x7fffffff)
 _protott1:
-       .long   0               | prototype tt0 register (for user)
+       .long   0x807F8543      | tt1 (for LUNA1 kernel 0x80000000-0xffffffff)
+_mapping_tc:
+       .long   0
        .globl  _cold
 _cold:
        .long   1               | cold start flag
@@ -2014,6 +2387,27 @@ fulltflush:
 fullcflush:
        .long   0
 #endif
+#ifdef HPFPLIB
+/*
+ * Undefined symbols from hpux_float.o:
+ *
+ * kdb_printf: A kernel debugger print routine, we just use printf instead.
+ * processor:  HP-UX equiv. of machineid, set to 3 if it is a 68040.
+ * u:          Ye ole u-area.  The code wants to grab the first longword
+ *             indirect off of that and clear the 0x40000 bit there.
+ *             Oddly enough this was incorrect even in HP-UX!
+ * runrun:     Old name for want_resched.
+ */
+       .globl  _kdb_printf,_processor,_u,_runrun
+_kdb_printf:
+       .long   _printf
+_processor:
+       .long   0
+_u:
+       .long   .+4
+       .long   0
+       .set    _runrun,_want_resched
+#endif
 /* interrupt counters */
        .globl  _intrcnt,_eintrcnt,_intrnames,_eintrnames
 _intrnames:
index bafa3c0..c430e5b 100644 (file)
@@ -13,7 +13,7 @@
  * from: Utah $Hdr: machdep.c 1.63 91/04/24$
  * from: hp300/hp300/machdep.c   7.36 (Berkeley) 2/10/93
  *
- *     @(#)machdep.c   7.9 (Berkeley) %G%
+ *     @(#)machdep.c   7.10 (Berkeley) %G%
  */
 
 #include <sys/param.h>
@@ -314,11 +314,17 @@ extern    char ostype[], osrelease[], version[];
 
 identifyfpu()
 {
+#ifdef LUNA2
+       if (machineid == LUNA_II) {
+               sprintf(cpu_model, "LUNA-II (25MHz MC68040 CPU+MMU+FPU)");
+               printf("%s\n", cpu_model);
+               return;
+       }
+#endif
        if ( fpptype == -1 ) {
                printf("unknow FPU type \n");
                panic("startup");
        }
-
        sprintf(cpu_model, "LUNA-I (20MHz MC68030 CPU+MMU, 20MHz MC6888%d FPU)", fpptype);
        printf("%s\n", cpu_model);
 
@@ -1091,6 +1097,11 @@ unsigned char fpp_svarea[212];
 
 void checkfpp()
 {
+#ifdef LUNA2
+       if (machineid == LUNA_II) {
+               return;
+       }
+#endif
     SET_INT_FPP;       /* internal = on, external = off */
     if( is_68882() )
       fpptype = FPP68882;
@@ -1109,6 +1120,11 @@ void checkfpp()
 {
 int    internal_exist,external_exist;
 int    external_68882;
+#ifdef LUNA2
+       if (machineid == LUNA_II) {
+               return;
+       }
+#endif
 
     SET_INT_FPP;       /* internal = on, external = off */
     if ( internal_exist = havefpp() && is_68882() ) {  /* internal = 68882 */
index a1998a3..1ba9fd0 100644 (file)
@@ -9,19 +9,37 @@
  *
  * %sccs.include.redist.c%
  *
- *     OMRON: $Id: pmap.c,v 1.2 92/06/14 06:19:40 moti Exp $
+ * from: hp300/hp300/pmap.c    7.15 (Berkeley) 12/27/92
  *
- * from: hp300/hp300/pmap.c    7.11 (Berkeley) 7/12/92
- *
- *     @(#)pmap.c      7.4 (Berkeley) %G%
+ *     @(#)pmap.c      7.5 (Berkeley) %G%
  */
 
 /*
- *     LUNA physical map management code taken from:
- *     HP9000/300 series physical map management code.
+ * LUNA physical map management code taken from:
+ * HP9000/300 series physical map management code.
+ *
+ * Supports:
+ *     68030 with on-chip MMU  (LUNA-I)
+ *     68040 with on-chip MMU  (LUNA-II)
+ *
+ * Notes:
+ *     Don't even pay lip service to multiprocessor support.
+ *
+ *     We assume TLB entries don't have process tags (except for the
+ *     supervisor/user distinction) so we only invalidate TLB entries
+ *     when changing mappings for the current (or kernel) pmap.  This is
+ *     technically not true for the 68551 but we flush the TLB on every
+ *     context switch, so it effectively winds up that way.
  *
- *     XXX will only work for PAGE_SIZE == NBPG (i.e. 4096 bytes).
- *     Hence, there is no point in defining DYNPGSIZE.
+ *     Bitwise and/or operations are significantly faster than bitfield
+ *     references so we use them when accessing STE/PTEs in the pmap_pte_*
+ *     macros.  Note also that the two are not always equivalent; e.g.:
+ *             (*(int *)pte & PG_PROT) [4] != pte->pg_prot [1]
+ *     and a couple of routines that deal with protection and wiring take
+ *     some shortcuts that assume the and/or definitions.
+ *
+ *     This implementation will only work for PAGE_SIZE == NBPG
+ *     (i.e. 4096 bytes).
  */
 
 /*
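The note that "(*(int *)pte & PG_PROT) [4] != pte->pg_prot [1]" is simply the difference between testing a bit in place and reading it back normalized through a bitfield. A hedged user-level sketch — the field layout and PG_PROT value here are invented for illustration and depend on the compiler's bitfield allocation; this is not the real luna68k PTE:

    #include <stdio.h>

    #define PG_PROT 0x4     /* assumed: protection bit sits at bit 2 */

    union pte {
            struct {
                    unsigned pg_v    : 2;   /* low bits, illustrative layout */
                    unsigned pg_prot : 1;
                    unsigned pg_rest : 29;
            } f;
            unsigned word;
    };

    int main(void)
    {
            union pte pte = { .word = 0 };

            pte.f.pg_prot = 1;
            /* Masking leaves the bit in place (4); the bitfield read gives 1.
             * Both are "true", but they are not numerically equal. */
            printf("mask=%u bitfield=%u\n",
                pte.word & PG_PROT, (unsigned)pte.f.pg_prot);
            return 0;
    }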
@@ -64,7 +82,7 @@
 
 #include <machine/cpu.h>
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
 struct {
        int collectscans;
        int collectpages;
@@ -76,8 +94,10 @@ struct {
        int kernel;     /* entering kernel mapping */
        int user;       /* entering user mapping */
        int ptpneeded;  /* needed to allocate a PT page */
+       int nochange;   /* no change at all */
        int pwchange;   /* no mapping change, just wiring or protection */
        int wchange;    /* no mapping change, just wiring */
+       int pchange;    /* no mapping change, just protection */
        int mchange;    /* was mapped but mapping to different page */
        int managed;    /* a managed page */
        int firstpv;    /* first mapping for this PA */
@@ -97,11 +117,21 @@ struct {
 } remove_stats;
 struct {
        int calls;
-       int pages;
+       int changed;
        int alreadyro;
        int alreadyrw;
 } protect_stats;
+struct chgstats {
+       int setcalls;
+       int sethits;
+       int setmiss;
+       int clrcalls;
+       int clrhits;
+       int clrmiss;
+} changebit_stats[16];
+#endif
 
 
+#ifdef DEBUG
 int debugmap = 0;
 int pmapdebug = 0x2000;
 #define PDB_FOLLOW     0x0001
@@ -115,34 +145,69 @@ int pmapdebug = 0x2000;
 #define PDB_COLLECT    0x0100
 #define PDB_PROTECT    0x0200
 #define PDB_SEGTAB     0x0400
+#define PDB_MULTIMAP   0x0800
 #define PDB_PARANOIA   0x2000
 #define PDB_WIRING     0x4000
 #define PDB_PVDUMP     0x8000
 
+#ifdef HAVEVAC
+int pmapvacflush = 0;
+#define        PVF_ENTER       0x01
+#define        PVF_REMOVE      0x02
+#define        PVF_PROTECT     0x04
+#define        PVF_TOTAL       0x80
+#endif
+
+#if defined(LUNA2)
+int dowriteback = 1;   /* 68040: enable writeback caching */
+int dokwriteback = 1;  /* 68040: enable writeback caching of kernel AS */
+#endif
+
 extern vm_offset_t pager_sva, pager_eva;
 #endif
 
 /*
  * Get STEs and PTEs for user/kernel address space
  */
-#define        pmap_ste(m, v)  (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
-#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
-
-#define pmap_ste_v(m, v)       (pmap_ste(m, v)->sg_v)
+#if defined(LUNA2)
+#define        pmap_ste1(m, v) \
+       (&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
+/* XXX assumes physically contiguous ST pages (if more than one) */
+#define pmap_ste2(m, v) \
+       (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
+                       - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
+#define        pmap_ste(m, v)  \
+       (&((m)->pm_stab[(vm_offset_t)(v) \
+                       >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
+#define pmap_ste_v(m, v) \
+       (mmutype == MMU_68040 \
+        ? ((*(int *)pmap_ste1(m, v) & SG_V) && \
+           (*(int *)pmap_ste2(m, v) & SG_V)) \
+        : (*(int *)pmap_ste(m, v) & SG_V))
+#else
+#define        pmap_ste(m, v)   (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
+#define pmap_ste_v(m, v) (*(int *)pmap_ste(m, v) & SG_V)
+#endif
 
 
+#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
 #define pmap_pte_pa(pte)       (*(int *)(pte) & PG_FRAME)
-#define pmap_pte_w(pte)                ((pte)->pg_w)
-#define pmap_pte_ci(pte)       ((pte)->pg_ci)
-#define pmap_pte_m(pte)                ((pte)->pg_m)
-#define pmap_pte_u(pte)                ((pte)->pg_u)
-#define pmap_pte_prot(pte)     ((pte)->pg_prot)
-#define pmap_pte_v(pte)                ((pte)->pg_v)
-#define pmap_pte_set_w(pte, v)         ((pte)->pg_w = (v))
-#define pmap_pte_set_prot(pte, v)      ((pte)->pg_prot = (v))
+#define pmap_pte_w(pte)                (*(int *)(pte) & PG_W)
+#define pmap_pte_ci(pte)       (*(int *)(pte) & PG_CI)
+#define pmap_pte_m(pte)                (*(int *)(pte) & PG_M)
+#define pmap_pte_u(pte)                (*(int *)(pte) & PG_U)
+#define pmap_pte_prot(pte)     (*(int *)(pte) & PG_PROT)
+#define pmap_pte_v(pte)                (*(int *)(pte) & PG_V)
+
+#define pmap_pte_set_w(pte, v) \
+       if (v) *(int *)(pte) |= PG_W; else *(int *)(pte) &= ~PG_W
+#define pmap_pte_set_prot(pte, v) \
+       if (v) *(int *)(pte) |= PG_PROT; else *(int *)(pte) &= ~PG_PROT
+#define pmap_pte_w_chg(pte, nw)                ((nw) ^ pmap_pte_w(pte))
+#define pmap_pte_prot_chg(pte, np)     ((np) ^ pmap_pte_prot(pte))
 
 /*
  * Given a map and a machine independent protection code,
 
 /*
  * Given a map and a machine independent protection code,
- * convert to a vax protection code.
+ * convert to an luna protection code.
  */
 #define pte_prot(m, p) (protection_codes[p])
 int    protection_codes[8];
@@ -182,16 +247,30 @@ vm_offset_t       virtual_avail;  /* VA of first avail page (after kernel bss)*/
 vm_offset_t    virtual_end;    /* VA of last avail page (end of kernel AS) */
 vm_offset_t    vm_first_phys;  /* PA of first managed page */
 vm_offset_t    vm_last_phys;   /* PA just past last managed page */
-#if defined(DYNPGSIZE)
-int            lunapagesperpage;       /* PAGE_SIZE / LUNA_PAGE_SIZE */
-#endif
 boolean_t      pmap_initialized = FALSE;       /* Has pmap_init completed? */
 char           *pmap_attributes;       /* reference and modify bits */
 boolean_t      pmap_initialized = FALSE;       /* Has pmap_init completed? */
 char           *pmap_attributes;       /* reference and modify bits */
+#ifdef HAVEVAC
+int            pmap_aliasmask; /* seperation at which VA aliasing ok */
+#endif
+#if defined(LUNA2)
+int            protostfree;    /* prototype (default) free ST map */
+#endif
 
 
-boolean_t      pmap_testbit();
-void           pmap_enter_ptpage();
+/*
+ * Internal routines
+ */
+void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
+boolean_t pmap_testbit __P((vm_offset_t, int));
+void pmap_changebit    __P((vm_offset_t, int, boolean_t));
+void pmap_enter_ptpage __P((pmap_t, vm_offset_t));
+#ifdef DEBUG
+void pmap_pvdump       __P((vm_offset_t));
+void pmap_check_wiring __P((char *, vm_offset_t));
+#endif
 
 
-extern struct physmap io_physmap[];    /* LUNA: io mapping information */
+/* pmap_remove_mapping flags */
+#define        PRM_TFLUSH      1
+#define        PRM_CFLUSH      2
 
 /*
  * Bootstrap memory allocator. This function allows for early dynamic
 
 /*
  * Bootstrap memory allocator. This function allows for early dynamic
@@ -234,7 +313,7 @@ pmap_init(phys_start, phys_end)
 {
        vm_offset_t     addr, addr2;
        vm_size_t       npg, s;
 {
        vm_offset_t     addr, addr2;
        vm_size_t       npg, s;
-       int             rv, index;
+       int             rv;
        extern char kstack[];
 
 #ifdef DEBUG
@@ -245,15 +324,6 @@ pmap_init(phys_start, phys_end)
         * Now that kernel map has been allocated, we can mark as
         * unavailable regions which we have mapped in locore.
         */
-       for(index = 0; io_physmap[index].pm_phys; index++)
-         {
-             addr = io_physmap[index].pm_phys;
-             (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
-                                &addr, luna_round_page(io_physmap[index].pm_size), FALSE);
-             if (addr != io_physmap[index].pm_phys)
-               goto bogons;
-         }
-
        addr = (vm_offset_t) Sysmap;
        vm_object_reference(kernel_object);
        (void) vm_map_find(kernel_map, kernel_object, addr,
@@ -337,8 +407,10 @@ bogons:
                kpt_pages->kpt_va = addr2;
                kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
        } while (addr != addr2);
-#ifdef DEBUG
+#ifdef PMAPSTATS
        kpt_stats.kpttotal = atop(s);
        kpt_stats.kpttotal = atop(s);
+#endif
+#ifdef DEBUG
        if (pmapdebug & PDB_INIT)
                printf("pmap_init: KPT: %d pages from %x to %x\n",
                       atop(s), addr, addr + s);
@@ -366,6 +438,14 @@ bogons:
                printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
 #endif
 
                printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
 #endif
 
+#if defined(LUNA2)
+       if (mmutype == MMU_68040) {
+               protostfree = ~l2tobm(0);
+               for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
+                       protostfree &= ~l2tobm(rv);
+       }
+#endif
+
        /*
         * Now it is safe to enable pv_table recording.
         */
        /*
         * Now it is safe to enable pv_table recording.
         */
@@ -460,6 +540,10 @@ pmap_pinit(pmap)
         */
        pmap->pm_stab = Segtabzero;
        pmap->pm_stpa = Segtabzeropa;
         */
        pmap->pm_stab = Segtabzero;
        pmap->pm_stpa = Segtabzeropa;
+#if defined(LUNA2)
+       if (mmutype == MMU_68040)
+               pmap->pm_stfree = protostfree;
+#endif
        pmap->pm_stchanged = TRUE;
        pmap->pm_count = 1;
        simple_lock_init(&pmap->pm_lock);
@@ -546,18 +630,14 @@ pmap_reference(pmap)
 void
 pmap_remove(pmap, sva, eva)
        register pmap_t pmap;
 void
 pmap_remove(pmap, sva, eva)
        register pmap_t pmap;
-       vm_offset_t sva, eva;
+       register vm_offset_t sva, eva;
 {
 {
-       register vm_offset_t pa, va;
+       register vm_offset_t nssva;
        register pt_entry_t *pte;
        register pt_entry_t *pte;
-       register pv_entry_t pv, npv;
-       pmap_t ptpmap;
-       int *ste, s, bits;
-       boolean_t firstpage = TRUE;
-       boolean_t flushcache = FALSE;
-#ifdef DEBUG
-       pt_entry_t opte;
+       boolean_t firstpage, needcflush;
+       int flags;
 
 
+#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
                printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
 #endif
@@ -565,206 +645,101 @@ pmap_remove(pmap, sva, eva)
        if (pmap == NULL)
                return;
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
        remove_stats.calls++;
 #endif
-       for (va = sva; va < eva; va += PAGE_SIZE) {
+       firstpage = TRUE;
+       needcflush = FALSE;
+       flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
+       while (sva < eva) {
+               nssva = luna_trunc_seg(sva) + LUNA_SEG_SIZE;
+               if (nssva == 0 || nssva > eva)
+                       nssva = eva;
                /*
-                * Weed out invalid mappings.
-                * Note: we assume that the segment table is always allocated.
+                * If VA belongs to an unallocated segment,
+                * skip to the next segment boundary.
                 */
-               if (!pmap_ste_v(pmap, va)) {
-                       /* XXX: avoid address wrap around */
-                       if (va >= luna_trunc_seg(VM_MAX_ADDRESS))
-                               break;
-                       va = luna_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
+               if (!pmap_ste_v(pmap, sva)) {
+                       sva = nssva;
                        continue;
                }
-               pte = pmap_pte(pmap, va);
-               pa = pmap_pte_pa(pte);
-               if (pa == 0)
-                       continue;
-#ifdef DEBUG
-               opte = *pte;
-               remove_stats.removes++;
-#endif
-               /*
-                * Update statistics
-                */
-               if (pmap_pte_w(pte))
-                       pmap->pm_stats.wired_count--;
-               pmap->pm_stats.resident_count--;
-
                /*
-                * Invalidate the PTEs.
-                * XXX: should cluster them up and invalidate as many
-                * as possible at once.
+                * Invalidate every valid mapping within this segment.
                 */
-#ifdef DEBUG
-               if (pmapdebug & PDB_REMOVE)
-                       printf("remove: invalidating pte at %x\n", pte);
-                       remove_stats.sflushes++;
+               pte = pmap_pte(pmap, sva);
+               while (sva < nssva) {
+                       if (pmap_pte_v(pte)) {
+#ifdef HAVEVAC
+                               if (pmap_aliasmask) {
+                                       /*
+                                        * Purge kernel side of VAC to ensure
+                                        * we get the correct state of any
+                                        * hardware maintained bits.
+                                        */
+                                       if (firstpage) {
+                                               DCIS();
+#ifdef PMAPSTATS
+                                               remove_stats.sflushes++;
 #endif
-#if defined(DYNPGSIZE)
-               {
-                       register int ix = 0;
+                                       }
+                                       /*
+                                        * Remember if we may need to
+                                        * flush the VAC due to a non-CI
+                                        * mapping.
+                                        */
+                                       if (!needcflush && !pmap_pte_ci(pte))
+                                               needcflush = TRUE;
 
 
-                       bits = 0;
-                       do {
-                               bits |= *(int *)pte & (PG_U|PG_M);
-                               *(int *)pte++ = PG_NV;
-                               TBIS(va + ix * LUNA_PAGE_SIZE);
-                       } while (++ix != lunapagesperpage);
-               }
-#else
-               bits = *(int *)pte & (PG_U|PG_M);
-               *(int *)pte = PG_NV;
-               TBIS(va);
-#endif
-               /*
-                * For user mappings decrement the wiring count on
-                * the PT page.  We do this after the PTE has been
-                * invalidated because vm_map_pageable winds up in
-                * pmap_pageable which clears the modify bit for the
-                * PT page.
-                */
-               if (pmap != kernel_pmap) {
-                       pte = pmap_pte(pmap, va);
-                       vm_map_pageable(pt_map, trunc_page(pte),
-                                       round_page(pte+1), TRUE);
-#ifdef DEBUG
-                       if (pmapdebug & PDB_WIRING)
-                               pmap_check_wiring("remove", trunc_page(pte));
-#endif
-               }
-               /*
-                * Remove from the PV table (raise IPL since we
-                * may be called at interrupt time).
-                */
-               if (pa < vm_first_phys || pa >= vm_last_phys)
-                       continue;
-               pv = pa_to_pvh(pa);
-               ste = (int *)0;
-               s = splimp();
-               /*
-                * If it is the first entry on the list, it is actually
-                * in the header and we must copy the following entry up
-                * to the header.  Otherwise we must search the list for
-                * the entry.  In either case we free the now unused entry.
-                */
-               if (pmap == pv->pv_pmap && va == pv->pv_va) {
-                       ste = (int *)pv->pv_ptste;
-                       ptpmap = pv->pv_ptpmap;
-                       npv = pv->pv_next;
-                       if (npv) {
-                               *pv = *npv;
-                               free((caddr_t)npv, M_VMPVENT);
-                       } else
-                               pv->pv_pmap = NULL;
-#ifdef DEBUG
-                       remove_stats.pvfirst++;
-#endif
-               } else {
-                       for (npv = pv->pv_next; npv; npv = npv->pv_next) {
-#ifdef DEBUG
-                               remove_stats.pvsearch++;
+                               }
 #endif
-                               if (pmap == npv->pv_pmap && va == npv->pv_va)
-                                       break;
-                               pv = npv;
+                               pmap_remove_mapping(pmap, sva, pte, flags);
+                               firstpage = FALSE;
                        }
-#ifdef DEBUG
-                       if (npv == NULL)
-                               panic("pmap_remove: PA not in pv_tab");
-#endif
-                       ste = (int *)npv->pv_ptste;
-                       ptpmap = npv->pv_ptpmap;
-                       pv->pv_next = npv->pv_next;
-                       free((caddr_t)npv, M_VMPVENT);
-                       pv = pa_to_pvh(pa);
-               }
-               /*
-                * If only one mapping left we no longer need to cache inhibit
-                */
-               if (pv->pv_pmap &&
-                   pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
-#ifdef DEBUG
-                       if (pmapdebug & PDB_CACHE)
-                               printf("remove: clearing CI for pa %x\n", pa);
-#endif
-                       pv->pv_flags &= ~PV_CI;
-                       pmap_changebit(pa, PG_CI, FALSE);
-#ifdef DEBUG
-                       if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
-                           (PDB_CACHE|PDB_PVDUMP))
-                               pmap_pvdump(pa);
-#endif
+                       pte++;
+                       sva += PAGE_SIZE;
                }
-
-               /*
-                * If this was a PT page we must also remove the
-                * mapping from the associated segment table.
-                */
-               if (ste) {
-#ifdef DEBUG
-                       remove_stats.ptinvalid++;
-                       if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
-                               printf("remove: ste was %x@%x pte was %x@%x\n",
-                                      *ste, ste,
-                                      *(int *)&opte, pmap_pte(pmap, va));
-                       }
+       }
+       /*
+        * Didn't do anything, no need for cache flushes
+        */
+       if (firstpage)
+               return;
+#ifdef HAVEVAC
+       /*
+        * In a couple of cases, we don't need to worry about flushing
+        * the VAC:
+        *      1. if this is a kernel mapping,
+        *         we have already done it
+        *      2. if it is a user mapping not for the current process,
+        *         it won't be there
+        */
+       if (pmap_aliasmask &&
+           (pmap == kernel_pmap || pmap != curproc->p_vmspace->vm_map.pmap))
+               needcflush = FALSE;
+#ifdef DEBUG
+       if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
+               if (pmapvacflush & PVF_TOTAL)
+                       DCIA();
+               else if (pmap == kernel_pmap)
+                       DCIS();
+               else
+                       DCIU();
+       } else
 #endif
-                       *ste = SG_NV;
-                       /*
-                        * If it was a user PT page, we decrement the
-                        * reference count on the segment table as well,
-                        * freeing it if it is now empty.
-                        */
-                       if (ptpmap != kernel_pmap) {
-#ifdef DEBUG
-                               if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
-                                       printf("remove: stab %x, refcnt %d\n",
-                                              ptpmap->pm_stab,
-                                              ptpmap->pm_sref - 1);
-                               if ((pmapdebug & PDB_PARANOIA) &&
-                                   ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
-                                       panic("remove: bogus ste");
+       if (needcflush) {
+               if (pmap == kernel_pmap) {
+                       DCIS();
+#ifdef PMAPSTATS
+                       remove_stats.sflushes++;
 #endif
-                               if (--(ptpmap->pm_sref) == 0) {
-#ifdef DEBUG
-                                       if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
-                                       printf("remove: free stab %x\n",
-                                              ptpmap->pm_stab);
+               } else {
+                       DCIU();
+#ifdef PMAPSTATS
+                       remove_stats.uflushes++;
 #endif
-                                       kmem_free(kernel_map,
-                                                 (vm_offset_t)ptpmap->pm_stab,
-                                                 LUNA_STSIZE);
-                                       ptpmap->pm_stab = Segtabzero;
-                                       ptpmap->pm_stpa = Segtabzeropa;
-                                       ptpmap->pm_stchanged = TRUE;
-                                       /*
-                                        * XXX may have changed segment table
-                                        * pointer for current process so
-                                        * update now to reload hardware.
-                                        */
-                                       if (ptpmap == curproc->p_vmspace->vm_map.pmap)
-                                               PMAP_ACTIVATE(ptpmap,
-                                                       (struct pcb *)curproc->p_addr, 1);
-                               }
-                       }
-                       if (ptpmap == kernel_pmap)
-                               TBIAS();
-                       else
-                               TBIAU();
-                       pv->pv_flags &= ~PV_PTPAGE;
-                       ptpmap->pm_ptpages--;
                }
-               /*
-                * Update saved attributes for managed page
-                */
-               pmap_attributes[pa_index(pa)] |= bits;
-               splx(s);
        }
+#endif
 }
 
 /*
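The rewritten pmap_remove above walks one segment at a time: nssva is the start of the next segment, clamped to eva, and a nssva of zero means the addition wrapped past the top of the address space, which is treated the same way. A self-contained sketch of that boundary logic (the segment size and helper names are stand-ins, not values from this commit):

typedef unsigned long vaddr;

#define	SEG_SIZE	0x400000UL		/* illustrative: one PT page's reach */
#define	trunc_seg(va)	((va) & ~(SEG_SIZE - 1))

static void
walk_range(sva, eva)
	vaddr sva, eva;
{
	vaddr nssva;

	while (sva < eva) {
		nssva = trunc_seg(sva) + SEG_SIZE;
		if (nssva == 0 || nssva > eva)	/* wrapped around or past the end */
			nssva = eva;
		/* ...visit each page in [sva, nssva) here... */
		sva = nssva;
	}
}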
@@ -789,6 +764,7 @@ pmap_page_protect(pa, prot)
                return;
 
        switch (prot) {
+       case VM_PROT_READ|VM_PROT_WRITE:
        case VM_PROT_ALL:
                break;
        /* copy_on_write */
@@ -806,8 +782,9 @@ pmap_page_protect(pa, prot)
                            pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
                                panic("pmap_page_protect: bad mapping");
 #endif
-                       pmap_remove(pv->pv_pmap, pv->pv_va,
-                                   pv->pv_va + PAGE_SIZE);
+                       pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+                                           PT_ENTRY_NULL,
+                                           PRM_TFLUSH|PRM_CFLUSH);
                }
                splx(s);
                break;
@@ -821,22 +798,25 @@ pmap_page_protect(pa, prot)
 void
 pmap_protect(pmap, sva, eva, prot)
        register pmap_t pmap;
-       vm_offset_t     sva, eva;
-       vm_prot_t       prot;
+       register vm_offset_t sva, eva;
+       vm_prot_t prot;
 {
+       register vm_offset_t nssva;
        register pt_entry_t *pte;
-       register vm_offset_t va;
-       int lunaprot;
-       boolean_t firstpage = TRUE;
+       boolean_t firstpage, needtflush;
+       int isro;
 
 #ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
                printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
-       protect_stats.calls++;
 #endif
+
        if (pmap == NULL)
                return;
 
+#ifdef PMAPSTATS
+       protect_stats.calls++;
+#endif
        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                pmap_remove(pmap, sva, eva);
                return;
@@ -844,74 +824,82 @@ pmap_protect(pmap, sva, eva, prot)
        if (prot & VM_PROT_WRITE)
                return;
 
-       pte = pmap_pte(pmap, sva);
-       lunaprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
-       for (va = sva; va < eva; va += PAGE_SIZE) {
+       isro = pte_prot(pmap, prot);
+       needtflush = active_pmap(pmap);
+       firstpage = TRUE;
+       while (sva < eva) {
+               nssva = luna_trunc_seg(sva) + LUNA_SEG_SIZE;
+               if (nssva == 0 || nssva > eva)
+                       nssva = eva;
                /*
-                * Page table page is not allocated.
-                * Skip it, we don't want to force allocation
-                * of unnecessary PTE pages just to set the protection.
+                * If VA belongs to an unallocated segment,
+                * skip to the next segment boundary.
                 */
-               if (!pmap_ste_v(pmap, va)) {
-                       /* XXX: avoid address wrap around */
-                       if (va >= luna_trunc_seg((vm_offset_t)-1))
-                               break;
-                       va = luna_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
-#if defined(DYNPGSIZE)
-                       pte = pmap_pte(pmap, va) + lunapagesperpage;
-#else
-                       pte = pmap_pte(pmap, va) + 1;
-#endif
+               if (!pmap_ste_v(pmap, sva)) {
+                       sva = nssva;
                        continue;
                }
                /*
-                * Page not valid.  Again, skip it.
-                * Should we do this?  Or set protection anyway?
+                * Change protection on mapping if it is valid and doesn't
+                * already have the correct protection.
                 */
-               if (!pmap_pte_v(pte)) {
-#if defined(DYNPGSIZE)
-                       pte += lunapagesperpage;
-#else
-                       pte++;
+               pte = pmap_pte(pmap, sva);
+               while (sva < nssva) {
+                       if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
+#ifdef HAVEVAC
+                               /*
+                                * Purge kernel side of VAC to ensure we
+                                * get the correct state of any hardware
+                                * maintained bits.
+                                *
+                                * XXX do we need to clear the VAC in
+                                * general to reflect the new protection?
+                                */
+                               if (firstpage && pmap_aliasmask)
+                                       DCIS();
 #endif
-                       continue;
-               }
-#if defined(DYNPGSIZE)
-               {
-                       register int ix = 0;
-
-                       do {
+#if defined(LUNA2)
                                /*
-                                * Clear caches as necessary if making RO.
-                                * XXX clear VAC?  Doesn't seem to be needed.
+                                * Clear caches if making RO (see section
+                                * "7.3 Cache Coherency" in the manual).
                                 */
-#ifdef DEBUG
-                               protect_stats.pages++;
-                               if (lunaprot && pmap_pte_prot(pte))
+                               if (isro && mmutype == MMU_68040) {
+                                       vm_offset_t pa = pmap_pte_pa(pte);
+
+                                       DCFP(pa);
+                                       ICPP(pa);
+                               }
+#endif
+                               pmap_pte_set_prot(pte, isro);
+                               if (needtflush)
+                                       TBIS(sva);
+#ifdef PMAPSTATS
+                               protect_stats.changed++;
+#endif
+                               firstpage = FALSE;
+                       }
+#ifdef PMAPSTATS
+                       else if (pmap_pte_v(pte)) {
+                               if (isro)
                                        protect_stats.alreadyro++;
-                               if (!lunaprot && !pmap_pte_prot(pte))
+                               else
                                        protect_stats.alreadyrw++;
+                       }
 #endif
-                               pmap_pte_set_prot(pte++, lunaprot);
-                               TBIS(va + ix * LUNA_PAGE_SIZE);
-                       } while (++ix != lunapagesperpage);
+                       pte++;
+                       sva += PAGE_SIZE;
                }
-#else
-               /*
-                * Clear caches as necessary if making RO.
-                * XXX clear VAC?  Doesn't seem to be needed.
-                */
-#ifdef DEBUG
-               protect_stats.pages++;
-               if (lunaprot && pmap_pte_prot(pte))
-                       protect_stats.alreadyro++;
-               if (!lunaprot && !pmap_pte_prot(pte))
-                       protect_stats.alreadyrw++;
-#endif
-               pmap_pte_set_prot(pte++, lunaprot);
-               TBIS(va);
-#endif
        }
+#if defined(HAVEVAC) && defined(DEBUG)
+       if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
+               if (pmapvacflush & PVF_TOTAL)
+                       DCIA();
+               else if (pmap == kernel_pmap)
+                       DCIS();
+               else
+                       DCIU();
+       }
+#endif
 }
 
 /*
@@ -948,7 +936,7 @@ pmap_enter(pmap, va, pa, prot, wired)
        if (pmap == NULL)
                return;
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
        if (pmap == kernel_pmap)
                enter_stats.kernel++;
        else
@@ -967,6 +955,7 @@ pmap_enter(pmap, va, pa, prot, wired)
        if (!pmap_ste_v(pmap, va))
                pmap_enter_ptpage(pmap, va);
 
+       pa = luna_trunc_page(pa);
        pte = pmap_pte(pmap, va);
        opa = pmap_pte_pa(pte);
 #ifdef DEBUG
@@ -978,7 +967,7 @@ pmap_enter(pmap, va, pa, prot, wired)
         * Mapping has not changed, must be protection or wiring change.
         */
        if (opa == pa) {
-#ifdef DEBUG
+#ifdef PMAPSTATS
                enter_stats.pwchange++;
 #endif
                /*
@@ -987,7 +976,7 @@ pmap_enter(pmap, va, pa, prot, wired)
                 * resident as long as there are valid mappings in them.
                 * Hence, if a user page is wired, the PT page will be also.
                 */
-               if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+               if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
 #ifdef DEBUG
                        if (pmapdebug & PDB_ENTER)
                                printf("enter: wiring change -> %x\n", wired);
@@ -996,10 +985,17 @@ pmap_enter(pmap, va, pa, prot, wired)
                                pmap->pm_stats.wired_count++;
                        else
                                pmap->pm_stats.wired_count--;
-#ifdef DEBUG
-                       enter_stats.wchange++;
+#ifdef PMAPSTATS
+                       if (pmap_pte_prot(pte) == pte_prot(pmap, prot))
+                               enter_stats.wchange++;
 #endif
                }
+#ifdef PMAPSTATS
+               else if (pmap_pte_prot(pte) != pte_prot(pmap, prot))
+                       enter_stats.pchange++;
+               else
+                       enter_stats.nochange++;
+#endif
                /*
                 * Retain cache inhibition status
                 */
@@ -1018,8 +1014,8 @@ pmap_enter(pmap, va, pa, prot, wired)
                if (pmapdebug & PDB_ENTER)
                        printf("enter: removing old mapping %x\n", va);
 #endif
-               pmap_remove(pmap, va, va + PAGE_SIZE);
-#ifdef DEBUG
+               pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
+#ifdef PMAPSTATS
                enter_stats.mchange++;
 #endif
        }
@@ -1042,7 +1038,7 @@ pmap_enter(pmap, va, pa, prot, wired)
                register pv_entry_t pv, npv;
                int s;
 
-#ifdef DEBUG
+#ifdef PMAPSTATS
                enter_stats.managed++;
 #endif
                pv = pa_to_pvh(pa);
@@ -1056,7 +1052,7 @@ pmap_enter(pmap, va, pa, prot, wired)
                 * No entries yet, use header as the first entry
                 */
                if (pv->pv_pmap == NULL) {
-#ifdef DEBUG
+#ifdef PMAPSTATS
                        enter_stats.firstpv++;
 #endif
                        pv->pv_va = va;
@@ -1083,10 +1079,61 @@ pmap_enter(pmap, va, pa, prot, wired)
                        npv->pv_next = pv->pv_next;
                        npv->pv_ptste = NULL;
                        npv->pv_ptpmap = NULL;
+                       npv->pv_flags = 0;
                        pv->pv_next = npv;
-#ifdef DEBUG
+#ifdef PMAPSTATS
                        if (!npv->pv_next)
                                enter_stats.secondpv++;
+#endif
+#ifdef HAVEVAC
+                       /*
+                        * Since there is another logical mapping for the
+                        * same page we may need to cache-inhibit the
+                        * descriptors on those CPUs with external VACs.
+                        * We don't need to CI if:
+                        *
+                        * - No two mappings belong to the same user pmaps.
+                        *   Since the cache is flushed on context switches
+                        *   there is no problem between user processes.
+                        *
+                        * - Mappings within a single pmap are a certain
+                        *   magic distance apart.  VAs at these appropriate
+                        *   boundaries map to the same cache entries or
+                        *   otherwise don't conflict.
+                        *
+                        * To keep it simple, we only check for these special
+                        * cases if there are only two mappings, otherwise we
+                        * punt and always CI.
+                        *
+                        * Note that there are no aliasing problems with the
+                        * on-chip data-cache when the WA bit is set.
+                        */
+                       if (pmap_aliasmask) {
+                               if (pv->pv_flags & PV_CI) {
+#ifdef DEBUG
+                                       if (pmapdebug & PDB_CACHE)
+                                       printf("enter: pa %x already CI'ed\n",
+                                              pa);
+#endif
+                                       checkpv = cacheable = FALSE;
+                               } else if (npv->pv_next ||
+                                          ((pmap == pv->pv_pmap ||
+                                            pmap == kernel_pmap ||
+                                            pv->pv_pmap == kernel_pmap) &&
+                                           ((pv->pv_va & pmap_aliasmask) !=
+                                            (va & pmap_aliasmask)))) {
+#ifdef DEBUG
+                                       if (pmapdebug & PDB_CACHE)
+                                       printf("enter: pa %x CI'ing all\n",
+                                              pa);
+#endif
+                                       cacheable = FALSE;
+                                       pv->pv_flags |= PV_CI;
+#ifdef PMAPSTATS
+                                       enter_stats.ci++;
+#endif
+                               }
+                       }
 #endif
                }
                splx(s);
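The HAVEVAC block above only cache-inhibits a physical page when a second virtual mapping could index a different line of an external virtually-addressed cache; mappings that agree in the alias bits stay cacheable. A one-function sketch of that test (pmap_aliasmask is real in the source, the wrapper name is not; LUNA hardware has no external VAC, the logic is carried over from the hp300 code):

static int
vac_may_conflict(va1, va2, aliasmask)
	unsigned long va1, va2, aliasmask;
{
	/* same index bits => both mappings land on the same cache lines */
	return (aliasmask != 0 && (va1 & aliasmask) != (va2 & aliasmask));
}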
@@ -1097,7 +1144,7 @@ pmap_enter(pmap, va, pa, prot, wired)
         */
        else if (pmap_initialized) {
                checkpv = cacheable = FALSE;
-#ifdef DEBUG
+#ifdef PMAPSTATS
                enter_stats.unmanaged++;
 #endif
        }
@@ -1110,36 +1157,48 @@ pmap_enter(pmap, va, pa, prot, wired)
                pmap->pm_stats.wired_count++;
 
 validate:
+#ifdef HAVEVAC
+       /*
+        * Purge kernel side of VAC to ensure we get correct state
+        * of HW bits so we don't clobber them.
+        */
+       if (pmap_aliasmask)
+               DCIS();
+#endif
        /*
-        * Now validate mapping with desired protection/wiring.
-        * Assume uniform modified and referenced status for all
-        * LUNA pages in a MACH page.
+        * Build the new PTE.
         */
-       npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
-       npte |= (*(int *)pte & (PG_M|PG_U));
+       npte = pa | pte_prot(pmap, prot) | (*(int *)pte & (PG_M|PG_U)) | PG_V;
        if (wired)
                npte |= PG_W;
        if (!checkpv && !cacheable)
                npte |= PG_CI;
+#if defined(LUNA2)
+       if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
+#ifdef DEBUG
+               if (dowriteback && (dokwriteback || pmap != kernel_pmap))
+#endif
+               npte |= PG_CCB;
+#endif
 #ifdef DEBUG
        if (pmapdebug & PDB_ENTER)
                printf("enter: new pte value %x\n", npte);
 #endif
-#if defined(DYNPGSIZE)
-       {
-               register int ix = 0;
-
-               do {
-                       *(int *)pte++ = npte;
-                       TBIS(va);
-                       npte += LUNA_PAGE_SIZE;
-                       va += LUNA_PAGE_SIZE;
-               } while (++ix != lunapagesperpage);
+       /*
+        * Remember if this was a wiring-only change.
+        * If so, we need not flush the TLB and caches.
+        */
+       wired = ((*(int *)pte ^ npte) == PG_W);
+#if defined(LUNA2)
+       if (mmutype == MMU_68040 && !wired) {
+               DCFP(pa);
+               ICPP(pa);
        }
-#else
-       *(int *)pte = npte;
-       TBIS(va);
 #endif
+       *(int *)pte = npte;
+       if (!wired && active_pmap(pmap))
+               TBIS(va);
+#ifdef HAVEVAC
        /*
         * The following is executed if we are entering a second
         * (or greater) mapping for a physical page and the mappings
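The validate path now builds the whole PTE in one expression, carries over the old PG_U/PG_M bits, and uses an XOR against the previous word to detect a wiring-only change, which needs neither a TBIS() nor a 68040 cache push. A compact sketch with made-up mask values (the real ones come from the pte header, not this commit):

#define	PG_V	0x00000001	/* illustrative masks */
#define	PG_W	0x00000002
#define	PG_U	0x00000008
#define	PG_M	0x00000010

static int
build_pte(pa, prot, opte, wired)
	unsigned long pa;
	int prot, opte, wired;
{
	int npte = (int)pa | prot | (opte & (PG_M|PG_U)) | PG_V;

	return (wired ? npte | PG_W : npte);
}

static int
wiring_only_change(opte, npte)
	int opte, npte;
{
	return ((opte ^ npte) == PG_W);	/* only the wired bit differs */
}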
@@ -1149,7 +1208,8 @@ validate:
         */
        if (checkpv && !cacheable) {
                pmap_changebit(pa, PG_CI, TRUE);
-#ifdef DEBUG
+               DCIA();
+#ifdef PMAPSTATS
                enter_stats.flushes++;
 #endif
 #ifdef DEBUG
@@ -1158,13 +1218,20 @@ validate:
                        pmap_pvdump(pa);
 #endif
        }
-#ifdef DEEBUG
-       if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
-#if defined(DYNPGSIZE)
-               va -= PAGE_SIZE;
+#ifdef DEBUG
+       else if (pmapvacflush & PVF_ENTER) {
+               if (pmapvacflush & PVF_TOTAL)
+                       DCIA();
+               else if (pmap == kernel_pmap)
+                       DCIS();
+               else
+                       DCIU();
+       }
+#endif
 #endif
+#ifdef DEBUG
+       if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
                pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
-       }
 #endif
 }
 
@@ -1211,27 +1278,18 @@ pmap_change_wiring(pmap, va, wired)
                        printf("pmap_change_wiring: invalid PTE for %x\n", va);
        }
 #endif
-       if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+       /*
+        * If wiring actually changed (always?) set the wire bit and
+        * update the wire count.  Note that wiring is not a hardware
+        * characteristic so there is no need to invalidate the TLB.
+        */
+       if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
+               pmap_pte_set_w(pte, wired);
                if (wired)
                        pmap->pm_stats.wired_count++;
                else
                        pmap->pm_stats.wired_count--;
        }
-       /*
-        * Wiring is not a hardware characteristic so there is no need
-        * to invalidate TLB.
-        */
-#if defined(DYNPGSIZE)
-       {
-               register int ix = 0;
-
-               do {
-                       pmap_pte_set_w(pte++, wired);
-               } while (++ix != lunapagesperpage);
-       }
-#else
-       pmap_pte_set_w(pte, wired);
-#endif
 }
 
 /*
@@ -1333,6 +1391,8 @@ pmap_collect(pmap)
 #ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_collect(%x)\n", pmap);
+#endif
+#ifdef PMAPSTATS
        kpt_stats.collectscans++;
 #endif
        s = splimp();
@@ -1380,11 +1440,12 @@ ok:
 #endif
                /*
                 * If all entries were invalid we can remove the page.
-                * We call pmap_remove to take care of invalidating ST
-                * and Sysptmap entries.
+                * We call pmap_remove_entry to take care of invalidating
+                * ST and Sysptmap entries.
                 */
                kpa = pmap_extract(pmap, pv->pv_va);
-               pmap_remove(pmap, pv->pv_va, pv->pv_va + LUNA_PAGE_SIZE);
+               pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
+                                   PRM_TFLUSH|PRM_CFLUSH);
                /*
                 * Use the physical address to locate the original
                 * (kmem_alloc assigned) address for the page and put
@@ -1405,9 +1466,11 @@ ok:
                *pkpt = kpt->kpt_next;
                kpt->kpt_next = kpt_free_list;
                kpt_free_list = kpt;
-#ifdef DEBUG
+#ifdef PMAPSTATS
                kpt_stats.kptinuse--;
                kpt_stats.collectpages++;
+#endif
+#ifdef DEBUG
                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                        pmapdebug = opmapdebug;
 
@@ -1459,23 +1522,10 @@ pmap_zero_page(phys)
                printf("pmap_zero_page(%x)\n", phys);
 #endif
        kva = (vm_offset_t) CADDR1;
-#if defined(DYNPGSIZE)
-       {
-               register int ix = 0;
-
-               do {
-                       pmap_enter(kernel_pmap, kva, phys,
-                                  VM_PROT_READ|VM_PROT_WRITE, TRUE);
-                       bzero((caddr_t)kva, LUNA_PAGE_SIZE);
-                       pmap_remove(kernel_pmap, kva, kva+LUNA_PAGE_SIZE);
-                       phys += LUNA_PAGE_SIZE;
-               } while (++ix != lunapagesperpage);
-       }
-#else
        pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
        bzero((caddr_t)kva, LUNA_PAGE_SIZE);
-       pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
-#endif
+       pmap_remove_mapping(kernel_pmap, kva, PT_ENTRY_NULL,
+                           PRM_TFLUSH|PRM_CFLUSH);
 }
 
 /*
@@ -1504,28 +1554,11 @@ pmap_copy_page(src, dst)
 #endif
        skva = (vm_offset_t) CADDR1;
        dkva = (vm_offset_t) CADDR2;
-#if defined(DYNPGSIZE)
-       {
-               register int ix = 0;
-
-               do {
-                       pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
-                       pmap_enter(kernel_pmap, dkva, dst,
-                                  VM_PROT_READ|VM_PROT_WRITE, TRUE);
-                       bcopy((caddr_t)skva, (caddr_t)dkva, PAGE_SIZE);
-                       /* CADDR1 and CADDR2 are virtually contiguous */
-                       pmap_remove(kernel_pmap, skva, skva+2*LUNA_PAGE_SIZE);
-                       src += LUNA_PAGE_SIZE;
-                       dst += LUNA_PAGE_SIZE;
-               } while (++ix != lunapagesperpage);
-       }
-#else
        pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
        pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
        bcopy((caddr_t)skva, (caddr_t)dkva, PAGE_SIZE);
        /* CADDR1 and CADDR2 are virtually contiguous */
        pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);
-#endif
 }
 
 /*
@@ -1683,6 +1716,258 @@ pmap_phys_address(ppn)
  * Miscellaneous support routines follow
  */
 
+/*
+ * Invalidate a single page denoted by pmap/va.
+ * If (pte != NULL), it is the already computed PTE for the page.
+ * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
+ * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
+ */
+/* static */
+void
+pmap_remove_mapping(pmap, va, pte, flags)
+       register pmap_t pmap;
+       register vm_offset_t va;
+       register pt_entry_t *pte;
+       int flags;
+{
+       register vm_offset_t pa;
+       register pv_entry_t pv, npv;
+       pmap_t ptpmap;
+       int *ste, s, bits;
+#ifdef DEBUG
+       pt_entry_t opte;
+
+       if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+               printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
+                      pmap, va, pte, flags);
+#endif
+
+       /*
+        * PTE not provided, compute it from pmap and va.
+        */
+       if (pte == PT_ENTRY_NULL) {
+               pte = pmap_pte(pmap, va);
+               if (*(int *)pte == PG_NV)
+                       return;
+       }
+#ifdef HAVEVAC
+       if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
+               /*
+                * Purge kernel side of VAC to ensure we get the correct
+                * state of any hardware maintained bits.
+                */
+               DCIS();
+#ifdef PMAPSTATS
+               remove_stats.sflushes++;
+#endif
+               /*
+                * If this is a non-CI user mapping for the current process,
+                * flush the VAC.  Note that the kernel side was flushed
+                * above so we don't worry about non-CI kernel mappings.
+                */
+               if (pmap == curproc->p_vmspace->vm_map.pmap &&
+                   !pmap_pte_ci(pte)) {
+                       DCIU();
+#ifdef PMAPSTATS
+                       remove_stats.uflushes++;
+#endif
+               }
+       }
+#endif
+       pa = pmap_pte_pa(pte);
+#ifdef DEBUG
+       opte = *pte;
+#endif
+#ifdef PMAPSTATS
+       remove_stats.removes++;
+#endif
+       /*
+        * Update statistics
+        */
+       if (pmap_pte_w(pte))
+               pmap->pm_stats.wired_count--;
+       pmap->pm_stats.resident_count--;
+
+       /*
+        * Invalidate the PTE after saving the reference modify info.
+        */
+#ifdef DEBUG
+       if (pmapdebug & PDB_REMOVE)
+               printf("remove: invalidating pte at %x\n", pte);
+#endif
+       bits = *(int *)pte & (PG_U|PG_M);
+       *(int *)pte = PG_NV;
+       if ((flags & PRM_TFLUSH) && active_pmap(pmap))
+               TBIS(va);
+       /*
+        * For user mappings decrement the wiring count on
+        * the PT page.  We do this after the PTE has been
+        * invalidated because vm_map_pageable winds up in
+        * pmap_pageable which clears the modify bit for the
+        * PT page.
+        */
+       if (pmap != kernel_pmap) {
+               vm_map_pageable(pt_map, trunc_page(pte),
+                               round_page(pte+1), TRUE);
+#ifdef DEBUG
+               if (pmapdebug & PDB_WIRING)
+                       pmap_check_wiring("remove", trunc_page(pte));
+#endif
+       }
+       /*
+        * If this isn't a managed page, we are all done.
+        */
+       if (pa < vm_first_phys || pa >= vm_last_phys)
+               return;
+       /*
+        * Otherwise remove it from the PV table
+        * (raise IPL since we may be called at interrupt time).
+        */
+       pv = pa_to_pvh(pa);
+       ste = (int *)0;
+       s = splimp();
+       /*
+        * If it is the first entry on the list, it is actually
+        * in the header and we must copy the following entry up
+        * to the header.  Otherwise we must search the list for
+        * the entry.  In either case we free the now unused entry.
+        */
+       if (pmap == pv->pv_pmap && va == pv->pv_va) {
+               ste = (int *)pv->pv_ptste;
+               ptpmap = pv->pv_ptpmap;
+               npv = pv->pv_next;
+               if (npv) {
+                       npv->pv_flags = pv->pv_flags;
+                       *pv = *npv;
+                       free((caddr_t)npv, M_VMPVENT);
+               } else
+                       pv->pv_pmap = NULL;
+#ifdef PMAPSTATS
+               remove_stats.pvfirst++;
+#endif
+       } else {
+               for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+#ifdef PMAPSTATS
+                       remove_stats.pvsearch++;
+#endif
+                       if (pmap == npv->pv_pmap && va == npv->pv_va)
+                               break;
+                       pv = npv;
+               }
+#ifdef DEBUG
+               if (npv == NULL)
+                       panic("pmap_remove: PA not in pv_tab");
+#endif
+               ste = (int *)npv->pv_ptste;
+               ptpmap = npv->pv_ptpmap;
+               pv->pv_next = npv->pv_next;
+               free((caddr_t)npv, M_VMPVENT);
+               pv = pa_to_pvh(pa);
+       }
+#ifdef HAVEVAC
+       /*
+        * If only one mapping left we no longer need to cache inhibit
+        */
+       if (pmap_aliasmask &&
+           pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
+#ifdef DEBUG
+               if (pmapdebug & PDB_CACHE)
+                       printf("remove: clearing CI for pa %x\n", pa);
+#endif
+               pv->pv_flags &= ~PV_CI;
+               pmap_changebit(pa, PG_CI, FALSE);
+#ifdef DEBUG
+               if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
+                   (PDB_CACHE|PDB_PVDUMP))
+                       pmap_pvdump(pa);
+#endif
+       }
+#endif
+       /*
+        * If this was a PT page we must also remove the
+        * mapping from the associated segment table.
+        */
+       if (ste) {
+#ifdef PMAPSTATS
+               remove_stats.ptinvalid++;
+#endif
+#ifdef DEBUG
+               if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+                       printf("remove: ste was %x@%x pte was %x@%x\n",
+                              *ste, ste, *(int *)&opte, pmap_pte(pmap, va));
+#endif
+#if defined(LUNA2)
+               if (mmutype == MMU_68040) {
+                       int *este = &ste[NPTEPG/SG4_LEV3SIZE];
+
+                       while (ste < este)
+                               *ste++ = SG_NV;
+#ifdef DEBUG
+                       ste -= NPTEPG/SG4_LEV3SIZE;
+#endif
+               } else
+#endif
+               *ste = SG_NV;
+               /*
+                * If it was a user PT page, we decrement the
+                * reference count on the segment table as well,
+                * freeing it if it is now empty.
+                */
+               if (ptpmap != kernel_pmap) {
+#ifdef DEBUG
+                       if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
+                               printf("remove: stab %x, refcnt %d\n",
+                                      ptpmap->pm_stab, ptpmap->pm_sref - 1);
+                       if ((pmapdebug & PDB_PARANOIA) &&
+                           ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
+                               panic("remove: bogus ste");
+#endif
+                       if (--(ptpmap->pm_sref) == 0) {
+#ifdef DEBUG
+                               if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
+                                       printf("remove: free stab %x\n",
+                                              ptpmap->pm_stab);
+#endif
+                               kmem_free(kernel_map,
+                                         (vm_offset_t)ptpmap->pm_stab,
+                                         LUNA_STSIZE);
+                               ptpmap->pm_stab = Segtabzero;
+                               ptpmap->pm_stpa = Segtabzeropa;
+#if defined(LUNA2)
+                               if (mmutype == MMU_68040)
+                                       ptpmap->pm_stfree = protostfree;
+#endif
+                               ptpmap->pm_stchanged = TRUE;
+                               /*
+                                * XXX may have changed segment table
+                                * pointer for current process so
+                                * update now to reload hardware.
+                                */
+                               if (ptpmap == curproc->p_vmspace->vm_map.pmap)
+                                       PMAP_ACTIVATE(ptpmap,
+                                           (struct pcb *)curproc->p_addr, 1);
+                       }
+               }
+#if 0
+               /*
+                * XXX this should be unnecessary as we have been
+                * flushing individual mappings as we go.
+                */
+               if (ptpmap == kernel_pmap)
+                       TBIAS();
+               else
+                       TBIAU();
+#endif
+               pv->pv_flags &= ~PV_PTPAGE;
+               ptpmap->pm_ptpages--;
+       }
+       /*
+        * Update saved attributes for managed page
+        */
+       pmap_attributes[pa_index(pa)] |= bits;
+       splx(s);
+}
+
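pmap_remove_mapping() above saves the hardware-maintained reference and modify bits of the dying PTE into pmap_attributes[] before stuffing PG_NV, so later pmap_is_referenced()/pmap_is_modified() queries still see them. A stand-alone sketch of that save-then-invalidate order (array size and mask values are illustrative):

#define	PG_U	0x08		/* referenced */
#define	PG_M	0x10		/* modified */
#define	PG_NV	0x00

static char attrs[1024];	/* saved bits, one byte per managed page */

static void
invalidate_pte(pte, pgindex)
	int *pte, pgindex;
{
	attrs[pgindex] |= *pte & (PG_U|PG_M);	/* capture before clearing */
	*pte = PG_NV;
}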
 /* static */
 boolean_t
 pmap_testbit(pa, bit)
@@ -1705,6 +1990,13 @@ pmap_testbit(pa, bit)
                splx(s);
                return(TRUE);
        }
+#ifdef HAVEVAC
+       /*
+        * Flush VAC to get correct state of any hardware maintained bits.
+        */
+       if (pmap_aliasmask && (bit & (PG_U|PG_M)))
+               DCIS();
+#endif
        /*
         * Not found, check current mappings returning
         * immediately if found.
@@ -1712,23 +2004,10 @@ pmap_testbit(pa, bit)
        if (pv->pv_pmap != NULL) {
                for (; pv; pv = pv->pv_next) {
                        pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
-#if defined(DYNPGSIZE)
-                       {
-                               register int ix = 0;
-
-                               do {
-                                       if (*pte++ & bit) {
-                                               splx(s);
-                                               return(TRUE);
-                                       }
-                               } while (++ix != lunapagesperpage);
-                       }
-#else
                        if (*pte & bit) {
                                splx(s);
                                return(TRUE);
                        }
-#endif
                }
        }
        splx(s);
@@ -1736,6 +2015,7 @@ pmap_testbit(pa, bit)
 }
 
 /* static */
+void
 pmap_changebit(pa, bit, setem)
        register vm_offset_t pa;
        int bit;
@@ -1746,6 +2026,9 @@ pmap_changebit(pa, bit, setem)
        vm_offset_t va;
        int s;
        boolean_t firstpage = TRUE;
+#ifdef PMAPSTATS
+       struct chgstats *chgp;
+#endif
 
 #ifdef DEBUG
        if (pmapdebug & PDB_BITS)
@@ -1755,6 +2038,13 @@ pmap_changebit(pa, bit, setem)
        if (pa < vm_first_phys || pa >= vm_last_phys)
                return;
 
+#ifdef PMAPSTATS
+       chgp = &changebit_stats[(bit>>2)-1];
+       if (setem)
+               chgp->setcalls++;
+       else
+               chgp->clrcalls++;
+#endif
        pv = pa_to_pvh(pa);
        s = splimp();
        /*
@@ -1787,34 +2077,64 @@ pmap_changebit(pa, bit, setem)
                        }
 
                        pte = (int *) pmap_pte(pv->pv_pmap, va);
-#if defined(DYNPGSIZE)
-                       {
-                               register int ix = 0;
-
-                               do {
-                                       if (setem)
-                                               npte = *pte | bit;
-                                       else
-                                               npte = *pte & ~bit;
-                                       if (*pte != npte) {
-                                               *pte = npte;
-                                               TBIS(va);
-                                       }
-                                       va += LUNA_PAGE_SIZE;
-                                       pte++;
-                               } while (++ix != lunapagesperpage);
+#ifdef HAVEVAC
+                       /*
+                        * Flush VAC to ensure we get correct state of HW bits
+                        * so we don't clobber them.
+                        */
+                       if (firstpage && pmap_aliasmask) {
+                               firstpage = FALSE;
+                               DCIS();
                        }
-#else
+#endif
                        if (setem)
                                npte = *pte | bit;
                        else
                                npte = *pte & ~bit;
                        if (*pte != npte) {
+#if defined(LUNA2)
+                               /*
+                                * If we are changing caching status or
+                                * protection make sure the caches are
+                                * flushed (but only once).
+                                */
+                               if (firstpage && mmutype == MMU_68040 &&
+                                   (bit == PG_RO && setem ||
+                                    (bit & PG_CMASK))) {
+                                       firstpage = FALSE;
+                                       DCFP(pa);
+                                       ICPP(pa);
+                               }
+#endif
                                *pte = npte;
-                               TBIS(va);
+                               if (active_pmap(pv->pv_pmap))
+                                       TBIS(va);
+#ifdef PMAPSTATS
+                               if (setem)
+                                       chgp->sethits++;
+                               else
+                                       chgp->clrhits++;
+#endif
+                       }
+#ifdef PMAPSTATS
+                       else {
+                               if (setem)
+                                       chgp->setmiss++;
+                               else
+                                       chgp->clrmiss++;
                        }
 #endif
                }
+#if defined(HAVEVAC) && defined(DEBUG)
+               if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
+                       if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
+                               DCIA();
+                       else if (toflush == 2)
+                               DCIS();
+                       else
+                               DCIU();
+               }
+#endif
        }
        splx(s);
 }
@@ -1833,6 +2153,8 @@ pmap_enter_ptpage(pmap, va)
 #ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
                printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
+#endif
+#ifdef PMAPSTATS
        enter_stats.ptpneeded++;
 #endif
        /*
@@ -1847,6 +2169,15 @@ pmap_enter_ptpage(pmap, va)
                        kmem_alloc(kernel_map, LUNA_STSIZE);
                pmap->pm_stpa = (st_entry_t *)
                        pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
+#if defined(LUNA2)
+               if (mmutype == MMU_68040) {
+#ifdef DEBUG
+                       if (dowriteback && dokwriteback)
+#endif
+                       pmap_changebit((vm_offset_t)pmap->pm_stab, PG_CCB, 0);
+                       pmap->pm_stfree = protostfree;
+               }
+#endif
                pmap->pm_stchanged = TRUE;
                /*
                 * XXX may have changed segment table pointer for current
@@ -1862,6 +2193,45 @@ pmap_enter_ptpage(pmap, va)
        }
 
        ste = pmap_ste(pmap, va);
+#if defined(LUNA2)
+       /*
+        * Allocate level 2 descriptor block if necessary
+        */
+       if (mmutype == MMU_68040) {
+               if (!ste->sg_v) {
+                       int ix;
+                       caddr_t addr;
+                       
+                       ix = bmtol2(pmap->pm_stfree);
+                       if (ix == -1)
+                               panic("enter: out of address space"); /* XXX */
+                       pmap->pm_stfree &= ~l2tobm(ix);
+                       addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
+                       bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
+                       addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
+                       *(int *)ste = (u_int)addr | SG_RW | SG_U | SG_V;
+#ifdef DEBUG
+                       if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+                               printf("enter: alloc ste2 %d(%x)\n", ix, addr);
+#endif
+               }
+               ste = pmap_ste2(pmap, va);
+               /*
+                * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
+                * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
+                * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
+                * PT page--the unit of allocation.  We set `ste' to point
+                * to the first entry of that chunk which is validated in its
+                * entirety below.
+                */
+               ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
+#ifdef DEBUG
+               if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+                       printf("enter: ste2 %x (%x)\n",
+                              pmap_ste2(pmap, va), ste);
+#endif
+       }
+#endif
        va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
 
        /*
@@ -1886,7 +2256,7 @@ pmap_enter_ptpage(pmap, va)
                        if ((kpt = kpt_free_list) == (struct kpt_page *)0)
                                panic("pmap_enter_ptpage: can't get KPT page");
                }
-#ifdef DEBUG
+#ifdef PMAPSTATS
                if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
                        kpt_stats.kptmaxuse = kpt_stats.kptinuse;
 #endif
@@ -1925,10 +2295,29 @@ pmap_enter_ptpage(pmap, va)
                        panic("pmap_enter: vm_fault failed");
 #endif
                ptpa = pmap_extract(kernel_pmap, va);
-#ifdef notdef
-               PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
+#ifdef DEBUG
+               PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
+#endif
+       }
+#if defined(LUNA2)
+       /*
+        * Turn off copyback caching of page table pages,
+        * could get ugly otherwise.
+        */
+#ifdef DEBUG
+       if (dowriteback && dokwriteback)
+#endif
+       if (mmutype == MMU_68040) {
+               int *pte = (int *)pmap_pte(kernel_pmap, va);
+#ifdef DEBUG
+               if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
+                       printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
+                              pmap == kernel_pmap ? "Kernel" : "User",
+                              va, ptpa, pte, *pte);
 #endif
+               pmap_changebit(ptpa, PG_CCB, 0);
        }
+#endif
        /*
         * Locate the PV entry in the kernel for this PT page and
         * record the STE address.  This is so that we can invalidate
@@ -1962,6 +2351,16 @@ pmap_enter_ptpage(pmap, va)
         * it would be difficult to identify ST pages in pmap_pageable to
         * release them.  We also avoid the overhead of vm_map_pageable.
         */
+#if defined(LUNA2)
+       if (mmutype == MMU_68040) {
+               st_entry_t *este;
+
+               for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
+                       *(int *)ste = ptpa | SG_U | SG_RW | SG_V;
+                       ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
+               }
+       } else
+#endif
        *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
        if (pmap != kernel_pmap) {
                pmap->pm_sref++;
@@ -1971,6 +2370,7 @@ pmap_enter_ptpage(pmap, va)
                               pmap->pm_stab, pmap->pm_sref);
 #endif
        }
+#if 0
        /*
         * Flush stale TLB info.
         */
@@ -1978,11 +2378,14 @@ pmap_enter_ptpage(pmap, va)
                TBIAS();
        else
                TBIAU();
+#endif
        pmap->pm_ptpages++;
        splx(s);
 }
 
 #ifdef DEBUG
+/* static */
+void
 pmap_pvdump(pa)
        vm_offset_t pa;
 {
@@ -1996,6 +2399,8 @@ pmap_pvdump(pa)
        printf("\n");
 }
 
+/* static */
+void
 pmap_check_wiring(str, va)
        char *str;
        vm_offset_t va;
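
The 68040 path added to pmap_enter_ptpage() above leans on a small piece of constant arithmetic that is easy to check in isolation.  The sketch below only illustrates that arithmetic; the values (4K pages, 4-byte descriptors, 64 level 3 descriptors per level 2 entry) are assumptions taken from the comments in this change, not from the real luna68k headers.

    #include <assert.h>
    #include <stdio.h>

    /* Assumed values, per the comments in this change. */
    #define NBPG            4096            /* page size */
    #define NPTEPG          (NBPG / 4)      /* 4-byte PTEs per PT page: 1024 */
    #define SG4_LEV3SIZE    64              /* level 3 descriptors per level 2 entry */

    int
    main(void)
    {
            /* One level 2 descriptor covers 64 * 4KB = 256KB of VA. */
            assert(SG4_LEV3SIZE * NBPG == 256 * 1024);

            /*
             * A PT page holds 1024 PTEs and maps 4MB, so it needs
             * NPTEPG/SG4_LEV3SIZE = 16 level 2 descriptors, which occupy
             * NBPG/SG4_LEV3SIZE = 64 bytes.  That is the alignment the
             * code above applies to `ste' before validating the chunk.
             */
            assert(NPTEPG / SG4_LEV3SIZE == 16);
            assert(NBPG / SG4_LEV3SIZE == 64);

            printf("%d level 2 descriptors (%d bytes) per PT page\n",
                NPTEPG / SG4_LEV3SIZE, NBPG / SG4_LEV3SIZE);
            return 0;
    }
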
index 7ec16b7..b87af4a 100644 (file)
@@ -9,29 +9,21 @@
  *
  * %sccs.include.redist.c%
  *
- *     OMRON: $Id: pmap_bootstrap.c,v 1.2 92/06/14 18:11:27 moti Exp $
+ * from: hp300/hp300/pmap_bootstrap.c  7.4 (Berkeley) 12/27/92
  *
- * from: hp300/hp300/pmap_bootstrap.c  7.1 (Berkeley) 6/5/92
- *
- *     @(#)pmap_bootstrap.c    7.2 (Berkeley) %G%
+ *     @(#)pmap_bootstrap.c    7.3 (Berkeley) %G%
  */
 
 #include <sys/param.h>
+#include <sys/msgbuf.h>
 #include <luna68k/luna68k/pte.h>
 #include <machine/vmparam.h>
 #include <machine/cpu.h>
 
 #include <vm/vm.h>
 
-/*
- * Allocate various and sundry SYSMAPs used in the days of old VM
- * and not yet converted.  XXX.
- */
-#define BSDVM_COMPAT   1
-
 extern char *etext;
 extern int Sysptsize;
-
 extern char *proc0paddr;
 extern struct ste *Sysseg;
 extern struct pte *Sysptmap, *Sysmap;
@@ -41,43 +33,18 @@ extern int maxmem, physmem;
 extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
 extern vm_size_t mem_size;
 extern int protection_codes[];
-#if defined(DYNPGSIZE)
-extern int lunapagesperpage;
-#endif
-
-#if BSDVM_COMPAT
-#include <sys/msgbuf.h>
 
 /*
- * All those kernel PT submaps that BSD is so fond of
+ * Special purpose kernel virtual addresses, used for mapping
+ * physical pages for a variety of temporary or permanent purposes:
+ *
+ *     CADDR1, CADDR2: pmap zero/copy operations
+ *     vmmap:          /dev/mem, crash dumps, parity error checking
+ *     ledbase:        SPU LEDs
+ *     msgbufp:        kernel message buffer
  */
-struct pte     *CMAP1, *CMAP2, *mmap;
-caddr_t                CADDR1, CADDR2, vmmap;
-struct pte     *msgbufmap;
+caddr_t                CADDR1, CADDR2, vmmap, ledbase;
 struct msgbuf  *msgbufp;
-#endif
-
-/* 
- * LUNA H/W information.
- */
-struct physmap io_physmap[] =
-{
-       {0x40000000,0x00100000,1},      /* debugger */
-       {0x41000000,0x00020000,1},      /* PROM */
-       {0x45000000,0x00000800,0},      /* calendar clock */
-       {0x49000000,0x00000004,0},      /* pio-0 */
-       {0x4D000000,0x00000004,0},      /* pio-1 */
-       {0x51000000,0x00000008,0},      /* sio */
-       {0x61000000,0x00000001,0},      /* TAS register */
-       {0x63000000,0x00000001,0},      /* SYSINT flag */
-       {0x6B000000,0x00000001,0},      /* internal FPP enable/disable */
-       {0x6F000000,0x00000001,0},      /* external FPP enable/disable */
-       {0x71000000,0x00020000,0},      /* 3 port RAM */
-       {0,0,0}                         /* terminate */
-};
-#define        IO_DBG_OFF      0               /* debugger offset in io_physmap[] */
-#define        IOPTPAGE        ((sizeof(io_physmap)/sizeof(struct physmap))-1)
-int    ioptpage = IOPTPAGE;            /* for locore */
 
 /*
  * Bootstrap the VM system.
@@ -95,7 +62,7 @@ pmap_bootstrap(nextpa, firstpa)
        vm_offset_t nextpa;
        register vm_offset_t firstpa;
 {
-       vm_offset_t kstpa, kptpa, iopa, kptmpa, ukptpa, p0upa;
+       vm_offset_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
        u_int nptpages, kstsize;
        register u_int protoste, protopte, *ste, *pte, *epte;
 
@@ -108,20 +75,22 @@ pmap_bootstrap(nextpa, firstpa)
         *      kptpa           statically allocated
         *                      kernel PT pages         Sysptsize+ pages
         *
-        *      kptmpa          kernel PT map           1 page
-        *
-        *      ukptpa          Uarea kernel PT page    1 page
+        * [ Sysptsize is the number of pages of PT, hence we need to
+        *   round the total to a page boundary at the end. ]
         *
-        *      iopa            IO and debbuger space
-        *                      PT pages                IOPTPAGE pages
+        *      kptmpa          kernel PT map           1 page
         *
+        *      lkptpa          last kernel PT page     1 page
         *
         *      p0upa           proc 0 u-area           UPAGES pages
         *
         * The KVA corresponding to any of these PAs is:
         *      (PA - firstpa + KERNBASE).
         */
-       kstsize = 1;
+       if (mmutype == MMU_68040)
+               kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
+       else
+               kstsize = 1;
        kstpa = nextpa;
        nextpa += kstsize * NBPG;
        kptpa = nextpa;
@@ -129,10 +98,8 @@ pmap_bootstrap(nextpa, firstpa)
        nextpa += nptpages * NBPG;
        kptmpa = nextpa;
        nextpa += NBPG;
-       ukptpa = nextpa;
+       lkptpa = nextpa;
        nextpa += NBPG;
-       iopa = nextpa;
-       nextpa += IOPTPAGE * NBPG;
        p0upa = nextpa;
        nextpa += UPAGES * NBPG;
 
@@ -141,47 +108,138 @@ pmap_bootstrap(nextpa, firstpa)
         *
         * On 68030s and earlier MMUs the two are identical except for
         * the valid bits so both are initialized with essentially the
-        * same values.
-        * 0x3FF00000 for UPAGES is used for mapping the current process u-area
-        * (u + kernel stack). 
-        */
-
-       /*
-        * Map the page table pages in both the HW segment table
-        * and the software Sysptmap.  Note that Sysptmap is also
-        * considered a PT page hence the +1.
-        */
-       ste = (u_int *)kstpa;
-       pte = (u_int *)kptmpa;
-       epte = &pte[nptpages+1];
-       protoste = kptpa | SG_RW | SG_V;
-       protopte = kptpa | PG_RW | PG_CI | PG_V;
-       while (pte < epte) {
-           *ste++ = protoste;
-           *pte++ = protopte;
-           protoste += NBPG;
-           protopte += NBPG;
-       }
-       /*
-        * Invalidate all but the last remaining entries in both.
+        * same values.  On the 68040, which has a mandatory 3-level
+        * structure, the segment table holds the level 1 table and part
+        * (or all) of the level 2 table and hence is considerably
+        * different.  Here the first level consists of 128 descriptors
+        * (512 bytes) each mapping 32mb of address space.  Each of these
+        * points to blocks of 128 second level descriptors (512 bytes)
+        * each mapping 256kb.  Note that there may be additional "segment
+        * table" pages depending on how large MAXKL2SIZE is.
+        *
+        * Portions of the last segment of KVA space (0x3FF00000 -
+        * 0x3FFFFFFF) are mapped for a couple of purposes.  0x3FF00000
+        * for UPAGES is used for mapping the current process u-area
+        * (u + kernel stack).  The very last page (0x3FFFF000) is mapped
+        * to the last physical page of RAM to give us a region in which
+        * PA == VA.  We use the first part of this page for enabling
+        * and disabling mapping.  The last part of this page also contains
+        * info left by the boot ROM.
+        *
+        * XXX cramming two levels of mapping into the single "segment"
+        * table on the 68040 is intended as a temporary hack to get things
+        * working.  The 224mb of address space that this allows will most
+        * likely be insufficient in the future (at least for the kernel).
         */
-       epte = &((u_int *)kptmpa)[NPTEPG];
-       while (pte < epte) {
-           *ste++ = SG_NV;
-           *pte++ = PG_NV;
+#if defined(LUNA2)
+       if (mmutype == MMU_68040) {
+               register int num;
+
+               /*
+                * First invalidate the entire "segment table" pages
+                * (levels 1 and 2 have the same "invalid" value).
+                */
+               pte = (u_int *)kstpa;
+               epte = &pte[kstsize * NPTEPG];
+               while (pte < epte)
+                       *pte++ = SG_NV;
+               /*
+                * Initialize level 2 descriptors (which immediately
+                * follow the level 1 table).  We need:
+                *      NPTEPG / SG4_LEV3SIZE
+                * level 2 descriptors to map each of the nptpages+1
+                * pages of PTEs.  Note that we set the "used" bit
+                * now to save the HW the expense of doing it.
+                */
+               num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
+               pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
+               epte = &pte[num];
+               protoste = kptpa | SG_U | SG_RW | SG_V;
+               while (pte < epte) {
+                       *pte++ = protoste;
+                       protoste += (SG4_LEV3SIZE * sizeof(struct ste));
+               }
+               /*
+                * Initialize level 1 descriptors.  We need:
+                *      roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
+                * level 1 descriptors to map the `num' level 2's.
+                */
+               pte = (u_int *)kstpa;
+               epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
+               protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
+               while (pte < epte) {
+                       *pte++ = protoste;
+                       protoste += (SG4_LEV2SIZE * sizeof(struct ste));
+               }
+               /*
+                * Initialize the final level 1 descriptor to map the last
+                * block of level 2 descriptors.
+                */
+               ste = &((u_int *)kstpa)[KERNELSTACK >> SG4_SHIFT1];
+               pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
+               *ste = (u_int)pte | SG_U | SG_RW | SG_V;
+               /*
+                * Now initialize the final portion of that block of
+                * descriptors to map the "last PT page".
+                */
+               pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
+               epte = &pte[NPTEPG/SG4_LEV3SIZE];
+               protoste = lkptpa | SG_U | SG_RW | SG_V;
+               while (pte < epte) {
+                       *pte++ = protoste;
+                       protoste += (SG4_LEV3SIZE * sizeof(struct ste));
+               }
+               /*
+                * Initialize Sysptmap
+                */
+               pte = (u_int *)kptmpa;
+               epte = &pte[nptpages+1];
+               protopte = kptpa | PG_RW | PG_CI | PG_V;
+               while (pte < epte) {
+                       *pte++ = protopte;
+                       protopte += NBPG;
+               }
+               pte = &((u_int *)kptmpa)[KERNELSTACK>>SG_ISHIFT];
+               *pte = lkptpa | PG_RW | PG_CI | PG_V;
+       } else
+#endif
+       {
+               /*
+                * Map the page table pages in both the HW segment table
+                * and the software Sysptmap.  Note that Sysptmap is also
+                * considered a PT page hence the +1.
+                */
+               ste = (u_int *)kstpa;
+               pte = (u_int *)kptmpa;
+               epte = &pte[nptpages+1];
+               protoste = kptpa | SG_RW | SG_V;
+               protopte = kptpa | PG_RW | PG_CI | PG_V;
+               while (pte < epte) {
+                       *ste++ = protoste;
+                       *pte++ = protopte;
+                       protoste += NBPG;
+                       protopte += NBPG;
+               }
+               /*
+                * Invalidate all entries.
+                */
+               epte = &((u_int *)kptmpa)[NPTEPG];
+               while (pte < epte) {
+                       *ste++ = SG_NV;
+                       *pte++ = PG_NV;
+               }
+               /* LUNA: Uarea pt map */
+               ste = (u_int *)kstpa;
+               pte = (u_int *)kptmpa;
+               ste[KERNELSTACK>>SG_ISHIFT] = lkptpa | SG_RW | SG_V;
+               pte[KERNELSTACK>>SG_ISHIFT] = lkptpa | PG_RW | PG_CI | PG_V;
        }
-       /* LUNA: Uarea pt map */
-       ste = (u_int *)kstpa;
-       pte = (u_int *)kptmpa;
-       ste[KERNELSTACK>>SG_ISHIFT] = ukptpa | SG_RW | SG_V;
-       pte[KERNELSTACK>>SG_ISHIFT] = ukptpa | PG_RW | PG_CI | PG_V;
-
        /*
         * Invalidate all but the final entry in the last kernel PT page
         * (u-area PTEs will be validated later).  The final entry maps
         * the last page of physical memory.
         */
-       pte = (u_int *)ukptpa;
+       pte = (u_int *)lkptpa;
        epte = &pte[NPTEPG];
        while (pte < epte)
                *pte++ = PG_NV;
@@ -214,58 +272,17 @@ pmap_bootstrap(nextpa, firstpa)
         */
        epte = &((u_int *)kptpa)[luna_btop(nextpa - firstpa)];
        protopte = (protopte & ~PG_PROT) | PG_RW;
+#if defined(LUNA2)
+       /*
+        * Enable copy-back caching of data pages
+        */
+       if (mmutype == MMU_68040)
+               protopte |= PG_CCB;
+#endif
        while (pte < epte) {
                *pte++ = protopte;
                protopte += NBPG;
        }
-
-       /* initialize; all IO pte invalidate */
-       pte = (u_int *)iopa;
-       epte = &pte[IOPTPAGE * NPTEPG];
-       while (pte < epte)
-               *pte++ = PG_NV;
-       /*
-        * Here, we validate STEs and kernel page table PTEs
-        * for io space.
-        */
-       {
-           int index;
-
-           protoste = iopa | SG_RW | SG_V;
-           protopte = iopa | PG_RW | PG_CI | PG_V;
-           for (index = 0; io_physmap[index].pm_phys; index++)
-             {
-                 ste = &((u_int *)kstpa)[io_physmap[index].pm_phys/NBSEG];
-                 pte = &((u_int *)kptmpa)[io_physmap[index].pm_phys/NBSEG];
-                 *ste = protoste;
-                 *pte = protopte;
-                 protoste += NBPG;
-                 protopte += NBPG;
-             }
-           /*
-            * Finally, validate the IO space PTEs.
-            */
-           /* create io(and debbuger) PTEs */
-           for (index = 0; io_physmap[index].pm_phys; index++)
-             {
-                 pte = (u_int *)iopa + index*NPTEPG;
-                 epte = &pte[(luna_round_page(io_physmap[index].pm_size))>>PG_SHIFT];
-                 /* 
-                  * First entry(index == IO_DBG_OFF) is very special, 
-                  * we map debugger at fixed address(0x40000000).
-                  * Debugger is always loaded (maxmem+1) page.
-                  */
-                 protopte = (index == IO_DBG_OFF ? 
-                             ((maxmem+1)<<PG_SHIFT) : io_physmap[index].pm_phys) |
-                   PG_RW |(io_physmap[index].pm_cache == 0 ? PG_CI : 0) | PG_V;
-                 
-                 /* physical page setup loop */
-                 while (pte < epte) {
-                     *pte++ = protopte;
-                     protopte += NBPG;
-                 }
-             }
-       }
        /*
         * Calculate important exported kernel virtual addresses
         */
@@ -288,6 +305,7 @@ pmap_bootstrap(nextpa, firstpa)
         * LUNA: User stack address = 0x3ff00000.
         */
        Umap = (vm_offset_t)Sysmap + (LUNA_MAX_PTSIZE/4 - HIGHPAGES * sizeof(struct pte));
+
        /*
         * Setup u-area for process 0.
         */
@@ -296,7 +314,7 @@ pmap_bootstrap(nextpa, firstpa)
         * which are HIGHPAGES from the end of the last kernel PT page
         * allocated earlier.
         */
-       pte = &((u_int *)ukptpa)[NPTEPG - HIGHPAGES];
+       pte = &((u_int *)lkptpa)[NPTEPG - HIGHPAGES];
        epte = &pte[UPAGES];
        protopte = p0upa | PG_RW | PG_V;
        while (pte < epte) {
@@ -322,43 +340,30 @@ pmap_bootstrap(nextpa, firstpa)
         * the pmap module.
         */
        avail_start = nextpa;
-       avail_end = luna_ptob(maxmem);
-#if BSDVM_COMPAT
+       avail_end = luna_ptob(maxmem)
                        /* XXX allow for msgbuf */
-                       - luna_round_page(sizeof(struct msgbuf))
-#endif
-                               ;
+                       - luna_round_page(sizeof(struct msgbuf));
        mem_size = luna_ptob(physmem);
-       virtual_avail = VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
+       virtual_avail = VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
        virtual_end = VM_MAX_KERNEL_ADDRESS;
-#if defined(DYNPGSIZE)
-       lunapagesperpage = 1;           /* XXX */
-#endif
+
        /*
         * Initialize protection array.
+        * XXX don't use a switch statement, it might produce an
+        * absolute "jmp" table.
         */
        {
-               register int *kp, prot;
+               register int *kp;
 
                kp = protection_codes;
-               for (prot = 0; prot < 8; prot++) {
-                       switch (prot) {
-                       case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
-                               *kp++ = 0;
-                               break;
-                       case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
-                       case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
-                       case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
-                               *kp++ = PG_RO;
-                               break;
-                       case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
-                       case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
-                       case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
-                       case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
-                               *kp++ = PG_RW;
-                               break;
-                       }
-               }
+               kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
+               kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
+               kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+               kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+               kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+               kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+               kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+               kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
        }
 
        /*
@@ -373,133 +378,47 @@ pmap_bootstrap(nextpa, firstpa)
                simple_lock_init(&kpm->pm_lock);
                kpm->pm_count = 1;
                kpm->pm_stpa = (struct ste *)kstpa;
+#if defined(LUNA2)
+               /*
+                * For the 040 we also initialize the free level 2
+                * descriptor mask noting that we have used:
+                *      0:              level 1 table
+                *      1 to `num':     map page tables
+                *      MAXKL2SIZE-1:   maps last-page page table
+                */
+               if (mmutype == MMU_68040) {
+                       register int num;
+                       
+                       kpm->pm_stfree = ~l2tobm(0);
+                       num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
+                                     SG4_LEV2SIZE) / SG4_LEV2SIZE;
+                       while (num)
+                               kpm->pm_stfree &= ~l2tobm(num--);
+                       kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
+                       for (num = MAXKL2SIZE;
+                            num < sizeof(kpm->pm_stfree)*NBBY;
+                            num++)
+                               kpm->pm_stfree &= ~l2tobm(num);
+               }
+#endif
        }
 
-#if BSDVM_COMPAT
-#define        SYSMAP(c, p, v, n) \
-       v = (c)va; va += ((n)*LUNA_PAGE_SIZE); \
-       p = (struct pte *)pte; pte += (n);
-
        /*
-        * Allocate all the submaps we need
+        * Allocate some fixed, special purpose kernel virtual addresses
         */
        {
                vm_offset_t va = virtual_avail;
 
-               pte = &Sysmap[luna_btop(va)];
-       
-               SYSMAP(caddr_t          ,CMAP1          ,CADDR1    ,1   )
-               SYSMAP(caddr_t          ,CMAP2          ,CADDR2    ,1   )
-               SYSMAP(caddr_t          ,mmap           ,vmmap     ,1   )
-               SYSMAP(struct msgbuf *  ,msgbufmap      ,msgbufp   ,1   )
-
+               CADDR1 = (caddr_t)va;
+               va += LUNA_PAGE_SIZE;
+               CADDR2 = (caddr_t)va;
+               va += LUNA_PAGE_SIZE;
+               vmmap = (caddr_t)va;
+               va += LUNA_PAGE_SIZE;
+               ledbase = (caddr_t)va;
+               va += LUNA_PAGE_SIZE;
+               msgbufp = (struct msgbuf *)va;
+               va += LUNA_PAGE_SIZE;
                virtual_avail = va;
        }
-#undef SYSMAP
-#endif
-}
-
-pmap_showstuff()
-{
-       int i;
-       printf("CADDR1=%x pte at CMAP1=%x\n", CADDR1, CMAP1);
-       printf("CADDR2=%x pte at CMAP2=%x\n", CADDR2, CMAP2);
-       printf("vmmap=%x pte at mmap=%x\n", vmmap, mmap);
-       printf("msgbufp=%x pte at msgbufmap=%x\n", msgbufp, msgbufmap);
-       printf("virtual_avail=%x, virtual_end=%x\n", virtual_avail, virtual_end);
-       for (i = 0; i < 8; i++)
-               printf("%x ", protection_codes[i]);
-       printf("\n");
 }
-
-#ifdef BOOTDEBUG
-/*
- *     Bootstrap the system enough to run with virtual memory.
- *     Map the kernel's code and data, and allocate the system page table.
- *
- *     On the HP this is called after mapping has already been enabled
- *     and just syncs the pmap module with what has already been done.
- *     [We can't call it easily with mapping off since the kernel is not
- *     mapped with PA == VA, hence we would have to relocate every address
- *     from the linked base (virtual) address 0 to the actual (physical)
- *     address of 0xFFxxxxxx.]
- */
-void
-Opmap_bootstrap(firstaddr, loadaddr)
-       vm_offset_t firstaddr;
-       vm_offset_t loadaddr;
-{
-#if BSDVM_COMPAT
-       vm_offset_t va;
-       struct pte *pte;
-#endif
-
-       avail_start = firstaddr;
-       avail_end = maxmem << PGSHIFT;
-
-#if BSDVM_COMPAT
-       /* XXX: allow for msgbuf */
-       avail_end -= luna_round_page(sizeof(struct msgbuf));
-#endif
-
-       mem_size = physmem << PGSHIFT;
-       virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
-       virtual_end = VM_MAX_KERNEL_ADDRESS;
-#if defined(DYNPGSIZE)
-       lunapagesperpage = PAGE_SIZE / LUNA_PAGE_SIZE;
-#endif
-       /*
-        * Initialize protection array.
-        */
-       {
-               register int *kp, prot;
-
-               kp = protection_codes;
-               for (prot = 0; prot < 8; prot++) {
-                       switch (prot) {
-                       case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
-                               *kp++ = 0;
-                               break;
-                       case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
-                       case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
-                       case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
-                               *kp++ = PG_RO;
-                               break;
-                       case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
-                       case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
-                       case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
-                       case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
-                               *kp++ = PG_RW;
-                               break;
-                       }
-               }
-       }
-       /*
-        * Kernel page/segment table allocated in locore,
-        * just initialize pointers.
-        */
-       kernel_pmap->pm_stab = Sysseg;
-       kernel_pmap->pm_ptab = Sysmap;
-
-       simple_lock_init(&kernel_pmap->pm_lock);
-       kernel_pmap->pm_count = 1;
-
-#if BSDVM_COMPAT
-       /*
-        * Allocate all the submaps we need
-        */
-#define        SYSMAP(c, p, v, n)      \
-       v = (c)va; va += ((n)*LUNA_PAGE_SIZE); p = pte; pte += (n);
-
-       va = virtual_avail;
-       pte = &Sysmap[luna_btop(va)];
-
-       SYSMAP(caddr_t          ,CMAP1          ,CADDR1    ,1           )
-       SYSMAP(caddr_t          ,CMAP2          ,CADDR2    ,1           )
-       SYSMAP(caddr_t          ,mmap           ,vmmap     ,1           )
-       SYSMAP(struct msgbuf *  ,msgbufmap      ,msgbufp   ,1           )
-       virtual_avail = va;
-#undef SYSMAP
-#endif
-}
-#endif
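
The "224mb of address space" cited in the XXX comment of the new pmap_bootstrap() follows from the geometry described there: one 4K segment table page holds 1024 four-byte descriptors, 128 of them form the level 1 table, and every remaining slot used as a level 2 descriptor maps 256KB.  A standalone check of that arithmetic (the constants are assumptions based on those comments, not the actual header values):

    #include <stdio.h>

    #define NBPG            4096    /* page size (assumed) */
    #define NPTEPG          1024    /* 4-byte descriptors per page (assumed) */
    #define SG4_LEV1SIZE    128     /* level 1 descriptors, 512 bytes (assumed) */
    #define SG4_LEV3SIZE    64      /* level 3 descriptors per level 2 entry (assumed) */

    int
    main(void)
    {
            /* Each level 2 descriptor maps SG4_LEV3SIZE pages = 256KB. */
            unsigned long perlev2 = (unsigned long)SG4_LEV3SIZE * NBPG;

            /* Level 1 plus level 2 in one page leaves 896 level 2 slots. */
            unsigned long kva = (NPTEPG - SG4_LEV1SIZE) * perlev2;

            printf("%d level 2 slots -> %lu MB of mappable kernel VA\n",
                NPTEPG - SG4_LEV1SIZE, kva / (1024UL * 1024));  /* 896, 224 */
            return 0;
    }
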
index 0732bbb..44651ec 100644 (file)
@@ -7,7 +7,7 @@
  *
  * from: hp300/hp300/sys_machdep.c     7.11 (Berkeley) 12/27/92
  *
- *     @(#)sys_machdep.c       7.3 (Berkeley) %G%
+ *     @(#)sys_machdep.c       7.4 (Berkeley) %G%
  */
 
 #include <sys/param.h>
@@ -107,13 +107,104 @@ cachectl(req, addr, len)
 {
        int error = 0;
 
+#if defined(LUNA2)
+       if (mmutype == MMU_68040) {
+               register int inc;
+               int pa = 0, doall = 0;
+               caddr_t end;
+
+               if (addr == 0 ||
+                   (req & ~CC_EXTPURGE) != CC_PURGE && len > 2*NBPG)
+                       doall = 1;
+#ifdef HPUXCOMPAT
+               if ((curproc->p_md.md_flags & MDP_HPUX) &&
+                   len != 16 && len != NBPG)
+                       doall = 1;
+#endif
+               if (!doall) {
+                       end = addr + len;
+                       if (len <= 1024) {
+                               addr = (caddr_t)((int)addr & ~0xF);
+                               inc = 16;
+                       } else {
+                               addr = (caddr_t)((int)addr & ~PGOFSET);
+                               inc = NBPG;
+                       }
+               }
+               do {
+                       /*
+                        * Convert to physical address if needed.
+                        * If translation fails, we perform operation on
+                        * entire cache (XXX is this a rational thing to do?)
+                        */
+                       if (!doall &&
+                           (pa == 0 || ((int)addr & PGOFSET) == 0)) {
+                               pa = pmap_extract(&curproc->p_vmspace->vm_pmap,
+                                                 (vm_offset_t)addr);
+                               if (pa == 0)
+                                       doall = 1;
+                       }
+                       switch (req) {
+                       case CC_EXTPURGE|CC_IPURGE:
+                       case CC_IPURGE:
+                               if (doall) {
+                                       DCFA();
+                                       ICPA();
+                               } else if (inc == 16) {
+                                       DCFL(pa);
+                                       ICPL(pa);
+                               } else if (inc == NBPG) {
+                                       DCFP(pa);
+                                       ICPP(pa);
+                               }
+                               break;
+                       
+                       case CC_EXTPURGE|CC_PURGE:
+                       case CC_PURGE:
+                               if (doall)
+                                       DCFA(); /* note: flush not purge */
+                               else if (inc == 16)
+                                       DCPL(pa);
+                               else if (inc == NBPG)
+                                       DCPP(pa);
+                               break;
+
+                       case CC_EXTPURGE|CC_FLUSH:
+                       case CC_FLUSH:
+                               if (doall)
+                                       DCFA();
+                               else if (inc == 16)
+                                       DCFL(pa);
+                               else if (inc == NBPG)
+                                       DCFP(pa);
+                               break;
+                               
+                       default:
+                               error = EINVAL;
+                               break;
+                       }
+                       if (doall)
+                               break;
+                       pa += inc;
+                       addr += inc;
+               } while (addr < end);
+               return(error);
+       }
+#endif
        switch (req) {
        case CC_EXTPURGE|CC_PURGE:
        case CC_EXTPURGE|CC_FLUSH:
        case CC_PURGE:
        case CC_FLUSH:
+#if defined(LUNA2)
+               DCIU();
+#endif
                break;
        case CC_EXTPURGE|CC_IPURGE:
+#if defined(LUNA2)
+               DCIU();
+               /* fall into... */
+#endif
        case CC_IPURGE:
                ICIA();
                break;
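
On the 68040, the cachectl() change above chooses its working granularity before walking the range: a whole-cache operation when no address was supplied, when a translation fails, or when a flush/purge spans more than two pages; otherwise 16-byte cache lines for requests up to 1KB and whole pages beyond that.  Below is a minimal standalone paraphrase of just that selection, using a hypothetical helper name (pick_increment) and an assumed NBPG of 4096; it is an illustration, not the kernel code.

    #include <stdio.h>

    #define NBPG    4096    /* assumed page size */

    /* 0 means "operate on the whole cache", otherwise the step size. */
    static int
    pick_increment(unsigned long addr, unsigned long len, int purge_only)
    {
            if (addr == 0 || (!purge_only && len > 2 * NBPG))
                    return 0;
            return len <= 1024 ? 16 : NBPG;
    }

    int
    main(void)
    {
            printf("%d\n", pick_increment(0x1000, 64, 0));       /* 16 */
            printf("%d\n", pick_increment(0x1000, 8192, 1));     /* 4096 */
            printf("%d\n", pick_increment(0x1000, 3 * NBPG, 0)); /* 0 */
            return 0;
    }
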
index 786dd36..2219895 100644 (file)
@@ -13,7 +13,7 @@
  * from: Utah $Hdr: trap.c 1.35 91/12/26$
  * from: hp300/hp300/trap.c    7.26 (Berkeley) 12/27/92
  *
- *     @(#)trap.c      7.4 (Berkeley) %G%
+ *     @(#)trap.c      7.5 (Berkeley) %G%
  */
 
 #include <sys/param.h>
@@ -77,8 +77,17 @@ short        exframesize[] = {
        -1, -1, -1, -1  /* type C-F - undefined */
 };
 
+#ifdef LUNA2
+#define KDFAULT(c)     (mmutype == MMU_68040 ? \
+                           ((c) & SSW4_TMMASK) == SSW4_TMKD : \
+                           ((c) & (SSW_DF|FC_SUPERD)) == (SSW_DF|FC_SUPERD))
+#define WRFAULT(c)     (mmutype == MMU_68040 ? \
+                           ((c) & SSW4_RW) == 0 : \
+                           ((c) & (SSW_DF|SSW_RW)) == SSW_DF)
+#else
 #define KDFAULT(c)     (((c) & (SSW_DF|SSW_FCMASK)) == (SSW_DF|FC_SUPERD))
 #define WRFAULT(c)     (((c) & (SSW_DF|SSW_RW)) == SSW_DF)
+#endif
 
 #ifdef DEBUG
 int mmudebug = 0;
@@ -102,7 +111,11 @@ userret(p, fp, oticks, faultaddr, fromtrap)
        int fromtrap;
 {
        int sig, s;
        int fromtrap;
 {
        int sig, s;
+#ifdef LUNA2
+       int beenhere = 0;
 
 
+again:
+#endif
        /* take pending signals */
        while ((sig = CURSIG(p)) != 0)
                psig(sig);
@@ -134,6 +147,32 @@ userret(p, fp, oticks, faultaddr, fromtrap)
                addupc_task(p, fp->f_pc,
                            (int)(p->p_sticks - oticks) * psratio);
        }
+#ifdef LUNA2
+       /*
+        * Deal with user mode writebacks (from trap, or from sigreturn).
+        * If any writeback fails, go back and attempt signal delivery.
+        * unless we have already been here and attempted the writeback
+        * (e.g. bad address with user ignoring SIGSEGV).  In that case
+        * we just return to the user without successfully completing
+        * the writebacks.  Maybe we should just drop the sucker?
+        */
+       if (mmutype == MMU_68040 && fp->f_format == FMT7) {
+               if (beenhere) {
+#ifdef DEBUG
+                       if (mmudebug & MDB_WBFAILED)
+                               printf(fromtrap ?
+               "pid %d(%s): writeback aborted, pc=%x, fa=%x\n" :
+               "pid %d(%s): writeback aborted in sigreturn, pc=%x\n",
+                                   p->p_pid, p->p_comm, fp->f_pc, faultaddr);
+#endif
+               } else if (sig = writeback(fp, fromtrap)) {
+                       beenhere = 1;
+                       oticks = p->p_sticks;
+                       trapsignal(p, sig, faultaddr);
+                       goto again;
+               }
+       }
+#endif
        curpri = p->p_pri;
 }
 
@@ -239,6 +278,19 @@ copyfault:
                break;
 #endif
 
+#ifdef LUNA2
+       case T_FPEMULI|T_USER:  /* unimplemented FP instruction */
+       case T_FPEMULD|T_USER:  /* unimplemented FP data type */
+               /* XXX need to FSAVE */
+               printf("pid %d(%s): unimplemented FP %s at %x (EA %x)\n",
+                      p->p_pid, p->p_comm,
+                      frame.f_format == 2 ? "instruction" : "data type",
+                      frame.f_pc, frame.f_fmt2.f_iaddr);
+               /* XXX need to FRESTORE */
+               i = SIGFPE;
+               break;
+#endif
+
        case T_ILLINST|T_USER:  /* illegal instruction fault */
        case T_PRIVINST|T_USER: /* privileged instruction fault */
                ucode = frame.f_format; /* XXX was ILL_PRIVIN_FAULT */
@@ -398,6 +450,10 @@ copyfault:
                }
                if (rv == KERN_SUCCESS) {
                        if (type == T_MMUFLT) {
+#ifdef LUNA2
+                               if (mmutype == MMU_68040)
+                                       (void) writeback(&frame, 1);
+#endif
                                return;
                        }
                        goto out;
@@ -423,6 +479,318 @@ out:
        userret(p, &frame, sticks, v, 1);
 }
 
+#ifdef LUNA2
+#ifdef DEBUG
+struct writebackstats {
+       int calls;
+       int cpushes;
+       int move16s;
+       int wb1s, wb2s, wb3s;
+       int wbsize[4];
+} wbstats;
+
+char *f7sz[] = { "longword", "byte", "word", "line" };
+char *f7tt[] = { "normal", "MOVE16", "AFC", "ACK" };
+char *f7tm[] = { "d-push", "u-data", "u-code", "M-data",
+                "M-code", "k-data", "k-code", "RES" };
+char wberrstr[] =
+    "WARNING: pid %d(%s) writeback [%s] failed, pc=%x fa=%x wba=%x wbd=%x\n";
+#endif
+
+writeback(fp, docachepush)
+       struct frame *fp;
+       int docachepush;
+{
+       register struct fmt7 *f = &fp->f_fmt7;
+       register struct proc *p = curproc;
+       int err = 0;
+       u_int fa;
+       caddr_t oonfault = p->p_addr->u_pcb.pcb_onfault;
+
+#ifdef DEBUG
+       if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid)) {
+               printf(" pid=%d, fa=%x,", p->p_pid, f->f_fa);
+               dumpssw(f->f_ssw);
+       }
+       wbstats.calls++;
+#endif
+       /*
+        * Deal with special cases first.
+        */
+       if ((f->f_ssw & SSW4_TMMASK) == SSW4_TMDCP) {
+               /*
+                * Dcache push fault.
+                * Line-align the address and write out the push data to
+                * the indicated physical address.
+                */
+#ifdef DEBUG
+               if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid)) {
+                       printf(" pushing %s to PA %x, data %x",
+                              f7sz[(f->f_ssw & SSW4_SZMASK) >> 5],
+                              f->f_fa, f->f_pd0);
+                       if ((f->f_ssw & SSW4_SZMASK) == SSW4_SZLN)
+                               printf("/%x/%x/%x",
+                                      f->f_pd1, f->f_pd2, f->f_pd3);
+                       printf("\n");
+               }
+               if (f->f_wb1s & SSW4_WBSV)
+                       panic("writeback: cache push with WB1S valid");
+               wbstats.cpushes++;
+#endif
+               /*
+                * XXX there are security problems if we attempt to do a
+                * cache push after a signal handler has been called.
+                */
+               if (docachepush) {
+                       pmap_enter(kernel_pmap, (vm_offset_t)vmmap,
+                                  trunc_page(f->f_fa), VM_PROT_WRITE, TRUE);
+                       fa = (u_int)&vmmap[(f->f_fa & PGOFSET) & ~0xF];
+                       bcopy((caddr_t)&f->f_pd0, (caddr_t)fa, 16);
+                       DCFL(pmap_extract(kernel_pmap, (vm_offset_t)fa));
+                       pmap_remove(kernel_pmap, (vm_offset_t)vmmap,
+                                   (vm_offset_t)&vmmap[NBPG]);
+               } else
+                       printf("WARNING: pid %d(%s) uid %d: CPUSH not done\n",
+                              p->p_pid, p->p_comm, p->p_ucred->cr_uid);
+       } else if ((f->f_ssw & (SSW4_RW|SSW4_TTMASK)) == SSW4_TTM16) {
+               /*
+                * MOVE16 fault.
+                * Line-align the address and write out the push data to
+                * the indicated virtual address.
+                */
+#ifdef DEBUG
+               if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
+                       printf(" MOVE16 to VA %x(%x), data %x/%x/%x/%x\n",
+                              f->f_fa, f->f_fa & ~0xF, f->f_pd0, f->f_pd1,
+                              f->f_pd2, f->f_pd3);
+               if (f->f_wb1s & SSW4_WBSV)
+                       panic("writeback: MOVE16 with WB1S valid");
+               wbstats.move16s++;
+#endif
+               if (KDFAULT(f->f_wb1s))
+                       bcopy((caddr_t)&f->f_pd0, (caddr_t)(f->f_fa & ~0xF), 16);
+               else
+                       err = suline((caddr_t)(f->f_fa & ~0xF), (caddr_t)&f->f_pd0);
+               if (err) {
+                       fa = f->f_fa & ~0xF;
+#ifdef DEBUG
+                       if (mmudebug & MDB_WBFAILED)
+                               printf(wberrstr, p->p_pid, p->p_comm,
+                                      "MOVE16", fp->f_pc, f->f_fa,
+                                      f->f_fa & ~0xF, f->f_pd0);
+#endif
+               }
+       } else if (f->f_wb1s & SSW4_WBSV) {
+               /*
+                * Writeback #1.
+                * Position the "memory-aligned" data and write it out.
+                */
+               register u_int wb1d = f->f_wb1d;
+               register int off;
+
+#ifdef DEBUG
+               if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
+                       dumpwb(1, f->f_wb1s, f->f_wb1a, f->f_wb1d);
+               wbstats.wb1s++;
+               wbstats.wbsize[(f->f_wb2s&SSW4_SZMASK)>>5]++;
+#endif
+               off = (f->f_wb1a & 3) * 8;
+               switch (f->f_wb1s & SSW4_SZMASK) {
+               case SSW4_SZLW:
+                       if (off)
+                               wb1d = (wb1d >> (32 - off)) | (wb1d << off);
+                       if (KDFAULT(f->f_wb1s))
+                               *(long *)f->f_wb1a = wb1d;
+                       else
+                               err = suword((caddr_t)f->f_wb1a, wb1d);
+                       break;
+               case SSW4_SZB:
+                       off = 24 - off;
+                       if (off)
+                               wb1d >>= off;
+                       if (KDFAULT(f->f_wb1s))
+                               *(char *)f->f_wb1a = wb1d;
+                       else
+                               err = subyte((caddr_t)f->f_wb1a, wb1d);
+                       break;
+               case SSW4_SZW:
+                       off = (off + 16) % 32;
+                       if (off)
+                               wb1d = (wb1d >> (32 - off)) | (wb1d << off);
+                       if (KDFAULT(f->f_wb1s))
+                               *(short *)f->f_wb1a = wb1d;
+                       else
+                               err = susword((caddr_t)f->f_wb1a, wb1d);
+                       break;
+               }
+               if (err) {
+                       fa = f->f_wb1a;
+#ifdef DEBUG
+                       if (mmudebug & MDB_WBFAILED)
+                               printf(wberrstr, p->p_pid, p->p_comm,
+                                      "#1", fp->f_pc, f->f_fa,
+                                      f->f_wb1a, f->f_wb1d);
+#endif
+               }
+       }
+       /*
+        * Deal with the "normal" writebacks.
+        *
+        * XXX writeback2 is known to reflect a LINE size writeback after
+        * a MOVE16 was already dealt with above.  Ignore it.
+        */
+       if (err == 0 && (f->f_wb2s & SSW4_WBSV) &&
+           (f->f_wb2s & SSW4_SZMASK) != SSW4_SZLN) {
+#ifdef DEBUG
+               if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
+                       dumpwb(2, f->f_wb2s, f->f_wb2a, f->f_wb2d);
+               wbstats.wb2s++;
+               wbstats.wbsize[(f->f_wb2s&SSW4_SZMASK)>>5]++;
+#endif
+               switch (f->f_wb2s & SSW4_SZMASK) {
+               case SSW4_SZLW:
+                       if (KDFAULT(f->f_wb2s))
+                               *(long *)f->f_wb2a = f->f_wb2d;
+                       else
+                               err = suword((caddr_t)f->f_wb2a, f->f_wb2d);
+                       break;
+               case SSW4_SZB:
+                       if (KDFAULT(f->f_wb2s))
+                               *(char *)f->f_wb2a = f->f_wb2d;
+                       else
+                               err = subyte((caddr_t)f->f_wb2a, f->f_wb2d);
+                       break;
+               case SSW4_SZW:
+                       if (KDFAULT(f->f_wb2s))
+                               *(short *)f->f_wb2a = f->f_wb2d;
+                       else
+                               err = susword((caddr_t)f->f_wb2a, f->f_wb2d);
+                       break;
+               }
+               if (err) {
+                       fa = f->f_wb2a;
+#ifdef DEBUG
+                       if (mmudebug & MDB_WBFAILED) {
+                               printf(wberrstr, p->p_pid, p->p_comm,
+                                      "#2", fp->f_pc, f->f_fa,
+                                      f->f_wb2a, f->f_wb2d);
+                               dumpssw(f->f_ssw);
+                               dumpwb(2, f->f_wb2s, f->f_wb2a, f->f_wb2d);
+                       }
+#endif
+               }
+       }
+       if (err == 0 && (f->f_wb3s & SSW4_WBSV)) {
+#ifdef DEBUG
+               if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
+                       dumpwb(3, f->f_wb3s, f->f_wb3a, f->f_wb3d);
+               wbstats.wb3s++;
+               wbstats.wbsize[(f->f_wb3s&SSW4_SZMASK)>>5]++;
+#endif
+               switch (f->f_wb3s & SSW4_SZMASK) {
+               case SSW4_SZLW:
+                       if (KDFAULT(f->f_wb3s))
+                               *(long *)f->f_wb3a = f->f_wb3d;
+                       else
+                               err = suword((caddr_t)f->f_wb3a, f->f_wb3d);
+                       break;
+               case SSW4_SZB:
+                       if (KDFAULT(f->f_wb3s))
+                               *(char *)f->f_wb3a = f->f_wb3d;
+                       else
+                               err = subyte((caddr_t)f->f_wb3a, f->f_wb3d);
+                       break;
+               case SSW4_SZW:
+                       if (KDFAULT(f->f_wb3s))
+                               *(short *)f->f_wb3a = f->f_wb3d;
+                       else
+                               err = susword((caddr_t)f->f_wb3a, f->f_wb3d);
+                       break;
+#ifdef DEBUG
+               case SSW4_SZLN:
+                       panic("writeback: wb3s indicates LINE write");
+#endif
+               }
+               if (err) {
+                       fa = f->f_wb3a;
+#ifdef DEBUG
+                       if (mmudebug & MDB_WBFAILED)
+                               printf(wberrstr, p->p_pid, p->p_comm,
+                                      "#3", fp->f_pc, f->f_fa,
+                                      f->f_wb3a, f->f_wb3d);
+#endif
+               }
+       }
+       p->p_addr->u_pcb.pcb_onfault = oonfault;
+       /*
+        * Determine the cause of the failure if any translating to
+        * a signal.  If the corresponding VA is valid and RO it is
+        * a protection fault (SIGBUS) otherwise consider it an
+        * illegal reference (SIGSEGV).
+        */
+       if (err) {
+               if (vm_map_check_protection(&p->p_vmspace->vm_map,      
+                                           trunc_page(fa), round_page(fa),
+                                           VM_PROT_READ) &&
+                   !vm_map_check_protection(&p->p_vmspace->vm_map,
+                                            trunc_page(fa), round_page(fa),
+                                            VM_PROT_WRITE))
+                       err = SIGBUS;
+               else
+                       err = SIGSEGV;
+       }
+       return(err);
+}
+
+#ifdef DEBUG
+dumpssw(ssw)
+       register u_short ssw;
+{
+       printf(" SSW: %x: ", ssw);
+       if (ssw & SSW4_CP)
+               printf("CP,");
+       if (ssw & SSW4_CU)
+               printf("CU,");
+       if (ssw & SSW4_CT)
+               printf("CT,");
+       if (ssw & SSW4_CM)
+               printf("CM,");
+       if (ssw & SSW4_MA)
+               printf("MA,");
+       if (ssw & SSW4_ATC)
+               printf("ATC,");
+       if (ssw & SSW4_LK)
+               printf("LK,");
+       if (ssw & SSW4_RW)
+               printf("RW,");
+       printf(" SZ=%s, TT=%s, TM=%s\n",
+              f7sz[(ssw & SSW4_SZMASK) >> 5],
+              f7tt[(ssw & SSW4_TTMASK) >> 3],
+              f7tm[ssw & SSW4_TMMASK]);
+}
+
+dumpwb(num, s, a, d)
+       int num;
+       u_short s;
+       u_int a, d;
+{
+       register struct proc *p = curproc;
+       vm_offset_t pa;
+
+       printf(" writeback #%d: VA %x, data %x, SZ=%s, TT=%s, TM=%s\n",
+              num, a, d, f7sz[(s & SSW4_SZMASK) >> 5],
+              f7tt[(s & SSW4_TTMASK) >> 3], f7tm[s & SSW4_TMMASK]);
+       printf("               PA ");
+       pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)a);
+       if (pa == 0)
+               printf("<invalid address>");
+       else
+               printf("%x, current value %x", pa, fuword((caddr_t)a));
+       printf("\n");
+}
+#endif
+#endif
+
 /*
  * Process a system call.
  */
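
The writeback #1 case of the new writeback() stores its datum "memory-aligned": the low two bits of the writeback address are turned into a bit offset (8 bits per byte of misalignment) and the 32-bit payload is rotated by that amount before the store, with the `if (off)' guard keeping the shift counts legal.  The fragment below isolates that rotation with a made-up value under a hypothetical helper name (rotate_wb1); it assumes 32-bit unsigned ints as on the m68k and omits all of the surrounding fault-frame handling.

    #include <stdio.h>

    /* Rotation applied to a longword (SSW4_SZLW) writeback datum. */
    static unsigned int
    rotate_wb1(unsigned int wb1d, unsigned int wb1a)
    {
            int off = (wb1a & 3) * 8;

            if (off)        /* keeps both shift counts in 1..31 */
                    wb1d = (wb1d >> (32 - off)) | (wb1d << off);
            return wb1d;
    }

    int
    main(void)
    {
            /* Hypothetical example: a long write to an address ending in 1. */
            printf("%08x\n", rotate_wb1(0xAABBCCDDu, 0x2001u));  /* bbccddaa */
            return 0;
    }
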
index 9f0e5ad..4ba2561 100644 (file)
@@ -6,9 +6,24 @@
 |
 | from: hp300/hp300/vectors.s  7.3 (Berkeley) 6/5/92
 |
-|      @(#)vectors.s   7.2 (Berkeley) %G%
+|      @(#)vectors.s   7.3 (Berkeley) %G%
 |
 
+#ifdef HPFPLIB
+/*
+ * XXX the HP FP library mishandles "normal" F-line faults causing
+ * the kernel to crash, hence we detect it ourselves rather than just
+ * vectoring to "_fline".  We also always catch unsupported data type
+ * faults ourselves for no particular reason.
+ */
+#define        _fpbsun         _bsun
+#define        _fpinex         _inex
+#define        _fpdz           _dz
+#define        _fpunfl         _unfl
+#define        _fpoperr        _operr
+#define        _fpovfl         _ovfl
+#define        _fpsnan         _snan
+#else
 #define        _fpbsun         _fpfault
 #define        _fpinex         _fpfault
 #define        _fpdz           _fpfault
@@ -16,6 +31,7 @@
 #define        _fpoperr        _fpfault
 #define        _fpovfl         _fpfault
 #define        _fpsnan         _fpfault
+#endif
 
        .text
        .globl  _buserr,_addrerr
index b064e6e..2063819 100644 (file)
@@ -13,7 +13,7 @@
  * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
  * from: hp300/hp300/vm_machdep.c      7.14 (Berkeley) 12/27/92
  *
- *     @(#)vm_machdep.c        7.4 (Berkeley) %G%
+ *     @(#)vm_machdep.c        7.5 (Berkeley) %G%
  */
 
 #include <sys/param.h>
@@ -140,6 +140,9 @@ pagemove(from, to, size)
                to += NBPG;
                size -= NBPG;
        }
+#ifdef LUNA2
+       DCIS();
+#endif
 }
 
 /*