Forgot some prototype definitions.
[unix-history] / usr / src / sys / vm / vm_glue.c
index 82989d8..4406e02 100644 (file)
@@ -7,7 +7,7 @@
  *
  * %sccs.include.redist.c%
  *
  *
  * %sccs.include.redist.c%
  *
- *     @(#)vm_glue.c   7.9 (Berkeley) %G%
+ *     @(#)vm_glue.c   7.11 (Berkeley) %G%
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -157,11 +157,18 @@ vm_fork(p1, p2, isvfork)
                shmfork(p1, p2, isvfork);
 #endif
 
                shmfork(p1, p2, isvfork);
 #endif
 
+#ifndef        i386
        /*
         * Allocate a wired-down (for now) pcb and kernel stack for the process
         */
        addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
        vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
        /*
         * Allocate a wired-down (for now) pcb and kernel stack for the process
         */
        addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
        vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
+#else
+/* XXX somehow, on 386, occasionally pageout removes active, wired down kstack,
and pagetables, WITHOUT going through vm_page_unwire! Why this appears to work is
+not yet clear, yet it does... */
+       addr = kmem_alloc(kernel_map, ctob(UPAGES));
+#endif
        up = (struct user *)addr;
        p2->p_addr = up;
 
        up = (struct user *)addr;
        p2->p_addr = up;
 
@@ -185,8 +192,7 @@ vm_fork(p1, p2, isvfork)
        { u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;
 
        vp = &p2->p_vmspace->vm_map;
        { u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;
 
        vp = &p2->p_vmspace->vm_map;
-       (void)vm_map_pageable(vp, addr, 0xfe000000 - addr, TRUE);
-       (void)vm_deallocate(vp, addr, 0xfe000000 - addr);
+       (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
        (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
        (void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
        }
        (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
        (void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
        }
@@ -221,7 +227,7 @@ vm_init_limits(p)
         p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
         p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
        p->p_rlimit[RLIMIT_RSS].rlim_cur = p->p_rlimit[RLIMIT_RSS].rlim_max =
         p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
         p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
        p->p_rlimit[RLIMIT_RSS].rlim_cur = p->p_rlimit[RLIMIT_RSS].rlim_max =
-               ptoa(vm_stat.free_count);
+               ptoa(cnt.v_free_count);
 }
 
 #include "../vm/vm_pageout.h"
 }
 
 #include "../vm/vm_pageout.h"
@@ -287,12 +293,12 @@ noswap:
         */
        size = round_page(ctob(UPAGES));
        addr = (vm_offset_t) p->p_addr;
         */
        size = round_page(ctob(UPAGES));
        addr = (vm_offset_t) p->p_addr;
-       if (vm_stat.free_count > atop(size)) {
+       if (cnt.v_free_count > atop(size)) {
 #ifdef DEBUG
                if (swapdebug & SDB_SWAPIN)
                        printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
                               p->p_pid, p->p_comm, p->p_addr,
 #ifdef DEBUG
                if (swapdebug & SDB_SWAPIN)
                        printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
                               p->p_pid, p->p_comm, p->p_addr,
-                              ppri, vm_stat.free_count);
+                              ppri, cnt.v_free_count);
 #endif
                vm_map_pageable(kernel_map, addr, addr+size, FALSE);
                (void) splclock();
 #endif
                vm_map_pageable(kernel_map, addr, addr+size, FALSE);
                (void) splclock();
@@ -310,14 +316,14 @@ noswap:
 #ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("sched: no room for pid %d(%s), free %d\n",
 #ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("sched: no room for pid %d(%s), free %d\n",
-                      p->p_pid, p->p_comm, vm_stat.free_count);
+                      p->p_pid, p->p_comm, cnt.v_free_count);
 #endif
        (void) splhigh();
        VM_WAIT;
        (void) spl0();
 #ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
 #endif
        (void) splhigh();
        VM_WAIT;
        (void) spl0();
 #ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
-               printf("sched: room again, free %d\n", vm_stat.free_count);
+               printf("sched: room again, free %d\n", cnt.v_free_count);
 #endif
        goto loop;
 }
 #endif
        goto loop;
 }
@@ -377,7 +383,7 @@ swapout_threads()
         * it (UPAGES pages).
         */
        if (didswap == 0 &&
         * it (UPAGES pages).
         */
        if (didswap == 0 &&
-           vm_stat.free_count <= atop(round_page(ctob(UPAGES)))) {
+           cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
                if ((p = outp) == 0)
                        p = outp2;
 #ifdef DEBUG
                if ((p = outp) == 0)
                        p = outp2;
 #ifdef DEBUG
@@ -399,7 +405,7 @@ swapout(p)
        if (swapdebug & SDB_SWAPOUT)
                printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
                       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
        if (swapdebug & SDB_SWAPOUT)
                printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
                       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
-                      p->p_slptime, vm_stat.free_count);
+                      p->p_slptime, cnt.v_free_count);
 #endif
        size = round_page(ctob(UPAGES));
        addr = (vm_offset_t) p->p_addr;
 #endif
        size = round_page(ctob(UPAGES));
        addr = (vm_offset_t) p->p_addr;
@@ -423,8 +429,10 @@ swapout(p)
                addr = (vm_offset_t) p->p_addr;
        }
 #endif
                addr = (vm_offset_t) p->p_addr;
        }
 #endif
+#ifndef        i386 /* temporary measure till we find spontaneous unwire of kstack */
        vm_map_pageable(kernel_map, addr, addr+size, TRUE);
        pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
        vm_map_pageable(kernel_map, addr, addr+size, TRUE);
        pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
+#endif
        (void) splhigh();
        p->p_flag &= ~SLOAD;
        if (p->p_stat == SRUN)
        (void) splhigh();
        p->p_flag &= ~SLOAD;
        if (p->p_stat == SRUN)