* Copyright (c) 1987 Carnegie-Mellon University
* Copyright (c) 1991 Regents of the University of California.
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
* The CMU software License Agreement specifies the terms and conditions
* for use and redistribution.
* @(#)vm_glue.c 7.1 (Berkeley) %G%
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_page.h"
#include "../vm/vm_kern.h"
/*
 * File-scope VM tunables.  Both carried an "XXX" marker upstream,
 * i.e. acknowledged placeholders.
 */
/* presumably a running average of free memory pages — TODO confirm against scheduler use */
int avefree
= 0; /* XXX */
/* maximum data-segment size in clicks/bytes; seeded from MAXDSIZ — units not visible here */
unsigned maxdmap
= MAXDSIZ
; /* XXX */
/*
 * NOTE(review): extraction fragment.  This span reads as the body of a
 * kernel-address accessibility check (kernacc-style); the enclosing
 * function header, braces, and return statement are not visible in
 * this chunk — do not treat it as a complete definition.
 */
/* Translate the B_READ/B_WRITE request into a VM protection code. */
vm_prot_t prot
= rw
== B_READ
? VM_PROT_READ
: VM_PROT_WRITE
;
/*
 * Ask the VM layer whether the page-rounded range [addr, addr+len)
 * in the kernel map permits the requested access.
 */
rv
= vm_map_check_protection(kernel_map
, trunc_page(addr
),
round_page(addr
+len
-1), prot
);
/*
 * NOTE(review): extraction fragment.  Same pattern as the kernel-map
 * check above but against the current process's map (u.u_procp->p_map),
 * i.e. a user-address accessibility check (useracc-style).  Function
 * header and return are not visible in this chunk.
 */
/* Translate the B_READ/B_WRITE request into a VM protection code. */
vm_prot_t prot
= rw
== B_READ
? VM_PROT_READ
: VM_PROT_WRITE
;
/* Check the page-rounded user range for the requested protection. */
rv
= vm_map_check_protection(u
.u_procp
->p_map
, trunc_page(addr
),
round_page(addr
+len
-1), prot
);
* Change protections on kernel pages from addr to addr+len
* (presumably so debugger can plant a breakpoint).
* All addresses are assumed to reside in the Sysmap,
/*
 * NOTE(review): extraction fragment — the function header for this
 * chgkprot-style routine is not visible in this chunk.
 */
/* Translate the B_READ/B_WRITE request into a VM protection code. */
vm_prot_t prot
= rw
== B_READ
? VM_PROT_READ
: VM_PROT_WRITE
;
/*
 * Apply the new protection to the page-rounded kernel range; the
 * trailing FALSE is presumably Mach vm_map_protect's set_max argument
 * (change current, not maximum, protection) — TODO confirm.
 */
vm_map_protect(kernel_map
, trunc_page(addr
),
round_page(addr
+len
-1), prot
, FALSE
);
/*
 * NOTE(review): extraction fragment — reads as the body of vslock():
 * wire the page-rounded user range [addr, addr+len) into memory.
 * The trailing FALSE is presumably vm_map_pageable's new_pageable
 * argument (FALSE = wire down) — TODO confirm against Mach vm_map API.
 */
vm_map_pageable(u
.u_procp
->p_map
, trunc_page(addr
),
round_page(addr
+len
-1), FALSE
);
/*
 * vsunlock -- unwire a previously vslock()ed user range so it may be
 * paged again.  K&R-style header; the parameter declarations and
 * braces were lost in extraction.  The `dirtied` argument is not used
 * in the visible span (upstream it existed only to placate lint).
 */
vsunlock(addr
, len
, dirtied
)
/* TRUE = make the page-rounded range pageable again. */
vm_map_pageable(u
.u_procp
->p_map
, trunc_page(addr
),
round_page(addr
+len
-1), TRUE
);
/*
 * NOTE(review): extraction fragment of a vm_fork-style routine.  The
 * function header, the isvfork conditional, and the braces tying these
 * statements together are not visible — only the two inheritance paths
 * (SHARE for vfork, COPY otherwise) survived extraction.
 */
register struct user
*up
;
* Duplicate the process address space.
* XXX if this is a vfork we arrange to share data/stack to
* preserve brain-dead semantics of vfork().
* XXX this doesn't work due to a bug in the VM code.
* Once a process has done a vfork setting up sharing maps,
* any future forks may fail as the source VM range doesn't
* properly get write-protected. This causes the parent to
* not create copies and instead modifies the originals.
* If the parent activates before the child, the child will
* get a corrupted address space.
/* Page-align the start of the data segment. */
addr
= trunc_page(u
.u_daddr
);
/* vfork path: share data segment with the child ... */
(void) vm_map_inherit(u
.u_procp
->p_map
, addr
,
addr
+ size
, VM_INHERIT_SHARE
);
/* ... and share the stack region up to the top of user VA. */
(void) vm_map_inherit(u
.u_procp
->p_map
, u
.u_maxsaddr
,
VM_MAX_ADDRESS
, VM_INHERIT_SHARE
);
/* Give the child its own map, forked from the parent's. */
p
->p_map
= vm_map_fork(u
.u_procp
->p_map
);
/* non-vfork path: mark data segment copy-on-fork ... */
(void) vm_map_inherit(u
.u_procp
->p_map
, addr
,
addr
+ size
, VM_INHERIT_COPY
);
/* ... and likewise the stack region. */
(void) vm_map_inherit(u
.u_procp
->p_map
, u
.u_maxsaddr
,
VM_MAX_ADDRESS
, VM_INHERIT_COPY
);
* Allocate a wired-down (for now) u-area for the process
/* u-area size: UPAGES pages worth of bytes, page-rounded. */
size
= round_page(ctob(UPAGES
));
/* Reserve kernel VA for it (pageable), then wire it down. */
addr
= kmem_alloc_pageable(kernel_map
, size
);
/* FALSE here presumably means "not pageable", i.e. wire — TODO confirm. */
vm_map_pageable(kernel_map
, addr
, addr
+size
, FALSE
);
/* Record the u-area address in the new proc and keep a typed alias. */
p
->p_addr
= (caddr_t
)addr
;
up
= (struct user
*)addr
;
* Update the current u-area and copy it to the new one
/* Clone the parent's u-area into the child's freshly wired one. */
bcopy(u
.u_procp
->p_addr
, p
->p_addr
, size
);
/* Activate the child's pmap with its pcb (start of the new u-area). */
PMAP_ACTIVATE(p
->p_map
->pmap
, (struct pcb
*)p
->p_addr
);
* Arrange for a non-local goto when the new process
* is started, to resume here, returning nonzero from setjmp.
up
->u_pcb
.pcb_sswap
= (int *)&u
.u_ssave
;
/*
 * NOTE(review): this `if` opens a brace whose matching close is not
 * visible in this extraction; the child-side code below runs when
 * savectx returns nonzero (the resumed child).
 */
if (savectx(&up
->u_ssave
)) {
* Clear vm statistics of new process.
/* Zero the child's own and children's rusage accounting. */
bzero((caddr_t
)&up
->u_ru
, sizeof (struct rusage
));
bzero((caddr_t
)&up
->u_cru
, sizeof (struct rusage
));
* XXX Scaled down version from vm_page.c
* Set up the initial limits on process VM.
* Set the maximum resident set size to be all
* of (reasonably) available memory. This causes
* any single, large process to start random page
* replacement once it fills memory.
/*
 * NOTE(review): extraction fragment of vm_init_limits-style setup;
 * the function header is not visible.  Seeds the default/maximum
 * stack and data rlimits from the compile-time constants, then caps
 * RSS at the currently free page count.
 */
u
.u_rlimit
[RLIMIT_STACK
].rlim_cur
= DFLSSIZ
;
u
.u_rlimit
[RLIMIT_STACK
].rlim_max
= MAXSSIZ
;
u
.u_rlimit
[RLIMIT_DATA
].rlim_cur
= DFLDSIZ
;
u
.u_rlimit
[RLIMIT_DATA
].rlim_max
= MAXDSIZ
;
/* Both current and max RSS limit: all currently free memory, in bytes. */
u
.u_rlimit
[RLIMIT_RSS
].rlim_cur
= u
.u_rlimit
[RLIMIT_RSS
].rlim_max
=
ptoa(vm_page_free_count
);
/* proc[0] (swapper/init-time) max RSS bookkeeping, in pages. */
proc
[0].p_maxrss
= vm_page_free_count
;
#include "../vm/vm_pageout.h"
* 1. Attempt to swapin every swapped-out, runnable process in
* 2. If not enough memory, wake the pageout daemon and let it
/*
 * NOTE(review): extraction fragment of the swap-in scheduler (sched).
 * Loop bodies, brace pairs, and several statements are missing; read
 * this as an outline, not compilable code.
 */
register struct proc
*rp
;
/* Scan the whole process list for runnable, swapped-out candidates. */
for (rp
= allproc
; rp
!= NULL
; rp
= rp
->p_nxt
)
if (rp
->p_stat
== SRUN
&& (rp
->p_flag
& SLOAD
) == 0) {
/*
 * NOTE(review): as written this is a value-discarding expression
 * statement; the left-hand side of the swap-in priority computation
 * (something like "pri = rp->p_time + ...") appears to have been
 * lost in extraction — TODO restore before use.
 */
rp
->p_slptime
- (rp
->p_nice
-NZERO
)*8;
if (swapdebug
& SDB_FOLLOW
)
printf("sched: running, procp %x pri %d\n", inp
, inpri
);
* Nothing to do, back to sleep
/* No candidate found: block until someone marks runout. */
if ((rp
= inp
) == NULL
) {
sleep((caddr_t
)&runout
, PVM
);
* We would like to bring someone in.
* This part is really bogus cuz we could deadlock on memory
* despite our feeble check.
/* u-area size we must wire to bring the process in. */
size
= round_page(ctob(UPAGES
));
addr
= (vm_offset_t
) rp
->p_addr
;
/* Feeble memory check: only swap in if free pages exceed the u-area. */
if (vm_page_free_count
> atop(size
)) {
if (swapdebug
& SDB_SWAPIN
)
printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
rp
->p_pid
, rp
->p_comm
, rp
->p_addr
,
inpri
, vm_page_free_count
);
/* Wire the u-area back into the kernel map (FALSE = not pageable). */
vm_map_pageable(kernel_map
, addr
, addr
+size
, FALSE
);
* Not enough memory, jab the pageout daemon and wait til the
if (swapdebug
& SDB_FOLLOW
)
printf("sched: no room for pid %d(%s), free %d\n",
rp
->p_pid
, rp
->p_comm
, vm_page_free_count
);
if (swapdebug
& SDB_FOLLOW
)
printf("sched: room again, free %d\n", vm_page_free_count
);
/*
 * NOTE(review): this expression reads as the expansion of a
 * "swappable(p)" predicate macro — true when the process is loaded
 * (SLOAD) and none of the pinning flags (system proc, u-locked,
 * keep, exiting, physio) are set.  The `#define` line itself was
 * lost in extraction — TODO restore.
 */
(((p)->p_flag & (SSYS|SULOCK|SLOAD|SKEEP|SWEXIT|SPHYSIO)) == SLOAD)
* Swapout is driven by the pageout daemon. Very simple, we find eligible
* procs and unwire their u-areas. We try to always "swap" at least one
* process in case we need the room for a swapin.
/*
 * NOTE(review): extraction fragment of swapout_threads()/swapout().
 * Conditional bodies and several brace pairs are missing; the scan
 * tracks two candidates (outp best long-sleeper, outp2 runner-up)
 * but the statements recording them did not survive extraction.
 */
register struct proc
*rp
;
struct proc
*outp
, *outp2
;
/* Sentinel priorities so any real process beats the initial value. */
outpri
= outpri2
= -20000;
/* Scan all processes for swap-out candidates by sleep time. */
for (rp
= allproc
; rp
!= NULL
; rp
= rp
->p_nxt
) {
if (rp
->p_slptime
> outpri2
) {
/* Slept longer than maxslp: a "dud", prime eviction target. */
if (rp
->p_slptime
> maxslp
) {
} else if (rp
->p_slptime
> outpri
) {
* If we didn't get rid of any real duds, toss out the next most
* likely sleeping/stopped or running candidate. We only do this
* if we are real low on memory since we don't gain much by doing
/* "real low": free memory no larger than one u-area. */
vm_page_free_count
<= atop(round_page(ctob(UPAGES
)))) {
if (swapdebug
& SDB_SWAPOUT
)
printf("swapout_threads: no duds, try procp %x\n", rp
);
if (swapdebug
& SDB_SWAPOUT
)
printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
p
->p_pid
, p
->p_comm
, p
->p_addr
, p
->p_stat
,
p
->p_slptime
, vm_page_free_count
);
/* Unwire the victim's u-area (TRUE = pageable) to free memory. */
size
= round_page(ctob(UPAGES
));
addr
= (vm_offset_t
) p
->p_addr
;
vm_map_pageable(kernel_map
, addr
, addr
+size
, TRUE
);
/* Let the pmap module release any resources it can for this map. */
pmap_collect(vm_map_pmap(p
->p_map
));
* The rest of these routines fake thread handling
/*
 * assert_wait -- Mach-style thread-wait shim over BSD sleep().
 * NOTE(review): parameter declarations and braces were lost in
 * extraction; `ruptible` is unused in the visible span, and the
 * sleep() call may belong to a separate thread_block()-style routine
 * that extraction merged into this one — TODO confirm.
 */
assert_wait(event
, ruptible
)
/* Record the event the current process is waiting on ... */
u
.u_procp
->p_thread
= event
;
/* ... and sleep on it at VM priority. */
sleep((caddr_t
)u
.u_procp
->p_thread
, PVM
);
/*
 * thread_sleep -- Mach-style sleep shim: record the event and sleep
 * on it at VM priority.  NOTE(review): parameter declarations and
 * braces lost in extraction; `lock` and `ruptible` are unused in the
 * visible span (upstream the lock was presumably released first —
 * TODO confirm).
 */
thread_sleep(event
, lock
, ruptible
)
u
.u_procp
->p_thread
= event
;
sleep((caddr_t
)u
.u_procp
->p_thread
, PVM
);
/*
 * iprintf -- printf preceded by `indent` units of indentation
 * (tabs/spaces via the kernel putchar), for nested debug dumps.
 * NOTE(review): the loop below has no visible induction-variable
 * update and would not terminate as written; the original likely had
 * two loops (tabs by 8, then spaces) whose decrements were lost in
 * extraction — TODO restore.
 */
iprintf(a
, b
, c
, d
, e
, f
, g
, h
)
for (i
= indent
; i
> 0; ) {
putchar('\t', 1, (caddr_t
)0);
putchar(' ', 1, (caddr_t
)0);
/* Finally emit the caller's format and (fixed, K&R-era) arguments. */
printf(a
, b
, c
, d
, e
, f
, g
, h
);