/* vmsched.c 2.2 2/10/80 */

/* In main.c since LOTSFREE is variable */
/* int lotsfree = LOTSFREE; */

int	multprog = -1;		/* so we don't count process 2 */
double	avenrun[3];		/* load average, of runnable procs */
/*
 * The main loop of the scheduling (swapping) process:
 * see if anyone wants to be swapped in;
 * swap out processes until there is room;
 * If the paging rate is too high, or the average free memory
 * is very low, then we do not consider swapping anyone in,
 * but rather look for someone to swap out.
 * The runout flag is set whenever someone is swapped out.
 * Sched sleeps on it awaiting work.
 * Sched sleeps on runin whenever it cannot find enough
 * core (by swapping out or otherwise) to fit the
 * selected swapped process. It is awakened when the
 * core situation changes and in any case once per second.
 */
(((p)->p_flag&(SSYS|SLOCK|SULOCK|SLOAD|SPAGE|SKEEP|SWEXIT))==SLOAD)
/*
 * Guard against division by zero: yield x, or 1 when x is 0.
 * Argument and expansion are fully parenthesized so expressions
 * such as nz(a & b) associate correctly; note x is evaluated twice.
 */
#define nz(x) ((x) != 0 ? (x) : 1)
register struct proc
*rp
, *p
, *inp
;
register int outpri
, inpri
, rppri
;
/*
 * Check if paging rate is too high, or average of
 * free list very low and if so, adjust multiprogramming
 * load by swapping someone out.
 * Avoid glitches: don't swap out the only process to do this,
 * and don't swap based on paging rate if there is a reasonable
 * amount of free memory.
 */
if (kmapwnt
|| (multprog
> 1 && avefree
< desfree
&&
(rate
.v_pgin
+ rate
.v_pgout
> maxpgio
|| avefree
< minfree
))) {
/*
 * Number of pages available and paging rate seem
 * reasonable, consider increasing multiprogramming
 * by swapping in process which has been out longest.
 * If you went out with a lot of pages, then you are
 * lower priority to come in... but are not brought in
 * until there is a reasonable fraction of the memory
 * you are expected to need available. The system will
 * also protect memory for you to some extent in this
 * case by computing the expected ``deficit'' (pages
 * ``owed'' to you) and not giving them away via further
 * swapins of process which want many pages.
 */
for (rp
= &proc
[0]; rp
< &proc
[NPROC
]; rp
++) {
rppri
= rp
->p_time
- (rp
->p_nice
-NZERO
)*8;
rppri
-= rp
->p_swrss
/ nz(maxpgio
/ 2);
if (rp
->p_stat
==SRUN
&& (rp
->p_flag
&SLOAD
)==0 &&
(rp
->p_textp
==0||rp
->p_textp
->x_poip
==0) &&
} else if ((rp
->p_stat
==SSLEEP
||rp
->p_stat
==SSTOP
) &&
(freemem
< desfree
|| rp
->p_rssize
== 0) &&
rp
->p_slptime
> maxslp
&&
(!rp
->p_textp
|| (rp
->p_textp
->x_flag
&XLOCK
)==0) &&
/*
 * We found a process which has been blocked
 * in core for a long time, and memory is
 * not as free as we would prefer.
 * Swap it out to free its u. and page table
 * pages, then start over. We do this here
 * because we want to get rid of this guy
 * even if noone wants to come in.
 */
VOID
swapout(rp
, rp
->p_dsize
, rp
->p_ssize
);
/*
 * If there is no one there, wait.
 */
sleep((caddr_t
)&runout
, PSWP
);
/*
 * If there are resources (kernel map, memory), swap p in.
 * If the process was swapped out while it still had pages,
 * don't bring it back unless there is a reasonable amount
 * of memory for it to work with.
 */
if (freemem
> imin(deficit
, lotsfree
) + imin(p
->p_swrss
/ 2, 2 * maxpgio
) ||
p
->p_swrss
< 2 * maxpgio
&& freemem
> desfree
) {
/*
 * Need resources (kernel map or memory), swap someone out.
 * Select the person who has been sleeping longest
 * at bad priority; if none, select the oldest.
 */
for (rp
= &proc
[0]; rp
< &proc
[NPROC
]; rp
++) {
if (rp
->p_textp
&& rp
->p_textp
->x_flag
&XLOCK
)
if ((rp
->p_stat
==SSLEEP
&&rp
->p_pri
>=PZERO
|| rp
->p_stat
==SSTOP
)
&& rp
->p_slptime
> maxslp
) {
if (smax
< rp
->p_slptime
) {
} else if (smax
<0 && (rp
->p_stat
==SRUN
||rp
->p_stat
==SSLEEP
)) {
rppri
= rp
->p_time
+rp
->p_nice
-NZERO
;
rppri
-= imin(rp
->p_swrss
/ nz(maxpgio
), maxslp
/ 2);
/*
 * Swap found user out if sleeping at bad pri for maxslp seconds,
 * or if he has spent at least 5 seconds in core and
 * the swapped-out process has spent at least 5 seconds out.
 * Otherwise wait a bit and try again.
 * (Note these are not really ``times'' but priorities.)
 */
if (smax
>=0 || (outpri
>=5 && inpri
>=5)) {
VOID
swapout(p
, p
->p_dsize
, p
->p_ssize
);
sleep((caddr_t
)&runin
, PSWP
);
/*
 * Decay the per-second rate for one vmmeter field, accumulate the
 * interval count into the running sum, and reset the interval count.
 * Wrapped in do { } while (0) so the multi-statement expansion behaves
 * as a single statement inside unbraced if/else bodies.
 */
#define vave(field, time) \
	do { \
		ave(rate.field, cnt.field, time); \
		sum.field += cnt.field; \
		cnt.field = 0; \
	} while (0)
deficit
-= imin(deficit
, imax(deficit
/ 10, maxpgio
/ 2));
ave(avefree
, freemem
, 5);
/* v_pgin is maintained by clock.c */
if (avefree
< minfree
&& runout
|| proc
[0].p_slptime
> 5) {
wakeup((caddr_t
)&runout
);
/*
 * Compute new rate for clock; if
 * nonzero, restart clock.
 * Rate ranges linearly from one rev per
 * slowscan seconds when there is lotsfree memory
 * available to one rev per fastscan seconds when
 * there is no memory available.
 */
vavail
= freemem
- deficit
;
scanrate
= (slowscan
* vavail
+ fastscan
* (lotsfree
- vavail
)) / nz(lotsfree
);
desscan
= LOOPSIZ
/ nz(scanrate
);
wakeup((caddr_t
)&proc
[2]);
register struct text
*xp
;
for (xp
= &text
[0]; xp
< &text
[NTEXT
]; xp
++)
total
.t_vmtxt
+= xp
->x_size
;
total
.t_rmtxt
+= xp
->x_rssize
;
for (p
= xp
->x_caddr
; p
; p
= p
->p_xlink
)
if (p
->p_slptime
>= maxslp
)
total
.t_avmtxt
+= xp
->x_size
;
total
.t_armtxt
+= xp
->x_rssize
;
for (p
= &proc
[0]; p
< &proc
[NPROC
]; p
++) {
total
.t_vm
+= p
->p_dsize
+ p
->p_ssize
;
total
.t_rm
+= p
->p_rssize
;
else if (p
->p_flag
& SLOAD
) {
else if (p
->p_slptime
< maxslp
)
} else if (p
->p_slptime
< maxslp
)
if (p
->p_slptime
< maxslp
)
total
.t_avm
+= p
->p_dsize
+ p
->p_ssize
;
total
.t_arm
+= p
->p_rssize
;
total
.t_vm
+= total
.t_vmtxt
;
total
.t_avm
+= total
.t_avmtxt
;
total
.t_rm
+= total
.t_rmtxt
;
total
.t_arm
+= total
.t_armtxt
;
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
0.9200444146293232, /* exp(-1/12) */
0.9834714538216174, /* exp(-1/60) */
0.9944598480048967, /* exp(-1/180) */
/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
avg
[i
] = cexp
[i
] * avg
[i
] + n
* (1.0 - cexp
[i
]);