/* Set to LOTSFREE in main() unless patched by hand (``adbed''). */
int	lotsfree = 0;

/* Multiprogramming count; starts at -1 so we don't count process 2. */
int	multprog = -1;

/* Load average of runnable processes. */
double	avenrun[3];
/*
 * The main loop of the scheduling (swapping) process.
 * see if anyone wants to be swapped in;
 * swap out processes until there is room;
 * If the paging rate is too high, or the average free memory
 * is very low, then we do not consider swapping anyone in,
 * but rather look for someone to swap out.
 * The runout flag is set whenever someone is swapped out.
 * Sched sleeps on it awaiting work.
 * Sched sleeps on runin whenever it cannot find enough
 * core (by swapping out or otherwise) to fit the
 * selected swapped process. It is awakened when the
 * core situation changes and in any case once per second.
 * sched DOESN'T ACCOUNT FOR PAGE TABLE SIZE IN CALCULATIONS.
 */
(((p)->p_flag&(SSYS|SLOCK|SULOCK|SLOAD|SPAGE|SKEEP|SWEXIT|SPHYSIO))==SLOAD)
/*
 * nz(x): "non-zero" guard -- yields x, or 1 when x is zero, so that x
 * can be used safely as a divisor (e.g. nz(lotsfree), nz(scanrate)).
 * Both the argument and the whole expansion are parenthesized: the old
 * form `(x != 0 ? x : 1)' mis-parses compound arguments (e.g. nz(a & b),
 * because `!=' binds tighter than `&').  Existing call sites all pass
 * already-parenthesized or simple expressions, so behavior is unchanged
 * for them.
 */
#define nz(x) ((x) != 0 ? (x) : 1)
/*
 * NOTE(review): local declarations of the swapping scheduler; the
 * enclosing function header is not visible in this chunk of the file.
 */
register struct proc
*rp
, *p
, *inp
;
/*
 * NOTE(review): presumably priorities for the best swap-out (outpri)
 * and swap-in (inpri) candidates, and for the process currently being
 * examined (rppri) -- confirm against the full function body.
 */
int outpri
, inpri
, rppri
;
/* sic: "desparate" spelling is original */
int sleeper
, desparate
, deservin
, needs
, divisor
;
/* list heads for the biggest in-core jobs (``most likely to get booted'') */
register struct bigp
*bp
, *nbp
;
/*
 * Check if paging rate is too high, or average of
 * free list very low and if so, adjust multiprogramming
 * load by swapping someone out.
 * Avoid glitches: don't hard swap the only process,
 * and don't swap based on paging rate if there is a reasonable
 * ... [comment truncated in this copy of the file]
 */
if (kmapwnt
|| (multprog
> 1 && avefree
< desfree
&&
(rate
.v_pgin
+ rate
.v_pgout
> maxpgio
|| avefree
< minfree
))) {
/*
 * Not desparate for core,
 * look for someone who deserves to be brought in.
 */
for (rp
= &proc
[0]; rp
< &proc
[NPROC
]; rp
++) switch(rp
->p_stat
) {
if ((rp
->p_flag
&SLOAD
) == 0) {
rppri
= rp
->p_time
- rp
->p_swrss
/ nz((maxpgio
/2) * CLSIZE
) +
rp
->p_slptime
- (rp
->p_nice
-NZERO
)*8;
if (rp
->p_textp
&& rp
->p_textp
->x_poip
)
if ((freemem
< desfree
|| rp
->p_rssize
== 0) &&
rp
->p_slptime
> maxslp
&&
(!rp
->p_textp
|| (rp
->p_textp
->x_flag
&XLOCK
)==0) &&
/* FOLLOWING 3 LINES MUST BE AT spl6(). */
(void) swapout(rp
, rp
->p_dsize
, rp
->p_ssize
);
/* No one wants in, so nothing to do. */
sleep((caddr_t
)&runout
, PSWP
);
/*
 * Decide how deserving this guy is. If he is deserving
 * we will be willing to work harder to bring him in.
 * Needs is an estimate of how much core he will need.
 * If he has been out for a while, then we will
 * bring him in with 1/2 the core he will need, otherwise
 * ... [comment truncated in this copy of the file]
 */
if (p
->p_textp
&& p
->p_textp
->x_ccount
== 0)
needs
+= p
->p_textp
->x_swrss
;
if (freemem
- deficit
> needs
/ divisor
) {
deficit
-= imin(needs
, deficit
);
/*
 * Need resources (kernel map or memory), swap someone out.
 * Select the nbig largest jobs, then the oldest of these
 * is ``most likely to get booted.''
 */
for (rp
= &proc
[0]; rp
< &proc
[NPROC
]; rp
++) {
if (rp
->p_textp
&& rp
->p_textp
->x_flag
&XLOCK
)
if (rp
->p_slptime
> maxslp
&&
(rp
->p_stat
==SSLEEP
&&rp
->p_pri
>PZERO
||rp
->p_stat
==SSTOP
)) {
if (sleeper
< rp
->p_slptime
) {
} else if (!sleeper
&& (rp
->p_stat
==SRUN
||rp
->p_stat
==SSLEEP
)) {
rppri
+= rp
->p_textp
->x_rssize
/rp
->p_textp
->x_ccount
;
bplist
.bp_link
= nbp
->bp_link
;
for (bp
= &bplist
; bp
->bp_link
; bp
= bp
->bp_link
)
if (rppri
< bp
->bp_link
->bp_pri
)
nbp
->bp_link
= bp
->bp_link
;
for (bp
= bplist
.bp_link
; bp
; bp
= bp
->bp_link
) {
rppri
= rp
->p_time
+rp
->p_nice
-NZERO
;
/*
 * If we found a long-time sleeper, or we are desparate and
 * found anyone to swap out, or if someone deserves to come
 * in and we didn't find a sleeper, but found someone who
 * has been in core for a reasonable length of time, then
 * we kick the poor luser out.
 */
if (sleeper
|| desparate
&& p
|| deservin
&& inpri
> maxslp
) {
/*
 * Want to give this space to the rest of
 * the processes in core so give them a chance
 * by increasing the deficit.
 */
gives
+= p
->p_textp
->x_rssize
/ p
->p_textp
->x_ccount
;
gives
= 0; /* someone else taketh away */
if (swapout(p
, p
->p_dsize
, p
->p_ssize
) == 0)
deficit
-= imin(gives
, deficit
);
/* Want to swap someone in, but can't */
sleep((caddr_t
)&runin
, PSWP
);
/*
 * NOTE(review): locals of the metering routine that walks the cnt
 * structure copying each counter into rate (current interval) and
 * accumulating it into sum (see the v_first..v_last loop below);
 * the enclosing function header is not visible in this chunk.
 */
register unsigned *cp
, *rp
, *sp
;
deficit
-= imin(deficit
, imax(deficit
/ 10, maxpgio
/ 2));
ave(avefree
, freemem
, 5);
/* v_pgin is maintained by clock.c */
cp
= &cnt
.v_first
; rp
= &rate
.v_first
; sp
= &sum
.v_first
;
while (cp
<= &cnt
.v_last
) {
rate
.v_swpin
= cnt
.v_swpin
;
sum
.v_swpin
+= cnt
.v_swpin
;
rate
.v_swpout
= cnt
.v_swpout
;
sum
.v_swpout
+= cnt
.v_swpout
;
if (avefree
< minfree
&& runout
|| proc
[0].p_slptime
> maxslp
/2) {
wakeup((caddr_t
)&runout
);
/*
 * Compute new rate for clock; if
 * nonzero, restart clock.
 * Rate ranges linearly from one rev per
 * slowscan seconds when there is lotsfree memory
 * available to one rev per fastscan seconds when
 * there is no memory available.
 */
vavail
= freemem
- deficit
;
scanrate
= (slowscan
* vavail
+ fastscan
* (lotsfree
- vavail
)) / nz(lotsfree
);
desscan
= LOOPSIZ
/ nz(scanrate
);
/* DIVIDE BY 4 TO ACCOUNT FOR RUNNING 4* A SECOND (see clock.c) */
wakeup((caddr_t
)&proc
[2]);
register struct text
*xp
;
for (xp
= &text
[0]; xp
< &text
[NTEXT
]; xp
++)
total
.t_vmtxt
+= xp
->x_size
;
total
.t_rmtxt
+= xp
->x_rssize
;
for (p
= xp
->x_caddr
; p
; p
= p
->p_xlink
)
if (p
->p_slptime
>= maxslp
)
total
.t_avmtxt
+= xp
->x_size
;
total
.t_armtxt
+= xp
->x_rssize
;
for (p
= &proc
[0]; p
< &proc
[NPROC
]; p
++) {
total
.t_vm
+= p
->p_dsize
+ p
->p_ssize
;
total
.t_rm
+= p
->p_rssize
;
else if (p
->p_flag
& SLOAD
) {
else if (p
->p_slptime
< maxslp
)
} else if (p
->p_slptime
< maxslp
)
if (p
->p_slptime
< maxslp
)
total
.t_avm
+= p
->p_dsize
+ p
->p_ssize
;
total
.t_arm
+= p
->p_rssize
;
total
.t_vm
+= total
.t_vmtxt
;
total
.t_avm
+= total
.t_avmtxt
;
total
.t_rm
+= total
.t_rmtxt
;
total
.t_arm
+= total
.t_armtxt
;
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
0.9200444146293232, /* exp(-1/12) */
0.9834714538216174, /* exp(-1/60) */
0.9944598480048967, /* exp(-1/180) */
/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
/*
 * Exponentially-weighted moving average update: fold the new sample n
 * into avg[i] with decay constant cexp[i] (the exp(-1/12), exp(-1/60),
 * exp(-1/180) constants above, one per 1/5/15-minute average).
 * NOTE(review): the enclosing loop over i and the function header are
 * not visible in this chunk of the file.
 */
avg
[i
] = cexp
[i
] * avg
[i
] + n
* (1.0 - cexp
[i
]);