-/* kern_clock.c 4.20 81/04/17 */
+/* kern_clock.c 4.37 82/09/04 */
#include "../h/param.h"
#include "../h/systm.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
+#include "../h/kernel.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
-#include "../h/vlimit.h"
#include "../h/mtpr.h"
-#include "../h/clock.h"
#include "../h/cpu.h"
+#include "../h/protosw.h"
+#include "../h/socket.h"
+#include "../net/if.h"
+#ifdef MUSH
+#include "../h/quota.h"
+#include "../h/share.h"
+#endif
#include "bk.h"
#include "dh.h"
#include "dz.h"
+#include "ps.h"
/*
 * Hardclock is called straight from
 * the real time clock interrupt.
 *
 * On the VAX, softclock interrupts are implemented by
 * software interrupts.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but that hardclock interrupts should never be lost.
 */
+#ifdef GPROF
+extern int profiling;
+extern char *s_lowpc;
+extern u_long s_textsize;
+extern u_short *kcount;
+#endif
+
+/*
+ * Protoslow is like lbolt, but for slow protocol timeouts: it counts
+ * off (hz/PR_SLOWHZ) ticks, then causes a pfslowtimo().
+ * Protofast is like lbolt, but for fast protocol timeouts: it counts
+ * off (hz/PR_FASTHZ) ticks, then causes a pffasttimo().
+ */
+int protoslow;
+int protofast;
+int ifnetslow;
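+/*
+ * Ifnetslow works the same way, counting off (hz/IFNET_SLOWHZ) ticks
+ * before each if_slowtimo().  As a worked example (assuming hz = 100
+ * and the conventional protosw.h values PR_FASTHZ == 5 and
+ * PR_SLOWHZ == 2): protofast reloads to 100/5 = 20, so pffasttimo()
+ * runs 5 times a second, and protoslow reloads to 100/2 = 50, so
+ * pfslowtimo() runs twice a second.
+ */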
/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
register struct callout *p1;
register struct proc *pp;
register int s, cpstate;
+ extern double avenrun[];
/*
* reprime clock
*/
- clkreld();
+
+#if NPS > 0
+ /*
+	 * sync refresh of picture system
+ */
+ psextsync(pc, ps);
+#endif
/*
* update callout times
	 */
if (p1)
p1->c_time--;
-out:
/*
* Maintain iostat and per-process cpu statistics
*/
if (!noproc) {
s = u.u_procp->p_rssize;
- u.u_vm.vm_idsrss += s;
+ u.u_ru.ru_idrss += s;
+ u.u_ru.ru_isrss += 0; /* XXX */
if (u.u_procp->p_textp) {
register int xrss = u.u_procp->p_textp->x_rssize;
s += xrss;
- u.u_vm.vm_ixrss += xrss;
+ u.u_ru.ru_ixrss += xrss;
}
- if (s > u.u_vm.vm_maxrss)
- u.u_vm.vm_maxrss = s;
- if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/hz > u.u_limit[LIM_CPU]) {
+ if (s > u.u_ru.ru_maxrss)
+ u.u_ru.ru_maxrss = s;
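+		/*
+		 * Once accumulated cpu time passes the soft limit, send
+		 * SIGXCPU, then push the soft limit back 5 seconds at a
+		 * time (up to the hard limit) so the signal recurs.
+		 */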
+ if ((u.u_ru.ru_utime.tv_sec+u.u_ru.ru_stime.tv_sec+1) >
+ u.u_rlimit[RLIMIT_CPU].rlim_cur) {
psignal(u.u_procp, SIGXCPU);
- if (u.u_limit[LIM_CPU] < INFINITY - 5)
- u.u_limit[LIM_CPU] += 5;
+ if (u.u_rlimit[RLIMIT_CPU].rlim_cur <
+ u.u_rlimit[RLIMIT_CPU].rlim_max)
+ u.u_rlimit[RLIMIT_CPU].rlim_cur += 5;
}
}
/*
* Update iostat information.
*/
if (USERMODE(ps)) {
- u.u_vm.vm_utime++;
- if(u.u_procp->p_nice > NZERO)
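+		/*
+		 * Charge the tick to user time in usec.  Note that
+		 * 1000000/hz truncates: at hz = 60, for example, each
+		 * tick adds 16666 usec, undercounting by 40 usec per
+		 * second.
+		 */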
+ u.u_ru.ru_utime.tv_usec += 1000000/hz;
+		if (u.u_ru.ru_utime.tv_usec >= 1000000) {
+ u.u_ru.ru_utime.tv_sec++;
+ u.u_ru.ru_utime.tv_usec -= 1000000;
+ }
+ if (u.u_procp->p_nice > NZERO)
cpstate = CP_NICE;
else
cpstate = CP_USER;
} else {
+#ifdef GPROF
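+		/*
+		 * Kernel pc sampling for gprof: if the tick interrupted
+		 * kernel text, bump the histogram cell for that pc.
+		 */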
+ int k = pc - s_lowpc;
+ if (profiling < 2 && k < s_textsize)
+ kcount[k / sizeof (*kcount)]++;
+#endif
cpstate = CP_SYS;
- if (noproc)
- cpstate = CP_IDLE;
- else
- u.u_vm.vm_stime++;
+ if (noproc) {
+ if ((ps&PSL_IPL) != 0)
+ cpstate = CP_IDLE;
+ } else {
+ u.u_ru.ru_stime.tv_usec += 1000000/hz;
+			if (u.u_ru.ru_stime.tv_usec >= 1000000) {
+ u.u_ru.ru_stime.tv_sec++;
+ u.u_ru.ru_stime.tv_usec -= 1000000;
+ }
+ }
}
cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	pp->p_cpticks++;
if(++pp->p_cpu == 0)
pp->p_cpu--;
- if(pp->p_cpu % 16 == 0) {
+#ifdef MUSH
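+	/*
+	 * MUSH accounting: charge the owning quota for this tick,
+	 * weighted by nice value and the 1-minute load average.
+	 */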
+ pp->p_quota->q_cost += (pp->p_nice > NZERO ?
+ (shconsts.sc_tic * ((2*NZERO)-pp->p_nice)) / NZERO :
+ shconsts.sc_tic) * (((int)avenrun[0]+2)/3);
+#endif
+ if(pp->p_cpu % 4 == 0) {
(void) setpri(pp);
if (pp->p_pri >= PUSER)
pp->p_pri = pp->p_usrpri;
	}
	/*
	 * Time moves on.
*/
++lbolt;
+
+ /*
+ * Time moves on for protocols.
+ */
+ --protoslow; --protofast; --ifnetslow;
+
#if VAX780
/*
	 * On 780's, implement a fast UBA watcher,
}
/*
- * SCHMAG is the constant in the digital decay cpu
- * usage priority assignment. Each second we multiply
- * the previous cpu usage estimate by SCHMAG. At 9/10
- * it tends to decay away all knowledge of previous activity
- * in about 10 seconds.
+ * The digital decay cpu usage priority assignment is scaled to run in
+ * time as expanded by the 1 minute load average. Each second we
+ * multiply the previous cpu usage estimate by
+ * nrscale*avenrun[0]
+ * The following relates the load average to the period over which
+ * cpu usage is 90% forgotten:
+ *	loadav	 1	 5 seconds
+ *	loadav	 5	24 seconds
+ *	loadav	10	47 seconds
+ *	loadav	20	93 seconds
+ * This is a great improvement on the previous algorithm which
+ * decayed the priorities by a constant, and decayed away all knowledge
+ * of previous activity in about 20 seconds. Under heavy load,
+ * the previous algorithm degenerated to round-robin with poor response
+ * time when there was a high load average.
*/
-#define SCHMAG 9/10
+#undef ave
+#define ave(a,b) ((int)(((int)(a*b))/(b+1)))
+int nrscale = 2;
+double avenrun[];
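+/*
+ * A worked check of the table above: each second p_cpu is scaled by
+ * b/(b+1), where b = nrscale*avenrun[0].  At a load average of 1,
+ * b = 2, so usage decays by 2/3 per second and falls below 10% after
+ * log(.1)/log(2/3) ~= 5.7 seconds; at a load average of 5, b = 10,
+ * and (10/11)^t < .1 gives t ~= 24 seconds, matching the table.
+ */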
/*
* Constant for decay filter for cpu usage field
*/
double ccpu = 0.95122942450071400909; /* exp(-1/20) */
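+/*
+ * (Scaling by exp(-1/20) each second means the cpu usage figure
+ * decays to 10% of its value about 20*ln(10) ~= 46 seconds after a
+ * process stops accumulating time.)
+ */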
+#ifdef MELB
+/*
+ * Automatic niceness rate & max constants
+ */
+#define MAXNICE (8 + NZERO) /* maximum auto nice value */
+#define	NFACT	40		/* nice++ every 40 secs of cpu+sys time */
+#endif
+
/*
* Software clock interrupt.
* This routine runs at lower priority than device interrupts.
softclock(pc, ps)
caddr_t pc;
{
- register struct callout *p1, *p2;
+ register struct callout *p1;
register struct proc *pp;
register int a, s;
caddr_t arg;
if (panicstr == 0) {
for (;;) {
s = spl7();
- if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0)
+ if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
+ splx(s);
break;
+ }
calltodo.c_next = p1->c_next;
arg = p1->c_arg;
func = p1->c_func;
/*
* Drain silos.
*/
-#if NBK > 0
#if NDH > 0
s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
s = spl5(); dztimer(); splx(s);
-#endif
#endif
/*
}
/*
- * Run paging daemon and reschedule every 1/4 sec.
+ * Run paging daemon every 1/4 sec.
*/
if (lbolt % (hz/4) == 0) {
vmpago();
+ }
+
+ /*
+ * Reschedule every 1/10 sec.
+ */
+ if (lbolt % (hz/10) == 0) {
runrun++;
aston();
}
+ /*
+ * Run network slow and fast timeouts.
+ */
+ if (protofast <= 0) {
+ protofast = hz / PR_FASTHZ;
+ pffasttimo();
+ }
+ if (protoslow <= 0) {
+ protoslow = hz / PR_SLOWHZ;
+ pfslowtimo();
+ }
+ if (ifnetslow <= 0) {
+ ifnetslow = hz / IFNET_SLOWHZ;
+ if_slowtimo();
+ }
+
/*
* Lightning bolt every second:
* sleep timeouts
* really want to run this code several times,
* so squish out all multiples of hz here.
*/
- time += lbolt / hz;
- lbolt %= hz;
+ s = spl6();
+ time.tv_sec += lbolt / hz; lbolt %= hz;
+ splx(s);
/*
* Wakeup lightning bolt sleepers.
*/
for (pp = proc; pp < procNPROC; pp++)
if (pp->p_stat && pp->p_stat!=SZOMB) {
+#ifdef MUSH
+ /*
+ * Charge process for memory in use
+ */
+ if (pp->p_quota->q_uid)
+ pp->p_quota->q_cost +=
+ shconsts.sc_click * pp->p_rssize;
+#endif
/*
* Increase resident time, to max of 127 seconds
* (it is kept in a character.) For
if (pp->p_time != 127)
pp->p_time++;
/*
- * If process has clock counting down, and it
- * expires, set it running (if this is a tsleep()),
- * or give it an SIGALRM (if the user process
- * is using alarm signals.
+ * Time processes out of select.
*/
- if (pp->p_clktim && --pp->p_clktim == 0)
- if (pp->p_flag & STIMO) {
- s = spl6();
- switch (pp->p_stat) {
-
- case SSLEEP:
- setrun(pp);
- break;
-
- case SSTOP:
- unsleep(pp);
- break;
- }
- pp->p_flag &= ~STIMO;
- splx(s);
- } else
- psignal(pp, SIGALRM);
+ if (timerisset(&pp->p_seltimer) &&
+ --pp->p_seltimer.tv_sec <= 0) {
+ timerclear(&pp->p_seltimer);
+ s = spl6();
+ switch (pp->p_stat) {
+
+ case SSLEEP:
+ setrun(pp);
+ break;
+
+ case SSTOP:
+ unsleep(pp);
+ break;
+ }
+ splx(s);
+ }
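+			/*
+			 * Count down the process's real interval timer
+			 * (a full second per pass) and post SIGALRM on
+			 * expiry.
+			 */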
+ if (timerisset(&pp->p_realtimer.itimer_value) &&
+ itimerdecr(&pp->p_realtimer, 1000000) == 0)
+ psignal(pp, SIGALRM);
+
/*
* If process is blocked, increment computed
* time blocked. This is used in swap scheduling.
* is a weighted estimate of cpu time consumed.
* A process which consumes cpu time has this
* increase regularly. We here decrease it by
- * a fraction (SCHMAG is 90%), giving a digital
- * decay filter which damps out in about 10 seconds.
+ * a fraction based on load average giving a digital
+ * decay filter which damps out in about 5 seconds
+ * when seconds are measured in time expanded by the
+ * load average.
*
* If a process is niced, then the nice directly
* affects the new priority. The final priority
* is in the range 0 to 255, to fit in a character.
*/
pp->p_cpticks = 0;
- a = (pp->p_cpu & 0377)*SCHMAG + pp->p_nice - NZERO;
+#ifdef MUSH
+ a = ave((pp->p_cpu & 0377), avenrun[0]*nrscale) +
+ pp->p_nice - NZERO + pp->p_quota->q_nice;
+#else
+ a = ave((pp->p_cpu & 0377), avenrun[0]*nrscale) +
+ pp->p_nice - NZERO;
+#endif
if (a < 0)
a = 0;
if (a > 255)
if (bclnlist != NULL)
wakeup((caddr_t)&proc[2]);
+#ifdef MELB
+ /*
+ * If a process was running, see if time to make it nicer
+ */
+ if (!noproc) {
+ pp = u.u_procp;
+ if (pp->p_uid
+#ifdef MUSH
+ && !(pp->p_flag & SLOGIN)
+#else
+ /* this is definitely not good enough */
+ && (pp->p_pid != pp->p_pgrp || pp->p_ppid != 1)
+#endif
+	    && (u.u_ru.ru_utime.tv_sec + u.u_ru.ru_stime.tv_sec) >
+		(pp->p_nice-NZERO+1)*NFACT
+ && pp->p_nice >= NZERO
+ && pp->p_nice < MAXNICE
+ ) {
+ pp->p_nice++;
+ (void) setpri(pp);
+ pp->p_pri = pp->p_usrpri;
+ }
+ }
+#else
/*
* If the trap occurred from usermode,
* then check to see if it has now been
if (USERMODE(ps)) {
pp = u.u_procp;
if (pp->p_uid && pp->p_nice == NZERO &&
- u.u_vm.vm_utime > 600 * hz)
+ u.u_ru.ru_utime.tv_sec > 600)
pp->p_nice = NZERO+4;
(void) setpri(pp);
pp->p_pri = pp->p_usrpri;
}
+#endif
}
/*
* If trapped user-mode, give it a profiling tick.
*/
- if (USERMODE(ps) && u.u_prof.pr_scale) {
- u.u_procp->p_flag |= SOWEUPC;
- aston();
- }
+ if (USERMODE(ps) &&
+ timerisset(&u.u_timer[ITIMER_VIRTUAL].itimer_value) &&
+	    itimerdecr(&u.u_timer[ITIMER_VIRTUAL], 1000000/hz) == 0)
+ psignal(u.u_procp, SIGPROF);
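+	/*
+	 * (itimerdecr subtracts the given usec from the timer and
+	 * returns 0 once it has run down; the virtual timer is thus
+	 * charged one tick per user-mode clock tick, while the real
+	 * timer above is charged a full second per softclock pass.)
+	 */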
}
/*
register int t;
int s;
-/* DEBUGGING CODE */
- int ttrstrt();
-
- if (fun == ttrstrt && arg == 0)
- panic("timeout ttrstr arg");
-/* END DEBUGGING CODE */
t = tim;
s = spl7();
pnew = callfree;
p2->c_time -= t;
splx(s);
}
+
+#ifdef notdef
+/*
+ * untimeout is called to remove a function timeout call
+ * from the callout structure.
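+ * Since each c_time is a delta from the preceding entry (hardclock
+ * need only decrement the head of the list), unlinking an entry
+ * credits its remaining time to its successor so later deltas stay
+ * correct.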
+ */
+untimeout (fun, arg)
+ int (*fun)();
+ caddr_t arg;
+{
+
+ register struct callout *p1, *p2;
+ register int s;
+
+ s = spl7();
+ for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
+ if (p2->c_func == fun && p2->c_arg == arg) {
+ if (p2->c_next)
+ p2->c_next->c_time += p2->c_time;
+ p1->c_next = p2->c_next;
+ p2->c_next = callfree;
+ callfree = p2;
+ break;
+ }
+ }
+ splx(s);
+}
+#endif