X-Git-Url: https://git.subgeniuskitty.com/unix-history/.git/blobdiff_plain/260ea6815a856bf13eb266389c458db23e952298..f4c13170101fd0eb5e60b14d3d74b3b34cba690c:/usr/src/sys/kern/kern_clock.c

diff --git a/usr/src/sys/kern/kern_clock.c b/usr/src/sys/kern/kern_clock.c
index c54911de9f..a483580313 100644
--- a/usr/src/sys/kern/kern_clock.c
+++ b/usr/src/sys/kern/kern_clock.c
@@ -1,9 +1,9 @@
-/* kern_clock.c 4.9 %G% */
+/* kern_clock.c 4.15 %G% */
 
 #include "../h/param.h"
 #include "../h/systm.h"
 #include "../h/dk.h"
-#include "../h/callo.h"
+#include "../h/callout.h"
 #include "../h/seg.h"
 #include "../h/dir.h"
 #include "../h/user.h"
@@ -16,6 +16,7 @@
 #include "../h/vlimit.h"
 #include "../h/mtpr.h"
 #include "../h/clock.h"
+#include "../h/cpu.h"
 
 #include "dh.h"
 #include "dz.h"
@@ -51,7 +52,7 @@ hardclock(pc, ps)
     caddr_t pc;
 {
-    register struct callo *p1;
+    register struct callout *p1;
     register struct proc *pp;
     register int s, cpstate;
 
@@ -85,7 +86,7 @@ out:
         }
         if (s > u.u_vm.vm_maxrss)
             u.u_vm.vm_maxrss = s;
-        if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/HZ > u.u_limit[LIM_CPU]) {
+        if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/hz > u.u_limit[LIM_CPU]) {
            psignal(u.u_procp, SIGXCPU);
            if (u.u_limit[LIM_CPU] < INFINITY - 5)
                u.u_limit[LIM_CPU] += 5;
@@ -120,8 +121,8 @@ out:
         }
     }
     ++lbolt;
-#if VAX==780
-    if (!BASEPRI(ps))
+#if VAX780
+    if (cpu == VAX_780 && panicstr == 0 && !BASEPRI(ps))
         unhang();
 #endif
     setsoftclock();
@@ -141,21 +142,21 @@ double ccpu = 0.95122942450071400909; /* exp(-1/20) */
 softclock(pc, ps)
     caddr_t pc;
 {
-    register struct callo *p1, *p2;
+    register struct callout *p1, *p2;
     register struct proc *pp;
     register int a, s;
 
     /*
-     * callout
+     * Perform callouts (but not after panic's!)
      */
-    if(callout[0].c_time <= 0) {
+    if (panicstr == 0 && callout[0].c_time <= 0) {
         p1 = &callout[0];
-        while(p1->c_func != 0 && p1->c_time <= 0) {
+        while (p1->c_func != 0 && p1->c_time <= 0) {
             (*p1->c_func)(p1->c_arg);
             p1++;
         }
         p2 = &callout[0];
-        while(p2->c_func = p1->c_func) {
+        while (p2->c_func = p1->c_func) {
             p2->c_time = p1->c_time;
             p2->c_arg = p1->c_arg;
             p1++;
@@ -166,10 +167,10 @@ softclock(pc, ps)
     /*
      * Drain silos.
      */
-#if NDH11 > 0
+#if NDH > 0
     s = spl5(); dhtimer(); splx(s);
 #endif
-#if NDZ11 > 0
+#if NDZ > 0
     s = spl5(); dztimer(); splx(s);
 #endif
@@ -185,7 +186,7 @@ softclock(pc, ps)
     /*
      * Run paging daemon and reschedule every 1/4 sec.
      */
-    if (lbolt % (HZ/4) == 0) {
+    if (lbolt % (hz/4) == 0) {
         vmpago();
         runrun++;
         aston();
@@ -199,50 +200,114 @@ softclock(pc, ps)
      *  virtual memory metering
      *  kick swapper if processes want in
      */
-    if (lbolt >= HZ) {
+    if (lbolt >= hz) {
+        /*
+         * This doesn't mean much since we run at
+         * software interrupt time... if hardclock()
+         * calls softclock() directly, it prevents
+         * this code from running when the priority
+         * was raised when the clock interrupt occurred.
+         */
         if (BASEPRI(ps))
             return;
-        lbolt -= HZ;
-        ++time;
+
+        /*
+         * If we didn't run a few times because of
+         * long blockage at high ipl, we don't
+         * really want to run this code several times,
+         * so squish out all multiples of hz here.
+         */
+        time += lbolt / hz;
+        lbolt %= hz;
+
+        /*
+         * Wakeup lightning bolt sleepers.
+         * Processes sleep on lbolt to wait
+         * for short amounts of time (e.g. 1 second).
+         */
         wakeup((caddr_t)&lbolt);
-        for(pp = &proc[0]; pp < &proc[NPROC]; pp++)
+
+        /*
+         * Recompute process priority and process
+         * sleep() system calls as well as internal
+         * sleeps with timeouts (tsleep() kernel routine).
+         */
+        for (pp = proc; pp < procNPROC; pp++)
         if (pp->p_stat && pp->p_stat!=SZOMB) {
-            if(pp->p_time != 127)
+            /*
+             * Increase resident time, to max of 127 seconds
+             * (it is kept in a character.)  For
+             * loaded processes this is time in core; for
+             * swapped processes, this is time on drum.
+             */
+            if (pp->p_time != 127)
                 pp->p_time++;
-            if(pp->p_clktim)
-                if(--pp->p_clktim == 0)
-                    if (pp->p_flag & STIMO) {
-                        s = spl6();
-                        switch (pp->p_stat) {
+            /*
+             * If process has clock counting down, and it
+             * expires, set it running (if this is a tsleep()),
+             * or give it an SIGALRM (if the user process
+             * is using alarm signals.
+             */
+            if (pp->p_clktim && --pp->p_clktim == 0)
+                if (pp->p_flag & STIMO) {
+                    s = spl6();
+                    switch (pp->p_stat) {
-                        case SSLEEP:
-                            setrun(pp);
-                            break;
+                    case SSLEEP:
+                        setrun(pp);
+                        break;
-                        case SSTOP:
-                            unsleep(pp);
-                            break;
-                        }
-                        pp->p_flag &= ~STIMO;
-                        splx(s);
-                    } else
-                        psignal(pp, SIGALRM);
-            if(pp->p_stat==SSLEEP||pp->p_stat==SSTOP)
+                    case SSTOP:
+                        unsleep(pp);
+                        break;
+                    }
+                    pp->p_flag &= ~STIMO;
+                    splx(s);
+                } else
+                    psignal(pp, SIGALRM);
+            /*
+             * If process is blocked, increment computed
+             * time blocked.  This is used in swap scheduling.
+             */
+            if (pp->p_stat==SSLEEP || pp->p_stat==SSTOP)
                 if (pp->p_slptime != 127)
                     pp->p_slptime++;
+            /*
+             * Update digital filter estimation of process
+             * cpu utilization for loaded processes.
+             */
             if (pp->p_flag&SLOAD)
                 pp->p_pctcpu = ccpu * pp->p_pctcpu +
-                    (1.0 - ccpu) * (pp->p_cpticks/(float)HZ);
+                    (1.0 - ccpu) * (pp->p_cpticks/(float)hz);
+            /*
+             * Recompute process priority.  The number p_cpu
+             * is a weighted estimate of cpu time consumed.
+             * A process which consumes cpu time has this
+             * increase regularly.  We here decrease it by
+             * a fraction (SCHMAG is 90%), giving a digital
+             * decay filter which damps out in about 10 seconds.
+             *
+             * If a process is niced, then the nice directly
+             * affects the new priority.  The final priority
+             * is in the range 0 to 255, to fit in a character.
+             */
             pp->p_cpticks = 0;
             a = (pp->p_cpu & 0377)*SCHMAG + pp->p_nice - NZERO;
-            if(a < 0)
+            if (a < 0)
                 a = 0;
-            if(a > 255)
+            if (a > 255)
                 a = 255;
             pp->p_cpu = a;
             (void) setpri(pp);
+            /*
+             * Now have computed new process priority
+             * in p->p_usrpri.  Carefully change p->p_pri.
+             * A process is on a run queue associated with
+             * this priority, so we must block out process
+             * state changes during the transition.
+             */
             s = spl6();
-            if(pp->p_pri >= PUSER) {
+            if (pp->p_pri >= PUSER) {
                 if ((pp != u.u_procp || noproc) &&
                     pp->p_stat == SRUN &&
                     (pp->p_flag & SLOAD) &&
@@ -255,11 +320,22 @@ softclock(pc, ps)
             }
             splx(s);
         }
+
+        /*
+         * Perform virtual memory metering.
+         */
         vmmeter();
-        if(runin!=0) {
+
+        /*
+         * If the swap process is trying to bring
+         * a process in, have it look again to see
+         * if it is possible now.
+         */
+        if (runin!=0) {
            runin = 0;
            wakeup((caddr_t)&runin);
        }
+
        /*
         * If there are pages that have been cleaned,
         * jolt the pageout daemon to process them.
@@ -269,15 +345,26 @@ softclock(pc, ps)
         */
        if (bclnlist != NULL)
            wakeup((caddr_t)&proc[2]);
+
+        /*
+         * If the trap occurred from usermode,
+         * then check to see if it has now been
+         * running more than 10 minutes of user time
+         * and should thus run with reduced priority
+         * to give other processes a chance.
+         */
+
         if (USERMODE(ps)) {
             pp = u.u_procp;
-            if (pp->p_uid)
-                if (pp->p_nice == NZERO && u.u_vm.vm_utime > 600 * HZ)
-                    pp->p_nice = NZERO+4;
+            if (pp->p_uid && pp->p_nice == NZERO &&
+                u.u_vm.vm_utime > 600 * hz)
+                pp->p_nice = NZERO+4;
             (void) setpri(pp);
             pp->p_pri = pp->p_usrpri;
         }
     }
+    /*
+     * If trapped user-mode, give it a profiling tick.
+     */
     if (USERMODE(ps) && u.u_prof.pr_scale) {
         u.u_procp->p_flag |= SOWEUPC;
         aston();
@@ -286,10 +373,10 @@ softclock(pc, ps)
 /*
  * timeout is called to arrange that
- * fun(arg) is called in tim/HZ seconds.
+ * fun(arg) is called in tim/hz seconds.
  * An entry is sorted into the callout
  * structure.  The time in each structure
- * entry is the number of HZ's more
+ * entry is the number of hz's more
  * than the previous entry.
  * In this way, decrementing the
  * first entry has the effect of
@@ -302,7 +389,7 @@ timeout(fun, arg, tim)
     int (*fun)();
     caddr_t arg;
 {
-    register struct callo *p1, *p2, *p3;
+    register struct callout *p1, *p2, *p3;
     register int t;
     int s;
@@ -315,7 +402,7 @@ timeout(fun, arg, tim)
     }
     p1->c_time -= t;
     p2 = p1;
-    p3 = &callout[NCALL-2];
+    p3 = callout+(ncallout-2);
     while(p2->c_func != 0) {
         if (p2 >= p3)
             panic("timeout");
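
The comment block above timeout() describes the delta-encoded callout table: each entry stores only the number of ticks beyond the entry before it, so the clock interrupt only ever decrements the head of the list, and running the expired head entries implicitly advances everything behind them. The stand-alone C sketch below is not part of the historical source; the names (struct callo_demo, demo_timeout, demo_tick) and the fixed table size are invented for illustration, and it omits the overflow check the kernel handles with panic("timeout"). It shows one plausible user-level rendering of the same delta-list scheme.

/*
 * Illustrative sketch of a delta-encoded callout table.
 * Hypothetical names; not the kernel code above.
 */
#include <stdio.h>

#define NCALLOUT 16

struct callo_demo {
    int  c_time;            /* ticks beyond the previous entry */
    void (*c_func)(int);    /* function to call; 0 marks a free slot */
    int  c_arg;             /* argument handed to c_func */
};

static struct callo_demo callout[NCALLOUT];

/* Arrange for fun(arg) to run in tim ticks, keeping the table sorted. */
static void
demo_timeout(void (*fun)(int), int arg, int tim)
{
    struct callo_demo *p1, *p2;
    int t = tim;

    /* Subtract each earlier entry's delta until the insertion point. */
    p1 = callout;
    while (p1->c_func != 0 && p1->c_time <= t) {
        t -= p1->c_time;
        p1++;
    }
    /* The entry now following the new one owes t fewer ticks. */
    if (p1->c_func != 0)
        p1->c_time -= t;
    /* Shift the tail up one slot to open a hole at p1 (no overflow check). */
    p2 = p1;
    while (p2->c_func != 0)
        p2++;
    for (; p2 > p1; p2--)
        *p2 = *(p2 - 1);
    p1->c_time = t;
    p1->c_func = fun;
    p1->c_arg = arg;
}

/* One clock tick: decrement only the head delta, run expired entries. */
static void
demo_tick(void)
{
    struct callo_demo *p1, *p2;

    if (callout[0].c_func != 0)
        callout[0].c_time--;
    if (callout[0].c_func == 0 || callout[0].c_time > 0)
        return;
    /* Run every leading entry whose delta has reached zero. */
    p1 = callout;
    while (p1->c_func != 0 && p1->c_time <= 0) {
        (*p1->c_func)(p1->c_arg);
        p1++;
    }
    /* Compact the table, as the softclock() copy loop above does. */
    p2 = callout;
    while ((p2->c_func = p1->c_func) != 0) {
        p2->c_time = p1->c_time;
        p2->c_arg = p1->c_arg;
        p1++;
        p2++;
    }
}

static void
report(int arg)
{
    printf("callout %d fired\n", arg);
}

int
main(void)
{
    int i;

    demo_timeout(report, 1, 3);     /* fire at tick 3 */
    demo_timeout(report, 2, 5);     /* fire at tick 5; stored delta is 2 */
    for (i = 1; i <= 6; i++) {
        printf("tick %d\n", i);
        demo_tick();
    }
    return 0;
}

Compiled and run, the sketch prints "callout 1 fired" on the third tick and "callout 2 fired" on the fifth, matching the stored deltas of 3 and 2, which is the behaviour the timeout()/softclock() pair above implements inside the kernel.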