/*	kern_clock.c	4.33	82/07/13	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
#include "../h/vlimit.h"
#include "../h/mtpr.h"
#include "../h/clock.h"
#include "../h/cpu.h"
#include "../h/protosw.h"

#include "bk.h"
#include "dh.h"
#include "dz.h"
#include "ps.h"

/*
 * Hardclock is called straight from
 * the real time clock interrupt.
 * We limit the work we do at real clock interrupt time to:
 *	reloading clock
 *	decrementing time to callouts
 *	recording cpu time usage
 *	modifying priority of current process
 *	arranging for soft clock interrupt
 *	kernel pc profiling
 *
 * At software (softclock) interrupt time we:
 *	implement callouts
 *	maintain date
 *	lightning bolt wakeup (every second)
 *	alarm clock signals
 *	jab the scheduler
 *
 * On the vax, softclock is driven by the software interrupt
 * mechanism.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but that hardclock interrupts should never be lost.
 */
#ifdef GPROF
extern	int profiling;
extern	char *s_lowpc;
extern	u_long s_textsize;
extern	u_short *kcount;
#endif

/*
 * Protoslow is like lbolt, but for slow protocol timeouts: it is
 * decremented on each clock tick, and when it reaches zero softclock
 * reloads it with hz/PR_SLOWHZ and calls pfslowtimo().
 * Protofast works the same way for fast protocol timeouts,
 * using hz/PR_FASTHZ and pffasttimo().
 */
int	protoslow;
int	protofast;

/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int s, cpstate;

	/*
	 * reprime clock
	 */
	clkreld();

#if NPS > 0
	/*
	 * sync refresh of picture system
	 */
	psextsync(pc, ps);
#endif

	/*
	 * Update callout times.  Each entry's c_time is a delta from
	 * the entry before it, so decrementing the first entry whose
	 * delta is still positive ages every pending callout at once;
	 * entries already at zero are left for softclock to run.
	 */
	for (p1 = calltodo.c_next; p1 && p1->c_time <= 0; p1 = p1->c_next)
		;
	if (p1)
		p1->c_time--;

	/*
	 * Maintain iostat and per-process cpu statistics
	 */
	if (!noproc) {
		s = u.u_procp->p_rssize;
		u.u_vm.vm_idsrss += s;
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_vm.vm_ixrss += xrss;
		}
		if (s > u.u_vm.vm_maxrss)
			u.u_vm.vm_maxrss = s;
		if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/hz > u.u_limit[LIM_CPU]) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_limit[LIM_CPU] < INFINITY - 5)
				u.u_limit[LIM_CPU] += 5;
		}
	}
	/*
	 * Update iostat information.
	 */
	if (USERMODE(ps)) {
		u.u_vm.vm_utime++;
		if (u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
#ifdef GPROF
		int k = pc - s_lowpc;
		if (profiling < 2 && k < s_textsize)
			kcount[k / sizeof (*kcount)]++;
#endif
		cpstate = CP_SYS;
		if (noproc) {
			if ((ps&PSL_IPL) != 0)
				cpstate = CP_IDLE;
		} else
			u.u_vm.vm_stime++;
	}
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	/*
	 * Adjust priority of current process.
	 */
	if (!noproc) {
		pp = u.u_procp;
		pp->p_cpticks++;
		if (++pp->p_cpu == 0)
			pp->p_cpu--;
		if (pp->p_cpu % 4 == 0) {
			(void) setpri(pp);
			if (pp->p_pri >= PUSER)
				pp->p_pri = pp->p_usrpri;
		}
	}
	/*
	 * Time moves on.
	 */
	++lbolt;

	/*
	 * Time moves on for protocols.
	 */
	--protoslow; --protofast;

#if VAX780
	/*
	 * On 780's, implement a fast UBA watcher,
	 * to make sure uba's don't get stuck.
	 */
	if (cpu == VAX_780 && panicstr == 0 && !BASEPRI(ps))
		unhang();
#endif
	/*
	 * Schedule a software interrupt for the rest
	 * of clock activities.
	 */
	setsoftclock();
}

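#ifdef notdef
/*
 * Editor's sketch, not part of the original source: a minimal
 * illustration of the delta encoding behind the callout update
 * in hardclock() above.  Timeouts due in 2, 5 and 9 ticks are
 * kept as the deltas 2, 3 and 4; decrementing the first positive
 * delta moves all three due times one tick closer at O(1) cost.
 */
deltademo()
{
	static int delta[3] = { 2, 3, 4 };	/* due in 2, 5, 9 ticks */
	register int i;

	for (i = 0; i < 3 && delta[i] <= 0; i++)
		;
	if (i < 3)
		delta[i]--;	/* every pending entry just aged one tick */
}
#endif
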
/*
 * The digital decay cpu usage priority assignment is scaled to run in
 * time as expanded by the 1 minute load average.  Each second we
 * multiply the previous cpu usage estimate by
 *		b/(b+1),	where b = nrscale*avenrun[0]
 * The following relates the load average to the period over which
 * cpu usage is 90% forgotten:
 *	loadav 1	 5 seconds
 *	loadav 5	24 seconds
 *	loadav 10	47 seconds
 *	loadav 20	93 seconds
 * This is a great improvement on the previous algorithm, which
 * decayed the priorities by a constant, and decayed away all knowledge
 * of previous activity in about 20 seconds.  Under heavy load,
 * the previous algorithm degenerated to round-robin scheduling
 * with poor response time.
 */
#undef	ave
#define	ave(a,b) ((int)(((int)(a*b))/(b+1)))
int	nrscale = 2;
double	avenrun[];

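#ifdef notdef
/*
 * Editor's sketch, not part of the original source: replay the
 * once-per-second decay applied to p_cpu below, as a check on the
 * table above.  The load average of 1 is an assumed input; then
 * b = nrscale*avenrun[0] == 2, each step leaves 2*p_cpu/3, and
 * roughly 5-6 steps erase 90% of the usage.
 */
avedemo()
{
	double b = 1.0 * nrscale;	/* assume avenrun[0] == 1 */
	register int cpu, secs;

	for (cpu = 255, secs = 0; cpu > 25; secs++)
		cpu = ave(cpu, b);
	printf("90 percent of p_cpu forgotten in %d seconds\n", secs);
}
#endif
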
/*
 * Constant for decay filter for cpu usage field
 * in process table (used by ps au).
 */
double	ccpu = 0.95122942450071400909;		/* exp(-1/20) */

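#ifdef notdef
/*
 * Editor's sketch, not part of the original source: the %cpu that
 * ps reports is the digital filter updated once a second in
 * softclock() below,
 *	p_pctcpu = ccpu * p_pctcpu + (1 - ccpu) * (p_cpticks/hz)
 * Since ccpu == exp(-1/20), an idle process loses a factor of e
 * from its %cpu every 20 seconds, so 90% of it is gone after
 * 20*ln(10), about 46 seconds (the strict test below prints 47).
 */
pctdemo()
{
	double pct = 1.0;	/* assume a fully busy process goes idle */
	register int secs;

	for (secs = 0; pct > 0.1; secs++)
		pct = ccpu * pct;	/* p_cpticks == 0 while idle */
	printf("90 percent of the cpu estimate forgotten in %d seconds\n", secs);
}
#endif
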
/*
 * Software clock interrupt.
 * This routine runs at lower priority than device interrupts.
 */
/*ARGSUSED*/
softclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int a, s;
	caddr_t arg;
	int (*func)();

	/*
	 * Perform callouts (but not after panics!)
	 */
	if (panicstr == 0) {
		for (;;) {
			s = spl7();
			if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
				splx(s);
				break;
			}
			calltodo.c_next = p1->c_next;
			arg = p1->c_arg;
			func = p1->c_func;
			p1->c_next = callfree;
			callfree = p1;
			(void) splx(s);
			(*func)(arg);
		}
	}

	/*
	 * Drain silos.
	 */
#if NDH > 0
	s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
	s = spl5(); dztimer(); splx(s);
#endif

	/*
	 * If idling and processes are waiting to swap in,
	 * check on them.
	 */
	if (noproc && runin) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}

	/*
	 * Run paging daemon every 1/4 sec.
	 */
	if (lbolt % (hz/4) == 0) {
		vmpago();
	}

	/*
	 * Reschedule every 1/10 sec.
	 */
	if (lbolt % (hz/10) == 0) {
		runrun++;
		aston();
	}

	/*
	 * Run network slow and fast timeouts.
	 */
	if (protofast <= 0) {
		protofast = hz / PR_FASTHZ;
		pffasttimo();
	}
	if (protoslow <= 0) {
		protoslow = hz / PR_SLOWHZ;
		pfslowtimo();
	}

	/*
	 * Lightning bolt every second:
	 *	sleep timeouts
	 *	process priority recomputation
	 *	process %cpu averaging
	 *	virtual memory metering
	 *	kick swapper if processes want in
	 */
	if (lbolt >= hz) {
		/*
		 * This doesn't mean much on VAX since we run at
		 * software interrupt time... if hardclock()
		 * calls softclock() directly, it prevents
		 * this code from running when the priority
		 * was raised when the clock interrupt occurred.
		 */
		if (BASEPRI(ps))
			return;

		/*
		 * If we didn't run a few times because of
		 * long blockage at high ipl, we don't
		 * really want to run this code several times,
		 * so squish out all multiples of hz here.
		 */
		s = spl6();
		time += lbolt / hz; lbolt %= hz;
		splx(s);

		/*
		 * Wakeup lightning bolt sleepers.
		 * Processes sleep on lbolt to wait
		 * for short amounts of time (e.g. 1 second).
		 */
		wakeup((caddr_t)&lbolt);

		/*
		 * Recompute process priorities and process expiring
		 * sleep timeouts, both for user sleep()/alarm() calls
		 * and for internal sleeps with timeouts (the tsleep()
		 * kernel routine).
		 */
		for (pp = proc; pp < procNPROC; pp++)
		if (pp->p_stat && pp->p_stat!=SZOMB) {
			/*
			 * Increase resident time, to max of 127 seconds
			 * (it is kept in a character).  For
			 * loaded processes this is time in core; for
			 * swapped processes, this is time on drum.
			 */
			if (pp->p_time != 127)
				pp->p_time++;
			/*
			 * If a process has a clock counting down, and it
			 * expires, set it running (if this is a tsleep()),
			 * or give it a SIGALRM (if the user process
			 * is using alarm signals).
			 */
			if (pp->p_clktim && --pp->p_clktim == 0)
				if (pp->p_flag & STIMO) {
					s = spl6();
					switch (pp->p_stat) {

					case SSLEEP:
						setrun(pp);
						break;

					case SSTOP:
						unsleep(pp);
						break;
					}
					pp->p_flag &= ~STIMO;
					splx(s);
				} else
					psignal(pp, SIGALRM);
			/*
			 * If process is blocked, increment computed
			 * time blocked.  This is used in swap scheduling.
			 */
			if (pp->p_stat==SSLEEP || pp->p_stat==SSTOP)
				if (pp->p_slptime != 127)
					pp->p_slptime++;
			/*
			 * Update digital filter estimation of process
			 * cpu utilization for loaded processes.
			 */
			if (pp->p_flag&SLOAD)
				pp->p_pctcpu = ccpu * pp->p_pctcpu +
				    (1.0 - ccpu) * (pp->p_cpticks/(float)hz);
			/*
			 * Recompute process priority.  The number p_cpu
			 * is a weighted estimate of cpu time consumed.
			 * A process which consumes cpu time has this
			 * increase regularly.  We here decrease it by
			 * a fraction based on load average giving a digital
			 * decay filter which damps out in about 5 seconds
			 * when seconds are measured in time expanded by the
			 * load average.
			 *
			 * If a process is niced, then the nice directly
			 * affects the new priority.  The final priority
			 * is in the range 0 to 255, to fit in a character.
			 */
			pp->p_cpticks = 0;
			a = ave((pp->p_cpu & 0377), avenrun[0]*nrscale) +
				pp->p_nice - NZERO;
			if (a < 0)
				a = 0;
			if (a > 255)
				a = 255;
			pp->p_cpu = a;
			(void) setpri(pp);
			/*
			 * Now have computed new process priority
			 * in p->p_usrpri.  Carefully change p->p_pri.
			 * A process is on a run queue associated with
			 * this priority, so we must block out process
			 * state changes during the transition.
			 */
			s = spl6();
			if (pp->p_pri >= PUSER) {
				if ((pp != u.u_procp || noproc) &&
				    pp->p_stat == SRUN &&
				    (pp->p_flag & SLOAD) &&
				    pp->p_pri != pp->p_usrpri) {
					remrq(pp);
					pp->p_pri = pp->p_usrpri;
					setrq(pp);
				} else
					pp->p_pri = pp->p_usrpri;
			}
			splx(s);
		}

		/*
		 * Perform virtual memory metering.
		 */
		vmmeter();

		/*
		 * If the swap process is trying to bring
		 * a process in, have it look again to see
		 * if it is possible now.
		 */
		if (runin!=0) {
			runin = 0;
			wakeup((caddr_t)&runin);
		}

		/*
		 * If there are pages that have been cleaned,
		 * jolt the pageout daemon to process them.
		 * We do this here so that these pages will be
		 * freed if there is an abundance of memory and the
		 * daemon would not be awakened otherwise.
		 */
		if (bclnlist != NULL)
			wakeup((caddr_t)&proc[2]);

		/*
		 * If the trap occurred from usermode,
		 * then check to see if the process has now been
		 * running more than 10 minutes of user time
		 * and should thus run with reduced priority
		 * to give other processes a chance.
		 */
		if (USERMODE(ps)) {
			pp = u.u_procp;
			if (pp->p_uid && pp->p_nice == NZERO &&
			    u.u_vm.vm_utime > 600 * hz)
				pp->p_nice = NZERO+4;
			(void) setpri(pp);
			pp->p_pri = pp->p_usrpri;
		}
	}
	/*
	 * If trapped user-mode, give it a profiling tick.
	 */
	if (USERMODE(ps) && u.u_prof.pr_scale) {
		u.u_procp->p_flag |= SOWEUPC;
		aston();
	}
}

/*
 * Timeout is called to arrange that
 * fun(arg) is called in tim/hz seconds.
 * An entry is linked into the callout
 * structure.  The time in each structure
 * entry is the number of ticks more
 * than the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
 */
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2, *pnew;
	register int t;
	int s;

/* DEBUGGING CODE */
	int ttrstrt();

	if (fun == ttrstrt && arg == 0)
		panic("timeout ttrstr arg");
/* END DEBUGGING CODE */
	t = tim;
	s = spl7();
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = fun;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}

/*
 * Untimeout is called to remove a function timeout call
 * from the callout structure.
 */
untimeout(fun, arg)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2;
	register int s;

	s = spl7();
	for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
		if (p2->c_func == fun && p2->c_arg == arg) {
			if (p2->c_next)
				p2->c_next->c_time += p2->c_time;
			p1->c_next = p2->c_next;
			p2->c_next = callfree;
			callfree = p2;
			break;
		}
	}
	splx(s);
}
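
#ifdef notdef
/*
 * Editor's sketch, a hypothetical driver fragment that is not part
 * of this file: the usual shape of timeout()/untimeout() usage.
 * The names dexample, dstart and dcancel are made up.  A driver
 * arms a callout some ticks ahead and disarms it if the event it
 * was guarding against happens first.
 */
int	dexample();			/* hypothetical timeout handler */

dstart(unit)
	int unit;
{
	timeout(dexample, (caddr_t)unit, 2*hz);	/* run dexample in 2 seconds */
}

dcancel(unit)
	int unit;
{
	untimeout(dexample, (caddr_t)unit);	/* forget the pending call */
}
#endif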