/*	kern_clock.c	4.20	81/04/17	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
#include "../h/vlimit.h"
#include "../h/mtpr.h"
#include "../h/clock.h"
#include "../h/cpu.h"

#include "bk.h"
#include "dh.h"
#include "dz.h"

/*
 * Hardclock is called straight from
 * the real time clock interrupt.
 * We limit the work we do at real clock interrupt time to:
 *	reloading clock
 *	decrementing time to callouts
 *	recording cpu time usage
 *	modifying priority of current process
 *	arranging for soft clock interrupt
 *	kernel pc profiling
 *
 * At software (softclock) interrupt time we:
 *	implement callouts
 *	maintain date
 *	lightning bolt wakeup (every second)
 *	alarm clock signals
 *	jab the scheduler
 *
 * On the vax softclock interrupts are implemented by
 * software interrupts.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but that hardclock interrupts should never be lost.
 */

/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int s, cpstate;

	/*
	 * reprime clock
	 */
	clkreld();

	/*
	 * update callout times
	 */
	for (p1 = calltodo.c_next; p1 && p1->c_time <= 0; p1 = p1->c_next)
		;
	if (p1)
		p1->c_time--;
out:

	/*
	 * Maintain iostat and per-process cpu statistics
	 */
	if (!noproc) {
		s = u.u_procp->p_rssize;
		u.u_vm.vm_idsrss += s;
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_vm.vm_ixrss += xrss;
		}
		if (s > u.u_vm.vm_maxrss)
			u.u_vm.vm_maxrss = s;
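		/*
		 * Check the process against its cpu time limit; post
		 * SIGXCPU when it is exceeded, then push the limit up
		 * 5 seconds so the signal recurs periodically rather
		 * than on every subsequent tick.
		 */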
		if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/hz > u.u_limit[LIM_CPU]) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_limit[LIM_CPU] < INFINITY - 5)
				u.u_limit[LIM_CPU] += 5;
		}
	}
	/*
	 * Update iostat information.
	 */
	if (USERMODE(ps)) {
		u.u_vm.vm_utime++;
		if(u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		cpstate = CP_SYS;
		if (noproc)
			cpstate = CP_IDLE;
		else
			u.u_vm.vm_stime++;
	}
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	/*
	 * Adjust priority of current process.
	 */
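	/*
	 * p_cpu must fit in a character (see the decay code in
	 * softclock below), so let it saturate at 255 rather than
	 * wrap back to zero; the full priority recomputation is
	 * done only every 16th tick to keep hardclock cheap.
	 */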
	if (!noproc) {
		pp = u.u_procp;
		pp->p_cpticks++;
		if(++pp->p_cpu == 0)
			pp->p_cpu--;
		if(pp->p_cpu % 16 == 0) {
			(void) setpri(pp);
			if (pp->p_pri >= PUSER)
				pp->p_pri = pp->p_usrpri;
		}
	}
	/*
	 * Time moves on.
	 */
	++lbolt;
#if VAX780
	/*
	 * On 780's, implement a fast UBA watcher,
	 * to make sure uba's don't get stuck.
	 */
	if (cpu == VAX_780 && panicstr == 0 && !BASEPRI(ps))
		unhang();
#endif
	/*
	 * Schedule a software interrupt for the rest
	 * of clock activities.
	 */
	setsoftclock();
}

/*
 * SCHMAG is the constant in the digital decay cpu
 * usage priority assignment.  Each second we multiply
 * the previous cpu usage estimate by SCHMAG.  At 9/10
 * it tends to decay away all knowledge of previous activity
 * in about 10 seconds.
 */
#define	SCHMAG	9/10
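/*
 * Note that SCHMAG expands textually, so an expression such as
 * p_cpu*SCHMAG is evaluated as (p_cpu*9)/10 in integer arithmetic.
 * As a rough figure, (9/10)**10 is about 0.35, so about a third of
 * the original estimate survives ten decays.
 */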

/*
 * Constant for decay filter for cpu usage field
 * in process table (used by ps au).
 */
double	ccpu = 0.95122942450071400909;	/* exp(-1/20) */
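/*
 * Multiplying by exp(-1/20) once per second gives p_pctcpu an
 * exponential decay with a time constant of roughly 20 seconds:
 * activity 20 seconds old retains only about 1/e (37%) of its
 * original weight in the average.
 */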

/*
 * Software clock interrupt.
 * This routine runs at lower priority than device interrupts.
 */
/*ARGSUSED*/
softclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1, *p2;
	register struct proc *pp;
	register int a, s;
	caddr_t arg;
	int (*func)();

	/*
	 * Perform callouts (but not after panics!)
	 */
	if (panicstr == 0) {
		for (;;) {
			s = spl7();
			if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0)
				break;
			calltodo.c_next = p1->c_next;
			arg = p1->c_arg;
			func = p1->c_func;
			p1->c_next = callfree;
			callfree = p1;
			(void) splx(s);
			(*func)(arg);
		}
	}

	/*
	 * Drain silos.
	 */
#if NBK > 0
#if NDH > 0
	s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
	s = spl5(); dztimer(); splx(s);
#endif
#endif

	/*
	 * If idling and processes are waiting to swap in,
	 * check on them.
	 */
	if (noproc && runin) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}

	/*
	 * Run paging daemon and reschedule every 1/4 sec.
	 */
	if (lbolt % (hz/4) == 0) {
		vmpago();
		runrun++;
		aston();
	}

	/*
	 * Lightning bolt every second:
	 *	sleep timeouts
	 *	process priority recomputation
	 *	process %cpu averaging
	 *	virtual memory metering
	 *	kick swapper if processes want in
	 */
	if (lbolt >= hz) {
		/*
		 * This doesn't mean much on VAX since we run at
		 * software interrupt time... if hardclock()
		 * calls softclock() directly, it prevents
		 * this code from running while the priority is
		 * still raised from the clock interrupt.
		 */
		if (BASEPRI(ps))
			return;

		/*
		 * If we didn't run a few times because of
		 * long blockage at high ipl, we don't
		 * really want to run this code several times,
		 * so squish out all multiples of hz here.
		 */
		time += lbolt / hz;
		lbolt %= hz;

		/*
		 * Wakeup lightning bolt sleepers.
		 * Processes sleep on lbolt to wait
		 * for short amounts of time (e.g. 1 second).
		 */
		wakeup((caddr_t)&lbolt);

		/*
		 * Recompute process priority and handle expired
		 * process sleep() system calls as well as internal
		 * sleeps with timeouts (tsleep() kernel routine).
		 */
		for (pp = proc; pp < procNPROC; pp++)
		if (pp->p_stat && pp->p_stat!=SZOMB) {
			/*
			 * Increase resident time, to max of 127 seconds
			 * (it is kept in a character.)  For
			 * loaded processes this is time in core; for
			 * swapped processes, this is time on drum.
			 */
			if (pp->p_time != 127)
				pp->p_time++;
			/*
			 * If process has clock counting down, and it
			 * expires, set it running (if this is a tsleep()),
			 * or give it a SIGALRM (if the user process
			 * is using alarm signals).
			 */
			if (pp->p_clktim && --pp->p_clktim == 0)
				if (pp->p_flag & STIMO) {
					s = spl6();
					switch (pp->p_stat) {

					case SSLEEP:
						setrun(pp);
						break;

					case SSTOP:
						unsleep(pp);
						break;
					}
					pp->p_flag &= ~STIMO;
					splx(s);
				} else
					psignal(pp, SIGALRM);
			/*
			 * If process is blocked, increment computed
			 * time blocked.  This is used in swap scheduling.
			 */
			if (pp->p_stat==SSLEEP || pp->p_stat==SSTOP)
				if (pp->p_slptime != 127)
					pp->p_slptime++;
			/*
			 * Update digital filter estimation of process
			 * cpu utilization for loaded processes.
			 */
			if (pp->p_flag&SLOAD)
				pp->p_pctcpu = ccpu * pp->p_pctcpu +
					(1.0 - ccpu) * (pp->p_cpticks/(float)hz);
			/*
			 * Recompute process priority.  The number p_cpu
			 * is a weighted estimate of cpu time consumed.
			 * A process which consumes cpu time has this
			 * increased regularly.  We here decrease it by
			 * a fraction (SCHMAG is 90%), giving a digital
			 * decay filter which damps out in about 10 seconds.
			 *
			 * If a process is niced, then the nice directly
			 * affects the new priority.  The final priority
			 * is in the range 0 to 255, to fit in a character.
			 */
			pp->p_cpticks = 0;
			a = (pp->p_cpu & 0377)*SCHMAG + pp->p_nice - NZERO;
			if (a < 0)
				a = 0;
			if (a > 255)
				a = 255;
			pp->p_cpu = a;
			(void) setpri(pp);
			/*
			 * Now have computed new process priority
			 * in p->p_usrpri.  Carefully change p->p_pri.
			 * A process is on a run queue associated with
			 * this priority, so we must block out process
			 * state changes during the transition.
			 */
			s = spl6();
			if (pp->p_pri >= PUSER) {
				if ((pp != u.u_procp || noproc) &&
				    pp->p_stat == SRUN &&
				    (pp->p_flag & SLOAD) &&
				    pp->p_pri != pp->p_usrpri) {
					remrq(pp);
					pp->p_pri = pp->p_usrpri;
					setrq(pp);
				} else
					pp->p_pri = pp->p_usrpri;
			}
			splx(s);
		}

		/*
		 * Perform virtual memory metering.
		 */
		vmmeter();

		/*
		 * If the swap process is trying to bring
		 * a process in, have it look again to see
		 * if it is possible now.
		 */
		if (runin!=0) {
			runin = 0;
			wakeup((caddr_t)&runin);
		}

		/*
		 * If there are pages that have been cleaned,
		 * jolt the pageout daemon to process them.
		 * We do this here so that these pages will be
		 * freed if there is an abundance of memory and the
		 * daemon would not be awakened otherwise.
		 */
		if (bclnlist != NULL)
			wakeup((caddr_t)&proc[2]);

		/*
		 * If the trap occurred from usermode,
		 * then check to see if the process has now been
		 * running more than 10 minutes of user time
		 * and should thus run with reduced priority
		 * to give other processes a chance.
		 */
		if (USERMODE(ps)) {
			pp = u.u_procp;
			if (pp->p_uid && pp->p_nice == NZERO &&
			    u.u_vm.vm_utime > 600 * hz)
				pp->p_nice = NZERO+4;
			(void) setpri(pp);
			pp->p_pri = pp->p_usrpri;
		}
	}
	/*
	 * If trapped user-mode, give it a profiling tick.
	 */
	if (USERMODE(ps) && u.u_prof.pr_scale) {
		u.u_procp->p_flag |= SOWEUPC;
		aston();
	}
}

/*
 * Timeout is called to arrange that
 * fun(arg) is called in tim/hz seconds.
 * An entry is linked into the callout
 * structure.  The time in each structure
 * entry is the number of ticks more
 * than the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
 */
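/*
 * For example, starting from an empty list the successive calls
 *	timeout(a, 0, 5); timeout(b, 0, 12); timeout(c, 0, 12);
 * would leave entries with c_time values of 5, 7 and 0, since each
 * entry records only its offset from the entry ahead of it.
 * (The function names here are purely illustrative.)
 */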
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2, *pnew;
	register int t;
	int s;

/* DEBUGGING CODE */
	int ttrstrt();

	if (fun == ttrstrt && arg == 0)
		panic("timeout ttrstr arg");
/* END DEBUGGING CODE */
	t = tim;
	s = spl7();
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = fun;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}