/*-
 * Copyright (c) 1982, 1986, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)kern_clock.c	7.16 (Berkeley) 5/9/91
 * $Id: kern_clock.c,v 1.4 1993/10/19 09:05:51 davidg Exp $
 */

#include "param.h"
#include "systm.h"
#include "dkstat.h"
#include "callout.h"
#include "kernel.h"
#include "proc.h"
#include "resourcevar.h"

#include "machine/cpu.h"

#include "resource.h"
#include "vm/vm.h"

#ifdef GPROF
#include "gprof.h"
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers which run
 * independently of each other.  The main clock, running at hz
 * times per second, is used to do scheduling and timeout calculations.
 * The second timer does resource utilization estimation statistically
 * based on the state of the machine phz times a second.  Both functions
 * can be performed by a single clock (i.e., hz == phz); however, the
 * statistics will be much more prone to error.  Ideally a machine
 * would have separate clocks measuring time spent in user state, system
 * state, interrupt state, and idle state.  These clocks would allow a
 * non-approximate measure of resource utilization.
 */
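
/*
 * Illustrative note (annotation, not original source): with a statistics
 * clock ticking phz times a second, each tick charges a whole interval to
 * the sampled state, so over n samples the count for a state occupying a
 * fraction f of the time is binomially distributed with mean n*f and a
 * relative error on the order of 1/sqrt(n*f).  Running the sampler on the
 * very clock that drives scheduling (hz == phz) adds systematic bias as
 * well, since activity synchronized with the timer is always, or never,
 * caught in the sample.
 */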

/*
 * TODO:
 *	time of day, system/user timing, timeouts, profiling on separate timers
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register struct timeval *tp = (t); \
 \
	tp->tv_usec += (usec); \
	if (tp->tv_usec >= 1000000) { \
		tp->tv_usec -= 1000000; \
		tp->tv_sec++; \
	} \
}
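
/*
 * Worked example (annotation, not original source): bumping
 * { tv_sec = 1, tv_usec = 999900 } by 200 leaves tv_usec at 1000100,
 * which normalizes to { tv_sec = 2, tv_usec = 100 }.  The single if
 * only handles carries of less than one second, which is why callers
 * pass small deltas such as tick.
 */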

/*
 * The hz hardware interval timer.
 * We update the events relating to real time.
 * If this timer is also being used to gather statistics,
 * we run through the statistics gathering routine as well.
 */
hardclock(frame)
	clockframe frame;
{
	register struct callout *p1;
	register struct proc *p = curproc;
	register struct pstats *pstats;
	register struct rusage *ru;
	register struct vmspace *vm;
	register int s;
	int needsoft = 0;
	extern int tickdelta;
	extern long timedelta;

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the queue elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
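	/*
	 * Illustrative example (annotation, not original source): events
	 * due at absolute ticks 3, 5, 5 and 9 are kept with delta c_time
	 * values 3, 2, 0 and 4.  Decrementing only the head ages every
	 * entry at once, and a zero delta marks an event due in the same
	 * tick as its predecessor.
	 */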
	p1 = calltodo.c_next;
	while (p1) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
		p1 = p1->c_next;
	}

	/*
	 * Curproc (now in p) is null if no process is running.
	 * We assume that curproc is set in user mode!
	 */
	if (p)
		pstats = p->p_stats;
	/*
	 * Charge the time out based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers,
	 * assuming that the current state has been around for at least
	 * one tick.
	 */
	if (CLKF_USERMODE(&frame)) {
		if (pstats->p_prof.pr_scale)
			needsoft = 1;
		/*
		 * CPU was in user state.  Increment the
		 * user time counter, and process the process-virtual time
		 * interval timer.
		 */
		BUMPTIME(&p->p_utime, tick);
		if (timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
	} else {
		/*
		 * CPU was in system state.
		 */
		if (p)
			BUMPTIME(&p->p_stime, tick);
	}

	/* bump the resource usage of integral space use */
	if (p && pstats && (ru = &pstats->p_ru) && (vm = p->p_vmspace)) {
		ru->ru_ixrss += vm->vm_tsize * NBPG / 1024;
		ru->ru_idrss += vm->vm_dsize * NBPG / 1024;
		ru->ru_isrss += vm->vm_ssize * NBPG / 1024;
		if ((vm->vm_pmap.pm_stats.resident_count * NBPG / 1024) >
		    ru->ru_maxrss) {
			ru->ru_maxrss =
			    vm->vm_pmap.pm_stats.resident_count * NBPG / 1024;
		}
	}

	/*
	 * If the cpu is currently scheduled to a process, then
	 * charge it with resource utilization for a tick, updating
	 * statistics which run in (user+system) virtual time,
	 * such as the cpu time limit and profiling timers.
	 * This assumes that the current process has been running
	 * the entire last tick.
	 */
	if (p) {
		if ((p->p_utime.tv_sec+p->p_stime.tv_sec+1) >
		    p->p_rlimit[RLIMIT_CPU].rlim_cur) {
			psignal(p, SIGXCPU);
			if (p->p_rlimit[RLIMIT_CPU].rlim_cur <
			    p->p_rlimit[RLIMIT_CPU].rlim_max)
				p->p_rlimit[RLIMIT_CPU].rlim_cur += 5;
		}
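		/*
		 * Annotation (not original source): once the soft CPU limit
		 * is exceeded the process gets SIGXCPU and the soft limit is
		 * pushed up by 5 seconds, so the signal repeats roughly
		 * every 5 seconds of CPU time until rlim_cur catches up
		 * with the hard limit rlim_max.
		 */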
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);

		/*
		 * We adjust the priority of the current process.
		 * The priority of a process gets worse as it accumulates
		 * CPU time.  The cpu usage estimator (p_cpu) is increased here
		 * and the formula for computing priorities (in kern_synch.c)
		 * will compute a different value each time p_cpu increases
		 * by 4.  The cpu usage estimator ramps up quite quickly when
		 * the process is running (linearly), and decays away
		 * exponentially, at a rate which is proportionally slower
		 * when the system is busy.  The basic principle is that the
		 * system will 90% forget that a process used a lot of CPU
		 * time in 5*loadav seconds.  This causes the system to favor
		 * processes which haven't run much recently, and to
		 * round-robin among other processes.
		 */
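		/*
		 * Sketch of the companion math (annotation, not original
		 * source): the kern_synch.c of this vintage decays the
		 * estimator about once per second, adjusted by the nice
		 * value, with
		 *
		 *	p_cpu = p_cpu * (2*loadav)/(2*loadav + 1)
		 *
		 * and with that filter roughly 90% of accumulated p_cpu is
		 * forgotten after 5*loadav seconds, since
		 * ((2*loadav)/(2*loadav+1))^(5*loadav) ~= e**(-5/2) ~= 0.1.
		 */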
		p->p_cpticks++;
		if (++p->p_cpu == 0)
			p->p_cpu--;
		if ((p->p_cpu&3) == 0) {
			setpri(p);
			if (p->p_pri >= PUSER)
				p->p_pri = p->p_usrpri;
		}
	}

	/*
	 * If the alternate clock has not made itself known, then
	 * we must gather the statistics.
	 */
	if (phz == 0)
		gatherstats(&frame);

	/*
	 * Increment the time-of-day, and schedule
	 * processing of the callouts at a very low cpu priority,
	 * so we don't keep the relatively high clock interrupt
	 * priority any longer than necessary.
	 */
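	/*
	 * Annotation (not original source): BUMPTIME expands to a braced
	 * block, so the call before ``else'' below must not carry a
	 * semicolon; ``{ ... };'' directly before an else would not parse.
	 */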
	if (timedelta == 0)
		BUMPTIME(&time, tick)
	else {
		register int delta;

		if (timedelta < 0) {
			delta = tick - tickdelta;
			timedelta += tickdelta;
		} else {
			delta = tick + tickdelta;
			timedelta -= tickdelta;
		}
		BUMPTIME(&time, delta);
	}
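	/*
	 * Worked example (annotation, not original source): if adjtime()
	 * has asked the clock to lose 100 usec, timedelta is -100 and
	 * tickdelta might be 5; for the next 20 ticks delta is tick - 5
	 * while timedelta climbs by 5 per tick, slewing the clock smoothly
	 * instead of stepping it backwards.
	 */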
#ifdef DCFCLK
	/*
	 * This is lousy, but until I can get the $&^%&^(!!! signal onto one
	 * of the interrupts I'll have to poll it.  No, it will not work if
	 * you attempt -DHZ=1000; things break.
	 * But keep NDCFCLK low, to avoid wasting cycles...
	 * phk@data.fls.dk
	 */
	dcfclk_worker();
#endif
	if (needsoft) {
#if 0
/*
 * XXX - hardclock runs at splhigh, so the splsoftclock is useless and
 * softclock runs at splhigh as well if we do this.  It is not much of
 * an optimization, since the "software interrupt" is done with a call
 * from doreti, and the overhead of checking there is sometimes less
 * than checking here.  Moreover, the whole %$$%$^ frame is passed by
 * value here.
 */
		if (CLKF_BASEPRI(&frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void) splsoftclock();
			softclock(frame);
		} else
#endif
			setsoftclock();
	}
}

int dk_ndrive = DK_NDRIVE;
/*
 * Gather statistics on resource utilization.
 *
 * We make a gross assumption: that the system has been in the
 * state it is in (user state, kernel state, interrupt state,
 * or idle state) for the entire last time interval, and
 * update statistics accordingly.
 */
gatherstats(framep)
	clockframe *framep;
{
	register int cpstate, s;

	/*
	 * Determine what state the cpu is in.
	 */
	if (CLKF_USERMODE(framep)) {
		/*
		 * CPU was in user state.
		 */
		if (curproc->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		/*
		 * CPU was in system state.  If profiling the kernel,
		 * increment a counter.  If no process is running
		 * then this is a system tick if we were running
		 * at a non-zero IPL (in a driver).  If a process is running,
		 * then we charge it with system time even if we were
		 * at a non-zero IPL, since the system often runs
		 * this way during processing of system calls.
		 * This is approximate, but the lack of true interval
		 * timers makes doing anything else difficult.
		 */
		cpstate = CP_SYS;
		if (curproc == NULL && CLKF_BASEPRI(framep))
			cpstate = CP_IDLE;
#ifdef GPROF
		s = (u_long) CLKF_PC(framep) - (u_long) s_lowpc;
		if (profiling < 2 && s < s_textsize)
			kcount[s / (HISTFRACTION * sizeof (*kcount))]++;
#endif
	}
	/*
	 * We maintain statistics shown by user-level statistics
	 * programs: the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 */
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
}

/*
 * Software priority level clock interrupt.
 * Run periodic events from the timeout queue.
 */
/*ARGSUSED*/
softclock(frame)
	clockframe frame;
{

	for (;;) {
		register struct callout *p1;
		register caddr_t arg;
		register int (*func)();
		register int a, s;

		s = splhigh();
		if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
			splx(s);
			break;
		}
		arg = p1->c_arg; func = p1->c_func; a = p1->c_time;
		calltodo.c_next = p1->c_next;
		p1->c_next = callfree;
		callfree = p1;
		splx(s);
		(*func)(arg, a);
	}

	/*
	 * If there is no process to work with, we're finished.
	 */
	if (curproc == 0)
		return;

	/*
	 * If we trapped from user mode and the process is profiling,
	 * give it a profiling tick.
	 */
	if (CLKF_USERMODE(&frame)) {
		register struct proc *p = curproc;

		if (p->p_stats->p_prof.pr_scale)
			profile_tick(p, &frame);
		/*
		 * Check to see if the process has accumulated
		 * more than 10 minutes of user time.  If so,
		 * reduce its priority to give others a chance.
		 */
		if (p->p_ucred->cr_uid && p->p_nice == NZERO &&
		    p->p_utime.tv_sec > 10 * 60) {
			p->p_nice = NZERO + 4;
			setpri(p);
			p->p_pri = p->p_usrpri;
		}
	}
}

/*
 * Arrange that (*func)(arg) is called in t/hz seconds.
 */
timeout(func, arg, t)
	int (*func)();
	caddr_t arg;
	register int t;
{
	register struct callout *p1, *p2, *pnew;
	register int s = splhigh();

	if (t <= 0)
		t = 1;
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = func;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		if (p2->c_time > 0)
			t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}

/*
 * untimeout is called to remove a function timeout call
 * from the callout structure.
 */
untimeout(func, arg)
	int (*func)();
	caddr_t arg;
{
	register struct callout *p1, *p2;
	register int s;

	s = splhigh();
	for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
		if (p2->c_func == func && p2->c_arg == arg) {
			if (p2->c_next && p2->c_time > 0)
				p2->c_next->c_time += p2->c_time;
			p1->c_next = p2->c_next;
			p2->c_next = callfree;
			callfree = p2;
			break;
		}
	}
	splx(s);
}

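#if 0
/*
 * Hedged usage sketch (annotation, not part of this file): how a driver
 * of this era might use the pair above.  xx_watchdog, xx_softc and sc
 * are hypothetical names.
 */
static int
xx_watchdog(arg)
	caddr_t arg;
{
	struct xx_softc *sc = (struct xx_softc *)arg;

	/* ... check that the device still responds ... */
	timeout(xx_watchdog, arg, hz);		/* re-arm: one second */
	return (0);
}

/*
 * Armed once at attach time and cancelled with an exactly matching
 * (func, arg) pair at detach time:
 *
 *	timeout(xx_watchdog, (caddr_t)sc, hz);
 *	untimeout(xx_watchdog, (caddr_t)sc);
 */
#endif
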
/*
 * Compute the number of hz until the specified time.
 * Used to compute the third argument to timeout() from an
 * absolute time.
 */
hzto(tv)
	struct timeval *tv;
{
	register long ticks;
	register long sec;
	int s = splhigh();

	/*
	 * If the number of milliseconds will fit in 32 bit arithmetic,
	 * then compute the number of milliseconds to the target time and
	 * scale to ticks.  Otherwise just compute the number of hz in the
	 * time, rounding times greater than representable to the maximum
	 * value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * The maximum value for any timeout in 10ms ticks is 250 days.
	 */
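	/*
	 * Worked example (annotation, not original source): with hz = 100
	 * and tick = 10000, a tv 2.5 seconds ahead of time yields
	 * (2*1000 + 500000/1000) / (10000/1000) = 2500/10 = 250 ticks.
	 * The millisecond path applies while the delta stays below
	 * 0x7fffffff/1000 - 1000 seconds, roughly 24.8 days.
	 */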
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000 +
			(tv->tv_usec - time.tv_usec) / 1000) / (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}