| 1 | /*- |
| 2 | * Copyright (c) 1982, 1986, 1991 The Regents of the University of California. |
| 3 | * All rights reserved. |
| 4 | * |
| 5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions |
| 7 | * are met: |
| 8 | * 1. Redistributions of source code must retain the above copyright |
| 9 | * notice, this list of conditions and the following disclaimer. |
| 10 | * 2. Redistributions in binary form must reproduce the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer in the |
| 12 | * documentation and/or other materials provided with the distribution. |
| 13 | * 3. All advertising materials mentioning features or use of this software |
| 14 | * must display the following acknowledgement: |
| 15 | * This product includes software developed by the University of |
| 16 | * California, Berkeley and its contributors. |
| 17 | * 4. Neither the name of the University nor the names of its contributors |
| 18 | * may be used to endorse or promote products derived from this software |
| 19 | * without specific prior written permission. |
| 20 | * |
| 21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 31 | * SUCH DAMAGE. |
| 32 | * |
| 33 | * @(#)kern_clock.c 7.16 (Berkeley) 5/9/91 |
| 34 | */ |
| 35 | |
| 36 | #include "param.h" |
| 37 | #include "systm.h" |
| 38 | #include "dkstat.h" |
| 39 | #include "callout.h" |
| 40 | #include "kernel.h" |
| 41 | #include "proc.h" |
| 42 | #include "resourcevar.h" |
| 43 | |
| 44 | #include "machine/cpu.h" |
| 45 | |
| 46 | #ifdef GPROF |
| 47 | #include "gprof.h" |
| 48 | #endif |
| 49 | |
| 50 | /* |
| 51 | * Clock handling routines. |
| 52 | * |
| 53 | * This code is written to operate with two timers which run |
| 54 | * independently of each other. The main clock, running at hz |
| 55 | * times per second, is used to do scheduling and timeout calculations. |
 * The second timer gathers resource utilization statistics by sampling
 * the state of the machine phz times a second.  Both functions can be
 * performed by a single clock (i.e. hz == phz); however, the statistics
 * will then be much more prone to error.  Ideally a machine
| 60 | * would have separate clocks measuring time spent in user state, system |
| 61 | * state, interrupt state, and idle state. These clocks would allow a non- |
| 62 | * approximate measure of resource utilization. |
| 63 | */ |
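
/*
 * For example, with hz == 100 hardclock() runs every 10 ms
 * (tick == 10000 usec); on a machine with a separate statistics
 * clock, gatherstats() is driven phz times a second instead of
 * being called from hardclock().
 */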
| 64 | |
| 65 | /* |
| 66 | * TODO: |
| 67 | * time of day, system/user timing, timeouts, profiling on separate timers |
| 68 | * allocate more timeout table slots when table overflows. |
| 69 | */ |
| 70 | |
| 71 | /* |
| 72 | * Bump a timeval by a small number of usec's. |
| 73 | */ |
| 74 | #define BUMPTIME(t, usec) { \ |
| 75 | register struct timeval *tp = (t); \ |
| 76 | \ |
| 77 | tp->tv_usec += (usec); \ |
| 78 | if (tp->tv_usec >= 1000000) { \ |
| 79 | tp->tv_usec -= 1000000; \ |
| 80 | tp->tv_sec++; \ |
| 81 | } \ |
| 82 | } |
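
/*
 * For example, applying BUMPTIME to { 9 sec, 995000 usec } with a
 * usec argument of 10000 leaves { 10 sec, 5000 usec }.  The macro
 * assumes usec is small enough that a single carry suffices.
 */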
| 83 | |
| 84 | /* |
| 85 | * The hz hardware interval timer. |
| 86 | * We update the events relating to real time. |
| 87 | * If this timer is also being used to gather statistics, |
| 88 | * we run through the statistics gathering routine as well. |
| 89 | */ |
| 90 | hardclock(frame) |
| 91 | clockframe frame; |
| 92 | { |
| 93 | register struct callout *p1; |
| 94 | register struct proc *p = curproc; |
| 95 | register struct pstats *pstats; |
| 96 | register int s; |
| 97 | int needsoft = 0; |
| 98 | extern int tickdelta; |
| 99 | extern long timedelta; |
| 100 | |
| 101 | /* |
| 102 | * Update real-time timeout queue. |
| 103 | * At front of queue are some number of events which are ``due''. |
| 104 | * The time to these is <= 0 and if negative represents the |
| 105 | * number of ticks which have passed since it was supposed to happen. |
	 * The rest of the queue elements (times > 0) are events yet to happen,
| 107 | * where the time for each is given as a delta from the previous. |
| 108 | * Decrementing just the first of these serves to decrement the time |
| 109 | * to all events. |
| 110 | */ |
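	/*
	 * For example, three timeouts due 2, 5, and 9 ticks from now
	 * are queued with c_time values of 2, 3, and 4 respectively.
	 */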
| 111 | p1 = calltodo.c_next; |
| 112 | while (p1) { |
| 113 | if (--p1->c_time > 0) |
| 114 | break; |
| 115 | needsoft = 1; |
| 116 | if (p1->c_time == 0) |
| 117 | break; |
| 118 | p1 = p1->c_next; |
| 119 | } |
| 120 | |
| 121 | /* |
| 122 | * Curproc (now in p) is null if no process is running. |
| 123 | * We assume that curproc is set in user mode! |
| 124 | */ |
| 125 | if (p) |
| 126 | pstats = p->p_stats; |
| 127 | /* |
	 * Charge the tick to the process based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers,
	 * assuming that the current state has been in effect for at least
	 * one tick.
| 132 | */ |
| 133 | if (CLKF_USERMODE(&frame)) { |
| 134 | if (pstats->p_prof.pr_scale) |
| 135 | needsoft = 1; |
| 136 | /* |
		 * CPU was in user state.  Increment the
		 * user time counter, and run down the process's
		 * virtual-time interval timer (ITIMER_VIRTUAL).
| 140 | */ |
| 141 | BUMPTIME(&p->p_utime, tick); |
| 142 | if (timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && |
| 143 | itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) |
| 144 | psignal(p, SIGVTALRM); |
| 145 | } else { |
| 146 | /* |
| 147 | * CPU was in system state. |
| 148 | */ |
| 149 | if (p) |
| 150 | BUMPTIME(&p->p_stime, tick); |
| 151 | } |
| 152 | |
| 153 | /* |
| 154 | * If the cpu is currently scheduled to a process, then |
| 155 | * charge it with resource utilization for a tick, updating |
| 156 | * statistics which run in (user+system) virtual time, |
| 157 | * such as the cpu time limit and profiling timers. |
| 158 | * This assumes that the current process has been running |
| 159 | * the entire last tick. |
| 160 | */ |
| 161 | if (p) { |
| 162 | if ((p->p_utime.tv_sec+p->p_stime.tv_sec+1) > |
| 163 | p->p_rlimit[RLIMIT_CPU].rlim_cur) { |
| 164 | psignal(p, SIGXCPU); |
| 165 | if (p->p_rlimit[RLIMIT_CPU].rlim_cur < |
| 166 | p->p_rlimit[RLIMIT_CPU].rlim_max) |
| 167 | p->p_rlimit[RLIMIT_CPU].rlim_cur += 5; |
| 168 | } |
| 169 | if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) && |
| 170 | itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) |
| 171 | psignal(p, SIGPROF); |
| 172 | |
| 173 | /* |
| 174 | * We adjust the priority of the current process. |
| 175 | * The priority of a process gets worse as it accumulates |
| 176 | * CPU time. The cpu usage estimator (p_cpu) is increased here |
| 177 | * and the formula for computing priorities (in kern_synch.c) |
| 178 | * will compute a different value each time the p_cpu increases |
		 * by 4.  The cpu usage estimator ramps up quite quickly when
		 * the process is running (linearly), and decays away
		 * exponentially, at a rate which is proportionally slower
		 * when the system is busy.  The basic principle is that the
		 * system will 90% forget that a process used a lot of CPU
		 * time in 5*loadav seconds.  This causes the system to favor
| 185 | * processes which haven't run much recently, and to |
| 186 | * round-robin among other processes. |
| 187 | */ |
| 188 | p->p_cpticks++; |
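		/*
		 * p_cpu is a narrow counter: if the increment wraps it
		 * around to zero, back it off so the estimator saturates
		 * at its maximum value instead of resetting.
		 */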
| 189 | if (++p->p_cpu == 0) |
| 190 | p->p_cpu--; |
| 191 | if ((p->p_cpu&3) == 0) { |
| 192 | setpri(p); |
| 193 | if (p->p_pri >= PUSER) |
| 194 | p->p_pri = p->p_usrpri; |
| 195 | } |
| 196 | } |
| 197 | |
| 198 | /* |
| 199 | * If the alternate clock has not made itself known then |
| 200 | * we must gather the statistics. |
| 201 | */ |
| 202 | if (phz == 0) |
| 203 | gatherstats(&frame); |
| 204 | |
| 205 | /* |
| 206 | * Increment the time-of-day, and schedule |
| 207 | * processing of the callouts at a very low cpu priority, |
| 208 | * so we don't keep the relatively high clock interrupt |
| 209 | * priority any longer than necessary. |
| 210 | */ |
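	/*
	 * timedelta and tickdelta are set when the clock is being
	 * slewed (e.g. by adjtime); each tick is then lengthened or
	 * shortened by tickdelta usec until the outstanding timedelta
	 * correction has been consumed.
	 */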
| 211 | if (timedelta == 0) |
| 212 | BUMPTIME(&time, tick) |
| 213 | else { |
| 214 | register delta; |
| 215 | |
| 216 | if (timedelta < 0) { |
| 217 | delta = tick - tickdelta; |
| 218 | timedelta += tickdelta; |
| 219 | } else { |
| 220 | delta = tick + tickdelta; |
| 221 | timedelta -= tickdelta; |
| 222 | } |
| 223 | BUMPTIME(&time, delta); |
| 224 | } |
| 225 | if (needsoft) { |
| 226 | if (CLKF_BASEPRI(&frame)) { |
| 227 | /* |
| 228 | * Save the overhead of a software interrupt; |
| 229 | * it will happen as soon as we return, so do it now. |
| 230 | */ |
| 231 | (void) splsoftclock(); |
| 232 | softclock(frame); |
| 233 | } else |
| 234 | setsoftclock(); |
| 235 | } |
| 236 | } |
| 237 | |
| 238 | int dk_ndrive = DK_NDRIVE; |
| 239 | /* |
| 240 | * Gather statistics on resource utilization. |
| 241 | * |
| 242 | * We make a gross assumption: that the system has been in the |
| 243 | * state it is in (user state, kernel state, interrupt state, |
| 244 | * or idle state) for the entire last time interval, and |
| 245 | * update statistics accordingly. |
| 246 | */ |
| 247 | gatherstats(framep) |
| 248 | clockframe *framep; |
| 249 | { |
| 250 | register int cpstate, s; |
| 251 | |
| 252 | /* |
| 253 | * Determine what state the cpu is in. |
| 254 | */ |
| 255 | if (CLKF_USERMODE(framep)) { |
| 256 | /* |
| 257 | * CPU was in user state. |
| 258 | */ |
| 259 | if (curproc->p_nice > NZERO) |
| 260 | cpstate = CP_NICE; |
| 261 | else |
| 262 | cpstate = CP_USER; |
| 263 | } else { |
| 264 | /* |
		 * CPU was in system state.  If profiling the kernel,
		 * increment a counter.  If no process is running,
		 * this is a system tick only if we were running
		 * at a non-zero IPL (in a driver).  If a process is running,
| 269 | * then we charge it with system time even if we were |
| 270 | * at a non-zero IPL, since the system often runs |
| 271 | * this way during processing of system calls. |
| 272 | * This is approximate, but the lack of true interval |
| 273 | * timers makes doing anything else difficult. |
| 274 | */ |
| 275 | cpstate = CP_SYS; |
| 276 | if (curproc == NULL && CLKF_BASEPRI(framep)) |
| 277 | cpstate = CP_IDLE; |
| 278 | #ifdef GPROF |
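		/*
		 * Kernel profiling: if the interrupted PC lies within the
		 * profiled kernel text, bump the corresponding histogram
		 * bucket (scaled by HISTFRACTION).
		 */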
| 279 | s = CLKF_PC(framep) - s_lowpc; |
| 280 | if (profiling < 2 && s < s_textsize) |
| 281 | kcount[s / (HISTFRACTION * sizeof (*kcount))]++; |
| 282 | #endif |
| 283 | } |
| 284 | /* |
| 285 | * We maintain statistics shown by user-level statistics |
| 286 | * programs: the amount of time in each cpu state, and |
| 287 | * the amount of time each of DK_NDRIVE ``drives'' is busy. |
| 288 | */ |
| 289 | cp_time[cpstate]++; |
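	/*
	 * dk_busy is a bitmask with one bit per drive; count a busy
	 * tick for every drive whose bit is currently set.
	 */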
| 290 | for (s = 0; s < DK_NDRIVE; s++) |
| 291 | if (dk_busy&(1<<s)) |
| 292 | dk_time[s]++; |
| 293 | } |
| 294 | |
| 295 | /* |
| 296 | * Software priority level clock interrupt. |
| 297 | * Run periodic events from timeout queue. |
| 298 | */ |
| 299 | /*ARGSUSED*/ |
| 300 | softclock(frame) |
| 301 | clockframe frame; |
| 302 | { |
| 303 | |
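	/*
	 * Pop expired entries off the head of the callout queue one at
	 * a time, holding splhigh only while the queue itself is being
	 * manipulated, and call each handler after dropping back down.
	 */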
| 304 | for (;;) { |
| 305 | register struct callout *p1; |
| 306 | register caddr_t arg; |
| 307 | register int (*func)(); |
| 308 | register int a, s; |
| 309 | |
| 310 | s = splhigh(); |
| 311 | if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) { |
| 312 | splx(s); |
| 313 | break; |
| 314 | } |
| 315 | arg = p1->c_arg; func = p1->c_func; a = p1->c_time; |
| 316 | calltodo.c_next = p1->c_next; |
| 317 | p1->c_next = callfree; |
| 318 | callfree = p1; |
| 319 | splx(s); |
| 320 | (*func)(arg, a); |
| 321 | } |
| 322 | /* |
	 * If we trapped from user mode and the process is being
	 * profiled, give it a profiling tick.
| 325 | */ |
| 326 | if (CLKF_USERMODE(&frame)) { |
| 327 | register struct proc *p = curproc; |
| 328 | |
| 329 | if (p->p_stats->p_prof.pr_scale) |
| 330 | profile_tick(p, &frame); |
| 331 | /* |
		 * Check to see if the process has accumulated
		 * more than 10 minutes of user time.  If so,
		 * reduce its priority to give others a chance.
| 335 | */ |
| 336 | if (p->p_ucred->cr_uid && p->p_nice == NZERO && |
| 337 | p->p_utime.tv_sec > 10 * 60) { |
| 338 | p->p_nice = NZERO + 4; |
| 339 | setpri(p); |
| 340 | p->p_pri = p->p_usrpri; |
| 341 | } |
| 342 | } |
| 343 | } |
| 344 | |
| 345 | /* |
| 346 | * Arrange that (*func)(arg) is called in t/hz seconds. |
| 347 | */ |
| 348 | timeout(func, arg, t) |
| 349 | int (*func)(); |
| 350 | caddr_t arg; |
| 351 | register int t; |
| 352 | { |
| 353 | register struct callout *p1, *p2, *pnew; |
| 354 | register int s = splhigh(); |
| 355 | |
| 356 | if (t <= 0) |
| 357 | t = 1; |
| 358 | pnew = callfree; |
| 359 | if (pnew == NULL) |
| 360 | panic("timeout table overflow"); |
| 361 | callfree = pnew->c_next; |
| 362 | pnew->c_arg = arg; |
| 363 | pnew->c_func = func; |
| 364 | for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2) |
| 365 | if (p2->c_time > 0) |
| 366 | t -= p2->c_time; |
| 367 | p1->c_next = pnew; |
| 368 | pnew->c_next = p2; |
| 369 | pnew->c_time = t; |
| 370 | if (p2) |
| 371 | p2->c_time -= t; |
| 372 | splx(s); |
| 373 | } |
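
/*
 * For example (with hypothetical names, for illustration only),
 *
 *	timeout(xxwatch, (caddr_t)sc, hz);
 *
 * arranges for xxwatch(sc) to be called roughly one second from now.
 */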
| 374 | |
| 375 | /* |
 * untimeout removes a pending timeout call, identified by its
 * function and argument, from the callout queue.
| 378 | */ |
| 379 | untimeout(func, arg) |
| 380 | int (*func)(); |
| 381 | caddr_t arg; |
| 382 | { |
| 383 | register struct callout *p1, *p2; |
| 384 | register int s; |
| 385 | |
| 386 | s = splhigh(); |
| 387 | for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) { |
| 388 | if (p2->c_func == func && p2->c_arg == arg) { |
| 389 | if (p2->c_next && p2->c_time > 0) |
| 390 | p2->c_next->c_time += p2->c_time; |
| 391 | p1->c_next = p2->c_next; |
| 392 | p2->c_next = callfree; |
| 393 | callfree = p2; |
| 394 | break; |
| 395 | } |
| 396 | } |
| 397 | splx(s); |
| 398 | } |
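
/*
 * Continuing the hypothetical example above,
 *
 *	untimeout(xxwatch, (caddr_t)sc);
 *
 * removes that pending call if it has not yet gone off.
 */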
| 399 | |
| 400 | /* |
 * Compute the number of ticks (at hz per second) until the specified time.
| 402 | * Used to compute third argument to timeout() from an |
| 403 | * absolute time. |
| 404 | */ |
| 405 | hzto(tv) |
| 406 | struct timeval *tv; |
| 407 | { |
| 408 | register long ticks; |
| 409 | register long sec; |
| 410 | int s = splhigh(); |
| 411 | |
| 412 | /* |
	 * If the number of milliseconds will fit in 32-bit arithmetic,
	 * compute the number of milliseconds to the target time and scale
	 * that to ticks.  Otherwise just compute the number of ticks in
	 * the interval, rounding times greater than representable to the
	 * maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * The maximum value for any timeout in 10ms ticks is 250 days.
| 420 | */ |
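	/*
	 * For example, assuming hz == 100 (tick == 10000 usec), a target
	 * 2 seconds and 500000 usec in the future yields
	 *	(2 * 1000 + 500000 / 1000) / (10000 / 1000) == 250 ticks.
	 */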
| 421 | sec = tv->tv_sec - time.tv_sec; |
| 422 | if (sec <= 0x7fffffff / 1000 - 1000) |
| 423 | ticks = ((tv->tv_sec - time.tv_sec) * 1000 + |
| 424 | (tv->tv_usec - time.tv_usec) / 1000) / (tick / 1000); |
| 425 | else if (sec <= 0x7fffffff / hz) |
| 426 | ticks = sec * hz; |
| 427 | else |
| 428 | ticks = 0x7fffffff; |
| 429 | splx(s); |
| 430 | return (ticks); |
| 431 | } |