/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	7.18 (Berkeley) 6/27/91
 *
 * PATCHES MAGIC                LEVEL   PATCH   THAT GOT US HERE
 * --------------------         -----   ----------------------
 * CURRENT PATCH LEVEL:         1       00077
 * --------------------         -----   ----------------------
 *
 * 11 Dec 92	Williams Jolitz		Fixed panic:remrq hangs
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "kernel.h"
#include "buf.h"
#include "signalvar.h"
#include "resourcevar.h"

#include "machine/cpu.h"

u_char	curpri;			/* usrpri of curproc */

/*
 * Force switch among equal priority processes every 100ms.
 */
roundrobin()
{

	need_resched();
	timeout(roundrobin, (caddr_t)0, hz / 10);
}

/*
 * constants for digital decay and forget
 *	90% of (p_cpu) usage in 5*loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_cpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_cpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_cpu *= decay;
 * will compute
 *	p_cpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	  solving for factor,
 *	  ln(factor) =~ (-2.30/5*loadav), or
 *	  factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	      exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	  solving for power,
 *	  power*ln(b/(b+1)) =~ -2.30, or
 *	  power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
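
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): a standalone check of the decay above, assuming FSHIFT == 11
 * (FSCALE == 2048) purely for demonstration.  With a load average of 1.0,
 * decay_cpu() multiplies by 2/3 each second, so five iterations leave
 * (2/3)**5 =~ 0.13 of p_cpu -- close to the 90% decay claimed above, and
 * consistent with the table's power of 5.68 for loadav 1.
 */
#if 0	/* illustrative only; never compiled into the kernel */
#include <stdio.h>

int
main(void)
{
	long fscale = 2048;		/* assumed FSCALE (1 << 11) */
	long loadav = 1 * fscale;	/* load average of 1.0, fixed point */
	long loadfac = 2 * loadav;	/* loadfactor(loadav) */
	long cpu = 255;			/* worst-case p_cpu (UCHAR_MAX) */
	int i;

	for (i = 0; i < 5; i++)		/* 5 * loadav seconds of schedcpu() */
		cpu = loadfac * cpu / (loadfac + fscale);	/* decay_cpu() */
	printf("p_cpu after 5 seconds: %ld of 255\n", cpu);	/* prints 33 */
	return (0);
}
#endif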

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
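
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): with hz == 100 and FSHIFT == CCPU_SHIFT == 11 (assumed values),
 * the fast path in schedcpu() below adds p_cpticks << 0 == p_cpticks to
 * p_pctcpu each second, after decaying the old value by ccpu/FSCALE.
 * A process that is busy for all 100 ticks of every second therefore
 * converges on roughly FSCALE, i.e. 100% CPU, and reaches about 95% of
 * that after 60 seconds, as promised above.
 */
#if 0	/* illustrative only; never compiled into the kernel */
#include <stdio.h>

int
main(void)
{
	long fshift = 11, fscale = 1L << 11;	/* assumed FSHIFT/FSCALE */
	long ccpu = 1948;			/* 0.95122... * 2048, truncated */
	long pctcpu = 0;
	int sec;

	for (sec = 0; sec < 60; sec++) {
		pctcpu = (pctcpu * ccpu) >> fshift;	/* decay, as in schedcpu() */
		pctcpu += 100;				/* 100 cpticks: fully CPU-bound */
	}
	printf("p_pctcpu after 60 busy seconds: %ld of %ld\n", pctcpu, fscale);
	return (0);
}
#endif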

/*
 * Recompute process priorities, once a second
 */
schedcpu()
{
	register fixpt_t loadfac = loadfactor(averunnable[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	wakeup((caddr_t)&lbolt);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_time++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		/*
		 * p_pctcpu is only for ps.
		 */
#if (FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_cpu) + p->p_nice;
		p->p_cpu = min(newcpu, UCHAR_MAX);
		setpri(p);
		s = splhigh();	/* prevent state changes */
		if (p->p_pri >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & (SLOAD|SWEXIT)) == SLOAD &&
			    (p->p_pri / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_pri = p->p_usrpri;
				setrq(p);
			} else
				p->p_pri = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	if (bclnlist != NULL)
		wakeup((caddr_t)pageproc);
	timeout(schedcpu, (caddr_t)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_cpu of 255, sleeping for at least
 * six times the loadfactor will decay p_cpu to zero.
 */
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_cpu;
	register fixpt_t loadfac = loadfactor(averunnable[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_cpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_cpu = min(newcpu, UCHAR_MAX);
	}
	setpri(p);
}

#define SQSIZE 0100	/* Must be power of 2 */
#define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[SQSIZE];
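
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): the sleep queues are a simple hash table keyed by the wait
 * channel address.  HASH() discards the low 5 bits (nearby kernel objects
 * tend to share them) and folds the rest into one of SQSIZE == 0100 == 64
 * buckets.  The address below is a made-up example value.
 */
#if 0	/* illustrative only; never compiled into the kernel */
#include <stdio.h>

int
main(void)
{
	unsigned long chan = 0xfe0123a4UL;	/* hypothetical wait channel */

	printf("slpque bucket: %d\n", (int)((chan >> 5) & (0100 - 1)));
	return (0);				/* prints 29 for this address */
}
#endif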

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.
 * Suspends current process until a wakeup is made on chan.
 * The process will then be made runnable with priority pri.
 * Sleeps at most timo/hz seconds (0 means no timeout).
 * If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.
 * If PCATCH is set and a signal needs to be delivered,
 * ERESTART is returned if the current system call should be restarted
 * if possible, and EINTR is returned if the system call should
 * be interrupted by the signal (return EINTR).
 */
tsleep(chan, pri, wmesg, timo)
	caddr_t chan;
	int pri;
	char *wmesg;
	int timo;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register s;
	int sig, catch = pri & PCATCH;
	extern int cold;
	int endtsleep();

	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
		panic("tsleep");
#endif
	p->p_wchan = chan;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_pri = pri & PRIMASK;
	qp = &slpque[HASH(chan)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_link) = 0;
	if (timo)
		timeout(endtsleep, (caddr_t)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= SSINTR;
		if (sig = CURSIG(p)) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	}
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	swtch();
#include "ddb.h"
#ifdef NDDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendtsleep ; bpendtsleep:");
#endif
resume:
	curpri = p->p_usrpri;
	splx(s);
	p->p_flag &= ~SSINTR;
	if (p->p_flag & STIMO) {
		p->p_flag &= ~STIMO;
		if (catch == 0 || sig == 0)
			return (EWOULDBLOCK);
	} else if (timo)
		untimeout(endtsleep, (caddr_t)p);
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
	return (0);
}
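
/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original file): a typical "top half" sleeps on the address of the data
 * it is waiting for, and the interrupt side wakes that same address.
 * The xx_softc/xxwait/xxintr names are hypothetical; a real driver would
 * also raise the spl around the test of xx_ready to close the race with
 * the interrupt handler.
 */
#if 0	/* illustrative only; never compiled into the kernel */
struct xx_softc {
	int	xx_ready;		/* set by the interrupt handler */
} xx_softc;

xxwait()
{
	register struct xx_softc *sc = &xx_softc;
	int error;

	while (sc->xx_ready == 0) {
		/* sleep interruptibly, at most 2 seconds per try */
		error = tsleep((caddr_t)sc, (PZERO + 1) | PCATCH, "xxwait", 2 * hz);
		if (error)		/* EWOULDBLOCK, EINTR or ERESTART */
			return (error);
	}
	sc->xx_ready = 0;
	return (0);
}

xxintr()
{
	xx_softc.xx_ready = 1;
	wakeup((caddr_t)&xx_softc);	/* makes every sleeper on this channel runnable */
}
#endif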

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
endtsleep(p)
	register struct proc *p;
{
	int s = splhigh();

	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrun(p);
		else
			unsleep(p);
		p->p_flag |= STIMO;
	}
	splx(s);
}

/*
 * Short-term, non-interruptable sleep.
 */
sleep(chan, pri)
	caddr_t chan;
	int pri;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register s;
	extern int cold;

#ifdef DIAGNOSTIC
	if (pri > PZERO) {
		printf("sleep called with pri %d > PZERO, wchan: %x\n",
		    pri, chan);
		panic("old sleep");
	}
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return;
	}
#ifdef DIAGNOSTIC
	if (chan==0 || p->p_stat != SRUN || p->p_rlink)
		panic("sleep");
#endif
	p->p_wchan = chan;
	p->p_wmesg = NULL;
	p->p_slptime = 0;
	p->p_pri = pri;
	qp = &slpque[HASH(chan)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_link) = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	swtch();
#ifdef NDDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendsleep ; bpendsleep:");
#endif
	curpri = p->p_usrpri;
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
unsleep(p)
	register struct proc *p;
{
	register struct slpque *qp;
	register struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = &slpque[HASH(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_link;
		*hp = p->p_link;
		if (qp->sq_tailp == &p->p_link)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Wakeup on "chan"; set all processes
 * sleeping on chan to run state.
 */
wakeup(chan)
	register caddr_t chan;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[HASH(chan)];
restart:
	for (q = &qp->sq_head; p = *q; ) {
#ifdef DIAGNOSTIC
		if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
#endif
		if (p->p_wchan == chan) {
			p->p_wchan = 0;
			*q = p->p_link;
			if (qp->sq_tailp == &p->p_link)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & SLOAD)
					setrq(p);
				/*
				 * Since curpri is a usrpri,
				 * p->p_pri is always better than curpri.
				 */
				if ((p->p_flag&SLOAD) == 0)
					wakeup((caddr_t)&proc0);
				else
					need_resched();
				/* END INLINE EXPANSION */
				goto restart;
			}
		} else
			q = &p->p_link;
	}
	splx(s);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
rqinit()
{
	register int i;

	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
setrun(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {

	case 0:
	case SWAIT:
	case SRUN:
	case SZOMB:
	default:
		panic("setrun");

	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & SLOAD)
		setrq(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag&SLOAD) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_pri < curpri)
		need_resched();
}

/*
 * Compute priority of process when running in user mode.
 * Arrange to reschedule if the resulting priority
 * is better than that of the current process.
 */
setpri(p)
	register struct proc *p;
{
	register unsigned int newpri;

	newpri = PUSER + p->p_cpu / 4 + 2 * p->p_nice;
	newpri = min(newpri, MAXPRI);
	p->p_usrpri = newpri;
	if (newpri < curpri)
		need_resched();
}
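
/*
 * Worked example (an editorial addition, not part of the original file),
 * assuming the traditional param.h values PUSER == 50 and MAXPRI == 127
 * and the default p_nice of 20: a process with p_cpu == 80 gets
 *	newpri = 50 + 80/4 + 2*20 = 110,
 * while a fully CPU-bound process with p_cpu == 255 would compute
 * 50 + 63 + 40 = 153 and be clamped to MAXPRI.  Lower numbers mean
 * better priority, so accumulated CPU time steadily worsens a process's
 * user-mode priority until the decay above forgets it.
 */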

#ifdef NDDB
#define DDBFUNC(s)	ddb_##s
DDBFUNC(ps) () {
	int np;
	struct proc *ap, *p, *pp;
	np = nprocs;
	p = ap = allproc;
	printf(" pid proc addr uid ppid pgrp flag stat comm wchan\n");
	while (--np >= 0) {
		pp = p->p_pptr;
		if (pp == 0)
			pp = p;
		if (p->p_stat) {
			printf("%5d %06x %06x %3d %5d %5d %06x %d %s ",
			    p->p_pid, ap, p->p_addr, p->p_cred->p_ruid, pp->p_pid,
			    p->p_pgrp->pg_id, p->p_flag, p->p_stat,
			    p->p_comm);
			if (p->p_wchan) {
				if (p->p_wmesg)
					printf("%s ", p->p_wmesg);
				printf("%x", p->p_wchan);
			}
			printf("\n");
		}
		ap = p->p_nxt;
		if (ap == 0 && np > 0)
			ap = zombproc;
		p = ap;
	}
}
#endif