386BSD 0.1 development
[unix-history] usr/src/sys.386bsd/kern/kern_synch.c
/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	7.18 (Berkeley) 6/27/91
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "kernel.h"
#include "buf.h"
#include "signalvar.h"
#include "resourcevar.h"

#include "machine/cpu.h"

u_char	curpri;			/* usrpri of curproc */

/*
 * Force switch among equal priority processes every 100ms.
 */
roundrobin()
{

	need_resched();
	timeout(roundrobin, (caddr_t)0, hz / 10);
}
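
/*
 * Illustrative arithmetic (not part of the original file): with the
 * usual clock rate of hz == 100 the rearmed interval of hz / 10 is
 * 10 ticks, i.e. the 100ms quantum named above.  roundrobin() simply
 * re-queues itself through timeout() on every expiration, so the
 * resched request recurs for as long as the system runs.
 */
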

/*
 * constants for digital decay and forget
 * 90% of (p_cpu) usage in 5*loadav time
 * 95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 * Note that, as ps(1) mentions, this can let percentages
 * total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_cpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_cpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_cpu *= decay;
 * will compute
 * 	p_cpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *	therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *	therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *		solving for factor,
 *		ln(factor) =~ (-2.30/5*loadav), or
 *		factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *		    exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *		solving for power,
 *		power*ln(b/(b+1)) =~ -2.30, or
 *		power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

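
/*
 * Illustrative sketch (not part of the original file): a user-level
 * check that applying decay_cpu() 5*loadav times leaves roughly 10%
 * of the starting p_cpu, as the derivation above argues.  FSHIFT and
 * FSCALE below mirror "param.h"; the residue sits a little above 10%
 * at small load averages, matching the "power" table in the comment.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11			/* as in "param.h" */
#define FSCALE	(1 << FSHIFT)
typedef long	fixpt_t;

#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main()
{
	fixpt_t loadfac;
	unsigned int cpu;
	int loadav, i;

	for (loadav = 1; loadav <= 4; loadav++) {
		loadfac = loadfactor(loadav * FSCALE);	/* fixed-point load */
		cpu = 255;				/* maximum p_cpu */
		for (i = 0; i < 5 * loadav; i++)
			cpu = decay_cpu(loadfac, cpu);
		printf("loadav %d: p_cpu 255 -> %u after %d decays\n",
		    loadav, cpu, 5 * loadav);
	}
	return (0);
}
#endif
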
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11

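
/*
 * Illustrative sketch (not part of the original file): repeating the
 * update `p_pctcpu = (p_pctcpu * ccpu) >> FSHIFT' once a second for 60
 * seconds leaves about 5% of the starting value, i.e. ccpu = exp(-1/20)
 * really does decay 95% of p_pctcpu in 60 seconds; it also shows that
 * the fast hz == 100 increment and the general one agree closely.
 */
#if 0
#include <stdio.h>

#define FSHIFT		11		/* as in "param.h" */
#define FSCALE		(1 << FSHIFT)
#define CCPU_SHIFT	11
typedef long	fixpt_t;

int
main()
{
	fixpt_t ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
	fixpt_t pct = FSCALE;			/* start at 100% */
	int hz = 100, cpticks = 37;		/* arbitrary tick count */
	int i;

	for (i = 0; i < 60; i++)
		pct = (pct * ccpu) >> FSHIFT;
	printf("after 60 seconds: %ld of %d (about 5%%)\n", (long)pct, FSCALE);

	printf("fast increment:    %ld\n",
	    (long)(((fixpt_t)cpticks) << (FSHIFT - CCPU_SHIFT)));
	printf("general increment: %ld\n",
	    (long)(((FSCALE - ccpu) * (cpticks * FSCALE / hz)) >> FSHIFT));
	return (0);
}
#endif
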
/*
 * Recompute process priorities, once a second
 */
schedcpu()
{
	register fixpt_t loadfac = loadfactor(averunnable[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	wakeup((caddr_t)&lbolt);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_time++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_cpu) + p->p_nice;
		p->p_cpu = min(newcpu, UCHAR_MAX);
		setpri(p);
		s = splhigh();	/* prevent state changes */
		if (p->p_pri >= PUSER) {
#define	PPQ	(128 / NQS)	/* priorities per queue */
			if ((p != curproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & SLOAD) &&
			    (p->p_pri / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_pri = p->p_usrpri;
				setrq(p);
			} else
				p->p_pri = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	if (bclnlist != NULL)
		wakeup((caddr_t)pageproc);
	timeout(schedcpu, (caddr_t)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_cpu of 255, sleeping for at least
 * six times the loadfactor will decay p_cpu to zero.
 */
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_cpu;
	register fixpt_t loadfac = loadfactor(averunnable[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_cpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_cpu = min(newcpu, UCHAR_MAX);
	}
	setpri(p);
}

#define SQSIZE 0100	/* Must be power of 2 */
#define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[SQSIZE];
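
/*
 * Illustrative sketch (not part of the original file): a wait channel
 * is just a kernel address, and HASH() picks one of the SQSIZE buckets
 * by discarding the low 5 bits and masking.  The addresses below are
 * made up; the point is that channels within the same 32-byte region
 * (e.g. fields of one structure) share a bucket, which is harmless
 * because wakeup() still compares the exact channel.
 */
#if 0
#include <stdio.h>

#define SQSIZE	0100			/* must be power of 2 */
#define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))

int
main()
{
	long chans[] = { 0xfe003420, 0xfe003424, 0xfe007c60 };	/* hypothetical */
	int i;

	for (i = 0; i < 3; i++)
		printf("chan %#lx -> slpque[%d]\n", chans[i], HASH(chans[i]));
	return (0);
}
#endif
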

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.
 * Suspends the current process until a wakeup is made on chan.
 * The process will then be made runnable with priority pri.
 * Sleeps at most timo/hz seconds (0 means no timeout).
 * If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.
 * If PCATCH is set and a signal needs to be delivered,
 * ERESTART is returned if the current system call should be restarted
 * if possible, and EINTR is returned if the system call should
 * be interrupted by the signal.
 */
tsleep(chan, pri, wmesg, timo)
	caddr_t chan;
	int pri;
	char *wmesg;
	int timo;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register s;
	int sig, catch = pri & PCATCH;
	extern int cold;
	int endtsleep();

	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
		panic("tsleep");
#endif
	p->p_wchan = chan;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_pri = pri & PRIMASK;
	qp = &slpque[HASH(chan)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_link) = 0;
	if (timo)
		timeout(endtsleep, (caddr_t)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= SSINTR;
		if (sig = CURSIG(p)) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	}
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	swtch();
#include "ddb.h"
#ifdef NDDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendtsleep ; bpendtsleep:");
#endif
resume:
	curpri = p->p_usrpri;
	splx(s);
	p->p_flag &= ~SSINTR;
	if (p->p_flag & STIMO) {
		p->p_flag &= ~STIMO;
		if (catch == 0 || sig == 0)
			return (EWOULDBLOCK);
	} else if (timo)
		untimeout(endtsleep, (caddr_t)p);
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
	return (0);
}
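
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * driver pairs tsleep() with wakeup().  The device, its softc, buffer
 * and flag names below are hypothetical; only the calling convention
 * of tsleep()/wakeup() follows the routines in this file.
 */
#if 0
/* top half: wait up to five seconds for a transfer to complete */
xx_wait(xx)
	struct xx_softc *xx;		/* hypothetical per-device state */
{
	int error;

	while ((xx->xx_flags & XX_DONE) == 0) {
		error = tsleep((caddr_t)&xx->xx_buf, (PZERO + 1) | PCATCH,
		    "xxwait", 5 * hz);
		if (error)		/* EWOULDBLOCK, EINTR or ERESTART */
			return (error);
	}
	return (0);
}

/* interrupt side: mark the transfer done and wake any sleeper */
xx_intr(xx)
	struct xx_softc *xx;
{

	xx->xx_flags |= XX_DONE;
	wakeup((caddr_t)&xx->xx_buf);
}
#endif
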
/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
endtsleep(p)
	register struct proc *p;
{
	int s = splhigh();

	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrun(p);
		else
			unsleep(p);
		p->p_flag |= STIMO;
	}
	splx(s);
}

/*
 * Short-term, non-interruptible sleep.
 */
sleep(chan, pri)
	caddr_t chan;
	int pri;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register s;
	extern int cold;

#ifdef DIAGNOSTIC
	if (pri > PZERO) {
		printf("sleep called with pri %d > PZERO, wchan: %x\n",
		    pri, chan);
		panic("old sleep");
	}
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return;
	}
#ifdef DIAGNOSTIC
	if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
		panic("sleep");
#endif
	p->p_wchan = chan;
	p->p_wmesg = NULL;
	p->p_slptime = 0;
	p->p_pri = pri;
	qp = &slpque[HASH(chan)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_link) = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	swtch();
#ifdef NDDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendsleep ; bpendsleep:");
#endif
	curpri = p->p_usrpri;
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
unsleep(p)
	register struct proc *p;
{
	register struct slpque *qp;
	register struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = &slpque[HASH(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_link;
		*hp = p->p_link;
		if (qp->sq_tailp == &p->p_link)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Wakeup on "chan"; set all processes
 * sleeping on chan to run state.
 */
wakeup(chan)
	register caddr_t chan;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[HASH(chan)];
restart:
	for (q = &qp->sq_head; p = *q; ) {
#ifdef DIAGNOSTIC
		if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
#endif
		if (p->p_wchan == chan) {
			p->p_wchan = 0;
			*q = p->p_link;
			if (qp->sq_tailp == &p->p_link)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & SLOAD)
					setrq(p);
				/*
				 * Since curpri is a usrpri,
				 * p->p_pri is always better than curpri.
				 */
				if ((p->p_flag&SLOAD) == 0)
					wakeup((caddr_t)&proc0);
				else
					need_resched();
				/* END INLINE EXPANSION */
				goto restart;
			}
		} else
			q = &p->p_link;
	}
	splx(s);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
rqinit()
{
	register int i;

	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
setrun(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {

	case 0:
	case SWAIT:
	case SRUN:
	case SZOMB:
	default:
		panic("setrun");

	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & SLOAD)
		setrq(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag&SLOAD) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_pri < curpri)
		need_resched();
}

/*
 * Compute priority of process when running in user mode.
 * Arrange to reschedule if the resulting priority
 * is better than that of the current process.
 */
setpri(p)
	register struct proc *p;
{
	register unsigned int newpri;

	newpri = PUSER + p->p_cpu / 4 + 2 * p->p_nice;
	newpri = min(newpri, MAXPRI);
	p->p_usrpri = newpri;
	if (newpri < curpri)
		need_resched();
}
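
/*
 * Illustrative worked example (not part of the original file): ignoring
 * the nice term, a process that has accumulated p_cpu == 100 gets
 *	p_usrpri = PUSER + 100 / 4 = PUSER + 25,
 * while one with p_cpu == 0 stays at PUSER.  Smaller numbers are better
 * priorities, so the recent CPU hog is scheduled behind the idle
 * process, and need_resched() is requested only when the new priority
 * beats that of the currently running process.
 */
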

#ifdef NDDB
#define DDBFUNC(s)	ddb_##s
DDBFUNC(ps) () {
	int np;
	struct proc *ap, *p, *pp;
	np = nprocs;
	p = ap = allproc;
	printf(" pid proc addr uid ppid pgrp flag stat comm wchan\n");
	while (--np >= 0) {
		pp = p->p_pptr;
		if (pp == 0)
			pp = p;
		if (p->p_stat) {
			printf("%5d %06x %06x %3d %5d %5d %06x %d %s ",
			    p->p_pid, ap, p->p_addr, p->p_cred->p_ruid, pp->p_pid,
			    p->p_pgrp->pg_id, p->p_flag, p->p_stat,
			    p->p_comm);
			if (p->p_wchan) {
				if (p->p_wmesg)
					printf("%s ", p->p_wmesg);
				printf("%x", p->p_wchan);
			}
			printf("\n");
		}
		ap = p->p_nxt;
		if (ap == 0 && np > 0)
			ap = zombproc;
		p = ap;
	}
}
#endif