/* kern_clock.c 4.14 %G% */
#include "../h/callout.h"
* Hardclock is called straight from
* the real time clock interrupt.
* We limit the work we do at real clock interrupt time to:
* decrementing time to callouts
* recording cpu time usage
* modifying priority of current process
* arrange for soft clock interrupt
* At softclock interrupt time we:
* lightning bolt wakeup (every second)
* On the vax softclock interrupts are implemented by
* software interrupts. Note that we may have multiple softclock
* interrupts compressed into one (due to excessive interrupt load),
* but that hardclock interrupts should never be lost.
register struct callout
*p1
;
register struct proc
*pp
;
if(callout
[0].c_func
== NULL
)
while(p1
->c_time
<=0 && p1
->c_func
!=NULL
)
* Maintain iostat and per-process cpu statistics
if (u
.u_procp
->p_textp
) {
register int xrss
= u
.u_procp
->p_textp
->x_rssize
;
if (s
> u
.u_vm
.vm_maxrss
)
if ((u
.u_vm
.vm_utime
+u
.u_vm
.vm_stime
+1)/hz
> u
.u_limit
[LIM_CPU
]) {
psignal(u
.u_procp
, SIGXCPU
);
if (u
.u_limit
[LIM_CPU
] < INFINITY
- 5)
if(u
.u_procp
->p_nice
> NZERO
)
for (s
= 0; s
< DK_NDRIVE
; s
++)
if(pp
->p_cpu
% 16 == 0) {
pp
->p_pri
= pp
->p_usrpri
;
if (cpu
== VAX_780
&& !BASEPRI(ps
))
 * Decay constant for the filter applied to per-process CPU usage.
double ccpu
= 0.95122942450071400909; /* exp(-1/20) */
* Software clock interrupt.
* This routine is blocked by spl1(),
* which doesn't block device interrupts!
register struct callout
*p1
, *p2
;
register struct proc
*pp
;
if(callout
[0].c_time
<= 0) {
while(p1
->c_func
!= 0 && p1
->c_time
<= 0) {
(*p1
->c_func
)(p1
->c_arg
);
while(p2
->c_func
= p1
->c_func
) {
s
= spl5(); dhtimer(); splx(s
);
s
= spl5(); dztimer(); splx(s
);
* If idling and processes are waiting to swap in,
* Run paging daemon and reschedule every 1/4 sec.
if (lbolt
% (hz
/4) == 0) {
* Lightning bolt every second:
* process priority recomputation
* virtual memory metering
* kick swapper if processes want in
for(pp
= proc
; pp
< procNPROC
; pp
++)
if (pp
->p_stat
&& pp
->p_stat
!=SZOMB
) {
if (pp
->p_flag
& STIMO
) {
if(pp
->p_stat
==SSLEEP
||pp
->p_stat
==SSTOP
)
if (pp
->p_slptime
!= 127)
pp
->p_pctcpu
= ccpu
* pp
->p_pctcpu
+
(1.0 - ccpu
) * (pp
->p_cpticks
/(float)hz
);
a
= (pp
->p_cpu
& 0377)*SCHMAG
+ pp
->p_nice
- NZERO
;
if ((pp
!= u
.u_procp
|| noproc
) &&
pp
->p_pri
!= pp
->p_usrpri
) {
pp
->p_pri
= pp
->p_usrpri
;
pp
->p_pri
= pp
->p_usrpri
;
* If there are pages that have been cleaned,
* jolt the pageout daemon to process them.
* We do this here so that these pages will be
* freed if there is an abundance of memory and the
* daemon would not be awakened otherwise.
wakeup((caddr_t
)&proc
[2]);
if (pp
->p_nice
== NZERO
&& u
.u_vm
.vm_utime
> 600 * hz
)
pp
->p_pri
= pp
->p_usrpri
;
if (USERMODE(ps
) && u
.u_prof
.pr_scale
) {
u
.u_procp
->p_flag
|= SOWEUPC
;
* timeout is called to arrange that
* fun(arg) is called in tim/hz seconds.
* An entry is sorted into the callout
* structure. The time in each structure
* entry is the number of hz's more
* than the previous entry.
* In this way, decrementing the
 * first entry has the effect of updating all entries.
* The panic is there because there is nothing
* intelligent to be done if an entry won't fit.
register struct callout
*p1
, *p2
, *p3
;
while(p1
->c_func
!= 0 && p1
->c_time
<= t
) {
p3
= callout
+(ncallout
-2);
(p2
+1)->c_time
= p2
->c_time
;
(p2
+1)->c_func
= p2
->c_func
;
(p2
+1)->c_arg
= p2
->c_arg
;