add definition for ability to produce a backtrace
[unix-history] usr/src/sys/kern/kern_fork.c
index efe13ba..d4f373d 100644
 /*
- * Copyright (c) 1982 Regents of the University of California.
- * All rights reserved.  The Berkeley software License Agreement
- * specifies the terms and conditions for redistribution.
+ * Copyright (c) 1982, 1986, 1989, 1991, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
  *
- *     @(#)kern_fork.c 6.8 (Berkeley) %G%
+ * %sccs.include.redist.c%
+ *
+ *     @(#)kern_fork.c 8.8 (Berkeley) %G%
  */
 
-#include "../machine/reg.h"
-#include "../machine/pte.h"
-#include "../machine/psl.h"
-
-#include "param.h"
-#include "systm.h"
-#include "map.h"
-#include "dir.h"
-#include "user.h"
-#include "kernel.h"
-#include "proc.h"
-#include "inode.h"
-#include "seg.h"
-#include "vm.h"
-#include "text.h"
-#include "file.h"
-#include "acct.h"
-#include "quota.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/map.h>
+#include <sys/filedesc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/resourcevar.h>
+#include <sys/vnode.h>
+#include <sys/file.h>
+#include <sys/acct.h>
+#include <sys/ktrace.h>
 
-/*
- * fork system call.
- */
-fork()
+/* ARGSUSED */
+fork(p, uap, retval)
+       struct proc *p;
+       void *uap;
+       register_t *retval;
 {
 
-       u.u_cdmap = zdmap;
-       u.u_csmap = zdmap;
-       if (swpexpand(u.u_dsize, u.u_ssize, &u.u_cdmap, &u.u_csmap) == 0) {
-               u.u_r.r_val2 = 0;
-               return;
-       }
-       fork1(0);
+       return (fork1(p, 0, retval));
 }
 
-vfork()
+/* ARGSUSED */
+vfork(p, uap, retval)
+       struct proc *p;
+       void *uap;
+       register_t *retval;
 {
 
-       fork1(1);
+       return (fork1(p, 1, retval));
 }
 
-fork1(isvfork)
+int    nprocs = 1;             /* process 0 */
+
+fork1(p1, isvfork, retval)
+       register struct proc *p1;
        int isvfork;
+       register_t *retval;
 {
-       register struct proc *p1, *p2;
-       register a;
+       register struct proc *p2;
+       register uid_t uid;
+       struct proc *newproc;
+       struct proc **hash;
+       int count;
+       static int nextpid, pidchecked = 0;
 
-       a = 0;
-       if (u.u_uid != 0) {
-               for (p1 = allproc; p1; p1 = p1->p_nxt)
-                       if (p1->p_uid == u.u_uid)
-                               a++;
-               for (p1 = zombproc; p1; p1 = p1->p_nxt)
-                       if (p1->p_uid == u.u_uid)
-                               a++;
-       }
        /*
-        * Disallow if
-        *  No processes at all;
-        *  not su and too many procs owned; or
-        *  not su and would take last slot.
+        * Although process entries are dynamically created, we still keep
+        * a global limit on the maximum number we will create.  Don't allow
+        * a nonprivileged user to use the last process; don't let root
+        * exceed the limit. The variable nprocs is the current number of
+        * processes, maxproc is the limit.
         */
-       p2 = freeproc;
-       if (p2==NULL)
+       uid = p1->p_cred->p_ruid;
+       if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
                tablefull("proc");
-       if (p2==NULL || (u.u_uid!=0 && (p2->p_nxt == NULL || a>MAXUPRC))) {
-               u.u_error = EAGAIN;
-               if (!isvfork) {
-                       (void) vsexpand(0, &u.u_cdmap, 1);
-                       (void) vsexpand(0, &u.u_csmap, 1);
-               }
-               goto out;
+               return (EAGAIN);
        }
-       p1 = u.u_procp;
-       if (newproc(isvfork)) {
-               u.u_r.r_val1 = p1->p_pid;
-               u.u_r.r_val2 = 1;  /* child */
-               u.u_start = time;
-               u.u_acflag = AFORK;
-               return;
-       }
-       u.u_r.r_val1 = p2->p_pid;
 
-out:
-       u.u_r.r_val2 = 0;
-}
+       /*
+        * Increment the count of procs running with this uid. Don't allow
+        * a nonprivileged user to exceed their current limit.
+        */
+       count = chgproccnt(uid, 1);
+       if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
+               (void)chgproccnt(uid, -1);
+               return (EAGAIN);
+       }
 
-/*
- * Create a new process-- the internal version of
- * sys fork.
- * It returns 1 in the new process, 0 in the old.
- */
-newproc(isvfork)
-       int isvfork;
-{
-       register struct proc *rpp, *rip;
-       register int n;
-       register struct file *fp;
-       static int pidchecked = 0;
+       /* Allocate new proc. */
+       MALLOC(newproc, struct proc *, sizeof(struct proc), M_PROC, M_WAITOK);
 
        /*
-        * First, just locate a slot for a process
-        * and copy the useful info from this process into it.
-        * The panic "cannot happen" because fork has already
-        * checked for the existence of a slot.
+        * Find an unused process ID.  We remember a range of unused IDs
+        * ready to use (from nextpid+1 through pidchecked-1).
         */
-       mpid++;
+       nextpid++;
 retry:
-       if (mpid >= 30000) {
-               mpid = 100;
+       /*
+        * If the process ID prototype has wrapped around,
+        * restart somewhat above 0, as the low-numbered procs
+        * tend to include daemons that don't exit.
+        */
+       if (nextpid >= PID_MAX) {
+               nextpid = 100;
                pidchecked = 0;
        }
-       if (mpid >= pidchecked) {
+       if (nextpid >= pidchecked) {
                int doingzomb = 0;
 
-               pidchecked = 30000;
+               pidchecked = PID_MAX;
                /*
-                * Scan the proc table to check whether this pid
+                * Scan the active and zombie procs to check whether this pid
                 * is in use.  Remember the lowest pid that's greater
-                * than mpid, so we can avoid checking for a while.
+                * than nextpid, so we can avoid checking for a while.
                 */
-               rpp = allproc;
+               p2 = allproc.lh_first;
 again:
-               for (; rpp != NULL; rpp = rpp->p_nxt) {
-                       if (rpp->p_pid == mpid || rpp->p_pgrp == mpid) {
-                               mpid++;
-                               if (mpid >= pidchecked)
+               for (; p2 != 0; p2 = p2->p_list.le_next) {
+                       while (p2->p_pid == nextpid ||
+                           p2->p_pgrp->pg_id == nextpid) {
+                               nextpid++;
+                               if (nextpid >= pidchecked)
                                        goto retry;
                        }
-                       if (rpp->p_pid > mpid && pidchecked > rpp->p_pid)
-                               pidchecked = rpp->p_pid;
-                       if (rpp->p_pgrp > mpid && pidchecked > rpp->p_pgrp)
-                               pidchecked = rpp->p_pgrp;
+                       if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
+                               pidchecked = p2->p_pid;
+                       if (p2->p_pgrp->pg_id > nextpid && 
+                           pidchecked > p2->p_pgrp->pg_id)
+                               pidchecked = p2->p_pgrp->pg_id;
                }
                if (!doingzomb) {
                        doingzomb = 1;
-                       rpp = zombproc;
+                       p2 = zombproc.lh_first;
                        goto again;
                }
        }
-       if ((rpp = freeproc) == NULL)
-               panic("no procs");
 
-       freeproc = rpp->p_nxt;                  /* off freeproc */
-       rpp->p_nxt = allproc;                   /* onto allproc */
-       rpp->p_nxt->p_prev = &rpp->p_nxt;       /*   (allproc is never NULL) */
-       rpp->p_prev = &allproc;
-       allproc = rpp;
+       nprocs++;
+       p2 = newproc;
+       p2->p_stat = SIDL;                      /* protect against others */
+       p2->p_pid = nextpid;
+       LIST_INSERT_HEAD(&allproc, p2, p_list);
+       p2->p_forw = p2->p_back = NULL;         /* shouldn't be necessary */
+       LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
 
        /*
         * Make a proc table entry for the new process.
+        * Start by zeroing the section of proc that is zero-initialized,
+        * then copy the section that is copied directly from the parent.
         */
-       rip = u.u_procp;
-#ifdef QUOTA
-       rpp->p_quota = rip->p_quota;
-       rpp->p_quota->q_cnt++;
-#endif
-       rpp->p_stat = SIDL;
-       timerclear(&rpp->p_realtimer.it_value);
-       rpp->p_flag = SLOAD | (rip->p_flag & (SPAGI|SOUSIG));
-       if (isvfork) {
-               rpp->p_flag |= SVFORK;
-               rpp->p_ndx = rip->p_ndx;
-       } else
-               rpp->p_ndx = rpp - proc;
-       rpp->p_uid = rip->p_uid;
-       rpp->p_pgrp = rip->p_pgrp;
-       rpp->p_nice = rip->p_nice;
-       rpp->p_textp = isvfork ? 0 : rip->p_textp;
-       rpp->p_pid = mpid;
-       rpp->p_ppid = rip->p_pid;
-       rpp->p_pptr = rip;
-       rpp->p_osptr = rip->p_cptr;
-       if (rip->p_cptr)
-               rip->p_cptr->p_ysptr = rpp;
-       rpp->p_ysptr = NULL;
-       rpp->p_cptr = NULL;
-       rip->p_cptr = rpp;
-       rpp->p_time = 0;
-       rpp->p_cpu = 0;
-       rpp->p_sigmask = rip->p_sigmask;
-       rpp->p_sigcatch = rip->p_sigcatch;
-       rpp->p_sigignore = rip->p_sigignore;
-       /* take along any pending signals like stops? */
-       if (isvfork) {
-               rpp->p_tsize = rpp->p_dsize = rpp->p_ssize = 0;
-               rpp->p_szpt = clrnd(ctopt(UPAGES));
-               forkstat.cntvfork++;
-               forkstat.sizvfork += rip->p_dsize + rip->p_ssize;
-       } else {
-               rpp->p_tsize = rip->p_tsize;
-               rpp->p_dsize = rip->p_dsize;
-               rpp->p_ssize = rip->p_ssize;
-               rpp->p_szpt = rip->p_szpt;
-               forkstat.cntfork++;
-               forkstat.sizfork += rip->p_dsize + rip->p_ssize;
-       }
-       rpp->p_rssize = 0;
-       rpp->p_maxrss = rip->p_maxrss;
-       rpp->p_wchan = 0;
-       rpp->p_slptime = 0;
-       rpp->p_pctcpu = 0;
-       rpp->p_cpticks = 0;
-       n = PIDHASH(rpp->p_pid);
-       rpp->p_idhash = pidhash[n];
-       pidhash[n] = rpp - proc;
-       multprog++;
+       bzero(&p2->p_startzero,
+           (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
+       bcopy(&p1->p_startcopy, &p2->p_startcopy,
+           (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
 
        /*
+        * Duplicate sub-structures as needed.
         * Increase reference counts on shared objects.
+        * The p_stats and p_sigacts substructs are set in vm_fork.
+        */
+       p2->p_flag = P_INMEM;
+       if (p1->p_flag & P_PROFIL)
+               startprofclock(p2);
+       MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
+           M_SUBPROC, M_WAITOK);
+       bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
+       p2->p_cred->p_refcnt = 1;
+       crhold(p1->p_ucred);
+
+       /* bump references to the text vnode (for procfs) */
+       p2->p_textvp = p1->p_textvp;
+       if (p2->p_textvp)
+               VREF(p2->p_textvp);
+
+       p2->p_fd = fdcopy(p1);
+       /*
+        * If p_limit is still copy-on-write, bump refcnt,
+        * otherwise get a copy that won't be modified.
+        * (If PL_SHAREMOD is clear, the structure is shared
+        * copy-on-write.)
+        */
+       if (p1->p_limit->p_lflags & PL_SHAREMOD)
+               p2->p_limit = limcopy(p1->p_limit);
+       else {
+               p2->p_limit = p1->p_limit;
+               p2->p_limit->p_refcnt++;
+       }
+
+       if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
+               p2->p_flag |= P_CONTROLT;
+       if (isvfork)
+               p2->p_flag |= P_PPWAIT;
+       LIST_INSERT_AFTER(p1, p2, p_pglist);
+       p2->p_pptr = p1;
+       LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
+       LIST_INIT(&p2->p_children);
+
+#ifdef KTRACE
+       /*
+        * Copy traceflag and tracefile if enabled.
+        * If not inherited, these were zeroed above.
         */
-       for (n = 0; n <= u.u_lastfile; n++) {
-               fp = u.u_ofile[n];
-               if (fp == NULL)
-                       continue;
-               fp->f_count++;
+       if (p1->p_traceflag&KTRFAC_INHERIT) {
+               p2->p_traceflag = p1->p_traceflag;
+               if ((p2->p_tracep = p1->p_tracep) != NULL)
+                       VREF(p2->p_tracep);
        }
-       u.u_cdir->i_count++;
-       if (u.u_rdir)
-               u.u_rdir->i_count++;
+#endif
 
        /*
         * This begins the section where we must prevent the parent
         * from being swapped.
         */
-       rip->p_flag |= SKEEP;
-       if (procdup(rpp, isvfork))
-               return (1);
-
+       p1->p_flag |= P_NOSWAP;
        /*
-        * Make child runnable and add to run queue.
+        * Set return values for child before vm_fork,
+        * so they can be copied to child stack.
+        * We return parent pid, and mark as child in retval[1].
+        * NOTE: the kernel stack may be at a different location in the child
+        * process, and thus addresses of automatic variables (including retval)
+        * may be invalid after vm_fork returns in the child process.
         */
-       (void) splclock();
-       rpp->p_stat = SRUN;
-       setrq(rpp);
-       (void) spl0();
+       retval[0] = p1->p_pid;
+       retval[1] = 1;
+       if (vm_fork(p1, p2, isvfork)) {
+               /*
+                * Child process.  Set start time and get to work.
+                */
+               (void) splclock();
+               p2->p_stats->p_start = time;
+               (void) spl0();
+               p2->p_acflag = AFORK;
+               return (0);
+       }
 
        /*
-        * Cause child to take a non-local goto as soon as it runs.
-        * On older systems this was done with SSWAP bit in proc
-        * table; on VAX we use u.u_pcb.pcb_sswap so don't need
-        * to do rpp->p_flag |= SSWAP.  Actually do nothing here.
+        * Make child runnable and add to run queue.
         */
-       /* rpp->p_flag |= SSWAP; */
+       (void) splhigh();
+       p2->p_stat = SRUN;
+       setrunqueue(p2);
+       (void) spl0();
 
        /*
         * Now can be swapped.
         */
-       rip->p_flag &= ~SKEEP;
+       p1->p_flag &= ~P_NOSWAP;
 
        /*
-        * If vfork make chain from parent process to child
-        * (where virtal memory is temporarily).  Wait for
-        * child to finish, steal virtual memory back,
-        * and wakeup child to let it die.
+        * Preserve synchronization semantics of vfork.  If waiting for
+        * child to exec or exit, set P_PPWAIT on child, and sleep on our
+        * proc (in case of exit).
         */
-       if (isvfork) {
-               u.u_procp->p_xlink = rpp;
-               u.u_procp->p_flag |= SNOVM;
-               while (rpp->p_flag & SVFORK)
-                       sleep((caddr_t)rpp, PZERO - 1);
-               if ((rpp->p_flag & SLOAD) == 0)
-                       panic("newproc vfork");
-               uaccess(rpp, Vfmap, &vfutl);
-               u.u_procp->p_xlink = 0;
-               vpassvm(rpp, u.u_procp, &vfutl, &u, Vfmap);
-               u.u_procp->p_flag &= ~SNOVM;
-               rpp->p_ndx = rpp - proc;
-               rpp->p_flag |= SVFDONE;
-               wakeup((caddr_t)rpp);
-       }
+       if (isvfork)
+               while (p2->p_flag & P_PPWAIT)
+                       tsleep(p1, PWAIT, "ppwait", 0);
 
        /*
-        * 0 return means parent.
+        * Return child pid to parent process,
+        * marking us as parent via retval[1].
         */
+       retval[0] = p2->p_pid;
+       retval[1] = 0;
        return (0);
 }
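
For context, a minimal sketch (not part of this diff) of how a libc-style fork stub could consume the two values fork1() leaves in retval[0] and retval[1]: the child sees retval[1] set to 1 and reports 0 to its caller, while the parent sees retval[1] as 0 and reports the child's pid. The __syscall_fork() entry point below is hypothetical, used only to illustrate the convention.

/*
 * Illustrative sketch only, assuming a hypothetical __syscall_fork()
 * trap that returns the two register values fork1() stores in
 * retval[0] (a pid) and retval[1] (1 in the child, 0 in the parent).
 */
#include <sys/types.h>

extern int __syscall_fork(register_t rv[2]);	/* hypothetical trap */

pid_t
fork(void)
{
	register_t rv[2];

	if (__syscall_fork(rv) != 0)
		return (-1);		/* errno set by the trap */
	if (rv[1] != 0)			/* child: retval[1] was set to 1 */
		return (0);
	return ((pid_t)rv[0]);		/* parent: retval[0] is child's pid */
}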