add proc pointers to appropriate VOP operations
[unix-history] usr/src/sys/kern/kern_fork.c
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *      @(#)kern_fork.c 7.25 (Berkeley) %G%
 */

#include "param.h"
#include "systm.h"
#include "map.h"
#include "filedesc.h"
#include "kernel.h"
#include "malloc.h"
#include "proc.h"
#include "resourcevar.h"
#include "vnode.h"
#include "seg.h"
#include "file.h"
#include "acct.h"
#include "ktrace.h"

/* ARGSUSED */
fork(p, uap, retval)
        struct proc *p;
        struct args *uap;
        int retval[];
{

        return (fork1(p, 0, retval));
}

/* ARGSUSED */
vfork(p, uap, retval)
        struct proc *p;
        struct args *uap;
        int retval[];
{

        return (fork1(p, 1, retval));
}
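
/*
 * Both entry points funnel into fork1(); the second argument selects
 * vfork semantics.  retval[] follows the kernel's two-word return
 * convention: retval[0] carries a pid and retval[1] distinguishes
 * parent (0) from child (1).  The user-level fork stub presumably
 * combines the two so that fork() returns the child's pid in the
 * parent and 0 in the child.
 */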

int     nprocs = 1;             /* process 0 */

fork1(p1, isvfork, retval)
        register struct proc *p1;
        int isvfork, retval[];
{
        register struct proc *p2;
        register int count, uid;
        static int nextpid, pidchecked = 0;

        count = 0;
        if ((uid = p1->p_ucred->cr_uid) != 0) {
                for (p2 = allproc; p2; p2 = p2->p_nxt)
                        if (p2->p_ucred->cr_uid == uid)
                                count++;
                for (p2 = zombproc; p2; p2 = p2->p_nxt)
                        if (p2->p_ucred->cr_uid == uid)
                                count++;
        }
        /*
         * Although process entries are dynamically created,
         * we still keep a global limit on the maximum number
         * we will create.  Don't allow a nonprivileged user
         * to exceed its current limit or to bring us within one
         * of the global limit; don't let root exceed the limit.
         * nprocs is the current number of processes,
         * maxproc is the limit.
         */
        retval[1] = 0;
        if (nprocs >= maxproc || uid == 0 && nprocs >= maxproc + 1) {
                tablefull("proc");
                return (EAGAIN);
        }
        if (count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)
                return (EAGAIN);
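
        /*
         * The count above covers every active and zombie process owned
         * by this uid system-wide, not just the parent's children; uid 0
         * skips the scan, so root is never held to RLIMIT_NPROC.  Because
         * the test is "count > limit", a user sitting exactly at the
         * limit can still create one more process.
         */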

        /*
         * Find an unused process ID.
         * We remember a range of unused IDs ready to use
         * (from nextpid+1 through pidchecked-1).
         */
        nextpid++;
retry:
        /*
         * If the process ID prototype has wrapped around,
         * restart somewhat above 0, as the low-numbered procs
         * tend to include daemons that don't exit.
         */
        if (nextpid >= PID_MAX) {
                nextpid = 100;
                pidchecked = 0;
        }
        if (nextpid >= pidchecked) {
                int doingzomb = 0;

                pidchecked = PID_MAX;
                /*
                 * Scan the active and zombie procs to check whether this pid
                 * is in use.  Remember the lowest pid that's greater
                 * than nextpid, so we can avoid checking for a while.
                 */
                p2 = allproc;
again:
                for (; p2 != NULL; p2 = p2->p_nxt) {
                        if (p2->p_pid == nextpid ||
                            p2->p_pgrp->pg_id == nextpid) {
                                nextpid++;
                                if (nextpid >= pidchecked)
                                        goto retry;
                        }
                        if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
                                pidchecked = p2->p_pid;
                        if (p2->p_pgrp->pg_id > nextpid &&
                            pidchecked > p2->p_pgrp->pg_id)
                                pidchecked = p2->p_pgrp->pg_id;
                }
                if (!doingzomb) {
                        doingzomb = 1;
                        p2 = zombproc;
                        goto again;
                }
        }
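
        /*
         * The effect of the scan is to leave the range [nextpid, pidchecked)
         * free of both process IDs and process group IDs.  For example, with
         * pids 30, 31 and 35 in use and nextpid starting at 30, the scan
         * (possibly after a retry when the lists are not in pid order)
         * advances nextpid to 32 and records pidchecked = 35, so 32, 33 and
         * 34 can be handed out by later forks without walking the lists.
         */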

        /*
         * Allocate new proc.
         * Link onto allproc (this should probably be delayed).
         */
        MALLOC(p2, struct proc *, sizeof(struct proc), M_PROC, M_WAITOK);
        nprocs++;
        p2->p_nxt = allproc;
        p2->p_nxt->p_prev = &p2->p_nxt;         /* allproc is never NULL */
        p2->p_prev = &allproc;
        allproc = p2;
        p2->p_link = NULL;                      /* shouldn't be necessary */
        p2->p_rlink = NULL;                     /* shouldn't be necessary */
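
        /*
         * allproc is a singly linked list threaded through p_nxt, with
         * p_prev pointing back at whichever forward link points to this
         * proc (the previous proc's p_nxt, or allproc itself for the head).
         * Unlinking elsewhere can then presumably be done as
         *
         *      *p->p_prev = p->p_nxt;
         *      if (p->p_nxt)
         *              p->p_nxt->p_prev = p->p_prev;
         *
         * without special-casing the head of the list.
         */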

        /*
         * Make a proc table entry for the new process.
         * Start by zeroing the section of proc that is zero-initialized,
         * then copy the section that is copied directly from the parent.
         */
        bzero(&p2->p_startzero,
            (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
        bcopy(&p1->p_startcopy, &p2->p_startcopy,
            (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
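
        /*
         * p_startzero/p_endzero and p_startcopy/p_endcopy are presumably
         * marker fields in struct proc (in proc.h) that bracket the members
         * to be cleared and the members to be inherited verbatim, so each
         * range can be initialized with a single bzero() or bcopy() rather
         * than touching every field by name.
         */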

        /*
         * Duplicate sub-structures as needed.
         * Increase reference counts on shared objects.
         */
        MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
            M_SUBPROC, M_WAITOK);
        bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
        crhold(p1->p_ucred);

        p2->p_fd = fdcopy(p1);
        p2->p_stats = p1->p_stats;      /* XXX move; in u. */
        /*
         * If p_limit is still copy-on-write, bump refcnt,
         * otherwise get a copy that won't be modified.
         * (If PL_SHAREMOD is clear, the structure is shared
         * copy-on-write.)
         */
        if (p1->p_limit->p_lflags & PL_SHAREMOD)
                p2->p_limit = limcopy(p1->p_limit);
        else {
                p2->p_limit = p1->p_limit;
                p2->p_limit->p_refcnt++;
        }
        p2->p_sigacts = p1->p_sigacts;  /* XXX move; in u. */

        p2->p_flag = SLOAD | (p1->p_flag & (SPAGV|SHPUX));
        if (p1->p_session->s_ttyvp != NULL && p1->p_flag & SCTTY)
                p2->p_flag |= SCTTY;
        if (isvfork)
                p2->p_flag |= SPPWAIT;
        p2->p_stat = SIDL;
        p2->p_pid = nextpid;
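        /*
         * Enter the new process in the pid hash table so it can be found
         * by pid; pidhash[] and the PIDHASH() macro presumably live in
         * proc.h, and pfind() presumably walks the p_hash chain when
         * translating a pid back into a proc pointer.
         */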
        {
        struct proc **hash = &pidhash[PIDHASH(p2->p_pid)];

        p2->p_hash = *hash;
        *hash = p2;
        }
        p2->p_pgrpnxt = p1->p_pgrpnxt;
        p1->p_pgrpnxt = p2;
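        /*
         * Wire the child into the family tree: p_pptr is the parent,
         * p_cptr the parent's youngest child, and p_osptr/p_ysptr link
         * siblings from youngest to oldest.  The new child becomes the
         * parent's youngest, with the previous youngest as its older
         * sibling.
         */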
        p2->p_pptr = p1;
        p2->p_osptr = p1->p_cptr;
        if (p1->p_cptr)
                p1->p_cptr->p_ysptr = p2;
        p1->p_cptr = p2;
#ifdef KTRACE
        /*
         * Copy traceflag and tracefile if enabled.
         * If not inherited, these were zeroed above.
         */
        if (p1->p_traceflag&KTRFAC_INHERIT) {
                p2->p_traceflag = p1->p_traceflag;
                if ((p2->p_tracep = p1->p_tracep) != NULL)
                        VREF(p2->p_tracep);
        }
#endif

        p2->p_regs = p1->p_regs;        /* XXX move this */
#if defined(tahoe)
        p2->p_vmspace->p_ckey = p1->p_vmspace->p_ckey; /* XXX move this */
#endif

        /*
         * This begins the section where we must prevent the parent
         * from being swapped.
         */
        p1->p_flag |= SKEEP;
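        /*
         * vm_fork() duplicates the address space and arranges for the
         * child to resume here; it presumably returns nonzero when running
         * as the new child and 0 in the parent.  The child path below
         * reports the parent's pid with retval[1] == 1 and returns, while
         * the parent falls through to make the child runnable.
         */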
        if (vm_fork(p1, p2, isvfork)) {
                /*
                 * Child process.  Set start time, return parent pid,
                 * and mark as child in retval[1].
                 */
                (void) splclock();
                p2->p_stats->p_start = time;
                (void) spl0();
                retval[0] = p1->p_pid;
                retval[1] = 1;
                p2->p_acflag = AFORK;
                return (0);
        }

        /*
         * Make child runnable and add to run queue.
         */
        (void) splhigh();
        p2->p_stat = SRUN;
        setrq(p2);
        (void) spl0();

        /*
         * Now can be swapped.
         */
        p1->p_flag &= ~SKEEP;

        /*
         * XXX preserve synchronization semantics of vfork
         * If waiting for child to exec or exit, set SPPWAIT
         * on child, and sleep on our proc (in case of exit).
         */
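        /*
         * SPPWAIT is presumably cleared by the child in execve() or exit(),
         * which then does a wakeup() on the parent proc to match the
         * sleep() channel below.
         */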
        if (isvfork)
                while (p2->p_flag & SPPWAIT)
                        sleep((caddr_t)p1, PZERO - 1);

        /*
         * Return child pid to parent process.
         * retval[1] was set above.
         */
        retval[0] = p2->p_pid;
        return (0);
}