/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from the University of Wisconsin.
 *
 * %sccs.include.proprietary.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	8.7 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
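
/*
 * A minimal user-level usage sketch of the calls implemented below
 * (error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *addr = shmat(id, (char *)0, 0);
 *	... use the memory ...
 *	shmdt(addr);
 *	shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */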

#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};
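
/*
 * The array hangs off p->p_vmspace->vm_shm; shmat() allocates it
 * lazily and shmfork() duplicates it into the child.
 */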

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

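/*
 * Called at system startup: carve the shared memory arena out of
 * kernel_map and mark every segment slot free.
 */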
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, TRUE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
compat_43_shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
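	/*
	 * The sub-call's own arguments sit directly after ``which'' in
	 * the argument structure, so &uap[1] can be handed to the
	 * target routine as its own argument pointer.
	 */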
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, VM_PROT_ALL,
				MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg & ACCESSPERMS);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg & ACCESSPERMS,
		    cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~ACCESSPERMS)
			| (sbuf.shm_perm.mode & ACCESSPERMS);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int shmid;
	caddr_t shmaddr;
	int shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
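	/*
	 * A requested address must be SHMLBA-aligned; SHM_RND rounds a
	 * misaligned request down (e.g., 0x12345 becomes 0x12000 for an
	 * assumed SHMLBA of 0x1000), otherwise it is rejected.
	 */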
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
			(vm_size_t)size, prot, VM_PROT_ALL, flags,
			((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return (EINVAL);
	shmufree(p, shmd);
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	return (0);
}

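/*
 * Duplicate a parent's attachments into the child at fork time.
 */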
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

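/*
 * Called at process exit: detach any remaining segments and release
 * the descriptor array.
 */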
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

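/*
 * Check that a user-supplied shmid refers to an allocated, non-dying
 * segment whose sequence number still matches.
 */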
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
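	/*
	 * For example, with ipc->mode 0640 a requester who is neither
	 * owner nor creator but is in the group sees m <<= 3 move the
	 * group bits into the 0700 position (06400), so IPC_R (0400)
	 * succeeds while IPC_W (0200) fails.
	 */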
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */