/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 * from: @(#)sysv_shm.c	7.15 (Berkeley) 5/13/91
 * $Id: sysv_shm.c,v 1.5 1993/11/07 17:46:20 wollman Exp $
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

#ifdef HPUXCOMPAT
#include "hp300/hpux/hpux.h"
#endif

/* From shm.h */
struct shmid_ds *shmsegs;
struct shminfo shminfo;

int shmat(), shmctl(), shmdt(), shmget();	/* XXX */
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };	/* XXX */
int shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

static void shmufree(struct proc *, struct shmdesc *);
static void shmfree(struct shmid_ds *);

vm_map_t shm_map;	/* address space for shared memory segments */

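/*
 * Initialize the shared memory facility: carve a submap for segment
 * backing store out of kernel_map (shminfo.shmall pages worth) and
 * mark every segment slot free.
 */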
void
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */

struct shmsys_args {
	u_int which;
};

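/*
 * shmsys() dispatches on uap->which, indexing the shmcalls[] table
 * above.  The arguments for the selected call follow "which" in the
 * argument block, which is why &uap[1] is handed to the handler.
 */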
int
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */

struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};

int
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
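		/*
		 * The fabricated id is passed to vm_mmap() as the object
		 * handle both here and for the user mappings created in
		 * shmat(), so every attach of this segment refers to the
		 * same underlying anonymous object.
		 */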
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
		    VM_PROT_ALL, VM_PROT_DEFAULT, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
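	/*
	 * The returned id encodes both the slot number and the slot's
	 * current sequence number (seq * SHMMMNI + slot); shmvalid()
	 * checks the sequence part so that ids from a previous use of
	 * the slot are rejected.
	 */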
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */

struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};

/* ARGSUSED */
int
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

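	/*
	 * IPC_RMID only marks the segment for destruction; the memory is
	 * released immediately only if nothing is attached, otherwise the
	 * last detach (shmufree) performs the actual shmfree().
	 */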
	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */

struct shmat_args {
	int shmid;
	caddr_t shmaddr;
	int shmflg;
};

int
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
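	/*
	 * A caller-supplied address is used as-is (MAP_FIXED, possibly
	 * rounded to an SHMLBA boundary above); otherwise vm_mmap() places
	 * the segment starting from a fixed default hint in the user
	 * address space.
	 */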
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, &uva, (vm_size_t)size, prot,
	    VM_PROT_DEFAULT, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */

struct shmdt_args {
	caddr_t shmaddr;
};

/* ARGSUSED */
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/* record the detaching pid before shmufree() clears shmd_id */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return 0;
}

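/*
 * Called at fork: give the child a copy of the parent's shm descriptor
 * array and bump the attach count of every segment the parent had
 * attached.
 */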
void
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

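/*
 * Called at process exit: detach any remaining segments and free the
 * per-process descriptor array.
 */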
void
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

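/*
 * Check a user-supplied shm id: the low part (id % SHMMMNI) selects the
 * segment slot, the high part (id / SHMMMNI) must match the slot's
 * sequence number, and the slot must be allocated and not marked for
 * destruction.
 */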
int
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
static void
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
static void
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
int
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */