This commit was manufactured by cvs2svn to create tag 'FreeBSD-release/1.0'.
sys/kern/sysv_shm.c
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from University of Wisconsin.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 * from: @(#)sysv_shm.c 7.15 (Berkeley) 5/13/91
 * $Id$
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
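
/*
 * For orientation, a minimal sketch (not from the original source) of the
 * user-level call sequence these routines implement, assuming the standard
 * System V shm API declared in <sys/shm.h>:
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *      char *p = (char *) shmat(id, (char *)0, 0);  -- kernel picks address
 *      p[0] = 'x';                                  -- visible to other attachers
 *      shmdt(p);                                    -- detach
 *      shmctl(id, IPC_RMID, (struct shmid_ds *)0);  -- destroy after last detach
 */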

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

#ifdef HPUXCOMPAT
#include "hp300/hpux/hpux.h"
#endif

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct shmdesc {
        vm_offset_t shmd_uva;
        int shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
        vm_offset_t shmh_kva;
        caddr_t shmh_id;
};
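
/*
 * A note on shmh_id: shmget() fabricates it as (0xc0000000 | slot index) and
 * passes it as the handle to vm_mmap() with MAP_ANON, the intent being that
 * every mapping made with the same handle shares one anonymous memory object.
 */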

vm_map_t shm_map;       /* address space for shared memory segments */

shminit()
{
        register int i;
        vm_offset_t whocares1, whocares2;

        shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
            shminfo.shmall * NBPG, FALSE);
        if (shminfo.shmmni > SHMMMNI)
                shminfo.shmmni = SHMMMNI;
        for (i = 0; i < shminfo.shmmni; i++) {
                shmsegs[i].shm_perm.mode = 0;
                shmsegs[i].shm_perm.seq = 0;
        }
}

/*
 * Entry point for all SHM calls
 */

struct shmsys_args {
        u_int which;
};

shmsys(p, uap, retval)
        struct proc *p;
        struct shmsys_args *uap;
        int *retval;
{

        if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return (EINVAL);
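        /*
         * Dispatch to the requested call; the arguments for that call
         * follow the ``which'' word in the argument block, so &uap[1]
         * points at them.
         */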
        return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */

struct shmget_args {
        key_t key;
        int size;
        int shmflg;
};

shmget(p, uap, retval)
        struct proc *p;
        register struct shmget_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register struct ucred *cred = p->p_ucred;
        register int i;
        int error, size, rval = 0;
        register struct shmhandle *shmh;

        /* look up the specified shm_id */
        if (uap->key != IPC_PRIVATE) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
                            shmsegs[i].shm_perm.key == uap->key) {
                                rval = i;
                                break;
                        }
        } else
                i = shminfo.shmmni;

        /* create a new shared segment if necessary */
        if (i == shminfo.shmmni) {
                if ((uap->shmflg & IPC_CREAT) == 0)
                        return (ENOENT);
                if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                        return (EINVAL);
                for (i = 0; i < shminfo.shmmni; i++)
                        if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
                                rval = i;
                                break;
                        }
                if (i == shminfo.shmmni)
                        return (ENOSPC);
                size = clrnd(btoc(uap->size));
                if (shmtot + size > shminfo.shmall)
                        return (ENOMEM);
                shp = &shmsegs[rval];
                /*
                 * We need to do a couple of things to ensure consistency
                 * in case we sleep in malloc().  We mark segment as
                 * allocated so that other shmgets() will not allocate it.
                 * We mark it as "destroyed" to insure that shmvalid() is
                 * false making most operations fail (XXX).  We set the key,
                 * so that other shmget()s will fail.
                 */
                shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
                shp->shm_perm.key = uap->key;
                shmh = (struct shmhandle *)
                    malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
                shmh->shmh_kva = 0;
                shmh->shmh_id = (caddr_t)(0xc0000000|rval);     /* XXX */
                error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
                    VM_PROT_ALL, VM_PROT_DEFAULT, MAP_ANON, shmh->shmh_id, 0);
                if (error) {
                        free((caddr_t)shmh, M_SHM);
                        shp->shm_perm.mode = 0;
                        return(ENOMEM);
                }
                shp->shm_handle = (void *) shmh;
                shmtot += size;
                shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
                shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
                shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
                shp->shm_segsz = uap->size;
                shp->shm_cpid = p->p_pid;
                shp->shm_lpid = shp->shm_nattch = 0;
                shp->shm_atime = shp->shm_dtime = 0;
                shp->shm_ctime = time.tv_sec;
        } else {
                shp = &shmsegs[rval];
                /* XXX: probably not the right thing to do */
                if (shp->shm_perm.mode & SHM_DEST)
                        return (EBUSY);
                if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
                        return (error);
                if (uap->size && uap->size > shp->shm_segsz)
                        return (EINVAL);
                if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
                        return (EEXIST);
        }
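        /*
         * The returned id encodes both the slot index (id % SHMMMNI) and the
         * segment's sequence number, so shmvalid() can reject ids left over
         * from a slot that has since been freed and reused.
         */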
        *retval = shp->shm_perm.seq * SHMMMNI + rval;
        return (0);
}

/*
 * Shared memory control
 */

struct shmctl_args {
        int shmid;
        int cmd;
        caddr_t buf;
};

/* ARGSUSED */
shmctl(p, uap, retval)
        struct proc *p;
        register struct shmctl_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register struct ucred *cred = p->p_ucred;
        struct shmid_ds sbuf;
        int error;

        if (error = shmvalid(uap->shmid))
                return (error);
        shp = &shmsegs[uap->shmid % SHMMMNI];
        switch (uap->cmd) {
        case IPC_STAT:
                if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
                        return (error);
                return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

        case IPC_SET:
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
                        return (error);
                shp->shm_perm.uid = sbuf.shm_perm.uid;
                shp->shm_perm.gid = sbuf.shm_perm.gid;
                shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
                        | (sbuf.shm_perm.mode & 0777);
                shp->shm_ctime = time.tv_sec;
                break;

        case IPC_RMID:
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                /* set ctime? */
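                /*
                 * Mark the segment for destruction; the memory itself is
                 * released by shmfree(), either right here if nothing is
                 * attached or from shmufree() when the last attach goes away.
                 */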
                shp->shm_perm.key = IPC_PRIVATE;
                shp->shm_perm.mode |= SHM_DEST;
                if (shp->shm_nattch <= 0)
                        shmfree(shp);
                break;

#ifdef HPUXCOMPAT
        case SHM_LOCK:
        case SHM_UNLOCK:
                /* don't really do anything, but make them think we did */
                if ((p->p_flag & SHPUX) == 0)
                        return (EINVAL);
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                break;
#endif

        default:
                return (EINVAL);
        }
        return (0);
}

/*
 * Attach to shared memory segment.
 */

struct shmat_args {
        int shmid;
        caddr_t shmaddr;
        int shmflg;
};

shmat(p, uap, retval)
        struct proc *p;
        register struct shmat_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register int size;
        caddr_t uva;
        int error;
        int flags;
        vm_prot_t prot;
        struct shmdesc *shmd;

        /*
         * Allocate descriptors now (before validity check)
         * in case malloc() blocks.
         */
        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        size = shminfo.shmseg * sizeof(struct shmdesc);
        if (shmd == NULL) {
                shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
                bzero((caddr_t)shmd, size);
                p->p_vmspace->vm_shm = (caddr_t)shmd;
        }
        if (error = shmvalid(uap->shmid))
                return (error);
        shp = &shmsegs[uap->shmid % SHMMMNI];
        if (shp->shm_handle == NULL)
                panic("shmat NULL handle");
        if (error = ipcaccess(&shp->shm_perm,
            (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
                return (error);
        uva = uap->shmaddr;
        if (uva && ((int)uva & (SHMLBA-1))) {
                if (uap->shmflg & SHM_RND)
                        uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
                else
                        return (EINVAL);
        }
        /*
         * Make sure the user doesn't use more than their fair share:
         * find a free per-process descriptor slot, failing if all
         * shminfo.shmseg slots are already in use.
         */
        for (size = 0; size < shminfo.shmseg; size++) {
                if (shmd->shmd_uva == 0)
                        break;
                shmd++;
        }
        if (size >= shminfo.shmseg)
                return (EMFILE);
        size = ctob(clrnd(btoc(shp->shm_segsz)));
        prot = VM_PROT_READ;
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON|MAP_SHARED;
        if (uva)
                flags |= MAP_FIXED;
        else
                uva = (caddr_t)0x1000000;       /* XXX */
        error = vm_mmap(&p->p_vmspace->vm_map, &uva, (vm_size_t)size, prot,
            VM_PROT_DEFAULT, flags,
            ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
        if (error)
                return(error);
        shmd->shmd_uva = (vm_offset_t)uva;
        shmd->shmd_id = uap->shmid;
        /*
         * Fill in the remaining fields
         */
        shp->shm_lpid = p->p_pid;
        shp->shm_atime = time.tv_sec;
        shp->shm_nattch++;
        *retval = (int) uva;
        return (0);
}

/*
 * Detach from shared memory segment.
 */

struct shmdt_args {
        caddr_t shmaddr;
};

/* ARGSUSED */
shmdt(p, uap, retval)
        struct proc *p;
        struct shmdt_args *uap;
        int *retval;
{
        register struct shmdesc *shmd;
        register int i;

        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        if (shmd == NULL)               /* nothing was ever attached */
                return(EINVAL);
        for (i = 0; i < shminfo.shmseg; i++, shmd++)
                if (shmd->shmd_uva &&
                    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
                        break;
        if (i == shminfo.shmseg)
                return(EINVAL);
        /* record the caller's pid before shmufree() clears the descriptor */
        shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
        shmufree(p, shmd);
        return (0);
}

shmfork(p1, p2, isvfork)
        struct proc *p1, *p2;
        int isvfork;
{
        register struct shmdesc *shmd;
        register int size;

        /*
         * Copy parent's descriptive information
         */
        size = shminfo.shmseg * sizeof(struct shmdesc);
        shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmd;
        /*
         * Increment reference counts
         */
        for (size = 0; size < shminfo.shmseg; size++, shmd++)
                if (shmd->shmd_uva)
                        shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

shmexit(p)
        struct proc *p;
{
        register struct shmdesc *shmd;
        register int i;

        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmd++)
                if (shmd->shmd_uva)
                        shmufree(p, shmd);
        free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
        p->p_vmspace->vm_shm = NULL;
}

shmvalid(id)
        register int id;
{
        register struct shmid_ds *shp;

        if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
                return(EINVAL);
        shp = &shmsegs[id % SHMMMNI];
        if (shp->shm_perm.seq == (id / SHMMMNI) &&
            (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
                return(0);
        return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
        struct proc *p;
        struct shmdesc *shmd;
{
        register struct shmid_ds *shp;

        shp = &shmsegs[shmd->shmd_id % SHMMMNI];
        (void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
            ctob(clrnd(btoc(shp->shm_segsz))));
        shmd->shmd_id = 0;
        shmd->shmd_uva = 0;
        shp->shm_dtime = time.tv_sec;
        if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
                shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
        register struct shmid_ds *shp;
{

        if (shp->shm_handle == NULL)
                panic("shmfree");
        /*
         * Lose our lingering object reference by deallocating space
         * in kernel.  Pager will also be deallocated as a side-effect.
         */
        vm_deallocate(shm_map,
            ((struct shmhandle *)shp->shm_handle)->shmh_kva,
            ctob(clrnd(btoc(shp->shm_segsz))));
        free((caddr_t)shp->shm_handle, M_SHM);
        shp->shm_handle = NULL;
        shmtot -= clrnd(btoc(shp->shm_segsz));
        shp->shm_perm.mode = 0;
        /*
         * Increment the sequence number to ensure that outstanding
         * shmids for this segment will be invalid in the event that
         * the segment is reallocated.  Note that shmids must be
         * positive as decreed by SVID.
         */
        shp->shm_perm.seq++;
        if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
                shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
        register struct ipc_perm *ipc;
        int mode;
        register struct ucred *cred;
{
        register int m;

        if (cred->cr_uid == 0)
                return(0);
        /*
         * Access check is based on only one of owner, group, public.
         * If not owner, then check group.
         * If not a member of the group, then check public access.
         */
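        /*
         * For example, with ipc->mode 0640 the requested bits (masked to
         * 0700 below) are checked against 0640 for the owner, against
         * 0640<<3 (group bits shifted into the owner position) for a group
         * member, and against 0640<<6 for everyone else, so a non-owner
         * group member gets read but not write access.
         */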
        mode &= 0700;
        m = ipc->mode;
        if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
                m <<= 3;
                if (!groupmember(ipc->gid, cred) &&
                    !groupmember(ipc->cgid, cred))
                        m <<= 3;
        }
        if ((mode&m) == mode)
                return (0);
        return (EACCES);
}
#endif /* SYSVSHM */