/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.15 (Berkeley) 5/13/91
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
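
/*
 * For orientation, a minimal and purely illustrative user-level sequence
 * that exercises these routines through the usual library stubs (constants
 * from <sys/ipc.h> and <sys/shm.h>); it is a sketch, not a reference:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = (char *) shmat(id, (char *) 0, 0);
 *	p[0] = 'x';				(use the memory)
 *	(void) shmdt(p);			(drop this mapping)
 *	(void) shmctl(id, IPC_RMID, (struct shmid_ds *) 0);
 */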

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

#ifdef HPUXCOMPAT
#include "hp300/hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
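
/*
 * shmsys() dispatches on uap->which, which indexes shmcalls[] above:
 * 0 is shmat, 1 is shmctl, 2 is shmdt and 3 is shmget.  The arguments of
 * the selected call are laid out immediately after ``which'' in the
 * argument structure, which is why &uap[1] is passed through.
 */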

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmget()s will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
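
/*
 * The identifier returned above encodes both the slot and the generation:
 * id = seq * SHMMMNI + slot.  shmvalid() and the other routines recover the
 * slot with id % SHMMMNI and check the generation against id / SHMMMNI, so
 * stale ids are rejected once shmfree() bumps shm_perm.seq.  As a worked
 * example (with a purely illustrative SHMMMNI of 32): slot 5 at seq 2 gives
 * id 69, and 69 % 32 == 5 while 69 / 32 == 2.
 */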

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int	shmid;
		caddr_t	shmaddr;
		int	shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
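	/*
	 * SHM_RND simply truncates the requested address down to the SHMLBA
	 * boundary: for example, if SHMLBA were 0x1000, a shmaddr of 0x12345
	 * would be rounded to 0x12000 by the mask above.
	 */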
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, &uva, (vm_size_t)size, prot,
	    flags, ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	shmaddr;
	} *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/*
	 * Record the last pid before shmufree() clears shmd_id out of
	 * the descriptor.
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's shm descriptors
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
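	/*
	 * Worked example (illustrative only): with a segment mode of 0640
	 * and a caller who is a group member but not the owner, m becomes
	 * 0640 << 3 == 06400, so a request for IPC_R (0400) satisfies the
	 * (mode&m) == mode test below while IPC_R|IPC_W (0600) does not
	 * and the caller gets EACCES.
	 */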
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */