use symbolic names for access permissions
[unix-history] usr/src/sys/kern/sysv_shm.c
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	8.2 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of shminfo.shmseg of these.
 */
struct shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

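/*
 * Illustrative sketch (not part of the original source): the tunables
 * consulted above and throughout this file.  The field names follow
 * struct shminfo in <sys/shm.h>; the comments are an editor's summary
 * of how this file uses each limit, not authoritative documentation.
 */
#if 0
struct shminfo_usage {			/* hypothetical mirror, annotation only */
	int shmmax;	/* largest segment shmget() will create (bytes) */
	int shmmin;	/* smallest segment shmget() will create (bytes) */
	int shmmni;	/* slots in shmsegs[]; clamped to SHMMMNI in shminit() */
	int shmseg;	/* per-process limit on attached segments (shmat()) */
	int shmall;	/* system-wide page budget checked against shmtot */
};
#endif
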
/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

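/*
 * Illustrative sketch (not part of the original source): how a user
 * program typically reaches the dispatcher above.  shmsys() treats the
 * words following `which' as the arguments of the selected call, which
 * is why it passes &uap[1].  The userland stub names below are the
 * customary libc interfaces, assumed rather than taken from this file.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
shm_example()
{
	int id;
	char *p;

	/* create (or look up) a 4096-byte segment, rw for owner */
	if ((id = shmget((key_t)1234, 4096, IPC_CREAT|0600)) < 0)
		return (-1);
	/* attach it; address 0 lets the kernel choose */
	if ((p = (char *)shmat(id, (caddr_t)0, 0)) == (char *)-1)
		return (-1);
	*p = 'x';					/* use the shared memory */
	(void) shmdt(p);				/* detach */
	(void) shmctl(id, IPC_RMID, (caddr_t)0);	/* mark for removal */
	return (0);
}
#endif
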
/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, VM_PROT_ALL,
				MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return (ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg & ACCESSPERMS);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg & ACCESSPERMS,
		    cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg & IPC_CREAT) && (uap->shmflg & IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

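/*
 * Illustrative sketch (not part of the original source): how the id
 * returned by shmget() above is composed, and how the rest of this
 * file takes it apart.  The helper names are hypothetical.
 */
#if 0
int
shmid_make(int seq, int slot)	/* what "seq * SHMMMNI + rval" builds */
{
	return (seq * SHMMMNI + slot);
}

int
shmid_slot(int shmid)		/* index into shmsegs[], cf. shmctl()/shmat() */
{
	return (shmid % SHMMMNI);
}

int
shmid_seq(int shmid)		/* incarnation number, cf. shmvalid() */
{
	return (shmid / SHMMMNI);
}
#endif
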
/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~ACCESSPERMS)
			| (sbuf.shm_perm.mode & ACCESSPERMS);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int shmid;
	caddr_t shmaddr;
	int shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, VM_PROT_ALL, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return (error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

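/*
 * Illustrative sketch (not part of the original source): the attach
 * address handling in shmat() above.  SHMLBA is a power of two; with
 * SHMLBA = 0x1000 (an assumed value, it is machine-dependent) a
 * request for 0x12345 rounds down to 0x12000 when SHM_RND is set and
 * fails with EINVAL when it is not.
 */
#if 0
caddr_t
shm_round_addr(caddr_t uva, int shmflg)	/* hypothetical helper */
{
	if ((int)uva & (SHMLBA-1)) {
		if ((shmflg & SHM_RND) == 0)
			return ((caddr_t)-1);	/* caller returns EINVAL */
		uva = (caddr_t)((int)uva & ~(SHMLBA-1));	/* truncate */
	}
	return (uva);
}
#endif
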
/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return (EINVAL);
	/* record lpid before shmufree() clears the descriptor's id */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return (EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return (0);
	return (EINVAL);
}

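/*
 * Illustrative sketch (not part of the original source): why the
 * sequence comparison in shmvalid() above catches stale ids.  Slot
 * numbers are reused, but shmfree() bumps shm_perm.seq on every
 * deallocation, so an id minted for an earlier incarnation of the
 * slot decodes to the wrong sequence number.
 */
#if 0
void
stale_id_demo()				/* hypothetical, illustration only */
{
	int id = 5 * SHMMMNI + 2;	/* id minted when slot 2 had seq 5 */

	shmsegs[2].shm_perm.seq = 6;	/* slot was freed and reallocated */
	shmsegs[2].shm_perm.mode = SHM_ALLOC;
	/* id / SHMMMNI == 5 != 6, so shmvalid(id) returns EINVAL */
}
#endif
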
/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return (0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode & m) == mode)
		return (0);
	return (EACCES);
}
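
/*
 * Illustrative sketch (not part of the original source): a worked
 * example of the shifting in ipcaccess() above.  The requested bits
 * are masked to the owner (0700) positions; each `m <<= 3' promotes
 * the next lower permission triplet into those positions.
 */
#if 0
void
ipcaccess_demo()		/* hypothetical, illustration only */
{
	int m = 0640;		/* rw- for owner, r-- for group, --- other */

	/* owner asking IPC_R (0400): 0400 & 0640 == 0400 -> granted */
	/* group member: m <<= 3 gives 06400; 0400 & 06400 == 0400 -> granted */
	/* anyone else: m <<= 3 again gives 064000; 0400 & 064000 == 0 -> EACCES */
}
#endif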
#endif /* SYSVSHM */