/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	7.19 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

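/*
 * Dispatch table for shmsys(), indexed by its ``which'' argument;
 * shmtot counts the pages currently allocated to all segments.
 */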
int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */
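
/*
 * Initialize the shared memory facility: carve shm_map (room for
 * shminfo.shmall pages of backing store) out of the kernel map, clamp
 * shminfo.shmmni to the compiled-in SHMMMNI limit, and mark every
 * segment slot free.
 */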
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
	    shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

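/*
 * For illustration only (not part of the original source): a user
 * program reaches the routines below through the usual System V
 * interface, with the userland stubs funneling each call through
 * shmsys() and ``which'' selecting the entry in shmcalls[]
 * (0 = shmat, 1 = shmctl, 2 = shmdt, 3 = shmget), e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, (char *)0, 0);
 *	p[0] = 'x';
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */
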
/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
		    VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
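	/*
	 * The id handed back to the user encodes both the slot index and
	 * the slot's sequence number; shmvalid() later checks the pair.
	 */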
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;
	int	shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t	shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	shmufree(p, shmd);
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
}

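/*
 * Duplicate the parent's attach table at fork time and bump the attach
 * count on every segment the parent has mapped; the user mappings
 * themselves are copied by the normal address-space duplication.
 */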
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parents descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

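/*
 * Called when a process exits: detach any segments still attached and
 * release the per-process descriptor array.
 */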
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

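/*
 * Check that a user-supplied shmid names a live segment: the index must
 * be in range, the sequence number must match, and the slot must be
 * allocated but not marked for destruction.
 */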
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
	    ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
	    ((struct shmhandle *)shp->shm_handle)->shmh_kva,
	    ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */