/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	7.18 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
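
/*
 * Illustrative userland usage of the interface implemented below
 * (an example sketch, not part of the original source):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT|0600);
 *	char *p = (char *)shmat(id, (char *)0, 0);
 *	p[0] = 'x';	... visible to every process attached ...
 *	(void) shmdt(p);
 *	(void) shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */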

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

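/*
 * Initialize shared memory: carve shm_map out of the kernel map
 * and mark every segment slot free.
 */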
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

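	/*
	 * The call-specific arguments follow the code word in the
	 * argument buffer, so &uap[1] addresses them directly.
	 */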
	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
		    VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return (ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
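	/*
	 * The returned id encodes both slot and generation:
	 * shmvalid() recovers the slot as (id % SHMMMNI) and
	 * rejects ids whose sequence number has moved on.
	 */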
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
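		/*
		 * If the segment is still attached, it lingers with
		 * SHM_DEST set; shmufree() frees it on last detach.
		 */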
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		caddr_t shmaddr;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
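	/*
	 * Map the segment's backing object into the process; the
	 * address above is only a hint unless MAP_FIXED is set.
	 */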
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return (error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t shmaddr;
	} *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return (EINVAL);
	/*
	 * Record the detacher before shmufree() clears shmd_id;
	 * using the descriptor afterwards would name segment 0.
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

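/*
 * Duplicate the parent's segment descriptors at fork time.
 */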
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

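/*
 * Detach all attached segments and release the descriptor
 * array when a process exits.
 */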
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

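/*
 * Check that a shmid names a live segment: the slot must be
 * allocated, not marked for destruction, and of the same
 * generation as the id.
 */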
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return (EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return (0);
	return (EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
	    ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
	    ((struct shmhandle *)shp->shm_handle)->shmh_kva,
	    ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return (0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
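	/*
	 * The shifts align the relevant rwx triple with the 0700
	 * mask: e.g. for mode 0640, a group member is checked against
	 * (0640 << 3) & 0700 == 0400, i.e. read-only access.
	 */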
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */