/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.17 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

#ifdef HPUXCOMPAT
#include "hp/hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

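/*
 * Initialize the shared memory facility: carve out a kernel submap
 * (shm_map) large enough to back shminfo.shmall pages of segments
 * and mark every entry in shmsegs[] as free.  Expected to be called
 * once, before any of the shm system calls can be used.
 */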
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
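/*
 * The system call arguments consist of the call number followed
 * immediately by the arguments of the individual call, so the
 * selected handler is passed &uap[1] as its own argument block.
 */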
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
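/*
 * The shmid handed back to the user encodes both the slot and its
 * generation: shm_perm.seq * SHMMMNI + slot index.  shmvalid()
 * checks the same encoding so that ids referring to a recycled
 * slot are rejected.
 */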
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
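/*
 * IPC_STAT copies the shmid_ds out to the user, IPC_SET lets the
 * owner (or super-user) change uid/gid/mode, and IPC_RMID marks the
 * segment for destruction, freeing it immediately if nothing is
 * attached.  Under HPUXCOMPAT, SHM_LOCK/SHM_UNLOCK are accepted from
 * HP-UX processes but intentionally do nothing.
 */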
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
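/*
 * A caller-supplied address must be SHMLBA aligned, or SHM_RND must
 * be set so that it can be truncated to an SHMLBA boundary; with no
 * address the mapping is simply placed at a fixed user address (see
 * the XXX below).  Each attach is recorded in the per-process
 * shmdesc array so that shmdt(), fork and exit can find it.
 */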
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		caddr_t shmaddr;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
			(vm_size_t)size, prot, flags,
			((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
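/*
 * The address given must exactly match an address recorded by a
 * previous shmat(); partial or offset detaches are not supported.
 */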
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t shmaddr;
	} *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/*
	 * Record the last pid before shmufree() clears the descriptor
	 * (it zeroes both shmd_id and shmd_uva).
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

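/*
 * Duplicate shared memory state at fork: the child gets its own copy
 * of the parent's shmdesc array and the attach count of every segment
 * the parent had attached is bumped.  (The mappings themselves are
 * presumably inherited through the normal vmspace copy; only the
 * bookkeeping is handled here.)
 */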
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

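/*
 * Called when a process using shared memory exits: detach anything
 * still attached and release the per-process shmdesc array.
 */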
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

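/*
 * Check that a user-supplied shmid names a live segment: the slot
 * index must be in range and allocated (and not being destroyed),
 * and the sequence number must match the slot's current generation.
 */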
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
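	/*
	 * Illustration (assuming the usual IPC_R = 0400, IPC_W = 0200
	 * encoding): with ipc->mode 0640, a non-owner who is in the
	 * group has the group bits 040 shifted up to 0400, so a read
	 * request succeeds while a write request fails with EACCES.
	 */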
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */