[unix-history] usr/src/sys/kern/sysv_shm.c
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.7 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 */

#ifdef SYSVSHM

#include "machine/pte.h"

#include "param.h"
#include "systm.h"
#include "syscontext.h"
#include "kernel.h"
#include "proc.h"
#include "vm.h"
#include "shm.h"
#include "mapmem.h"
#include "malloc.h"

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

int	shmfork(), shmexit();
struct	mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };

shminit()
{
	register int i;

	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		RETURN (EINVAL);
	RETURN ((*shmcalls[uap->which])(p, &uap[1], retval));
}
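
/*
 * Editorial note: shmsys() is a four-way multiplexor.  The first
 * syscall argument ("which") selects a handler from shmcalls[], and
 * "&uap[1]" passes the address of whatever arguments follow "which"
 * in the syscall argument block, which each handler then interprets
 * as its own args struct.
 */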

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	register int i;
	int error, size, rval = 0;
	caddr_t kva;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
		if (kva == NULL) {
			shp->shm_perm.mode = 0;
			return (ENOMEM);
		}
		if (!claligned(kva))
			panic("shmget: non-aligned memory");
		bzero(kva, (u_int)ctob(size));
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg & 0777);
		shp->shm_handle = (void *) kvtopte(kva);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg & 0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg & IPC_CREAT) && (uap->shmflg & IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
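
/*
 * Editorial sketch, not part of the original source: a minimal
 * user-level use of the shmget() path above, assuming the standard
 * SVID user interface declared in <sys/ipc.h> and <sys/shm.h>.
 * The key value 0x1234 is purely illustrative.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
shmget_example()
{
	/*
	 * Create (or find) a segment for a well-known key: 4096 bytes,
	 * created if absent, owner read/write.  Returns the shmid, or
	 * -1 with errno set (e.g. EINVAL for a bad size, ENOSPC when
	 * the shmsegs table is full).
	 */
	return (shmget((key_t)0x1234, 4096, IPC_CREAT | 0600));
}
#endif /* example */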

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}
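
/*
 * Editorial sketch, not part of the original source: user-level use of
 * shmctl() above.  Note that IPC_RMID only marks the segment SHM_DEST;
 * the memory itself is released by shmfree() once the last attachment
 * goes away.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
shmctl_example(shmid)
	int shmid;
{
	struct shmid_ds ds;

	/* Copy out the kernel's shmid_ds for inspection. */
	if (shmctl(shmid, IPC_STAT, &ds) < 0)
		return (-1);
	/* Schedule the segment for destruction when fully detached. */
	return (shmctl(shmid, IPC_RMID, (struct shmid_ds *)0));
}
#endif /* example */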

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		caddr_t shmaddr;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	struct mapmem *mp;
	caddr_t uva;
	int error, prot, shmmapin();

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, u.u_cred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	size = 0;
	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops)
			size++;
	if (size >= shminfo.shmseg)
		return (EMFILE);
	/*
	 * Allocate a mapped memory region descriptor and
	 * attempt to expand the user page table to allow for region
	 */
	prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
#if defined(hp300)
	prot |= MM_CI;
#endif
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	error = mmalloc(p, uap->shmid, &uva, (segsz_t)size, prot, &shmops, &mp);
	if (error)
		return (error);
	if (error = mmmapin(p, mp, shmmapin)) {
		(void) mmfree(p, mp);
		return (error);
	}
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t shmaddr;
	} *uap;
	int *retval;
{
	register struct mapmem *mp;

	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr)
			break;
	if (mp == MMNIL)
		return (EINVAL);
	shmsegs[mp->mm_id % SHMMMNI].shm_lpid = p->p_pid;
	return (shmufree(p, mp));
}
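
/*
 * Editorial sketch, not part of the original source: a user-level
 * attach/detach cycle against shmat()/shmdt() above.  Passing a zero
 * address lets the system choose where to map the segment; the
 * historical user interface returns char *, declared here explicitly
 * since old <sys/shm.h> headers may not prototype it.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
shmat_example(shmid)
	int shmid;
{
	extern char *shmat();
	char *addr;

	/* Map the segment read/write at a system-chosen address. */
	addr = shmat(shmid, (char *)0, 0);
	if (addr == (char *)-1)
		return (-1);
	addr[0] = 1;			/* touch the shared memory */
	return (shmdt(addr));		/* drop this mapping */
}
#endif /* example */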

shmmapin(mp, off)
	struct mapmem *mp;
	int off;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
		return (-1);
	return (((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
}

/*
 * Increment attach count on fork
 */
/* ARGSUSED */
shmfork(mp, ischild)
	register struct mapmem *mp;
	int ischild;
{
	if (!ischild)
		shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
}

/*
 * Detach from shared memory segment on exit (or exec)
 */
shmexit(mp)
	struct mapmem *mp;
{
	struct proc *p = u.u_procp;	/* XXX */

	return (shmufree(p, mp));
}

shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return (EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return (0);
	return (EINVAL);
}
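
/*
 * Editorial note: a worked example of the shmid encoding checked above.
 * shmget() hands out id = seq * SHMMMNI + slot, so with SHMMMNI == 64
 * (assumed here purely for illustration), slot 5 and sequence 3 yield
 * id 197.  shmvalid() recovers the slot as 197 % 64 == 5 and demands
 * that 197 / 64 == 3 still match the slot's sequence number, so stale
 * ids go invalid once the slot is freed and reused.
 */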

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, mp)
	struct proc *p;
	struct mapmem *mp;
{
	register struct shmid_ds *shp;
	int error;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	mmmapout(p, mp);
	error = mmfree(p, mp);
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
	return (error);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	caddr_t kva;

	if (shp->shm_handle == NULL)
		panic("shmfree");
	kva = (caddr_t) ptetokv(shp->shm_handle);
	free(kva, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return (0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode & m) == mode)
		return (0);
	return (EACCES);
}
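
/*
 * Editorial note: a worked example of the shift trick above.  Take an
 * ipc_perm mode of 0640 and a caller requesting IPC_R (0400).  For the
 * owner, 0400 & 0640 == 0400 and access is granted.  For a group
 * member, m <<= 3 moves the group bits into the owner positions
 * (0640 << 3 == 06400), so a read request still passes but a write
 * request (0200) fails with EACCES.  Anyone else shifts twice and ends
 * up testing the "other" bits, which for 0640 are empty.
 */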

#endif /* SYSVSHM */