"update from Mike Hibler at Utah"
[unix-history] usr/src/sys/kern/sysv_shm.c
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.2 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 */

#ifdef SYSVSHM

#include "machine/pte.h"

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "proc.h"
#include "vm.h"
#include "shm.h"
#include "mapmem.h"
#include "malloc.h"

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int shmtot = 0;

int shmfork(), shmexit();
struct mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };

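/*
 * Initialize the shared memory segment table: clamp shmmni to the
 * compiled-in SHMMMNI limit and mark every descriptor unallocated.
 */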
shminit()
{
	register int i;

	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/* entry point for all SHM calls */
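/* uap->which selects the call: 0 = shmat, 1 = shmctl, 2 = shmdt, 3 = shmget */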
shmsys()
{
	struct a {
		int which;
	} *uap = (struct a *)u.u_ap;

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) {
		u.u_error = EINVAL;
		return;
	}
	(*shmcalls[uap->which])(u.u_ap+1);
}

/* get a shared memory segment */
shmget(ap)
	int *ap;
{
	register struct a {
		key_t key;
		int size;
		int shmflg;
	} *uap = (struct a *)ap;
	register struct shmid_ds *shp;
	register int i;
	int rval = 0, size;
	caddr_t kva;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0) {
			u.u_error = ENOENT;
			return;
		}
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) {
			u.u_error = EINVAL;
			return;
		}
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni) {
			u.u_error = ENOSPC;
			return;
		}
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall) {
			u.u_error = ENOMEM;
			return;
		}
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc(). We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX). We set the key
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
		if (kva == NULL) {
			shp->shm_perm.mode = 0;
			u.u_error = ENOMEM;
			return;
		}
		if (!claligned(kva))
			panic("shmget: non-aligned memory");
		bzero(kva, (u_int)ctob(size));
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = u.u_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = u.u_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
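		/*
		 * shm_handle remembers the kernel ptes that back the
		 * segment; shmmapin() uses them to hand out page frames.
		 */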
		shp->shm_handle = (void *) kvtopte(kva);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = u.u_procp->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST) {
			u.u_error = EBUSY;
			return;
		}
		if (!ipcaccess(&shp->shm_perm, uap->shmflg&0777))
			return;
		if (uap->size && uap->size > shp->shm_segsz) {
			u.u_error = EINVAL;
			return;
		}
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL)) {
			u.u_error = EEXIST;
			return;
		}
	}
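	/* the shmid returned encodes both the table slot and the sequence number */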
	u.u_r.r_val1 = shp->shm_perm.seq * SHMMMNI + rval;
}

/* shared memory control */
shmctl(ap)
	int *ap;
{
	register struct a {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap = (struct a *)ap;
	register struct shmid_ds *shp;
	struct shmid_ds sbuf;

	if (!shmvalid(uap->shmid))
		return;
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (ipcaccess(&shp->shm_perm, IPC_R))
			u.u_error =
			    copyout((caddr_t)shp, uap->buf, sizeof(*shp));
		break;

	case IPC_SET:
		if (u.u_uid && u.u_uid != shp->shm_perm.uid &&
		    u.u_uid != shp->shm_perm.cuid) {
			u.u_error = EPERM;
			break;
		}
		u.u_error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf);
		if (!u.u_error) {
			shp->shm_perm.uid = sbuf.shm_perm.uid;
			shp->shm_perm.gid = sbuf.shm_perm.gid;
			shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
				| (sbuf.shm_perm.mode & 0777);
			shp->shm_ctime = time.tv_sec;
		}
		break;

	case IPC_RMID:
		if (u.u_uid && u.u_uid != shp->shm_perm.uid &&
		    u.u_uid != shp->shm_perm.cuid) {
			u.u_error = EPERM;
			break;
		}
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((u.u_procp->p_flag & SHPUX) == 0)
			u.u_error = EINVAL;
		else if (u.u_uid && u.u_uid != shp->shm_perm.uid &&
		    u.u_uid != shp->shm_perm.cuid)
			u.u_error = EPERM;
		break;
#endif

	default:
		u.u_error = EINVAL;
		break;
	}
}

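/*
 * Attach a shared memory segment to the calling process's address space.
 */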
shmat(ap)
	int *ap;
{
	struct a {
		int shmid;
		caddr_t shmaddr;
		int shmflg;
	} *uap = (struct a *)ap;
	register struct shmid_ds *shp;
	register int size;
	struct mapmem *mp;
	caddr_t uva;
	int prot, shmmapin();

	if (!shmvalid(uap->shmid))
		return;
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL ptbl");
	if (!ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
		return;
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else {
			u.u_error = EINVAL;
			return;
		}
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	size = 0;
	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops)
			size++;
	if (size >= shminfo.shmseg) {
		u.u_error = EMFILE;
		return;
	}
	/*
	 * Allocate a mapped memory region descriptor and
	 * attempt to expand the user page table to allow for region
	 */
	prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
#if defined(hp300)
	prot |= MM_CI;
#endif
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	mp = mmalloc(uap->shmid, &uva, (segsz_t)size, prot, &shmops);
	if (mp == MMNIL)
		return;
	if (!mmmapin(mp, shmmapin)) {
		mmfree(mp);
		return;
	}
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = u.u_procp->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	u.u_r.r_val1 = (int) uva;
}

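/*
 * Detach the shared memory segment mapped at the given user address.
 */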
shmdt(ap)
	int *ap;
{
	register struct a {
		caddr_t shmaddr;
	} *uap = (struct a *)ap;
	register struct mapmem *mp;

	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr)
			break;
	if (mp == MMNIL) {
		u.u_error = EINVAL;
		return;
	}
	shmsegs[mp->mm_id % SHMMMNI].shm_lpid = u.u_procp->p_pid;
	shmufree(mp);
}

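/*
 * Mapping routine handed to mmmapin(): translate an offset within the
 * segment into a page frame number, or return -1 if out of range.
 */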
shmmapin(mp, off)
	struct mapmem *mp;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
		return(-1);
	return(((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
}

/*
 * Increment attach count on fork
 */
shmfork(mp, ischild)
	register struct mapmem *mp;
{
	if (!ischild)
		shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
}

/*
 * Detach from shared memory segment on exit (or exec)
 */
shmexit(mp)
	register struct mapmem *mp;
{
	shmufree(mp);
}

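/*
 * Check that a shmid names an allocated, non-destroyed segment whose
 * sequence number still matches the one encoded in the id.
 */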
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(0);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(1);
	u.u_error = EINVAL;
	return(0);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(mp)
	struct mapmem *mp;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	mmmapout(mp);
	mmfree(mp);
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	caddr_t kva;

	if (shp->shm_handle == NULL)
		panic("shmfree");
	kva = (caddr_t) ptetokv(shp->shm_handle);
	free(kva, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated. Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
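/*
 * Returns nonzero if the requested mode of access is permitted;
 * returns 0 and sets u.u_error to EACCES if it is not.
 */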
ipcaccess(ipc, mode)
	register struct ipc_perm *ipc;
{
	register int m;

	if (u.u_uid == 0)
		return(1);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (u.u_uid != ipc->uid && u.u_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, u.u_cred) &&
		    !groupmember(ipc->cgid, u.u_cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (1);
	u.u_error = EACCES;
	return (0);
}

#endif /* SYSVSHM */