Use M_IOBUF instead of M_TEMP for malloc'd buffers.
[unix-history] sys/kern/sysv_shm.c
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 * from: @(#)sysv_shm.c 7.15 (Berkeley) 5/13/91
 * $Id: sysv_shm.c,v 1.8 1993/12/20 19:31:18 wollman Exp $
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"
#include "vm/vm_user.h"

#ifdef HPUXCOMPAT
#include "hp300/hpux/hpux.h"
#endif

/* From shm.h */
struct shmid_ds *shmsegs;
struct shminfo shminfo;

int shmat(), shmctl(), shmdt(), shmget(); /* XXX */
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget }; /* XXX */
int shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct shmdesc {
        vm_offset_t shmd_uva;
        int shmd_id;
};
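/*
 * (The array of these hangs off p->p_vmspace->vm_shm; it is allocated on a
 * process's first shmat(), copied for the child in shmfork(), and released
 * in shmexit().)
 */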

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
        vm_offset_t shmh_kva;
        caddr_t shmh_id;
};

static int ipcaccess(struct ipc_perm *, int, struct ucred *);
static void shmufree(struct proc *, struct shmdesc *);
static void shmfree(struct shmid_ds *);
static int shmvalid(int);


vm_map_t shm_map;       /* address space for shared memory segments */

void
shminit()
{
        register int i;
        vm_offset_t whocares1, whocares2;

        shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
                                shminfo.shmall * NBPG, FALSE);
        if (shminfo.shmmni > SHMMMNI)
                shminfo.shmmni = SHMMMNI;
        for (i = 0; i < shminfo.shmmni; i++) {
                shmsegs[i].shm_perm.mode = 0;
                shmsegs[i].shm_perm.seq = 0;
        }
}

TEXT_SET(pseudo_set, shminit);

/*
 * Entry point for all SHM calls
 */

struct shmsys_args {
        u_int which;
};

int
shmsys(p, uap, retval)
        struct proc *p;
        struct shmsys_args *uap;
        int *retval;
{

        if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return (EINVAL);
        return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
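
/*
 * All four SysV shm calls are multiplexed through shmsys() above:
 * uap->which indexes shmcalls[], so 0 = shmat, 1 = shmctl, 2 = shmdt,
 * 3 = shmget, and the call-specific arguments are laid out immediately
 * after `which' (hence the &uap[1] passed to the handler).
 *
 * A rough user-level sketch of the round trip these handlers implement,
 * illustrative only and compiled out; it assumes the usual <sys/shm.h>
 * library interfaces rather than anything defined in this file:
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
shm_example()
{
        int id;
        char *p;

        /* create a private 4KB segment (handled by shmget() below) */
        id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0)
                return (-1);
        /* map it at a kernel-chosen address (shmat() below) */
        p = (char *)shmat(id, (char *)0, 0);
        if (p == (char *)-1)
                return (-1);
        p[0] = 'x';                     /* visible to any other attacher */
        (void) shmdt(p);                /* unmap (shmdt() below) */
        (void) shmctl(id, IPC_RMID, (struct shmid_ds *)0);      /* destroy */
        return (0);
}
#endif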

/*
 * Get a shared memory segment
 */

struct shmget_args {
        key_t key;
        int size;
        int shmflg;
};

int
shmget(p, uap, retval)
        struct proc *p;
        register struct shmget_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register struct ucred *cred = p->p_ucred;
        register int i;
        int error, size, rval = 0;
        register struct shmhandle *shmh;

        /* look up the specified shm_id */
        if (uap->key != IPC_PRIVATE) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
                            shmsegs[i].shm_perm.key == uap->key) {
                                rval = i;
                                break;
                        }
        } else
                i = shminfo.shmmni;

        /* create a new shared segment if necessary */
        if (i == shminfo.shmmni) {
                if ((uap->shmflg & IPC_CREAT) == 0)
                        return (ENOENT);
                if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                        return (EINVAL);
                for (i = 0; i < shminfo.shmmni; i++)
                        if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
                                rval = i;
                                break;
                        }
                if (i == shminfo.shmmni)
                        return (ENOSPC);
                size = clrnd(btoc(uap->size));
                if (shmtot + size > shminfo.shmall)
                        return (ENOMEM);
                shp = &shmsegs[rval];
                /*
                 * We need to do a couple of things to ensure consistency
                 * in case we sleep in malloc().  We mark the segment as
                 * allocated so that other shmget()s will not allocate it.
                 * We mark it as "destroyed" to ensure that shmvalid() is
                 * false, making most operations fail (XXX).  We set the key
                 * so that other shmget()s will fail.
                 */
                shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
                shp->shm_perm.key = uap->key;
                shmh = (struct shmhandle *)
                        malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
                shmh->shmh_kva = 0;
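                /*
                 * Fabricated identifier for the segment's backing anonymous
                 * object; shmat() below hands this same value to vm_mmap(),
                 * which is how later attaches find the object created here.
                 */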
                shmh->shmh_id = (caddr_t)(0xc0000000UL|rval); /* XXX */
                error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
                        VM_PROT_ALL, VM_PROT_DEFAULT, MAP_ANON, shmh->shmh_id, 0);
                if (error) {
                        free((caddr_t)shmh, M_SHM);
                        shp->shm_perm.mode = 0;
                        return(ENOMEM);
                }
                shp->shm_handle = (void *) shmh;
                shmtot += size;
                shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
                shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
                shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
                shp->shm_segsz = uap->size;
                shp->shm_cpid = p->p_pid;
                shp->shm_lpid = shp->shm_nattch = 0;
                shp->shm_atime = shp->shm_dtime = 0;
                shp->shm_ctime = time.tv_sec;
        } else {
                shp = &shmsegs[rval];
                /* XXX: probably not the right thing to do */
                if (shp->shm_perm.mode & SHM_DEST)
                        return (EBUSY);
                if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
                        return (error);
                if (uap->size && uap->size > shp->shm_segsz)
                        return (EINVAL);
                if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
                        return (EEXIST);
        }
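        /*
         * A shm id encodes both the slot and the slot's reuse generation:
         * id = seq * SHMMMNI + slot.  shmvalid() recovers the slot with
         * id % SHMMMNI and rejects ids whose recorded seq no longer matches,
         * so stale ids die once a slot is freed and handed out again (e.g.
         * with a hypothetical SHMMMNI of 32, slot 3 in generation 1 is id 35,
         * and generation 2 of the same slot is id 67).
         */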
        *retval = shp->shm_perm.seq * SHMMMNI + rval;
        return (0);
}

/*
 * Shared memory control
 */

struct shmctl_args {
        int shmid;
        int cmd;
        caddr_t buf;
};

/* ARGSUSED */
int
shmctl(p, uap, retval)
        struct proc *p;
        register struct shmctl_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register struct ucred *cred = p->p_ucred;
        struct shmid_ds sbuf;
        int error;

        if (error = shmvalid(uap->shmid))
                return (error);
        shp = &shmsegs[uap->shmid % SHMMMNI];
        switch (uap->cmd) {
        case IPC_STAT:
                if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
                        return (error);
                return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

        case IPC_SET:
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
                        return (error);
                shp->shm_perm.uid = sbuf.shm_perm.uid;
                shp->shm_perm.gid = sbuf.shm_perm.gid;
                shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
                        | (sbuf.shm_perm.mode & 0777);
                shp->shm_ctime = time.tv_sec;
                break;

        case IPC_RMID:
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                /* set ctime? */
                shp->shm_perm.key = IPC_PRIVATE;
                shp->shm_perm.mode |= SHM_DEST;
                if (shp->shm_nattch <= 0)
                        shmfree(shp);
                break;

#ifdef HPUXCOMPAT
        case SHM_LOCK:
        case SHM_UNLOCK:
                /* don't really do anything, but make them think we did */
                if ((p->p_flag & SHPUX) == 0)
                        return (EINVAL);
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                break;
#endif

        default:
                return (EINVAL);
        }
        return (0);
}

/*
 * Attach to shared memory segment.
 */

struct shmat_args {
        int shmid;
        caddr_t shmaddr;
        int shmflg;
};

int
shmat(p, uap, retval)
        struct proc *p;
        register struct shmat_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register int size;
        caddr_t uva;
        int error;
        int flags;
        vm_prot_t prot;
        struct shmdesc *shmd;

        /*
         * Allocate descriptors now (before validity check)
         * in case malloc() blocks.
         */
        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        size = shminfo.shmseg * sizeof(struct shmdesc);
        if (shmd == NULL) {
                shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
                bzero((caddr_t)shmd, size);
                p->p_vmspace->vm_shm = (caddr_t)shmd;
        }
        if (error = shmvalid(uap->shmid))
                return (error);
        shp = &shmsegs[uap->shmid % SHMMMNI];
        if (shp->shm_handle == NULL)
                panic("shmat NULL handle");
        if (error = ipcaccess(&shp->shm_perm,
            (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
                return (error);
        uva = uap->shmaddr;
        if (uva && ((int)uva & (SHMLBA-1))) {
                if (uap->shmflg & SHM_RND)
                        uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
                else
                        return (EINVAL);
        }
        /*
         * Make sure user doesn't use more than their fair share
         */
        for (size = 0; size < shminfo.shmseg; size++) {
                if (shmd->shmd_uva == 0)
                        break;
                shmd++;
        }
        if (size >= shminfo.shmseg)
                return (EMFILE);
        size = ctob(clrnd(btoc(shp->shm_segsz)));
        prot = VM_PROT_READ;
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
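        /*
         * Map the segment's backing object into the caller's address space:
         * an explicit shmaddr is honored exactly (MAP_FIXED); otherwise an
         * arbitrary hint is passed and vm_mmap() is free to place the
         * mapping wherever it finds room.
         */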
        flags = MAP_ANON|MAP_SHARED;
        if (uva)
                flags |= MAP_FIXED;
        else
                uva = (caddr_t)0x1000000UL;     /* XXX */
        error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
                        (vm_size_t)size, prot, VM_PROT_DEFAULT,
                        flags, ((struct shmhandle *)shp->shm_handle)->shmh_id,
                        0);
        if (error)
                return(error);
        shmd->shmd_uva = (vm_offset_t)uva;
        shmd->shmd_id = uap->shmid;
        /*
         * Fill in the remaining fields
         */
        shp->shm_lpid = p->p_pid;
        shp->shm_atime = time.tv_sec;
        shp->shm_nattch++;
        *retval = (int) uva;
        return (0);
}

/*
 * Detach from shared memory segment.
 */

struct shmdt_args {
        caddr_t shmaddr;
};

/* ARGSUSED */
int
shmdt(p, uap, retval)
        struct proc *p;
        struct shmdt_args *uap;
        int *retval;
{
        register struct shmdesc *shmd;
        register int i;

        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmd++)
                if (shmd->shmd_uva &&
                    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
                        break;
        if (i == shminfo.shmseg)
                return(EINVAL);
        shmufree(p, shmd);
        shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
        return 0;
}

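/*
 * Duplicate the parent's shm descriptor table into the child and note
 * the extra reference on every attached segment.
 */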
void
shmfork(p1, p2, isvfork)
        struct proc *p1, *p2;
        int isvfork;
{
        register struct shmdesc *shmd;
        register int size;

        /*
         * Copy the parent's descriptive information
         */
        size = shminfo.shmseg * sizeof(struct shmdesc);
        shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmd;
        /*
         * Increment reference counts
         */
        for (size = 0; size < shminfo.shmseg; size++, shmd++)
                if (shmd->shmd_uva)
                        shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

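/*
 * Called at process exit: detach anything still attached and release
 * the per-process descriptor array.
 */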
void
shmexit(p)
        struct proc *p;
{
        register struct shmdesc *shmd;
        register int i;

        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmd++)
                if (shmd->shmd_uva)
                        shmufree(p, shmd);
        free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
        p->p_vmspace->vm_shm = NULL;
}

static int
shmvalid(id)
        register int id;
{
        register struct shmid_ds *shp;

        if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
                return(EINVAL);
        shp = &shmsegs[id % SHMMMNI];
        if (shp->shm_perm.seq == (id / SHMMMNI) &&
            (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
                return(0);
        return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
static void
shmufree(p, shmd)
        struct proc *p;
        struct shmdesc *shmd;
{
        register struct shmid_ds *shp;

        shp = &shmsegs[shmd->shmd_id % SHMMMNI];
        (void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
                             ctob(clrnd(btoc(shp->shm_segsz))));
        shmd->shmd_id = 0;
        shmd->shmd_uva = 0;
        shp->shm_dtime = time.tv_sec;
        if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
                shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
static void
shmfree(shp)
        register struct shmid_ds *shp;
{

        if (shp->shm_handle == NULL)
                panic("shmfree");
        /*
         * Lose our lingering object reference by deallocating space
         * in kernel.  Pager will also be deallocated as a side-effect.
         */
        vm_deallocate(shm_map,
                      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
                      ctob(clrnd(btoc(shp->shm_segsz))));
        free((caddr_t)shp->shm_handle, M_SHM);
        shp->shm_handle = NULL;
        shmtot -= clrnd(btoc(shp->shm_segsz));
        shp->shm_perm.mode = 0;
        /*
         * Increment the sequence number to ensure that outstanding
         * shmids for this segment will be invalid in the event that
         * the segment is reallocated.  Note that shmids must be
         * positive as decreed by SVID.
         */
        shp->shm_perm.seq++;
        if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
                shp->shm_perm.seq = 0;
}
#endif  /* SYSVSHM */