* Copyright (c) 1988 University of Utah.
* Copyright (c) 1990 The Regents of the University of California.
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Science Department. Originally from University of Wisconsin.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
* @(#)sysv_shm.c 7.15 (Berkeley) 5/13/91
* System V shared memory routines.
* TEMPORARY, until mmap is in place;
* needed now for HP-UX compatibility and X server (yech!).
#include "vm/vm_inherit.h"
#include "hp300/hpux/hpux.h"
/*
 * Handlers for the four shm system calls; all share the
 * (p, uap, retval) calling convention used by the dispatcher.
 */
int shmat(), shmctl(), shmdt(), shmget();

/*
 * Dispatch table indexed by the "which" argument of shmsys().
 * The order fixes the sub-call numbering and must not change.
 */
int (*shmcalls[])() = {
	shmat,
	shmctl,
	shmdt,
	shmget,
};
/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 *
 * Per segment internal structure (shm_handle).
 */
/* Kernel submap holding all SysV shared memory segments. */
vm_map_t shm_map
; /* address space for shared memory segments */
/*
 * NOTE(review): the lines below look like the interior of the shm
 * initialization routine; its header and closing brace are not
 * visible in this fragment.
 */
vm_offset_t whocares1
, whocares2
;
/* Carve the shm submap out of kernel_map, sized shmall pages. */
shm_map
= kmem_suballoc(kernel_map
, &whocares1
, &whocares2
,
shminfo
.shmall
* NBPG
, FALSE
);
/* Clamp the configured segment count to the compile-time table size. */
if (shminfo
.shmmni
> SHMMMNI
)
shminfo
.shmmni
= SHMMMNI
;
/* Mark every segment slot free (mode 0) with sequence number 0. */
for (i
= 0; i
< shminfo
.shmmni
; i
++) {
shmsegs
[i
].shm_perm
.mode
= 0;
shmsegs
[i
].shm_perm
.seq
= 0;
/*
 * Entry point for all SHM calls
 */
/*
 * NOTE(review): fragment of the shm syscall dispatcher; the enclosing
 * function header and the error return for the range check are not
 * visible here.
 */
/* Reject out-of-range sub-call numbers before indexing the table. */
if (uap
->which
>= sizeof(shmcalls
)/sizeof(shmcalls
[0]))
/* Dispatch; &uap[1] hands the remaining arguments to the handler. */
return ((*shmcalls
[uap
->which
])(p
, &uap
[1], retval
));
/*
 * Get a shared memory segment
 */
/*
 * NOTE(review): fragment of the shmget handler; the function header,
 * several return statements and closing braces are not visible in
 * this rendering.
 */
register struct shmget_args
*uap
;
register struct shmid_ds
*shp
;
register struct ucred
*cred
= p
->p_ucred
;
int error
, size
, rval
= 0;
register struct shmhandle
*shmh
;
/* look up the specified shm_id */
if (uap
->key
!= IPC_PRIVATE
) {
for (i
= 0; i
< shminfo
.shmmni
; i
++)
/* Match only slots that are allocated AND carry the requested key. */
if ((shmsegs
[i
].shm_perm
.mode
& SHM_ALLOC
) &&
shmsegs
[i
].shm_perm
.key
== uap
->key
) {
/* create a new shared segment if necessary */
if (i
== shminfo
.shmmni
) {
/* No match: without IPC_CREAT this is an error (return elided). */
if ((uap
->shmflg
& IPC_CREAT
) == 0)
/* Requested size must lie within [shmmin, shmmax]. */
if (uap
->size
< shminfo
.shmmin
|| uap
->size
> shminfo
.shmmax
)
/* Scan for a free (unallocated) segment slot. */
for (i
= 0; i
< shminfo
.shmmni
; i
++)
if ((shmsegs
[i
].shm_perm
.mode
& SHM_ALLOC
) == 0) {
/* Size in clicks, rounded; enforce the system-wide shmall limit. */
size
= clrnd(btoc(uap
->size
));
if (shmtot
+ size
> shminfo
.shmall
)
* We need to do a couple of things to ensure consistency
* in case we sleep in malloc(). We mark segment as
* allocated so that other shmgets() will not allocate it.
* We mark it as "destroyed" to insure that shmvalid() is
* false making most operations fail (XXX). We set the key,
* so that other shmget()s will fail.
shp
->shm_perm
.mode
= SHM_ALLOC
| SHM_DEST
;
shp
->shm_perm
.key
= uap
->key
;
/* Allocate the kernel-side handle; malloc may sleep (M_WAITOK). */
shmh
= (struct shmhandle
*)
malloc(sizeof(struct shmhandle
), M_SHM
, M_WAITOK
);
shmh
->shmh_id
= (caddr_t
)(0xc0000000|rval
); /* XXX */
/* Back the segment with anonymous memory in the shm submap. */
error
= vm_mmap(shm_map
, &shmh
->shmh_kva
, ctob(size
),
VM_PROT_ALL
, MAP_ANON
, shmh
->shmh_id
, 0);
/*
 * NOTE(review): this free() appears to be the vm_mmap() failure
 * path; the surrounding conditional is not visible here.
 */
free((caddr_t
)shmh
, M_SHM
);
/* Fill in ownership, mode, size and bookkeeping fields. */
shp
->shm_handle
= (void *) shmh
;
shp
->shm_perm
.cuid
= shp
->shm_perm
.uid
= cred
->cr_uid
;
shp
->shm_perm
.cgid
= shp
->shm_perm
.gid
= cred
->cr_gid
;
shp
->shm_perm
.mode
= SHM_ALLOC
| (uap
->shmflg
&0777);
shp
->shm_segsz
= uap
->size
;
shp
->shm_cpid
= p
->p_pid
;
shp
->shm_lpid
= shp
->shm_nattch
= 0;
shp
->shm_atime
= shp
->shm_dtime
= 0;
shp
->shm_ctime
= time
.tv_sec
;
/* XXX: probably not the right thing to do */
if (shp
->shm_perm
.mode
& SHM_DEST
)
/* Existing segment: verify access mode and requested size. */
if (error
= ipcaccess(&shp
->shm_perm
, uap
->shmflg
&0777, cred
))
if (uap
->size
&& uap
->size
> shp
->shm_segsz
)
/* IPC_CREAT together with IPC_EXCL on an existing key is an error. */
if ((uap
->shmflg
&IPC_CREAT
) && (uap
->shmflg
&IPC_EXCL
))
/* shmid encoding: sequence number * SHMMMNI + slot index. */
*retval
= shp
->shm_perm
.seq
* SHMMMNI
+ rval
;
/*
 * NOTE(review): fragment of the shmctl handler; the function header,
 * the cmd switch/labels and several returns are not visible here.
 */
register struct shmctl_args
*uap
;
register struct shmid_ds
*shp
;
register struct ucred
*cred
= p
->p_ucred
;
/* Validate the shmid and locate its segment slot. */
if (error
= shmvalid(uap
->shmid
))
shp
= &shmsegs
[uap
->shmid
% SHMMMNI
];
/* IPC_STAT path: needs read permission, then copies out the struct. */
if (error
= ipcaccess(&shp
->shm_perm
, IPC_R
, cred
))
return (copyout((caddr_t
)shp
, uap
->buf
, sizeof(*shp
)));
/* IPC_SET path: only root, the owner, or the creator may change it. */
if (cred
->cr_uid
&& cred
->cr_uid
!= shp
->shm_perm
.uid
&&
cred
->cr_uid
!= shp
->shm_perm
.cuid
)
if (error
= copyin(uap
->buf
, (caddr_t
)&sbuf
, sizeof sbuf
))
/* Apply uid/gid and the low 9 permission bits from the user copy. */
shp
->shm_perm
.uid
= sbuf
.shm_perm
.uid
;
shp
->shm_perm
.gid
= sbuf
.shm_perm
.gid
;
shp
->shm_perm
.mode
= (shp
->shm_perm
.mode
& ~0777)
| (sbuf
.shm_perm
.mode
& 0777);
shp
->shm_ctime
= time
.tv_sec
;
/* IPC_RMID path: same owner/creator check, then mark for destruction. */
if (cred
->cr_uid
&& cred
->cr_uid
!= shp
->shm_perm
.uid
&&
cred
->cr_uid
!= shp
->shm_perm
.cuid
)
/* Retire the key and flag SHM_DEST; actual free happens at last detach. */
shp
->shm_perm
.key
= IPC_PRIVATE
;
shp
->shm_perm
.mode
|= SHM_DEST
;
if (shp
->shm_nattch
<= 0)
/* don't really do anything, but make them think we did */
/* HP-UX compatibility stub (SHM_LOCK/SHM_UNLOCK, presumably). */
if ((p
->p_flag
& SHPUX
) == 0)
if (cred
->cr_uid
&& cred
->cr_uid
!= shp
->shm_perm
.uid
&&
cred
->cr_uid
!= shp
->shm_perm
.cuid
)
/*
 * Attach to shared memory segment.
 */
/*
 * NOTE(review): fragment of the shmat handler; the function header,
 * local declarations (shmd, uva, prot, flags, size) and several
 * returns/braces are not visible here.
 */
register struct shmat_args
*uap
;
register struct shmid_ds
*shp
;
* Allocate descriptors now (before validity check)
* in case malloc() blocks.
shmd
= (struct shmdesc
*)p
->p_vmspace
->vm_shm
;
/* First attach in this process: allocate and zero the descriptor array. */
size
= shminfo
.shmseg
* sizeof(struct shmdesc
);
shmd
= (struct shmdesc
*)malloc(size
, M_SHM
, M_WAITOK
);
bzero((caddr_t
)shmd
, size
);
p
->p_vmspace
->vm_shm
= (caddr_t
)shmd
;
/* Validate the shmid and locate the segment. */
if (error
= shmvalid(uap
->shmid
))
shp
= &shmsegs
[uap
->shmid
% SHMMMNI
];
if (shp
->shm_handle
== NULL
)
panic("shmat NULL handle");
/* Read-only attaches need IPC_R; writable attaches need IPC_R|IPC_W. */
if (error
= ipcaccess(&shp
->shm_perm
,
(uap
->shmflg
&SHM_RDONLY
) ? IPC_R
: IPC_R
|IPC_W
, p
->p_ucred
))
/* A user-supplied address must be SHMLBA-aligned unless SHM_RND. */
if (uva
&& ((int)uva
& (SHMLBA
-1))) {
if (uap
->shmflg
& SHM_RND
)
uva
= (caddr_t
) ((int)uva
& ~(SHMLBA
-1));
* Make sure user doesn't use more than their fair share
/* Find a free descriptor slot; fail if all shmseg slots are in use. */
for (size
= 0; size
< shminfo
.shmseg
; size
++) {
if (size
>= shminfo
.shmseg
)
size
= ctob(clrnd(btoc(shp
->shm_segsz
)));
if ((uap
->shmflg
& SHM_RDONLY
) == 0)
flags
= MAP_ANON
|MAP_SHARED
;
uva
= (caddr_t
)0x1000000; /* XXX */
/* Map the segment into the process address space. */
error
= vm_mmap(&p
->p_vmspace
->vm_map
, &uva
, (vm_size_t
)size
, prot
,
flags
, ((struct shmhandle
*)shp
->shm_handle
)->shmh_id
, 0);
/* Record the attach in the per-process descriptor. */
shmd
->shmd_uva
= (vm_offset_t
)uva
;
shmd
->shmd_id
= uap
->shmid
;
* Fill in the remaining fields
shp
->shm_lpid
= p
->p_pid
;
shp
->shm_atime
= time
.tv_sec
;
/*
 * Detach from shared memory segment.
 */
/*
 * NOTE(review): fragment of the shmdt handler; the function header
 * and part of the matching condition in the loop are not visible.
 */
register struct shmdesc
*shmd
;
shmd
= (struct shmdesc
*)p
->p_vmspace
->vm_shm
;
/* Scan the per-process descriptors for the given attach address. */
for (i
= 0; i
< shminfo
.shmseg
; i
++, shmd
++)
/* NOTE(review): the left-hand side of this condition is truncated. */
shmd
->shmd_uva
== (vm_offset_t
)uap
->shmaddr
)
/* Record this process as the last to operate on the segment. */
shmsegs
[shmd
->shmd_id
% SHMMMNI
].shm_lpid
= p
->p_pid
;
/*
 * NOTE(review): fragment of the fork hook that duplicates shm state
 * from parent p1 to child p2; the function header is not visible.
 */
register struct shmdesc
*shmd
;
* Copy parents descriptive information
size
= shminfo
.shmseg
* sizeof(struct shmdesc
);
shmd
= (struct shmdesc
*)malloc(size
, M_SHM
, M_WAITOK
);
bcopy((caddr_t
)p1
->p_vmspace
->vm_shm
, (caddr_t
)shmd
, size
);
p2
->p_vmspace
->vm_shm
= (caddr_t
)shmd
;
* Increment reference counts
/* Each inherited attach bumps the segment's attach count. */
for (size
= 0; size
< shminfo
.shmseg
; size
++, shmd
++)
shmsegs
[shmd
->shmd_id
% SHMMMNI
].shm_nattch
++;
/*
 * NOTE(review): fragment of the process-exit hook that releases all
 * shm attaches; the function header and the loop body (presumably a
 * per-descriptor detach call) are not visible here.
 */
register struct shmdesc
*shmd
;
shmd
= (struct shmdesc
*)p
->p_vmspace
->vm_shm
;
for (i
= 0; i
< shminfo
.shmseg
; i
++, shmd
++)
/* Free the descriptor array and clear the per-process pointer. */
free((caddr_t
)p
->p_vmspace
->vm_shm
, M_SHM
);
p
->p_vmspace
->vm_shm
= NULL
;
/*
 * NOTE(review): fragment of the shmid validation helper; the function
 * header and the return statements are not visible here.
 */
register struct shmid_ds
*shp
;
/* Reject negative ids and slot indices beyond the configured count. */
if (id
< 0 || (id
% SHMMMNI
) >= shminfo
.shmmni
)
shp
= &shmsegs
[id
% SHMMMNI
];
/* Valid iff the sequence matches and the slot is ALLOC but not DEST. */
if (shp
->shm_perm
.seq
== (id
/ SHMMMNI
) &&
(shp
->shm_perm
.mode
& (SHM_ALLOC
|SHM_DEST
)) == SHM_ALLOC
)
/*
 * Free user resources associated with a shared memory segment
 */
/*
 * NOTE(review): fragment of the per-attach teardown helper; the
 * function header and the descriptor-clearing code are not visible.
 */
register struct shmid_ds
*shp
;
shp
= &shmsegs
[shmd
->shmd_id
% SHMMMNI
];
/* Unmap the attach from the process address space. */
(void) vm_deallocate(&p
->p_vmspace
->vm_map
, shmd
->shmd_uva
,
ctob(clrnd(btoc(shp
->shm_segsz
))));
shp
->shm_dtime
= time
.tv_sec
;
/* Last detach of a segment marked SHM_DEST triggers its destruction. */
if (--shp
->shm_nattch
<= 0 && (shp
->shm_perm
.mode
& SHM_DEST
))
/*
 * Deallocate resources associated with a shared memory segment
 */
/*
 * NOTE(review): fragment of the segment-destruction helper; the
 * function header, the vm_deallocate() call that these arguments
 * belong to, and the mode/seq reset are not visible here.
 */
register struct shmid_ds
*shp
;
if (shp
->shm_handle
== NULL
)
* Lose our lingering object reference by deallocating space
* in kernel. Pager will also be deallocated as a side-effect.
((struct shmhandle
*)shp
->shm_handle
)->shmh_kva
,
ctob(clrnd(btoc(shp
->shm_segsz
))));
free((caddr_t
)shp
->shm_handle
, M_SHM
);
/* Return the segment's clicks to the global shmtot accounting. */
shmtot
-= clrnd(btoc(shp
->shm_segsz
));
* Increment the sequence number to ensure that outstanding
* shmids for this segment will be invalid in the event that
* the segment is reallocated. Note that shmids must be
* positive as decreed by SVID.
/* Wrap the sequence before seq * SHMMMNI would overflow to negative. */
if ((int)(shp
->shm_perm
.seq
* SHMMMNI
) < 0)
/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
/*
 * Permission check for a SysV IPC object against a credential.
 * NOTE(review): the "int mode;" K&R declaration, the permission-bit
 * shifts and the return statements are not visible in this fragment.
 */
ipcaccess(ipc
, mode
, cred
)
register struct ipc_perm
*ipc
;
register struct ucred
*cred
;
* Access check is based on only one of owner, group, public.
* If not owner, then check group.
* If not a member of the group, then check public access.
/* Neither owner nor creator: fall back to group, then other bits. */
if (cred
->cr_uid
!= ipc
->uid
&& cred
->cr_uid
!= ipc
->cuid
) {
if (!groupmember(ipc
->gid
, cred
) &&
!groupmember(ipc
->cgid
, cred
))