/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.8 (Berkeley) %G%
 */
/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
#include "../hpux/hpux.h"
int shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int shmfork(), shmexit();
struct	mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
/*
 * Entry point for all SHM calls
 */
	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	RETURN ((*shmcalls[uap->which])(p, &uap[1], retval));
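/*
 * Illustrative sketch (not from the original source): a user-level
 * call reaches these routines through the multiplexed shmsys(2) entry
 * above, with "which" indexing shmcalls[] (shmat = 0, shmctl = 1,
 * shmdt = 2, shmget = 3) and the handler's real arguments starting
 * at &uap[1], e.g.:
 *
 *	int id = shmget(key, 4096, IPC_CREAT|0644);
 *	char *va = shmat(id, (char *)0, 0);
 */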
/*
 * Get a shared memory segment
 */
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	int error, size, rval = 0;
	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;
	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
		if (!claligned(kva))
			panic("shmget: non-aligned memory");
		bzero(kva, (u_int)ctob(size));
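		/*
		 * M_WAITOK means the malloc() above may sleep; until the
		 * final permission bits are set below, a concurrent
		 * shmget() on this key sees SHM_ALLOC|SHM_DEST and fails
		 * as described in the preceding comment.
		 */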
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg & 0777);
		shp->shm_handle = (void *) kvtopte(kva);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg & 0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg & IPC_CREAT) && (uap->shmflg & IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
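/*
 * The returned shmid is a generation-stamped index: shmid % SHMMMNI
 * names the slot in shmsegs[] and shmid / SHMMMNI must equal the
 * slot's shm_perm.seq; shmvalid() below rejects stale ids once the
 * slot has been reused.
 */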
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
		return (error);
	return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));
	if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
	    cred->cr_uid != shp->shm_perm.cuid)
		return (EPERM);
	if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
		return (error);
	shp->shm_perm.uid = sbuf.shm_perm.uid;
	shp->shm_perm.gid = sbuf.shm_perm.gid;
	shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
		| (sbuf.shm_perm.mode & 0777);
	shp->shm_ctime = time.tv_sec;
	if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
	    cred->cr_uid != shp->shm_perm.cuid)
		return (EPERM);
	shp->shm_perm.key = IPC_PRIVATE;
	shp->shm_perm.mode |= SHM_DEST;
	if (shp->shm_nattch <= 0)
		shmfree(shp);
	/* don't really do anything, but make them think we did */
	if ((p->p_flag & SHPUX) == 0)
		return (EINVAL);
	if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
	    cred->cr_uid != shp->shm_perm.cuid)
		return (EPERM);
/*
 * Attach to shared memory segment.
 */
	register struct shmid_ds *shp;
	int error, prot, shmmapin();
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, u.u_cred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
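	/*
	 * SHMLBA is the attach-address alignment (a power of two), so
	 * the mask rounds uva down to the previous SHMLBA boundary;
	 * e.g. were SHMLBA 0x1000, 0x12345 would round to 0x12000.
	 */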
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	size = 0;
	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops)
			size++;
	if (size >= shminfo.shmseg)
		return (EMFILE);
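	/*
	 * shminfo.shmseg is the per-process attach limit; current
	 * attachments are counted by walking the process's mapped
	 * memory regions for entries using shmops.
	 */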
	/*
	 * Allocate a mapped memory region descriptor and
	 * attempt to expand the user page table to allow for region
	 */
	prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	error = mmalloc(p, uap->shmid, &uva, (segsz_t)size, prot, &shmops, &mp);
	if (error)
		return (error);
	if (error = mmmapin(p, mp, shmmapin)) {
		(void) mmfree(p, mp);
		return (error);
	}
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	return (0);
/*
 * Detach from shared memory segment.
 */
	register struct mapmem *mp;

	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr)
			break;
	if (mp == NULL)
		return (EINVAL);
	shmsegs[mp->mm_id % SHMMMNI].shm_lpid = p->p_pid;
	return (shmufree(p, mp));
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
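	/*
	 * shm_handle holds the kernel pte for the first page of the
	 * segment (set with kvtopte() in shmget() above); indexing it
	 * by btop(off) yields the page frame backing that offset.
	 */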
	if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
		return (-1);
	return (((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
/*
 * Increment attach count on fork
 */
	register struct mapmem *mp;

	shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
/*
 * Detach from shared memory segment on exit (or exec)
 */
	struct proc *p = u.u_procp;		/* XXX */

	return (shmufree(p, mp));
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return (EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return (0);
	return (EINVAL);
/*
 * Free user resources associated with a shared memory segment
 */
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
/*
 * Deallocate resources associated with a shared memory segment
 */
	register struct shmid_ds *shp;

	if (shp->shm_handle == NULL)
		panic("shmfree: NULL handle");
	kva = (caddr_t) ptetokv(shp->shm_handle);
	free(kva, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.mode = 0;
	if ((int)(++shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
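	/*
	 * Example: once seq * SHMMMNI would reach the sign bit of an
	 * int, a new shmid (seq * SHMMMNI + index) would be negative,
	 * so seq wraps back to zero instead.
	 */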
/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return (0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	m = mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m >>= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m >>= 3;
	}
	if ((ipc->mode & m) == m)
		return (0);
	return (EACCES);
}