 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	7.19 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"
int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int shmtot = 0;
-int shmfork(), shmexit();
-struct mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };
+/*
+ * Per process internal structure for managing segments.
+ * Each process using shm will have an array of ``shmseg'' of these.
+ */
+struct shmdesc {
+ vm_offset_t shmd_uva;
+ int shmd_id;
+};
+
+/*
+ * Per segment internal structure (shm_handle).
+ */
+struct shmhandle {
+ vm_offset_t shmh_kva;
+ caddr_t shmh_id;
+};
+
+vm_map_t shm_map; /* address space for shared memory segments */
shminit()
{
register int i;
+ vm_offset_t whocares1, whocares2;
+ shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
+ shminfo.shmall * NBPG, FALSE);
if (shminfo.shmmni > SHMMMNI)
shminfo.shmmni = SHMMMNI;
for (i = 0; i < shminfo.shmmni; i++) {
/*
* Entry point for all SHM calls
*/
+struct shmsys_args {
+ u_int which;
+};
shmsys(p, uap, retval)
struct proc *p;
- struct args {
- int which;
- } *uap;
+ struct shmsys_args *uap;
int *retval;
{
if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
- RETURN (EINVAL);
- RETURN ((*shmcalls[uap->which])(p, &uap[1], retval));
+ return (EINVAL);
+ return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
/*
* Get a shared memory segment
*/
+struct shmget_args {
+ key_t key;
+ int size;
+ int shmflg;
+};
shmget(p, uap, retval)
struct proc *p;
- register struct args {
- key_t key;
- int size;
- int shmflg;
- } *uap;
+ register struct shmget_args *uap;
int *retval;
{
register struct shmid_ds *shp;
- register struct ucred *cred = u.u_cred;
+ register struct ucred *cred = p->p_ucred;
register int i;
int error, size, rval = 0;
- caddr_t kva;
+ register struct shmhandle *shmh;
/* look up the specified shm_id */
if (uap->key != IPC_PRIVATE) {
*/
shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
shp->shm_perm.key = uap->key;
- kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
- if (kva == NULL) {
+ shmh = (struct shmhandle *)
+ malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
+ shmh->shmh_kva = 0;
+ shmh->shmh_id = (caddr_t)(0xc0000000|rval); /* XXX */
+ error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
+ VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
+ if (error) {
+ free((caddr_t)shmh, M_SHM);
shp->shm_perm.mode = 0;
- return (ENOMEM);
+ return(ENOMEM);
}
- if (!claligned(kva))
- panic("shmget: non-aligned memory");
- bzero(kva, (u_int)ctob(size));
+ shp->shm_handle = (void *) shmh;
shmtot += size;
shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
- shp->shm_handle = (void *) kvtopte(kva);
shp->shm_segsz = uap->size;
shp->shm_cpid = p->p_pid;
shp->shm_lpid = shp->shm_nattch = 0;
/* XXX: probably not the right thing to do */
if (shp->shm_perm.mode & SHM_DEST)
return (EBUSY);
- if (error = ipcaccess(cred, &shp->shm_perm, uap->shmflg&0777))
+ if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
return (error);
if (uap->size && uap->size > shp->shm_segsz)
return (EINVAL);
return (EEXIST);
}
*retval = shp->shm_perm.seq * SHMMMNI + rval;
+ return (0);
}
/*
* Shared memory control
*/
+struct shmctl_args {
+ int shmid;
+ int cmd;
+ caddr_t buf;
+};
/* ARGSUSED */
shmctl(p, uap, retval)
struct proc *p;
- register struct args {
- int shmid;
- int cmd;
- caddr_t buf;
- } *uap;
+ register struct shmctl_args *uap;
int *retval;
{
register struct shmid_ds *shp;
- register struct ucred *cred = u.u_cred;
+ register struct ucred *cred = p->p_ucred;
struct shmid_ds sbuf;
int error;
shp = &shmsegs[uap->shmid % SHMMMNI];
switch (uap->cmd) {
case IPC_STAT:
- if (error = ipcaccess(cred, &shp->shm_perm, IPC_R))
+ if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
return (error);
return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));
shmfree(shp);
break;
-#ifdef HPUXCOMPAT
- case SHM_LOCK:
- case SHM_UNLOCK:
- /* don't really do anything, but make them think we did */
- if ((p->p_flag & SHPUX) == 0)
- return (EINVAL);
- if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
- cred->cr_uid != shp->shm_perm.cuid)
- return (EPERM);
- break;
-#endif
-
default:
return (EINVAL);
}
/*
* Attach to shared memory segment.
*/
+struct shmat_args {
+ int shmid;
+ caddr_t shmaddr;
+ int shmflg;
+};
shmat(p, uap, retval)
struct proc *p;
- register struct args {
- int shmid;
- caddr_t shmaddr;
- int shmflg;
- } *uap;
+ register struct shmat_args *uap;
int *retval;
{
register struct shmid_ds *shp;
register int size;
- struct mapmem *mp;
caddr_t uva;
- int error, error1, prot, shmmapin();
+ int error;
+ int flags;
+ vm_prot_t prot;
+ struct shmdesc *shmd;
+ /*
+ * Allocate descriptors now (before validity check)
+ * in case malloc() blocks.
+ */
+ shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
+ size = shminfo.shmseg * sizeof(struct shmdesc);
+ if (shmd == NULL) {
+ shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
+ bzero((caddr_t)shmd, size);
+ p->p_vmspace->vm_shm = (caddr_t)shmd;
+ }
if (error = shmvalid(uap->shmid))
return (error);
shp = &shmsegs[uap->shmid % SHMMMNI];
if (shp->shm_handle == NULL)
panic("shmat NULL handle");
- if (error = ipcaccess(u.u_cred, &shp->shm_perm,
- (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
+ if (error = ipcaccess(&shp->shm_perm,
+ (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
return (error);
uva = uap->shmaddr;
if (uva && ((int)uva & (SHMLBA-1))) {
/*
* Make sure user doesn't use more than their fair share
*/
- size = 0;
- for (mp = u.u_mmap; mp; mp = mp->mm_next)
- if (mp->mm_ops == &shmops)
- size++;
+ for (size = 0; size < shminfo.shmseg; size++) {
+ if (shmd->shmd_uva == 0)
+ break;
+ shmd++;
+ }
if (size >= shminfo.shmseg)
return (EMFILE);
- /*
- * Allocate a mapped memory region descriptor and
- * attempt to expand the user page table to allow for region
- */
- prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
-#if defined(hp300)
- prot |= MM_CI;
-#endif
size = ctob(clrnd(btoc(shp->shm_segsz)));
- error = mmalloc(p, uap->shmid, &uva, (segsz_t)size, prot, &shmops, &mp);
+ prot = VM_PROT_READ;
+ if ((uap->shmflg & SHM_RDONLY) == 0)
+ prot |= VM_PROT_WRITE;
+ flags = MAP_ANON|MAP_SHARED;
+ if (uva)
+ flags |= MAP_FIXED;
+ else
+ uva = (caddr_t)0x1000000; /* XXX */
+ error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
+ (vm_size_t)size, prot, flags,
+ ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
if (error)
- return (error);
- if (error = mmmapin(p, mp, shmmapin)) {
- if (error1 = mmfree(p, mp))
- return (error1);
- return (error);
- }
+ return(error);
+ shmd->shmd_uva = (vm_offset_t)uva;
+ shmd->shmd_id = uap->shmid;
/*
* Fill in the remaining fields
*/
shp->shm_atime = time.tv_sec;
shp->shm_nattch++;
*retval = (int) uva;
+ return (0);
}
/*
* Detach from shared memory segment.
*/
+struct shmdt_args {
+ caddr_t shmaddr;
+};
/* ARGSUSED */
shmdt(p, uap, retval)
struct proc *p;
- struct args {
- caddr_t shmaddr;
- } *uap;
+ struct shmdt_args *uap;
int *retval;
{
- register struct mapmem *mp;
+ register struct shmdesc *shmd;
+ register int i;
- for (mp = u.u_mmap; mp; mp = mp->mm_next)
- if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr)
+ shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
+ for (i = 0; i < shminfo.shmseg; i++, shmd++)
+ if (shmd->shmd_uva &&
+ shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
break;
- if (mp == MMNIL)
- return (EINVAL);
- shmsegs[mp->mm_id % SHMMMNI].shm_lpid = p->p_pid;
- return (shmufree(p, mp));
+ if (i == shminfo.shmseg)
+ return(EINVAL);
+ shmufree(p, shmd);
+ shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
}
-shmmapin(mp, off)
- struct mapmem *mp;
+shmfork(p1, p2, isvfork)
+ struct proc *p1, *p2;
+ int isvfork;
{
- register struct shmid_ds *shp;
-
- shp = &shmsegs[mp->mm_id % SHMMMNI];
- if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
- return(-1);
- return(((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
-}
+ register struct shmdesc *shmd;
+ register int size;
-/*
- * Increment attach count on fork
- */
-/* ARGSUSED */
-shmfork(mp, ischild)
- register struct mapmem *mp;
-{
- if (!ischild)
- shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
+ /*
+ * Copy parents descriptive information
+ */
+ size = shminfo.shmseg * sizeof(struct shmdesc);
+ shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
+ bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
+ p2->p_vmspace->vm_shm = (caddr_t)shmd;
+ /*
+ * Increment reference counts
+ */
+ for (size = 0; size < shminfo.shmseg; size++, shmd++)
+ if (shmd->shmd_uva)
+ shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}
-/*
- * Detach from shared memory segment on exit (or exec)
- */
-shmexit(p, mp)
+shmexit(p)
struct proc *p;
- struct mapmem *mp;
{
+ register struct shmdesc *shmd;
+ register int i;
- return (shmufree(p, mp));
+ shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
+ for (i = 0; i < shminfo.shmseg; i++, shmd++)
+ if (shmd->shmd_uva)
+ shmufree(p, shmd);
+ free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
+ p->p_vmspace->vm_shm = NULL;
}
shmvalid(id)
/*
* Free user resources associated with a shared memory segment
*/
-shmufree(p, mp)
+shmufree(p, shmd)
struct proc *p;
- struct mapmem *mp;
+ struct shmdesc *shmd;
{
register struct shmid_ds *shp;
- int error;
- shp = &shmsegs[mp->mm_id % SHMMMNI];
- mmmapout(p, mp);
- error = mmfree(p, mp);
+ shp = &shmsegs[shmd->shmd_id % SHMMMNI];
+ (void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
+ ctob(clrnd(btoc(shp->shm_segsz))));
+ shmd->shmd_id = 0;
+ shmd->shmd_uva = 0;
shp->shm_dtime = time.tv_sec;
if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
shmfree(shp);
- return (error);
}
/*
shmfree(shp)
register struct shmid_ds *shp;
{
- caddr_t kva;
if (shp->shm_handle == NULL)
panic("shmfree");
- kva = (caddr_t) ptetokv(shp->shm_handle);
- free(kva, M_SHM);
+ /*
+ * Lose our lingering object reference by deallocating space
+ * in kernel. Pager will also be deallocated as a side-effect.
+ */
+ vm_deallocate(shm_map,
+ ((struct shmhandle *)shp->shm_handle)->shmh_kva,
+ ctob(clrnd(btoc(shp->shm_segsz))));
+ free((caddr_t)shp->shm_handle, M_SHM);
shp->shm_handle = NULL;
shmtot -= clrnd(btoc(shp->shm_segsz));
shp->shm_perm.mode = 0;
return (0);
return (EACCES);
}
-
#endif /* SYSVSHM */