From 6f843dc9de6f8fae826f48ba2ad56319888ff62f Mon Sep 17 00:00:00 2001 From: Kirk McKusick Date: Wed, 9 May 1990 06:27:56 -0800 Subject: [PATCH] merge in hp300 support from Utah SCCS-vsn: sys/kern/sysv_shm.c 7.1 SCCS-vsn: sys/sys/ipc.h 7.1 SCCS-vsn: sys/sys/shm.h 7.1 SCCS-vsn: sys/hp300/stand/scsivar.h 7.1 --- usr/src/sys/hp300/stand/scsivar.h | 23 ++ usr/src/sys/kern/sysv_shm.c | 444 ++++++++++++++++++++++++++++++ usr/src/sys/sys/ipc.h | 48 ++++ usr/src/sys/sys/shm.h | 68 +++++ 4 files changed, 583 insertions(+) create mode 100644 usr/src/sys/hp300/stand/scsivar.h create mode 100644 usr/src/sys/kern/sysv_shm.c create mode 100644 usr/src/sys/sys/ipc.h create mode 100644 usr/src/sys/sys/shm.h diff --git a/usr/src/sys/hp300/stand/scsivar.h b/usr/src/sys/hp300/stand/scsivar.h new file mode 100644 index 0000000000..676d864871 --- /dev/null +++ b/usr/src/sys/hp300/stand/scsivar.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1982, 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Van Jacobson of Lawrence Berkeley Laboratory and the Systems + * Programming Group of the University of Utah Computer Science Department. + * + * %sccs.include.redist.c% + * + * @(#)scsivar.h 7.1 (Berkeley) %G% + */ + +struct scsi_softc { + int sc_ba; + char *sc_addr; + char sc_alive; + char sc_scsi_addr; + char sc_stat; + char sc_msg; +}; + +extern struct scsi_softc scsi_softc[]; diff --git a/usr/src/sys/kern/sysv_shm.c b/usr/src/sys/kern/sysv_shm.c new file mode 100644 index 0000000000..ab87f795e8 --- /dev/null +++ b/usr/src/sys/kern/sysv_shm.c @@ -0,0 +1,444 @@ +/* + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. Originally from University of Wisconsin. + * + * %sccs.include.redist.c% + * + * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$ + * + * @(#)sysv_shm.c 7.1 (Berkeley) %G% + */ + +/* + * System V shared memory routines. 
+ */ + +#ifdef SYSVSHM + +#include "machine/pte.h" + +#include "param.h" +#include "systm.h" +#include "user.h" +#include "kernel.h" +#include "proc.h" +#include "vm.h" +#include "shm.h" +#include "mapmem.h" +#include "malloc.h" + +#ifdef HPUXCOMPAT +#include "../hpux/hpux.h" +#endif + +int shmat(), shmctl(), shmdt(), shmget(); +int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget }; +int shmtot = 0; + +int shmfork(), shmexit(); +struct mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit }; + +shminit() +{ + register int i; + + if (shminfo.shmmni > SHMMMNI) + shminfo.shmmni = SHMMMNI; + for (i = 0; i < shminfo.shmmni; i++) { + shmsegs[i].shm_perm.mode = 0; + shmsegs[i].shm_perm.seq = 0; + } +} + +/* entry point for all SHM calls */ +shmsys() +{ + struct a { + int which; + } *uap = (struct a *)u.u_ap; + + if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) { + u.u_error = EINVAL; + return; + } + (*shmcalls[uap->which])(u.u_ap+1); +} + +/* get a shared memory segment */ +shmget(ap) + int *ap; +{ + register struct a { + key_t key; + int size; + int shmflg; + } *uap = (struct a *)ap; + register struct shmid_ds *shp; + register int i; + int rval = 0, size; + caddr_t kva; + + /* look up the specified shm_id */ + if (uap->key != IPC_PRIVATE) { + for (i = 0; i < shminfo.shmmni; i++) + if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) && + shmsegs[i].shm_perm.key == uap->key) { + rval = i; + break; + } + } else + i = shminfo.shmmni; + + /* create a new shared segment if necessary */ + if (i == shminfo.shmmni) { + if ((uap->shmflg & IPC_CREAT) == 0) { + u.u_error = ENOENT; + return; + } + if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) { + u.u_error = EINVAL; + return; + } + for (i = 0; i < shminfo.shmmni; i++) + if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) { + rval = i; + break; + } + if (i == shminfo.shmmni) { + u.u_error = ENOSPC; + return; + } + size = clrnd(btoc(uap->size)); + if (shmtot + size > shminfo.shmall) { + u.u_error = ENOMEM; + return; + } + shp = &shmsegs[rval]; + /* + * We need to do a couple of things to ensure consistency + * in case we sleep in malloc(). We mark segment as + * allocated so that other shmgets() will not allocate it. + * We mark it as "destroyed" to insure that shmvalid() is + * false making most operations fail (XXX). We set the key, + * so that other shmget()s will fail. 
+ */ + shp->shm_perm.mode = SHM_ALLOC | SHM_DEST; + shp->shm_perm.key = uap->key; + kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK); + if (kva == NULL) { + shp->shm_perm.mode = 0; + u.u_error = ENOMEM; + return; + } + if (!claligned(kva)) + panic("shmget: non-aligned memory"); + bzero(kva, (u_int)ctob(size)); + shmtot += size; + shp->shm_perm.cuid = shp->shm_perm.uid = u.u_uid; + shp->shm_perm.cgid = shp->shm_perm.gid = u.u_gid; + shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777); + shp->shm_handle = (void *) kvtopte(kva); + shp->shm_segsz = uap->size; + shp->shm_cpid = u.u_procp->p_pid; + shp->shm_lpid = shp->shm_nattch = 0; + shp->shm_atime = shp->shm_dtime = 0; + shp->shm_ctime = time.tv_sec; + } else { + shp = &shmsegs[rval]; + /* XXX: probably not the right thing to do */ + if (shp->shm_perm.mode & SHM_DEST) { + u.u_error = EBUSY; + return; + } + if (!ipcaccess(&shp->shm_perm, uap->shmflg&0777)) + return; + if (uap->size && uap->size > shp->shm_segsz) { + u.u_error = EINVAL; + return; + } + if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL)) { + u.u_error = EEXIST; + return; + } + } + u.u_r.r_val1 = shp->shm_perm.seq * SHMMMNI + rval; +} + +/* shared memory control */ +shmctl(ap) + int *ap; +{ + register struct a { + int shmid; + int cmd; + caddr_t buf; + } *uap = (struct a *)ap; + register struct shmid_ds *shp; + struct shmid_ds sbuf; + + if (!shmvalid(uap->shmid)) + return; + shp = &shmsegs[uap->shmid % SHMMMNI]; + switch (uap->cmd) { + case IPC_STAT: + if (ipcaccess(&shp->shm_perm, IPC_R)) + u.u_error = + copyout((caddr_t)shp, uap->buf, sizeof(*shp)); + break; + + case IPC_SET: + if (u.u_uid && u.u_uid != shp->shm_perm.uid && + u.u_uid != shp->shm_perm.cuid) { + u.u_error = EPERM; + break; + } + u.u_error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf); + if (!u.u_error) { + shp->shm_perm.uid = sbuf.shm_perm.uid; + shp->shm_perm.gid = sbuf.shm_perm.gid; + shp->shm_perm.mode = (shp->shm_perm.mode & ~0777) + | (sbuf.shm_perm.mode & 0777); + shp->shm_ctime = time.tv_sec; + } + break; + + case IPC_RMID: + if (u.u_uid && u.u_uid != shp->shm_perm.uid && + u.u_uid != shp->shm_perm.cuid) { + u.u_error = EPERM; + break; + } + /* set ctime? */ + shp->shm_perm.key = IPC_PRIVATE; + shp->shm_perm.mode |= SHM_DEST; + if (shp->shm_nattch <= 0) + shmfree(shp); + break; + +#ifdef HPUXCOMPAT + case SHM_LOCK: + case SHM_UNLOCK: + /* don't really do anything, but make them think we did */ + if ((u.u_procp->p_flag & SHPUX) == 0) + u.u_error = EINVAL; + else if (u.u_uid && u.u_uid != shp->shm_perm.uid && + u.u_uid != shp->shm_perm.cuid) + u.u_error = EPERM; + break; +#endif + + default: + u.u_error = EINVAL; + break; + } +} + +shmat(ap) + int *ap; +{ + struct a { + int shmid; + caddr_t shmaddr; + int shmflg; + } *uap = (struct a *)ap; + register struct shmid_ds *shp; + register int size; + struct mapmem *mp; + caddr_t uva; + int prot, shmmapin(); + + if (!shmvalid(uap->shmid)) + return; + shp = &shmsegs[uap->shmid % SHMMMNI]; + if (shp->shm_handle == NULL) + panic("shmat NULL ptbl"); + if (!ipcaccess(&shp->shm_perm, + (uap->shmflg&SHM_RDONLY) ? 
IPC_R : IPC_R|IPC_W)) + return; + uva = uap->shmaddr; + if (uva && ((int)uva & (SHMLBA-1))) { + if (uap->shmflg & SHM_RND) + uva = (caddr_t) ((int)uva & ~(SHMLBA-1)); + else { + u.u_error = EINVAL; + return; + } + } + /* + * Make sure user doesn't use more than their fair share + */ + size = 0; + for (mp = u.u_mmap; mp; mp = mp->mm_next) + if (mp->mm_ops == &shmops) + size++; + if (size >= shminfo.shmseg) { + u.u_error = EMFILE; + return; + } + /* + * Allocate a mapped memory region descriptor and + * attempt to expand the user page table to allow for region + */ + prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW; +#if defined(hp300) + prot |= MM_CI; +#endif + size = ctob(clrnd(btoc(shp->shm_segsz))); + mp = mmalloc(uap->shmid, &uva, (size_t)size, prot, &shmops); + if (mp == MMNIL) + return; + if (!mmmapin(mp, shmmapin)) { + mmfree(mp); + return; + } + /* + * Fill in the remaining fields + */ + shp->shm_lpid = u.u_procp->p_pid; + shp->shm_atime = time.tv_sec; + shp->shm_nattch++; + u.u_r.r_val1 = (int) uva; +} + +shmdt(ap) + int *ap; +{ + register struct a { + caddr_t shmaddr; + } *uap = (struct a *)ap; + register struct mapmem *mp; + + for (mp = u.u_mmap; mp; mp = mp->mm_next) + if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr) + break; + if (mp == MMNIL) { + u.u_error = EINVAL; + return; + } + shmsegs[mp->mm_id % SHMMMNI].shm_lpid = u.u_procp->p_pid; + shmufree(mp); +} + +shmmapin(mp, off) + struct mapmem *mp; +{ + register struct shmid_ds *shp; + + shp = &shmsegs[mp->mm_id % SHMMMNI]; + if (off >= ctob(clrnd(btoc(shp->shm_segsz)))) + return(-1); + return(((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum); +} + +/* + * Increment attach count on fork + */ +shmfork(mp, ischild) + register struct mapmem *mp; +{ + if (!ischild) + shmsegs[mp->mm_id % SHMMMNI].shm_nattch++; +} + +/* + * Detach from shared memory segment on exit (or exec) + */ +shmexit(mp) + register struct mapmem *mp; +{ + shmufree(mp); +} + +shmvalid(id) + register int id; +{ + register struct shmid_ds *shp; + + if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni) + return(0); + shp = &shmsegs[id % SHMMMNI]; + if (shp->shm_perm.seq == (id / SHMMMNI) && + (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC) + return(1); + u.u_error = EINVAL; + return(0); +} + +/* + * Free user resources associated with a shared memory segment + */ +shmufree(mp) + struct mapmem *mp; +{ + register struct shmid_ds *shp; + + shp = &shmsegs[mp->mm_id % SHMMMNI]; + mmmapout(mp); + mmfree(mp); + shp->shm_dtime = time.tv_sec; + if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST)) + shmfree(shp); +} + +/* + * Deallocate resources associated with a shared memory segment + */ +shmfree(shp) + register struct shmid_ds *shp; +{ + caddr_t kva; + + if (shp->shm_handle == NULL) + panic("shmfree"); + kva = (caddr_t) ptetokv(shp->shm_handle); + free(kva, M_SHM); + shp->shm_handle = NULL; + shmtot -= clrnd(btoc(shp->shm_segsz)); + shp->shm_perm.mode = 0; + /* + * Increment the sequence number to ensure that outstanding + * shmids for this segment will be invalid in the event that + * the segment is reallocated. Note that shmids must be + * positive as decreed by SVID. + */ + shp->shm_perm.seq++; + if ((int)(shp->shm_perm.seq * SHMMMNI) < 0) + shp->shm_perm.seq = 0; +} + +/* + * XXX This routine would be common to all sysV style IPC + * (if the others were implemented). 
+ */
+ipcaccess(ipc, mode)
+	register struct ipc_perm *ipc;
+{
+	register int m;
+
+	if (u.u_uid == 0)
+		return(1);
+	/*
+	 * Access check is based on only one of owner, group, public.
+	 * If not owner, then check group.
+	 * If not a member of the group, then check public access.
+	 */
+	mode &= 0700;
+	m = ipc->mode;
+	if (u.u_uid != ipc->uid && u.u_uid != ipc->cuid) {
+		m <<= 3;
+		if (!groupmember(ipc->gid, u.u_cred) &&
+		    !groupmember(ipc->cgid, u.u_cred))
+			m <<= 3;
+	}
+	if ((mode&m) == mode)
+		return (1);
+	u.u_error = EACCES;
+	return (0);
+}
+
+#endif /* SYSVSHM */
diff --git a/usr/src/sys/sys/ipc.h b/usr/src/sys/sys/ipc.h
new file mode 100644
index 0000000000..bf04f613fa
--- /dev/null
+++ b/usr/src/sys/sys/ipc.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * %sccs.include.redist.c%
+ *
+ *	@(#)ipc.h	7.1 (Berkeley) %G%
+ */
+
+/*
+ * SVID compatible ipc.h file
+ */
+#ifndef _IPC_
+#define _IPC_
+
+typedef long key_t;	/* XXX should be in types.h */
+
+struct ipc_perm {
+	ushort	cuid;	/* creator user id */
+	ushort	cgid;	/* creator group id */
+	ushort	uid;	/* user id */
+	ushort	gid;	/* group id */
+	ushort	mode;	/* r/w permission */
+	ushort	seq;	/* sequence # (to generate unique msg/sem/shm id) */
+	key_t	key;	/* user specified msg/sem/shm key */
+};
+
+/* common mode bits */
+#define	IPC_R		00400	/* read permission */
+#define	IPC_W		00200	/* write/alter permission */
+
+/* SVID required constants (same values as system 5) */
+#define	IPC_CREAT	01000	/* create entry if key does not exist */
+#define	IPC_EXCL	02000	/* fail if key exists */
+#define	IPC_NOWAIT	04000	/* error if request must wait */
+
+#define	IPC_PRIVATE	(key_t)0 /* private key */
+
+#define	IPC_RMID	0	/* remove identifier */
+#define	IPC_SET		1	/* set options */
+#define	IPC_STAT	2	/* get options */
+
+#endif /* _IPC_ */
diff --git a/usr/src/sys/sys/shm.h b/usr/src/sys/sys/shm.h
new file mode 100644
index 0000000000..db51e22338
--- /dev/null
+++ b/usr/src/sys/sys/shm.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * %sccs.include.redist.c%
+ *
+ *	@(#)shm.h	7.1 (Berkeley) %G%
+ */
+
+/*
+ * SVID compatible shm.h file
+ */
+#ifndef _SHM_
+#define _SHM_
+
+#ifdef KERNEL
+#include "ipc.h"
+#else
+#include <sys/ipc.h>
+#endif
+
+struct shmid_ds {
+	struct	ipc_perm shm_perm;	/* operation perms */
+	int	shm_segsz;		/* size of segment (bytes) */
+	ushort	shm_cpid;		/* pid, creator */
+	ushort	shm_lpid;		/* pid, last operation */
+	short	shm_nattch;		/* no. of current attaches */
+	time_t	shm_atime;		/* last attach time */
+	time_t	shm_dtime;		/* last detach time */
+	time_t	shm_ctime;		/* last change time */
+	void	*shm_handle;		/* internal handle for shm segment */
+};
+
+/*
+ * System 5 style catch-all structure for shared memory constants that
+ * might be of interest to user programs.  Do we really want/need this?
+ */ +struct shminfo { + int shmmax; /* max shared memory segment size (bytes) */ + int shmmin; /* min shared memory segment size (bytes) */ + int shmmni; /* max number of shared memory identifiers */ + int shmseg; /* max shared memory segments per process */ + int shmall; /* max amount of shared memory (pages) */ +}; + +/* internal "mode" bits */ +#define SHM_ALLOC 01000 /* segment is allocated */ +#define SHM_DEST 02000 /* segment will be destroyed on last detach */ + +/* SVID required constants (same values as system 5) */ +#define SHM_RDONLY 010000 /* read-only access */ +#define SHM_RND 020000 /* round attach address to SHMLBA boundary */ + +/* implementation constants */ +#define SHMLBA CLBYTES /* segment low boundary address multiple */ +#define SHMMMNI 512 /* maximum value for shminfo.shmmni */ + +#ifdef KERNEL +struct shmid_ds *shmsegs; +struct shminfo shminfo; +#endif + +#endif /* _SHM_ */ -- 2.20.1
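
The four calls multiplexed through shmsys() above (shmget, shmat, shmdt,
shmctl) are the same System V shared memory interface that POSIX later
standardized, so the segment life cycle this patch implements can still be
exercised from a small user program. A minimal sketch follows; it assumes a
modern libc that declares the four calls individually in <sys/ipc.h> and
<sys/shm.h>, whereas the 1990 kernel in this patch dispatches them all
through the single shmsys() entry point.

#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char *p;
	int id;

	/*
	 * IPC_PRIVATE bypasses the key lookup in shmget(), so a fresh
	 * slot is always allocated; the low 9 bits of the flags become
	 * shm_perm.mode, which ipcaccess() checks on later attaches.
	 */
	id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return (1);
	}

	/* A null shmaddr lets the kernel choose the attach address. */
	p = shmat(id, 0, 0);
	if (p == (char *)-1) {
		perror("shmat");
		return (1);
	}
	(void) strcpy(p, "hello from System V shm");

	/*
	 * IPC_RMID sets SHM_DEST; the segment is freed only when
	 * shm_nattch drops to zero, so the mapping stays usable
	 * until the final detach.
	 */
	if (shmctl(id, IPC_RMID, 0) < 0)
		perror("shmctl");

	(void) printf("%s\n", p);
	(void) shmdt(p);
	return (0);
}

One design point worth noting: the id returned by shmget() encodes a
generation as well as a slot, shm_perm.seq * SHMMMNI + slot. Because
shmfree() increments the sequence number when a segment is deallocated, a
stale id held across a destroy and reuse of the slot fails the seq
comparison in shmvalid() rather than silently naming the new segment.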