| 1 | /* |
| 2 | * Copyright (c) 1988 University of Utah. |
| 3 | * Copyright (c) 1990 The Regents of the University of California. |
| 4 | * All rights reserved. |
| 5 | * |
| 6 | * This code is derived from software contributed to Berkeley by |
| 7 | * the Systems Programming Group of the University of Utah Computer |
| 8 | * Science Department. Originally from University of Wisconsin. |
| 9 | * |
| 10 | * %sccs.include.redist.c% |
| 11 | * |
| 12 | * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$ |
| 13 | * |
| 14 | * @(#)sysv_shm.c 7.18 (Berkeley) %G% |
| 15 | */ |
| 16 | |
| 17 | /* |
| 18 | * System V shared memory routines. |
| 19 | * TEMPORARY, until mmap is in place; |
| 20 | * needed now for HP-UX compatibility and X server (yech!). |
| 21 | */ |
| 22 | |
| 23 | #ifdef SYSVSHM |
| 24 | |
| 25 | #include "param.h" |
| 26 | #include "systm.h" |
| 27 | #include "kernel.h" |
| 28 | #include "proc.h" |
| 29 | #include "shm.h" |
| 30 | #include "malloc.h" |
| 31 | #include "mman.h" |
| 32 | #include "vm/vm.h" |
| 33 | #include "vm/vm_kern.h" |
| 34 | #include "vm/vm_inherit.h" |
| 35 | #include "vm/vm_pager.h" |
| 36 | |
| 37 | int shmat(), shmctl(), shmdt(), shmget(); |
| 38 | int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget }; |
| 39 | int shmtot = 0; |
| 40 | |
| 41 | /* |
| 42 | * Per process internal structure for managing segments. |
| 43 | * Each process using shm will have an array of ``shmseg'' of these. |
| 44 | */ |
struct shmdesc {
	vm_offset_t	shmd_uva;	/* user VA of the attach; 0 marks the slot free */
	int		shmd_id;	/* shmid attached at this slot */
};
| 49 | |
| 50 | /* |
| 51 | * Per segment internal structure (shm_handle). |
| 52 | */ |
struct shmhandle {
	vm_offset_t	shmh_kva;	/* kernel VA of the segment in shm_map */
	caddr_t		shmh_id;	/* pager id: 0xc0000000 | segment index (XXX) */
};
| 57 | |
| 58 | vm_map_t shm_map; /* address space for shared memory segments */ |
| 59 | |
/*
 * One-time initialization of the shared memory subsystem.
 * Reserves a kernel submap big enough for shmall pages of segments,
 * clamps the configured segment count to the static table size,
 * and marks every segment slot free.
 */
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	/* the returned start/end addresses are not needed, only the map */
	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		/* mode 0 (no SHM_ALLOC) marks the slot free; seq 0 starts ids */
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}
| 74 | |
| 75 | /* |
| 76 | * Entry point for all SHM calls |
| 77 | */ |
/*
 * Entry point for all SHM calls.
 * Demultiplexes to shmat/shmctl/shmdt/shmget via shmcalls[] based on
 * the leading code word.  The selected call's own arguments follow
 * the code word in the argument block, hence the &uap[1] passed down.
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;	/* index into shmcalls[] */
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
| 90 | |
| 91 | /* |
| 92 | * Get a shared memory segment |
| 93 | */ |
/*
 * Get a shared memory segment.
 * Looks up an existing segment by key, or (with IPC_CREAT, or always
 * for IPC_PRIVATE) allocates a fresh slot, kernel VA and anonymous
 * backing for it.  On success *retval is the user-visible shmid,
 * which encodes both the slot index and the slot's generation
 * (seq * SHMMMNI + slot).
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		/* IPC_PRIVATE never matches: force the "not found" path */
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		/* find a free slot */
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		/* size in clicks, rounded; enforce the system-wide total */
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		/* map anonymous memory into the kernel submap for this segment */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			/* back out: release handle and free the slot */
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		/* creator is also the initial owner */
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		/* clearing SHM_DEST here makes the segment valid */
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		/* requested size may not exceed the existing segment */
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
| 183 | |
| 184 | /* |
| 185 | * Shared memory control |
| 186 | */ |
| 187 | /* ARGSUSED */ |
/*
 * Shared memory control.
 * IPC_STAT copies the kernel shmid_ds out to the user (requires read
 * permission); IPC_SET updates owner/group/mode (owner or root only);
 * IPC_RMID marks the segment for destruction, freeing it immediately
 * if nothing is attached (owner or root only).
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		/* only root, the owner, or the creator may change it */
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		/* only uid, gid and the low 9 mode bits are settable */
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		/* only root, the owner, or the creator may remove it */
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		/* hide the key and defer the free until the last detach */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}
| 240 | |
| 241 | /* |
| 242 | * Attach to shared memory segment. |
| 243 | */ |
/*
 * Attach to shared memory segment.
 * Maps the segment's anonymous object into the caller's address space
 * at shmaddr (SHMLBA-aligned, or rounded down with SHM_RND), or at a
 * kernel-chosen address when shmaddr is 0.  On success *retval is the
 * attach address.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int	shmid;
		caddr_t	shmaddr;
		int	shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		/* first attach for this process: create its descriptor array */
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		/* misaligned address: round down if allowed, else reject */
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	/* size reused: now the segment's length in bytes, click-rounded */
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		/* hard-coded hint for kernel-chosen placement */
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
			(vm_size_t)size, prot, flags,
			((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	/* record the attach in the free descriptor slot found above */
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}
| 322 | |
| 323 | /* |
| 324 | * Detach from shared memory segment. |
| 325 | */ |
| 326 | /* ARGSUSED */ |
| 327 | shmdt(p, uap, retval) |
| 328 | struct proc *p; |
| 329 | struct args { |
| 330 | caddr_t shmaddr; |
| 331 | } *uap; |
| 332 | int *retval; |
| 333 | { |
| 334 | register struct shmdesc *shmd; |
| 335 | register int i; |
| 336 | |
| 337 | shmd = (struct shmdesc *)p->p_vmspace->vm_shm; |
| 338 | for (i = 0; i < shminfo.shmseg; i++, shmd++) |
| 339 | if (shmd->shmd_uva && |
| 340 | shmd->shmd_uva == (vm_offset_t)uap->shmaddr) |
| 341 | break; |
| 342 | if (i == shminfo.shmseg) |
| 343 | return(EINVAL); |
| 344 | shmufree(p, shmd); |
| 345 | shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid; |
| 346 | } |
| 347 | |
/*
 * Duplicate the parent's shm attach state into the child at fork time.
 * Copies the per-process descriptor array and bumps the attach count
 * of every segment the parent has attached (the mappings themselves
 * are inherited through the VM system).
 *
 * NOTE(review): assumes p1->p_vmspace->vm_shm is non-NULL — presumably
 * the caller only invokes shmfork() when the parent used shm; confirm.
 * isvfork is accepted but unused here.
 */
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parents descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}
| 369 | |
/*
 * Release all shm state of an exiting process: detach every attached
 * segment, then free the descriptor array.
 *
 * NOTE(review): assumes p->p_vmspace->vm_shm is non-NULL — presumably
 * the caller only invokes shmexit() when the process used shm; confirm.
 */
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}
| 383 | |
| 384 | shmvalid(id) |
| 385 | register int id; |
| 386 | { |
| 387 | register struct shmid_ds *shp; |
| 388 | |
| 389 | if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni) |
| 390 | return(EINVAL); |
| 391 | shp = &shmsegs[id % SHMMMNI]; |
| 392 | if (shp->shm_perm.seq == (id / SHMMMNI) && |
| 393 | (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC) |
| 394 | return(0); |
| 395 | return(EINVAL); |
| 396 | } |
| 397 | |
| 398 | /* |
| 399 | * Free user resources associated with a shared memory segment |
| 400 | */ |
/*
 * Free user resources associated with a shared memory segment:
 * unmap it from the process, clear the process's descriptor slot,
 * and drop the segment's attach count.  If this was the last attach
 * and the segment is marked SHM_DEST (via IPC_RMID), the segment
 * itself is destroyed.
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	/* mark the descriptor slot free for reuse */
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}
| 416 | |
| 417 | /* |
| 418 | * Deallocate resources associated with a shared memory segment |
| 419 | */ |
/*
 * Deallocate resources associated with a shared memory segment:
 * release the kernel mapping (and hence the backing object), free the
 * handle, return the pages to the system total, and retire the slot's
 * current generation of shmids.
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	/* mode 0 marks the slot free for shmget() */
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}
| 447 | |
| 448 | /* |
| 449 | * XXX This routine would be common to all sysV style IPC |
| 450 | * (if the others were implemented). |
| 451 | */ |
| 452 | ipcaccess(ipc, mode, cred) |
| 453 | register struct ipc_perm *ipc; |
| 454 | int mode; |
| 455 | register struct ucred *cred; |
| 456 | { |
| 457 | register int m; |
| 458 | |
| 459 | if (cred->cr_uid == 0) |
| 460 | return(0); |
| 461 | /* |
| 462 | * Access check is based on only one of owner, group, public. |
| 463 | * If not owner, then check group. |
| 464 | * If not a member of the group, then check public access. |
| 465 | */ |
| 466 | mode &= 0700; |
| 467 | m = ipc->mode; |
| 468 | if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) { |
| 469 | m <<= 3; |
| 470 | if (!groupmember(ipc->gid, cred) && |
| 471 | !groupmember(ipc->cgid, cred)) |
| 472 | m <<= 3; |
| 473 | } |
| 474 | if ((mode&m) == mode) |
| 475 | return (0); |
| 476 | return (EACCES); |
| 477 | } |
| 478 | #endif /* SYSVSHM */ |