/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.8 (Berkeley) %G%
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "machine/pte.h"

#include "param.h"
#include "systm.h"
#include "syscontext.h"
#include "kernel.h"
#include "proc.h"
#include "vm.h"
#include "shm.h"
#include "mapmem.h"
#include "malloc.h"

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

int	shmfork(), shmexit();
struct	mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };

shminit()
{
	register int i;

	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}
59 | ||
161c14a2 KM |
60 | /* |
61 | * Entry point for all SHM calls | |
62 | */ | |
63 | shmsys(p, uap, retval) | |
64 | struct proc *p; | |
65 | struct args { | |
e4f7cdf5 | 66 | u_int which; |
161c14a2 KM |
67 | } *uap; |
68 | int *retval; | |
69 | { | |
6f843dc9 | 70 | |
161c14a2 KM |
71 | if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) |
72 | RETURN (EINVAL); | |
73 | RETURN ((*shmcalls[uap->which])(p, &uap[1], retval)); | |
6f843dc9 KM |
74 | } |
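
#ifdef notdef
/*
 * Illustrative sketch only (not part of this file): a user-level
 * stub can funnel through shmsys() because each call's real
 * arguments are laid out directly after "which", which is what the
 * &uap[1] above hands to the selected routine.  The index 3 matches
 * the shmcalls[] initializer (shmat = 0, shmctl = 1, shmdt = 2,
 * shmget = 3); the system call glue itself is assumed here.
 */
shmget(key, size, shmflg)
	key_t key;
	int size, shmflg;
{
	return (shmsys(3, key, size, shmflg));
}
#endif /* notdef */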

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	register int i;
	int error, size, rval = 0;
	caddr_t kva;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
		if (kva == NULL) {
			shp->shm_perm.mode = 0;
			return (ENOMEM);
		}
		if (!claligned(kva))
			panic("shmget: non-aligned memory");
		bzero(kva, (u_int)ctob(size));
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_handle = (void *) kvtopte(kva);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
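
/*
 * A worked example of the shmid encoding above (illustrative only;
 * SHMMMNI is defined in shm.h and is assumed to be 32 here purely
 * for the arithmetic): slot 5 with sequence number 2 yields
 * shmid = 2 * 32 + 5 = 69.  shmid % 32 = 5 recovers the slot, and
 * shmvalid() below checks shmid / 32 == 2 against the slot's
 * current sequence number, so a stale id is refused after the slot
 * has been recycled by shmfree().
 */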

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}
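
#ifdef notdef
/*
 * Illustrative user-level sketch only (not part of this file),
 * assuming the standard SVID interface backed by shmctl() above:
 * IPC_STAT copies the kernel's shmid_ds out for inspection, and
 * IPC_SET pushes the uid/gid/mode fields (only the low 0777 mode
 * bits) back in.
 */
shmsetmode(id)
	int id;
{
	struct shmid_ds ds;

	if (shmctl(id, IPC_STAT, &ds) < 0)
		return (-1);
	ds.shm_perm.mode = 0644;	/* new permission bits */
	return (shmctl(id, IPC_SET, &ds));
}
#endif /* notdef */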

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		caddr_t shmaddr;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	struct mapmem *mp;
	caddr_t uva;
	int error, prot, shmmapin();

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, u.u_cred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	size = 0;
	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops)
			size++;
	if (size >= shminfo.shmseg)
		return (EMFILE);
	/*
	 * Allocate a mapped memory region descriptor and
	 * attempt to expand the user page table to allow for region
	 */
	prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
#if defined(hp300)
	prot |= MM_CI;
#endif
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	error = mmalloc(p, uap->shmid, &uva, (segsz_t)size, prot, &shmops, &mp);
	if (error)
		return (error);
	if (error = mmmapin(p, mp, shmmapin)) {
		(void) mmfree(p, mp);
		return (error);
	}
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}
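
/*
 * Illustrative arithmetic for the SHM_RND case above, assuming
 * SHMLBA = 0x1000 purely for the example: a requested address of
 * 0x12345 is misaligned (0x12345 & 0xfff != 0); with SHM_RND it is
 * rounded down to 0x12345 & ~0xfff = 0x12000, otherwise the attach
 * fails with EINVAL.
 */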
302 | ||
161c14a2 KM |
303 | /* |
304 | * Detach from shared memory segment. | |
305 | */ | |
306 | /* ARGSUSED */ | |
307 | shmdt(p, uap, retval) | |
308 | struct proc *p; | |
309 | struct args { | |
6f843dc9 | 310 | caddr_t shmaddr; |
161c14a2 KM |
311 | } *uap; |
312 | int *retval; | |
313 | { | |
6f843dc9 KM |
314 | register struct mapmem *mp; |
315 | ||
316 | for (mp = u.u_mmap; mp; mp = mp->mm_next) | |
317 | if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr) | |
318 | break; | |
161c14a2 KM |
319 | if (mp == MMNIL) |
320 | return (EINVAL); | |
c9714ae3 | 321 | shmsegs[mp->mm_id % SHMMMNI].shm_lpid = p->p_pid; |
161c14a2 | 322 | return (shmufree(p, mp)); |
6f843dc9 KM |
323 | } |
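
#ifdef notdef
/*
 * Illustrative user-level sketch only (not part of this file),
 * assuming the standard SVID library interface that these kernel
 * routines implement.  It walks the life cycle handled above:
 * create, attach, use, detach, destroy.
 */
shmexample()
{
	register char *addr;
	int id;

	/* create a private 4K segment, read/write for the owner */
	id = shmget(IPC_PRIVATE, 4096, IPC_CREAT|0600);
	if (id < 0)
		return (-1);
	/* attach at an address of the kernel's choosing */
	addr = (char *) shmat(id, (caddr_t)0, 0);
	if (addr == (char *)-1)
		return (-1);
	addr[0] = 'x';			/* use the memory */
	(void) shmdt(addr);		/* drop our mapping */
	/* mark for destruction; freed at last detach (already gone here) */
	return (shmctl(id, IPC_RMID, (struct shmid_ds *)0));
}
#endif /* notdef */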

shmmapin(mp, off)
	struct mapmem *mp;
	int off;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
		return(-1);
	return(((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
}

/*
 * Increment attach count on fork
 */
/* ARGSUSED */
shmfork(mp, ischild)
	register struct mapmem *mp;
	int ischild;
{
	if (!ischild)
		shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
}

/*
 * Detach from shared memory segment on exit (or exec)
 */
shmexit(mp)
	struct mapmem *mp;
{
	struct proc *p = u.u_procp;	/* XXX */

	return (shmufree(p, mp));
}

shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, mp)
	struct proc *p;
	struct mapmem *mp;
{
	register struct shmid_ds *shp;
	int error;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	mmmapout(p, mp);
	error = mmfree(p, mp);
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
	return (error);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	caddr_t kva;

	if (shp->shm_handle == NULL)
		panic("shmfree");
	kva = (caddr_t) ptetokv(shp->shm_handle);
	free(kva, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
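
/*
 * A worked example of the shift trick above (illustrative only):
 * a segment with mode 0640, and a caller who is a group member but
 * not the owner, requesting read (mode = IPC_R = 0400).  m starts
 * at 0640; the owner test fails, so m <<= 3 gives 06400, moving
 * the group bits into the 0700 column.  (0400 & 06400) == 0400, so
 * read is granted; a write request (0200) finds no matching bit
 * and gets EACCES.
 */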

#endif /* SYSVSHM */