/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.21 2008/01/06 16:55:51 swildner Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
        (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
        (sy_call_t *)sys_shmctl
};

#define SHMSEG_FREE         0x0200
#define SHMSEG_REMOVED      0x0400
#define SHMSEG_ALLOCATED    0x0800
#define SHMSEG_WANTED       0x1000

static int shm_last_free, shm_committed, shmalloced;
static int shm_nused;
static struct shmid_ds *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define SHMMAX          (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN          1
#endif
#ifndef SHMMNI
#define SHMMNI          512
#endif
#ifndef SHMSEG
#define SHMSEG          1024
#endif
#ifndef SHMALL
#define SHMALL          (SHMMAXPGS)
#endif

struct shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");
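
/*
 * Look up a segment by its IPC key.  Returns the index into shmsegs[] of
 * an allocated segment whose key matches, or -1 if there is none.
 */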
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++) {
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        }
        return -1;
}
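
/*
 * Convert a shmid into a pointer to its shmid_ds.  Validates the index,
 * the allocation state and the sequence number; returns NULL on failure.
 */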
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
                return NULL;
        }
        return shmseg;
}
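
/*
 * Free the backing VM object and accounting for a segment and mark its
 * shmsegs[] slot SHMSEG_FREE.
 */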
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        kfree((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}
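
/*
 * Remove a single mapping from a process vmspace and drop the segment's
 * attach count.  A segment already marked SHMSEG_REMOVED is deallocated
 * once its last attach goes away.
 */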
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}
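
/*
 * shmdt(void *shmaddr)
 *
 * Detach the mapping that was attached at shmaddr.
 *
 * MPALMOSTSAFE
 */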
int
sys_shmdt(struct shmdt_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        int i;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        }
        if (i == shminfo.shmseg)
                error = EINVAL;
        else
                error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
        rel_mplock();
        return (error);
}
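
/*
 * shmat(int shmid, const void *shmaddr, int shmflg)
 *
 * Map a segment into the calling process, either at the caller-supplied
 * address (rounded to an SHMLBA boundary for SHM_RND) or at an address
 * chosen above the text and data segments.
 *
 * MPALMOSTSAFE
 */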
int
sys_shmat(struct shmat_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error, i, flags;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
again:
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                if (p->p_vmspace->vm_shm != NULL) {
                        kfree(shmmap_s, M_SHM);
                        goto again;
                }
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }
        error = ipcperm(p, &shmseg->shm_perm,
                        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)uap->shmaddr;
                } else {
                        error = EINVAL;
                        goto done;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map,
                         shm_handle->shm_object, 0,
                         &attach_va,
                         size, PAGE_SIZE,
                         ((flags & MAP_FIXED) ? 0 : 1),
                         VM_MAPTYPE_NORMAL,
                         prot, prot,
                         0);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                error = ENOMEM;
                goto done;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                       attach_va, attach_va + size, VM_INHERIT_SHARE);

        KKASSERT(shmmap_s->shmid == -1);
        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_resultp = (void *)attach_va;
        error = 0;
done:
        rel_mplock();
        return error;
}
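
/*
 * Legacy shmid_ds layout and argument block used by the old-style
 * shmctl (sys_oshmctl) compatibility interface.
 */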
struct oshmid_ds {
        struct ipc_perm shm_perm;       /* operation perms */
        int shm_segsz;                  /* size of segment (bytes) */
        ushort shm_cpid;                /* pid, creator */
        ushort shm_lpid;                /* pid, last operation */
        short shm_nattch;               /* no. of current attaches */
        time_t shm_atime;               /* last attach time */
        time_t shm_dtime;               /* last detach time */
        time_t shm_ctime;               /* last change time */
        void *shm_handle;               /* internal handle for shm segment */
};

struct oshmctl_args {
        struct sysmsg sysmsg;
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};
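
/*
 * Old-style shmctl: handles IPC_STAT against the legacy shmid_ds layout
 * and forwards every other command to sys_shmctl().
 *
 * MPALMOSTSAFE
 */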
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
        struct thread *td = curthread;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        break;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                error = sys_shmctl((struct shmctl_args *)uap);
        }
done:
        rel_mplock();
        return error;
#else
        return EINVAL;
#endif
}
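
/*
 * shmctl(int shmid, int cmd, struct shmid_ds *buf)
 *
 * IPC_STAT copies the segment descriptor out to userland, IPC_SET updates
 * the owner and permission bits, and IPC_RMID marks the segment removed so
 * that it is destroyed when the last attach goes away.
 *
 * MPALMOSTSAFE
 */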
int
sys_shmctl(struct shmctl_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error == 0)
                        error = copyout(shmseg, uap->buf, sizeof(inbuf));
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0)
                        error = copyin(uap->buf, &inbuf, sizeof(inbuf));
                if (error == 0) {
                        shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                        shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                        shmseg->shm_perm.mode =
                            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                            (inbuf.shm_perm.mode & ACCESSPERMS);
                        shmseg->shm_ctime = time_second;
                }
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0) {
                        shmseg->shm_perm.key = IPC_PRIVATE;
                        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                        if (shmseg->shm_nattch <= 0) {
                                shm_deallocate_segment(shmseg);
                                shm_last_free = IPCID_TO_IX(uap->shmid);
                        }
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                error = EINVAL;
                break;
        }
done:
        rel_mplock();
        return error;
}
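
/*
 * shmget() helper for the case where the key already names a segment.
 */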
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}
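
/*
 * shmget() helper that creates a new segment: pick a free shmsegs[] slot,
 * allocate a backing VM object (physical or swap backed depending on
 * kern.ipc.shm_use_phys) and initialize the permission and accounting
 * fields.
 */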
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++) {
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                }
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need
         * to use it.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}
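
/*
 * shmget(key_t key, size_t size, int shmflg)
 *
 * MPALMOSTSAFE
 */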
int
sys_shmget(struct shmget_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int segnum, mode, error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;
        get_mplock();

        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done;
                }
        }
        error = shmget_allocate_segment(p, uap, mode);
done:
        rel_mplock();
        return (error);
}

/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 *
 * MPALMOSTSAFE
 */
int
sys_shmsys(struct shmsys_args *uap)
{
        struct thread *td = curthread;
        unsigned int which = (unsigned int)uap->which;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return EINVAL;
        get_mplock();
        bcopy(&uap->a2, &uap->which,
              sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
        error = ((*shmcalls[which])(uap));
        rel_mplock();

        return error;
}
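
/*
 * Called at fork time: give the child its own copy of the parent's attach
 * table and bump the attach count of every segment it inherits.
 */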
void
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
        }
}
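
/*
 * Called when a vmspace is torn down: detach every remaining segment and
 * free the attach table.
 */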
void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                kfree(base, M_SHM);
        }
}
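
/*
 * Grow shmsegs[] up to shminfo.shmmni entries, preserving existing slots
 * and marking the new ones free.
 */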
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        kfree(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}
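
/*
 * Bootstrap the segment table at system startup.
 */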
static void
shminit(void *dummy)
{
        int i;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);