/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/*      $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $      */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
        (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
        (sy_call_t *)sys_shmctl
};

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds  *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tunable values
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN  1
#endif
#ifndef SHMMNI
#define SHMMNI  192
#endif
#ifndef SHMSEG
#define SHMSEG  128
#endif
#ifndef SHMALL
#define SHMALL  (SHMMAXPGS)
#endif

struct  shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");

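/*
 * Example settings (illustrative, not part of this file).  The
 * TUNABLE_INT hooks above pick up boot-time values, and the SYSCTL_INT
 * entries export them at runtime:
 *
 *      # /boot/loader.conf (boot-time tunables)
 *      kern.ipc.shmmni=512
 *      kern.ipc.shmseg=256
 *      kern.ipc.shmmaxpgs=16384        (feeds shminfo.shmall; shminit()
 *                                       derives shmmax from it)
 *
 *      # runtime, via sysctl(8) (CTLFLAG_RW entries only)
 *      sysctl kern.ipc.shmmax=134217728
 *      sysctl kern.ipc.shm_use_phys=1
 *
 * shmmni is CTLFLAG_RD and can only be changed as a boot tunable.
 */
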
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}

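/*
 * Sketch of the id encoding assumed here (the IPCID_* macros are defined
 * in <sys/ipc.h>, not in this file; shown for illustration):
 *
 *      #define IPCID_TO_IX(id)         ((id) & 0xffff)
 *      #define IPCID_TO_SEQ(id)        (((id) >> 16) & 0xffff)
 *      #define IXSEQ_TO_IPCID(ix,perm) (((perm).seq << 16) | ((ix) & 0xffff))
 *
 * shmget_allocate_segment() bumps shm_perm.seq whenever a slot is
 * recycled, so the seq comparison above rejects stale shmids that refer
 * to a previous occupant of the same slot.
 */
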
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        kfree((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        int i;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        }
        if (i == shminfo.shmseg)
                error = EINVAL;
        else
                error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error, i, flags;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
again:
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                if (p->p_vmspace->vm_shm != NULL) {
                        kfree(shmmap_s, M_SHM);
                        goto again;
                }
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }
        error = ipcperm(p, &shmseg->shm_perm,
                        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)uap->shmaddr;
                } else {
                        error = EINVAL;
                        goto done;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map,
                         shm_handle->shm_object, 0,
                         &attach_va,
                         size, PAGE_SIZE,
                         ((flags & MAP_FIXED) ? 0 : 1),
                         VM_MAPTYPE_NORMAL,
                         prot, prot,
                         0);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                error = ENOMEM;
                goto done;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                       attach_va, attach_va + size, VM_INHERIT_SHARE);

        KKASSERT(shmmap_s->shmid == -1);
        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_resultp = (void *)attach_va;
        error = 0;
done:
        rel_mplock();
        return error;
}

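/*
 * Userland view of the attach path above (illustrative sketch, not part
 * of the kernel source):
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *      char *p = shmat(id, NULL, 0);   (NULL address: the kernel picks
 *                                       attach_va itself, see above)
 *      p[0] = 'x';
 *      shmdt(p);                       (drops shm_nattch via
 *                                       shm_delete_mapping())
 *
 * Passing a non-NULL address without SHM_RND must be SHMLBA-aligned or
 * sys_shmat() returns EINVAL.
 */
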
struct oshmid_ds {
        struct  ipc_perm shm_perm;      /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        ushort  shm_cpid;               /* pid, creator */
        ushort  shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};

struct oshmctl_args {
        struct sysmsg sysmsg;
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

/*
 * MPALMOSTSAFE
 */
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
        struct thread *td = curthread;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        break;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                error = sys_shmctl((struct shmctl_args *)uap);
        }
done:
        rel_mplock();
        return error;
#else
        return EINVAL;
#endif
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error == 0)
                        error = copyout(shmseg, uap->buf, sizeof(inbuf));
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0)
                        error = copyin(uap->buf, &inbuf, sizeof(inbuf));
                if (error == 0) {
                        shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                        shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                        shmseg->shm_perm.mode =
                            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                            (inbuf.shm_perm.mode & ACCESSPERMS);
                        shmseg->shm_ctime = time_second;
                }
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0) {
                        shmseg->shm_perm.key = IPC_PRIVATE;
                        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                        if (shmseg->shm_nattch <= 0) {
                                shm_deallocate_segment(shmseg);
                                shm_last_free = IPCID_TO_IX(uap->shmid);
                        }
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                error = EINVAL;
                break;
        }
done:
        rel_mplock();
        return error;
}

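/*
 * Illustrative userland counterpart to the shmctl paths above (a sketch,
 * not part of this file):
 *
 *      struct shmid_ds ds;
 *      shmctl(id, IPC_STAT, &ds);      (IPC_R check, then copyout)
 *      ds.shm_perm.mode = 0644;
 *      shmctl(id, IPC_SET, &ds);       (IPC_M check; only uid, gid and
 *                                       the low ACCESSPERMS bits change)
 *      shmctl(id, IPC_RMID, NULL);     (marks SHMSEG_REMOVED; the segment
 *                                       is freed once shm_nattch reaches 0)
 */
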
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in kmalloc(), mark the segment present but
         * deleted so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * Make sure we have a backing pager object allocated before we
         * need it.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                   phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                   swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int segnum, mode, error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;
        get_mplock();

        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done;
                }
        }
        error = shmget_allocate_segment(p, uap, mode);
done:
        rel_mplock();
        return (error);
}

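/*
 * Illustrative userland use of the lookup/create logic above (a sketch;
 * the ftok() path and project id are placeholders):
 *
 *      key_t key = ftok("/some/path", 42);
 *      int id = shmget(key, 65536, IPC_CREAT | IPC_EXCL | 0600);
 *      if (id == -1 && errno == EEXIST)
 *              id = shmget(key, 65536, 0600);
 *
 * EEXIST comes from shmget_existing() when both IPC_CREAT and IPC_EXCL
 * are set; ENOENT comes from the code above when IPC_CREAT is absent and
 * the key does not exist.
 */
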
/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 *
 * MPALMOSTSAFE
 */
int
sys_shmsys(struct shmsys_args *uap)
{
        struct thread *td = curthread;
        unsigned int which = (unsigned int)uap->which;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        if (which >= NELEM(shmcalls))
                return EINVAL;
        get_mplock();
        bcopy(&uap->a2, &uap->which,
                sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
        error = ((*shmcalls[which])(uap));
        rel_mplock();

        return(error);
}

void
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                kfree(base, M_SHM);
        }
}

static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                /* initialize the newly added slots, not the old array */
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        kfree(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}

static void
shminit(void *dummy)
{
        int i;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);