world/kernel: Use the rounddown2() macro in various places.
[dragonfly.git] / sys / kern / sysv_shm.c
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds  *shmsegs;
static struct lwkt_token shm_token = LWKT_TOKEN_INITIALIZER(shm_token);

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
        int reserved;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tunable values
 */
#ifndef SHMMIN
#define SHMMIN  1
#endif
#ifndef SHMMNI
#define SHMMNI  512
#endif
#ifndef SHMSEG
#define SHMSEG  1024
#endif

struct  shminfo shminfo = {
        0,              /* shmmax - set from shmall at boot in shminit() */
        SHMMIN,
        SHMMNI,
        SHMSEG,
        0               /* shmall - set at boot if not tuned */
};

/*
 * allow-removed    Allow a shared memory segment to be attached by its shmid
 *                  even after it has been deleted, as long as it is still
 *                  being referenced by someone.  This is a trick used by
 *                  Chrome and other applications to avoid leaving shm
 *                  segments hanging around after the application is killed
 *                  or seg-faults unexpectedly.
 *
 * use-phys         Shared memory segments are to use physical memory by
 *                  default, which may allow the kernel to better optimize
 *                  the pmap and reduce overhead.  The pages are effectively
 *                  wired.
 */
static int shm_allow_removed = 1;
static int shm_use_phys = 1;

TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");

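/*
 * Locate a segment by its IPC key.  Returns the index into shmsegs[],
 * or -1 if no allocated segment matches.  The caller must hold shm_token.
 */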
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++) {
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        }
        return -1;
}

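/*
 * Convert a shmid into a pointer to its shmid_ds.  Returns NULL if the
 * id is out of range, the segment is not allocated, the sequence number
 * does not match, or the segment is marked removed and shm_allow_removed
 * is disabled.
 */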
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
            (!shm_allow_removed &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
                return NULL;
        }
        return shmseg;
}

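/*
 * Release the backing VM object and accounting for a segment and mark
 * its shmsegs[] slot free.  Called when the last reference goes away.
 */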
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        kfree((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}

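/*
 * Remove a process's mapping of a segment and drop the attach count,
 * deallocating the segment if it was marked removed and this was the
 * last attachment.
 */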
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}

/*
 * shmdt(shmaddr)
 *
 * Detach the shared memory segment mapped at uap->shmaddr from the
 * calling process's address space.
 *
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        struct prison *pr = p->p_ucred->cr_prison;
        long i;
        int error;

        if (pr && !pr->pr_sysvipc_allowed)
                return (ENOSYS);

        lwkt_gettoken(&shm_token);
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        }
        if (i == shminfo.shmseg)
                error = EINVAL;
        else
                error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
        lwkt_reltoken(&shm_token);

        return (error);
}

/*
 * shmat(shmid, shmaddr, shmflg)
 *
 * Attach the shared memory segment identified by uap->shmid at either
 * the caller-supplied address or a kernel-chosen address.
 *
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct prison *pr = p->p_ucred->cr_prison;
        int error, flags;
        long i;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        vm_size_t align;
        int rv;

        if (pr && !pr->pr_sysvipc_allowed)
                return (ENOSYS);

        lwkt_gettoken(&shm_token);
again:
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++) {
                        shmmap_s[i].shmid = -1;
                        shmmap_s[i].reserved = 0;
                }
                if (p->p_vmspace->vm_shm != NULL) {
                        kfree(shmmap_s, M_SHM);
                        goto again;
                }
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }
        error = ipcperm(p, &shmseg->shm_perm,
                        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done;

        /*
         * Find a free element and mark it reserved.  This fixes races
         * against concurrent allocations caused by the token being
         * interrupted by blocking operations.  The shmmap_s reservation
         * is cleared upon completion or error.
         */
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1 && shmmap_s->reserved == 0) {
                        shmmap_s->reserved = 1;
                        break;
                }
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND) {
                        attach_va =
                            rounddown2((vm_offset_t)uap->shmaddr, SHMLBA);
                } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)uap->shmaddr;
                } else {
                        error = EINVAL;
                        shmmap_s->reserved = 0;
                        goto done;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
                                       maxtsiz + maxdsiz);
        }

        /*
         * Handle alignment.  For large memory maps it is possible
         * that the MMU can optimize the page table, so align anything
         * that is a multiple of SEG_SIZE to SEG_SIZE.
         */
        if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
                align = SEG_SIZE;
        else
                align = PAGE_SIZE;

        shm_handle = shmseg->shm_internal;
        vm_object_hold(shm_handle->shm_object);
        vm_object_reference_locked(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map,
                         shm_handle->shm_object, NULL,
                         0, &attach_va, size,
                         align,
                         ((flags & MAP_FIXED) ? 0 : 1),
                         VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM,
                         prot, prot, 0);
        vm_object_drop(shm_handle->shm_object);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                shmmap_s->reserved = 0;
                error = ENOMEM;
                goto done;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                       attach_va, attach_va + size, VM_INHERIT_SHARE);

        KKASSERT(shmmap_s->shmid == -1);
        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmmap_s->reserved = 0;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_resultp = (void *)attach_va;
        error = 0;
done:
        lwkt_reltoken(&shm_token);

        return error;
}

/*
 * shmctl(shmid, cmd, buf)
 *
 * Perform an IPC_STAT, IPC_SET, or IPC_RMID control operation on the
 * segment identified by uap->shmid.
 *
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct prison *pr = p->p_ucred->cr_prison;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (pr && !pr->pr_sysvipc_allowed)
                return (ENOSYS);

        lwkt_gettoken(&shm_token);
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error == 0)
                        error = copyout(shmseg, uap->buf, sizeof(inbuf));
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0)
                        error = copyin(uap->buf, &inbuf, sizeof(inbuf));
                if (error == 0) {
                        shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                        shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                        shmseg->shm_perm.mode =
                            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                            (inbuf.shm_perm.mode & ACCESSPERMS);
                        shmseg->shm_ctime = time_second;
                }
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0) {
                        shmseg->shm_perm.key = IPC_PRIVATE;
                        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                        if (shmseg->shm_nattch <= 0) {
                                shm_deallocate_segment(shmseg);
                                shm_last_free = IPCID_TO_IX(uap->shmid);
                        }
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                error = EINVAL;
                break;
        }
done:
        lwkt_reltoken(&shm_token);

        return error;
}

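/*
 * Handle a shmget() request against an existing segment with a matching
 * key.  Validates permissions and size and returns the existing shmid,
 * or EAGAIN if the segment is still being allocated by someone else.
 */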
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

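/*
 * Allocate a new segment for shmget(): pick a free shmsegs[] slot,
 * create the backing VM object (phys or swap pager), and initialize
 * the shmid_ds.  Returns the new shmid via uap->sysmsg_result.
 */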
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid;
        size_t size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++) {
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                }
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in kmalloc(), mark the segment present but
         * deleted so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need
         * to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                   phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                   swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;

        /*
         * If a physical mapping is desired and we have a ton of free pages
         * we pre-allocate the pages here in order to avoid on-the-fly
         * allocation later.  This has a big effect on database warm-up
         * times since DFly supports concurrent page faults coming from the
         * same VM object for pages which already exist.
         *
         * This can hang the kernel for a while so only do it if shm_use_phys
         * is set to 2 or higher.
         */
        if (shm_use_phys > 1) {
                vm_pindex_t pi, pmax;
                vm_page_t m;

                pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
                vm_object_hold(shm_handle->shm_object);
                if (pmax > vmstats.v_free_count)
                        pmax = vmstats.v_free_count;
                for (pi = 0; pi < pmax; ++pi) {
                        m = vm_page_grab(shm_handle->shm_object, pi,
                                         VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
                                         VM_ALLOC_ZERO);
                        if (m == NULL)
                                break;
                        vm_pager_get_page(shm_handle->shm_object, &m, 1);
                        vm_page_activate(m);
                        vm_page_wakeup(m);
                        lwkt_yield();
                }
                vm_object_drop(shm_handle->shm_object);
        }

        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}

/*
 * shmget(key, size, shmflg)
 *
 * Return the id of the segment matching uap->key, creating a new
 * segment if requested and none exists.
 *
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct prison *pr = p->p_ucred->cr_prison;
        int segnum, mode, error;

        if (pr && !pr->pr_sysvipc_allowed)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;

        lwkt_gettoken(&shm_token);

        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done;
                }
        }
        error = shmget_allocate_segment(p, uap, mode);
done:
        lwkt_reltoken(&shm_token);

        return (error);
}

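/*
 * Called at fork time to duplicate the parent's attached-segment map
 * into the child and bump the attach count on every mapped segment.
 */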
void
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        lwkt_gettoken(&shm_token);
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
        }
        lwkt_reltoken(&shm_token);
}

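/*
 * Called when a vmspace is torn down.  Detach all remaining segment
 * mappings and free the shmmap_state array.
 */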
void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                lwkt_gettoken(&shm_token);
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                kfree(base, M_SHM);
                lwkt_reltoken(&shm_token);
        }
}

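/*
 * Grow the shmsegs[] array up to shminfo.shmmni entries, preserving
 * existing segments and marking the new entries free.
 */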
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        /*
         * Initialize the new entries in the new array.  Writing to the
         * old shmsegs[] here would run past the end of its allocation.
         */
        for (; i < shminfo.shmmni; i++) {
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        kfree(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}

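/*
 * Initialize the SysV shared memory subsystem at boot: size the limits,
 * allocate the initial shmsegs[] array, and mark every slot free.
 */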
static void
shminit(void *dummy)
{
        int i;

        /*
         * If not overridden by a tunable, set the maximum amount of shm
         * to 2/3 of main memory.
         */
        if (shminfo.shmall == 0)
                shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);