/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

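/*
 * Per-segment state flags.  These are stored in shm_perm.mode alongside
 * the low ACCESSPERMS permission bits, so they must stay above 0777.
 */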
#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds	*shmsegs;
/* shm_token serializes access to shmsegs[] and the counters above */
static struct lwkt_token shm_token = LWKT_TOKEN_INITIALIZER(shm_token);

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

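/*
 * Per-process attach table, hung off vmspace->vm_shm as an array of
 * shminfo.shmseg entries.  A shmid of -1 marks a free slot.
 */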
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tunable values
 */
#ifndef SHMMIN
#define SHMMIN	1
#endif
#ifndef SHMMNI
#define SHMMNI	512
#endif
#ifndef SHMSEG
#define SHMSEG	1024
#endif

struct	shminfo shminfo = {
	0,		/* shmmax: maximum segment size; set in shminit() */
	SHMMIN,		/* shmmin: minimum segment size */
	SHMMNI,		/* shmmni: maximum number of identifiers */
	SHMSEG,		/* shmseg: maximum segments per process */
	0		/* shmall: maximum pages of shm; set in shminit() */
};

/*
 * allow-removed    Allow a shared memory segment to be attached by its shmid
 *                  even after it has been deleted, as long as it is still
 *                  being referenced by someone.  This is a trick used by
 *                  Chrome and other applications to avoid leaving shm
 *                  segments hanging around after the application is killed
 *                  or seg-faults unexpectedly.
 *
 * use-phys         Shared memory segments are to use physical memory by
 *                  default, which may allow the kernel to better optimize
 *                  the pmap and reduce overhead.  The pages are effectively
 *                  wired.
 */
static int shm_allow_removed = 1;
static int shm_use_phys = 1;

TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attaching to still-referenced segments marked for removal");

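/*
 * Illustrative tuning from userland (a sketch, not part of this file):
 *
 *	sysctl kern.ipc.shmall			# inspect the page limit
 *	sysctl kern.ipc.shm_use_phys=2		# also pre-allocate pages
 *						# at shmget() time
 */
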
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}

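/*
 * A shmid encodes both a slot index and a sequence number (see
 * IPCID_TO_IX() and IPCID_TO_SEQ() in <sys/ipc.h>).  The sequence
 * check below rejects stale ids whose slot has been recycled.
 */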
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}

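/*
 * Release the segment's backing VM object and mark its slot free.
 * Only called once the last attachment is gone.
 */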
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

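/*
 * Unmap one attachment from the given vmspace and deallocate the
 * segment if this drops the last reference to a removed segment.
 */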
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	long i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}

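/*
 * Typical userland use of the SysV shm API implemented here (an
 * illustrative sketch, not part of this file):
 *
 *	int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */
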
/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, flags;
	long i;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 */
	if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
		align = SEG_SIZE;
	else
		align = PAGE_SIZE;

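	/*
	 * Take a hold and an extra reference on the object while mapping
	 * it; on failure the extra reference is dropped again via
	 * vm_object_deallocate() below.
	 */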
	shm_handle = shmseg->shm_internal;
	vm_object_hold(shm_handle->shm_object);
	vm_object_reference_locked(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, NULL,
			 0, &attach_va, size,
			 align,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM,
			 prot, prot, 0);
	vm_object_drop(shm_handle->shm_object);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	lwkt_reltoken(&shm_token);

	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done:
	lwkt_reltoken(&shm_token);

	return error;
}

static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++) {
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		}
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in kmalloc(), mark the segment present but
	 * deleted so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure the backing pager is allocated before it is needed.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		   phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		   swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;

	/*
	 * If a physical mapping is desired and we have a ton of free pages
	 * we pre-allocate the pages here in order to avoid on-the-fly
	 * allocation later.  This has a big effect on database warm-up
	 * times since DFly supports concurrent page faults coming from the
	 * same VM object for pages which already exist.
	 *
	 * This can hang the kernel for a while so only do it if shm_use_phys
	 * is set to 2 or higher.
	 */
	if (shm_use_phys > 1) {
		vm_pindex_t pi, pmax;
		vm_page_t m;

		pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
		vm_object_hold(shm_handle->shm_object);
		if (pmax > vmstats.v_free_count)
			pmax = vmstats.v_free_count;
		for (pi = 0; pi < pmax; ++pi) {
			m = vm_page_grab(shm_handle->shm_object, pi,
					 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
					 VM_ALLOC_ZERO);
			if (m == NULL)
				break;
			vm_pager_get_page(shm_handle->shm_object, &m, 1);
			vm_page_activate(m);
			vm_page_wakeup(m);
			lwkt_yield();
		}
		vm_object_drop(shm_handle->shm_object);
	}

	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;

	lwkt_gettoken(&shm_token);

	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}

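/*
 * Called from fork: duplicate the parent's attach table into the child
 * and bump the attach count on every segment referenced by it.
 */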
void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	lwkt_gettoken(&shm_token);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
	}
	lwkt_reltoken(&shm_token);
}

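/*
 * Called at vmspace teardown: detach any remaining segments and free
 * the per-process attach table.
 */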
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		lwkt_gettoken(&shm_token);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
		lwkt_reltoken(&shm_token);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* initialize the newly grown slots, not the old array */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit(void *dummy)
{
	int i;

	/*
	 * If not overridden by a tunable, set the maximum amount of shm
	 * to 2/3 of main memory.
	 */
	if (shminfo.shmall == 0)
		shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);