MAP_VPAGETABLE support part 3/3.
sys/kern/sysv_shm.c
/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.19 2006/09/11 20:25:01 dillon Exp $ */
/*      $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $      */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
        (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
        (sy_call_t *)sys_shmctl
};

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds  *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN  1
#endif
#ifndef SHMMNI
#define SHMMNI  192
#endif
#ifndef SHMSEG
#define SHMSEG  128
#endif
#ifndef SHMALL
#define SHMALL  (SHMMAXPGS)
#endif

struct  shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");

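/*
 * Return the index of the allocated segment whose key matches, or -1 if
 * no segment has that key.
 */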
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        return -1;
}

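/*
 * Convert a shmid into a pointer to its shmid_ds, or NULL if the id does
 * not reference a currently allocated, non-removed segment.
 */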
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}

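/*
 * Release a segment's backing VM object and accounting, then mark its
 * slot free for reuse.
 */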
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        kfree((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}

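/*
 * Unmap an attached segment from a process and, if this was the last
 * attach of a removed segment, deallocate the segment itself.
 */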
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}

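/*
 * shmdt(2) - detach the shared memory segment mapped at the given address
 * from the calling process.
 */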
int
sys_shmdt(struct shmdt_args *uap)
{
        struct proc *p = curproc;
        struct shmmap_state *shmmap_s;
        int i;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL)
                return EINVAL;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        if (i == shminfo.shmseg)
                return EINVAL;
        return shm_delete_mapping(p->p_vmspace, shmmap_s);
}

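/*
 * shmat(2) - attach a shared memory segment to the calling process's
 * address space, either at a caller-supplied address or at an address
 * chosen by the kernel.
 */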
int
sys_shmat(struct shmat_args *uap)
{
        struct proc *p = curproc;
        int error, i, flags;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        error = ipcperm(p, &shmseg->shm_perm,
            (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                return error;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg)
                return EMFILE;
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND)
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
                        attach_va = (vm_offset_t)uap->shmaddr;
                else
                        return EINVAL;
        } else {
                /* This is just a hint to vm_map_find() about where to put it. */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map,
                         shm_handle->shm_object, 0,
                         &attach_va, size,
                         ((flags & MAP_FIXED) ? 0 : 1),
                         VM_MAPTYPE_NORMAL,
                         prot, prot,
                         0);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                return ENOMEM;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                attach_va, attach_va + size, VM_INHERIT_SHARE);

        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_result = attach_va;
        return 0;
}

struct oshmid_ds {
        struct  ipc_perm shm_perm;      /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        ushort  shm_cpid;               /* pid, creator */
        ushort  shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};

struct oshmctl_args {
        struct sysmsg sysmsg;
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

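/*
 * Old (4.3BSD) shmctl(2) interface.  With COMPAT_43, IPC_STAT results are
 * copied out in the old shmid_ds layout; other commands fall through to
 * sys_shmctl().  Without COMPAT_43 this simply returns EINVAL.
 */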
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
        int error;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                if (error)
                        return error;
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                return (sys_shmctl((struct shmctl_args *)uap));
        }
        return 0;
#else
        return EINVAL;
#endif
}

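/*
 * shmctl(2) - segment control operations: IPC_STAT, IPC_SET and IPC_RMID.
 */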
int
sys_shmctl(struct shmctl_args *uap)
{
        struct proc *p = curproc;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
                if (error)
                        return error;
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
                if (error)
                        return error;
                shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (inbuf.shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time_second;
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(uap->shmid);
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                return EINVAL;
        }
        return 0;
}

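/*
 * shmget(2) helper: a segment with the requested key already exists.
 * Validate flags, permissions and size, then return its shmid.
 */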
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

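/*
 * shmget(2) helper: create a new segment.  Pick a free slot, allocate a
 * swap- or physical-memory-backed VM object for the data, and fill in
 * the shmid_ds bookkeeping.
 */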
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in kmalloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need
         * to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}

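/*
 * shmget(2) - find an existing segment by key, or create a new one if
 * IPC_CREAT was requested (IPC_PRIVATE always creates a new segment).
 */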
int
sys_shmget(struct shmget_args *uap)
{
        struct proc *p = curproc;
        int segnum, mode, error;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        return error;
                }
                if ((uap->shmflg & IPC_CREAT) == 0)
                        return ENOENT;
        }
        return shmget_allocate_segment(p, uap, mode);
}

/*
 *  shmsys_args(int which, int a2, ...) (VARARGS)
 */
int
sys_shmsys(struct shmsys_args *uap)
{
        struct proc *p = curproc;
        unsigned int which = (unsigned int)uap->which;
        int error;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return EINVAL;
        bcopy(&uap->a2, &uap->which,
                sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
        error = ((*shmcalls[which])(uap));
        return(error);
}

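/*
 * Called at fork time: give the child a copy of the parent's attach table
 * and bump the attach count on every segment the parent has attached.
 */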
void
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

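/*
 * Called when a vmspace is torn down: detach any remaining segments and
 * free the per-process attach table.
 */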
void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                kfree(base, M_SHM);
        }
}

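/*
 * Grow the shmsegs[] array to shminfo.shmmni entries if it is currently
 * smaller, preserving the existing segments.
 */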
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        if (newsegs == NULL)
                return;
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        kfree(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}

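/*
 * SYSINIT hook: size shmmax from shmall and allocate the initial
 * shmsegs[] array with every slot marked free.
 */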
static void
shminit(void *dummy)
{
        int i;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        if (shmsegs == NULL)
                panic("cannot allocate initial memory for sysvshm");
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);