/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.13 2004/02/05 18:43:22 drhodus Exp $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000
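
/*
 * These state flags share shm_perm.mode with the low nine permission
 * bits (ACCESSPERMS, 0777): the flags live in the bits above the
 * permission field, so one word carries both.  A freshly created 0644
 * segment, for example, ends up with mode == (0644 | SHMSEG_ALLOCATED),
 * and state tests like (mode & SHMSEG_ALLOCATED) never collide with
 * permission tests like (mode & S_IRUSR).
 */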

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS	8192	/* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN	1
#endif
#ifndef SHMMNI
#define SHMMNI	192
#endif
#ifndef SHMSEG
#define SHMSEG	128
#endif
#ifndef SHMALL
#define SHMALL	(SHMMAXPGS)
#endif
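
/*
 * Worked example of the defaults above, assuming the common 4KB
 * PAGE_SIZE: SHMMAXPGS = 8192 pages, so SHMMAX = 8192 * 4096 = 32MB
 * per segment, and SHMALL caps system-wide usage at those same 8192
 * pages.
 */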

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");
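
/*
 * Hedged usage sketch (not part of this file): the TUNABLE_INT knobs
 * above are normally set at boot, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.shmmaxpgs="16384"
 *
 * while the CTLFLAG_RW sysctls can be changed on a running system:
 *
 *	sysctl kern.ipc.shmmax=67108864
 *
 * shmmni is CTLFLAG_RD, so the identifier limit can only be set via
 * the boot-time tunable.
 */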

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}
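
/*
 * Sketch of the shmid encoding the two lookups above rely on, per the
 * IPCID_TO_IX()/IPCID_TO_SEQ()/IXSEQ_TO_IPCID() macros (assumed from
 * <sys/ipc.h>): the low half of a shmid indexes shmsegs[] and the high
 * half carries the slot's sequence number, which is bumped each time a
 * slot is reused.  A stale id for a recycled slot therefore fails the
 * shm_perm.seq comparison instead of silently naming the new segment.
 */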

static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

int
shmdt(struct shmdt_args *uap)
{
	struct proc *p = curproc;
	struct shmmap_state *shmmap_s;
	int i;

	if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p->p_vmspace, shmmap_s);
}

int
shmat(struct shmat_args *uap)
{
	struct proc *p = curproc;
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(p, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_result = attach_va;
	return 0;
}
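
/*
 * Minimal userland sketch of the attach path above (illustrative, not
 * part of the kernel).  Passing shmaddr == NULL takes the else branch,
 * letting the kernel choose attach_va beyond the text and data sizes:
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = (char *)shmat(id, NULL, 0);
 *	p[0] = 1;
 *	shmdt(p);
 *
 * With a non-NULL shmaddr and SHM_RND, the address is rounded down to
 * an SHMLBA boundary: 0x10123 & ~(SHMLBA-1) yields 0x10000 when SHMLBA
 * is the 4KB page size.
 */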

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	struct sysmsg sysmsg;
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

static int
oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

int
shmctl(struct shmctl_args *uap)
{
	struct proc *p = curproc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}
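
/*
 * Hedged userland sketch of the three commands handled above
 * (illustrative only):
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	fetches the copyout above
 *	ds.shm_perm.mode = 0600;
 *	shmctl(id, IPC_SET, &ds);	updates owner/group/mode only
 *	shmctl(id, IPC_RMID, NULL);	sets SHMSEG_REMOVED; the segment
 *					lingers until shm_nattch reaches 0
 */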

static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure we have a pager allocated before we need it.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}
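
/*
 * Note the handshake with shmget_existing(): the ALLOCATED|REMOVED
 * marker set before the potentially sleeping malloc() makes a
 * concurrent shmget() of the same key tsleep() on this shmseg, and
 * the SHMSEG_WANTED wakeup above releases it to retry its key lookup.
 */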

int
shmget(struct shmget_args *uap)
{
	struct proc *p = curproc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}
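
/*
 * Hedged userland sketch of the lookup above (illustrative): with
 * IPC_CREAT alone an existing segment for the key is returned through
 * shmget_existing(); adding IPC_EXCL turns that case into EEXIST.
 * The path given to ftok() here is hypothetical:
 *
 *	key_t key = ftok("/tmp/example", 1);
 *	int id = shmget(key, 65536, IPC_CREAT | IPC_EXCL | 0644);
 */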

/*
 *  shmsys_args(int which, int a2, ...) (VARARGS)
 */
int
shmsys(struct shmsys_args *uap)
{
	struct proc *p = curproc;
	unsigned int which = (unsigned int)uap->which;
	int error;

	if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
		return (ENOSYS);

	if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	bcopy(&uap->a2, &uap->which,
		sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
	error = ((*shmcalls[which])(uap));
	return(error);
}
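
/*
 * The bcopy() above slides the trailing arguments down over `which',
 * so a shmsys(0, shmid, shmaddr, shmflg) call leaves the argument
 * block laid out like a struct shmat_args before shmcalls[0] runs.
 * Per the shmcalls[] table: 0 = shmat, 1 = oshmctl, 2 = shmdt,
 * 3 = shmget, 4 = shmctl.
 */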

void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		free(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* initialize the new slots, not the array being replaced */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit(void *dummy)
{
	int i;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);