/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.21 2008/01/06 16:55:51 swildner Exp $ */
/*      $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $      */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
        (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
        (sy_call_t *)sys_shmctl
};

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds  *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN  1
#endif
#ifndef SHMMNI
#define SHMMNI  192
#endif
#ifndef SHMSEG
#define SHMSEG  128
#endif
#ifndef SHMALL
#define SHMALL  (SHMMAXPGS)
#endif

struct  shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");

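/*
 * Return the index of the allocated segment matching the given key,
 * or -1 if no such segment exists.
 */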
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        return -1;
}

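/*
 * Resolve a shmid to its shmid_ds.  Returns NULL if the index is out of
 * range, the segment is not fully allocated or is being removed, or the
 * sequence number does not match (a stale id).
 */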
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}

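/*
 * Release the VM object backing a segment and return the slot to the
 * free pool, updating the global usage counters.
 */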
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        kfree((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}

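/*
 * Unmap a segment from a process.  If this drops the attach count to zero
 * and the segment has been marked removed, free it as well.
 */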
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        int i;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        }
        if (i == shminfo.shmseg)
                error = EINVAL;
        else
                error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error, i, flags;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
again:
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                if (p->p_vmspace->vm_shm != NULL) {
                        kfree(shmmap_s, M_SHM);
                        goto again;
                }
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }
        error = ipcperm(p, &shmseg->shm_perm,
                        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)uap->shmaddr;
                } else {
                        error = EINVAL;
                        goto done;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map,
                         shm_handle->shm_object, 0,
                         &attach_va,
                         size, PAGE_SIZE,
                         ((flags & MAP_FIXED) ? 0 : 1),
                         VM_MAPTYPE_NORMAL,
                         prot, prot,
                         0);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                error = ENOMEM;
                goto done;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                       attach_va, attach_va + size, VM_INHERIT_SHARE);

        KKASSERT(shmmap_s->shmid == -1);
        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_resultp = (void *)attach_va;
        error = 0;
done:
        rel_mplock();
        return error;
}

struct oshmid_ds {
        struct  ipc_perm shm_perm;      /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        ushort  shm_cpid;               /* pid, creator */
        ushort  shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};

struct oshmctl_args {
        struct sysmsg sysmsg;
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

/*
 * MPALMOSTSAFE
 */
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
        struct thread *td = curthread;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        break;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                error = sys_shmctl((struct shmctl_args *)uap);
        }
done:
        rel_mplock();
        return error;
#else
        return EINVAL;
#endif
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error == 0)
                        error = copyout(shmseg, uap->buf, sizeof(inbuf));
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0)
                        error = copyin(uap->buf, &inbuf, sizeof(inbuf));
                if (error == 0) {
                        shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                        shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                        shmseg->shm_perm.mode =
                            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                            (inbuf.shm_perm.mode & ACCESSPERMS);
                        shmseg->shm_ctime = time_second;
                }
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0) {
                        shmseg->shm_perm.key = IPC_PRIVATE;
                        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                        if (shmseg->shm_nattch <= 0) {
                                shm_deallocate_segment(shmseg);
                                shm_last_free = IPCID_TO_IX(uap->shmid);
                        }
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                error = EINVAL;
                break;
        }
done:
        rel_mplock();
        return error;
}

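/*
 * Handle shmget() when the key already maps to an existing segment:
 * honor IPC_CREAT|IPC_EXCL, check permissions and the requested size,
 * and return the segment's id.
 */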
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

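/*
 * Handle shmget() when a new segment must be created: validate the size,
 * find a free slot, allocate the backing VM object, and initialize the
 * shmid_ds.
 */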
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in kmalloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * Make sure the backing pager is allocated before it is needed.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                   phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                   swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int segnum, mode, error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;
        get_mplock();

        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done;
                }
        }
        error = shmget_allocate_segment(p, uap, mode);
done:
        rel_mplock();
        return (error);
}

/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 *
 * MPALMOSTSAFE
 */
int
sys_shmsys(struct shmsys_args *uap)
{
        struct thread *td = curthread;
        unsigned int which = (unsigned int)uap->which;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return EINVAL;
        get_mplock();
        bcopy(&uap->a2, &uap->which,
                sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
        error = ((*shmcalls[which])(uap));
        rel_mplock();

        return(error);
}

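/*
 * Called at fork: the child inherits a copy of the parent's attach table,
 * so bump the attach count of every segment still mapped.
 */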
void
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

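/*
 * Called when a vmspace is torn down: detach any segments still mapped
 * and free the attach table.
 */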
void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                kfree(base, M_SHM);
        }
}

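/*
 * Grow shmsegs[] to shminfo.shmmni entries, copying the existing segments
 * over and marking the new slots free.
 */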
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                /* initialize the new slots, not the old (smaller) array */
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        kfree(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}

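/*
 * Allocate and initialize the segment array at boot time.
 */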
static void
shminit(void *dummy)
{
        int i;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);