1 /* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
4 * Implementation of SVID semaphores
6 * Author: Daniel Boulet
7 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
9 * This software is provided ``AS IS'' without any warranties of any kind.
12 #include "namespace.h"
20 #include <sys/param.h>
21 #include <sys/queue.h>
24 #include "un-namespace.h"
26 #include "sysvipc_lock.h"
27 #include "sysvipc_ipc.h"
28 #include "sysvipc_shm.h"
29 #include "sysvipc_sem.h"
30 #include "sysvipc_hash.h"
/*
 * Mutex wrappers: take/release/destroy the pthread mutex only when the
 * process is actually threaded (__isthreaded), so single-threaded programs
 * skip the locking overhead.
 * NOTE(review): bare-if macros with no do{}while(0) wrapper — unsafe if
 * used before an `else` or as a single-statement body; flag for cleanup.
 */
33 #define SYSV_MUTEX_LOCK(x) if (__isthreaded) _pthread_mutex_lock(x)
34 #define SYSV_MUTEX_UNLOCK(x) if (__isthreaded) _pthread_mutex_unlock(x)
35 #define SYSV_MUTEX_DESTROY(x) if (__isthreaded) _pthread_mutex_destroy(x)
/* Shared-memory bookkeeping owned by the shm module (see sysvipc_shm.h):
 * address->segment and id->segment hash tables plus their lock. */
37 extern struct hashtable *shmaddrs;
38 extern struct hashtable *shmres;
39 extern pthread_mutex_t lock_resources;
/* Per-process SEM_UNDO list (lazily backed by a private shm segment in
 * semundo_adjust) and the mutex that serializes access to it. */
41 struct sem_undo *undos = NULL;
42 pthread_mutex_t lock_undo = PTHREAD_MUTEX_INITIALIZER;
44 static int semundo_clear(int, int);
/* NOTE(review): the function header is elided in this listing (source line
 * numbers jump); the error string below indicates this is put_shmdata():
 * drop a usage reference on the shm segment backing semaphore `id`, and
 * once it is unused AND marked removed, detach it, clear the process's
 * undo entries for it, and (unless another process already removed it)
 * tell the daemon to destroy the segment. */
48 struct shm_data *data;
51 SYSV_MUTEX_LOCK(&lock_resources);
52 data = _hash_lookup(shmres, id);
54 sysv_print_err("something wrong put_shmdata\n");
55 goto done; /* It should not reach here. */
/* Last reference gone and segment flagged removed: tear it down. */
59 if (data->used == 0 && data->removed) {
60 sysv_print("really remove the sem\n");
61 SYSV_MUTEX_UNLOCK(&lock_resources);
62 /* OBS: Even if the shmctl fails (the thread doesn't
63 * have IPC_M permissions), all structures associated
64 * with it will be removed in the current process.*/
65 sysvipc_shmdt(data->internal);
66 semundo_clear(id, -1);
67 if (data->removed == SEG_ALREADY_REMOVED)
68 return 1; /* The semaphore was removed
69 by another process so there is nothing else
71 /* Else inform the daemon that the segment is removed. */
72 return (sysvipc_shmctl(id, IPC_RMID, NULL));
77 SYSV_MUTEX_UNLOCK(&lock_resources);
/*
 * Resolve a semaphore id to its mapped semid_pool.
 * Thin wrapper over get_shmdata(): looks up (and reference-counts) the shm
 * segment backing `semid`, then returns its mapped address cast to the
 * semaphore pool type. Returns NULL with errno set by get_shmdata on
 * failure (error path partially elided in this listing).
 */
81 static struct semid_pool*
82 get_semaptr(int semid, int to_remove, int shm_access) {
83 struct semid_pool *semaptr;
85 struct shm_data *shmdata = get_shmdata(semid, to_remove, shm_access);
87 /* Error is set in get_shmdata. */
91 semaptr = (struct semid_pool *)shmdata->internal;
/*
 * Check whether the semaphore backing `semaptr` is still the one `semid`
 * refers to: a generation of -1 means destroyed, and a sequence-number
 * mismatch means the slot was recycled for a different semaphore.
 */
102 sema_exist(int semid, struct semid_pool *semaptr) {
103 /* Was it removed? */
104 if (semaptr->gen == -1 ||
105 semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid))
111 /* This is the function called when the semaphore
112 * is discovered as removed. It marks the process's
113 * internal data and munmaps the segment. */
115 mark_for_removal(int shmid) {
116 sysv_print("Mark that the segment was removed\n");
117 get_shmdata(shmid, SEG_ALREADY_REMOVED, 0);
118 /* Setting SEG_ALREADY_REMOVED parameter, when put_shmdata
119 * is called, the internal resources will be freed.
121 /* Decrement the "usage" field. */
/*
 * Acquire the semaphore pool's read lock plus its mutex, then verify the
 * semaphore still exists. On staleness, both locks are released and the
 * local bookkeeping is torn down via mark_for_removal(); return value
 * convention (per callers: -1 on failure) — return lines elided here.
 * NOTE(review): pointers printed with %x — should be %p; mismatched
 * printf specifier/argument types is undefined behavior.
 */
126 try_rwlock_rdlock(int semid, struct semid_pool *semaptr) {
127 sysv_print(" before rd lock id = %d %x\n", semid, semaptr);
129 sysv_rwlock_rdlock(&semaptr->rwlock);
130 sysv_print("rd lock id = %d\n", semid);
132 sysv_mutex_lock(&semaptr->mutex);
133 sysv_print("lock id = %d\n", semid);
135 if (!sema_exist(semid, semaptr)) {
137 sysv_print("error sema %d doesn't exist\n", semid);
139 sysv_rwlock_unlock(&semaptr->rwlock);
141 sysv_mutex_unlock(&semaptr->mutex);
143 /* Internal resources must be freed. */
144 mark_for_removal(semid);
/*
 * Write-lock counterpart of try_rwlock_rdlock(): take the pool's write
 * lock and mutex, verify the semaphore still exists, and on staleness
 * release both locks and free local state via mark_for_removal().
 * NOTE(review): same %x-for-pointer printf mismatch as the rdlock variant.
 */
151 try_rwlock_wrlock(int semid, struct semid_pool *semaptr) {
153 sysv_print("before wrlock id = %d %x\n", semid, semaptr);
154 sysv_rwlock_wrlock(&semaptr->rwlock);
156 sysv_print("before lock id = %d %x\n", semid, semaptr);
157 sysv_mutex_lock(&semaptr->mutex);
159 sysv_print("lock id = %d\n", semid);
160 if (!sema_exist(semid, semaptr)) {
162 sysv_print("error sema %d doesn't exist\n", semid);
164 sysv_rwlock_unlock(&semaptr->rwlock);
166 sysv_mutex_unlock(&semaptr->mutex);
168 /* Internal resources must be freed. */
169 mark_for_removal(semid);
/*
 * Release the pool's rwlock and mutex taken by try_rwlock_{rd,wr}lock().
 * If the semaphore vanished while we held the lock, also mark the local
 * state for removal before unlocking.
 * NOTE(review): %x used for a pointer here too — should be %p.
 */
176 rwlock_unlock(int semid, struct semid_pool *semaptr) {
177 sysv_print("unlock id = %d %x\n", semid, semaptr);
178 if (!sema_exist(semid, semaptr)) {
179 /* Internal resources must be freed. */
180 mark_for_removal(semid);
185 sysv_rwlock_unlock(&semaptr->rwlock);
187 sysv_mutex_unlock(&semaptr->mutex);
/*
 * semget(2) replacement: a semaphore set is implemented as one shm segment
 * holding a semid_pool header followed by `nsems` struct sem entries.
 * Creates/looks up the segment via _shmget(..., SEMGET), records the
 * requested access mode, and maps it; on map failure the segment is
 * removed again. Returns the semaphore id (error paths partially elided).
 * NOTE(review): `nsems * sizeof(struct sem)` is an unchecked product —
 * a hostile nsems could overflow `int size`; verify limits upstream.
 */
193 sysvipc_semget(key_t key, int nsems, int semflg) {
197 int size = sizeof(struct semid_pool) + nsems * sizeof(struct sem);
199 //TODO resources limits
200 sysv_print("handle semget\n");
202 semid = _shmget(key, size, semflg, SEMGET);
204 /* errno already set. */
208 /* If the semaphore is in process of being removed there are two cases:
209 * - the daemon knows that and it will handle this situation.
210 * - one of the threads from this address space remove it and the daemon
211 * wasn't announced yet; in this scenario, the semaphore is marked
212 * using "removed" field of shm_data and future calls will return
217 /* Set access type. */
218 shm_access = semflg & (IPC_W | IPC_R);
219 if(set_shmdata_access(semid, shm_access) != 0) {
220 /* errno already set. */
224 shmaddr = sysvipc_shmat(semid, NULL, 0);
227 sysvipc_shmctl(semid, IPC_RMID, NULL);
231 //TODO more semaphores in a single file
234 sysv_print("end handle semget %d\n", semid);
/*
 * Remove this process's SEM_UNDO entries for semaphore `semid`;
 * semnum == -1 clears entries for every semaphore in the set, otherwise
 * only the matching semaphore number. Entries are deleted by moving the
 * tail entry into the freed slot (compaction). Runs under lock_undo.
 */
239 semundo_clear(int semid, int semnum)
244 sysv_print("semundo clear\n");
246 SYSV_MUTEX_LOCK(&lock_undo);
250 sunptr = &undos->un_ent[0];
253 while (i < undos->un_cnt) {
254 if (sunptr->un_id == semid) {
255 if (semnum == -1 || sunptr->un_num == semnum) {
257 if (i < undos->un_cnt) {
/* NOTE(review): as shown, the next line is an expression with no effect;
 * the assignment target (presumably `undos->un_ent[i] =`) appears to be
 * on an elided line — confirm against the full source. */
259 undos->un_ent[undos->un_cnt];
270 //TODO Shrink memory if case; not sure if necessary
272 SYSV_MUTEX_UNLOCK(&lock_undo);
273 sysv_print("end semundo clear\n");
/*
 * semctl(2) replacement. Permission/segment-level commands (IPC_SET,
 * IPC_RMID, IPC_STAT) are largely forwarded to sysvipc_shmctl() on the
 * backing shm segment; per-semaphore commands (GETNCNT, GETPID, GETVAL,
 * GETALL, GETZCNT, SETVAL, SETALL) operate on the mapped pool under the
 * pool rwlock — read lock for getters, write lock for setters. Several
 * case labels and error checks are elided in this listing.
 */
278 sysvipc_semctl(int semid, int semnum , int cmd, union semun arg) {
280 struct semid_pool *semaptr = NULL;
281 struct sem *semptr = NULL;
282 struct shmid_ds shmds;
285 /*if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
289 sysv_print("semctl cmd = %d\n", cmd);
294 case IPC_SET: /* Originally was IPC_M but this is checked
/* For IPC_RMID, get_semaptr also flags the segment for removal. */
312 semaptr = get_semaptr(semid, cmd==IPC_RMID, shm_access);
314 /* errno already set. */
320 /* Mark that the segment is removed. This is done in
321 * get_semaptr call in order to announce other processes.
322 * It will be actually removed after put_shmdata call and
323 * not other thread from this address space use shm_data
/* IPC_SET: copy the caller's ipc_perm into a shmid_ds and forward.
 * NOTE(review): sizeof(shmds)/sizeof(unsigned char) equals sizeof(shmds)
 * (sizeof(unsigned char) is 1 by definition) — harmless but misleading;
 * the divisor should be dropped. */
334 memset(&shmds, 0, sizeof(shmds)/sizeof(unsigned char));
335 memcpy(&shmds.shm_perm, &arg.buf->sem_perm,
336 sizeof(struct ipc_perm));
337 error = sysvipc_shmctl(semid, cmd, &shmds);
338 /* OBS: didn't update ctime and mode as in kernel implementation
339 * it is done. Those fields are already updated for shmid_ds
340 * struct when calling shmctl
/* IPC_STAT: fetch segment stats, then translate back into semid_ds;
 * sem_nsems is reconstructed from the segment size. */
350 error = sysvipc_shmctl(semid, cmd, &shmds);
354 memcpy(&arg.buf->sem_perm, &shmds.shm_perm,
355 sizeof(struct ipc_perm));
356 arg.buf->sem_nsems = (shmds.shm_segsz - sizeof(struct semid_pool)) /
358 arg.buf->sem_ctime = shmds.shm_ctime;
360 /* otime is semaphore specific so read it from
363 error = try_rwlock_rdlock(semid, semaptr);
366 arg.buf->sem_otime = semaptr->ds.sem_otime;
367 rwlock_unlock(semid, semaptr);
/* GETNCNT: count of waiters wanting the value to increase. */
371 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
376 error = try_rwlock_rdlock(semid, semaptr);
379 error = semaptr->ds.sem_base[semnum].semncnt;
380 rwlock_unlock(semid, semaptr);
/* GETPID: pid of the last process that operated on this semaphore. */
384 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
389 error = try_rwlock_rdlock(semid, semaptr);
392 error = semaptr->ds.sem_base[semnum].sempid;
393 rwlock_unlock(semid, semaptr);
/* GETVAL: current value of one semaphore. */
397 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
402 error = try_rwlock_rdlock(semid, semaptr);
405 error = semaptr->ds.sem_base[semnum].semval;
406 rwlock_unlock(semid, semaptr);
/* GETALL: copy every semaphore value into arg.array. */
415 error = try_rwlock_rdlock(semid, semaptr);
418 for (i = 0; i < semaptr->ds.sem_nsems; i++) {
419 arg.array[i] = semaptr->ds.sem_base[i].semval;
421 rwlock_unlock(semid, semaptr);
/* GETZCNT: count of waiters waiting for the value to become zero. */
425 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
430 error = try_rwlock_rdlock(semid, semaptr);
433 error = semaptr->ds.sem_base[semnum].semzcnt;
434 rwlock_unlock(semid, semaptr);
/* SETVAL: write one value, discard undo state for that semaphore, and
 * wake anyone sleeping on it via umtx_wakeup. */
438 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
443 error = try_rwlock_wrlock(semid, semaptr);
446 semptr = &semaptr->ds.sem_base[semnum];
447 semptr->semval = arg.val;
448 semundo_clear(semid, semnum);
449 if (semptr->semzcnt || semptr->semncnt)
450 umtx_wakeup((int *)&semptr->semval, 0);
451 rwlock_unlock(semid, semaptr);
/* SETALL: same as SETVAL but for every semaphore in the set. */
460 error = try_rwlock_wrlock(semid, semaptr);
463 for (i = 0; i < semaptr->ds.sem_nsems; i++) {
464 semptr = &semaptr->ds.sem_base[i];
465 semptr->semval = arg.array[i];
466 if (semptr->semzcnt || semptr->semncnt)
467 umtx_wakeup((int *)&semptr->semval, 0);
469 semundo_clear(semid, -1);
470 rwlock_unlock(semid, semaptr);
480 sysv_print("end semctl\n");
485 * Adjust a particular entry for a particular proc
/*
 * Record an undo adjustment (semid, semnum, adjval) in the per-process
 * undo list. The list lives in a lazily-created private shm segment so
 * the daemon can apply the undos if the client dies; the segment is grown
 * (detach + re-attach after the daemon resizes) when full. Runs under
 * lock_undo. An existing matching entry is adjusted in place and deleted
 * when its adjustment reaches zero; otherwise a new entry is appended.
 */
488 semundo_adjust(int semid, int semnum, int adjval)
496 struct shm_data *data;
498 sysv_print("semundo adjust\n");
502 SYSV_MUTEX_LOCK(&lock_undo);
/* First call in this process: create and map the undo segment. */
504 sysv_print("get undo segment\n");
505 undoid = _shmget(IPC_PRIVATE, PAGE_SIZE, IPC_CREAT | IPC_EXCL | 0600,
508 sysv_print_err("no undo segment\n");
512 addr = sysvipc_shmat(undoid, NULL, 0);
514 sysv_print_err("can not map undo segment\n");
515 sysvipc_shmctl(undoid, IPC_RMID, NULL);
519 undos = (struct sem_undo *)addr;
525 * Look for the requested entry and adjust it (delete if adjval becomes
528 sunptr = &undos->un_ent[0];
529 for (i = 0; i < undos->un_cnt; i++, sunptr++) {
/* NOTE(review): with &&, only entries differing in BOTH id and num are
 * skipped, so an entry with the same id but a different semnum would be
 * adjusted; the match/skip test likely wants || — confirm against the
 * full source before changing. */
530 if (sunptr->un_id != semid && sunptr->un_num != semnum)
532 sunptr->un_adjval += adjval;
533 if (sunptr->un_adjval == 0) {
/* Delete by moving the tail entry into the hole (un_cnt decrement is on
 * an elided line — presumably before this copy). */
535 if (i < undos->un_cnt)
536 undos->un_ent[i] = undos->un_ent[undos->un_cnt];
541 /* Didn't find the right entry - create it */
/* NOTE(review): the per-entry term uses sizeof(struct sem_undo) (the
 * header type) rather than the entry type of un_ent[] — looks suspect;
 * verify the struct layout in the full source. */
542 size = sizeof(struct sem_undo) + (undos->un_cnt + 1) *
543 sizeof(struct sem_undo);
544 if (size > (unsigned int)(undos->un_pages * PAGE_SIZE)) {
545 sysv_print("need more undo space\n");
546 sysvipc_shmdt(undos);
549 SYSV_MUTEX_LOCK(&lock_resources);
550 data = _hash_lookup(shmaddrs, (u_long)undos);
551 SYSV_MUTEX_UNLOCK(&lock_resources);
553 /* It is not necessary any lock on "size" because it is used
554 * only by shmat and shmdt.
555 * shmat for undoid is called only from this function and it
556 * is protected by undo_lock.
557 * shmdt for undoid is not called anywhere because the segment
558 * is destroyed by the daemon when the client dies.
560 data->size = undos->un_pages * PAGE_SIZE;
561 undos = sysvipc_shmat(data->shmid, NULL, 0);
/* Append the new entry at the tail. */
564 sunptr = &undos->un_ent[undos->un_cnt];
566 sunptr->un_adjval = adjval;
567 sunptr->un_id = semid;
568 sunptr->un_num = semnum;
569 //if (suptr->un_cnt == seminfo.semume) TODO move it in daemon
571 error = EINVAL; // done via notification (translated from Romanian)
574 SYSV_MUTEX_UNLOCK(&lock_undo);
576 sysv_print("semundo adjust end\n");
/*
 * semop(2) replacement. Attempts to apply the whole `sops` vector
 * atomically under the pool's read lock, taking each semaphore's own
 * mutex per operation. If an operation cannot be satisfied, everything
 * applied so far is rolled back and the thread sleeps on the semaphore's
 * value word via umtx_sleep (bounded by SYSV_TIMEOUT to survive lost
 * wakeups), revalidating the semaphore's existence on wakeup. SEM_UNDO
 * requests are recorded through semundo_adjust, with full rollback if
 * undo space runs out. Many lines are elided in this listing.
 */
580 int sysvipc_semop (int semid, struct sembuf *sops, unsigned nsops) {
581 struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
582 struct sembuf *sopptr;
583 struct sem *semptr = NULL;
584 struct sem *xsemptr = NULL;
590 sysv_print("[client %d] call to semop(%d, %u)\n",
591 getpid(), semid, nsops);
593 /*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
597 semaptr = get_semaptr(semid, 0, IPC_W);
604 if (try_rwlock_rdlock(semid, semaptr) == -1) {
606 if (try_rwlock_wrlock(semid, semaptr) == -1) {
608 sysv_print("sema removed\n");
613 if (nsops > MAX_SOPS) {
/* NOTE(review): three arguments for two conversion specifiers — either
 * getpid() is a stray argument or the format lost a "[client %d]"
 * prefix; mismatched printf arguments are undefined behavior. */
614 sysv_print("too many sops (max=%d, nsops=%u)\n",
615 getpid(), MAX_SOPS, nsops);
621 * Loop trying to satisfy the vector of requests.
622 * If we reach a point where we must wait, any requests already
623 * performed are rolled back and we go to sleep until some other
624 * process wakes us up. At this point, we start all over again.
626 * This ensures that from the perspective of other tasks, a set
627 * of requests is atomic (never partially satisfied).
635 for (i = 0; i < (int)nsops; i++) {
638 if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
643 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
645 sysv_mutex_lock(&semptr->sem_mutex);
647 sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
648 sopptr->sem_num, semptr->semval, sopptr->sem_op,
649 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
/* sem_op < 0: "take" — apply only if the value stays non-negative,
 * waking zero-waiters when the value hits 0. */
651 if (sopptr->sem_op < 0) {
652 if (semptr->semval + sopptr->sem_op < 0) {
653 sysv_print("semop: can't do it now\n");
656 semptr->semval += sopptr->sem_op;
657 if (semptr->semval == 0 &&
659 umtx_wakeup((int *)&semptr->semval, 0);
661 if (sopptr->sem_flg & SEM_UNDO)
/* sem_op == 0: wait-for-zero — satisfiable only when semval is 0. */
663 } else if (sopptr->sem_op == 0) {
664 if (semptr->semval > 0) {
665 sysv_print("semop: not zero now\n");
/* sem_op > 0: "give" — always succeeds; wake decrement-waiters. */
669 semptr->semval += sopptr->sem_op;
670 if (sopptr->sem_flg & SEM_UNDO)
672 if (semptr->semncnt > 0)
673 umtx_wakeup((int *)&semptr->semval, 0);
676 sysv_mutex_unlock(&semptr->sem_mutex);
681 * Did we get through the entire vector?
686 if (sopptr->sem_op == 0)
691 sysv_mutex_unlock(&semptr->sem_mutex);
694 * Rollback the semaphores we had acquired.
696 sysv_print("semop: rollback 0 through %d\n", i-1);
697 for (j = 0; j < i; j++) {
698 xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
/* NOTE(review): this rollback locks semptr->sem_mutex (the semaphore
 * that failed) while modifying xsemptr (the one being rolled back) —
 * looks like the wrong mutex; confirm against the full source. */
700 sysv_mutex_lock(&semptr->sem_mutex);
702 xsemptr->semval -= sops[j].sem_op;
703 if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
704 umtx_wakeup((int *)&xsemptr->semval, 0);
705 if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
706 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
708 sysv_mutex_unlock(&semptr->sem_mutex);
713 * If the request that we couldn't satisfy has the
714 * NOWAIT flag set then return with EAGAIN.
716 if (sopptr->sem_flg & IPC_NOWAIT) {
722 * Release semaptr->lock while sleeping, allowing other
723 * semops (like SETVAL, SETALL, etc), which require an
724 * exclusive lock and might wake us up.
726 * Reload and recheck the validity of semaptr on return.
727 * Note that semptr itself might have changed too, but
728 * we've already interlocked for semptr and that is what
729 * will be woken up if it wakes up the tsleep on a MP
734 sysv_print("semop: good night!\n");
735 val_to_sleep = semptr->semval;
736 rwlock_unlock(semid, semaptr);
739 /* We don't sleep more than SYSV_TIMEOUT because we could
740 * go to sleep after another process calls wakeup and remain
743 eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
744 /* return code is checked below, after sem[nz]cnt-- */
747 * Make sure that the semaphore still exists
750 /* Check if another thread didn't remove the semaphore. */
751 auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
757 if (auxsemaptr != semaptr) {
762 /* Check if another process didn't remove the semaphore. */
764 if (try_rwlock_rdlock(semid, semaptr) == -1) {
766 if (try_rwlock_wrlock(semid, semaptr) == -1) {
771 sysv_print("semop: good morning (eval=%d)!\n", eval);
773 /* The semaphore is still alive. Readjust the count of
776 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
778 sysv_mutex_lock(&semptr->sem_mutex);
780 if (sopptr->sem_op == 0)
785 sysv_mutex_unlock(&semptr->sem_mutex);
789 * Is it really morning, or was our sleep interrupted?
790 * (Delayed check of tsleep() return code because we
791 * need to decrement sem[nz]cnt either way.)
798 sysv_print("semop: good morning!\n");
804 * Process any SEM_UNDO requests.
807 for (i = 0; i < (int)nsops; i++) {
809 * We only need to deal with SEM_UNDO's for non-zero
814 if ((sops[i].sem_flg & SEM_UNDO) == 0)
816 adjval = sops[i].sem_op;
/* The undo entry stores the negation so applying it reverses the op. */
819 eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
824 * Oh-Oh! We ran out of either sem_undo's or undo's.
825 * Rollback the adjustments to this point and then
826 * rollback the semaphore ups and down so we can return
827 * with an error with all structures restored. We
828 * rollback the undo's in the exact reverse order that
829 * we applied them. This guarantees that we won't run
830 * out of space as we roll things back out.
832 for (j = i - 1; j >= 0; j--) {
833 if ((sops[j].sem_flg & SEM_UNDO) == 0)
835 adjval = sops[j].sem_op;
838 if (semundo_adjust(semid, sops[j].sem_num,
840 sysv_print("semop - can't undo undos");
843 for (j = 0; j < (int)nsops; j++) {
844 xsemptr = &semaptr->ds.sem_base[
/* NOTE(review): same lock-target mismatch as the earlier rollback —
 * locks semptr->sem_mutex while mutating xsemptr. */
847 sysv_mutex_lock(&semptr->sem_mutex);
849 xsemptr->semval -= sops[j].sem_op;
850 if (xsemptr->semval == 0 &&
851 xsemptr->semzcnt > 0)
852 umtx_wakeup((int *)&xsemptr->semval, 0);
853 if (xsemptr->semval <= 0 &&
854 xsemptr->semncnt > 0)
855 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
857 sysv_mutex_unlock(&semptr->sem_mutex);
861 sysv_print("eval = %d from semundo_adjust\n", eval);
866 /* Set sempid field for each semaphore. */
867 for (i = 0; i < (int)nsops; i++) {
869 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
871 sysv_mutex_lock(&semptr->sem_mutex);
873 semptr->sempid = getpid();
875 sysv_mutex_unlock(&semptr->sem_mutex);
879 sysv_print("semop: done\n");
880 semaptr->ds.sem_otime = time(NULL);
882 rwlock_unlock(semid, semaptr);