1 /* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
4 * Implementation of SVID semaphores
6 * Author: Daniel Boulet
7 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
9 * This software is provided ``AS IS'' without any warranties of any kind.
12 #include "namespace.h"
20 #include <sys/param.h>
21 #include <sys/queue.h>
24 #include "un-namespace.h"
26 #include "sysvipc_lock.h"
27 #include "sysvipc_ipc.h"
28 #include "sysvipc_shm.h"
29 #include "sysvipc_sem.h"
30 #include "sysvipc_hash.h"
33 #define SYSV_MUTEX_LOCK(x) if (__isthreaded) _pthread_mutex_lock(x)
34 #define SYSV_MUTEX_UNLOCK(x) if (__isthreaded) _pthread_mutex_unlock(x)
35 #define SYSV_MUTEX_DESTROY(x) if (__isthreaded) _pthread_mutex_destroy(x)
37 extern struct hashtable *shmaddrs;
38 extern struct hashtable *shmres;
39 extern pthread_mutex_t lock_resources;
41 struct sem_undo *undos = NULL;
42 pthread_mutex_t lock_undo = PTHREAD_MUTEX_INITIALIZER;
44 static int semundo_clear(int, int);
49 struct shm_data *data;
52 SYSV_MUTEX_LOCK(&lock_resources);
53 data = _hash_lookup(shmres, id);
55 sysv_print_err("something wrong put_shmdata\n");
56 goto done; /* It should not reach here. */
60 if (data->used == 0 && data->removed) {
61 sysv_print("really remove the sem\n");
62 SYSV_MUTEX_UNLOCK(&lock_resources);
63 /* OBS: Even if the shmctl fails (the thread doesn't
64 * have IPC_M permissions), all structures associated
65 * with it will be removed in the current process.*/
66 sysvipc_shmdt(data->internal);
67 semundo_clear(id, -1);
68 if (data->removed == SEG_ALREADY_REMOVED)
69 return 1; /* The semaphore was removed
70 by another process so there is nothing else
72 /* Else inform the daemon that the segment is removed. */
73 return (sysvipc_shmctl(id, IPC_RMID, NULL));
78 SYSV_MUTEX_UNLOCK(&lock_resources);
82 static struct semid_pool *
83 get_semaptr(int semid, int to_remove, int shm_access)
85 struct semid_pool *semaptr;
87 struct shm_data *shmdata = get_shmdata(semid, to_remove, shm_access);
89 /* Error is set in get_shmdata. */
93 semaptr = (struct semid_pool *)shmdata->internal;
104 sema_exist(int semid, struct semid_pool *semaptr)
106 /* Was it removed? */
107 if (semaptr->gen == -1 ||
108 semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid))
114 /* This is the function called when the semaphore
115  * is discovered as removed. It marks the process's
116  * internal data and munmaps the associated segment. */
118 mark_for_removal(int shmid)
120 sysv_print("Mark that the segment was removed\n");
121 get_shmdata(shmid, SEG_ALREADY_REMOVED, 0);
122 /* Setting SEG_ALREADY_REMOVED parameter, when put_shmdata
123 * is called, the internal resources will be freed.
125 /* Decrement the "usage" field. */
130 try_rwlock_rdlock(int semid, struct semid_pool *semaptr)
132 sysv_print(" before rd lock id = %d %p\n", semid, semaptr);
134 sysv_rwlock_rdlock(&semaptr->rwlock);
135 sysv_print("rd lock id = %d\n", semid);
137 sysv_mutex_lock(&semaptr->mutex);
138 sysv_print("lock id = %d\n", semid);
140 if (!sema_exist(semid, semaptr)) {
142 sysv_print("error sema %d doesn't exist\n", semid);
144 sysv_rwlock_unlock(&semaptr->rwlock);
146 sysv_mutex_unlock(&semaptr->mutex);
148 /* Internal resources must be freed. */
149 mark_for_removal(semid);
156 try_rwlock_wrlock(int semid, struct semid_pool *semaptr)
159 sysv_print("before wrlock id = %d %p\n", semid, semaptr);
160 sysv_rwlock_wrlock(&semaptr->rwlock);
162 sysv_print("before lock id = %d %x\n", semid, semaptr);
163 sysv_mutex_lock(&semaptr->mutex);
165 sysv_print("lock id = %d\n", semid);
166 if (!sema_exist(semid, semaptr)) {
168 sysv_print("error sema %d doesn't exist\n", semid);
170 sysv_rwlock_unlock(&semaptr->rwlock);
172 sysv_mutex_unlock(&semaptr->mutex);
174 /* Internal resources must be freed. */
175 mark_for_removal(semid);
182 rwlock_unlock(int semid, struct semid_pool *semaptr)
184 sysv_print("unlock id = %d %p\n", semid, semaptr);
185 if (!sema_exist(semid, semaptr)) {
186 /* Internal resources must be freed. */
187 mark_for_removal(semid);
192 sysv_rwlock_unlock(&semaptr->rwlock);
194 sysv_mutex_unlock(&semaptr->mutex);
200 sysvipc_semget(key_t key, int nsems, int semflg)
205 int size = sizeof(struct semid_pool) + nsems * sizeof(struct sem);
207 //TODO resources limits
208 sysv_print("handle semget\n");
210 semid = _shmget(key, size, semflg, SEMGET);
212 /* errno already set. */
216 /* If the semaphore is in process of being removed there are two cases:
217 * - the daemon knows that and it will handle this situation.
218 * - one of the threads from this address space remove it and the daemon
219 * wasn't announced yet; in this scenario, the semaphore is marked
220 * using "removed" field of shm_data and future calls will return
225 /* Set access type. */
226 shm_access = semflg & (IPC_W | IPC_R);
227 if(set_shmdata_access(semid, shm_access) != 0) {
228 /* errno already set. */
232 shmaddr = sysvipc_shmat(semid, NULL, 0);
235 sysvipc_shmctl(semid, IPC_RMID, NULL);
239 //TODO more semaphores in a single file
242 sysv_print("end handle semget %d\n", semid);
247 semundo_clear(int semid, int semnum)
252 sysv_print("semundo clear\n");
254 SYSV_MUTEX_LOCK(&lock_undo);
258 sunptr = &undos->un_ent[0];
261 while (i < undos->un_cnt) {
262 if (sunptr->un_id == semid) {
263 if (semnum == -1 || sunptr->un_num == semnum) {
265 if (i < undos->un_cnt) {
267 undos->un_ent[undos->un_cnt];
278 //TODO Shrink memory if case; not sure if necessary
280 SYSV_MUTEX_UNLOCK(&lock_undo);
281 sysv_print("end semundo clear\n");
286 sysvipc___semctl(int semid, int semnum, int cmd, union semun *arg)
289 struct semid_pool *semaptr = NULL;
290 struct sem *semptr = NULL;
291 struct shmid_ds shmds;
294 /*if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
298 sysv_print("semctl cmd = %d\n", cmd);
303 case IPC_SET: /* Originally was IPC_M but this is checked
321 semaptr = get_semaptr(semid, cmd==IPC_RMID, shm_access);
323 /* errno already set. */
329 /* Mark that the segment is removed. This is done in
330 * get_semaptr call in order to announce other processes.
331 * It will be actually removed after put_shmdata call and
332 * not other thread from this address space use shm_data
343 memset(&shmds, 0, sizeof(shmds)/sizeof(unsigned char));
344 memcpy(&shmds.shm_perm, &arg->buf->sem_perm,
345 sizeof(struct ipc_perm));
346 error = sysvipc_shmctl(semid, cmd, &shmds);
347 /* OBS: didn't update ctime and mode as in kernel implementation
348 * it is done. Those fields are already updated for shmid_ds
349 * struct when calling shmctl
359 error = sysvipc_shmctl(semid, cmd, &shmds);
363 memcpy(&arg->buf->sem_perm, &shmds.shm_perm,
364 sizeof(struct ipc_perm));
365 arg->buf->sem_nsems = (shmds.shm_segsz - sizeof(struct semid_pool)) /
367 arg->buf->sem_ctime = shmds.shm_ctime;
369 /* otime is semaphore specific so read it from
372 error = try_rwlock_rdlock(semid, semaptr);
375 arg->buf->sem_otime = semaptr->ds.sem_otime;
376 rwlock_unlock(semid, semaptr);
380 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
385 error = try_rwlock_rdlock(semid, semaptr);
388 error = semaptr->ds.sem_base[semnum].semncnt;
389 rwlock_unlock(semid, semaptr);
393 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
398 error = try_rwlock_rdlock(semid, semaptr);
401 error = semaptr->ds.sem_base[semnum].sempid;
402 rwlock_unlock(semid, semaptr);
406 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
411 error = try_rwlock_rdlock(semid, semaptr);
414 error = semaptr->ds.sem_base[semnum].semval;
415 rwlock_unlock(semid, semaptr);
424 error = try_rwlock_rdlock(semid, semaptr);
427 for (i = 0; i < semaptr->ds.sem_nsems; i++) {
428 arg->array[i] = semaptr->ds.sem_base[i].semval;
430 rwlock_unlock(semid, semaptr);
434 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
439 error = try_rwlock_rdlock(semid, semaptr);
442 error = semaptr->ds.sem_base[semnum].semzcnt;
443 rwlock_unlock(semid, semaptr);
447 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
452 error = try_rwlock_wrlock(semid, semaptr);
455 semptr = &semaptr->ds.sem_base[semnum];
456 semptr->semval = arg->val;
457 semundo_clear(semid, semnum);
458 if (semptr->semzcnt || semptr->semncnt)
459 umtx_wakeup((int *)&semptr->semval, 0);
460 rwlock_unlock(semid, semaptr);
469 error = try_rwlock_wrlock(semid, semaptr);
472 for (i = 0; i < semaptr->ds.sem_nsems; i++) {
473 semptr = &semaptr->ds.sem_base[i];
474 semptr->semval = arg->array[i];
475 if (semptr->semzcnt || semptr->semncnt)
476 umtx_wakeup((int *)&semptr->semval, 0);
478 semundo_clear(semid, -1);
479 rwlock_unlock(semid, semaptr);
489 sysv_print("end semctl\n");
494 * Adjust a particular entry for a particular proc
497 semundo_adjust(int semid, int semnum, int adjval)
505 struct shm_data *data;
507 sysv_print("semundo adjust\n");
511 SYSV_MUTEX_LOCK(&lock_undo);
513 sysv_print("get undo segment\n");
514 undoid = _shmget(IPC_PRIVATE, PAGE_SIZE, IPC_CREAT | IPC_EXCL | 0600,
517 sysv_print_err("no undo segment\n");
521 addr = sysvipc_shmat(undoid, NULL, 0);
523 sysv_print_err("can not map undo segment\n");
524 sysvipc_shmctl(undoid, IPC_RMID, NULL);
528 undos = (struct sem_undo *)addr;
534 * Look for the requested entry and adjust it (delete if adjval becomes
537 sunptr = &undos->un_ent[0];
538 for (i = 0; i < undos->un_cnt; i++, sunptr++) {
539 if (sunptr->un_id != semid && sunptr->un_num != semnum)
541 sunptr->un_adjval += adjval;
542 if (sunptr->un_adjval == 0) {
544 if (i < undos->un_cnt)
545 undos->un_ent[i] = undos->un_ent[undos->un_cnt];
550 /* Didn't find the right entry - create it */
551 size = sizeof(struct sem_undo) + (undos->un_cnt + 1) *
552 sizeof(struct sem_undo);
553 if (size > (unsigned int)(undos->un_pages * PAGE_SIZE)) {
554 sysv_print("need more undo space\n");
555 sysvipc_shmdt(undos);
558 SYSV_MUTEX_LOCK(&lock_resources);
559 data = _hash_lookup(shmaddrs, (u_long)undos);
560 SYSV_MUTEX_UNLOCK(&lock_resources);
562 /* It is not necessary any lock on "size" because it is used
563 * only by shmat and shmdt.
564 * shmat for undoid is called only from this function and it
565 * is protected by undo_lock.
566 * shmdt for undoid is not called anywhere because the segment
567 * is destroyed by the daemon when the client dies.
569 data->size = undos->un_pages * PAGE_SIZE;
570 undos = sysvipc_shmat(data->shmid, NULL, 0);
573 sunptr = &undos->un_ent[undos->un_cnt];
575 sunptr->un_adjval = adjval;
576 sunptr->un_id = semid;
577 sunptr->un_num = semnum;
578 //if (suptr->un_cnt == seminfo.semume) TODO move it in daemon
580 error = EINVAL; //se face prin notificare
583 SYSV_MUTEX_UNLOCK(&lock_undo);
585 sysv_print("semundo adjust end\n");
590 sysvipc_semop(int semid, struct sembuf *sops, unsigned nsops)
592 struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
593 struct sembuf *sopptr;
594 struct sem *semptr = NULL;
595 struct sem *xsemptr = NULL;
601 sysv_print("[client %d] call to semop(%d, %u)\n",
602 getpid(), semid, nsops);
604 /*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
608 semaptr = get_semaptr(semid, 0, IPC_W);
615 if (try_rwlock_rdlock(semid, semaptr) == -1) {
617 if (try_rwlock_wrlock(semid, semaptr) == -1) {
619 sysv_print("sema removed\n");
624 if (nsops > MAX_SOPS) {
625 sysv_print("too many sops (max=%d, nsops=%u)\n",
632 * Loop trying to satisfy the vector of requests.
633 * If we reach a point where we must wait, any requests already
634 * performed are rolled back and we go to sleep until some other
635 * process wakes us up. At this point, we start all over again.
637 * This ensures that from the perspective of other tasks, a set
638 * of requests is atomic (never partially satisfied).
646 for (i = 0; i < (int)nsops; i++) {
649 if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
654 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
656 sysv_mutex_lock(&semptr->sem_mutex);
658 sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
659 sopptr->sem_num, semptr->semval, sopptr->sem_op,
660 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
662 if (sopptr->sem_op < 0) {
663 if (semptr->semval + sopptr->sem_op < 0) {
664 sysv_print("semop: can't do it now\n");
667 semptr->semval += sopptr->sem_op;
668 if (semptr->semval == 0 &&
670 umtx_wakeup((int *)&semptr->semval, 0);
672 if (sopptr->sem_flg & SEM_UNDO)
674 } else if (sopptr->sem_op == 0) {
675 if (semptr->semval > 0) {
676 sysv_print("semop: not zero now\n");
680 semptr->semval += sopptr->sem_op;
681 if (sopptr->sem_flg & SEM_UNDO)
683 if (semptr->semncnt > 0)
684 umtx_wakeup((int *)&semptr->semval, 0);
687 sysv_mutex_unlock(&semptr->sem_mutex);
692 * Did we get through the entire vector?
697 if (sopptr->sem_op == 0)
703	 * Get interlock value before releasing sem_mutex.
705	 * XXX horrible hack until we get a umtx_sleep16() (and a umtx_sleep64())
708 val_to_sleep = *(int *)&semptr->semval;
710 sysv_mutex_unlock(&semptr->sem_mutex);
713 * Rollback the semaphores we had acquired.
715 sysv_print("semop: rollback 0 through %d\n", i-1);
716 for (j = 0; j < i; j++) {
717 xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
719 sysv_mutex_lock(&xsemptr->sem_mutex);
721 xsemptr->semval -= sops[j].sem_op;
722 if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
723 umtx_wakeup((int *)&xsemptr->semval, 0);
724 if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
725 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
727 sysv_mutex_unlock(&xsemptr->sem_mutex);
732 * If the request that we couldn't satisfy has the
733 * NOWAIT flag set then return with EAGAIN.
735 if (sopptr->sem_flg & IPC_NOWAIT) {
741 * Release semaptr->lock while sleeping, allowing other
742 * semops (like SETVAL, SETALL, etc), which require an
743 * exclusive lock and might wake us up.
745 * Reload and recheck the validity of semaptr on return.
746 * Note that semptr itself might have changed too, but
747 * we've already interlocked for semptr and that is what
748 * will be woken up if it wakes up the tsleep on a MP
752 sysv_print("semop: good night!\n");
753 rwlock_unlock(semid, semaptr);
756 /* We don't sleep more than SYSV_TIMEOUT because we could
757 * go to sleep after another process calls wakeup and remain
760 eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
761 /* return code is checked below, after sem[nz]cnt-- */
764 * Make sure that the semaphore still exists
767 /* Check if another thread didn't remove the semaphore. */
768 auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
774 if (auxsemaptr != semaptr) {
779 /* Check if another process didn't remove the semaphore. */
781 if (try_rwlock_rdlock(semid, semaptr) == -1) {
783 if (try_rwlock_wrlock(semid, semaptr) == -1) {
788 sysv_print("semop: good morning (eval=%d)!\n", eval);
790 /* The semaphore is still alive. Readjust the count of
793 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
795 sysv_mutex_lock(&semptr->sem_mutex);
797 if (sopptr->sem_op == 0)
802 sysv_mutex_unlock(&semptr->sem_mutex);
806 * Is it really morning, or was our sleep interrupted?
807 * (Delayed check of tsleep() return code because we
808 * need to decrement sem[nz]cnt either way.)
810 * Always retry on EBUSY
812 if (eval == EAGAIN) {
817 sysv_print("semop: good morning!\n");
823 * Process any SEM_UNDO requests.
826 for (i = 0; i < (int)nsops; i++) {
828 * We only need to deal with SEM_UNDO's for non-zero
833 if ((sops[i].sem_flg & SEM_UNDO) == 0)
835 adjval = sops[i].sem_op;
838 eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
843 * Oh-Oh! We ran out of either sem_undo's or undo's.
844 * Rollback the adjustments to this point and then
845 * rollback the semaphore ups and down so we can return
846 * with an error with all structures restored. We
847 * rollback the undo's in the exact reverse order that
848 * we applied them. This guarantees that we won't run
849 * out of space as we roll things back out.
851 for (j = i - 1; j >= 0; j--) {
852 if ((sops[j].sem_flg & SEM_UNDO) == 0)
854 adjval = sops[j].sem_op;
857 if (semundo_adjust(semid, sops[j].sem_num,
859 sysv_print("semop - can't undo undos");
862 for (j = 0; j < (int)nsops; j++) {
863 xsemptr = &semaptr->ds.sem_base[
866 sysv_mutex_lock(&semptr->sem_mutex);
868 xsemptr->semval -= sops[j].sem_op;
869 if (xsemptr->semval == 0 &&
870 xsemptr->semzcnt > 0)
871 umtx_wakeup((int *)&xsemptr->semval, 0);
872 if (xsemptr->semval <= 0 &&
873 xsemptr->semncnt > 0)
874 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
876 sysv_mutex_unlock(&semptr->sem_mutex);
880 sysv_print("eval = %d from semundo_adjust\n", eval);
885 /* Set sempid field for each semaphore. */
886 for (i = 0; i < (int)nsops; i++) {
888 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
890 sysv_mutex_lock(&semptr->sem_mutex);
892 semptr->sempid = getpid();
894 sysv_mutex_unlock(&semptr->sem_mutex);
898 sysv_print("semop: done\n");
899 semaptr->ds.sem_otime = time(NULL);
901 rwlock_unlock(semid, semaptr);