/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 1998 Alex Nash
 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.15 2008/05/09 16:03:27 dillon Exp $
 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
 * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.7 2006/04/06 13:03:09 davidxu Exp $
 */

#include <sys/lwp.h>

#include <machine/atomic.h>
#include <machine/tls.h>

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

#include "sysvipc_utils.h"
#include "sysvipc_lock.h"
#include "sysvipc_lock_generic.h"

#define MAX_READ_LOCKS		(INT_MAX - 1)

/*
 * Count of read locks held by the current process across all SysV
 * rwlocks; used by sysv_rwlock_rdlock() to detect recursive read
 * locking.  Note that it is per process, not per thread.
 */
static int rdlock_count;

int
sysv_mutex_init(struct sysv_mutex *mutex)
{
	if (mutex == NULL)
		return (EINVAL);
	mutex->_mutex_static_lock = 0;
	mutex->pid_owner = -1;
	mutex->tid_owner = -1;
	return (0);
}

int
sysv_mutex_lock(struct sysv_mutex *mutex)
{
	if (mutex->pid_owner == getpid() &&
	    mutex->tid_owner == lwp_gettid()) {
		sysv_print_err("deadlock: mutex already acquired by this thread\n");
		return (EDEADLK);
	}
	_sysv_umtx_lock(&mutex->_mutex_static_lock);
	mutex->pid_owner = getpid();
	mutex->tid_owner = lwp_gettid();
	return (0);
}

int
sysv_mutex_unlock(struct sysv_mutex *mutex)
{
	if (mutex->pid_owner != getpid() ||
	    mutex->tid_owner != lwp_gettid()) {
		sysv_print_err("EPERM: attempt to unlock a mutex not owned by this thread\n");
		return (EPERM);
	}

	mutex->tid_owner = -1;
	mutex->pid_owner = -1;
	_sysv_umtx_unlock(&mutex->_mutex_static_lock);
	return (0);
}
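
/*
 * Illustrative sketch only (not compiled into the library): a
 * sysv_mutex must live in memory shared between the cooperating
 * processes, e.g. an anonymous MAP_SHARED mapping inherited across
 * fork().  The SYSV_LOCK_EXAMPLE guard and the mapping setup below
 * are assumptions for illustration, not part of this file's API.
 */
#ifdef SYSV_LOCK_EXAMPLE
#include <sys/mman.h>

static void
sysv_mutex_example(void)
{
	/* Shared, zero-filled mapping visible to parent and child. */
	struct sysv_mutex *m = mmap(NULL, sizeof(*m),
	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0);

	if (m == MAP_FAILED)
		return;

	sysv_mutex_init(m);
	if (sysv_mutex_lock(m) == 0) {
		/* ... modify the shared data the mutex protects ... */
		sysv_mutex_unlock(m);
	}
}
#endif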

static int
sysv_cond_wait(int *val, struct sysv_mutex *mutex)
{
	sysv_mutex_unlock(mutex);

	/*
	 * Sleep with SYSV_TIMEOUT to avoid losing a wakeup sent before
	 * this thread goes to sleep, which would otherwise leave it
	 * blocked forever.
	 */
	umtx_sleep(val, *val, SYSV_TIMEOUT);
	return (sysv_mutex_lock(mutex));
}

static int
sysv_cond_signal(int *val)
{
	return (umtx_wakeup(val, 0));
}
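
/*
 * Note on the wait/wakeup protocol above: umtx_sleep() goes to sleep
 * only if *val still equals the value passed in, and the signal side
 * never modifies *val, so a wakeup issued before the waiter actually
 * sleeps is lost.  The SYSV_TIMEOUT bound turns such a lost wakeup
 * into a bounded stall (the waiter re-checks its predicate after the
 * timeout) rather than a permanent hang.
 */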

int
sysv_rwlock_init(struct sysv_rwlock *rwlock)
{
	int ret = 0;

	if (rwlock == NULL)
		return (EINVAL);

	/* Initialize the lock. */
	sysv_mutex_init(&rwlock->lock);
	rwlock->state = 0;
	rwlock->blocked_writers = 0;

	return (ret);
}

int
sysv_rwlock_unlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	if (rwlock->state > 0) {
		/* Drop one read lock; wake a writer on the last one. */
		rdlock_count--;
		rwlock->state--;
		if (rwlock->state == 0 && rwlock->blocked_writers)
			ret = sysv_cond_signal(&rwlock->write_signal);
	} else if (rwlock->state < 0) {
		/* Releasing a write lock; writers get preference. */
		rwlock->state = 0;

		if (rwlock->blocked_writers)
			ret = sysv_cond_signal(&rwlock->write_signal);
		else
			ret = sysv_cond_signal(&rwlock->read_signal);
	} else
		ret = EINVAL;

	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}

int
sysv_rwlock_wrlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	while (rwlock->state != 0) {
		rwlock->blocked_writers++;

		ret = sysv_cond_wait(&rwlock->write_signal, &rwlock->lock);
		if (ret != 0) {
			rwlock->blocked_writers--;
			/*
			 * No unlock is required: sysv_cond_wait() can
			 * only fail while re-acquiring the monitor
			 * lock, so the lock is not held here.
			 */
			return (ret);
		}

		rwlock->blocked_writers--;
	}

	/* Indicate that we are locked for writing. */
	rwlock->state = -1;

	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}

int
sysv_rwlock_rdlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	/* Check the lock count. */
	if (rwlock->state == MAX_READ_LOCKS) {
		sysv_mutex_unlock(&rwlock->lock);
		return (EAGAIN);
	}

	if ((rdlock_count > 0) && (rwlock->state > 0)) {
		/*
		 * Taken from the pthread implementation with only one
		 * change: rdlock_count is per process, not per thread.
		 * Original comment:
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* Give writers priority over readers. */
		while (rwlock->blocked_writers || rwlock->state < 0) {
			ret = sysv_cond_wait(&rwlock->read_signal,
			    &rwlock->lock);
			if (ret != 0) {
				/*
				 * No unlock is necessary: only the
				 * relock step inside sysv_cond_wait()
				 * can fail, so the monitor lock is not
				 * held here.
				 */
				return (ret);
			}
		}
	}

	rdlock_count++;
	rwlock->state++; /* Indicate we are locked for reading. */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}
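
/*
 * Illustrative sketch only (not compiled into the library): typical
 * reader and writer usage of a shared sysv_rwlock, including the
 * recursive-read case that the rdlock_count logic above permits.
 * The SYSV_LOCK_EXAMPLE guard and the assumption that 'rw' lives in
 * shared memory are for illustration only.
 */
#ifdef SYSV_LOCK_EXAMPLE
static void
sysv_rwlock_example(struct sysv_rwlock *rw)
{
	/* 'rw' is assumed to live in shared memory; initialize once. */
	sysv_rwlock_init(rw);

	if (sysv_rwlock_rdlock(rw) == 0) {
		/*
		 * A second rdlock from the same process is granted
		 * even if writers are queued (see rdlock_count).
		 */
		if (sysv_rwlock_rdlock(rw) == 0)
			sysv_rwlock_unlock(rw);
		sysv_rwlock_unlock(rw);
	}

	if (sysv_rwlock_wrlock(rw) == 0) {
		/* ... exclusive access to the shared data ... */
		sysv_rwlock_unlock(rw);
	}
}
#endif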