// Support for concurrent programming -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file concurrence.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _CONCURRENCE_H
#define _CONCURRENCE_H 1

#include <exception>
#include <bits/gthr.h>
#include <bits/functexcept.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  // Available locking policies:
  // _S_single    single-threaded code that doesn't need to be locked.
  // _S_mutex     multi-threaded code that requires additional support
  //              from gthr.h or abstraction layers in concurrence.h.
  // _S_atomic    multi-threaded code using atomic operations.
  enum _Lock_policy { _S_single, _S_mutex, _S_atomic };

  // Compile-time constant that indicates the preferred locking policy
  // in the current configuration.
  static const _Lock_policy __default_lock_policy =
#ifdef __GTHREADS
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) \
     && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4))
  _S_atomic;
#else
  _S_mutex;
#endif
#else
  _S_single;
#endif
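
  // Illustrative sketch only (hypothetical name): a component that needs
  // thread-safe reference counting or similar state can be parameterized
  // on _Lock_policy and default to the constant above, then specialize
  // its behavior per policy:
  //
  //   template<_Lock_policy _Lp = __default_lock_policy>
  //     class _Example_counted_base
  //     { /* specialized for _S_single, _S_mutex and _S_atomic */ };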

  // NB: As this is used in libsupc++, we must depend only on <exception>.
  // No <stdexcept> classes, no use of std::string.
  class __concurrence_lock_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_lock_error"; }
  };

  class __concurrence_unlock_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_unlock_error"; }
  };

  class __concurrence_broadcast_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_broadcast_error"; }
  };

  class __concurrence_wait_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_wait_error"; }
  };

  // Substitutes for throwing a concurrence error object in the
  // case of -fno-exceptions.
  inline void
  __throw_concurrence_lock_error()
  {
#if __EXCEPTIONS
    throw __concurrence_lock_error();
#else
    __builtin_abort();
#endif
  }

  inline void
  __throw_concurrence_unlock_error()
  {
#if __EXCEPTIONS
    throw __concurrence_unlock_error();
#else
    __builtin_abort();
#endif
  }

#ifdef __GTHREAD_HAS_COND
  inline void
  __throw_concurrence_broadcast_error()
  {
#if __EXCEPTIONS
    throw __concurrence_broadcast_error();
#else
    __builtin_abort();
#endif
  }

  inline void
  __throw_concurrence_wait_error()
  {
#if __EXCEPTIONS
    throw __concurrence_wait_error();
#else
    __builtin_abort();
#endif
  }
#endif

  class __mutex
  {
  private:
    __gthread_mutex_t _M_mutex;

    __mutex(const __mutex&);
    __mutex& operator=(const __mutex&);

  public:
    __mutex()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
#if defined __GTHREAD_MUTEX_INIT
          __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
          _M_mutex = __tmp;
#else
          __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
        }
#endif
    }

    void lock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_mutex_lock(&_M_mutex) != 0)
            __throw_concurrence_lock_error();
        }
#endif
    }

    void unlock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_mutex_unlock(&_M_mutex) != 0)
            __throw_concurrence_unlock_error();
        }
#endif
    }

    __gthread_mutex_t* gthread_mutex(void)
      { return &_M_mutex; }
  };

  class __recursive_mutex
  {
  private:
    __gthread_recursive_mutex_t _M_mutex;

    __recursive_mutex(const __recursive_mutex&);
    __recursive_mutex& operator=(const __recursive_mutex&);

  public:
    __recursive_mutex()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
#if defined __GTHREAD_RECURSIVE_MUTEX_INIT
          __gthread_recursive_mutex_t __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
          _M_mutex = __tmp;
#else
          __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
        }
#endif
    }

    void lock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_recursive_mutex_lock(&_M_mutex) != 0)
            __throw_concurrence_lock_error();
        }
#endif
    }

    void unlock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_recursive_mutex_unlock(&_M_mutex) != 0)
            __throw_concurrence_unlock_error();
        }
#endif
    }

    __gthread_recursive_mutex_t* gthread_recursive_mutex(void)
      { return &_M_mutex; }
  };

  /// Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  class __scoped_lock
  {
  public:
    typedef __mutex __mutex_type;

  private:
    __mutex_type& _M_device;

    __scoped_lock(const __scoped_lock&);
    __scoped_lock& operator=(const __scoped_lock&);

  public:
    explicit __scoped_lock(__mutex_type& __name) : _M_device(__name)
    { _M_device.lock(); }

    ~__scoped_lock() throw()
    { _M_device.unlock(); }
  };
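
  // Usage sketch (illustrative only; __some_mutex and __f are hypothetical
  // names).  The constructor acquires the mutex and the destructor releases
  // it, so the critical section is released on every exit path, including
  // exceptions:
  //
  //   __gnu_cxx::__mutex __some_mutex;
  //
  //   void __f()
  //   {
  //     __gnu_cxx::__scoped_lock __l(__some_mutex);  // locks here
  //     // ... critical section ...
  //   }                                              // unlocks here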

#ifdef __GTHREAD_HAS_COND
  class __cond
  {
  private:
    __gthread_cond_t _M_cond;

    __cond(const __cond&);
    __cond& operator=(const __cond&);

  public:
    __cond()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
#if defined __GTHREAD_COND_INIT
          __gthread_cond_t __tmp = __GTHREAD_COND_INIT;
          _M_cond = __tmp;
#else
          __GTHREAD_COND_INIT_FUNCTION(&_M_cond);
#endif
        }
#endif
    }

    void broadcast()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_cond_broadcast(&_M_cond) != 0)
            __throw_concurrence_broadcast_error();
        }
#endif
    }

    void wait(__mutex *mutex)
    {
#if __GTHREADS
      {
        if (__gthread_cond_wait(&_M_cond, mutex->gthread_mutex()) != 0)
          __throw_concurrence_wait_error();
      }
#endif
    }

    void wait_recursive(__recursive_mutex *mutex)
    {
#if __GTHREADS
      {
        if (__gthread_cond_wait_recursive(&_M_cond,
                                          mutex->gthread_recursive_mutex())
            != 0)
          __throw_concurrence_wait_error();
      }
#endif
    }
  };
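
  // Usage sketch (illustrative only; all names below are hypothetical).
  // wait() must be called with the mutex held; it releases the mutex while
  // waiting and re-acquires it before returning.  A waiter re-checks its
  // predicate in a loop, since wakeups may be spurious, and a notifier
  // calls broadcast() with the mutex held:
  //
  //   __gnu_cxx::__mutex __m;
  //   __gnu_cxx::__cond  __c;
  //   bool __ready = false;
  //
  //   void __wait_for_ready()
  //   {
  //     __gnu_cxx::__scoped_lock __l(__m);
  //     while (!__ready)
  //       __c.wait(&__m);       // releases and re-acquires __m
  //   }
  //
  //   void __set_ready()
  //   {
  //     __gnu_cxx::__scoped_lock __l(__m);
  //     __ready = true;
  //     __c.broadcast();        // wake all waiters
  //   }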
#endif

_GLIBCXX_END_NAMESPACE

#endif