libc/libpthread: Inject threadsafe locking callbacks for rtld.
[dragonfly.git] / lib / libthread_xu / thread / thr_list.c
1 /*
2  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3  * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 #include <sys/types.h>
29 #include <sys/queue.h>
30
31 #include <stdlib.h>
32 #include <string.h>
33 #include <pthread.h>
34
35 #include "libc_private.h"
36 #include "thr_private.h"
37
38 /* #define DEBUG_THREAD_LIST */
39 #ifdef DEBUG_THREAD_LIST
40 #define DBG_MSG         stdout_debug
41 #else
42 #define DBG_MSG(x...)
43 #endif
44
/* List of all threads */
struct thread_head _thread_list = TAILQ_HEAD_INITIALIZER(_thread_list);

/* List of threads needing GC */
struct thread_head _thread_gc_list = TAILQ_HEAD_INITIALIZER(_thread_gc_list);

/* Number of active threads */
int     _thread_active_threads = 1;

/* Garbage thread count. */
int     _thr_gc_count;

/* Lock protecting _thread_list, _thread_gc_list and the counts above. */
umtx_t  _thr_list_lock;

/*
 * Define a high water mark for the maximum number of threads that
 * will be cached.  Once this level is reached, any extra threads
 * will be free()'d.
 */
#define MAX_CACHED_THREADS      100

/*
 * We've got to keep track of everything that is allocated, not only
 * to have a speedy free list, but also so they can be deallocated
 * after a fork().
 */
static TAILQ_HEAD(, pthread)    free_threadq;           /* cached dead threads */
static umtx_t                   free_thread_lock;       /* protects free_threadq */
static umtx_t                   tcb_lock;               /* serializes _tcb_ctor/_tcb_dtor */
static int                      free_thread_count = 0;  /* entries on free_threadq */
static int                      inited = 0;             /* _thr_list_init() has run */
static u_int64_t                next_uniqueid = 1;      /* next id for _thr_link() */

/* Hash of all known threads, keyed on the struct pthread address. */
LIST_HEAD(thread_hash_head, pthread);
#define HASH_QUEUES     128
/* Drop the low 12 bits first — presumably page-offset bits with little entropy. */
#define THREAD_HASH(thrd)       (((unsigned long)thrd >> 12) % HASH_QUEUES)

/* Release the storage of a thread structure (see definition below). */
static void thr_destroy(struct pthread *curthread, struct pthread *thread);
84
85 void
86 _thr_list_init(void)
87 {
88         int i;
89
90         _thr_gc_count = 0;
91         _thr_umtx_init(&_thr_list_lock);
92         TAILQ_INIT(&_thread_list);
93         TAILQ_INIT(&free_threadq);
94         _thr_umtx_init(&free_thread_lock);
95         _thr_umtx_init(&tcb_lock);
96         if (inited) {
97                 for (i = 0; i < HASH_QUEUES; ++i)
98                         LIST_INIT(&thr_hashtable[i]);
99         }
100         inited = 1;
101 }
102
/*
 * Garbage-collect terminated threads.  Phase 1 scans the GC list under
 * the thread-list lock and moves fully-dead, detached, unreferenced
 * threads onto a private worklist; phase 2 frees them with the lock
 * dropped, since _thr_free() takes other locks of its own.
 */
void
_thr_gc(struct pthread *curthread)
{
        struct pthread *td, *td_next;
        TAILQ_HEAD(, pthread) worklist;

        TAILQ_INIT(&worklist);
        THREAD_LIST_LOCK(curthread);

        /* Check the threads waiting for GC. */
        for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
                /* Grab the successor first: td may be unlinked below. */
                td_next = TAILQ_NEXT(td, gcle);
                if (td->terminated == 0) {
                        /* make sure we are not still in userland */
                        continue;
                }
                _thr_stack_free(&td->attr);
                if (((td->tlflags & TLFLAGS_DETACHED) != 0) &&
                    (td->refcount == 0)) {
                        THR_GCLIST_REMOVE(td);
                        /*
                         * The thread has detached and is no longer
                         * referenced.  It is safe to remove all
                         * remnants of the thread.
                         */
                        THR_LIST_REMOVE(td);
                        /* gcle is free for reuse once off the GC list. */
                        TAILQ_INSERT_HEAD(&worklist, td, gcle);
                }
        }
        THREAD_LIST_UNLOCK(curthread);

        /* Phase 2: free the harvested threads outside the list lock. */
        while ((td = TAILQ_FIRST(&worklist)) != NULL) {
                TAILQ_REMOVE(&worklist, td, gcle);
                /*
                 * XXX we don't free initial thread, because there might
                 * have some code referencing initial thread.
                 */
                if (td == _thr_initial) {
                        DBG_MSG("Initial thread won't be freed\n");
                        continue;
                }

                _thr_free(curthread, td);
        }
}
148
/*
 * Allocate a thread structure, preferring the free-thread cache, and
 * attach a fresh TCB to it.  curthread may be NULL during library
 * bootstrap, in which case no locks exist yet and the initial TLS
 * block is requested from the TCB constructor.
 *
 * Returns the zeroed thread with its tcb set, or NULL on failure.
 */
struct pthread *
_thr_alloc(struct pthread *curthread)
{
        struct pthread  *thread = NULL;
        struct tls_tcb  *tcb;

        if (curthread != NULL) {
                /* Opportunistically reap dead threads first. */
                if (GC_NEEDED())
                        _thr_gc(curthread);
                /* Unlocked peek; re-checked under the lock below. */
                if (free_thread_count > 0) {
                        THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                        if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
                                TAILQ_REMOVE(&free_threadq, thread, tle);
                                free_thread_count--;
                        }
                        THR_LOCK_RELEASE(curthread, &free_thread_lock);
                }
        }
        /* Cache miss (or bootstrap): fall back to the heap. */
        if (thread == NULL) {
                thread = malloc(sizeof(struct pthread));
                if (thread == NULL)
                        return (NULL);
        }
        if (curthread != NULL) {
                /* TCB ctor/dtor are serialized by tcb_lock; see _thr_free(). */
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                tcb = _tcb_ctor(thread, 0 /* not initial tls */);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                tcb = _tcb_ctor(thread, 1 /* initial tls */);
        }
        if (tcb != NULL) {
                /* Zero only after the TCB exists; cached structs are dirty. */
                memset(thread, 0, sizeof(*thread));
                thread->tcb = tcb;
        } else {
                /* No TCB: release the struct rather than caching it. */
                thr_destroy(curthread, thread);
                thread = NULL;
        }
        return (thread);
}
188
/*
 * Release a thread structure: free its name and TCB, then either cache
 * the struct on the free-thread list or, when the cache is full (or we
 * are in bootstrap with curthread == NULL), free it outright.
 */
void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
        DBG_MSG("Freeing thread %p\n", thread);
        if (thread->name) {
                free(thread->name);
                thread->name = NULL;
        }
        /*
         * Always free tcb, as we only know it is part of RTLD TLS
         * block, but don't know its detail and can not assume how
         * it works, so better to avoid caching it here.
         */
        if (curthread != NULL) {
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                _tcb_dtor(thread->tcb);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                /* Bootstrap: no locks exist yet, destroy directly. */
                _tcb_dtor(thread->tcb);
        }
        thread->tcb = NULL;
        if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
                thr_destroy(curthread, thread);
        } else {
                /*
                 * Add the thread to the free thread list, this also avoids
                 * pthread id is reused too quickly, may help some buggy apps.
                 */
                THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
                free_thread_count++;
                THR_LOCK_RELEASE(curthread, &free_thread_lock);
        }
}
223
/*
 * Final disposal of a thread structure.  The name and TCB must already
 * have been released by the caller (see _thr_free()).
 */
static void
thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
{
        free(thread);
}
229
230 /*
231  * Add an active thread:
232  *
233  *   o Assign the thread a unique id (which GDB uses to track
234  *     threads.
235  *   o Add the thread to the list of all threads and increment
236  *     number of active threads.
237  */
238 void
239 _thr_link(struct pthread *curthread, struct pthread *thread)
240 {
241         THREAD_LIST_LOCK(curthread);
242         /*
243          * Initialize the unique id (which GDB uses to track
244          * threads), add the thread to the list of all threads,
245          * and
246          */
247         thread->uniqueid = next_uniqueid++;
248         THR_LIST_ADD(thread);
249         _thread_active_threads++;
250         THREAD_LIST_UNLOCK(curthread);
251 }
252
253 /*
254  * Remove an active thread.
255  */
256 void
257 _thr_unlink(struct pthread *curthread, struct pthread *thread)
258 {
259         THREAD_LIST_LOCK(curthread);
260         THR_LIST_REMOVE(thread);
261         _thread_active_threads--;
262         THREAD_LIST_UNLOCK(curthread);
263 }
264
265 void
266 _thr_hash_add(struct pthread *thread)
267 {
268         struct thread_hash_head *head;
269
270         head = &thr_hashtable[THREAD_HASH(thread)];
271         LIST_INSERT_HEAD(head, thread, hle);
272 }
273
/*
 * Remove a thread from the global hash.  Caller holds the thread-list
 * lock; LIST_REMOVE needs no bucket head.
 */
void
_thr_hash_remove(struct pthread *thread)
{
        LIST_REMOVE(thread, hle);
}
279
280 struct pthread *
281 _thr_hash_find(struct pthread *thread)
282 {
283         struct pthread *td;
284         struct thread_hash_head *head;
285
286         head = &thr_hashtable[THREAD_HASH(thread)];
287         LIST_FOREACH(td, head, hle) {
288                 if (td == thread)
289                         return (thread);
290         }
291         return (NULL);
292 }
293
294 /*
295  * Find a thread in the linked list of active threads and add a reference
296  * to it.  Threads with positive reference counts will not be deallocated
297  * until all references are released.
298  */
299 int
300 _thr_ref_add(struct pthread *curthread, struct pthread *thread,
301     int include_dead)
302 {
303         int ret;
304
305         if (thread == NULL)
306                 /* Invalid thread: */
307                 return (EINVAL);
308
309         THREAD_LIST_LOCK(curthread);
310         if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
311                 thread->refcount++;
312         }
313         THREAD_LIST_UNLOCK(curthread);
314
315         /* Return zero if the thread exists: */
316         return (ret);
317 }
318
/*
 * Locked wrapper around _thr_ref_delete_unlocked(): drop a reference
 * taken with _thr_ref_add() while holding the thread-list lock.
 */
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
        THREAD_LIST_LOCK(curthread);
        _thr_ref_delete_unlocked(curthread, thread);
        THREAD_LIST_UNLOCK(curthread);
}
326
327 void
328 _thr_ref_delete_unlocked(struct pthread *curthread __unused,
329         struct pthread *thread)
330 {
331         if (thread != NULL) {
332                 thread->refcount--;
333                 if ((thread->refcount == 0) && thread->state == PS_DEAD &&
334                     (thread->tlflags & TLFLAGS_DETACHED) != 0)
335                         THR_GCLIST_ADD(thread);
336         }
337 }
338
339 int
340 _thr_find_thread(struct pthread *curthread __unused, struct pthread *thread,
341     int include_dead)
342 {
343         struct pthread *pthread;
344
345         if (thread == NULL)
346                 return (EINVAL);
347
348         pthread = _thr_hash_find(thread);
349         if (pthread) {
350                 if (include_dead == 0 && pthread->state == PS_DEAD) {
351                         pthread = NULL;
352                 }
353         }
354
355         /* Return zero if the thread exists: */
356         return ((pthread != NULL) ? 0 : ESRCH);
357 }