/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_file.c,v 1.12.2.3 2002/10/22 14:44:03 fjoe Exp $
 *
 * POSIX stdio FILE locking functions. These assume that locking is
 * required only at the FILE structure level, not also at the file
 * descriptor level.
 *
 */
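/*
 * Illustrative usage sketch (added for clarity; not in the original file,
 * and fp/value are hypothetical caller variables): a caller makes a
 * sequence of stdio operations on one stream appear atomic to other
 * threads by bracketing it with the interfaces implemented below:
 *
 *        flockfile(fp);
 *        fputs("value = ", fp);
 *        fputs(value, fp);
 *        fputc('\n', fp);
 *        funlockfile(fp);
 *
 * Because the lock is recursive, any locking the bracketed stdio calls do
 * themselves only raises and lowers the owner's count.
 */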
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "pthread_private.h"

/*
 * Weak symbols for externally visible functions in this file:
 */
#pragma weak    flockfile=_flockfile
#pragma weak    ftrylockfile=_ftrylockfile
#pragma weak    funlockfile=_funlockfile

/*
 * The FILE lock structure. The FILE *fp is locked if the owner is
 * not NULL. If not locked, the file lock structure can be
 * reassigned to a different file by setting fp.
 */
struct  file_lock {
        LIST_ENTRY(file_lock)   entry;  /* Entry in file list.       */
        TAILQ_HEAD(lock_head, pthread)
                                l_head; /* Head of queue for threads */
                                        /* waiting on this lock.     */
        FILE            *fp;            /* The target file.          */
        struct pthread  *owner;         /* Thread that owns lock.    */
        int             count;          /* Lock count for owner.     */
};

/*
 * The number of file lock lists into which the file pointer is
 * hashed. Ideally, the FILE structure size would have been increased,
 * but this causes incompatibility, so separate data structures are
 * required.
 */
#define NUM_HEADS       128

/*
 * This macro casts a file pointer to a long integer and right
 * shifts this by the number of bytes in a pointer. The shifted
 * value is then remaindered using the maximum number of hash
 * entries to produce an index into the array of static lock
 * structures. If there is a collision, a linear search of the
 * dynamic list of locks linked to each static lock is performed.
 */
#define file_idx(_p)    ((((u_long) _p) >> sizeof(void *)) % NUM_HEADS)
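/*
 * Worked example (added for clarity, assuming 32-bit pointers where
 * sizeof(void *) == 4, as on i386): a FILE pointer of 0x0804c230 is
 * shifted right by 4 bits to give 0x0804c23, and 0x0804c23 % 128 == 35,
 * so that stream hashes to bucket flh[35].
 */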

/*
 * Global array of file locks. The first lock for each hash bucket is
 * allocated statically in the hope that there won't be too many
 * collisions that require a malloc and an element added to the list.
 */
struct static_file_lock {
        LIST_HEAD(file_list_head, file_lock) head;
        struct  file_lock       fl;
} flh[NUM_HEADS];

/* Set to non-zero when initialisation is complete: */
static  int     init_done       = 0;

/* Lock for accesses to the hash table: */
static  spinlock_t      hash_lock       = _SPINLOCK_INITIALIZER;

/*
 * Find a lock structure for a FILE, return NULL if the file is
 * not locked:
 */
static
struct file_lock *
find_lock(int idx, FILE *fp)
{
        struct file_lock *p;

        /* Check if the file is locked using the static structure: */
        if (flh[idx].fl.fp == fp && flh[idx].fl.owner != NULL)
                /* Return a pointer to the static lock: */
                p = &flh[idx].fl;
        else {
                /* Point to the first dynamic lock: */
                p = flh[idx].head.lh_first;

                /*
                 * Loop through the dynamic locks looking for the
                 * target file:
                 */
                while (p != NULL && (p->fp != fp || p->owner == NULL))
                        /* Not this file, try the next: */
                        p = p->entry.le_next;
        }
        return(p);
}

/*
 * Lock a file, assuming that there is no lock structure currently
 * assigned to it.
 */
static
struct file_lock *
do_lock(int idx, FILE *fp)
{
        struct pthread  *curthread = _get_curthread();
        struct file_lock *p;

        /* Check if the static structure is not being used: */
        if (flh[idx].fl.owner == NULL) {
                /* Return a pointer to the static lock: */
                p = &flh[idx].fl;
        }
        else {
                /* Point to the first dynamic lock: */
                p = flh[idx].head.lh_first;

                /*
                 * Loop through the dynamic locks looking for a
                 * lock structure that is not being used:
                 */
                while (p != NULL && p->owner != NULL)
                        /* This one is used, try the next: */
                        p = p->entry.le_next;
        }

        /*
         * If an existing lock structure has not been found,
         * allocate memory for a new one:
         */
        if (p == NULL && (p = (struct file_lock *)
            malloc(sizeof(struct file_lock))) != NULL) {
                /* Add the new element to the list: */
                LIST_INSERT_HEAD(&flh[idx].head, p, entry);
        }

        /* Check if there is a lock structure to acquire: */
        if (p != NULL) {
                /* Acquire the lock for the running thread: */
                p->fp           = fp;
                p->owner        = curthread;
                p->count        = 1;
                TAILQ_INIT(&p->l_head);
        }
        return(p);
}

void
_flockfile_debug(FILE * fp, char *fname, int lineno)
{
        struct pthread  *curthread = _get_curthread();
        int     idx = file_idx(fp);
        struct  file_lock       *p;

        /* Check if this is a real file: */
        if (fp->_file >= 0) {
                /* Lock the hash table: */
                _SPINLOCK(&hash_lock);

                /* Check if the static array has not been initialised: */
                if (!init_done) {
                        /* Initialise the global array: */
                        memset(flh,0,sizeof(flh));

                        /* Flag the initialisation as complete: */
                        init_done = 1;
                }

                /* Get a pointer to any existing lock for the file: */
                if ((p = find_lock(idx, fp)) == NULL) {
                        /*
                         * The file is not locked, so this thread can
                         * grab the lock:
                         */
                        p = do_lock(idx, fp);

                        /* Unlock the hash table: */
                        _SPINUNLOCK(&hash_lock);

                /*
                 * The file is already locked, so check if the
                 * running thread is the owner:
                 */
                } else if (p->owner == curthread) {
                        /*
                         * The running thread is already the
                         * owner, so increment the count of
                         * the number of times it has locked
                         * the file:
                         */
                        p->count++;

                        /* Unlock the hash table: */
                        _SPINUNLOCK(&hash_lock);
                } else {
                        /* Clear the interrupted flag: */
                        curthread->interrupted = 0;

                        /*
                         * Prevent being context switched out while
                         * adding this thread to the file lock queue.
                         */
                        _thread_kern_sig_defer();

                        /*
                         * The file is locked for another thread.
                         * Append this thread to the queue of
                         * threads waiting on the lock.
                         */
                        TAILQ_INSERT_TAIL(&p->l_head,curthread,qe);
                        curthread->flags |= PTHREAD_FLAGS_IN_FILEQ;

                        /* Unlock the hash table: */
                        _SPINUNLOCK(&hash_lock);

                        curthread->data.fp = fp;

                        /* Wait on the FILE lock: */
                        _thread_kern_sched_state(PS_FILE_WAIT, fname, lineno);

                        if ((curthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) {
                                TAILQ_REMOVE(&p->l_head,curthread,qe);
                                curthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
                        }

                        _thread_kern_sig_undefer();

                        if (curthread->interrupted != 0 &&
                            curthread->continuation != NULL)
                                curthread->continuation((void *)curthread);
                }
        }
}

void
_flockfile(FILE * fp)
{
        _flockfile_debug(fp, __FILE__, __LINE__);
}

int
_ftrylockfile(FILE * fp)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = -1;
        int     idx = file_idx(fp);
        struct  file_lock       *p;

        /* Check if this is a real file: */
        if (fp->_file >= 0) {
                /* Lock the hash table: */
                _SPINLOCK(&hash_lock);

                /* Get a pointer to any existing lock for the file: */
                if ((p = find_lock(idx, fp)) == NULL) {
                        /*
                         * The file is not locked, so this thread can
                         * grab the lock:
                         */
                        p = do_lock(idx, fp);

                /*
                 * The file is already locked, so check if the
                 * running thread is the owner:
                 */
                } else if (p->owner == curthread) {
                        /*
                         * The running thread is already the
                         * owner, so increment the count of
                         * the number of times it has locked
                         * the file:
                         */
                        p->count++;
                } else {
                        /*
                         * The file is locked for another thread,
                         * so this try fails.
                         */
                        p = NULL;
                }

                /* Check if the lock was obtained: */
                if (p != NULL)
                        /* Return success: */
                        ret = 0;

                /* Unlock the hash table: */
                _SPINUNLOCK(&hash_lock);

        }
        return (ret);
}
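/*
 * Illustrative sketch (added for clarity; not in the original file, and
 * msg and queue_message() are hypothetical caller-side names): a thread
 * that must not block can use the try form and fall back to other work
 * when the stream is busy:
 *
 *        if (ftrylockfile(fp) == 0) {
 *                fputs(msg, fp);
 *                funlockfile(fp);
 *        } else {
 *                queue_message(msg);
 *        }
 */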

void
_funlockfile(FILE * fp)
{
        struct pthread  *curthread = _get_curthread();
        int     idx = file_idx(fp);
        struct  file_lock       *p;

        /* Check if this is a real file: */
        if (fp->_file >= 0) {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the hash table: */
                _SPINLOCK(&hash_lock);

                /*
                 * Get a pointer to the lock for the file and check that
                 * the running thread is the one with the lock:
                 */
                if ((p = find_lock(idx, fp)) != NULL &&
                    p->owner == curthread) {
                        /*
                         * Check if this thread has locked the FILE
                         * more than once:
                         */
                        if (p->count > 1)
                                /*
                                 * Decrement the count of the number of
                                 * times the running thread has locked this
                                 * file:
                                 */
                                p->count--;
                        else {
                                /*
                                 * The running thread will release the
                                 * lock now:
                                 */
                                p->count = 0;

                                /* Get the new owner of the lock: */
                                while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
                                        /* Pop the thread off the queue: */
                                        TAILQ_REMOVE(&p->l_head,p->owner,qe);
                                        p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;

                                        if (p->owner->interrupted == 0) {
                                                /*
                                                 * This is the first lock for
                                                 * the new owner:
                                                 */
                                                p->count = 1;

                                                /* Allow the new owner to run: */
                                                PTHREAD_NEW_STATE(p->owner,PS_RUNNING);

                                                /* End the loop when we find a
                                                 * thread that hasn't been
                                                 * cancelled or interrupted.
                                                 */
                                                break;
                                        }
                                }
                        }
                }

                /* Unlock the hash table: */
                _SPINUNLOCK(&hash_lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }
}
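/*
 * Illustrative sketch (added for clarity; not in the original file, with
 * fp again a hypothetical stream): since the count is tracked per owner,
 * the lock is recursive and nested flockfile() calls must be balanced
 * before the next queued waiter can run:
 *
 *        flockfile(fp);          (count becomes 1)
 *        flockfile(fp);          (same owner, count becomes 2)
 *        fprintf(fp, "still held\n");
 *        funlockfile(fp);        (count drops back to 1)
 *        funlockfile(fp);        (lock released; a waiter, if any, is woken)
 */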

void
_funlock_owned(struct pthread *pthread)
{
        int                     idx;
        struct file_lock        *p, *next_p;

        /*
         * Defer signals to protect the scheduling queues from
         * access by the signal handler:
         */
        _thread_kern_sig_defer();

        /* Lock the hash table: */
        _SPINLOCK(&hash_lock);

        for (idx = 0; idx < NUM_HEADS; idx++) {
                /* Check the static file lock first: */
                p = &flh[idx].fl;
                next_p = LIST_FIRST(&flh[idx].head);

                while (p != NULL) {
                        if (p->owner == pthread) {
                                /*
                                 * The running thread will release the
                                 * lock now:
                                 */
                                p->count = 0;

                                /* Get the new owner of the lock: */
                                while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
                                        /* Pop the thread off the queue: */
                                        TAILQ_REMOVE(&p->l_head,p->owner,qe);
                                        p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;

                                        if (p->owner->interrupted == 0) {
                                                /*
                                                 * This is the first lock for
                                                 * the new owner:
                                                 */
                                                p->count = 1;

                                                /* Allow the new owner to run: */
                                                PTHREAD_NEW_STATE(p->owner,PS_RUNNING);

                                                /* End the loop when we find a
                                                 * thread that hasn't been
                                                 * cancelled or interrupted.
                                                 */
                                                break;
                                        }
                                }
                        }
                        p = next_p;
                        if (next_p != NULL)
                                next_p = LIST_NEXT(next_p, entry);
                }
        }

        /* Unlock the hash table: */
        _SPINUNLOCK(&hash_lock);

        /*
         * Undefer and handle pending signals, yielding if
         * necessary:
         */
        _thread_kern_sig_undefer();
}

void
_flockfile_backout(struct pthread *pthread)
{
        int     idx = file_idx(pthread->data.fp);
        struct  file_lock       *p;

        /*
         * Defer signals to protect the scheduling queues from
         * access by the signal handler:
         */
        _thread_kern_sig_defer();

        /*
         * Get a pointer to the lock for the file and check that
         * the running thread is the one with the lock:
         */
        if (((pthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) &&
            ((p = find_lock(idx, pthread->data.fp)) != NULL)) {
                /* Lock the hash table: */
                _SPINLOCK(&hash_lock);

                /* Remove the thread from the queue: */
                TAILQ_REMOVE(&p->l_head, pthread, qe);
                pthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ;

                /* Unlock the hash table: */
                _SPINUNLOCK(&hash_lock);
        }

        /*
         * Undefer and handle pending signals, yielding if necessary:
         */
        _thread_kern_sig_undefer();
}