Add the DragonFly cvs id and perform general cleanups on cvs/rcs/sccs ids. Most
[dragonfly.git] / lib / libc_r / uthread / uthread_file.c
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD: src/lib/libc_r/uthread/uthread_file.c,v 1.12.2.3 2002/10/22 14:44:03 fjoe Exp $
33  * $DragonFly: src/lib/libc_r/uthread/uthread_file.c,v 1.2 2003/06/17 04:26:48 dillon Exp $
34  *
35  * POSIX stdio FILE locking functions. These assume that the locking
36  * is only required at FILE structure level, not at file descriptor
37  * level too.
38  *
39  */
40 #include <stdio.h>
41 #include <stdlib.h>
42 #include <string.h>
43 #include <sys/queue.h>
44 #include <pthread.h>
45 #include "pthread_private.h"
46
47 /*
48  * Weak symbols for externally visible functions in this file:
49  */
50 #pragma weak    flockfile=_flockfile
51 #pragma weak    ftrylockfile=_ftrylockfile
52 #pragma weak    funlockfile=_funlockfile
53
/*
 * The FILE lock structure. The FILE *fp is locked if the owner is
 * not NULL. If not locked, the file lock structure can be
 * reassigned to a different file by setting fp.
 */
struct	file_lock {
	LIST_ENTRY(file_lock)	entry;	/* Entry in hash-bucket file list. */
	TAILQ_HEAD(lock_head, pthread)
				l_head;	/* Head of queue for threads */
					/* waiting on this lock.     */
	FILE		*fp;		/* The target file.          */
	struct pthread	*owner;		/* Thread that owns lock.    */
	int		count;		/* Recursive lock count for owner;
					 * lock is released when it drops
					 * back to zero.             */
};
68
69 /*
70  * The number of file lock lists into which the file pointer is
71  * hashed. Ideally, the FILE structure size would have been increased,
72  * but this causes incompatibility, so separate data structures are
73  * required.
74  */
75 #define NUM_HEADS       128
76
77 /*
78  * This macro casts a file pointer to a long integer and right
79  * shifts this by the number of bytes in a pointer. The shifted
80  * value is then remaindered using the maximum number of hash
81  * entries to produce and index into the array of static lock
82  * structures. If there is a collision, a linear search of the
83  * dynamic list of locks linked to each static lock is perfomed.
84  */
85 #define file_idx(_p)    ((((u_long) _p) >> sizeof(void *)) % NUM_HEADS)
86
/*
 * Global array of file locks. The first lock for each hash bucket is
 * allocated statically in the hope that there won't be too many
 * collisions that require a malloc and an element added to the list.
 */
struct static_file_lock {
	LIST_HEAD(file_list_head, file_lock) head;	/* Overflow chain of
							 * malloc'd locks.  */
	struct	file_lock	fl;			/* Static first lock
							 * for this bucket. */
} flh[NUM_HEADS];

/* Set to non-zero when initialisation is complete: */
static	int	init_done	= 0;

/* Lock for accesses to the hash table: */
static	spinlock_t	hash_lock	= _SPINLOCK_INITIALIZER;
102
103 /*
104  * Find a lock structure for a FILE, return NULL if the file is
105  * not locked:
106  */
107 static
108 struct file_lock *
109 find_lock(int idx, FILE *fp)
110 {
111         struct file_lock *p;
112
113         /* Check if the file is locked using the static structure: */
114         if (flh[idx].fl.fp == fp && flh[idx].fl.owner != NULL)
115                 /* Return a pointer to the static lock: */
116                 p = &flh[idx].fl;
117         else {
118                 /* Point to the first dynamic lock: */
119                 p = flh[idx].head.lh_first;
120
121                 /*
122                  * Loop through the dynamic locks looking for the
123                  * target file:
124                  */
125                 while (p != NULL && (p->fp != fp || p->owner == NULL))
126                         /* Not this file, try the next: */
127                         p = p->entry.le_next;
128         }
129         return(p);
130 }
131
132 /*
133  * Lock a file, assuming that there is no lock structure currently
134  * assigned to it.
135  */
136 static
137 struct file_lock *
138 do_lock(int idx, FILE *fp)
139 {
140         struct pthread  *curthread = _get_curthread();
141         struct file_lock *p;
142
143         /* Check if the static structure is not being used: */
144         if (flh[idx].fl.owner == NULL) {
145                 /* Return a pointer to the static lock: */
146                 p = &flh[idx].fl;
147         }
148         else {
149                 /* Point to the first dynamic lock: */
150                 p = flh[idx].head.lh_first;
151
152                 /*
153                  * Loop through the dynamic locks looking for a
154                  * lock structure that is not being used:
155                  */
156                 while (p != NULL && p->owner != NULL)
157                         /* This one is used, try the next: */
158                         p = p->entry.le_next;
159         }
160
161         /*
162          * If an existing lock structure has not been found,
163          * allocate memory for a new one:
164          */
165         if (p == NULL && (p = (struct file_lock *)
166             malloc(sizeof(struct file_lock))) != NULL) {
167                 /* Add the new element to the list: */
168                 LIST_INSERT_HEAD(&flh[idx].head, p, entry);
169         }
170
171         /* Check if there is a lock structure to acquire: */
172         if (p != NULL) {
173                 /* Acquire the lock for the running thread: */
174                 p->fp           = fp;
175                 p->owner        = curthread;
176                 p->count        = 1;
177                 TAILQ_INIT(&p->l_head);
178         }
179         return(p);
180 }
181
/*
 * Acquire the FILE lock for fp on behalf of the running thread,
 * blocking until it is available.  Recursive acquisition by the
 * owner just bumps the count.  fname/lineno identify the caller
 * for the PS_FILE_WAIT state display.  Streams with fp->_file < 0
 * (presumably streams with no underlying descriptor) are never
 * locked.
 */
void
_flockfile_debug(FILE * fp, char *fname, int lineno)
{
	struct pthread	*curthread = _get_curthread();
	int	idx = file_idx(fp);
	struct	file_lock	*p;

	/* Check if this is a real file: */
	if (fp->_file >= 0) {
		/* Lock the hash table: */
		_SPINLOCK(&hash_lock);

		/* Check if the static array has not been initialised: */
		if (!init_done) {
			/*
			 * Initialise the global array.
			 * NOTE(review): flh has static storage and is
			 * already zero-filled; this memset is redundant
			 * but harmless.
			 */
			memset(flh,0,sizeof(flh));

			/* Flag the initialisation as complete: */
			init_done = 1;
		}

		/* Get a pointer to any existing lock for the file: */
		if ((p = find_lock(idx, fp)) == NULL) {
			/*
			 * The file is not locked, so this thread can
			 * grab the lock:
			 */
			p = do_lock(idx, fp);

			/* Unlock the hash table: */
			_SPINUNLOCK(&hash_lock);

		/*
		 * The file is already locked, so check if the
		 * running thread is the owner:
		 */
		} else if (p->owner == curthread) {
			/*
			 * The running thread is already the
			 * owner, so increment the count of
			 * the number of times it has locked
			 * the file:
			 */
			p->count++;

			/* Unlock the hash table: */
			_SPINUNLOCK(&hash_lock);
		} else {
			/* Clear the interrupted flag: */
			curthread->interrupted = 0;

			/*
			 * Prevent being context switched out while
			 * adding this thread to the file lock queue.
			 */
			_thread_kern_sig_defer();

			/*
			 * The file is locked for another thread.
			 * Append this thread to the queue of
			 * threads waiting on the lock.
			 */
			TAILQ_INSERT_TAIL(&p->l_head,curthread,qe);
			curthread->flags |= PTHREAD_FLAGS_IN_FILEQ;

			/* Unlock the hash table: */
			_SPINUNLOCK(&hash_lock);

			curthread->data.fp = fp;

			/* Wait on the FILE lock: */
			_thread_kern_sched_state(PS_FILE_WAIT, fname, lineno);

			/*
			 * On wakeup, dequeue ourselves if the unlocking
			 * thread did not already do so while handing the
			 * lock over (it clears the flag in that case):
			 */
			if ((curthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) {
				TAILQ_REMOVE(&p->l_head,curthread,qe);
				curthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
			}

			_thread_kern_sig_undefer();

			/*
			 * If the wait was interrupted, run the thread's
			 * continuation (e.g. cancellation handling):
			 */
			if (curthread->interrupted != 0 &&
			    curthread->continuation != NULL)
				curthread->continuation((void *)curthread);
		}
	}
}
268
/*
 * Blocking FILE lock.  Reports this wrapper's own file/line to the
 * debug variant (__FILE__/__LINE__ expand here, not at the caller).
 */
void
_flockfile(FILE * fp)
{
	_flockfile_debug(fp, __FILE__, __LINE__);
}
274
275 int
276 _ftrylockfile(FILE * fp)
277 {
278         struct pthread  *curthread = _get_curthread();
279         int     ret = -1;
280         int     idx = file_idx(fp);
281         struct  file_lock       *p;
282
283         /* Check if this is a real file: */
284         if (fp->_file >= 0) {
285                 /* Lock the hash table: */
286                 _SPINLOCK(&hash_lock);
287
288                 /* Get a pointer to any existing lock for the file: */
289                 if ((p = find_lock(idx, fp)) == NULL) {
290                         /*
291                          * The file is not locked, so this thread can
292                          * grab the lock:
293                          */
294                         p = do_lock(idx, fp);
295
296                 /*
297                  * The file is already locked, so check if the
298                  * running thread is the owner:
299                  */
300                 } else if (p->owner == curthread) {
301                         /*
302                          * The running thread is already the
303                          * owner, so increment the count of
304                          * the number of times it has locked
305                          * the file:
306                          */
307                         p->count++;
308                 } else {
309                         /*
310                          * The file is locked for another thread,
311                          * so this try fails.
312                          */
313                         p = NULL;
314                 }
315
316                 /* Check if the lock was obtained: */
317                 if (p != NULL)
318                         /* Return success: */
319                         ret = 0;
320
321                 /* Unlock the hash table: */
322                 _SPINUNLOCK(&hash_lock);
323
324         }
325         return (ret);
326 }
327
/*
 * Release one level of the FILE lock held by the running thread.
 * Only the owner's final unlock (count dropping to zero) hands the
 * lock to the first queued waiter that has not been interrupted;
 * an unlock by a thread that is not the owner is silently ignored.
 */
void
_funlockfile(FILE * fp)
{
	struct pthread	*curthread = _get_curthread();
	int	idx = file_idx(fp);
	struct	file_lock	*p;

	/* Check if this is a real file: */
	if (fp->_file >= 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the hash table: */
		_SPINLOCK(&hash_lock);

		/*
		 * Get a pointer to the lock for the file and check that
		 * the running thread is the one with the lock:
		 */
		if ((p = find_lock(idx, fp)) != NULL &&
		    p->owner == curthread) {
			/*
			 * Check if this thread has locked the FILE
			 * more than once:
			 */
			if (p->count > 1)
				/*
				 * Decrement the count of the number of
				 * times the running thread has locked this
				 * file:
				 */
				p->count--;
			else {
				/*
				 * The running thread will release the
				 * lock now:
				 */
				p->count = 0;

				/*
				 * Get the new owner of the lock.  Waiters
				 * that were interrupted are popped and
				 * skipped so a cancelled thread never
				 * becomes owner; if every waiter was
				 * interrupted, owner ends up NULL and the
				 * lock is simply free:
				 */
				while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
					/* Pop the thread off the queue: */
					TAILQ_REMOVE(&p->l_head,p->owner,qe);
					p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;

					if (p->owner->interrupted == 0) {
						/*
						 * This is the first lock for
						 * the new owner:
						 */
						p->count = 1;

						/* Allow the new owner to run: */
						PTHREAD_NEW_STATE(p->owner,PS_RUNNING);

						/* End the loop when we find a
						 * thread that hasn't been
						 * cancelled or interrupted;
						 */
						break;
					}
				}
			}
		}

		/* Unlock the hash table: */
		_SPINUNLOCK(&hash_lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}
}
406
/*
 * Release every FILE lock owned by the given thread, handing each
 * one to its first non-interrupted waiter.  All hash buckets are
 * scanned: the static lock first, then the dynamic chain.
 * Presumably called when a thread exits or is cancelled while
 * holding stdio locks -- no caller is visible in this file.
 */
void
_funlock_owned(struct pthread *pthread)
{
	int			idx;
	struct file_lock	*p, *next_p;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	_thread_kern_sig_defer();

	/* Lock the hash table: */
	_SPINLOCK(&hash_lock);

	for (idx = 0; idx < NUM_HEADS; idx++) {
		/*
		 * Check the static file lock first; next_p is fetched
		 * up front so the walk moves from the static entry
		 * into the dynamic chain and then along it:
		 */
		p = &flh[idx].fl;
		next_p = LIST_FIRST(&flh[idx].head);

		while (p != NULL) {
			if (p->owner == pthread) {
				/*
				 * The running thread will release the
				 * lock now:
				 */
				p->count = 0;

				/* Get the new owner of the lock: */
				while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
					/* Pop the thread off the queue: */
					TAILQ_REMOVE(&p->l_head,p->owner,qe);
					p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;

					if (p->owner->interrupted == 0) {
						/*
						 * This is the first lock for
						 * the new owner:
						 */
						p->count = 1;

						/* Allow the new owner to run: */
						PTHREAD_NEW_STATE(p->owner,PS_RUNNING);

						/* End the loop when we find a
						 * thread that hasn't been
						 * cancelled or interrupted;
						 */
						break;
					}
				}
			}
			/* Advance to the next lock in this bucket: */
			p = next_p;
			if (next_p != NULL)
				next_p = LIST_NEXT(next_p, entry);
		}
	}

	/* Unlock the hash table: */
	_SPINUNLOCK(&hash_lock);

	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	_thread_kern_sig_undefer();
}
474
475 void
476 _flockfile_backout(struct pthread *pthread)
477 {
478         int     idx = file_idx(pthread->data.fp);
479         struct  file_lock       *p;
480
481         /*
482          * Defer signals to protect the scheduling queues from
483          * access by the signal handler:
484          */
485         _thread_kern_sig_defer();
486
487         /*
488          * Get a pointer to the lock for the file and check that
489          * the running thread is the one with the lock:
490          */
491         if (((pthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) &&
492             ((p = find_lock(idx, pthread->data.fp)) != NULL)) {
493                 /* Lock the hash table: */
494                 _SPINLOCK(&hash_lock);
495
496                 /* Remove the thread from the queue: */
497                 TAILQ_REMOVE(&p->l_head, pthread, qe);
498                 pthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
499
500                 /* Unlock the hash table: */
501                 _SPINUNLOCK(&hash_lock);
502         }
503
504         /*
505          * Undefer and handle pending signals, yielding if necessary:
506          */
507         _thread_kern_sig_undefer();
508 }
509