2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_file.c,v 1.12.2.3 2002/10/22 14:44:03 fjoe Exp $
33 * $DragonFly: src/lib/libc_r/uthread/uthread_file.c,v 1.3 2004/01/23 11:30:28 joerg Exp $
35 * POSIX stdio FILE locking functions. These assume that the locking
36 * is only required at FILE structure level, not at file descriptor
43 #include <sys/queue.h>
45 #include "pthread_private.h"
48 * Weak symbols for externally visible functions in this file:
50 #pragma weak flockfile=_flockfile
51 #pragma weak ftrylockfile=_ftrylockfile
52 #pragma weak funlockfile=_funlockfile
54 void flockfile(FILE *);
55 int ftrylockfile(FILE *);
56 void funlockfile(FILE *);
59 * The FILE lock structure. The FILE *fp is locked if the owner is
60 * not NULL. If not locked, the file lock structure can be
61 * reassigned to a different file by setting fp.
64 LIST_ENTRY(file_lock) entry; /* Entry in file list. */
65 TAILQ_HEAD(lock_head, pthread)
66 l_head; /* Head of queue for threads */
67 /* waiting on this lock. */
68 FILE *fp; /* The target file. */
69 struct pthread *owner; /* Thread that owns lock. */
70 int count; /* Lock count for owner. */
/* count > 1 means the owner has locked this FILE recursively
 * (flockfile() nests; see the owner == curthread path below). */
74 * The number of file lock lists into which the file pointer is
75 * hashed. Ideally, the FILE structure size would have been increased,
76 * but this causes incompatibility, so separate data structures are
82 * This macro casts a file pointer to a long integer and right
83 * shifts this by the number of bytes in a pointer. The shifted
84 * value is then remaindered using the maximum number of hash
85 entries to produce an index into the array of static lock
86 * structures. If there is a collision, a linear search of the
87 dynamic list of locks linked to each static lock is performed.
/* Hash a FILE pointer to a bucket index: shift away the low (alignment)
 * bits -- note the shift count is sizeof(void *), i.e. the number of
 * BYTES in a pointer, exactly as the comment above describes -- then
 * reduce modulo the number of hash heads. */
89 #define file_idx(_p) ((((u_long) _p) >> sizeof(void *)) % NUM_HEADS)
92 * Global array of file locks. The first lock for each hash bucket is
93 * allocated statically in the hope that there won't be too many
94 * collisions that require a malloc and an element added to the list.
96 struct static_file_lock {
97 LIST_HEAD(file_list_head, file_lock) head; /* Overflow list of
 * dynamically malloc'd file_lock entries for this hash bucket. */
/* NOTE(review): code below also references a static per-bucket lock
 * member "fl" (flh[idx].fl) and a global array "flh" of this struct;
 * their declarations are not visible in this excerpt -- confirm
 * against the full source. */
101 /* Set to non-zero when initialisation is complete: */
102 static int init_done = 0;
104 /* Lock for accesses to the hash table: */
105 static spinlock_t hash_lock = _SPINLOCK_INITIALIZER;
108 * Find a lock structure for a FILE, return NULL if the file is
/* not currently locked. Caller must hold hash_lock. An entry only
 * matches when both fp matches AND owner is non-NULL: entries with a
 * NULL owner are free and may be reassigned to another FILE, so their
 * stale fp must not count as a match. */
113 find_lock(int idx, FILE *fp)
117 /* Check if the file is locked using the static structure: */
118 if (flh[idx].fl.fp == fp && flh[idx].fl.owner != NULL)
119 /* Return a pointer to the static lock: */
122 /* Point to the first dynamic lock: */
123 p = flh[idx].head.lh_first;
126 * Loop through the dynamic locks looking for the
/* matching file; skip unowned (free) entries for the same reason
 * as above. */
129 while (p != NULL && (p->fp != fp || p->owner == NULL))
130 /* Not this file, try the next: */
131 p = p->entry.le_next;
/* NOTE(review): the return of p is elided from this excerpt. */
137 * Lock a file, assuming that there is no lock structure currently
/* assigned to it. Caller must hold hash_lock. Preference order for
 * the lock structure: the bucket's static slot, then any free entry
 * on the bucket's dynamic list, then a freshly malloc'd entry. */
142 do_lock(int idx, FILE *fp)
144 struct pthread *curthread = _get_curthread();
147 /* Check if the static structure is not being used: */
148 if (flh[idx].fl.owner == NULL) {
149 /* Return a pointer to the static lock: */
153 /* Point to the first dynamic lock: */
154 p = flh[idx].head.lh_first;
157 * Loop through the dynamic locks looking for a
158 * lock structure that is not being used:
160 while (p != NULL && p->owner != NULL)
161 /* This one is used, try the next: */
162 p = p->entry.le_next;
166 * If an existing lock structure has not been found,
167 * allocate memory for a new one:
169 if (p == NULL && (p = (struct file_lock *)
170 malloc(sizeof(struct file_lock))) != NULL) {
171 /* Add the new element to the list: */
172 LIST_INSERT_HEAD(&flh[idx].head, p, entry);
/* If malloc failed, p stays NULL -- presumably returned to the
 * caller as "no lock acquired"; confirm against the full source. */
175 /* Check if there is a lock structure to acquire: */
177 /* Acquire the lock for the running thread: */
179 p->owner = curthread;
181 TAILQ_INIT(&p->l_head); /* Fresh, empty wait queue. */
/* Lock fp for the calling thread, blocking (PS_FILE_WAIT) until the
 * lock is free. fname/lineno record the call site for debugging and
 * are passed through to the scheduler state change. Recursive locking
 * by the owner is counted, not blocked. */
187 _flockfile_debug(FILE * fp, char *fname, int lineno)
189 struct pthread *curthread = _get_curthread();
190 int idx = file_idx(fp);
193 /* Check if this is a real file: */
194 if (fp->_file >= 0) {
195 /* Lock the hash table: */
196 _SPINLOCK(&hash_lock);
198 /* Check if the static array has not been initialised: */
/* (the init_done guard itself is elided from this excerpt) */
200 /* Initialise the global array: */
201 memset(flh,0,sizeof(flh));
203 /* Flag the initialisation as complete: */
207 /* Get a pointer to any existing lock for the file: */
208 if ((p = find_lock(idx, fp)) == NULL) {
210 * The file is not locked, so this thread can
/* acquire it immediately: */
213 p = do_lock(idx, fp);
215 /* Unlock the hash table: */
216 _SPINUNLOCK(&hash_lock);
219 * The file is already locked, so check if the
220 * running thread is the owner:
222 } else if (p->owner == curthread) {
224 * The running thread is already the
225 * owner, so increment the count of
226 * the number of times it has locked
/* the file (recursive lock): */
231 /* Unlock the hash table: */
232 _SPINUNLOCK(&hash_lock);
/* Otherwise another thread owns the lock: queue up and sleep. */
234 /* Clear the interrupted flag: */
235 curthread->interrupted = 0;
238 * Prevent being context switched out while
239 * adding this thread to the file lock queue.
241 _thread_kern_sig_defer();
244 * The file is locked for another thread.
245 * Append this thread to the queue of
246 * threads waiting on the lock.
248 TAILQ_INSERT_TAIL(&p->l_head,curthread,qe);
249 curthread->flags |= PTHREAD_FLAGS_IN_FILEQ;
251 /* Unlock the hash table: */
252 _SPINUNLOCK(&hash_lock);
254 curthread->data.fp = fp; /* For backout on interruption. */
256 /* Wait on the FILE lock: */
257 _thread_kern_sched_state(PS_FILE_WAIT, fname, lineno);
/* If we woke without being handed the lock (e.g. interrupted),
 * remove ourselves from the wait queue: */
259 if ((curthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) {
260 TAILQ_REMOVE(&p->l_head,curthread,qe);
261 curthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
264 _thread_kern_sig_undefer();
/* If interrupted (cancellation), run the thread's continuation: */
266 if (curthread->interrupted != 0 &&
267 curthread->continuation != NULL)
268 curthread->continuation((void *)curthread);
/* Public (weak-aliased) flockfile entry point. Delegates to the debug
 * variant; note __FILE__/__LINE__ here record THIS wrapper's location,
 * not the original caller's. */
274 _flockfile(FILE * fp)
276 _flockfile_debug(fp, __FILE__, __LINE__);
/* Non-blocking variant of flockfile: acquire the lock for fp if it is
 * free or already owned by this thread, otherwise fail without
 * queueing. Mirrors _flockfile_debug's lookup/acquire path minus the
 * wait. The result variable and return statements are elided from
 * this excerpt. */
280 _ftrylockfile(FILE * fp)
282 struct pthread *curthread = _get_curthread();
284 int idx = file_idx(fp);
287 /* Check if this is a real file: */
288 if (fp->_file >= 0) {
289 /* Lock the hash table: */
290 _SPINLOCK(&hash_lock);
292 /* Get a pointer to any existing lock for the file: */
293 if ((p = find_lock(idx, fp)) == NULL) {
295 * The file is not locked, so this thread can
/* acquire it immediately: */
298 p = do_lock(idx, fp);
301 * The file is already locked, so check if the
302 * running thread is the owner:
304 } else if (p->owner == curthread) {
306 * The running thread is already the
307 * owner, so increment the count of
308 * the number of times it has locked
/* the file (recursive lock): */
314 * The file is locked for another thread,
/* so this try fails -- no queueing, unlike flockfile. */
320 /* Check if the lock was obtained: */
322 /* Return success: */
325 /* Unlock the hash table: */
326 _SPINUNLOCK(&hash_lock);
/* Release one level of this thread's lock on fp. When the recursion
 * count reaches zero the lock is handed directly to the first waiter
 * that has not been interrupted/cancelled; interrupted waiters are
 * popped and skipped. */
333 _funlockfile(FILE * fp)
335 struct pthread *curthread = _get_curthread();
336 int idx = file_idx(fp);
339 /* Check if this is a real file: */
340 if (fp->_file >= 0) {
342 * Defer signals to protect the scheduling queues from
343 * access by the signal handler:
345 _thread_kern_sig_defer();
347 /* Lock the hash table: */
348 _SPINLOCK(&hash_lock);
351 * Get a pointer to the lock for the file and check that
352 * the running thread is the one with the lock:
354 if ((p = find_lock(idx, fp)) != NULL &&
355 p->owner == curthread) {
357 * Check if this thread has locked the FILE
/* more than once (recursive lock): */
362 * Decrement the count of the number of
363 * times the running thread has locked this
/* file; the lock itself is kept. */
369 * The running thread will release the
/* lock now -- hand it to the next eligible waiter. */
374 /* Get the new owner of the lock: */
/* Note: the assignment to p->owner happens inside the loop
 * condition; when the queue empties, owner is left NULL,
 * i.e. the lock becomes free. */
375 while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
376 /* Pop the thread off the queue: */
377 TAILQ_REMOVE(&p->l_head,p->owner,qe);
378 p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
380 if (p->owner->interrupted == 0) {
382 * This is the first lock for
/* the new owner: */
387 /* Allow the new owner to run: */
388 PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
390 /* End the loop when we find a
391 * thread that hasn't been
392 * cancelled or interrupted;
400 /* Unlock the hash table: */
401 _SPINUNLOCK(&hash_lock);
404 * Undefer and handle pending signals, yielding if
/* necessary: */
407 _thread_kern_sig_undefer();
/* Release every FILE lock owned by the given (exiting or cancelled)
 * thread. Walks all hash buckets and their dynamic overflow lists,
 * performing the same hand-off-to-next-waiter logic as _funlockfile
 * for each lock the thread owns. */
412 _funlock_owned(struct pthread *pthread)
415 struct file_lock *p, *next_p;
418 * Defer signals to protect the scheduling queues from
419 * access by the signal handler:
421 _thread_kern_sig_defer();
423 /* Lock the hash table: */
424 _SPINLOCK(&hash_lock);
426 for (idx = 0; idx < NUM_HEADS; idx++) {
427 /* Check the static file lock first: */
429 next_p = LIST_FIRST(&flh[idx].head);
432 if (p->owner == pthread) {
434 * The running thread will release the
/* lock -- hand it to the next eligible waiter. */
439 /* Get the new owner of the lock: */
/* Assignment in the loop condition: owner ends up NULL
 * (lock free) if every waiter was interrupted. */
440 while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
441 /* Pop the thread off the queue: */
442 TAILQ_REMOVE(&p->l_head,p->owner,qe);
443 p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
445 if (p->owner->interrupted == 0) {
447 * This is the first lock for
/* the new owner: */
452 /* Allow the new owner to run: */
453 PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
455 /* End the loop when we find a
456 * thread that hasn't been
457 * cancelled or interrupted;
/* Advance saved before any potential reuse of the entry: */
465 next_p = LIST_NEXT(next_p, entry);
469 /* Unlock the hash table: */
470 _SPINUNLOCK(&hash_lock);
473 * Undefer and handle pending signals, yielding if
/* necessary: */
476 _thread_kern_sig_undefer();
/* Back a thread out of a FILE lock wait queue after it was
 * interrupted while blocked in _flockfile_debug. pthread->data.fp
 * was stashed there before sleeping. */
480 _flockfile_backout(struct pthread *pthread)
482 int idx = file_idx(pthread->data.fp);
486 * Defer signals to protect the scheduling queues from
487 * access by the signal handler:
489 _thread_kern_sig_defer();
492 * Get a pointer to the lock for the file and check that
493 * the running thread is the one with the lock:
/* NOTE(review): find_lock() appears to be called BEFORE hash_lock is
 * acquired below, unlike every other caller which holds hash_lock
 * across the lookup -- looks like a race; confirm against the full
 * source and callers. */
495 if (((pthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) &&
496 ((p = find_lock(idx, pthread->data.fp)) != NULL)) {
497 /* Lock the hash table: */
498 _SPINLOCK(&hash_lock);
500 /* Remove the thread from the queue: */
501 TAILQ_REMOVE(&p->l_head, pthread, qe);
502 pthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
504 /* Unlock the hash table: */
505 _SPINUNLOCK(&hash_lock);
509 * Undefer and handle pending signals, yielding if necessary:
511 _thread_kern_sig_undefer();