4 * Implements inline procedure support for the LWKT subsystem.
6 * Generally speaking these routines only operate on threads associated
7 * with the current cpu. For example, a higher priority thread pending
8 * on a different cpu will not be immediately scheduled by a yield() on
11 * $DragonFly: src/sys/sys/thread2.h,v 1.23 2005/07/26 20:53:55 dillon Exp $
14 #ifndef _SYS_THREAD2_H_
15 #define _SYS_THREAD2_H_
18 * Userland will have its own globaldata which it includes prior to this.
20 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
21 #ifndef _SYS_GLOBALDATA_H_
22 #include <sys/globaldata.h>
24 #ifndef _MACHINE_CPUFUNC_H_
25 #include <machine/cpufunc.h>
30 * Critical section debugging
/*
 * When DEBUG_CRIT_SECTIONS is defined, every crit_enter*()/crit_exit*()
 * macro forwards a caller-identifying string (__FUNCTION__, or an explicit
 * id for the *_id variants) into the _crit_* inlines.  The __DEBUG_CRIT_*
 * helper macros splice that extra argument into the inline function
 * signatures and call sites so the identifier can be recorded and checked
 * by _debug_crit_enter()/_debug_crit_exit() below.
 */
32 #ifdef DEBUG_CRIT_SECTIONS
33 #define __DEBUG_CRIT_ARG__ const char *id
34 #define __DEBUG_CRIT_ADD_ARG__ , const char *id
35 #define __DEBUG_CRIT_PASS_ARG__ , id
36 #define __DEBUG_CRIT_ENTER(td) _debug_crit_enter((td), id)
37 #define __DEBUG_CRIT_EXIT(td) _debug_crit_exit((td), id)
38 #define crit_enter() _crit_enter(__FUNCTION__)
39 #define crit_enter_id(id) _crit_enter(id)
40 #define crit_enter_quick(curtd) _crit_enter_quick((curtd), __FUNCTION__)
41 #define crit_enter_gd(curgd) _crit_enter_gd(curgd, __FUNCTION__)
42 #define crit_exit() _crit_exit(__FUNCTION__)
43 #define crit_exit_id(id) _crit_exit(id)
44 #define crit_exit_quick(curtd) _crit_exit_quick((curtd), __FUNCTION__)
45 #define crit_exit_noyield(curtd) _crit_exit_noyield((curtd),__FUNCTION__)
46 #define crit_exit_gd(curgd) _crit_exit_gd((curgd), __FUNCTION__)
/*
 * Non-debug arm: the id-tracking machinery compiles away entirely and the
 * crit_* macros map 1:1 onto the bare _crit_* inlines.
 * NOTE(review): the #else that separates the two arms is not visible in
 * this excerpt (elided original line) — confirm against the full source.
 */
48 #define __DEBUG_CRIT_ARG__ void
49 #define __DEBUG_CRIT_ADD_ARG__
50 #define __DEBUG_CRIT_PASS_ARG__
51 #define __DEBUG_CRIT_ENTER(td)
52 #define __DEBUG_CRIT_EXIT(td)
53 #define crit_enter() _crit_enter()
54 #define crit_enter_id(id) _crit_enter()
55 #define crit_enter_quick(curtd) _crit_enter_quick(curtd)
56 #define crit_enter_gd(curgd) _crit_enter_gd(curgd)
57 #define crit_exit() _crit_exit()
58 #define crit_exit_id(id) _crit_exit()
59 #define crit_exit_quick(curtd) _crit_exit_quick(curtd)
60 #define crit_exit_noyield(curtd) _crit_exit_noyield(curtd)
61 #define crit_exit_gd(curgd) _crit_exit_gd(curgd)
65 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
67 #ifdef DEBUG_CRIT_SECTIONS
69 #include <sys/systm.h>
/*
 * Record the id string of a crit_enter() call in the thread's circular
 * debug array, so _debug_crit_exit() can later verify that enter/exit
 * calls pair up by id.  td must be curthread (see the section comment).
 * NOTE(review): the return-type line and braces are elided in this excerpt.
 */
72 _debug_crit_enter(thread_t td, const char *id)
74     int wi = td->td_crit_debug_index;  /* current write slot (unwrapped) */
/* Mask constrains the index into the fixed-size circular array. */
76     td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
77     ++td->td_crit_debug_index;
/*
 * Verify that this crit_exit()'s id matches the most recent recorded
 * crit_enter() id and warn on a mismatch, then pop the debug index.
 * NOTE(review): the declarations of wi/gid and the closing braces are
 * elided in this excerpt — confirm against the full source.
 */
81 _debug_crit_exit(thread_t td, const char *id)
86 wi = td->td_crit_debug_index - 1;    /* slot written by the matching enter */
87 if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
/* td_in_crit_report guards against recursion if printf re-enters a crit path. */
88 if (td->td_in_crit_report == 0) {
89 td->td_in_crit_report = 1;
90 printf("crit_exit(%s) expected id %s\n", id, gid);
91 td->td_in_crit_report = 0;
94 --td->td_crit_debug_index;
100 * Critical sections prevent preemption by raising a thread's priority
101 * above the highest possible interrupting priority. Additionally, the
102 * current cpu will not be able to schedule a new thread but will instead
103 * place it on a pending list (with interrupts physically disabled) and
104 * set mycpu->gd_reqflags to indicate that work needs to be done, which
105 * lwkt_yield_quick() takes care of.
107 * Some of these routines take a struct thread pointer as an argument. This
108 * pointer MUST be curthread and is only passed as an optimization.
110 * Synchronous switching and blocking is allowed while in a critical section.
/*
 * Enter a critical section on the current thread by raising td_pri by
 * TDPRI_CRIT.  Critical sections nest; each enter must be paired with an
 * exit.  NOTE(review): original lines 117-121 are elided in this excerpt.
 */
114 _crit_enter(__DEBUG_CRIT_ARG__)
116 struct thread *td = curthread;
122 td->td_pri += TDPRI_CRIT;
123 __DEBUG_CRIT_ENTER(td);         /* no-op unless DEBUG_CRIT_SECTIONS */
/*
 * Same as _crit_enter() but the caller supplies curthread directly
 * (curtd MUST be curthread), avoiding the curthread lookup.
 */
128 _crit_enter_quick(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
130     curtd->td_pri += TDPRI_CRIT;
131     __DEBUG_CRIT_ENTER(curtd);  /* no-op unless DEBUG_CRIT_SECTIONS */
/*
 * Enter a critical section given the caller's globaldata (mygd MUST be
 * mycpu's globaldata); delegates to _crit_enter_quick() on its curthread.
 */
136 _crit_enter_gd(globaldata_t mygd __DEBUG_CRIT_ADD_ARG__)
138     _crit_enter_quick(mygd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
/*
 * Leave a critical section without checking gd_reqflags (i.e. without
 * giving pending work a chance to run).  curtd MUST be curthread.
 * NOTE(review): the body of the td_pri < 0 check (presumably a panic) is
 * elided in this excerpt — confirm against the full source.
 */
142 _crit_exit_noyield(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
144     __DEBUG_CRIT_EXIT(curtd);   /* no-op unless DEBUG_CRIT_SECTIONS */
145     curtd->td_pri -= TDPRI_CRIT;
/* Sanity: priority must never go negative from an unmatched exit. */
147     if (curtd->td_pri < 0)
150     cpu_ccfence();	/* prevent compiler reordering */
/*
 * Leave a critical section.  If this drops us out of the last nested
 * section (td_pri < TDPRI_CRIT) and the cpu has pending work queued in
 * gd_reqflags, service it.  NOTE(review): original lines 160-163 and the
 * body of the final if (presumably lwkt_yield_quick()) are elided in this
 * excerpt — confirm against the full source.
 */
154 _crit_exit(__DEBUG_CRIT_ARG__)
156 thread_t td = curthread;
158 __DEBUG_CRIT_EXIT(td);          /* no-op unless DEBUG_CRIT_SECTIONS */
159 td->td_pri -= TDPRI_CRIT;
/* Fence: td_pri must be visibly lowered before gd_reqflags is tested. */
164 cpu_ccfence();	/* prevent compiler reordering */
165 if (td->td_gd->gd_reqflags && td->td_pri < TDPRI_CRIT)
/*
 * Same as _crit_exit() but the caller supplies curthread directly
 * (curtd MUST be curthread).  NOTE(review): the body of the final if
 * is elided in this excerpt — confirm against the full source.
 */
170 _crit_exit_quick(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
172     globaldata_t gd = curtd->td_gd;
174     __DEBUG_CRIT_EXIT(curtd);   /* no-op unless DEBUG_CRIT_SECTIONS */
175     curtd->td_pri -= TDPRI_CRIT;
/* Fence: td_pri must be visibly lowered before gd_reqflags is tested. */
176     cpu_ccfence();	/* prevent compiler reordering */
177     if (gd->gd_reqflags && curtd->td_pri < TDPRI_CRIT)
/*
 * Leave a critical section given the caller's globaldata (mygd MUST be
 * mycpu's globaldata); delegates to _crit_exit_quick() on its curthread.
 */
182 _crit_exit_gd(globaldata_t mygd __DEBUG_CRIT_ADD_ARG__)
184     _crit_exit_quick(mygd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
/*
 * Return non-zero if the thread is currently inside a critical section
 * (its priority has been raised by at least one TDPRI_CRIT).
 */
188 crit_test(thread_t td)
190     return(td->td_pri >= TDPRI_CRIT);
194 * Initialize a tokref_t. We only need to initialize the token pointer
195 * and the magic number. We do not have to initialize tr_next, tr_gdreqnext,
/*
 * NOTE(review): only the magic-number store is visible here; the line
 * storing tok (the token pointer mentioned above) is elided in this
 * excerpt — confirm against the full source.
 */
199 lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok)
201     ref->tr_magic = LWKT_TOKREF_MAGIC1;
207 * Return whether any threads are runnable, whether they meet mp_lock
208 * requirements or not.
/*
 * NOTE(review): the function signature line (presumably
 * static __inline int lwkt_runnable(void)) is elided in this excerpt.
 * Non-zero iff any run queue bit is set on the current cpu.
 */
213     return (mycpu->gd_runqmask != 0);
/*
 * Return the thread's base priority, masking off the critical-section
 * nesting count that crit_enter() adds above TDPRI_MASK.
 */
217 lwkt_getpri(thread_t td)
219     return(td->td_pri & TDPRI_MASK);
/*
 * Convenience wrapper: base priority of the calling thread.
 */
223 lwkt_getpri_self(void)
225     return(lwkt_getpri(curthread));