/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 *
 * $DragonFly: src/sys/sys/thread2.h,v 1.28 2006/12/23 00:27:03 swildner Exp $
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_CPUFUNC_H_
#include <machine/cpufunc.h>
#endif

/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __FUNCTION__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __FUNCTION__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __FUNCTION__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __FUNCTION__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __FUNCTION__)
#define crit_exit()			_crit_exit(mycpu, __FUNCTION__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __FUNCTION__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __FUNCTION__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __FUNCTION__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __FUNCTION__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd), __FUNCTION__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif
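
/*
 * Illustrative sketch (not part of this header): with DEBUG_CRIT_SECTIONS
 * enabled, a mismatched pair such as
 *
 *	crit_enter_id("mydev_intr");
 *	...
 *	crit_exit_id("mydev_done");	(the ids here are hypothetical)
 *
 * reports "crit_exit(mydev_done) expected id mydev_intr", because the
 * exit id is checked against the id recorded at the same nesting depth
 * by _debug_crit_enter().
 */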

/*
 * Critical sections prevent preemption, but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquiring a token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */
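
/*
 * Usage sketch (illustrative only; the counter field is hypothetical):
 *
 *	crit_enter();
 *	++mycpu->gd_some_counter;	(interrupts on this cpu are held
 *					 off, so the update is not torn)
 *	crit_exit();			(any interrupt made pending while
 *					 inside is processed on the way out)
 *
 * Hard sections nest the same way via crit_enter_hard()/crit_exit_hard(),
 * but the enclosed code must not block or switch threads.
 */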

/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	++td->td_critcount;
	__DEBUG_CRIT_ENTER(td);
	cpu_ccfence();
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
	++gd->gd_intr_nesting_level;
}

/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing just on gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few lines of source, so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	__DEBUG_CRIT_EXIT(td);
	--td->td_critcount;
#ifdef INVARIANTS
	if (__predict_false(td->td_critcount < 0))
		crit_panic();
#endif
	cpu_ccfence();	/* prevent compiler reordering */
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
	if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
		lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	--gd->gd_intr_nesting_level;
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
	return(td->td_critcount);
}

/*
 * Return whether any threads are runnable, whether they meet mp_lock
 * requirements or not.
 */
static __inline int
lwkt_runnable(void)
{
	return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
	return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
	return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;
}

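/*
 * Sketch of the intended call site (hypothetical; modeled on the
 * userland-return path mentioned above):
 *
 *	td = curthread;
 *	...handle the trap or syscall...
 *	lwkt_passive_recover(td);	(drops back to TDPRI_USER_NORM
 *					 only if the passive release fired)
 */
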
#ifdef SMP

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(u_int32_t mask, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(u_int32_t mask, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_nowait(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_nowait(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}
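
/*
 * Usage sketch (illustrative only; remote_bump and counter are
 * hypothetical):
 *
 *	static void
 *	remote_bump(void *arg)
 *	{
 *		++*(int *)arg;		(runs on the target cpu)
 *	}
 *	...
 *	lwkt_send_ipiq(globaldata_find(1), remote_bump, &counter);
 *
 * The wrapper casts the one-argument callback to ipifunc3_t; the extra
 * integer and frame arguments supplied by the IPI dispatch are ignored
 * by such callbacks.
 */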

#endif	/* SMP */
#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */