/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking, these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>

/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
	long count = tok->t_count;

	cpu_ccfence();
	if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
		return TRUE;
	if ((count & TOK_EXCLUSIVE) == 0 &&
	    (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
		return TRUE;
	}
	return FALSE;
}

/*
 * Is a token held exclusively by the specified thread?
 */
static __inline int
_lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
{
	return ((tok->t_ref >= &td->td_toks_base &&
		 tok->t_ref < td->td_toks_stop));
}

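/*
 * Usage sketch (illustrative, not from this file): these helpers back
 * assertion-style checks that a token is held before protected state
 * is touched, e.g. with a hypothetical token 'mytok':
 *
 *	KKASSERT(_lwkt_token_held_excl(&mytok, curthread));
 */
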
/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __FUNCTION__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __FUNCTION__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __FUNCTION__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __FUNCTION__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __FUNCTION__)
#define crit_exit()			_crit_exit(mycpu, __FUNCTION__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __FUNCTION__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __FUNCTION__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __FUNCTION__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __FUNCTION__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd), __FUNCTION__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}
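
/*
 * NOTE: td_crit_debug_array acts as a small ring buffer; the index
 * wraps via CRIT_DEBUG_ARRAY_MASK, so only the most recent enter/exit
 * ids are retained for the mismatch report below.
 */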

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif

/*
 * Critical sections prevent preemption but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and its handler returns immediately.
 * Interrupts are not physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquire token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */

/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	++td->td_critcount;
	__DEBUG_CRIT_ENTER(td);
	cpu_ccfence();
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
	++gd->gd_intr_nesting_level;
}

/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few source lines, so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	__DEBUG_CRIT_EXIT(td);
	--td->td_critcount;
#ifdef INVARIANTS
	if (__predict_false(td->td_critcount < 0))
		crit_panic();
#endif
	cpu_ccfence();	/* prevent compiler reordering */
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
	if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
		lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	--gd->gd_intr_nesting_level;
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}
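
/*
 * Usage sketch (illustrative only): critical sections are used in
 * matched enter/exit pairs around code that must not be preempted
 * on the local cpu:
 *
 *	crit_enter();
 *	...manipulate per-cpu or interrupt-sensitive state...
 *	crit_exit();
 */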

static __inline int
crit_test(thread_t td)
{
	return(td->td_critcount);
}

/*
 * Return whether any threads are runnable on the current cpu.
 */
static __inline int
lwkt_runnable(void)
{
	return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
	return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
	return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;
}
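
/*
 * (Hypothetical call site, for illustration: the platform trap code
 * would invoke lwkt_passive_recover(curthread) on its return-to-
 * userland path once it is done with the passive release hook.)
 */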

/*
 * cpusync support
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
{
	cs->cs_mask = mask;
	/* cs->cs_mack = 0; handled by _interlock */
	cs->cs_func = func;
	cs->cs_data = data;
}
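
/*
 * Minimal usage sketch (assuming the lwkt_cpusync_interlock()/
 * lwkt_cpusync_deinterlock() pair referenced above; my_func and
 * my_data are placeholders).  The pair synchronizes the cpus in
 * cs_mask and runs func as part of the sequence:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, smp_active_mask, my_func, my_data);
 *	lwkt_cpusync_interlock(&cs);
 *	lwkt_cpusync_deinterlock(&cs);
 */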

#ifdef SMP

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}
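
/*
 * Illustrative sketch only (my_remote_func and the cpu index n are
 * placeholders): a single-argument callback dispatched to another
 * cpu through the wrapper above.
 *
 *	static void
 *	my_remote_func(void *arg)
 *	{
 *		...runs on the target cpu from its IPI processing path...
 *	}
 *
 *	lwkt_send_ipiq(globaldata_find(n), my_remote_func, arg);
 */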

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_nowait(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_nowait(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

#endif	/* SMP */
#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */