Change sendfile() to use the new m_ext callback scheme for cleaning up after
[dragonfly.git] / sys / sys / thread2.h
... / ...
CommitLineData
1/*
2 * SYS/THREAD2.H
3 *
4 * Implements inline procedure support for the LWKT subsystem.
5 *
6 * Generally speaking these routines only operate on threads associated
7 * with the current cpu. For example, a higher priority thread pending
8 * on a different cpu will not be immediately scheduled by a yield() on
9 * this cpu.
10 *
11 * $DragonFly: src/sys/sys/thread2.h,v 1.16 2004/07/29 08:55:48 dillon Exp $
12 */
13
14#ifndef _SYS_THREAD2_H_
15#define _SYS_THREAD2_H_
16
17/*
18 * Userland will have its own globaldata which it includes prior to this.
19 */
20#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
21#ifndef _SYS_GLOBALDATA_H_
22#include <sys/globaldata.h>
23#endif
24#ifndef _MACHINE_CPUFUNC_H_
25#include <machine/cpufunc.h>
26#endif
27#endif
28
29/*
30 * Critical sections prevent preemption by raising a thread's priority
31 * above the highest possible interrupting priority. Additionally, the
32 * current cpu will not be able to schedule a new thread but will instead
33 * place it on a pending list (with interrupts physically disabled) and
34 * set mycpu->gd_reqflags to indicate that work needs to be done, which
35 * lwkt_yield_quick() takes care of.
36 *
37 * Some of these routines take a struct thread pointer as an argument. This
38 * pointer MUST be curthread and is only passed as an optimization.
39 *
40 * Synchronous switching and blocking is allowed while in a critical section.
41 */
42
43static __inline void
44crit_enter(void)
45{
46 struct thread *td = curthread;
47
48 td->td_pri += TDPRI_CRIT;
49#ifdef INVARIANTS
50 if (td->td_pri < 0)
51 crit_panic();
52#endif
53}
54
/*
 * Enter a critical section using a caller-supplied thread pointer.
 * curtd MUST be curthread; it is passed only so the caller can avoid
 * a redundant load of the current-thread pointer.  Unlike crit_enter()
 * this fast path performs no INVARIANTS overflow check.
 */
static __inline void
crit_enter_quick(struct thread *curtd)
{
	curtd->td_pri += TDPRI_CRIT;
}
60
/*
 * Enter a critical section given the caller's globaldata.  mygd MUST
 * be the current cpu's globaldata (mycpu); this simply forwards the
 * cached gd_curthread to the quick path.
 */
static __inline void
crit_enter_gd(globaldata_t mygd)
{
	crit_enter_quick(mygd->gd_curthread);
}
66
/*
 * Leave a critical section WITHOUT checking gd_reqflags, i.e. without
 * giving lwkt_yield_quick() a chance to run pending work.  curtd MUST
 * be curthread.
 */
static __inline void
crit_exit_noyield(struct thread *curtd)
{
	curtd->td_pri -= TDPRI_CRIT;
#ifdef INVARIANTS
	/* more exits than enters: nesting underflow */
	if (curtd->td_pri < 0)
		crit_panic();
#endif
}
76
77static __inline void
78crit_exit(void)
79{
80 thread_t td = curthread;
81
82 td->td_pri -= TDPRI_CRIT;
83#ifdef INVARIANTS
84 if (td->td_pri < 0)
85 crit_panic();
86#endif
87 cpu_mb1(); /* must flush td_pri before checking gd_reqflags */
88 if (td->td_gd->gd_reqflags && td->td_pri < TDPRI_CRIT)
89 lwkt_yield_quick();
90}
91
92static __inline void
93crit_exit_quick(struct thread *curtd)
94{
95 globaldata_t gd = curtd->td_gd;
96
97 curtd->td_pri -= TDPRI_CRIT;
98 cpu_mb1(); /* must flush td_pri before checking gd_reqflags */
99 if (gd->gd_reqflags && curtd->td_pri < TDPRI_CRIT)
100 lwkt_yield_quick();
101}
102
/*
 * Leave a critical section given the caller's globaldata.  mygd MUST
 * be the current cpu's globaldata (mycpu); forwards the cached
 * gd_curthread to the quick exit path.
 */
static __inline void
crit_exit_gd(globaldata_t mygd)
{
	crit_exit_quick(mygd->gd_curthread);
}
108
109static __inline int
110crit_panic_save(void)
111{
112 thread_t td = curthread;
113 int pri = td->td_pri;
114 td->td_pri = td->td_pri & TDPRI_MASK;
115 return(pri);
116}
117
/*
 * Restore the raw td_pri value previously returned by
 * crit_panic_save().
 */
static __inline void
crit_panic_restore(int cpri)
{
	curthread->td_pri = cpri;
}
123
/*
 * Initialize a tokref_t. We only need to initialize the token pointer
 * and the magic number. We do not have to initialize tr_next, tr_gdreqnext,
 * or tr_reqgd.
 */
static __inline void
lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok)
{
	ref->tr_magic = LWKT_TOKREF_MAGIC1;	/* marks the ref as initialized */
	ref->tr_tok = tok;
}
135
/*
 * Return whether any threads are runnable, whether they meet mp_lock
 * requirements or not.
 */
static __inline int
lwkt_runnable(void)
{
	/* non-zero run queue mask on this cpu means at least one runnable thread */
	return (mycpu->gd_runqmask != 0);
}
145
/*
 * Return td's base LWKT priority, stripping the critical-section
 * nesting count out of td_pri with TDPRI_MASK.
 */
static __inline int
lwkt_getpri(thread_t td)
{
	return(td->td_pri & TDPRI_MASK);
}
151
152static __inline int
153lwkt_getpri_self(void)
154{
155 return(lwkt_getpri(curthread));
156}
157
158#endif
159