4 * Implements the architecture independent portion of the LWKT subsystem.
12 #include <sys/param.h> /* MAXCOMLEN */
15 #include <sys/queue.h> /* TAILQ_* macros */
17 #ifndef _SYS_MSGPORT_H_
18 #include <sys/msgport.h> /* lwkt_port */
21 #include <sys/time.h> /* struct timeval */
26 #ifndef _SYS_SPINLOCK_H_
27 #include <sys/spinlock.h>
29 #ifndef _SYS_IOSCHED_H_
30 #include <sys/iosched.h>
32 #include <machine/thread.h> /* md_thread */
33 #include <machine/stdint.h>
34 #include <machine/ucontext.h>
52 struct sleepqueue_wchan;
54 typedef struct lwkt_queue *lwkt_queue_t;
55 typedef struct lwkt_token *lwkt_token_t;
56 typedef struct lwkt_tokref *lwkt_tokref_t;
58 typedef struct lwkt_cpu_msg *lwkt_cpu_msg_t;
59 typedef struct lwkt_cpu_port *lwkt_cpu_port_t;
61 typedef struct lwkt_ipiq *lwkt_ipiq_t;
62 typedef struct lwkt_cpusync *lwkt_cpusync_t;
63 typedef struct thread *thread_t;
65 typedef TAILQ_HEAD(lwkt_queue, thread) lwkt_queue;
68 * Differentiation between kernel threads and user threads. Userland
69 * programs which want access to kernel structures have to define
70 * _KERNEL_STRUCTURES. This is something of a safety valve to prevent badly
71 * written user programs from getting an LWKT thread that is neither the
72 * kernel nor the user version.
74 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
75 #ifndef _SYS_CPUMASK_H_
76 #include <sys/cpumask.h> /* cpumask_t */
79 #include <machine/frame.h>
86 * Tokens are used to serialize access to information. They are 'soft'
87 * serialization entities that only stay in effect while a thread is
88 * running. If the thread blocks, other threads can run holding the same
89 * token(s). The tokens are reacquired when the original thread resumes.
91 * Tokens guarantee that no deadlock can happen regardless of type or
92 * ordering. However, obtaining the same token first shared, then
93 * stacking exclusive, is not allowed and will panic.
95 * A thread can depend on its serialization remaining intact through a
96 * preemption. An interrupt which attempts to use the same token as the
97 * thread being preempted will reschedule itself for non-preemptive
98 * operation, so the new token code is capable of interlocking against
99 * interrupts as well as other cpus. This means that your token can only
100 * be (temporarily) lost if you *explicitly* block.
102 * Tokens are managed through a helper reference structure, lwkt_tokref. Each
103 * thread has a stack of tokrefs to keep track of acquired tokens. Multiple
104 * tokrefs may reference the same token.
107 * Acquiring an exclusive token requires acquiring the EXCLUSIVE bit
108 * with count == 0. If the exclusive bit cannot be acquired, EXCLREQ
109 * is set. Once acquired, EXCLREQ is cleared (but could get set by
110 * another thread also trying for an exclusive lock at any time).
113 * Acquiring a shared token requires waiting for the EXCLUSIVE bit
114 * to be cleared and then acquiring a count. A shared lock request
115 * can temporarily acquire a count and then back it out if it is
116 * unable to obtain the EXCLUSIVE bit, allowing fetchadd to be used.
118 * A thread attempting to get a single shared token will defer to
119 * pending exclusive requesters. However, a thread already holding
120 * one or more tokens and trying to get an additional shared token
121 * cannot defer to exclusive requesters because doing so can lead to a deadlock.
124 * Multiple exclusive tokens are handled by treating the additional tokens
125 * as a special case of the shared token, incrementing the count value. This
126 * reduces the complexity of the token release code.
130 long t_count; /* Shared/exclreq/exclusive access */
131 struct lwkt_tokref *t_ref; /* Exclusive ref */
132 long t_collisions; /* Collision counter */
133 const char *t_desc; /* Descriptive name */
136 #define TOK_EXCLUSIVE 0x00000001 /* Exclusive lock held */
137 #define TOK_EXCLREQ 0x00000002 /* Exclusive request pending */
138 #define TOK_INCR 4 /* Shared count increment */
139 #define TOK_COUNTMASK (~(long)(TOK_EXCLUSIVE|TOK_EXCLREQ))
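/*
 * Illustrative sketch only (the real acquisition logic lives in the token
 * code, not in this header): the t_count encoding above lets a shared
 * acquisition use an atomic fetchadd and back the count out again if an
 * exclusive holder or requester is present.  atomic_fetchadd_long() and
 * atomic_add_long() are assumed to come from <machine/atomic.h>, and the
 * "already holding other tokens" exception described above is omitted.
 *
 *	static int
 *	try_shared_sketch(lwkt_token_t tok)
 *	{
 *		long count;
 *
 *		count = atomic_fetchadd_long(&tok->t_count, TOK_INCR);
 *		if ((count & (TOK_EXCLUSIVE | TOK_EXCLREQ)) == 0)
 *			return (1);
 *		atomic_add_long(&tok->t_count, -TOK_INCR);
 *		return (0);
 *	}
 */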
142 * Static initialization for a lwkt_token.
144 #define LWKT_TOKEN_INITIALIZER(name) \
153 * Assert that a particular token is held
155 #define LWKT_TOKEN_HELD_ANY(tok) _lwkt_token_held_any(tok, curthread)
156 #define LWKT_TOKEN_HELD_EXCL(tok) _lwkt_token_held_excl(tok, curthread)
158 #define ASSERT_LWKT_TOKEN_HELD(tok) \
159 KKASSERT(LWKT_TOKEN_HELD_ANY(tok))
161 #define ASSERT_LWKT_TOKEN_HELD_EXCL(tok) \
162 KKASSERT(LWKT_TOKEN_HELD_EXCL(tok))
164 #define ASSERT_NO_TOKENS_HELD(td) \
165 KKASSERT((td)->td_toks_stop == &(td)->td_toks_array[0])
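/*
 * Hypothetical usage sketch for the assertion macros above ('my_token' and
 * 'my_count' are placeholders, not part of this header): assert that the
 * required token is held before touching the data it protects.
 *
 *	void
 *	my_counter_bump(void)
 *	{
 *		ASSERT_LWKT_TOKEN_HELD_EXCL(&my_token);
 *		++my_count;
 *	}
 */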
168 lwkt_token_t tr_tok; /* token in question */
169 long tr_count; /* TOK_EXCLUSIVE|TOK_EXCLREQ or 0 */
170 struct thread *tr_owner; /* me */
173 #define MAXCPUFIFO 256 /* power of 2 */
174 #define MAXCPUFIFO_MASK (MAXCPUFIFO - 1)
175 #define LWKT_MAXTOKENS 32 /* max tokens beneficially held by thread */
177 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
179 * Always cast to ipifunc_t when registering an ipi. The actual ipi function
180 * is called with both the data and an interrupt frame, but the ipi function
181 * that is registered might only declare a data argument.
183 typedef void (*ipifunc1_t)(void *arg);
184 typedef void (*ipifunc2_t)(void *arg, int arg2);
185 typedef void (*ipifunc3_t)(void *arg, int arg2, struct intrframe *frame);
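/*
 * Illustrative sketch of the cast described above ('my_handler', 'target_gd',
 * and 'arg' are placeholders): a handler that only declares the data argument
 * is cast to ipifunc3_t when handed to lwkt_send_ipiq3(), which is declared
 * later in this file.
 *
 *	static void
 *	my_handler(void *arg)
 *	{
 *		... runs on the target cpu ...
 *	}
 *
 *	lwkt_send_ipiq3(target_gd, (ipifunc3_t)my_handler, arg, 0);
 */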
188 int ip_rindex; /* only written by target cpu */
189 int ip_xindex; /* written by target, indicates completion */
190 int ip_windex; /* only written by source cpu */
191 int ip_drain; /* drain source limit */
196 char filler[32 - sizeof(int) - sizeof(void *) * 2];
197 } ip_info[MAXCPUFIFO];
201 * CPU Synchronization structure. See lwkt_cpusync_init() and
202 * lwkt_cpusync_interlock() for more information.
204 typedef void (*cpusync_func_t)(void *arg);
206 struct lwkt_cpusync {
207 cpumask_t cs_mask; /* cpus running the sync */
208 cpumask_t cs_mack; /* mask acknowledge */
209 cpusync_func_t cs_func; /* function to execute */
210 void *cs_data; /* function data */
212 #endif /* _KERNEL || _KERNEL_STRUCTURES */
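/*
 * Typical cpusync usage sketch.  lwkt_cpusync_init() is an inline in
 * sys/thread2.h; the (cs, mask, func, data) argument order shown here is an
 * assumption for illustration, as are 'other_cpus_mask', 'my_invalidate',
 * and 'my_data'.
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, other_cpus_mask, my_invalidate, my_data);
 *	lwkt_cpusync_interlock(&cs);
 *	... make the critical change while the target cpus are interlocked ...
 *	lwkt_cpusync_deinterlock(&cs);
 */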
215 * The standard message and queue structure used for communications between
216 * cpus. Messages are typically queued via a machine-specific non-linked
217 * FIFO matrix allowing any cpu to send a message to any other cpu without blocking.
221 typedef struct lwkt_cpu_msg {
222 void (*cm_func)(lwkt_cpu_msg_t msg); /* primary dispatch function */
223 int cm_code; /* request code if applicable */
224 int cm_cpu; /* reply to cpu */
225 thread_t cm_originator; /* originating thread for wakeup */
230 * per-thread file descriptor cache
233 int fd; /* descriptor being cached */
235 struct file *fp; /* cached referenced fp */
240 #define NFDCACHE 4 /* max fd's cached by a thread */
243 * Thread structure. Note that ownership of a thread structure is special
244 * cased and there is no 'token'. A thread is always owned by the cpu
245 * represented by td_gd; any manipulation of the thread by some other cpu
246 * must be done through cpu_*msg() functions. E.g., you could request
247 * ownership of a thread that way, or hand a thread off to another cpu.
249 * NOTE: td_ucred is synchronized from the p_ucred on user->kernel syscall,
250 * trap, and AST/signal transitions to provide a stable ucred for
251 * (primarily) system calls. This field will be NULL for pure kernel threads.
257 TAILQ_ENTRY(thread) td_threadq;
258 TAILQ_ENTRY(thread) td_allq;
259 TAILQ_ENTRY(thread) td_sleepq;
260 lwkt_port td_msgport; /* built-in message port for replies */
261 struct lwp *td_lwp; /* (optional) associated lwp */
262 struct proc *td_proc; /* (optional) associated process */
263 struct pcb *td_pcb; /* points to pcb and top of kstack */
264 struct globaldata *td_gd; /* associated with this cpu */
265 const char *td_wmesg; /* string name for blockage */
266 const volatile void *td_wchan; /* waiting on channel */
267 int td_pri; /* 0-31, 31=highest priority (note 1) */
268 int td_critcount; /* critical section priority */
269 u_int td_flags; /* TDF flags */
270 int td_wdomain; /* domain for wchan address (typ 0) */
271 void (*td_preemptable)(struct thread *td, int critcount);
272 void (*td_release)(struct thread *td);
273 char *td_kstack; /* kernel stack */
274 int td_kstack_size; /* size of kernel stack */
275 char *td_sp; /* kernel stack pointer for LWKT restore */
276 thread_t (*td_switch)(struct thread *ntd);
277 __uint64_t td_uticks; /* Statclock hits in user mode (uS) */
278 __uint64_t td_sticks; /* Statclock hits in system mode (uS) */
279 __uint64_t td_iticks; /* Statclock hits processing intr (uS) */
280 int td_locks; /* lockmgr lock debugging */
281 struct plimit *td_limit; /* synchronized from proc->p_limit */
282 int td_refs; /* hold position in gd_tdallq / hold free */
283 int td_nest_count; /* prevent splz nesting */
284 u_int td_contended; /* token contention count */
285 u_int td_mpflags; /* flags can be set by foreign cpus */
286 int td_cscount; /* cpu synchronization master */
287 int td_wakefromcpu; /* who woke me up? */
288 int td_upri; /* user priority (sub-priority under td_pri) */
289 int td_type; /* thread type, TD_TYPE_ */
290 int td_tracker; /* misc use (base value 0), recursion count */
292 int td_unused03[3]; /* for future fields */
293 struct iosched_data td_iosdata; /* Dynamic I/O scheduling data */
294 struct timeval td_start; /* start time for a thread/process */
295 char td_comm[MAXCOMLEN+1]; /* typ 16+1 bytes */
296 struct thread *td_preempted; /* we preempted this thread */
297 struct ucred *td_ucred; /* synchronized from proc->p_ucred */
298 mcontext_t *td_kfpuctx; /* kernel_fpu_begin()/kernel_fpu_end() */
299 lwkt_tokref_t td_toks_have; /* tokens we own */
300 lwkt_tokref_t td_toks_stop; /* tokens we want */
301 struct lwkt_tokref td_toks_array[LWKT_MAXTOKENS];
302 int td_fairq_load; /* fairq */
303 int td_fairq_count; /* fairq */
304 struct globaldata *td_migrate_gd; /* target gd for thread migration */
305 struct fdcache td_fdcache[NFDCACHE];
308 * Linux and FreeBSD compat fields
310 void *td_linux_task; /* drm/linux support */
311 struct sleepqueue_wchan *td_sqwc; /* freebsd sleepq*() API */
312 sbintime_t td_sqtimo; /* freebsd sleepq*() API */
313 int td_sqqueue; /* freebsd sleepq*() API */
318 #ifdef DEBUG_CRIT_SECTIONS
319 #define CRIT_DEBUG_ARRAY_SIZE 32
320 #define CRIT_DEBUG_ARRAY_MASK (CRIT_DEBUG_ARRAY_SIZE - 1)
321 const char *td_crit_debug_array[CRIT_DEBUG_ARRAY_SIZE];
322 int td_crit_debug_index;
323 int td_in_crit_report;
329 struct md_thread td_mach;
335 #define SPINLOCK_DEBUG_ARRAY_SIZE 32
336 int td_spinlock_stack_id[SPINLOCK_DEBUG_ARRAY_SIZE];
337 struct spinlock *td_spinlock_stack[SPINLOCK_DEBUG_ARRAY_SIZE];
338 void *td_spinlock_caller_pc[SPINLOCK_DEBUG_ARRAY_SIZE];
341 * Track lockmgr locks held; lk->lk_filename:lk->lk_lineno is the holder
343 #define LOCKMGR_DEBUG_ARRAY_SIZE 8
344 int td_lockmgr_stack_id[LOCKMGR_DEBUG_ARRAY_SIZE];
345 struct lock *td_lockmgr_stack[LOCKMGR_DEBUG_ARRAY_SIZE];
349 #define td_toks_base td_toks_array[0]
350 #define td_toks_end td_toks_array[LWKT_MAXTOKENS]
352 #define TD_TOKS_HELD(td) ((td)->td_toks_stop != &(td)->td_toks_base)
353 #define TD_TOKS_NOT_HELD(td) ((td)->td_toks_stop == &(td)->td_toks_base)
356 * Thread flags. Note that TDF_RUNNING is cleared on the old thread after
357 * we switch to the new one, which is necessary because LWKTs don't need
358 * to hold the BGL. This flag is used by the exit code and the managed
359 * thread migration code. Note in addition that preemption will cause
360 * TDF_RUNNING to be cleared temporarily, so any code checking TDF_RUNNING
361 * must also check TDF_PREEMPT_LOCK.
363 * LWKT threads stay on their (per-cpu) run queue while running, not to
364 * be confused with user processes, which are removed from the user scheduling
365 * run queue while actually running.
367 * td_threadq can represent the thread on one of three queues... the LWKT
368 * run queue, a tsleep queue, or an lwkt blocking queue. The LWKT subsystem
369 * does not allow a thread to be scheduled if it already resides on some queue.
372 #define TDF_RUNNING 0x00000001 /* thread still active */
373 #define TDF_RUNQ 0x00000002 /* on an LWKT run queue */
374 #define TDF_PREEMPT_LOCK 0x00000004 /* I have been preempted */
375 #define TDF_PREEMPT_DONE 0x00000008 /* ack preemption complete */
376 #define TDF_NOSTART 0x00000010 /* do not schedule on create */
377 #define TDF_MIGRATING 0x00000020 /* thread is being migrated */
378 #define TDF_SINTR 0x00000040 /* interruptibility for 'ps' */
379 #define TDF_TSLEEPQ 0x00000080 /* on a tsleep wait queue */
381 #define TDF_SYSTHREAD 0x00000100 /* reserve memory may be used */
382 #define TDF_ALLOCATED_THREAD 0x00000200 /* objcache allocated thread */
383 #define TDF_ALLOCATED_STACK 0x00000400 /* objcache allocated stack */
384 #define TDF_FPU_HEUR 0x00000800 /* active restore on switch */
385 #define TDF_DEADLKTREAT 0x00001000 /* special lockmgr treatment */
386 #define TDF_MARKER 0x00002000 /* tdallq list scan marker */
387 #define TDF_TIMEOUT_RUNNING 0x00004000 /* tsleep timeout race */
388 #define TDF_TIMEOUT 0x00008000 /* tsleep timeout */
389 #define TDF_INTTHREAD 0x00010000 /* interrupt thread */
390 #define TDF_TSLEEP_DESCHEDULED 0x00020000 /* tsleep core deschedule */
391 #define TDF_BLOCKED 0x00040000 /* Thread is blocked */
392 #define TDF_PANICWARN 0x00080000 /* panic warning in switch */
393 #define TDF_BLOCKQ 0x00100000 /* on block queue */
394 #define TDF_FORCE_SPINPORT 0x00200000
395 #define TDF_EXITING 0x00400000 /* thread exiting */
396 #define TDF_USINGFP 0x00800000 /* thread using fp coproc */
397 #define TDF_KERNELFP 0x01000000 /* kernel using fp coproc */
398 #define TDF_DELAYED_WAKEUP 0x02000000
399 #define TDF_FIXEDCPU 0x04000000 /* running cpu is fixed */
400 #define TDF_USERMODE 0x08000000 /* in or entering user mode */
401 #define TDF_NOFAULT 0x10000000 /* force onfault on fault */
402 #define TDF_CLKTHREAD 0x20000000 /* detect INTTHREAD clock */
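/*
 * Illustrative sketch of the TDF_RUNNING note above: preemption temporarily
 * clears TDF_RUNNING, so code waiting for a thread to get off-cpu tests
 * TDF_PREEMPT_LOCK as well (cpu_pause() here is just one way to spin).
 *
 *	while (td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK))
 *		cpu_pause();
 */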
404 #define TDF_MP_STOPREQ 0x00000001 /* suspend_kproc */
405 #define TDF_MP_WAKEREQ 0x00000002 /* resume_kproc */
406 #define TDF_MP_EXITWAIT 0x00000004 /* reaper, see lwp_wait() */
407 #define TDF_MP_EXITSIG 0x00000008 /* reaper, see lwp_wait() */
408 #define TDF_MP_BATCH_DEMARC 0x00000010 /* batch mode handling */
409 #define TDF_MP_DIDYIELD 0x00000020 /* affects scheduling */
411 #define TD_TYPE_GENERIC 0 /* generic thread */
412 #define TD_TYPE_CRYPTO 1 /* crypto thread */
413 #define TD_TYPE_NETISR 2 /* netisr thread */
416 * Thread priorities. Typically only one thread from any given
417 * user process scheduling queue is on the LWKT run queue at a time.
418 * Remember that there is one LWKT run queue per cpu.
420 * Critical sections are handled by bumping td_pri above TDPRI_MAX, which
421 * causes interrupts to be masked as they occur. When this occurs, a
422 * rollup flag will be set in mycpu->gd_reqflags.
424 #define TDPRI_IDLE_THREAD 0 /* the idle thread */
425 #define TDPRI_IDLE_WORK 1 /* idle work (page zero, etc) */
426 #define TDPRI_USER_SCHEDULER 2 /* user scheduler helper */
427 #define TDPRI_USER_IDLE 4 /* user scheduler idle */
428 #define TDPRI_USER_NORM 6 /* user scheduler normal */
429 #define TDPRI_USER_REAL 8 /* user scheduler real time */
430 #define TDPRI_KERN_LPSCHED 9 /* (comparison point only) */
431 #define TDPRI_KERN_USER 10 /* kernel / block in syscall */
432 #define TDPRI_KERN_DAEMON 12 /* kernel daemon (pageout, etc) */
433 #define TDPRI_SOFT_NORM 14 /* kernel / normal */
434 #define TDPRI_SOFT_TIMER 16 /* kernel / timer */
435 #define TDPRI_UNUSED19 19
436 #define TDPRI_INT_SUPPORT 20 /* kernel / high priority support */
437 #define TDPRI_INT_LOW 27 /* low priority interrupt */
438 #define TDPRI_INT_MED 28 /* medium priority interrupt */
439 #define TDPRI_INT_HIGH 29 /* high priority interrupt */
442 #define LWKT_THREAD_STACK (UPAGES * PAGE_SIZE)
444 #define IN_CRITICAL_SECT(td) ((td)->td_critcount)
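/*
 * Critical section sketch: crit_enter()/crit_exit() (inlines in
 * sys/thread2.h) nest by bumping td_critcount, so IN_CRITICAL_SECT(curthread)
 * is true in between and the thread cannot be preempted by an interrupt
 * thread.
 *
 *	crit_enter();
 *	KKASSERT(IN_CRITICAL_SECT(curthread));
 *	... safely access per-cpu data ...
 *	crit_exit();
 */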
448 extern void (*linux_task_drop_callback)(struct thread *);
449 extern void (*linux_proc_drop_callback)(struct proc *);
454 extern struct lwkt_token mp_token;
455 extern struct lwkt_token pmap_token;
456 extern struct lwkt_token dev_token;
457 extern struct lwkt_token vm_token;
458 extern struct lwkt_token vmspace_token;
459 extern struct lwkt_token kvm_token;
460 extern struct lwkt_token sigio_token;
461 extern struct lwkt_token tty_token;
462 extern struct lwkt_token vnode_token;
463 extern struct lwkt_token revoke_token;
464 extern struct lwkt_token kbd_token;
465 extern struct lwkt_token vga_token;
470 struct thread *lwkt_alloc_thread(struct thread *, int, int, int);
471 void lwkt_init_thread(struct thread *, void *, int, int, struct globaldata *);
472 void lwkt_set_interrupt_support_thread(void);
473 void lwkt_set_comm(thread_t, const char *, ...) __printflike(2, 3);
474 void lwkt_free_thread(struct thread *);
475 void lwkt_gdinit(struct globaldata *);
476 void lwkt_switch(void);
477 void lwkt_switch_return(struct thread *);
478 void lwkt_preempt(thread_t, int);
479 void lwkt_schedule(thread_t);
480 void lwkt_schedule_noresched(thread_t);
481 void lwkt_schedule_self(thread_t);
482 void lwkt_deschedule(thread_t);
483 void lwkt_deschedule_self(thread_t);
484 void lwkt_yield(void);
485 void lwkt_yield_quick(void);
486 void lwkt_user_yield(void);
487 void lwkt_hold(thread_t);
488 void lwkt_rele(thread_t);
489 void lwkt_passive_release(thread_t);
490 void lwkt_maybe_splz(thread_t);
492 void lwkt_gettoken(lwkt_token_t);
493 void lwkt_gettoken_shared(lwkt_token_t);
494 int lwkt_trytoken(lwkt_token_t);
495 void lwkt_reltoken(lwkt_token_t);
496 int lwkt_cnttoken(lwkt_token_t, thread_t);
497 int lwkt_getalltokens(thread_t, int);
498 void lwkt_relalltokens(thread_t);
499 void lwkt_token_init(lwkt_token_t, const char *);
500 void lwkt_token_uninit(lwkt_token_t);
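/*
 * Typical token usage sketch (tty_token is one of the globals declared
 * above; any lwkt_token behaves the same way).  Remember from the comments
 * at the top of this file that the serialization can be lost across an
 * explicit blocking operation and is reacquired when the thread resumes.
 *
 *	lwkt_gettoken(&tty_token);		exclusive access
 *	... modify tty-related structures ...
 *	lwkt_reltoken(&tty_token);
 *
 *	lwkt_gettoken_shared(&tty_token);	shared, read-only access
 *	... read tty-related structures ...
 *	lwkt_reltoken(&tty_token);
 */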
502 void lwkt_token_pool_init(void);
503 lwkt_token_t lwkt_token_pool_lookup(void *);
504 lwkt_token_t lwkt_getpooltoken(void *);
505 void lwkt_relpooltoken(void *);
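/*
 * Pool token sketch: lwkt_getpooltoken() hashes an arbitrary pointer to one
 * of a fixed set of shared tokens, allowing per-object serialization without
 * embedding a token in every object ('obj' is any stable pointer).
 *
 *	lwkt_getpooltoken(obj);
 *	... operate on obj ...
 *	lwkt_relpooltoken(obj);
 */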
507 void lwkt_token_swap(void);
509 void lwkt_setpri(thread_t, int);
510 void lwkt_setpri_initial(thread_t, int);
511 void lwkt_setpri_self(int);
512 void lwkt_schedulerclock(thread_t td);
513 void lwkt_setcpu_self(struct globaldata *);
514 void lwkt_migratecpu(int);
516 void lwkt_giveaway(struct thread *);
517 void lwkt_acquire(struct thread *);
518 int lwkt_send_ipiq3(struct globaldata *, ipifunc3_t, void *, int);
519 int lwkt_send_ipiq3_passive(struct globaldata *, ipifunc3_t, void *, int);
520 int lwkt_send_ipiq3_bycpu(int, ipifunc3_t, void *, int);
521 int lwkt_send_ipiq3_mask(cpumask_t, ipifunc3_t, void *, int);
522 void lwkt_wait_ipiq(struct globaldata *, int);
523 void lwkt_process_ipiq(void);
524 void lwkt_process_ipiq_frame(struct intrframe *);
525 void lwkt_smp_stopped(void);
526 void lwkt_synchronize_ipiqs(const char *);
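/*
 * Cross-cpu call sketch using the ipiq functions above.  The sequence value
 * returned by lwkt_send_ipiq3() is assumed here to be what lwkt_wait_ipiq()
 * expects; 'target_gd', 'my_handler', and 'arg' are placeholders.
 *
 *	int seq;
 *
 *	seq = lwkt_send_ipiq3(target_gd, (ipifunc3_t)my_handler, arg, 0);
 *	lwkt_wait_ipiq(target_gd, seq);
 */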
528 /* lwkt_cpusync_init() - inline function in sys/thread2.h */
529 void lwkt_cpusync_simple(cpumask_t, cpusync_func_t, void *);
530 void lwkt_cpusync_interlock(lwkt_cpusync_t);
531 void lwkt_cpusync_deinterlock(lwkt_cpusync_t);
532 void lwkt_cpusync_quick(lwkt_cpusync_t);
534 void crit_panic(void) __dead2;
535 struct lwp *lwkt_preempted_proc(void);
537 int lwkt_create(void (*)(void *), void *, struct thread **, struct thread *,
538 int, int, const char *, ...) __printflike(7, 8);
539 void lwkt_exit(void) __dead2;
540 void lwkt_remove_tdallq(struct thread *);
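/*
 * Minimal lwkt_create() sketch.  Based on the prototype above, the arguments
 * are assumed to be (function, argument, returned thread pointer, template
 * thread, thread flags, cpu, name format); 'my_thread_loop' and the -1
 * "no particular cpu" value are illustrative placeholders.
 *
 *	static void
 *	my_thread_loop(void *dummy)
 *	{
 *		... do work ...
 *		lwkt_exit();
 *	}
 *
 *	lwkt_create(my_thread_loop, NULL, NULL, NULL, 0, -1, "mydaemon");
 */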
544 #endif /* !_SYS_THREAD_H_ */