*
* Types which must already be defined when this header is included by
* userland: struct md_thread
- *
- * $DragonFly: src/sys/sys/thread.h,v 1.97 2008/09/20 04:31:02 sephe Exp $
*/
#ifndef _SYS_THREAD_H_
#ifndef _SYS_TIME_H_
#include <sys/time.h> /* struct timeval */
#endif
+#ifndef _SYS_LOCK_H
+#include <sys/lock.h>
+#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_IOSCHED_H_
#include <sys/iosched.h>
#endif
-#ifndef _MACHINE_THREAD_H_
#include <machine/thread.h>
-#endif
struct globaldata;
struct lwp;
* Tokens are managed through a helper reference structure, lwkt_tokref. Each
* thread has a stack of tokref's to keep track of acquired tokens. Multiple
* tokref's may reference the same token.
+ *
+ * Tokens can be held shared or exclusive. An exclusive holder is able
+ * to set the TOK_EXCLUSIVE bit in t_count as long as no bit in the count
+ * mask is set. If that cannot be accomplished, TOK_EXCLREQ can be set
+ * instead, which prevents any new shared acquisitions while the exclusive
+ * requestor spins in the scheduler. A shared holder can bump t_count by
+ * the increment value as long as neither TOK_EXCLUSIVE nor TOK_EXCLREQ
+ * is set, else spin in the scheduler.
+ *
+ * Multiple exclusive tokens are handled by treating the additional tokens
+ * as a special case of the shared token, incrementing the count value. This
+ * reduces the complexity of the token release code.
*/
typedef struct lwkt_token {
- struct lwkt_tokref *t_ref; /* Owning ref or NULL */
- intptr_t t_flags; /* MP lock required */
+ long t_count; /* Shared/exclreq/exclusive access */
+ struct lwkt_tokref *t_ref; /* Exclusive ref */
long t_collisions; /* Collision counter */
- cpumask_t t_collmask; /* Collision cpu mask for resched */
const char *t_desc; /* Descriptive name */
} lwkt_token;
-#define LWKT_TOKEN_MPSAFE 0x0001
+#define TOK_EXCLUSIVE 0x00000001 /* Exclusive lock held */
+#define TOK_EXCLREQ 0x00000002 /* Exclusive request pending */
+#define TOK_INCR 4 /* Shared count increment */
+#define TOK_COUNTMASK (~(long)(TOK_EXCLUSIVE|TOK_EXCLREQ))
/*
* Static initialization for a lwkt_token.
- * UP - Not MPSAFE (full MP lock will also be acquired)
- * MP - Is MPSAFE (only the token will be acquired)
*/
-#define LWKT_TOKEN_UP_INITIALIZER(name) \
+#define LWKT_TOKEN_INITIALIZER(name) \
{ \
+ .t_count = 0, \
.t_ref = NULL, \
- .t_flags = 0, \
.t_collisions = 0, \
- .t_collmask = 0, \
- .t_desc = #name \
-}
-
-#define LWKT_TOKEN_MP_INITIALIZER(name) \
-{ \
- .t_ref = NULL, \
- .t_flags = LWKT_TOKEN_MPSAFE, \
- .t_collisions = 0, \
- .t_collmask = 0, \
.t_desc = #name \
}
struct lwkt_tokref {
lwkt_token_t tr_tok; /* token in question */
+ long tr_count; /* TOK_EXCLUSIVE|TOK_EXCLREQ or 0 */
struct thread *tr_owner; /* me */
- intptr_t tr_flags; /* copy of t_flags */
- const void *tr_stallpc; /* stalled at pc */
};
-#define MAXCPUFIFO 16 /* power of 2 */
+#define MAXCPUFIFO 32 /* power of 2 */
#define MAXCPUFIFO_MASK (MAXCPUFIFO - 1)
#define LWKT_MAXTOKENS 32 /* max tokens beneficially held by thread */
int ip_rindex; /* only written by target cpu */
int ip_xindex; /* written by target, indicates completion */
int ip_windex; /* only written by source cpu */
- ipifunc3_t ip_func[MAXCPUFIFO];
- void *ip_arg1[MAXCPUFIFO];
- int ip_arg2[MAXCPUFIFO];
- u_int ip_npoll; /* synchronization to avoid excess IPIs */
+ struct {
+ ipifunc3_t func;
+ void *arg1;
+ int arg2;
+ char filler[32 - sizeof(int) - sizeof(void *) * 2];
+ } ip_info[MAXCPUFIFO];
} lwkt_ipiq;
/*
const volatile void *td_wchan; /* waiting on channel */
int td_pri; /* 0-31, 31=highest priority (note 1) */
int td_critcount; /* critical section priority */
- int td_flags; /* TDF flags */
+ u_int td_flags; /* TDF flags */
int td_wdomain; /* domain for wchan address (typ 0) */
void (*td_preemptable)(struct thread *td, int critcount);
void (*td_release)(struct thread *td);
char *td_kstack; /* kernel stack */
int td_kstack_size; /* size of kernel stack */
char *td_sp; /* kernel stack pointer for LWKT restore */
- void (*td_switch)(struct thread *ntd);
+ thread_t (*td_switch)(struct thread *ntd);
__uint64_t td_uticks; /* Statclock hits in user mode (uS) */
__uint64_t td_sticks; /* Statclock hits in system mode (uS) */
__uint64_t td_iticks; /* Statclock hits processing intr (uS) */
void *td_dsched_priv1; /* priv data for I/O schedulers */
int td_refs; /* hold position in gd_tdallq / hold free */
int td_nest_count; /* prevent splz nesting */
- int td_unused01[2]; /* for future fields */
+ int td_contended; /* token contention count */
+ u_int td_mpflags; /* flags can be set by foreign cpus */
#ifdef SMP
int td_cscount; /* cpu synchronization master */
#else
struct thread *td_preempted; /* we preempted this thread */
struct ucred *td_ucred; /* synchronized from p_ucred */
struct caps_kinfo *td_caps; /* list of client and server registrations */
- lwkt_tokref_t td_toks_stop;
+ lwkt_tokref_t td_toks_have; /* tokens we own */
+ lwkt_tokref_t td_toks_stop; /* tokens we want */
struct lwkt_tokref td_toks_array[LWKT_MAXTOKENS];
- int td_fairq_lticks; /* fairq wakeup accumulator reset */
- int td_fairq_accum; /* fairq priority accumulator */
- const void *td_mplock_stallpc; /* last mplock stall address */
+ int td_fairq_load; /* fairq */
+ int td_fairq_count; /* fairq */
+ struct globaldata *td_migrate_gd; /* target gd for thread migration */
#ifdef DEBUG_CRIT_SECTIONS
#define CRIT_DEBUG_ARRAY_SIZE 32
#define CRIT_DEBUG_ARRAY_MASK (CRIT_DEBUG_ARRAY_SIZE - 1)
int td_spinlock_stack_id[SPINLOCK_DEBUG_ARRAY_SIZE];
struct spinlock *td_spinlock_stack[SPINLOCK_DEBUG_ARRAY_SIZE];
void *td_spinlock_caller_pc[SPINLOCK_DEBUG_ARRAY_SIZE];
+
+ /*
+ * Track lockmgr locks held; lk->lk_filename:lk->lk_lineno is the holder
+ */
+#define LOCKMGR_DEBUG_ARRAY_SIZE 8
+ int td_lockmgr_stack_id[LOCKMGR_DEBUG_ARRAY_SIZE];
+ struct lock *td_lockmgr_stack[LOCKMGR_DEBUG_ARRAY_SIZE];
#endif
};
* does not allow a thread to be scheduled if it already resides on some
* queue.
*/
-#define TDF_RUNNING 0x0001 /* thread still active */
-#define TDF_RUNQ 0x0002 /* on an LWKT run queue */
-#define TDF_PREEMPT_LOCK 0x0004 /* I have been preempted */
-#define TDF_PREEMPT_DONE 0x0008 /* acknowledge preemption complete */
-#define TDF_UNUSED00000010 0x0010
-#define TDF_MIGRATING 0x0020 /* thread is being migrated */
-#define TDF_SINTR 0x0040 /* interruptability hint for 'ps' */
-#define TDF_TSLEEPQ 0x0080 /* on a tsleep wait queue */
-
-#define TDF_SYSTHREAD 0x0100 /* allocations may use reserve */
-#define TDF_ALLOCATED_THREAD 0x0200 /* objcache allocated thread */
-#define TDF_ALLOCATED_STACK 0x0400 /* objcache allocated stack */
-#define TDF_VERBOSE 0x0800 /* verbose on exit */
-#define TDF_DEADLKTREAT 0x1000 /* special lockmgr deadlock treatment */
-#define TDF_STOPREQ 0x2000 /* suspend_kproc */
-#define TDF_WAKEREQ 0x4000 /* resume_kproc */
-#define TDF_TIMEOUT 0x8000 /* tsleep timeout */
+#define TDF_RUNNING 0x00000001 /* thread still active */
+#define TDF_RUNQ 0x00000002 /* on an LWKT run queue */
+#define TDF_PREEMPT_LOCK 0x00000004 /* I have been preempted */
+#define TDF_PREEMPT_DONE	0x00000008	/* ack preemption complete */
+#define TDF_NOSTART 0x00000010 /* do not schedule on create */
+#define TDF_MIGRATING 0x00000020 /* thread is being migrated */
+#define TDF_SINTR 0x00000040 /* interruptability for 'ps' */
+#define TDF_TSLEEPQ 0x00000080 /* on a tsleep wait queue */
+
+#define TDF_SYSTHREAD 0x00000100 /* reserve memory may be used */
+#define TDF_ALLOCATED_THREAD 0x00000200 /* objcache allocated thread */
+#define TDF_ALLOCATED_STACK 0x00000400 /* objcache allocated stack */
+#define TDF_VERBOSE 0x00000800 /* verbose on exit */
+#define TDF_DEADLKTREAT 0x00001000 /* special lockmgr treatment */
+#define TDF_UNUSED2000 0x00002000
+#define TDF_TIMEOUT_RUNNING 0x00004000 /* tsleep timeout race */
+#define TDF_TIMEOUT 0x00008000 /* tsleep timeout */
#define TDF_INTTHREAD 0x00010000 /* interrupt thread */
#define TDF_TSLEEP_DESCHEDULED 0x00020000 /* tsleep core deschedule */
#define TDF_BLOCKED 0x00040000 /* Thread is blocked */
#define TDF_PANICWARN 0x00080000 /* panic warning in switch */
#define TDF_BLOCKQ 0x00100000 /* on block queue */
-#define TDF_UNUSED00200000 0x00200000
+#define TDF_FORCE_SPINPORT 0x00200000
#define TDF_EXITING 0x00400000 /* thread exiting */
#define TDF_USINGFP 0x00800000 /* thread using fp coproc */
#define TDF_KERNELFP 0x01000000 /* kernel using fp coproc */
#define TDF_UNUSED02000000 0x02000000
#define TDF_CRYPTO 0x04000000 /* crypto thread */
-#define TDF_MARKER 0x80000000 /* fairq marker thread */
+
+#define TDF_MP_STOPREQ 0x00000001 /* suspend_kproc */
+#define TDF_MP_WAKEREQ 0x00000002 /* resume_kproc */
/*
* Thread priorities. Typically only one thread from any given
#define TDPRI_INT_HIGH 29 /* high priority interrupt */
#define TDPRI_MAX 31
-/*
- * Scale is the approximate number of ticks for which we desire the
- * entire gd_tdrunq to get service. With hz = 100 a scale of 8 is 80ms.
- *
- * Setting this value too small will result in inefficient switching
- * rates.
- */
-#define TDFAIRQ_SCALE 8
-#define TDFAIRQ_MAX(gd) ((gd)->gd_fairq_total_pri * TDFAIRQ_SCALE)
-
#define LWKT_THREAD_STACK (UPAGES * PAGE_SIZE)
-#define CACHE_NTHREADS 6
-
#define IN_CRITICAL_SECT(td) ((td)->td_critcount)
#ifdef _KERNEL
extern struct thread *lwkt_alloc_thread(struct thread *, int, int, int);
extern void lwkt_init_thread(struct thread *, void *, int, int,
struct globaldata *);
+extern void lwkt_set_interrupt_support_thread(void);
extern void lwkt_set_comm(thread_t, const char *, ...) __printflike(2, 3);
extern void lwkt_wait_free(struct thread *);
extern void lwkt_free_thread(struct thread *);
extern void lwkt_gdinit(struct globaldata *);
extern void lwkt_switch(void);
+extern void lwkt_switch_return(struct thread *);
extern void lwkt_preempt(thread_t, int);
extern void lwkt_schedule(thread_t);
extern void lwkt_schedule_noresched(thread_t);
extern void lwkt_maybe_splz(thread_t);
extern void lwkt_gettoken(lwkt_token_t);
+extern void lwkt_gettoken_shared(lwkt_token_t);
extern void lwkt_gettoken_hard(lwkt_token_t);
extern int lwkt_trytoken(lwkt_token_t);
extern void lwkt_reltoken(lwkt_token_t);
extern void lwkt_reltoken_hard(lwkt_token_t);
extern int lwkt_cnttoken(lwkt_token_t, thread_t);
-extern int lwkt_getalltokens(thread_t);
+extern int lwkt_getalltokens(thread_t, int);
extern void lwkt_relalltokens(thread_t);
extern void lwkt_drain_token_requests(void);
-extern void lwkt_token_init(lwkt_token_t, int, const char *);
+extern void lwkt_token_init(lwkt_token_t, const char *);
extern void lwkt_token_uninit(lwkt_token_t);
extern void lwkt_token_pool_init(void);
extern lwkt_token_t lwkt_getpooltoken(void *);
extern void lwkt_relpooltoken(void *);
+extern void lwkt_token_swap(void);
+
extern void lwkt_setpri(thread_t, int);
extern void lwkt_setpri_initial(thread_t, int);
extern void lwkt_setpri_self(int);
-extern void lwkt_fairq_schedulerclock(thread_t td);
-extern void lwkt_fairq_setpri_self(int pri);
-extern int lwkt_fairq_push(int pri);
-extern void lwkt_fairq_pop(int pri);
-extern void lwkt_fairq_yield(void);
+extern void lwkt_schedulerclock(thread_t td);
extern void lwkt_setcpu_self(struct globaldata *);
extern void lwkt_migratecpu(int);