4 * Implement the MP lock. Note that debug operations
6 #ifndef _SYS_MPLOCK2_H_
7 #define _SYS_MPLOCK2_H_
9 #ifndef _MACHINE_ATOMIC_H_
10 #include <machine/atomic.h>
12 #ifndef _SYS_THREAD_H_
13 #include <sys/thread.h>
15 #ifndef _SYS_GLOBALDATA_H_
16 #include <sys/globaldata.h>
/*
 * Debug wrappers: route the public lock operations through the *_debug
 * inlines below, passing the caller's file/line so the contested paths
 * can record who is holding or fighting over the lock.
 */
#define get_mplock()		get_mplock_debug(__FILE__, __LINE__)
#define try_mplock()		try_mplock_debug(__FILE__, __LINE__)
#define cpu_try_mplock()	cpu_try_mplock_debug(__FILE__, __LINE__)
/*
 * Out-of-line slow paths.  The inlines below handle the uncontested
 * fast path only; anything requiring blocking, spinning, or logging
 * is delegated to these functions (implemented in the MP lock .c file).
 */
void	_get_mplock_predisposed(const char *file, int line);
void	_get_mplock_contested(const char *file, int line);
void	_try_mplock_contested(const char *file, int line);
void	_cpu_try_mplock_contested(const char *file, int line);
void	_rel_mplock_contested(void);
void	cpu_get_initial_mplock(void);
void	handle_cpu_contention_mask(void);
void	yield_mplock(struct thread *td);

/* Mask of cpus currently contending for the MP lock. */
extern int cpu_contention_mask;
/* Last recorded acquisition site, for debugging. */
extern const char *mp_lock_holder_file;
extern int mp_lock_holder_line;
40 * Acquire the MP lock, block until we get it.
42 * In order to acquire the MP lock we must first pre-dispose td_mpcount
43 * for the acquisition and then get the actual lock.
45 * The mplock must check a number of conditions and it is better to
46 * leave it to a procedure if we cannot get it trivially.
48 * WARNING: The mp_lock and td_mpcount are not necessarily synchronized.
49 * We must synchronize them here. They can be unsynchronized
50 * for a variety of reasons including predisposition, td_xpcount,
55 get_mplock_debug(const char *file, int line)
57 globaldata_t gd = mycpu;
58 thread_t td = gd->gd_curthread;
61 if (mp_lock != gd->gd_cpuid)
62 _get_mplock_predisposed(file, line);
68 * In order to release the MP lock we must first pre-dispose td_mpcount
69 * for the release and then, if it is 0 and td_xpcount is also zero,
70 * release the actual lock.
72 * The contested function is called only if we are unable to release the
73 * Actual lock. This can occur if we raced an interrupt after decrementing
74 * td_mpcount to 0 and the interrupt acquired and released the lock.
76 * The function also catches the td_mpcount underflow case because the
77 * lock will be in a released state and thus fail the subsequent release.
79 * WARNING: The mp_lock and td_mpcount are not necessarily synchronized.
80 * We must synchronize them here. They can be unsynchronized
81 * for a variety of reasons including predisposition, td_xpcount,
88 globaldata_t gd = mycpu;
89 thread_t td = gd->gd_curthread;
93 if (n < 0 || ((n + td->td_xpcount) == 0 &&
94 atomic_cmpset_int(&mp_lock, gd->gd_cpuid, -1) == 0)) {
95 _rel_mplock_contested();
100 * Attempt to acquire the MP lock, returning 0 on failure and 1 on success.
102 * The contested function is called on failure and typically serves simply
103 * to log the attempt (if debugging enabled).
107 try_mplock_debug(const char *file, int line)
109 globaldata_t gd = mycpu;
110 thread_t td = gd->gd_curthread;
113 if (mp_lock != gd->gd_cpuid &&
114 atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
115 _try_mplock_contested(file, line);
119 mp_lock_holder_file = file;
120 mp_lock_holder_line = line;
126 * Low level acquisition of the MP lock ignoring curthred->td_mpcount
128 * This version of try_mplock() is used when the caller has already
129 * predisposed td->td_mpcount.
131 * Returns non-zero on success, 0 on failure.
133 * WARNING: Must be called from within a critical section if td_mpcount is
134 * zero, otherwise an itnerrupt race can cause the lock to be lost.
138 cpu_try_mplock_debug(const char *file, int line)
140 globaldata_t gd = mycpu;
142 if (mp_lock != gd->gd_cpuid &&
143 atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
144 _cpu_try_mplock_contested(file, line);
148 mp_lock_holder_file = file;
149 mp_lock_holder_line = line;
155 * A cpu wanted the MP lock but could not get it. This function is also
156 * called directly from the LWKT scheduler.
158 * Reentrant, may be called even if the cpu is already contending the MP
163 set_cpu_contention_mask(globaldata_t gd)
165 atomic_set_int(&cpu_contention_mask, gd->gd_cpumask);
169 * A cpu is no longer contending for the MP lock after previously contending
172 * Reentrant, may be called even if the cpu was not previously contending
177 clr_cpu_contention_mask(globaldata_t gd)
179 atomic_clear_int(&cpu_contention_mask, gd->gd_cpumask);
190 * Low level release of the MP lock ignoring curthread->td_mpcount
192 * WARNING: Caller must be in a critical section, otherwise the
193 * mp_lock can be lost from an interrupt race and we would
194 * end up clearing someone else's lock.
197 cpu_rel_mplock(int cpu)
199 (void)atomic_cmpset_int(&mp_lock, cpu, -1);
/*
 * The MP lock is held when the lock word records this cpu as owner.
 */
#define MP_LOCK_HELD(gd)	\
	(mp_lock == gd->gd_cpuid)

/* Panic via KASSERT if the thread's cpu does not hold the MP lock. */
#define ASSERT_MP_LOCK_HELD(td)	\
	KASSERT(MP_LOCK_HELD(td->td_gd), \
		("MP_LOCK_HELD: Not held thread %p", td))
/*
 * UNI-PROCESSOR BUILD - Degenerate case macros.  With only one cpu the
 * MP lock is always trivially held, so the operations collapse to
 * constants and the assertion becomes a no-op.
 */
#define try_mplock()		1
#define owner_mplock()		0
#define MP_LOCK_HELD(gd)	(!0)
#define ASSERT_MP_LOCK_HELD(td)