/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Helper functions for MP lock acquisition and release.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>		/* KTR_INFO_MASTER/KTR_INFO/KTR_LOG below */

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>
static int chain_mplock = 0;
static int bgl_yield = 10;
static __int64_t mplock_contention_count = 0;

SYSCTL_INT(_lwkt, OID_AUTO, chain_mplock, CTLFLAG_RW, &chain_mplock, 0,
    "Chain IPI's to other CPU's potentially needing the MP lock when it is yielded");
SYSCTL_INT(_lwkt, OID_AUTO, bgl_yield_delay, CTLFLAG_RW, &bgl_yield, 0,
    "Duration of delay when MP lock is temporarily yielded");
SYSCTL_QUAD(_lwkt, OID_AUTO, mplock_contention_count, CTLFLAG_RW,
	    &mplock_contention_count, 0, "spinning due to MPLOCK contention");
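
/*
 * These knobs surface under the "lwkt" sysctl tree.  Illustrative usage
 * from userland (the node names follow directly from the declarations
 * above):
 *
 *	sysctl lwkt.mplock_contention_count	# read the contention counter
 *	sysctl lwkt.chain_mplock=1		# enable IPI chaining on release
 *	sysctl lwkt.bgl_yield_delay=20		# lengthen the yield delay
 */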
#if !defined(KTR_GIANT_CONTENTION)
#define KTR_GIANT_CONTENTION	KTR_ALL
#endif

KTR_INFO_MASTER(giant);
KTR_INFO(KTR_GIANT_CONTENTION, giant, beg, 0,
	 "thread=%p held %s:%-5d  want %s:%-5d",
	 sizeof(void *) * 3 + sizeof(int) * 2);
KTR_INFO(KTR_GIANT_CONTENTION, giant, end, 1,
	 "thread=%p held %s:%-5d  want %s:%-5d",
	 sizeof(void *) * 3 + sizeof(int) * 2);

#define loggiant(name)						\
	KTR_LOG(giant_ ## name, curthread,			\
		mp_lock_holder_file, mp_lock_holder_line,	\
		file, line)
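
/*
 * A logged contention event substitutes the current holder's file:line
 * and the waiter's file:line into the format above, producing entries
 * along the lines of (values hypothetical):
 *
 *	thread=0xdeadbeef held vfs_syscalls.c:123   want kern_descrip.c:456
 */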
int	mp_lock;
int	cpu_contention_mask;
const char *mp_lock_holder_file;	/* debugging */
int	mp_lock_holder_line;		/* debugging */
/*
 * Sets up the initial MP lock state near the start of the kernel boot.
 */
void
cpu_get_initial_mplock(void)
{
	mp_lock = 0;			/* cpu 0 */
	curthread->td_mpcount = 1;
}
/*
 * This code is called from the get_mplock() inline when the mplock
 * is not already held.  td_mpcount has already been predisposed
 * (incremented).
 */
void
_get_mplock_predisposed(const char *file, int line)
{
	globaldata_t gd = mycpu;

	if (gd->gd_intr_nesting_level) {
		panic("Attempt to acquire mplock not already held "
		      "in hard section, ipi or interrupt %s:%d",
		      file, line);
	}
	if (atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0)
		_get_mplock_contested(file, line);
	mp_lock_holder_file = file;
	mp_lock_holder_line = line;
}
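
/*
 * For reference, the get_mplock() fast path lives in <sys/mplock2.h>
 * and looks roughly like the sketch below (illustrative only, not the
 * verbatim inline; the _sketch name is hypothetical): the count is
 * bumped first, and this file's helper is entered only when the lock
 * is not already owned by this cpu.
 */
#if 0
static __inline void
get_mplock_sketch(const char *file, int line)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_mpcount;	/* predispose the count */
	if (mp_lock != gd->gd_cpuid)	/* slow path unless already ours */
		_get_mplock_predisposed(file, line);
}
#endif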
/*
 * Called when the MP lock could not be trivially acquired.  The caller
 * has already bumped td_mpcount.
 */
void
_get_mplock_contested(const char *file, int line)
{
	globaldata_t gd = mycpu;
	int ov;
	const void **stkframe = (const void **)&file;

	++mplock_contention_count;
	for (;;) {
		ov = mp_lock;
		if (ov == gd->gd_cpuid)
			break;
		if (ov == -1) {
			if (atomic_cmpset_int(&mp_lock, ov, gd->gd_cpuid))
				break;
		} else {
			/*
			 * Owned by another cpu.  Record the nominal
			 * return pc of our caller (stack-layout
			 * dependent) as the stall point and block;
			 * the scheduler hands us the lock on resume.
			 */
			gd->gd_curthread->td_mplock_stallpc = stkframe[-1];
			loggiant(beg);
			lwkt_switch();
			loggiant(end);
			KKASSERT(gd->gd_cpuid == mp_lock);
			break;
		}
	}
}
/*
 * Called if td_mpcount went negative or if td_mpcount + td_xpcount is 0
 * and we were unable to release the MP lock.  Handles sanity checks
 * and conflicts.
 *
 * It is possible for the inline release to have raced an interrupt which
 * get/rel'd the MP lock, causing the inline's cmpset to fail.  If this
 * case occurs mp_lock will either already be in a released state or it
 * will have already been acquired by another cpu.
 */
void
_rel_mplock_contested(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	int ov;

	KKASSERT(td->td_mpcount >= 0);
	if (td->td_mpcount + td->td_xpcount == 0) {
		for (;;) {
			ov = mp_lock;
			if (ov != gd->gd_cpuid)
				break;
			if (atomic_cmpset_int(&mp_lock, ov, -1))
				break;
		}
	}
}
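
/*
 * For reference, the rel_mplock() inline drops td_mpcount and then
 * attempts a single cmpset from our cpuid back to -1, entering the
 * contested path above only when that fails.  A rough sketch
 * (illustrative only; the _sketch name is hypothetical):
 */
#if 0
static __inline void
rel_mplock_sketch(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	--td->td_mpcount;
	if (td->td_mpcount + td->td_xpcount == 0 &&
	    atomic_cmpset_int(&mp_lock, gd->gd_cpuid, -1) == 0) {
		_rel_mplock_contested();	/* raced an interrupt */
	}
}
#endif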
/*
 * Called when try_mplock() fails.
 *
 * The inline bumped td_mpcount so we have to undo it.
 *
 * It is possible to race an interrupt which acquired and released the
 * MP lock.  When combined with the td_mpcount decrement we do here, the
 * MP lock can wind up in any state and possibly not even owned by us.
 *
 * It is also possible for this function to be called even if td_mpcount > 1
 * if someone bumped it and raced an interrupt which then called try_mplock().
 */
void
_try_mplock_contested(const char *file, int line)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	int ov;

	--td->td_mpcount;		/* undo the inline's bump */
	KKASSERT(td->td_mpcount >= 0);
	++mplock_contention_count;

	if (td->td_mpcount + td->td_xpcount == 0) {
		for (;;) {
			ov = mp_lock;
			if (ov != gd->gd_cpuid)
				break;
			if (atomic_cmpset_int(&mp_lock, ov, -1))
				break;
		}
	}
}
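
/*
 * For reference, the try_mplock() inline presumably mirrors the
 * get_mplock() fast path but fails instead of blocking.  A rough
 * sketch (illustrative only; the _sketch name is hypothetical):
 */
#if 0
static __inline int
try_mplock_sketch(const char *file, int line)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_mpcount;	/* optimistic bump */
	if (mp_lock == gd->gd_cpuid ||
	    atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid)) {
		return(1);		/* acquired (or already held) */
	}
	_try_mplock_contested(file, line);	/* undoes the bump */
	return(0);
}
#endif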
/*
 * Called when cpu_try_mplock() fails.
 *
 * The inline did not touch td_mpcount so we do not either.
 */
void
_cpu_try_mplock_contested(const char *file, int line)
{
	++mplock_contention_count;
}
/*
 * Temporarily yield the MP lock.  This is part of lwkt_user_yield()
 * which is kinda hackish.  The MP lock cannot be yielded if inherited
 * due to a preemption.
 */
void
yield_mplock(thread_t td)
{
	int savecnt;

	if (td->td_xpcount == 0) {
		savecnt = td->td_mpcount;
		td->td_mpcount = 1;
		rel_mplock();
		DELAY(bgl_yield);
		get_mplock();
		td->td_mpcount = savecnt;
	}
}
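
/*
 * Illustrative caller (hedged sketch, not the real lwkt_user_yield()
 * body): a cpu-bound thread holding the MP lock periodically gives
 * other cpus a window to take it.
 */
#if 0
void
lwkt_user_yield_sketch(void)
{
	thread_t td = curthread;

	if (td->td_mpcount)		/* only if we hold the MP lock */
		yield_mplock(td);
}
#endif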
/*
 * The rel_mplock() code will call this function after releasing the
 * last reference on the MP lock if cpu_contention_mask is non-zero.
 *
 * We then chain an IPI to a single other cpu potentially needing the
 * lock.  This is a bit heuristic and we can wind up with IPIs flying
 * all over the place.
 */
static void lwkt_mp_lock_uncontested_remote(void *arg __unused);

void
lwkt_mp_lock_uncontested(void)
{
	globaldata_t gd;
	globaldata_t dgd;
	cpumask_t mask;
	cpumask_t tmpmask;
	int cpuid;

	if (chain_mplock) {
		gd = mycpu;
		clr_mplock_contention_mask(gd);
		mask = cpu_contention_mask;
		tmpmask = ~((1 << gd->gd_cpuid) - 1);	/* cpus >= ours */

		if (mask) {
			if (mask & tmpmask)
				cpuid = bsfl(mask & tmpmask);
			else
				cpuid = bsfl(mask);
			atomic_clear_int(&cpu_contention_mask, 1 << cpuid);
			dgd = globaldata_find(cpuid);
			lwkt_send_ipiq(dgd, lwkt_mp_lock_uncontested_remote,
				       NULL);
		}
	}
}
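
/*
 * Worked example of the selection heuristic (values hypothetical):
 * with four cpus, cpu_contention_mask = 0x0b (cpus 0, 1 and 3
 * contending) and gd_cpuid = 2, tmpmask = ~((1 << 2) - 1) = ...fc,
 * so mask & tmpmask = 0x08 and bsfl() picks cpu 3, the first
 * contender at or above our cpuid.  When no higher cpu is contending
 * the bsfl(mask) fallback wraps the scan back to the lowest
 * contending cpu, giving a crude round-robin.
 */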
/*
 * The idea is for this IPI to interrupt a potentially lower priority
 * thread, such as a user thread, to allow the scheduler to reschedule
 * a higher priority kernel thread that needs the MP lock.
 *
 * For now we set the LWKT reschedule flag which generates an AST in
 * doreti, though theoretically it is also possible to preempt here
 * if the underlying thread was operating in user mode.  Nah.
 */
static void
lwkt_mp_lock_uncontested_remote(void *arg __unused)
{
	need_lwkt_resched();
}