| 1 | /* |
| 2 | * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved. |
| 3 | * |
| 4 | * This code is derived from software contributed to The DragonFly Project |
| 5 | * by Matthew Dillon <dillon@backplane.com> |
| 6 | * |
| 7 | * Redistribution and use in source and binary forms, with or without |
| 8 | * modification, are permitted provided that the following conditions |
| 9 | * are met: |
| 10 | * |
| 11 | * 1. Redistributions of source code must retain the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer. |
| 13 | * 2. Redistributions in binary form must reproduce the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer in |
| 15 | * the documentation and/or other materials provided with the |
| 16 | * distribution. |
| 17 | * 3. Neither the name of The DragonFly Project nor the names of its |
| 18 | * contributors may be used to endorse or promote products derived |
| 19 | * from this software without specific, prior written permission. |
| 20 | * |
| 21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 24 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 25 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| 27 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 28 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
| 29 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| 30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
| 31 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 32 | * SUCH DAMAGE. |
| 33 | */ |
| 34 | |
| 35 | /* |
| 36 | * Each cpu in a system has its own self-contained light weight kernel |
| 37 | * thread scheduler, which means that generally speaking we only need |
| 38 | * to use a critical section to avoid problems. Foreign thread |
| 39 | * scheduling is queued via (async) IPIs. |
| 40 | */ |
| 41 | |
| 42 | #include <sys/param.h> |
| 43 | #include <sys/systm.h> |
| 44 | #include <sys/kernel.h> |
| 45 | #include <sys/proc.h> |
| 46 | #include <sys/rtprio.h> |
| 47 | #include <sys/queue.h> |
| 48 | #include <sys/sysctl.h> |
| 49 | #include <sys/kthread.h> |
| 50 | #include <machine/cpu.h> |
| 51 | #include <sys/lock.h> |
| 52 | #include <sys/caps.h> |
| 53 | #include <sys/spinlock.h> |
| 54 | #include <sys/ktr.h> |
| 55 | |
| 56 | #include <sys/thread2.h> |
| 57 | #include <sys/spinlock2.h> |
| 58 | |
| 59 | #include <vm/vm.h> |
| 60 | #include <vm/vm_param.h> |
| 61 | #include <vm/vm_kern.h> |
| 62 | #include <vm/vm_object.h> |
| 63 | #include <vm/vm_page.h> |
| 64 | #include <vm/vm_map.h> |
| 65 | #include <vm/vm_pager.h> |
| 66 | #include <vm/vm_extern.h> |
| 67 | |
| 68 | #include <machine/stdarg.h> |
| 69 | #include <machine/smp.h> |
| 70 | |
| 71 | #if !defined(KTR_CTXSW) |
| 72 | #define KTR_CTXSW KTR_ALL |
| 73 | #endif |
| 74 | KTR_INFO_MASTER(ctxsw); |
| 75 | KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "sw %p > %p", 2 * sizeof(struct thread *)); |
| 76 | KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "pre %p > %p", 2 * sizeof(struct thread *)); |
| 77 | |
| 78 | static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads"); |
| 79 | |
| 80 | #ifdef SMP |
| 81 | static int mplock_countx = 0; |
| 82 | #endif |
| 83 | #ifdef INVARIANTS |
| 84 | static int panic_on_cscount = 0; |
| 85 | #endif |
| 86 | static __int64_t switch_count = 0; |
| 87 | static __int64_t preempt_hit = 0; |
| 88 | static __int64_t preempt_miss = 0; |
| 89 | static __int64_t preempt_weird = 0; |
| 90 | static __int64_t token_contention_count = 0; |
| 91 | static __int64_t mplock_contention_count = 0; |
| 92 | static int lwkt_use_spin_port; |
| 93 | #ifdef SMP |
| 94 | static int chain_mplock = 0; |
| 95 | static int bgl_yield = 10; |
| 96 | #endif |
| 97 | static struct objcache *thread_cache; |
| 98 | |
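|  | /* cpus waiting for the MP lock; used by the chain_mplock heuristic */ |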
| 99 | volatile cpumask_t mp_lock_contention_mask; |
| 100 | |
| 101 | #ifdef SMP |
| 102 | static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame); |
| 103 | #endif |
| 104 | |
| 105 | extern void cpu_heavy_restore(void); |
| 106 | extern void cpu_lwkt_restore(void); |
| 107 | extern void cpu_kthread_restore(void); |
| 108 | extern void cpu_idle_restore(void); |
| 109 | |
| 110 | #ifdef __amd64__ |
| 111 | |
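|  | /* |
|  | * Debugging helper (amd64): verify that the value at the top of a |
|  | * thread's saved stack points at one of the known context restore |
|  | * functions. Returns 1 if it does, or if td is NULL. |
|  | */ |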
| 112 | static int |
| 113 | jg_tos_ok(struct thread *td) |
| 114 | { |
| 115 | void *tos; |
| 116 | int tos_ok; |
| 117 | |
| 118 | if (td == NULL) { |
| 119 | return 1; |
| 120 | } |
| 121 | KKASSERT(td->td_sp != NULL); |
| 122 | tos = ((void **)td->td_sp)[0]; |
| 123 | tos_ok = 0; |
| 124 | if ((tos == cpu_heavy_restore) || (tos == cpu_lwkt_restore) || |
| 125 | (tos == cpu_kthread_restore) || (tos == cpu_idle_restore)) { |
| 126 | tos_ok = 1; |
| 127 | } |
| 128 | return tos_ok; |
| 129 | } |
| 130 | |
| 131 | #endif |
| 132 | |
| 133 | /* |
| 134 | * We can make all thread ports use the spin backend instead of the thread |
| 135 | * backend. This should only be set to debug the spin backend. |
| 136 | */ |
| 137 | TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port); |
| 138 | |
| 139 | #ifdef INVARIANTS |
| 140 | SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, ""); |
| 141 | #endif |
| 142 | #ifdef SMP |
| 143 | SYSCTL_INT(_lwkt, OID_AUTO, chain_mplock, CTLFLAG_RW, &chain_mplock, 0, ""); |
| 144 | SYSCTL_INT(_lwkt, OID_AUTO, bgl_yield_delay, CTLFLAG_RW, &bgl_yield, 0, ""); |
| 145 | #endif |
| 146 | SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, ""); |
| 147 | SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, ""); |
| 148 | SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, ""); |
| 149 | SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, ""); |
| 150 | #ifdef INVARIANTS |
| 151 | SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW, |
| 152 | &token_contention_count, 0, "spinning due to token contention"); |
| 153 | SYSCTL_QUAD(_lwkt, OID_AUTO, mplock_contention_count, CTLFLAG_RW, |
| 154 | &mplock_contention_count, 0, "spinning due to MPLOCK contention"); |
| 155 | #endif |
| 156 | |
| 157 | /* |
| 158 | * Kernel Trace |
| 159 | */ |
| 160 | #if !defined(KTR_GIANT_CONTENTION) |
| 161 | #define KTR_GIANT_CONTENTION KTR_ALL |
| 162 | #endif |
| 163 | |
| 164 | KTR_INFO_MASTER(giant); |
| 165 | KTR_INFO(KTR_GIANT_CONTENTION, giant, beg, 0, "thread=%p", sizeof(void *)); |
| 166 | KTR_INFO(KTR_GIANT_CONTENTION, giant, end, 1, "thread=%p", sizeof(void *)); |
| 167 | |
| 168 | #define loggiant(name) KTR_LOG(giant_ ## name, curthread) |
| 169 | |
| 170 | /* |
| 171 | * These helper procedures handle the runq, they can only be called from |
| 172 | * within a critical section. |
| 173 | * |
| 174 | * WARNING! Prior to SMP being brought up it is possible to enqueue and |
| 175 | * dequeue threads belonging to other cpus, so be sure to use td->td_gd |
| 176 | * instead of 'mycpu' when referencing the globaldata structure. Once |
| 177 | * SMP is live, enqueuing and dequeuing only occur on the current cpu. |
| 178 | */ |
| 179 | static __inline |
| 180 | void |
| 181 | _lwkt_dequeue(thread_t td) |
| 182 | { |
| 183 | if (td->td_flags & TDF_RUNQ) { |
| 184 | int nq = td->td_pri & TDPRI_MASK; |
| 185 | struct globaldata *gd = td->td_gd; |
| 186 | |
| 187 | td->td_flags &= ~TDF_RUNQ; |
| 188 | TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq); |
| 189 | /* runqmask is passively cleaned up by the switcher */ |
| 190 | } |
| 191 | } |
| 192 | |
| 193 | static __inline |
| 194 | void |
| 195 | _lwkt_enqueue(thread_t td) |
| 196 | { |
| 197 | if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) { |
| 198 | int nq = td->td_pri & TDPRI_MASK; |
| 199 | struct globaldata *gd = td->td_gd; |
| 200 | |
| 201 | td->td_flags |= TDF_RUNQ; |
| 202 | TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq); |
| 203 | gd->gd_runqmask |= 1 << nq; |
| 204 | } |
| 205 | } |
| 206 | |
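|  | /* |
|  | * objcache constructor for struct thread. The kernel stack is not |
|  | * allocated here; it is attached later by lwkt_alloc_thread(). |
|  | */ |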
| 207 | static __boolean_t |
| 208 | _lwkt_thread_ctor(void *obj, void *privdata, int ocflags) |
| 209 | { |
| 210 | struct thread *td = (struct thread *)obj; |
| 211 | |
| 212 | td->td_kstack = NULL; |
| 213 | td->td_kstack_size = 0; |
| 214 | td->td_flags = TDF_ALLOCATED_THREAD; |
| 215 | return (1); |
| 216 | } |
| 217 | |
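|  | /* |
|  | * objcache destructor for struct thread. Frees the kernel stack |
|  | * still attached to the thread. |
|  | */ |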
| 218 | static void |
| 219 | _lwkt_thread_dtor(void *obj, void *privdata) |
| 220 | { |
| 221 | struct thread *td = (struct thread *)obj; |
| 222 | |
| 223 | KASSERT(td->td_flags & TDF_ALLOCATED_THREAD, |
| 224 | ("_lwkt_thread_dtor: not allocated from objcache")); |
| 225 | KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack && |
| 226 | td->td_kstack_size > 0, |
| 227 | ("_lwkt_thread_dtor: corrupted stack")); |
| 228 | kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); |
| 229 | } |
| 230 | |
| 231 | /* |
| 232 | * Initialize the lwkt subsystem. |
| 233 | */ |
| 234 | void |
| 235 | lwkt_init(void) |
| 236 | { |
| 237 | /* An objcache has 2 magazines per CPU so divide cache size by 2. */ |
| 238 | thread_cache = objcache_create_mbacked(M_THREAD, sizeof(struct thread), |
| 239 | NULL, CACHE_NTHREADS/2, |
| 240 | _lwkt_thread_ctor, _lwkt_thread_dtor, NULL); |
| 241 | } |
| 242 | |
| 243 | /* |
| 244 | * Schedule a thread to run. The current thread can always safely |
| 245 | * schedule itself, and this shortcut procedure is provided for that |
| 246 | * case. |
| 247 | * |
| 248 | * (non-blocking, self contained on a per cpu basis) |
| 249 | */ |
| 250 | void |
| 251 | lwkt_schedule_self(thread_t td) |
| 252 | { |
| 253 | crit_enter_quick(td); |
| 254 | KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!")); |
| 255 | KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0); |
| 256 | _lwkt_enqueue(td); |
| 257 | crit_exit_quick(td); |
| 258 | } |
| 259 | |
| 260 | /* |
| 261 | * Deschedule a thread. |
| 262 | * |
| 263 | * (non-blocking, self contained on a per cpu basis) |
| 264 | */ |
| 265 | void |
| 266 | lwkt_deschedule_self(thread_t td) |
| 267 | { |
| 268 | crit_enter_quick(td); |
| 269 | _lwkt_dequeue(td); |
| 270 | crit_exit_quick(td); |
| 271 | } |
| 272 | |
| 273 | /* |
| 274 | * LWKTs operate on a per-cpu basis |
| 275 | * |
| 276 | * WARNING! Called from early boot, 'mycpu' may not work yet. |
| 277 | */ |
| 278 | void |
| 279 | lwkt_gdinit(struct globaldata *gd) |
| 280 | { |
| 281 | int i; |
| 282 | |
| 283 | for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i) |
| 284 | TAILQ_INIT(&gd->gd_tdrunq[i]); |
| 285 | gd->gd_runqmask = 0; |
| 286 | TAILQ_INIT(&gd->gd_tdallq); |
| 287 | } |
| 288 | |
| 289 | /* |
| 290 | * Create a new thread. The thread must be associated with a process context |
| 291 | * or LWKT start address before it can be scheduled. If the target cpu is |
| 292 | * -1 the thread will be created on the current cpu. |
| 293 | * |
| 294 | * If you intend to create a thread without a process context this function |
| 295 | * does everything except load the startup and switcher function. |
| 296 | */ |
| 297 | thread_t |
| 298 | lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags) |
| 299 | { |
| 300 | globaldata_t gd = mycpu; |
| 301 | void *stack; |
| 302 | |
| 303 | /* |
| 304 | * If static thread storage is not supplied allocate a thread. Reuse |
| 305 | * a cached free thread if possible. gd_freetd is used to keep an exiting |
| 306 | * thread intact through the exit. |
| 307 | */ |
| 308 | if (td == NULL) { |
| 309 | if ((td = gd->gd_freetd) != NULL) |
| 310 | gd->gd_freetd = NULL; |
| 311 | else |
| 312 | td = objcache_get(thread_cache, M_WAITOK); |
| 313 | KASSERT((td->td_flags & |
| 314 | (TDF_ALLOCATED_THREAD|TDF_RUNNING)) == TDF_ALLOCATED_THREAD, |
| 315 | ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags)); |
| 316 | flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK); |
| 317 | } |
| 318 | |
| 319 | /* |
| 320 | * Try to reuse cached stack. |
| 321 | */ |
| 322 | if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) { |
| 323 | if (flags & TDF_ALLOCATED_STACK) { |
| 324 | kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size); |
| 325 | stack = NULL; |
| 326 | } |
| 327 | } |
| 328 | if (stack == NULL) { |
| 329 | stack = (void *)kmem_alloc(&kernel_map, stksize); |
| 330 | flags |= TDF_ALLOCATED_STACK; |
| 331 | } |
| 332 | if (cpu < 0) |
| 333 | lwkt_init_thread(td, stack, stksize, flags, gd); |
| 334 | else |
| 335 | lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu)); |
| 336 | return(td); |
| 337 | } |
| 338 | |
| 339 | /* |
| 340 | * Initialize a preexisting thread structure. This function is used by |
| 341 | * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread. |
| 342 | * |
| 343 | * All threads start out in a critical section at a priority of |
| 344 | * TDPRI_KERN_DAEMON. Higher level code will modify the priority as |
| 345 | * appropriate. This function may send an IPI message when the |
| 346 | * requested cpu is not the current cpu and consequently gd_tdallq may |
| 347 | * not be initialized synchronously from the point of view of the originating |
| 348 | * cpu. |
| 349 | * |
| 350 | * NOTE! We have to be careful with regard to creating threads for other cpus |
| 351 | * if SMP has not yet been activated. |
| 352 | */ |
| 353 | #ifdef SMP |
| 354 | |
| 355 | static void |
| 356 | lwkt_init_thread_remote(void *arg) |
| 357 | { |
| 358 | thread_t td = arg; |
| 359 | |
| 360 | /* |
| 361 | * Protected by critical section held by IPI dispatch |
| 362 | */ |
| 363 | TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq); |
| 364 | } |
| 365 | |
| 366 | #endif |
| 367 | |
| 368 | void |
| 369 | lwkt_init_thread(thread_t td, void *stack, int stksize, int flags, |
| 370 | struct globaldata *gd) |
| 371 | { |
| 372 | globaldata_t mygd = mycpu; |
| 373 | |
| 374 | bzero(td, sizeof(struct thread)); |
| 375 | td->td_kstack = stack; |
| 376 | td->td_kstack_size = stksize; |
| 377 | td->td_flags = flags; |
| 378 | td->td_gd = gd; |
| 379 | td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT; |
| 380 | #ifdef SMP |
| 381 | if ((flags & TDF_MPSAFE) == 0) |
| 382 | td->td_mpcount = 1; |
| 383 | #endif |
| 384 | if (lwkt_use_spin_port) |
| 385 | lwkt_initport_spin(&td->td_msgport); |
| 386 | else |
| 387 | lwkt_initport_thread(&td->td_msgport, td); |
| 388 | pmap_init_thread(td); |
| 389 | #ifdef SMP |
| 390 | /* |
| 391 | * Normally initializing a thread for a remote cpu requires sending an |
| 392 | * IPI. However, the idlethread is setup before the other cpus are |
| 393 | * activated so we have to treat it as a special case. XXX manipulation |
| 394 | * of gd_tdallq requires the BGL. |
| 395 | */ |
| 396 | if (gd == mygd || td == &gd->gd_idlethread) { |
| 397 | crit_enter_gd(mygd); |
| 398 | TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); |
| 399 | crit_exit_gd(mygd); |
| 400 | } else { |
| 401 | lwkt_send_ipiq(gd, lwkt_init_thread_remote, td); |
| 402 | } |
| 403 | #else |
| 404 | crit_enter_gd(mygd); |
| 405 | TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); |
| 406 | crit_exit_gd(mygd); |
| 407 | #endif |
| 408 | } |
| 409 | |
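|  | /* |
|  | * Set the thread's command name (td_comm) using a printf-style |
|  | * format string. |
|  | */ |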
| 410 | void |
| 411 | lwkt_set_comm(thread_t td, const char *ctl, ...) |
| 412 | { |
| 413 | __va_list va; |
| 414 | |
| 415 | __va_start(va, ctl); |
| 416 | kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va); |
| 417 | __va_end(va); |
| 418 | } |
| 419 | |
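|  | /* |
|  | * Thread reference counting: lwkt_hold() adds a reference, lwkt_rele() |
|  | * drops one, and lwkt_wait_free() sleeps until all references have |
|  | * been released. |
|  | */ |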
| 420 | void |
| 421 | lwkt_hold(thread_t td) |
| 422 | { |
| 423 | ++td->td_refs; |
| 424 | } |
| 425 | |
| 426 | void |
| 427 | lwkt_rele(thread_t td) |
| 428 | { |
| 429 | KKASSERT(td->td_refs > 0); |
| 430 | --td->td_refs; |
| 431 | } |
| 432 | |
| 433 | void |
| 434 | lwkt_wait_free(thread_t td) |
| 435 | { |
| 436 | while (td->td_refs) |
| 437 | tsleep(td, 0, "tdreap", hz); |
| 438 | } |
| 439 | |
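|  | /* |
|  | * Release a thread's resources. The thread must have already exited |
|  | * (TDF_RUNNING clear). A thread allocated from the objcache is |
|  | * returned to the cache; otherwise only an internally allocated |
|  | * stack is freed. |
|  | */ |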
| 440 | void |
| 441 | lwkt_free_thread(thread_t td) |
| 442 | { |
| 443 | KASSERT((td->td_flags & TDF_RUNNING) == 0, |
| 444 | ("lwkt_free_thread: did not exit! %p", td)); |
| 445 | |
| 446 | if (td->td_flags & TDF_ALLOCATED_THREAD) { |
| 447 | objcache_put(thread_cache, td); |
| 448 | } else if (td->td_flags & TDF_ALLOCATED_STACK) { |
| 449 | /* client-allocated struct with internally allocated stack */ |
| 450 | KASSERT(td->td_kstack && td->td_kstack_size > 0, |
| 451 | ("lwkt_free_thread: corrupted stack")); |
| 452 | kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); |
| 453 | td->td_kstack = NULL; |
| 454 | td->td_kstack_size = 0; |
| 455 | } |
| 456 | } |
| 457 | |
| 458 | |
| 459 | /* |
| 460 | * Switch to the next runnable lwkt. If no LWKTs are runnable then |
| 461 | * switch to the idlethread. Switching must occur within a critical |
| 462 | * section to avoid races with the scheduling queue. |
| 463 | * |
| 464 | * We always have full control over our cpu's run queue. Other cpus |
| 465 | * that wish to manipulate our queue must use the cpu_*msg() calls to |
| 466 | * talk to our cpu, so a critical section is all that is needed and |
| 467 | * the result is very, very fast thread switching. |
| 468 | * |
| 469 | * The LWKT scheduler uses a fixed priority model and round-robins at |
| 470 | * each priority level. User process scheduling is a totally |
| 471 | * different beast and LWKT priorities should not be confused with |
| 472 | * user process priorities. |
| 473 | * |
| 474 | * The MP lock may be out of sync with the thread's td_mpcount. lwkt_switch() |
| 475 | * cleans it up. Note that the td_switch() function cannot do anything that |
| 476 | * requires the MP lock since the MP lock will have already been setup for |
| 477 | * the target thread (not the current thread). It's nice to have a scheduler |
| 478 | * that does not need the MP lock to work because it allows us to do some |
| 479 | * really cool high-performance MP lock optimizations. |
| 480 | * |
| 481 | * PREEMPTION NOTE: Preemption occurs via lwkt_preempt(). lwkt_switch() |
| 482 | * is not called by the current thread in the preemption case, only when |
| 483 | * the preempting thread blocks (in order to return to the original thread). |
| 484 | */ |
| 485 | void |
| 486 | lwkt_switch(void) |
| 487 | { |
| 488 | globaldata_t gd = mycpu; |
| 489 | thread_t td = gd->gd_curthread; |
| 490 | thread_t ntd; |
| 491 | #ifdef SMP |
| 492 | int mpheld; |
| 493 | #endif |
| 494 | |
| 495 | /* |
| 496 | * Switching from within a 'fast' (non-thread-switched) interrupt or IPI |
| 497 | * is illegal. However, we may have to do it anyway if we hit a fatal |
| 498 | * kernel trap or we have panicked. |
| 499 | * |
| 500 | * If this case occurs, save and restore the interrupt nesting level. |
| 501 | */ |
| 502 | if (gd->gd_intr_nesting_level) { |
| 503 | int savegdnest; |
| 504 | int savegdtrap; |
| 505 | |
| 506 | if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) { |
| 507 | panic("lwkt_switch: cannot switch from within " |
| 508 | "a fast interrupt, yet, td %p\n", td); |
| 509 | } else { |
| 510 | savegdnest = gd->gd_intr_nesting_level; |
| 511 | savegdtrap = gd->gd_trap_nesting_level; |
| 512 | gd->gd_intr_nesting_level = 0; |
| 513 | gd->gd_trap_nesting_level = 0; |
| 514 | if ((td->td_flags & TDF_PANICWARN) == 0) { |
| 515 | td->td_flags |= TDF_PANICWARN; |
| 516 | kprintf("Warning: thread switch from interrupt or IPI, " |
| 517 | "thread %p (%s)\n", td, td->td_comm); |
| 518 | print_backtrace(); |
| 519 | } |
| 520 | lwkt_switch(); |
| 521 | gd->gd_intr_nesting_level = savegdnest; |
| 522 | gd->gd_trap_nesting_level = savegdtrap; |
| 523 | return; |
| 524 | } |
| 525 | } |
| 526 | |
| 527 | /* |
| 528 | * Passive release (used to transition from user to kernel mode |
| 529 | * when we block or switch rather than when we enter the kernel). |
| 530 | * This function is NOT called if we are switching into a preemption |
| 531 | * or returning from a preemption. Typically this causes us to lose |
| 532 | * our current process designation (if we have one) and become a true |
| 533 | * LWKT thread, and may also hand the current process designation to |
| 534 | * another process and schedule its thread. |
| 535 | */ |
| 536 | if (td->td_release) |
| 537 | td->td_release(td); |
| 538 | |
| 539 | crit_enter_gd(gd); |
| 540 | if (td->td_toks) |
| 541 | lwkt_relalltokens(td); |
| 542 | |
| 543 | /* |
| 544 | * We had better not be holding any spin locks, but don't get into an |
| 545 | * endless panic loop. |
| 546 | */ |
| 547 | KASSERT(gd->gd_spinlock_rd == NULL || panicstr != NULL, |
| 548 | ("lwkt_switch: still holding a shared spinlock %p!", |
| 549 | gd->gd_spinlock_rd)); |
| 550 | KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL, |
| 551 | ("lwkt_switch: still holding %d exclusive spinlocks!", |
| 552 | gd->gd_spinlocks_wr)); |
| 553 | |
| 554 | |
| 555 | #ifdef SMP |
| 556 | /* |
| 557 | * td_mpcount cannot be used to determine if we currently hold the |
| 558 | * MP lock because get_mplock() will increment it prior to attempting |
| 559 | * to get the lock, and switch out if it can't. Our ownership of |
| 560 | * the actual lock will remain stable while we are in a critical section |
| 561 | * (but, of course, another cpu may own or release the lock so the |
| 562 | * actual value of mp_lock is not stable). |
| 563 | */ |
| 564 | mpheld = MP_LOCK_HELD(); |
| 565 | #ifdef INVARIANTS |
| 566 | if (td->td_cscount) { |
| 567 | kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n", |
| 568 | td); |
| 569 | if (panic_on_cscount) |
| 570 | panic("switching while mastering cpusync"); |
| 571 | } |
| 572 | #endif |
| 573 | #endif |
| 574 | if ((ntd = td->td_preempted) != NULL) { |
| 575 | /* |
| 576 | * We had preempted another thread on this cpu, resume the preempted |
| 577 | * thread. This occurs transparently, whether the preempted thread |
| 578 | * was scheduled or not (it may have been preempted after descheduling |
| 579 | * itself). |
| 580 | * |
| 581 | * We have to setup the MP lock for the original thread after backing |
| 582 | * out the adjustment that was made to curthread when the original |
| 583 | * was preempted. |
| 584 | */ |
| 585 | KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK); |
| 586 | #ifdef SMP |
| 587 | if (ntd->td_mpcount && mpheld == 0) { |
| 588 | panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d", |
| 589 | td, ntd, td->td_mpcount, ntd->td_mpcount); |
| 590 | } |
| 591 | if (ntd->td_mpcount) { |
| 592 | td->td_mpcount -= ntd->td_mpcount; |
| 593 | KKASSERT(td->td_mpcount >= 0); |
| 594 | } |
| 595 | #endif |
| 596 | ntd->td_flags |= TDF_PREEMPT_DONE; |
| 597 | |
| 598 | /* |
| 599 | * The interrupt may have woken a thread up, we need to properly |
| 600 | * set the reschedule flag if the originally interrupted thread is |
| 601 | * at a lower priority. |
| 602 | */ |
| 603 | if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1) |
| 604 | need_lwkt_resched(); |
| 605 | /* YYY release mp lock on switchback if original doesn't need it */ |
| 606 | } else { |
| 607 | /* |
| 608 | * Priority queue / round-robin at each priority. Note that user |
| 609 | * processes run at a fixed, low priority and the user process |
| 610 | * scheduler deals with interactions between user processes |
| 611 | * by scheduling and descheduling them from the LWKT queue as |
| 612 | * necessary. |
| 613 | * |
| 614 | * We have to adjust the MP lock for the target thread. If we |
| 615 | * need the MP lock and cannot obtain it we try to locate a |
| 616 | * thread that does not need the MP lock. If we cannot, we spin |
| 617 | * instead of HLT. |
| 618 | * |
| 619 | * A similar issue exists for the tokens held by the target thread. |
| 620 | * If we cannot obtain ownership of the tokens we cannot immediately |
| 621 | * schedule the thread. |
| 622 | */ |
| 623 | |
| 624 | /* |
| 625 | * If an LWKT reschedule was requested, well that is what we are |
| 626 | * doing now so clear it. |
| 627 | */ |
| 628 | clear_lwkt_resched(); |
| 629 | again: |
| 630 | if (gd->gd_runqmask) { |
| 631 | int nq = bsrl(gd->gd_runqmask); |
| 632 | if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) { |
| 633 | gd->gd_runqmask &= ~(1 << nq); |
| 634 | goto again; |
| 635 | } |
| 636 | #ifdef SMP |
| 637 | /* |
| 638 | * THREAD SELECTION FOR AN SMP MACHINE BUILD |
| 639 | * |
| 640 | * If the target needs the MP lock and we couldn't get it, |
| 641 | * or if the target is holding tokens and we could not |
| 642 | * gain ownership of the tokens, continue looking for a |
| 643 | * thread to schedule and spin instead of HLT if we can't. |
| 644 | * |
| 645 | * NOTE: the mpheld variable is invalid after this conditional; it |
| 646 | * can change due to both cpu_try_mplock() returning success |
| 647 | * AND interactions in lwkt_getalltokens() due to the fact that |
| 648 | * we are trying to check the mpcount of a thread other than |
| 649 | * the current thread. Because of this, if the current thread |
| 650 | * is not holding td_mpcount, an IPI indirectly run via |
| 651 | * lwkt_getalltokens() can obtain and release the MP lock and |
| 652 | * cause the core MP lock to be released. |
| 653 | */ |
| 654 | if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) || |
| 655 | (ntd->td_toks && lwkt_getalltokens(ntd) == 0) |
| 656 | ) { |
| 657 | u_int32_t rqmask = gd->gd_runqmask; |
| 658 | |
| 659 | mpheld = MP_LOCK_HELD(); |
| 660 | ntd = NULL; |
| 661 | while (rqmask) { |
| 662 | TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) { |
| 663 | if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) { |
| 664 | /* spinning due to MP lock being held */ |
| 665 | #ifdef INVARIANTS |
| 666 | ++mplock_contention_count; |
| 667 | #endif |
| 668 | /* mplock still not held, 'mpheld' still valid */ |
| 669 | continue; |
| 670 | } |
| 671 | |
| 672 | /* |
| 673 | * The mpheld state is invalid after the getalltokens call returns |
| 674 | * failure, but the variable is only needed for |
| 675 | * the loop. |
| 676 | */ |
| 677 | if (ntd->td_toks && !lwkt_getalltokens(ntd)) { |
| 678 | /* spinning due to token contention */ |
| 679 | #ifdef INVARIANTS |
| 680 | ++token_contention_count; |
| 681 | #endif |
| 682 | mpheld = MP_LOCK_HELD(); |
| 683 | continue; |
| 684 | } |
| 685 | break; |
| 686 | } |
| 687 | if (ntd) |
| 688 | break; |
| 689 | rqmask &= ~(1 << nq); |
| 690 | nq = bsrl(rqmask); |
| 691 | |
| 692 | /* |
| 693 | * We have two choices. We can either refuse to run a |
| 694 | * user thread when a kernel thread needs the MP lock |
| 695 | * but could not get it, or we can allow it to run but |
| 696 | * then expect an IPI (hopefully) later on to force a |
| 697 | * reschedule when the MP lock might become available. |
| 698 | */ |
| 699 | if (nq < TDPRI_KERN_LPSCHED) { |
| 700 | if (chain_mplock == 0) |
| 701 | break; |
| 702 | atomic_set_int(&mp_lock_contention_mask, |
| 703 | gd->gd_cpumask); |
| 704 | /* continue loop, allow user threads to be scheduled */ |
| 705 | } |
| 706 | } |
| 707 | if (ntd == NULL) { |
| 708 | cpu_mplock_contested(); |
| 709 | ntd = &gd->gd_idlethread; |
| 710 | ntd->td_flags |= TDF_IDLE_NOHLT; |
| 711 | goto using_idle_thread; |
| 712 | } else { |
| 713 | ++gd->gd_cnt.v_swtch; |
| 714 | TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq); |
| 715 | TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq); |
| 716 | } |
| 717 | } else { |
| 718 | if (ntd->td_mpcount) |
| 719 | ++mplock_countx; |
| 720 | ++gd->gd_cnt.v_swtch; |
| 721 | TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq); |
| 722 | TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq); |
| 723 | } |
| 724 | #else |
| 725 | /* |
| 726 | * THREAD SELECTION FOR A UP MACHINE BUILD. We don't have to |
| 727 | * worry about tokens or the BGL. However, we still have |
| 728 | * to call lwkt_getalltokens() in order to properly detect |
| 729 | * stale tokens. This call cannot fail for a UP build! |
| 730 | */ |
| 731 | lwkt_getalltokens(ntd); |
| 732 | ++gd->gd_cnt.v_swtch; |
| 733 | TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq); |
| 734 | TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq); |
| 735 | #endif |
| 736 | } else { |
| 737 | /* |
| 738 | * We have nothing to run but only let the idle loop halt |
| 739 | * the cpu if there are no pending interrupts. |
| 740 | */ |
| 741 | ntd = &gd->gd_idlethread; |
| 742 | if (gd->gd_reqflags & RQF_IDLECHECK_MASK) |
| 743 | ntd->td_flags |= TDF_IDLE_NOHLT; |
| 744 | #ifdef SMP |
| 745 | using_idle_thread: |
| 746 | /* |
| 747 | * The idle thread should not be holding the MP lock unless we |
| 748 | * are trapping in the kernel or in a panic. Since we select the |
| 749 | * idle thread unconditionally when no other thread is available, |
| 750 | * if the MP lock is desired during a panic or kernel trap, we |
| 751 | * have to loop in the scheduler until we get it. |
| 752 | */ |
| 753 | if (ntd->td_mpcount) { |
| 754 | mpheld = MP_LOCK_HELD(); |
| 755 | if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) { |
| 756 | panic("Idle thread %p was holding the BGL!", ntd); |
| 757 | } else if (mpheld == 0) { |
| 758 | cpu_mplock_contested(); |
| 759 | goto again; |
| 760 | } |
| 761 | } |
| 762 | #endif |
| 763 | } |
| 764 | } |
| 765 | KASSERT(ntd->td_pri >= TDPRI_CRIT, |
| 766 | ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri)); |
| 767 | |
| 768 | /* |
| 769 | * Do the actual switch. If the new target does not need the MP lock |
| 770 | * and we are holding it, release the MP lock. If the new target requires |
| 771 | * the MP lock we have already acquired it for the target. |
| 772 | */ |
| 773 | #ifdef SMP |
| 774 | if (ntd->td_mpcount == 0 ) { |
| 775 | if (MP_LOCK_HELD()) |
| 776 | cpu_rel_mplock(); |
| 777 | } else { |
| 778 | ASSERT_MP_LOCK_HELD(ntd); |
| 779 | } |
| 780 | #endif |
| 781 | if (td != ntd) { |
| 782 | ++switch_count; |
| 783 | #ifdef __amd64__ |
| 784 | KKASSERT(jg_tos_ok(ntd)); |
| 785 | #endif |
| 786 | KTR_LOG(ctxsw_sw, td, ntd); |
| 787 | td->td_switch(ntd); |
| 788 | } |
| 789 | /* NOTE: current cpu may have changed after switch */ |
| 790 | crit_exit_quick(td); |
| 791 | } |
| 792 | |
| 793 | /* |
| 794 | * Request that the target thread preempt the current thread. Preemption |
| 795 | * only works under a specific set of conditions: |
| 796 | * |
| 797 | * - We are not preempting ourselves |
| 798 | * - The target thread is owned by the current cpu |
| 799 | * - We are not currently being preempted |
| 800 | * - The target is not currently being preempted |
| 801 | * - We are not holding any spin locks |
| 802 | * - The target thread is not holding any tokens |
| 803 | * - We are able to satisfy the target's MP lock requirements (if any). |
| 804 | * |
| 805 | * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION. Typically |
| 806 | * this is called via lwkt_schedule() through the td_preemptable callback. |
| 807 | * critpri is the managed critical priority that we should ignore in order |
| 808 | * to determine whether preemption is possible (typically just the crit |
| 809 | * priority of lwkt_schedule() itself). |
| 810 | * |
| 811 | * XXX at the moment we run the target thread in a critical section during |
| 812 | * the preemption in order to prevent the target from taking interrupts |
| 813 | * that *WE* can't. Preemption is strictly limited to interrupt threads |
| 814 | * and interrupt-like threads, outside of a critical section, and the |
| 815 | * preempted source thread will be resumed the instant the target blocks |
| 816 | * whether or not the source is scheduled (i.e. preemption is supposed to |
| 817 | * be as transparent as possible). |
| 818 | * |
| 819 | * The target thread inherits our MP count (added to its own) for the |
| 820 | * duration of the preemption in order to preserve the atomicity of the |
| 821 | * MP lock during the preemption. Therefore, any preempting targets must be |
| 822 | * careful with regard to MP assertions. Note that the MP count may be |
| 823 | * out of sync with the physical mp_lock, but we do not have to preserve |
| 824 | * the original ownership of the lock if it was out of sync (that is, we |
| 825 | * can leave it synchronized on return). |
| 826 | */ |
| 827 | void |
| 828 | lwkt_preempt(thread_t ntd, int critpri) |
| 829 | { |
| 830 | struct globaldata *gd = mycpu; |
| 831 | thread_t td; |
| 832 | #ifdef SMP |
| 833 | int mpheld; |
| 834 | int savecnt; |
| 835 | #endif |
| 836 | |
| 837 | /* |
| 838 | * The caller has put us in a critical section. We can only preempt |
| 839 | * if the caller of the caller was not in a critical section (basically |
| 840 | * a local interrupt), as determined by the 'critpri' parameter. We |
| 841 | * also can't preempt if the caller is holding any spinlocks (even if |
| 842 | * it isn't in a critical section). This also handles the tokens test. |
| 843 | * |
| 844 | * YYY The target thread must be in a critical section (else it must |
| 845 | * inherit our critical section? I dunno yet). |
| 846 | * |
| 847 | * Set need_lwkt_resched() unconditionally for now YYY. |
| 848 | */ |
| 849 | KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri)); |
| 850 | |
| 851 | td = gd->gd_curthread; |
| 852 | if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) { |
| 853 | ++preempt_miss; |
| 854 | return; |
| 855 | } |
| 856 | if ((td->td_pri & ~TDPRI_MASK) > critpri) { |
| 857 | ++preempt_miss; |
| 858 | need_lwkt_resched(); |
| 859 | return; |
| 860 | } |
| 861 | #ifdef SMP |
| 862 | if (ntd->td_gd != gd) { |
| 863 | ++preempt_miss; |
| 864 | need_lwkt_resched(); |
| 865 | return; |
| 866 | } |
| 867 | #endif |
| 868 | /* |
| 869 | * Take the easy way out and do not preempt if we are holding |
| 870 | * any spinlocks. We could test whether the thread(s) being |
| 871 | * preempted interlock against the target thread's tokens and whether |
| 872 | * we can get all the target thread's tokens, but this situation |
| 873 | * should not occur very often so it's easier to simply not preempt. |
| 874 | * Also, plain spinlocks are impossible to figure out at this point so |
| 875 | * just don't preempt. |
| 876 | * |
| 877 | * Do not try to preempt if the target thread is holding any tokens. |
| 878 | * We could try to acquire the tokens but this case is so rare there |
| 879 | * is no need to support it. |
| 880 | */ |
| 881 | if (gd->gd_spinlock_rd || gd->gd_spinlocks_wr) { |
| 882 | ++preempt_miss; |
| 883 | need_lwkt_resched(); |
| 884 | return; |
| 885 | } |
| 886 | if (ntd->td_toks) { |
| 887 | ++preempt_miss; |
| 888 | need_lwkt_resched(); |
| 889 | return; |
| 890 | } |
| 891 | if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) { |
| 892 | ++preempt_weird; |
| 893 | need_lwkt_resched(); |
| 894 | return; |
| 895 | } |
| 896 | if (ntd->td_preempted) { |
| 897 | ++preempt_hit; |
| 898 | need_lwkt_resched(); |
| 899 | return; |
| 900 | } |
| 901 | #ifdef SMP |
| 902 | /* |
| 903 | * note: an interrupt might have occurred just as we were transitioning |
| 904 | * to or from the MP lock. In this case td_mpcount will be predisposed |
| 905 | * (non-zero) but not actually synchronized with the actual state of the |
| 906 | * lock. We can use it to imply an MP lock requirement for the |
| 907 | * preemption but we cannot use it to test whether we hold the MP lock |
| 908 | * or not. |
| 909 | */ |
| 910 | savecnt = td->td_mpcount; |
| 911 | mpheld = MP_LOCK_HELD(); |
| 912 | ntd->td_mpcount += td->td_mpcount; |
| 913 | if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) { |
| 914 | ntd->td_mpcount -= td->td_mpcount; |
| 915 | ++preempt_miss; |
| 916 | need_lwkt_resched(); |
| 917 | return; |
| 918 | } |
| 919 | #endif |
| 920 | |
| 921 | /* |
| 922 | * Since we are able to preempt the current thread, there is no need to |
| 923 | * call need_lwkt_resched(). |
| 924 | */ |
| 925 | ++preempt_hit; |
| 926 | ntd->td_preempted = td; |
| 927 | td->td_flags |= TDF_PREEMPT_LOCK; |
| 928 | KTR_LOG(ctxsw_pre, td, ntd); |
| 929 | td->td_switch(ntd); |
| 930 | |
| 931 | KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE)); |
| 932 | #ifdef SMP |
| 933 | KKASSERT(savecnt == td->td_mpcount); |
| 934 | mpheld = MP_LOCK_HELD(); |
| 935 | if (mpheld && td->td_mpcount == 0) |
| 936 | cpu_rel_mplock(); |
| 937 | else if (mpheld == 0 && td->td_mpcount) |
| 938 | panic("lwkt_preempt(): MP lock was not held through"); |
| 939 | #endif |
| 940 | ntd->td_preempted = NULL; |
| 941 | td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE); |
| 942 | } |
| 943 | |
| 944 | /* |
| 945 | * Conditionally call splz() if gd_reqflags indicates work is pending. |
| 946 | * |
| 947 | * td_nest_count prevents deep nesting via splz() or doreti() which |
| 948 | * might otherwise blow out the kernel stack. Note that except for |
| 949 | * this special case, we MUST call splz() here to handle any |
| 950 | * pending ints, particularly after we switch, or we might accidentally |
| 951 | * halt the cpu with interrupts pending. |
| 952 | * |
| 953 | * (self contained on a per cpu basis) |
| 954 | */ |
| 955 | void |
| 956 | splz_check(void) |
| 957 | { |
| 958 | globaldata_t gd = mycpu; |
| 959 | thread_t td = gd->gd_curthread; |
| 960 | |
| 961 | if (gd->gd_reqflags && td->td_nest_count < 2) |
| 962 | splz(); |
| 963 | } |
| 964 | |
| 965 | /* |
| 966 | * This implements a normal yield which will yield to equal priority |
| 967 | * threads as well as higher priority threads. Note that gd_reqflags |
| 968 | * tests will be handled by the crit_exit() call in lwkt_switch(). |
| 969 | * |
| 970 | * (self contained on a per cpu basis) |
| 971 | */ |
| 972 | void |
| 973 | lwkt_yield(void) |
| 974 | { |
| 975 | lwkt_schedule_self(curthread); |
| 976 | lwkt_switch(); |
| 977 | } |
| 978 | |
| 979 | /* |
| 980 | * This function is used along with the lwkt_passive_recover() inline |
| 981 | * by the trap code to negotiate a passive release of the current |
| 982 | * process/lwp designation with the user scheduler. |
| 983 | */ |
| 984 | void |
| 985 | lwkt_passive_release(struct thread *td) |
| 986 | { |
| 987 | struct lwp *lp = td->td_lwp; |
| 988 | |
| 989 | td->td_release = NULL; |
| 990 | lwkt_setpri_self(TDPRI_KERN_USER); |
| 991 | lp->lwp_proc->p_usched->release_curproc(lp); |
| 992 | } |
| 993 | |
| 994 | /* |
| 995 | * Make a kernel thread act as if it were in user mode with regard |
| 996 | * to scheduling, to avoid becoming cpu-bound in the kernel. Kernel |
| 997 | * loops which may be potentially cpu-bound can call lwkt_user_yield(). |
| 998 | * |
| 999 | * The lwkt_user_yield() function is designed to have very low overhead |
| 1000 | * if no yield is determined to be needed. |
| 1001 | */ |
| 1002 | void |
| 1003 | lwkt_user_yield(void) |
| 1004 | { |
| 1005 | thread_t td = curthread; |
| 1006 | struct lwp *lp = td->td_lwp; |
| 1007 | |
| 1008 | #ifdef SMP |
| 1009 | /* |
| 1010 | * XXX SEVERE TEMPORARY HACK. A cpu-bound operation running in the |
| 1011 | * kernel can prevent other cpus from servicing interrupt threads |
| 1012 | * which still require the MP lock (which is a lot of them). This |
| 1013 | * has a chaining effect since if the interrupt is blocked, so is |
| 1014 | * the event, so normal scheduling will not pick up on the problem. |
| 1015 | */ |
| 1016 | if (mplock_countx && td->td_mpcount) { |
| 1017 | int savecnt = td->td_mpcount; |
| 1018 | |
| 1019 | td->td_mpcount = 1; |
| 1020 | mplock_countx = 0; |
| 1021 | rel_mplock(); |
| 1022 | DELAY(bgl_yield); |
| 1023 | get_mplock(); |
| 1024 | td->td_mpcount = savecnt; |
| 1025 | } |
| 1026 | #endif |
| 1027 | |
| 1028 | /* |
| 1029 | * Another kernel thread wants the cpu |
| 1030 | */ |
| 1031 | if (lwkt_resched_wanted()) |
| 1032 | lwkt_switch(); |
| 1033 | |
| 1034 | /* |
| 1035 | * If the user scheduler has asynchronously determined that the current |
| 1036 | * process (when running in user mode) needs to lose the cpu then make |
| 1037 | * sure we are released. |
| 1038 | */ |
| 1039 | if (user_resched_wanted()) { |
| 1040 | if (td->td_release) |
| 1041 | td->td_release(td); |
| 1042 | } |
| 1043 | |
| 1044 | /* |
| 1045 | * If we are released reduce our priority |
| 1046 | */ |
| 1047 | if (td->td_release == NULL) { |
| 1048 | if (lwkt_check_resched(td) > 0) |
| 1049 | lwkt_switch(); |
| 1050 | if (lp) { |
| 1051 | lp->lwp_proc->p_usched->acquire_curproc(lp); |
| 1052 | td->td_release = lwkt_passive_release; |
| 1053 | lwkt_setpri_self(TDPRI_USER_NORM); |
| 1054 | } |
| 1055 | } |
| 1056 | } |
| 1057 | |
| 1058 | /* |
| 1059 | * Return 0 if no runnable threads are pending at the same or higher |
| 1060 | * priority as the passed thread. |
| 1061 | * |
| 1062 | * Return 1 if runnable threads are pending at the same priority. |
| 1063 | * |
| 1064 | * Return 2 if runnable threads are pending at a higher priority. |
| 1065 | */ |
| 1066 | int |
| 1067 | lwkt_check_resched(thread_t td) |
| 1068 | { |
| 1069 | int pri = td->td_pri & TDPRI_MASK; |
| 1070 | |
| 1071 | if (td->td_gd->gd_runqmask > (2 << pri) - 1) |
| 1072 | return(2); |
| 1073 | if (TAILQ_NEXT(td, td_threadq)) |
| 1074 | return(1); |
| 1075 | return(0); |
| 1076 | } |
| 1077 | |
| 1078 | /* |
| 1079 | * Generic schedule. Possibly schedule threads belonging to other cpus and |
| 1080 | * deal with threads that might be blocked on a wait queue. |
| 1081 | * |
| 1082 | * We have a little helper inline function which does additional work after |
| 1083 | * the thread has been enqueued, including dealing with preemption and |
| 1084 | * setting need_lwkt_resched() (which prevents the kernel from returning |
| 1085 | * to userland until it has processed higher priority threads). |
| 1086 | * |
| 1087 | * It is possible for this routine to be called after a failed _enqueue |
| 1088 | * (due to the target thread migrating, sleeping, or otherwise being blocked). |
| 1089 | * We have to check that the thread is actually on the run queue! |
| 1090 | * |
| 1091 | * reschedok is an optimized constant propagated from lwkt_schedule() or |
| 1092 | * lwkt_schedule_noresched(). By default it is non-zero, causing a |
| 1093 | * reschedule to be requested if the target thread has a higher priority. |
| 1094 | * The port messaging code will set MSG_NORESCHED and cause reschedok to |
| 1095 | * be 0, preventing undesired reschedules. |
| 1096 | */ |
| 1097 | static __inline |
| 1098 | void |
| 1099 | _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri, int reschedok) |
| 1100 | { |
| 1101 | thread_t otd; |
| 1102 | |
| 1103 | if (ntd->td_flags & TDF_RUNQ) { |
| 1104 | if (ntd->td_preemptable && reschedok) { |
| 1105 | ntd->td_preemptable(ntd, cpri); /* YYY +token */ |
| 1106 | } else if (reschedok) { |
| 1107 | otd = curthread; |
| 1108 | if ((ntd->td_pri & TDPRI_MASK) > (otd->td_pri & TDPRI_MASK)) |
| 1109 | need_lwkt_resched(); |
| 1110 | } |
| 1111 | } |
| 1112 | } |
| 1113 | |
| 1114 | static __inline |
| 1115 | void |
| 1116 | _lwkt_schedule(thread_t td, int reschedok) |
| 1117 | { |
| 1118 | globaldata_t mygd = mycpu; |
| 1119 | |
| 1120 | KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule(): scheduling gd_idlethread is illegal!")); |
| 1121 | crit_enter_gd(mygd); |
| 1122 | KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0); |
| 1123 | if (td == mygd->gd_curthread) { |
| 1124 | _lwkt_enqueue(td); |
| 1125 | } else { |
| 1126 | /* |
| 1127 | * If we own the thread, there is no race (since we are in a |
| 1128 | * critical section). If we do not own the thread there might |
| 1129 | * be a race but the target cpu will deal with it. |
| 1130 | */ |
| 1131 | #ifdef SMP |
| 1132 | if (td->td_gd == mygd) { |
| 1133 | _lwkt_enqueue(td); |
| 1134 | _lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok); |
| 1135 | } else { |
| 1136 | lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0); |
| 1137 | } |
| 1138 | #else |
| 1139 | _lwkt_enqueue(td); |
| 1140 | _lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok); |
| 1141 | #endif |
| 1142 | } |
| 1143 | crit_exit_gd(mygd); |
| 1144 | } |
| 1145 | |
| 1146 | void |
| 1147 | lwkt_schedule(thread_t td) |
| 1148 | { |
| 1149 | _lwkt_schedule(td, 1); |
| 1150 | } |
| 1151 | |
| 1152 | void |
| 1153 | lwkt_schedule_noresched(thread_t td) |
| 1154 | { |
| 1155 | _lwkt_schedule(td, 0); |
| 1156 | } |
| 1157 | |
| 1158 | #ifdef SMP |
| 1159 | |
| 1160 | /* |
| 1161 | * When a thread is scheduled remotely and frame != NULL, the IPIQ is |
| 1162 | * being run via doreti or an interrupt, so preemption can be allowed. |
| 1163 | * |
| 1164 | * To allow preemption we have to drop the critical section so only |
| 1165 | * one is present in _lwkt_schedule_post. |
| 1166 | */ |
| 1167 | static void |
| 1168 | lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame) |
| 1169 | { |
| 1170 | thread_t td = curthread; |
| 1171 | thread_t ntd = arg; |
| 1172 | |
| 1173 | if (frame && ntd->td_preemptable) { |
| 1174 | crit_exit_noyield(td); |
| 1175 | _lwkt_schedule(ntd, 1); |
| 1176 | crit_enter_quick(td); |
| 1177 | } else { |
| 1178 | _lwkt_schedule(ntd, 1); |
| 1179 | } |
| 1180 | } |
| 1181 | |
| 1182 | /* |
| 1183 | * Thread migration using a 'Pull' method. The thread may or may not be |
| 1184 | * the current thread. It MUST be descheduled and in a stable state. |
| 1185 | * lwkt_giveaway() must be called on the cpu owning the thread. |
| 1186 | * |
| 1187 | * At any point after lwkt_giveaway() is called, the target cpu may |
| 1188 | * 'pull' the thread by calling lwkt_acquire(). |
| 1189 | * |
| 1190 | * We have to make sure the thread is not sitting on a per-cpu tsleep |
| 1191 | * queue or it will blow up when it moves to another cpu. |
| 1192 | * |
| 1193 | * MPSAFE - must be called under very specific conditions. |
| 1194 | */ |
| 1195 | void |
| 1196 | lwkt_giveaway(thread_t td) |
| 1197 | { |
| 1198 | globaldata_t gd = mycpu; |
| 1199 | |
| 1200 | crit_enter_gd(gd); |
| 1201 | if (td->td_flags & TDF_TSLEEPQ) |
| 1202 | tsleep_remove(td); |
| 1203 | KKASSERT(td->td_gd == gd); |
| 1204 | TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq); |
| 1205 | td->td_flags |= TDF_MIGRATING; |
| 1206 | crit_exit_gd(gd); |
| 1207 | } |
| 1208 | |
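|  | /* |
|  | * Pull a thread previously handed off with lwkt_giveaway() onto the |
|  | * current cpu. We wait for the thread to finish switching out on its |
|  | * originating cpu before adding it to our tdallq. |
|  | */ |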
| 1209 | void |
| 1210 | lwkt_acquire(thread_t td) |
| 1211 | { |
| 1212 | globaldata_t gd; |
| 1213 | globaldata_t mygd; |
| 1214 | |
| 1215 | KKASSERT(td->td_flags & TDF_MIGRATING); |
| 1216 | gd = td->td_gd; |
| 1217 | mygd = mycpu; |
| 1218 | if (gd != mycpu) { |
| 1219 | cpu_lfence(); |
| 1220 | KKASSERT((td->td_flags & TDF_RUNQ) == 0); |
| 1221 | crit_enter_gd(mygd); |
| 1222 | while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) { |
| 1223 | #ifdef SMP |
| 1224 | lwkt_process_ipiq(); |
| 1225 | #endif |
| 1226 | cpu_lfence(); |
| 1227 | } |
| 1228 | td->td_gd = mygd; |
| 1229 | TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); |
| 1230 | td->td_flags &= ~TDF_MIGRATING; |
| 1231 | crit_exit_gd(mygd); |
| 1232 | } else { |
| 1233 | crit_enter_gd(mygd); |
| 1234 | TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); |
| 1235 | td->td_flags &= ~TDF_MIGRATING; |
| 1236 | crit_exit_gd(mygd); |
| 1237 | } |
| 1238 | } |
| 1239 | |
| 1240 | #endif |
| 1241 | |
| 1242 | /* |
| 1243 | * Generic deschedule. Descheduling threads other than your own should be |
| 1244 | * done only in carefully controlled circumstances. Descheduling is |
| 1245 | * asynchronous. |
| 1246 | * |
| 1247 | * This function may block if the cpu has run out of messages. |
| 1248 | */ |
| 1249 | void |
| 1250 | lwkt_deschedule(thread_t td) |
| 1251 | { |
| 1252 | crit_enter(); |
| 1253 | #ifdef SMP |
| 1254 | if (td == curthread) { |
| 1255 | _lwkt_dequeue(td); |
| 1256 | } else { |
| 1257 | if (td->td_gd == mycpu) { |
| 1258 | _lwkt_dequeue(td); |
| 1259 | } else { |
| 1260 | lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td); |
| 1261 | } |
| 1262 | } |
| 1263 | #else |
| 1264 | _lwkt_dequeue(td); |
| 1265 | #endif |
| 1266 | crit_exit(); |
| 1267 | } |
| 1268 | |
| 1269 | /* |
| 1270 | * Set the target thread's priority. This routine does not automatically |
| 1271 | * switch to a higher priority thread, LWKT threads are not designed for |
| 1272 | * continuous priority changes. Yield if you want to switch. |
| 1273 | * |
| 1274 | * We have to retain the critical section count which uses the high bits |
| 1275 | * of the td_pri field. The specified priority may also indicate zero or |
| 1276 | * more critical sections by adding TDPRI_CRIT*N. |
| 1277 | * |
| 1278 | * Note that we requeue the thread whether it winds up on a different runq |
| 1279 | * or not. uio_yield() depends on this and the routine is not normally |
| 1280 | * called with the same priority otherwise. |
| 1281 | */ |
| 1282 | void |
| 1283 | lwkt_setpri(thread_t td, int pri) |
| 1284 | { |
| 1285 | KKASSERT(pri >= 0); |
| 1286 | KKASSERT(td->td_gd == mycpu); |
| 1287 | crit_enter(); |
| 1288 | if (td->td_flags & TDF_RUNQ) { |
| 1289 | _lwkt_dequeue(td); |
| 1290 | td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; |
| 1291 | _lwkt_enqueue(td); |
| 1292 | } else { |
| 1293 | td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; |
| 1294 | } |
| 1295 | crit_exit(); |
| 1296 | } |
| 1297 | |
| 1298 | void |
| 1299 | lwkt_setpri_self(int pri) |
| 1300 | { |
| 1301 | thread_t td = curthread; |
| 1302 | |
| 1303 | KKASSERT(pri >= 0 && pri <= TDPRI_MAX); |
| 1304 | crit_enter(); |
| 1305 | if (td->td_flags & TDF_RUNQ) { |
| 1306 | _lwkt_dequeue(td); |
| 1307 | td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; |
| 1308 | _lwkt_enqueue(td); |
| 1309 | } else { |
| 1310 | td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; |
| 1311 | } |
| 1312 | crit_exit(); |
| 1313 | } |
| 1314 | |
| 1315 | /* |
| 1316 | * Migrate the current thread to the specified cpu. |
| 1317 | * |
| 1318 | * This is accomplished by descheduling ourselves from the current cpu, |
| 1319 | * moving our thread to the tdallq of the target cpu, IPI messaging the |
| 1320 | * target cpu, and switching out. TDF_MIGRATING prevents scheduling |
| 1321 | * races while the thread is being migrated. |
| 1322 | * |
| 1323 | * We must be sure to remove ourselves from the current cpu's tsleepq |
| 1324 | * before potentially moving to another queue. The thread can be on |
| 1325 | * a tsleepq due to a left-over tsleep_interlock(). |
| 1326 | */ |
| 1327 | #ifdef SMP |
| 1328 | static void lwkt_setcpu_remote(void *arg); |
| 1329 | #endif |
| 1330 | |
| 1331 | void |
| 1332 | lwkt_setcpu_self(globaldata_t rgd) |
| 1333 | { |
| 1334 | #ifdef SMP |
| 1335 | thread_t td = curthread; |
| 1336 | |
| 1337 | if (td->td_gd != rgd) { |
| 1338 | crit_enter_quick(td); |
| 1339 | if (td->td_flags & TDF_TSLEEPQ) |
| 1340 | tsleep_remove(td); |
| 1341 | td->td_flags |= TDF_MIGRATING; |
| 1342 | lwkt_deschedule_self(td); |
| 1343 | TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); |
| 1344 | lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td); |
| 1345 | lwkt_switch(); |
| 1346 | /* we are now on the target cpu */ |
| 1347 | TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq); |
| 1348 | crit_exit_quick(td); |
| 1349 | } |
| 1350 | #endif |
| 1351 | } |
| 1352 | |
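|  | /* |
|  | * Convenience wrapper: migrate the current thread to the cpu |
|  | * identified by cpuid. |
|  | */ |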
| 1353 | void |
| 1354 | lwkt_migratecpu(int cpuid) |
| 1355 | { |
| 1356 | #ifdef SMP |
| 1357 | globaldata_t rgd; |
| 1358 | |
| 1359 | rgd = globaldata_find(cpuid); |
| 1360 | lwkt_setcpu_self(rgd); |
| 1361 | #endif |
| 1362 | } |
| 1363 | |
| 1364 | /* |
| 1365 | * Remote IPI for cpu migration (called while in a critical section so we |
| 1366 | * do not have to enter another one). The thread has already been moved to |
| 1367 | * our cpu's allq, but we must wait for the thread to be completely switched |
| 1368 | * out on the originating cpu before we schedule it on ours or the stack |
| 1369 | * state may be corrupt. We clear TDF_MIGRATING after flushing the GD |
| 1370 | * change to main memory. |
| 1371 | * |
| 1372 | * XXX The use of TDF_MIGRATING might not be sufficient to avoid races |
| 1373 | * against wakeups. It is best if this interface is used only when there |
| 1374 | * are no pending events that might try to schedule the thread. |
| 1375 | */ |
| 1376 | #ifdef SMP |
| 1377 | static void |
| 1378 | lwkt_setcpu_remote(void *arg) |
| 1379 | { |
| 1380 | thread_t td = arg; |
| 1381 | globaldata_t gd = mycpu; |
| 1382 | |
| 1383 | while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) { |
| 1384 | #ifdef SMP |
| 1385 | lwkt_process_ipiq(); |
| 1386 | #endif |
| 1387 | cpu_lfence(); |
| 1388 | } |
| 1389 | td->td_gd = gd; |
| 1390 | cpu_sfence(); |
| 1391 | td->td_flags &= ~TDF_MIGRATING; |
| 1392 | KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0); |
| 1393 | _lwkt_enqueue(td); |
| 1394 | } |
| 1395 | #endif |
| 1396 | |
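|  | /* |
|  | * Return the lwp of the most deeply preempted thread on the current |
|  | * cpu, or the current thread's lwp if no preemption is in progress. |
|  | */ |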
| 1397 | struct lwp * |
| 1398 | lwkt_preempted_proc(void) |
| 1399 | { |
| 1400 | thread_t td = curthread; |
| 1401 | while (td->td_preempted) |
| 1402 | td = td->td_preempted; |
| 1403 | return(td->td_lwp); |
| 1404 | } |
| 1405 | |
| 1406 | /* |
| 1407 | * Create a kernel process/thread/whatever. It shares its address space |
| 1408 | * with proc0 - ie: kernel only. |
| 1409 | * |
| 1410 | * NOTE! By default new threads are created with the MP lock held. A |
| 1411 | * thread which does not require the MP lock should release it by calling |
| 1412 | * rel_mplock() at the start of the new thread. |
| 1413 | */ |
| 1414 | int |
| 1415 | lwkt_create(void (*func)(void *), void *arg, |
| 1416 | struct thread **tdp, thread_t template, int tdflags, int cpu, |
| 1417 | const char *fmt, ...) |
| 1418 | { |
| 1419 | thread_t td; |
| 1420 | __va_list ap; |
| 1421 | |
| 1422 | td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu, |
| 1423 | tdflags); |
| 1424 | if (tdp) |
| 1425 | *tdp = td; |
| 1426 | cpu_set_thread_handler(td, lwkt_exit, func, arg); |
| 1427 | |
| 1428 | /* |
| 1429 | * Set up arg0 for 'ps' etc |
| 1430 | */ |
| 1431 | __va_start(ap, fmt); |
| 1432 | kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap); |
| 1433 | __va_end(ap); |
| 1434 | |
| 1435 | /* |
| 1436 | * Schedule the thread to run |
| 1437 | */ |
| 1438 | if ((td->td_flags & TDF_STOPREQ) == 0) |
| 1439 | lwkt_schedule(td); |
| 1440 | else |
| 1441 | td->td_flags &= ~TDF_STOPREQ; |
| 1442 | return 0; |
| 1443 | } |
| 1444 | |
| 1445 | /* |
| 1446 | * Destroy an LWKT thread. Warning! This function is not called when |
| 1447 | * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and |
| 1448 | * uses a different reaping mechanism. |
| 1449 | */ |
| 1450 | void |
| 1451 | lwkt_exit(void) |
| 1452 | { |
| 1453 | thread_t td = curthread; |
| 1454 | thread_t std; |
| 1455 | globaldata_t gd; |
| 1456 | |
| 1457 | if (td->td_flags & TDF_VERBOSE) |
| 1458 | kprintf("kthread %p %s has exited\n", td, td->td_comm); |
| 1459 | caps_exit(td); |
| 1460 | |
| 1461 | /* |
| 1462 | * Get us into a critical section to interlock gd_freetd and loop |
| 1463 | * until we can get it freed. |
| 1464 | * |
| 1465 | * We have to cache the current td in gd_freetd because objcache_put()ing |
| 1466 | * it would rip it out from under us while our thread is still active. |
| 1467 | */ |
| 1468 | gd = mycpu; |
| 1469 | crit_enter_quick(td); |
| 1470 | while ((std = gd->gd_freetd) != NULL) { |
| 1471 | gd->gd_freetd = NULL; |
| 1472 | objcache_put(thread_cache, std); |
| 1473 | } |
| 1474 | |
| 1475 | /* |
| 1476 | * Remove thread resources from kernel lists and deschedule us for |
| 1477 | * the last time. |
| 1478 | */ |
| 1479 | if (td->td_flags & TDF_TSLEEPQ) |
| 1480 | tsleep_remove(td); |
| 1481 | biosched_done(td); |
| 1482 | lwkt_deschedule_self(td); |
| 1483 | lwkt_remove_tdallq(td); |
| 1484 | if (td->td_flags & TDF_ALLOCATED_THREAD) |
| 1485 | gd->gd_freetd = td; |
| 1486 | cpu_thread_exit(); |
| 1487 | } |
| 1488 | |
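|  | /* |
|  | * Remove the thread from its cpu's tdallq. Must be called on the |
|  | * thread's owning cpu. |
|  | */ |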
| 1489 | void |
| 1490 | lwkt_remove_tdallq(thread_t td) |
| 1491 | { |
| 1492 | KKASSERT(td->td_gd == mycpu); |
| 1493 | TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); |
| 1494 | } |
| 1495 | |
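|  | /* |
|  | * Called when the critical section nesting count is corrupt (td_pri |
|  | * is, or is about to go, negative). Zero the priority and panic. |
|  | */ |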
| 1496 | void |
| 1497 | crit_panic(void) |
| 1498 | { |
| 1499 | thread_t td = curthread; |
| 1500 | int lpri = td->td_pri; |
| 1501 | |
| 1502 | td->td_pri = 0; |
| 1503 | panic("td_pri is/would-go negative! %p %d", td, lpri); |
| 1504 | } |
| 1505 | |
| 1506 | #ifdef SMP |
| 1507 | |
| 1508 | /* |
| 1509 | * Called from debugger/panic on cpus which have been stopped. We must still |
| 1510 | * process the IPIQ while stopped, even if we were stopped while in a critical |
| 1511 | * section (XXX). |
| 1512 | * |
| 1513 | * If we are dumping also try to process any pending interrupts. This may |
| 1514 | * or may not work depending on the state of the cpu at the point it was |
| 1515 | * stopped. |
| 1516 | */ |
| 1517 | void |
| 1518 | lwkt_smp_stopped(void) |
| 1519 | { |
| 1520 | globaldata_t gd = mycpu; |
| 1521 | |
| 1522 | crit_enter_gd(gd); |
| 1523 | if (dumping) { |
| 1524 | lwkt_process_ipiq(); |
| 1525 | splz(); |
| 1526 | } else { |
| 1527 | lwkt_process_ipiq(); |
| 1528 | } |
| 1529 | crit_exit_gd(gd); |
| 1530 | } |
| 1531 | |
| 1532 | /* |
| 1533 | * get_mplock() calls this routine if it is unable to obtain the MP lock. |
| 1534 | * get_mplock() has already incremented td_mpcount. We must block and |
| 1535 | * not return until giant is held. |
| 1536 | * |
| 1537 | * All we have to do is lwkt_switch() away. The LWKT scheduler will not |
| 1538 | * reschedule the thread until it can obtain the giant lock for it. |
| 1539 | */ |
| 1540 | void |
| 1541 | lwkt_mp_lock_contested(void) |
| 1542 | { |
| 1543 | ++mplock_countx; |
| 1544 | loggiant(beg); |
| 1545 | lwkt_switch(); |
| 1546 | loggiant(end); |
| 1547 | } |
| 1548 | |
| 1549 | /* |
| 1550 | * The rel_mplock() code will call this function after releasing the |
| 1551 | * last reference on the MP lock if mp_lock_contention_mask is non-zero. |
| 1552 | * |
| 1553 | * We then chain an IPI to a single other cpu potentially needing the |
| 1554 | * lock. This is somewhat heuristic and we can wind up with IPIs flying |
| 1555 | * all over the place. |
| 1556 | */ |
| 1557 | static void lwkt_mp_lock_uncontested_remote(void *arg __unused); |
| 1558 | |
| 1559 | void |
| 1560 | lwkt_mp_lock_uncontested(void) |
| 1561 | { |
| 1562 | globaldata_t gd; |
| 1563 | globaldata_t dgd; |
| 1564 | cpumask_t mask; |
| 1565 | cpumask_t tmpmask; |
| 1566 | int cpuid; |
| 1567 | |
| 1568 | if (chain_mplock) { |
| 1569 | gd = mycpu; |
| 1570 | atomic_clear_int(&mp_lock_contention_mask, gd->gd_cpumask); |
| 1571 | mask = mp_lock_contention_mask; |
| 1572 | tmpmask = ~((1 << gd->gd_cpuid) - 1); |
| 1573 | |
| 1574 | if (mask) { |
| 1575 | if (mask & tmpmask) |
| 1576 | cpuid = bsfl(mask & tmpmask); |
| 1577 | else |
| 1578 | cpuid = bsfl(mask); |
| 1579 | atomic_clear_int(&mp_lock_contention_mask, 1 << cpuid); |
| 1580 | dgd = globaldata_find(cpuid); |
| 1581 | lwkt_send_ipiq(dgd, lwkt_mp_lock_uncontested_remote, NULL); |
| 1582 | } |
| 1583 | } |
| 1584 | } |
| 1585 | |
| 1586 | /* |
| 1587 | * The idea is for this IPI to interrupt a potentially lower priority |
| 1588 | * thread, such as a user thread, to allow the scheduler to reschedule |
| 1589 | * a higher priority kernel thread that needs the MP lock. |
| 1590 | * |
| 1591 | * For now we set the LWKT reschedule flag which generates an AST in |
| 1592 | * doreti, though theoretically it is also possible to preempt |
| 1593 | * here if the underlying thread was operating in user mode. Nah. |
| 1594 | */ |
| 1595 | static void |
| 1596 | lwkt_mp_lock_uncontested_remote(void *arg __unused) |
| 1597 | { |
| 1598 | need_lwkt_resched(); |
| 1599 | } |
| 1600 | |
| 1601 | #endif |