kernel: Make SMP support default (and non-optional).
[dragonfly.git] / sys / kern / usched_bsd4.c
38b25931 1/*
2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
4 *
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
8 * and many others.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
35#include <sys/lock.h>
36#include <sys/queue.h>
37#include <sys/proc.h>
38#include <sys/rtprio.h>
38b25931
MD
39#include <sys/uio.h>
40#include <sys/sysctl.h>
41#include <sys/resourcevar.h>
52eedfb5 42#include <sys/spinlock.h>
d6d39bc7 43#include <sys/cpu_topology.h>
52eedfb5
MD
44#include <sys/thread2.h>
45#include <sys/spinlock2.h>
684a93c4 46#include <sys/mplock2.h>
52eedfb5 47
d6d39bc7
MC
48#include <sys/ktr.h>
49
50#include <machine/cpu.h>
51#include <machine/smp.h>
52
38b25931
MD
53/*
54 * Priorities. Note that with 32 run queues per scheduler each queue
55 * represents four priority levels.
56 */
57
58#define MAXPRI 128
59#define PRIMASK (MAXPRI - 1)
60#define PRIBASE_REALTIME 0
61#define PRIBASE_NORMAL MAXPRI
62#define PRIBASE_IDLE (MAXPRI * 2)
63#define PRIBASE_THREAD (MAXPRI * 3)
64#define PRIBASE_NULL (MAXPRI * 4)
65
66#define NQS 32 /* 32 run queues. */
67#define PPQ (MAXPRI / NQS) /* priorities per queue */
52eedfb5 68#define PPQMASK (PPQ - 1)
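/*
 * Worked example with the defaults above: MAXPRI is 128 and NQS is 32,
 * so PPQ works out to 4 and PPQMASK to 3.  A normal-class priority of,
 * say, 53 maps to run queue index (53 & PRIMASK) / PPQ == 13, and
 * priorities 52-55 all share that queue.
 */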
69
70/*
71 * NICEPPQ - number of nice units per priority queue
72 *
73 * ESTCPUPPQ - number of estcpu units per priority queue
74 * ESTCPUMAX - number of estcpu units
75 */
76#define NICEPPQ 2
38b25931
MD
77#define ESTCPUPPQ 512
78#define ESTCPUMAX (ESTCPUPPQ * NQS)
52cac9fb 79#define BATCHMAX (ESTCPUFREQ * 30)
38b25931
MD
80#define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
81
82#define ESTCPULIM(v) min((v), ESTCPUMAX)
83
553ea3c8 84TAILQ_HEAD(rq, lwp);
38b25931 85
553ea3c8
SS
86#define lwp_priority lwp_usdata.bsd4.priority
87#define lwp_rqindex lwp_usdata.bsd4.rqindex
553ea3c8 88#define lwp_estcpu lwp_usdata.bsd4.estcpu
52cac9fb 89#define lwp_batch lwp_usdata.bsd4.batch
52eedfb5 90#define lwp_rqtype lwp_usdata.bsd4.rqtype
38b25931 91
553ea3c8
SS
92static void bsd4_acquire_curproc(struct lwp *lp);
93static void bsd4_release_curproc(struct lwp *lp);
38b25931 94static void bsd4_select_curproc(globaldata_t gd);
553ea3c8 95static void bsd4_setrunqueue(struct lwp *lp);
553ea3c8 96static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
38b25931 97 sysclock_t cpstamp);
52eedfb5 98static void bsd4_recalculate_estcpu(struct lwp *lp);
553ea3c8
SS
99static void bsd4_resetpriority(struct lwp *lp);
100static void bsd4_forking(struct lwp *plp, struct lwp *lp);
52cac9fb 101static void bsd4_exiting(struct lwp *lp, struct proc *);
e28d8b15 102static void bsd4_uload_update(struct lwp *lp);
c3149361 103static void bsd4_yield(struct lwp *lp);
e28d8b15
MD
104static void bsd4_need_user_resched_remote(void *dummy);
105static int bsd4_batchy_looser_pri_test(struct lwp* lp);
106static struct lwp *bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp);
696ae4d4 107static void bsd4_kick_helper(struct lwp *lp);
e28d8b15 108static struct lwp *bsd4_chooseproc_locked(struct lwp *chklp);
52eedfb5
MD
109static void bsd4_remrunqueue_locked(struct lwp *lp);
110static void bsd4_setrunqueue_locked(struct lwp *lp);
38b25931
MD
111
112struct usched usched_bsd4 = {
113 { NULL },
114 "bsd4", "Original DragonFly Scheduler",
cb7f4ab1
MD
115 NULL, /* default registration */
116 NULL, /* default deregistration */
38b25931
MD
117 bsd4_acquire_curproc,
118 bsd4_release_curproc,
38b25931 119 bsd4_setrunqueue,
38b25931
MD
120 bsd4_schedulerclock,
121 bsd4_recalculate_estcpu,
122 bsd4_resetpriority,
123 bsd4_forking,
cb7f4ab1 124 bsd4_exiting,
e28d8b15 125 bsd4_uload_update,
c3149361
MD
126 NULL, /* setcpumask not supported */
127 bsd4_yield
38b25931
MD
128};
129
52eedfb5 130struct usched_bsd4_pcpu {
d6d39bc7
MC
131 struct thread helper_thread;
132 short rrcount;
133 short upri;
134 struct lwp *uschedcp;
135 struct lwp *old_uschedcp;
d6d39bc7 136 cpu_node_t *cpunode;
52eedfb5
MD
137};
138
139typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;
140
38b25931
MD
141/*
142 * We have NQS (32) run queues per scheduling class. For the normal
143 * class, there are 128 priorities scaled onto these 32 queues. New
144 * processes are added to the last entry in each queue, and processes
145 * are selected for running by taking them from the head and maintaining
146 * a simple FIFO arrangement. Realtime and Idle priority processes have
147 * an explicit 0-31 priority which maps directly onto their class queue
148 * index. When a queue has something in it, the corresponding bit is
149 * set in the queuebits variable, allowing a single read to determine
150 * the state of all 32 queues and then a ffs() to find the first busy
151 * queue.
152 */
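/*
 * Minimal sketch of the selection fast path described above, assuming
 * bsd4_spin is held; bsd4_chooseproc_locked() below does essentially
 * this for each of the three classes:
 */
#if 0
	pri = bsfl(bsd4_queuebits);		/* first non-empty queue */
	q = &bsd4_queues[pri];
	lp = TAILQ_FIRST(q);
	TAILQ_REMOVE(q, lp, lwp_procq);
	if (TAILQ_EMPTY(q))
		bsd4_queuebits &= ~(1 << pri);	/* queue went empty */
#endif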
153static struct rq bsd4_queues[NQS];
154static struct rq bsd4_rtqueues[NQS];
155static struct rq bsd4_idqueues[NQS];
156static u_int32_t bsd4_queuebits;
157static u_int32_t bsd4_rtqueuebits;
158static u_int32_t bsd4_idqueuebits;
159static cpumask_t bsd4_curprocmask = -1; /* currently running a user process */
160static cpumask_t bsd4_rdyprocmask; /* ready to accept a user process */
161static int bsd4_runqcount;
52eedfb5 162static volatile int bsd4_scancpu;
52eedfb5
MD
163static struct spinlock bsd4_spin;
164static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
d6d39bc7
MC
165static struct sysctl_ctx_list usched_bsd4_sysctl_ctx;
166static struct sysctl_oid *usched_bsd4_sysctl_tree;
167
168/* Debug info exposed through debug.* sysctl */
38b25931 169
e28d8b15
MD
170SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD,
171 &bsd4_runqcount, 0,
172 "Number of run queues");
d6d39bc7
MC
173
174static int usched_bsd4_debug = -1;
e28d8b15
MD
175SYSCTL_INT(_debug, OID_AUTO, bsd4_scdebug, CTLFLAG_RW,
176 &usched_bsd4_debug, 0,
177 "Print debug information for this pid");
38b25931 178
e28d8b15
MD
179static int usched_bsd4_pid_debug = -1;
180SYSCTL_INT(_debug, OID_AUTO, bsd4_pid_debug, CTLFLAG_RW,
181 &usched_bsd4_pid_debug, 0,
182 "Print KTR debug information for this pid");
d6d39bc7
MC
183
184/* Tuning usched_bsd4 - configurable through kern.usched_bsd4.* */
d6d39bc7
MC
185static int usched_bsd4_smt = 0;
186static int usched_bsd4_cache_coherent = 0;
187static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */
188static int usched_bsd4_queue_checks = 5;
189static int usched_bsd4_stick_to_level = 0;
696ae4d4 190static long usched_bsd4_kicks;
38b25931 191static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
52cac9fb 192static int usched_bsd4_decay = 8;
52cac9fb 193static int usched_bsd4_batch_time = 10;
d6d39bc7
MC
194
195/* KTR debug printings */
196
c016171e 197KTR_INFO_MASTER_EXTERN(usched);
d6d39bc7
MC
198
199#if !defined(KTR_USCHED_BSD4)
200#define KTR_USCHED_BSD4 KTR_ALL
201#endif
202
203KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_urw, 0,
204 "USCHED_BSD4(bsd4_acquire_curproc in user_reseched_wanted "
205 "after release: pid %d, cpuid %d, curr_cpuid %d)",
206 pid_t pid, int cpuid, int curr);
207KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_before_loop, 0,
208 "USCHED_BSD4(bsd4_acquire_curproc before loop: pid %d, cpuid %d, "
209 "curr_cpuid %d)",
210 pid_t pid, int cpuid, int curr);
211KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_not, 0,
212 "USCHED_BSD4(bsd4_acquire_curproc couldn't acquire after "
213 "bsd4_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
214 pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
215KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_switch, 0,
216 "USCHED_BSD4(bsd4_acquire_curproc after lwkt_switch: pid %d, "
217 "cpuid %d, curr_cpuid %d)",
218 pid_t pid, int cpuid, int curr);
219
220KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_release_curproc, 0,
221 "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
222 "cpuid %d, curr_cpuid %d)",
223 pid_t pid, int cpuid, int curr);
224
225KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_select_curproc, 0,
226 "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
227 "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
228 pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
229
d6d39bc7
MC
230KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_false, 0,
231 "USCHED_BSD4(batchy_looser_pri_test false: pid %d, "
232 "cpuid %d, verify_mask %lu)",
233 pid_t pid, int cpuid, cpumask_t mask);
234KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_true, 0,
235 "USCHED_BSD4(batchy_looser_pri_test true: pid %d, "
236 "cpuid %d, verify_mask %lu)",
237 pid_t pid, int cpuid, cpumask_t mask);
238
239KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_smt, 0,
240 "USCHED_BSD4(bsd4_setrunqueue free cpus smt: pid %d, cpuid %d, "
241 "mask %lu, curr_cpuid %d)",
242 pid_t pid, int cpuid, cpumask_t mask, int curr);
243KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_non_smt, 0,
244 "USCHED_BSD4(bsd4_setrunqueue free cpus check non_smt: pid %d, "
245 "cpuid %d, mask %lu, curr_cpuid %d)",
246 pid_t pid, int cpuid, cpumask_t mask, int curr);
247KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_rc, 0,
248 "USCHED_BSD4(bsd4_setrunqueue running cpus check: pid %d, "
249 "cpuid %d, mask %lu, curr_cpuid %d)",
250 pid_t pid, int cpuid, cpumask_t mask, int curr);
251KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found, 0,
252 "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
253 "mask %lu, found_cpuid %d, curr_cpuid %d)",
254 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
255KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_not_found, 0,
256 "USCHED_BSD4(bsd4_setrunqueue not found cpu: pid %d, cpuid %d, "
257 "try_cpuid %d, curr_cpuid %d)",
258 pid_t pid, int cpuid, int try_cpuid, int curr);
259KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found_best_cpuid, 0,
260 "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
261 "mask %lu, found_cpuid %d, curr_cpuid %d)",
262 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
d6d39bc7
MC
263
264KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc, 0,
265 "USCHED_BSD4(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
266 pid_t pid, int old_cpuid, int curr);
d6d39bc7
MC
267KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc, 0,
268 "USCHED_BSD4(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
269 pid_t pid, int old_cpuid, int curr);
270KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_not_good, 0,
271 "USCHED_BSD4(chooseproc_cc not good: pid %d, old_cpumask %lu, "
272 "sibling_mask %lu, curr_cpumask %lu)",
273 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
274KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_elected, 0,
275 "USCHED_BSD4(chooseproc_cc elected: pid %d, old_cpumask %lu, "
276 "sibling_mask %lu, curr_cpumask: %lu)",
277 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
278
279KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process, 0,
280 "USCHED_BSD4(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
281 int id, pid_t pid, int cpuid);
282KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_process, 0,
283 "USCHED_BSD4(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
284 int id, pid_t pid, int cpuid);
285KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0,
286 "USCHED_BSD4(sched_thread %d no process found; tmpmask %lu)",
287 int id, cpumask_t tmpmask);
38b25931
MD
288
289/*
290 * Initialize the run queues at boot time.
291 */
292static void
e28d8b15 293bsd4_rqinit(void *dummy)
38b25931
MD
294{
295 int i;
296
52eedfb5 297 spin_init(&bsd4_spin);
38b25931 298 for (i = 0; i < NQS; i++) {
52eedfb5
MD
299 TAILQ_INIT(&bsd4_queues[i]);
300 TAILQ_INIT(&bsd4_rtqueues[i]);
301 TAILQ_INIT(&bsd4_idqueues[i]);
38b25931 302 }
da23a592 303 atomic_clear_cpumask(&bsd4_curprocmask, 1);
38b25931 304}
e28d8b15 305SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, bsd4_rqinit, NULL)
38b25931
MD
306
307/*
52eedfb5 308 * BSD4_ACQUIRE_CURPROC
38b25931 309 *
52eedfb5
MD
310 * This function is called when the kernel intends to return to userland.
311 * It is responsible for making the thread the current designated userland
312 * thread for this cpu, blocking if necessary.
313 *
314 * The kernel will not depress our LWKT priority until after we return,
315 * in case we have to shove over to another cpu.
316 *
317 * We must determine our thread's disposition before we switch away. This
318 * is very sensitive code.
52eedfb5
MD
319 *
320 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
321 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
322 * occur, this function is called only under very controlled circumstances.
323 *
52eedfb5 324 * MPSAFE
38b25931 325 */
52eedfb5
MD
326static void
327bsd4_acquire_curproc(struct lwp *lp)
38b25931 328{
b9eb1c19
MD
329 globaldata_t gd;
330 bsd4_pcpu_t dd;
4643740a 331 thread_t td;
85946b6c 332#if 0
b9eb1c19 333 struct lwp *olp;
85946b6c 334#endif
38b25931 335
4643740a
MD
336 /*
337 * Make sure we aren't sitting on a tsleep queue.
338 */
339 td = lp->lwp_thread;
340 crit_enter_quick(td);
341 if (td->td_flags & TDF_TSLEEPQ)
342 tsleep_remove(td);
b9eb1c19 343 bsd4_recalculate_estcpu(lp);
38b25931 344
38b25931 345 /*
b9eb1c19
MD
346 * If a reschedule was requested give another thread the
347 * driver's seat.
38b25931 348 */
b9eb1c19
MD
349 if (user_resched_wanted()) {
350 clear_user_resched();
351 bsd4_release_curproc(lp);
d6d39bc7
MC
352
353 KTR_COND_LOG(usched_bsd4_acquire_curproc_urw,
354 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
355 lp->lwp_proc->p_pid,
356 lp->lwp_thread->td_gd->gd_cpuid,
357 mycpu->gd_cpuid);
38b25931 358 }
38b25931 359
52eedfb5 360 /*
b9eb1c19 361 * Loop until we are the current user thread
52eedfb5 362 */
85946b6c
MD
363 gd = mycpu;
364 dd = &bsd4_pcpu[gd->gd_cpuid];
365
d6d39bc7
MC
366 KTR_COND_LOG(usched_bsd4_acquire_curproc_before_loop,
367 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
368 lp->lwp_proc->p_pid,
369 lp->lwp_thread->td_gd->gd_cpuid,
370 gd->gd_cpuid);
371
52eedfb5 372 do {
b9eb1c19 373 /*
85946b6c 374 * Process any pending events and higher priority threads.
b9eb1c19 375 */
85946b6c 376 lwkt_yield();
b9eb1c19
MD
377
378 /*
379 * Become the currently scheduled user thread for this cpu
380 * if we can do so trivially.
381 *
382 * We can steal another thread's current thread designation
383 * on this cpu since, if we are running, that other thread
384 * must not be, so we can safely deschedule it.
385 */
386 if (dd->uschedcp == lp) {
eb501f47
MD
387 /*
388 * We are already the current lwp (hot path).
389 */
b9eb1c19
MD
390 dd->upri = lp->lwp_priority;
391 } else if (dd->uschedcp == NULL) {
eb501f47
MD
392 /*
393 * We can trivially become the current lwp.
394 */
da23a592 395 atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
b9eb1c19
MD
396 dd->uschedcp = lp;
397 dd->upri = lp->lwp_priority;
398 } else if (dd->upri > lp->lwp_priority) {
eb501f47 399 /*
85946b6c
MD
400 * We can steal the current cpu's lwp designation
401 * away simply by replacing it. The other thread
402 * will stall when it tries to return to userland.
eb501f47 403 */
b9eb1c19
MD
404 dd->uschedcp = lp;
405 dd->upri = lp->lwp_priority;
85946b6c 406 /*
b9eb1c19
MD
407 lwkt_deschedule(olp->lwp_thread);
408 bsd4_setrunqueue(olp);
85946b6c 409 */
b9eb1c19 410 } else {
eb501f47
MD
411 /*
412 * We cannot become the current lwp, place the lp
413 * on the bsd4 run-queue and deschedule ourselves.
85946b6c
MD
414 *
415 * When we are reactivated we will have another
416 * chance.
eb501f47 417 */
b9eb1c19 418 lwkt_deschedule(lp->lwp_thread);
d6d39bc7 419
b9eb1c19 420 bsd4_setrunqueue(lp);
d6d39bc7
MC
421
422 KTR_COND_LOG(usched_bsd4_acquire_curproc_not,
423 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
424 lp->lwp_proc->p_pid,
425 lp->lwp_thread->td_gd->gd_cpuid,
426 dd->uschedcp->lwp_proc->p_pid,
427 gd->gd_cpuid);
428
429
b9eb1c19 430 lwkt_switch();
d6d39bc7 431
85946b6c
MD
432 /*
433 * Reload after a switch or setrunqueue/switch possibly
434 * moved us to another cpu.
435 */
436 gd = mycpu;
437 dd = &bsd4_pcpu[gd->gd_cpuid];
d6d39bc7
MC
438
439 KTR_COND_LOG(usched_bsd4_acquire_curproc_switch,
440 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
441 lp->lwp_proc->p_pid,
442 lp->lwp_thread->td_gd->gd_cpuid,
443 gd->gd_cpuid);
b9eb1c19 444 }
52eedfb5 445 } while (dd->uschedcp != lp);
b9eb1c19 446
4643740a
MD
447 crit_exit_quick(td);
448 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
52eedfb5
MD
449}
450
451/*
452 * BSD4_RELEASE_CURPROC
453 *
454 * This routine detaches the current thread from the userland scheduler,
b9eb1c19
MD
455 * usually because the thread needs to run or block in the kernel (at
456 * kernel priority) for a while.
52eedfb5
MD
457 *
458 * This routine is also responsible for selecting a new thread to
459 * make the current thread.
460 *
461 * NOTE: This implementation differs from the dummy example in that
462 * bsd4_select_curproc() is able to select the current process, whereas
463 * dummy_select_curproc() is not able to select the current process.
464 * This means we have to NULL out uschedcp.
465 *
466 * Additionally, note that we may already be on a run queue if releasing
467 * via the lwkt_switch() in bsd4_setrunqueue().
468 *
52eedfb5
MD
469 * MPSAFE
470 */
d6d39bc7 471
52eedfb5
MD
472static void
473bsd4_release_curproc(struct lwp *lp)
474{
475 globaldata_t gd = mycpu;
476 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
477
478 if (dd->uschedcp == lp) {
b9eb1c19 479 crit_enter();
4643740a 480 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
d6d39bc7
MC
481
482 KTR_COND_LOG(usched_bsd4_release_curproc,
483 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
484 lp->lwp_proc->p_pid,
485 lp->lwp_thread->td_gd->gd_cpuid,
486 gd->gd_cpuid);
487
52eedfb5 488 dd->uschedcp = NULL; /* don't let lp be selected */
b9eb1c19 489 dd->upri = PRIBASE_NULL;
da23a592 490 atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
d6d39bc7 491 dd->old_uschedcp = lp; /* used only for KTR debug prints */
52eedfb5 492 bsd4_select_curproc(gd);
b9eb1c19 493 crit_exit();
52eedfb5 494 }
38b25931
MD
495}
496
38b25931 497/*
52eedfb5
MD
498 * BSD4_SELECT_CURPROC
499 *
b9eb1c19
MD
500 * Select a new current process for this cpu and clear any pending user
501 * reschedule request. The cpu currently has no current process.
52eedfb5
MD
502 *
503 * This routine is also responsible for equal-priority round-robining,
504 * typically triggered from bsd4_schedulerclock(). In our dummy example
505 * all the 'user' threads are LWKT scheduled all at once and we just
506 * call lwkt_switch().
507 *
b9eb1c19
MD
508 * The calling process is not on the queue and cannot be selected.
509 *
52eedfb5 510 * MPSAFE
38b25931
MD
511 */
512static
513void
52eedfb5 514bsd4_select_curproc(globaldata_t gd)
38b25931 515{
52eedfb5
MD
516 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
517 struct lwp *nlp;
518 int cpuid = gd->gd_cpuid;
38b25931 519
52eedfb5 520 crit_enter_gd(gd);
52eedfb5 521
287a8577 522 spin_lock(&bsd4_spin);
d6d39bc7 523 if(usched_bsd4_cache_coherent)
e28d8b15 524 nlp = bsd4_chooseproc_locked_cache_coherent(dd->uschedcp);
d6d39bc7 525 else
e28d8b15 526 nlp = bsd4_chooseproc_locked(dd->uschedcp);
d6d39bc7
MC
527
528 if (nlp) {
529
530 KTR_COND_LOG(usched_bsd4_select_curproc,
531 nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
532 nlp->lwp_proc->p_pid,
533 nlp->lwp_thread->td_gd->gd_cpuid,
534 dd->old_uschedcp->lwp_proc->p_pid,
535 dd->old_uschedcp->lwp_thread->td_gd->gd_cpuid,
536 gd->gd_cpuid);
537
da23a592 538 atomic_set_cpumask(&bsd4_curprocmask, CPUMASK(cpuid));
52eedfb5
MD
539 dd->upri = nlp->lwp_priority;
540 dd->uschedcp = nlp;
901ecceb 541 dd->rrcount = 0; /* reset round robin */
287a8577 542 spin_unlock(&bsd4_spin);
52eedfb5 543 lwkt_acquire(nlp->lwp_thread);
52eedfb5 544 lwkt_schedule(nlp->lwp_thread);
eb501f47
MD
545 } else {
546 spin_unlock(&bsd4_spin);
547 }
d6d39bc7 548
eb501f47 549#if 0
da23a592
MD
550 } else if (bsd4_runqcount && (bsd4_rdyprocmask & CPUMASK(cpuid))) {
551 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
287a8577 552 spin_unlock(&bsd4_spin);
52eedfb5
MD
553 lwkt_schedule(&dd->helper_thread);
554 } else {
287a8577 555 spin_unlock(&bsd4_spin);
52eedfb5 556 }
eb501f47 557#endif
52eedfb5
MD
558 crit_exit_gd(gd);
559}
d6d39bc7
MC
560
561/*
562 * batchy_looser_pri_test() - determine if a process is batchy or not
563 * relative to the other processes running in the system
564 */
565static int
e28d8b15 566bsd4_batchy_looser_pri_test(struct lwp* lp)
d6d39bc7
MC
567{
568 cpumask_t mask;
569 bsd4_pcpu_t other_dd;
570 int cpu;
571
572 /* Current running processes */
573 mask = bsd4_curprocmask & smp_active_mask
574 & usched_global_cpumask;
575
576 while(mask) {
577 cpu = BSFCPUMASK(mask);
578 other_dd = &bsd4_pcpu[cpu];
579 if (other_dd->upri - lp->lwp_priority > usched_bsd4_upri_affinity * PPQ) {
580
581 KTR_COND_LOG(usched_batchy_test_false,
582 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
583 lp->lwp_proc->p_pid,
584 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 585 (unsigned long)mask);
d6d39bc7
MC
586
587 return 0;
588 }
589 mask &= ~CPUMASK(cpu);
590 }
591
592 KTR_COND_LOG(usched_batchy_test_true,
593 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
594 lp->lwp_proc->p_pid,
595 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 596 (unsigned long)mask);
d6d39bc7
MC
597
598 return 1;
599}
38b25931
MD
600
601/*
d6d39bc7 602 *
52eedfb5
MD
603 * BSD4_SETRUNQUEUE
604 *
b9eb1c19
MD
605 * Place the specified lwp on the user scheduler's run queue. This routine
606 * must be called with the thread descheduled. The lwp must be runnable.
38b25931 607 *
b9eb1c19 608 * The thread may be the current thread as a special case.
52eedfb5
MD
609 *
610 * MPSAFE
38b25931
MD
611 */
612static void
553ea3c8 613bsd4_setrunqueue(struct lwp *lp)
38b25931 614{
52eedfb5
MD
615 globaldata_t gd;
616 bsd4_pcpu_t dd;
b9eb1c19 617 int cpuid;
38b25931 618 cpumask_t mask;
52eedfb5 619 cpumask_t tmpmask;
38b25931 620
52eedfb5
MD
621 /*
622 * First validate the process state relative to the current cpu.
623 * We don't need the spinlock for this, just a critical section.
624 * We are in control of the process.
625 */
38b25931 626 crit_enter();
164b8401 627 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
4643740a 628 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
164b8401 629 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
4643740a 630 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
553ea3c8 631 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
38b25931
MD
632
633 /*
52eedfb5
MD
634 * Note: gd and dd are relative to the target thread's last cpu,
635 * NOT our current cpu.
38b25931 636 */
553ea3c8 637 gd = lp->lwp_thread->td_gd;
52eedfb5 638 dd = &bsd4_pcpu[gd->gd_cpuid];
38b25931
MD
639
640 /*
52eedfb5
MD
641 * This process is not supposed to be scheduled anywhere or assigned
642 * as the current process anywhere. Assert the condition.
38b25931 643 */
52eedfb5 644 KKASSERT(dd->uschedcp != lp);
38b25931 645
38b25931 646 /*
52eedfb5
MD
647 * XXX fixme. Could be part of a remrunqueue/setrunqueue
648 * operation when the priority is recalculated, so TDF_MIGRATING
649 * may already be set.
38b25931 650 */
52eedfb5
MD
651 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
652 lwkt_giveaway(lp->lwp_thread);
50017724
MD
653
654 /*
655 * We lose control of lp the moment we release the spinlock after
656 * having placed lp on the queue. i.e. another cpu could pick it
657 * up and it could exit, or its priority could be further adjusted,
658 * or something like that.
659 */
287a8577 660 spin_lock(&bsd4_spin);
52eedfb5 661 bsd4_setrunqueue_locked(lp);
e28d8b15 662 lp->lwp_rebal_ticks = sched_ticks;
38b25931
MD
663
664 /*
b9eb1c19
MD
665 * Kick the scheduler helper on one of the other cpu's
666 * and request a reschedule if appropriate.
eb501f47
MD
667 *
668 * NOTE: We check all cpus whose rdyprocmask is set. First we
669 * look for cpus without designated lps, then we look for
670 * cpus with designated lps with a worse priority than our
671 * process.
38b25931 672 */
b9eb1c19 673 ++bsd4_scancpu;
38b25931 674
901ecceb 675 if (usched_bsd4_smt) {
d6d39bc7
MC
676
677 /*
901ecceb
MD
678 * SMT heuristic - Try to schedule on a free physical core.
679 * If no free physical core is found, then choose the one that has
680 * an interactive thread.
d6d39bc7
MC
681 */
682
683 int best_cpuid = -1;
684 int min_prio = MAXPRI * MAXPRI;
685 int sibling;
686
687 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
688 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
689 smp_active_mask & usched_global_cpumask;
690
691 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_smt,
692 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
693 lp->lwp_proc->p_pid,
694 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 695 (unsigned long)mask,
d6d39bc7
MC
696 mycpu->gd_cpuid);
697
698 while (mask) {
699 tmpmask = ~(CPUMASK(cpuid) - 1);
700 if (mask & tmpmask)
701 cpuid = BSFCPUMASK(mask & tmpmask);
702 else
703 cpuid = BSFCPUMASK(mask);
704 gd = globaldata_find(cpuid);
705 dd = &bsd4_pcpu[cpuid];
706
707 if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
708 if (dd->cpunode->parent_node->members & ~dd->cpunode->members & mask) {
709
710 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
711 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
712 lp->lwp_proc->p_pid,
713 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 714 (unsigned long)mask,
d6d39bc7
MC
715 cpuid,
716 mycpu->gd_cpuid);
717
718 goto found;
719 } else {
720 sibling = BSFCPUMASK(dd->cpunode->parent_node->members &
721 ~dd->cpunode->members);
722 if (min_prio > bsd4_pcpu[sibling].upri) {
723 min_prio = bsd4_pcpu[sibling].upri;
724 best_cpuid = cpuid;
725 }
726 }
727 }
728 mask &= ~CPUMASK(cpuid);
729 }
730
731 if (best_cpuid != -1) {
732 cpuid = best_cpuid;
733 gd = globaldata_find(cpuid);
734 dd = &bsd4_pcpu[cpuid];
735
736 KTR_COND_LOG(usched_bsd4_setrunqueue_found_best_cpuid,
737 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
738 lp->lwp_proc->p_pid,
739 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 740 (unsigned long)mask,
d6d39bc7
MC
741 cpuid,
742 mycpu->gd_cpuid);
b9eb1c19 743
eb501f47 744 goto found;
d6d39bc7
MC
745 }
746 } else {
747 /* Fallback to the original heuristic */
748 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
749 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
750 smp_active_mask & usched_global_cpumask;
751
752 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_non_smt,
753 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
754 lp->lwp_proc->p_pid,
755 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 756 (unsigned long)mask,
d6d39bc7
MC
757 mycpu->gd_cpuid);
758
759 while (mask) {
760 tmpmask = ~(CPUMASK(cpuid) - 1);
761 if (mask & tmpmask)
762 cpuid = BSFCPUMASK(mask & tmpmask);
763 else
764 cpuid = BSFCPUMASK(mask);
765 gd = globaldata_find(cpuid);
766 dd = &bsd4_pcpu[cpuid];
767
768 if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
769
770 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
771 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
772 lp->lwp_proc->p_pid,
773 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 774 (unsigned long)mask,
d6d39bc7
MC
775 cpuid,
776 mycpu->gd_cpuid);
777
778 goto found;
779 }
780 mask &= ~CPUMASK(cpuid);
781 }
eb501f47
MD
782 }
783
784 /*
785 * Then cpus which might have a currently running lp
786 */
787 mask = bsd4_curprocmask & bsd4_rdyprocmask &
916e604f 788 lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
eb501f47 789
d6d39bc7
MC
790 KTR_COND_LOG(usched_bsd4_setrunqueue_rc,
791 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
792 lp->lwp_proc->p_pid,
793 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 794 (unsigned long)mask,
d6d39bc7
MC
795 mycpu->gd_cpuid);
796
eb501f47
MD
797 while (mask) {
798 tmpmask = ~(CPUMASK(cpuid) - 1);
799 if (mask & tmpmask)
800 cpuid = BSFCPUMASK(mask & tmpmask);
801 else
802 cpuid = BSFCPUMASK(mask);
803 gd = globaldata_find(cpuid);
804 dd = &bsd4_pcpu[cpuid];
805
d6d39bc7
MC
806 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
807
808 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
809 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
810 lp->lwp_proc->p_pid,
811 lp->lwp_thread->td_gd->gd_cpuid,
153fa3e0 812 (unsigned long)mask,
d6d39bc7
MC
813 cpuid,
814 mycpu->gd_cpuid);
815
eb501f47 816 goto found;
d6d39bc7 817 }
da23a592 818 mask &= ~CPUMASK(cpuid);
b9eb1c19 819 }
eb501f47
MD
820
821 /*
822 * If we cannot find a suitable cpu we reload from bsd4_scancpu
823 * and round-robin. Other cpus will pickup as they release their
824 * current lwps or become ready.
825 *
916e604f
MD
826 * Avoid a degenerate system lockup case if usched_global_cpumask
827 * is set to 0 or otherwise does not cover lwp_cpumask.
828 *
eb501f47
MD
829 * We only kick the target helper thread in this case; we do not
830 * set the user resched flag.
831 */
832 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
916e604f
MD
833 if ((CPUMASK(cpuid) & usched_global_cpumask) == 0) {
834 cpuid = 0;
835 }
eb501f47
MD
836 gd = globaldata_find(cpuid);
837 dd = &bsd4_pcpu[cpuid];
d6d39bc7
MC
838
839 KTR_COND_LOG(usched_bsd4_setrunqueue_not_found,
840 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
841 lp->lwp_proc->p_pid,
842 lp->lwp_thread->td_gd->gd_cpuid,
843 cpuid,
844 mycpu->gd_cpuid);
845
eb501f47
MD
846found:
847 if (gd == mycpu) {
848 spin_unlock(&bsd4_spin);
58bb3381
MD
849 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
850 if (dd->uschedcp == NULL) {
55b580ae 851 wakeup_mycpu(&dd->helper_thread);
58bb3381
MD
852 } else {
853 need_user_resched();
854 }
855 }
eb501f47
MD
856 } else {
857 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
858 spin_unlock(&bsd4_spin);
859 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
e28d8b15 860 lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
eb501f47 861 else
d6d39bc7 862 wakeup(&dd->helper_thread);
eb501f47 863 }
38b25931
MD
864 crit_exit();
865}
866
38b25931
MD
867/*
868 * This routine is called from a systimer IPI. It MUST be MP-safe and
52eedfb5
MD
869 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
870 * each cpu.
871 *
de4d4cb0
MD
872 * This routine is called on every sched tick. If the currently running
873 * thread belongs to this scheduler it will be called with a non-NULL lp,
874 * otherwise it will be called with a NULL lp.
875 *
270ac911 876 * MPSAFE
38b25931
MD
877 */
878static
879void
553ea3c8 880bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
38b25931
MD
881{
882 globaldata_t gd = mycpu;
52eedfb5 883 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
38b25931 884
de4d4cb0
MD
885 /*
886 * No impl if no lp running.
887 */
888 if (lp == NULL)
889 return;
890
38b25931
MD
891 /*
892 * Do we need to round-robin? We round-robin 10 times a second.
893 * This should only occur for cpu-bound batch processes.
894 */
52eedfb5
MD
895 if (++dd->rrcount >= usched_bsd4_rrinterval) {
896 dd->rrcount = 0;
38b25931
MD
897 need_user_resched();
898 }
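	/*
	 * For example, with the default usched_bsd4_rrinterval of
	 * (ESTCPUFREQ + 9) / 10 and an assumed ESTCPUFREQ of 50, this
	 * forces a user reschedule every 5 ticks, i.e. roughly 10 times
	 * per second as noted above.
	 */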
899
900 /*
52cac9fb 901 * Adjust estcpu upward using a real time equivalent calculation.
38b25931 902 */
52cac9fb 903 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
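	/*
	 * Rough numbers, assuming ESTCPUFREQ is 50: the per-tick bump is
	 * ESTCPUMAX / ESTCPUFREQ + 1 == 328, so a thread that runs every
	 * tick accumulates close to ESTCPUMAX (16384) per second of cpu
	 * time before any decay is applied.
	 */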
904
905 /*
77912481
MD
906 * Spinlocks also hold a critical section so there should not be
907 * any active.
50017724 908 */
0846e4ce 909 KKASSERT(gd->gd_spinlocks == 0);
77912481
MD
910
911 bsd4_resetpriority(lp);
38b25931
MD
912}
913
914/*
52eedfb5 915 * Called from acquire and from kern_synch's one-second timer (one of the
d6d39bc7 916 * callout helper threads) with a critical section held.
38b25931 917 *
52eedfb5
MD
918 * Decay p_estcpu based on the number of ticks we haven't been running
919 * and our p_nice. As the load increases each process observes a larger
920 * number of idle ticks (because other processes are running in them).
921 * This observation leads to a larger correction which tends to make the
922 * system more 'batchy'.
38b25931 923 *
52eedfb5
MD
924 * Note that no recalculation occurs for a process which sleeps and wakes
925 * up in the same tick. That is, a system doing thousands of context
926 * switches per second will still only do serious estcpu calculations
927 * ESTCPUFREQ times per second.
38b25931 928 *
52eedfb5 929 * MPSAFE
38b25931
MD
930 */
931static
d6d39bc7 932void
52eedfb5 933bsd4_recalculate_estcpu(struct lwp *lp)
38b25931 934{
52eedfb5
MD
935 globaldata_t gd = mycpu;
936 sysclock_t cpbase;
52cac9fb
MD
937 sysclock_t ttlticks;
938 int estcpu;
939 int decay_factor;
38b25931
MD
940
941 /*
52eedfb5
MD
942 * We have to subtract periodic to get the last schedclock
943 * timeout time, otherwise we would get the upcoming timeout.
944 * Keep in mind that a process can migrate between cpus and
945 * while the scheduler clock should be very close, boundary
946 * conditions could lead to a small negative delta.
38b25931 947 */
52eedfb5 948 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
38b25931 949
52eedfb5
MD
950 if (lp->lwp_slptime > 1) {
951 /*
952 * Too much time has passed, do a coarse correction.
953 */
954 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
955 bsd4_resetpriority(lp);
956 lp->lwp_cpbase = cpbase;
957 lp->lwp_cpticks = 0;
52cac9fb
MD
958 lp->lwp_batch -= ESTCPUFREQ;
959 if (lp->lwp_batch < 0)
960 lp->lwp_batch = 0;
52eedfb5
MD
961 } else if (lp->lwp_cpbase != cpbase) {
962 /*
963 * Adjust estcpu if we are in a different tick. Don't waste
964 * time if we are in the same tick.
965 *
52eedfb5 966 * First calculate the number of ticks in the measurement
52cac9fb 967 * interval. The ttlticks calculation can wind up 0 due to
52eedfb5
MD
968 * a bug in the handling of lwp_slptime (as yet not found),
969 * so make sure we do not get a divide by 0 panic.
970 */
52cac9fb
MD
971 ttlticks = (cpbase - lp->lwp_cpbase) /
972 gd->gd_schedclock.periodic;
68a23bee 973 if ((ssysclock_t)ttlticks < 0) {
52cac9fb
MD
974 ttlticks = 0;
975 lp->lwp_cpbase = cpbase;
52eedfb5 976 }
52cac9fb
MD
977 if (ttlticks == 0)
978 return;
979 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
38b25931 980
52eedfb5 981 /*
982 * Calculate the percentage of one cpu used factoring in ncpus
983 * and the load and adjust estcpu. Handle degenerate cases
984 * by adding 1 to bsd4_runqcount.
985 *
986 * estcpu is scaled by ESTCPUMAX.
987 *
988 * bsd4_runqcount is the excess number of user processes
989 * that cannot be immediately scheduled to cpus. We want
990 * to count these as running to avoid range compression
991 * in the base calculation (which is the actual percentage
992 * of one cpu used).
52eedfb5 993 */
994 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
995 (bsd4_runqcount + ncpus) / (ncpus * ttlticks);
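		/*
		 * Worked example: a thread that ran on every tick of the
		 * interval (lwp_cpticks == ttlticks) on an otherwise idle
		 * system (bsd4_runqcount == 0) yields estcpu == ESTCPUMAX,
		 * i.e. 100% of one cpu; running half the ticks yields
		 * ESTCPUMAX / 2.
		 */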
38b25931 996
52eedfb5 997 /*
52cac9fb
MD
998 * If estcpu is > 50% we become more batch-like
999 * If estcpu is <= 50% we become less batch-like
5c559233 1000 *
52cac9fb 1001 * It takes 30 cpu seconds to traverse the entire range.
52eedfb5 1002 */
52cac9fb
MD
1003 if (estcpu > ESTCPUMAX / 2) {
1004 lp->lwp_batch += ttlticks;
1005 if (lp->lwp_batch > BATCHMAX)
1006 lp->lwp_batch = BATCHMAX;
1007 } else {
1008 lp->lwp_batch -= ttlticks;
1009 if (lp->lwp_batch < 0)
1010 lp->lwp_batch = 0;
5c559233 1011 }
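		/*
		 * With BATCHMAX defined as ESTCPUFREQ * 30 and ttlticks
		 * accumulating at ESTCPUFREQ per second of cpu time, a
		 * thread must stay above the 50% mark for roughly 30 cpu
		 * seconds before lwp_batch saturates, matching the comment
		 * above.
		 */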
344ad853 1012
d6d39bc7 1013 if (usched_bsd4_debug == lp->lwp_proc->p_pid) {
52cac9fb
MD
1014 kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
1015 lp->lwp_proc->p_pid, lp,
1016 estcpu, lp->lwp_estcpu,
1017 lp->lwp_batch,
1018 lp->lwp_cpticks, ttlticks);
5c559233 1019 }
52cac9fb
MD
1020
1021 /*
1022 * Adjust lp->lwp_estcpu. The decay factor determines how
1023 * quickly lwp_estcpu collapses to its realtime calculation.
1024 * A slower collapse gives us a more accurate number but
1025 * can cause a cpu hog to eat too much cpu before the
1026 * scheduler decides to downgrade it.
1027 *
1028 * NOTE: p_nice is accounted for in bsd4_resetpriority(),
1029 * and not here, but we must still ensure that a
1030 * cpu-bound nice -20 process does not completely
1031 * override a cpu-bound nice +20 process.
1032 *
1033 * NOTE: We must use ESTCPULIM() here to deal with any
1034 * overshoot.
1035 */
1036 decay_factor = usched_bsd4_decay;
1037 if (decay_factor < 1)
1038 decay_factor = 1;
1039 if (decay_factor > 1024)
1040 decay_factor = 1024;
1041
1042 lp->lwp_estcpu = ESTCPULIM(
1043 (lp->lwp_estcpu * decay_factor + estcpu) /
1044 (decay_factor + 1));
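		/*
		 * Example with the default decay factor of 8: starting from
		 * lwp_estcpu == 0 with a sustained estcpu of ESTCPUMAX, one
		 * recalculation moves lwp_estcpu to roughly ESTCPUMAX / 9
		 * (~1820), and each further pass closes about 1/9th of the
		 * remaining gap, so the value converges geometrically.
		 */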
1045
d6d39bc7 1046 if (usched_bsd4_debug == lp->lwp_proc->p_pid)
52cac9fb 1047 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
52eedfb5 1048 bsd4_resetpriority(lp);
52cac9fb 1049 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
52eedfb5
MD
1050 lp->lwp_cpticks = 0;
1051 }
38b25931
MD
1052}
1053
1054/*
1055 * Compute the priority of a process when running in user mode.
1056 * Arrange to reschedule if the resulting priority is better
1057 * than that of the current process.
52eedfb5
MD
1058 *
1059 * This routine may be called with any process.
1060 *
1061 * This routine is called by fork1() for initial setup with the process
1062 * of the run queue, and also may be called normally with the process on or
1063 * off the run queue.
1064 *
1065 * MPSAFE
38b25931
MD
1066 */
1067static void
553ea3c8 1068bsd4_resetpriority(struct lwp *lp)
38b25931 1069{
52eedfb5 1070 bsd4_pcpu_t dd;
38b25931 1071 int newpriority;
52eedfb5
MD
1072 u_short newrqtype;
1073 int reschedcpu;
52cac9fb
MD
1074 int checkpri;
1075 int estcpu;
270ac911 1076
38b25931 1077 /*
52eedfb5 1078 * Calculate the new priority and queue type
38b25931 1079 */
52eedfb5 1080 crit_enter();
287a8577 1081 spin_lock(&bsd4_spin);
52eedfb5
MD
1082
1083 newrqtype = lp->lwp_rtprio.type;
1084
1085 switch(newrqtype) {
38b25931 1086 case RTP_PRIO_REALTIME:
f64250e0 1087 case RTP_PRIO_FIFO:
52eedfb5
MD
1088 newpriority = PRIBASE_REALTIME +
1089 (lp->lwp_rtprio.prio & PRIMASK);
1090 break;
38b25931 1091 case RTP_PRIO_NORMAL:
52cac9fb
MD
1092 /*
1093 * Detune estcpu based on batchiness. lwp_batch ranges
1094 * from 0 to BATCHMAX. Limit estcpu for the sake of
1095 * the priority calculation to between 50% and 100%.
1096 */
1097 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
1098 (BATCHMAX * 2);
1099
1100 /*
1101 * p_nice piece Adds (0-40) * 2 0-80
1102 * estcpu Adds 16384 * 4 / 512 0-128
1103 */
52eedfb5 1104 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
52cac9fb 1105 newpriority += estcpu * PPQ / ESTCPUPPQ;
52eedfb5
MD
1106 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1107 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1108 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
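		/*
		 * Worked example: a nice value of 0 and a detuned estcpu of
		 * ESTCPUMAX / 2 contribute 40 + 64 == 104, which the
		 * MAXPRI / 210 scaling reduces to 63, giving a final
		 * priority of PRIBASE_NORMAL + 63 == 191, near the middle
		 * of the normal class range.
		 */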
1109 break;
1110 case RTP_PRIO_IDLE:
52eedfb5
MD
1111 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1112 break;
38b25931 1113 case RTP_PRIO_THREAD:
52eedfb5
MD
1114 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1115 break;
1116 default:
1117 panic("Bad RTP_PRIO %d", newrqtype);
1118 /* NOT REACHED */
38b25931
MD
1119 }
1120
1121 /*
52eedfb5
MD
1122 * The newpriority incorporates the queue type so do a simple masked
1123 * check to determine if the process has moved to another queue. If
1124 * it has, and it is currently on a run queue, then move it.
d992c377
MD
1125 *
1126 * td_upri has normal sense (higher values are more desirable), so
1127 * negate it.
38b25931 1128 */
d992c377 1129 lp->lwp_thread->td_upri = -(newpriority & ~PPQMASK);
52eedfb5
MD
1130 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1131 lp->lwp_priority = newpriority;
4643740a 1132 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
52eedfb5
MD
1133 bsd4_remrunqueue_locked(lp);
1134 lp->lwp_rqtype = newrqtype;
1135 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1136 bsd4_setrunqueue_locked(lp);
52cac9fb 1137 checkpri = 1;
52eedfb5
MD
1138 } else {
1139 lp->lwp_rqtype = newrqtype;
1140 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
52cac9fb 1141 checkpri = 0;
52eedfb5 1142 }
52cac9fb 1143 reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
38b25931 1144 } else {
52eedfb5
MD
1145 lp->lwp_priority = newpriority;
1146 reschedcpu = -1;
52cac9fb 1147 checkpri = 1;
52eedfb5 1148 }
52eedfb5
MD
1149
1150 /*
50017724
MD
1151 * Determine if we need to reschedule the target cpu. This only
1152 * occurs if the LWP is already on a scheduler queue, which means
1153 * that idle cpu notification has already occurred. At most we
1154 * need only issue a need_user_resched() on the appropriate cpu.
281b4fa8
YT
1155 *
1156 * The LWP may be owned by a CPU different from the current one,
1157 * in which case dd->uschedcp may be modified without an MP lock
1158 * or a spinlock held. The worst that happens is that the code
1159 * below causes a spurious need_user_resched() on the target CPU
1160 * and dd->pri to be wrong for a short period of time, both of
1161 * which are harmless.
52cac9fb
MD
1162 *
1163 * If checkpri is 0 we are adjusting the priority of the current
1164 * process, possibly higher (less desirable), so ignore the upri
1165 * check which will fail in that case.
52eedfb5
MD
1166 */
1167 if (reschedcpu >= 0) {
1168 dd = &bsd4_pcpu[reschedcpu];
eb501f47 1169 if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) &&
52cac9fb
MD
1170 (checkpri == 0 ||
1171 (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
52eedfb5 1172 if (reschedcpu == mycpu->gd_cpuid) {
eb501f47 1173 spin_unlock(&bsd4_spin);
52eedfb5
MD
1174 need_user_resched();
1175 } else {
eb501f47
MD
1176 spin_unlock(&bsd4_spin);
1177 atomic_clear_cpumask(&bsd4_rdyprocmask,
1178 CPUMASK(reschedcpu));
52eedfb5 1179 lwkt_send_ipiq(lp->lwp_thread->td_gd,
e28d8b15
MD
1180 bsd4_need_user_resched_remote,
1181 NULL);
52eedfb5 1182 }
eb501f47
MD
1183 } else {
1184 spin_unlock(&bsd4_spin);
52eedfb5 1185 }
eb501f47
MD
1186 } else {
1187 spin_unlock(&bsd4_spin);
38b25931
MD
1188 }
1189 crit_exit();
1190}
1191
3919ced0
MD
1192/*
1193 * MPSAFE
1194 */
c3149361
MD
1195static
1196void
d6d39bc7 1197bsd4_yield(struct lwp *lp)
c3149361
MD
1198{
1199#if 0
1200 /* FUTURE (or something similar) */
1201 switch(lp->lwp_rqtype) {
1202 case RTP_PRIO_NORMAL:
1203 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
c3149361
MD
1204 break;
1205 default:
1206 break;
1207 }
1208#endif
1209 need_user_resched();
1210}
1211
38b25931
MD
1212/*
1213 * Called from fork1() when a new child process is being created.
1214 *
1215 * Give the child process an initial estcpu that is more batchy than
1216 * its parent and dock the parent for the fork (but do not
1217 * reschedule the parent). This comprises the main part of our batch
1218 * detection heuristic for both parallel forking and sequential execs.
1219 *
553ea3c8 1220 * XXX lwp should be "spawning" instead of "forking"
270ac911
MD
1221 *
1222 * MPSAFE
38b25931
MD
1223 */
1224static void
553ea3c8 1225bsd4_forking(struct lwp *plp, struct lwp *lp)
38b25931 1226{
52cac9fb
MD
1227 /*
1228 * Put the child 4 queue slots (out of 32) higher than the parent
1229 * (less desirable than the parent).
1230 */
1231 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1232
1233 /*
1234 * The batch status of children always starts out centerline
1235 * and will inch-up or inch-down as appropriate. It takes roughly
1236 * ~15 seconds of >50% cpu to hit the limit.
1237 */
1238 lp->lwp_batch = BATCHMAX / 2;
1239
1240 /*
1241 * Dock the parent a cost for the fork, protecting us from fork
1242 * bombs. If the parent is forking quickly make the child more
1243 * batchy.
1244 */
1245 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
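	/*
	 * In estcpu terms: each fork charges the parent ESTCPUPPQ / 16 (32),
	 * so roughly 16 forks in quick succession cost the parent one full
	 * queue (ESTCPUPPQ == 512) of priority, while each child starts out
	 * 4 queues worse than its parent.
	 */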
1246}
1247
1248/*
e28d8b15
MD
1249 * Called when a lwp is being removed from this scheduler, typically
1250 * during lwp_exit().
38b25931
MD
1251 */
1252static void
52cac9fb 1253bsd4_exiting(struct lwp *lp, struct proc *child_proc)
38b25931 1254{
38b25931
MD
1255}
1256
e28d8b15
MD
1257static void
1258bsd4_uload_update(struct lwp *lp)
1259{
1260}
1261
38b25931 1262/*
52eedfb5
MD
1263 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1264 * it selects a user process and returns it. If chklp is non-NULL and chklp
1265 * has a better or equal priority then the process that would otherwise be
1266 * chosen, NULL is returned.
38b25931 1267 *
52eedfb5
MD
1268 * Until we fix the RUNQ code the chklp test has to be strict or we may
1269 * bounce between processes trying to acquire the current process designation.
38b25931 1270 *
52eedfb5
MD
1271 * MPSAFE - must be called with bsd4_spin exclusive held. The spinlock is
1272 * left intact through the entire routine.
38b25931
MD
1273 */
1274static
52eedfb5 1275struct lwp *
e28d8b15 1276bsd4_chooseproc_locked(struct lwp *chklp)
38b25931 1277{
52eedfb5
MD
1278 struct lwp *lp;
1279 struct rq *q;
a60ccb85 1280 u_int32_t *which, *which2;
52eedfb5 1281 u_int32_t pri;
a60ccb85
DX
1282 u_int32_t rtqbits;
1283 u_int32_t tsqbits;
1284 u_int32_t idqbits;
1285 cpumask_t cpumask;
38b25931 1286
a60ccb85
DX
1287 rtqbits = bsd4_rtqueuebits;
1288 tsqbits = bsd4_queuebits;
1289 idqbits = bsd4_idqueuebits;
1290 cpumask = mycpu->gd_cpumask;
1291
d6d39bc7 1292
a60ccb85 1293again:
a60ccb85
DX
1294 if (rtqbits) {
1295 pri = bsfl(rtqbits);
52eedfb5
MD
1296 q = &bsd4_rtqueues[pri];
1297 which = &bsd4_rtqueuebits;
a60ccb85
DX
1298 which2 = &rtqbits;
1299 } else if (tsqbits) {
1300 pri = bsfl(tsqbits);
52eedfb5
MD
1301 q = &bsd4_queues[pri];
1302 which = &bsd4_queuebits;
a60ccb85
DX
1303 which2 = &tsqbits;
1304 } else if (idqbits) {
1305 pri = bsfl(idqbits);
52eedfb5
MD
1306 q = &bsd4_idqueues[pri];
1307 which = &bsd4_idqueuebits;
a60ccb85 1308 which2 = &idqbits;
52eedfb5
MD
1309 } else {
1310 return NULL;
1311 }
1312 lp = TAILQ_FIRST(q);
1313 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
270ac911 1314
a60ccb85
DX
1315 while ((lp->lwp_cpumask & cpumask) == 0) {
1316 lp = TAILQ_NEXT(lp, lwp_procq);
1317 if (lp == NULL) {
1318 *which2 &= ~(1 << pri);
1319 goto again;
1320 }
1321 }
a60ccb85 1322
38b25931 1323 /*
52eedfb5
MD
1324 * If the passed lwp <chklp> is reasonably close to the selected
1325 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
d6d39bc7 1326 *
52eedfb5
MD
1327 * Note that we must error on the side of <chklp> to avoid bouncing
1328 * between threads in the acquire code.
38b25931 1329 */
52eedfb5
MD
1330 if (chklp) {
1331 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1332 return(NULL);
1333 }
38b25931 1334
52eedfb5
MD
1335 /*
1336 * If the chosen lwp does not reside on this cpu spend a few
1337 * cycles looking for a better candidate at the same priority level.
1338 * This is a fallback check, setrunqueue() tries to wakeup the
1339 * correct cpu and is our front-line affinity.
1340 */
1341 if (lp->lwp_thread->td_gd != mycpu &&
1342 (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
1343 ) {
1344 if (chklp->lwp_thread->td_gd == mycpu) {
52eedfb5 1345 lp = chklp;
38b25931 1346 }
52eedfb5 1347 }
38b25931 1348
d6d39bc7
MC
1349 KTR_COND_LOG(usched_chooseproc,
1350 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1351 lp->lwp_proc->p_pid,
1352 lp->lwp_thread->td_gd->gd_cpuid,
1353 mycpu->gd_cpuid);
1354
52eedfb5
MD
1355 TAILQ_REMOVE(q, lp, lwp_procq);
1356 --bsd4_runqcount;
1357 if (TAILQ_EMPTY(q))
1358 *which &= ~(1 << pri);
4643740a
MD
1359 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1360 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
901ecceb 1361
52eedfb5
MD
1362 return lp;
1363}
38b25931 1364
d6d39bc7
MC
1365/*
1366 * chooseproc() - with a cache coherence heuristic. Try to pull a process that
1367 * has its home on the current CPU. If the process doesn't have its home here
1368 * and is a batchy one (see bsd4_batchy_looser_pri_test), we can wait for a
1369 * sched_tick; maybe its home will become free and we can pull it in there.
1370 * Anyway, we can't wait more than one tick. If that tick has expired, we
1371 * pull in the process, no matter what.
1372 */
1373static
1374struct lwp *
e28d8b15 1375bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp)
d6d39bc7
MC
1376{
1377 struct lwp *lp;
1378 struct rq *q;
1379 u_int32_t *which, *which2;
1380 u_int32_t pri;
1381 u_int32_t checks;
1382 u_int32_t rtqbits;
1383 u_int32_t tsqbits;
1384 u_int32_t idqbits;
1385 cpumask_t cpumask;
1386
1387 struct lwp * min_level_lwp = NULL;
1388 struct rq *min_q = NULL;
1389 cpumask_t siblings;
1390 cpu_node_t* cpunode = NULL;
1391 u_int32_t min_level = MAXCPU; /* number of levels < MAXCPU */
1392 u_int32_t *min_which = NULL;
1393 u_int32_t min_pri = 0;
1394 u_int32_t level = 0;
1395
1396 rtqbits = bsd4_rtqueuebits;
1397 tsqbits = bsd4_queuebits;
1398 idqbits = bsd4_idqueuebits;
1399 cpumask = mycpu->gd_cpumask;
1400
1401 /* Get the mask corresponding to the sysctl configured level */
1402 cpunode = bsd4_pcpu[mycpu->gd_cpuid].cpunode;
1403 level = usched_bsd4_stick_to_level;
1404 while (level) {
1405 cpunode = cpunode->parent_node;
1406 level--;
1407 }
1408 /* The cpus which can elect a process */
1409 siblings = cpunode->members;
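	/*
	 * Example, assuming a topology with hardware threads grouped under
	 * cores: usched_bsd4_stick_to_level == 0 leaves "siblings" as just
	 * this cpu, a value of 1 widens it to the threads sharing this
	 * core, and larger values keep walking up the cpu_node_t tree
	 * toward the whole package.
	 */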
901ecceb 1410 checks = 0;
d6d39bc7
MC
1411
1412again:
1413 if (rtqbits) {
1414 pri = bsfl(rtqbits);
1415 q = &bsd4_rtqueues[pri];
1416 which = &bsd4_rtqueuebits;
1417 which2 = &rtqbits;
1418 } else if (tsqbits) {
1419 pri = bsfl(tsqbits);
1420 q = &bsd4_queues[pri];
1421 which = &bsd4_queuebits;
1422 which2 = &tsqbits;
1423 } else if (idqbits) {
1424 pri = bsfl(idqbits);
1425 q = &bsd4_idqueues[pri];
1426 which = &bsd4_idqueuebits;
1427 which2 = &idqbits;
1428 } else {
901ecceb
MD
1429 /*
1430 * No more left and we didn't reach the checks limit.
1431 */
e28d8b15 1432 bsd4_kick_helper(min_level_lwp);
d6d39bc7
MC
1433 return NULL;
1434 }
1435 lp = TAILQ_FIRST(q);
1436 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1437
901ecceb
MD
1438 /*
1439 * Limit the number of checks per queue to a configurable value to
1440 * minimize the contention (we are in a locked region).
1441 */
901ecceb 1442 while (checks < usched_bsd4_queue_checks) {
d6d39bc7
MC
1443 if ((lp->lwp_cpumask & cpumask) == 0 ||
1444 ((siblings & lp->lwp_thread->td_gd->gd_cpumask) == 0 &&
e28d8b15
MD
1445 (lp->lwp_rebal_ticks == sched_ticks ||
1446 lp->lwp_rebal_ticks == (int)(sched_ticks - 1)) &&
1447 bsd4_batchy_looser_pri_test(lp))) {
d6d39bc7
MC
1448
1449 KTR_COND_LOG(usched_chooseproc_cc_not_good,
1450 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1451 lp->lwp_proc->p_pid,
153fa3e0
MD
1452 (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1453 (unsigned long)siblings,
1454 (unsigned long)cpumask);
d6d39bc7
MC
1455
1456 cpunode = bsd4_pcpu[lp->lwp_thread->td_gd->gd_cpuid].cpunode;
1457 level = 0;
1458 while (cpunode) {
901ecceb 1459 if (cpunode->members & cpumask)
d6d39bc7 1460 break;
d6d39bc7
MC
1461 cpunode = cpunode->parent_node;
1462 level++;
1463 }
901ecceb
MD
1464 if (level < min_level ||
1465 (level == min_level && min_level_lwp &&
1466 lp->lwp_priority < min_level_lwp->lwp_priority)) {
e28d8b15 1467 bsd4_kick_helper(min_level_lwp);
d6d39bc7
MC
1468 min_level_lwp = lp;
1469 min_level = level;
1470 min_q = q;
1471 min_which = which;
1472 min_pri = pri;
901ecceb 1473 } else {
e28d8b15 1474 bsd4_kick_helper(lp);
d6d39bc7 1475 }
d6d39bc7
MC
1476 lp = TAILQ_NEXT(lp, lwp_procq);
1477 if (lp == NULL) {
1478 *which2 &= ~(1 << pri);
1479 goto again;
1480 }
1481 } else {
1482 KTR_COND_LOG(usched_chooseproc_cc_elected,
1483 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1484 lp->lwp_proc->p_pid,
153fa3e0
MD
1485 (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1486 (unsigned long)siblings,
1487 (unsigned long)cpumask);
d6d39bc7
MC
1488
1489 goto found;
1490 }
901ecceb 1491 ++checks;
d6d39bc7 1492 }
901ecceb
MD
1493
1494 /*
1495 * Checks exhausted, we tried to defer too many threads, so schedule
1496 * the best of the worst.
1497 */
d6d39bc7
MC
1498 lp = min_level_lwp;
1499 q = min_q;
1500 which = min_which;
1501 pri = min_pri;
1502 KASSERT(lp, ("chooseproc: at least the first lp was good"));
1503
1504found:
1505
1506 /*
1507 * If the passed lwp <chklp> is reasonably close to the selected
1508 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1509 *
1510 * Note that we must error on the side of <chklp> to avoid bouncing
1511 * between threads in the acquire code.
1512 */
1513 if (chklp) {
901ecceb 1514 if (chklp->lwp_priority < lp->lwp_priority + PPQ) {
e28d8b15 1515 bsd4_kick_helper(lp);
d6d39bc7 1516 return(NULL);
901ecceb 1517 }
d6d39bc7
MC
1518 }
1519
1520 KTR_COND_LOG(usched_chooseproc_cc,
1521 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1522 lp->lwp_proc->p_pid,
1523 lp->lwp_thread->td_gd->gd_cpuid,
1524 mycpu->gd_cpuid);
1525
1526 TAILQ_REMOVE(q, lp, lwp_procq);
1527 --bsd4_runqcount;
1528 if (TAILQ_EMPTY(q))
1529 *which &= ~(1 << pri);
1530 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1531 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
901ecceb 1532
d6d39bc7
MC
1533 return lp;
1534}
1535
901ecceb
MD
1536/*
1537 * If we aren't willing to schedule a ready process on our cpu, give its
1538 * target cpu a kick rather than wait for the next tick.
1539 *
1540 * Called with bsd4_spin held.
1541 */
1542static
1543void
e28d8b15 1544bsd4_kick_helper(struct lwp *lp)
901ecceb
MD
1545{
1546 globaldata_t gd;
1547 bsd4_pcpu_t dd;
1548
1549 if (lp == NULL)
1550 return;
1551 gd = lp->lwp_thread->td_gd;
1552 dd = &bsd4_pcpu[gd->gd_cpuid];
1553 if ((smp_active_mask & usched_global_cpumask &
1554 bsd4_rdyprocmask & gd->gd_cpumask) == 0) {
1555 return;
1556 }
1557 ++usched_bsd4_kicks;
1558 atomic_clear_cpumask(&bsd4_rdyprocmask, gd->gd_cpumask);
1559 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
e28d8b15 1560 lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
901ecceb
MD
1561 } else {
1562 wakeup(&dd->helper_thread);
1563 }
1564}
b9eb1c19 1565
52eedfb5
MD
1566static
1567void
e28d8b15 1568bsd4_need_user_resched_remote(void *dummy)
52eedfb5 1569{
b9eb1c19
MD
1570 globaldata_t gd = mycpu;
1571 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
1572
eb501f47 1573 need_user_resched();
55b580ae
AH
1574
1575 /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1576 wakeup_mycpu(&dd->helper_thread);
52eedfb5 1577}
38b25931 1578
52eedfb5
MD
1579/*
1580 * bsd4_remrunqueue_locked() removes a given process from the run queue
1581 * that it is on, clearing the queue busy bit if it becomes empty.
1582 *
1583 * Note that user process scheduler is different from the LWKT schedule.
1584 * The user process scheduler only manages user processes but it uses LWKT
1585 * underneath, and a user process operating in the kernel will often be
1586 * 'released' from our management.
1587 *
1588 * MPSAFE - bsd4_spin must be held exclusively on call
1589 */
1590static void
1591bsd4_remrunqueue_locked(struct lwp *lp)
1592{
1593 struct rq *q;
1594 u_int32_t *which;
1595 u_int8_t pri;
1596
4643740a
MD
1597 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1598 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
52eedfb5
MD
1599 --bsd4_runqcount;
1600 KKASSERT(bsd4_runqcount >= 0);
1601
1602 pri = lp->lwp_rqindex;
1603 switch(lp->lwp_rqtype) {
1604 case RTP_PRIO_NORMAL:
1605 q = &bsd4_queues[pri];
1606 which = &bsd4_queuebits;
1607 break;
1608 case RTP_PRIO_REALTIME:
1609 case RTP_PRIO_FIFO:
1610 q = &bsd4_rtqueues[pri];
1611 which = &bsd4_rtqueuebits;
1612 break;
1613 case RTP_PRIO_IDLE:
1614 q = &bsd4_idqueues[pri];
1615 which = &bsd4_idqueuebits;
1616 break;
1617 default:
1618 panic("remrunqueue: invalid rtprio type");
1619 /* NOT REACHED */
1620 }
1621 TAILQ_REMOVE(q, lp, lwp_procq);
1622 if (TAILQ_EMPTY(q)) {
1623 KASSERT((*which & (1 << pri)) != 0,
1624 ("remrunqueue: remove from empty queue"));
1625 *which &= ~(1 << pri);
38b25931
MD
1626 }
1627}
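/*
 * Illustrative, standalone sketch: the queue-bitmap bookkeeping maintained by
 * bsd4_remrunqueue_locked()/bsd4_setrunqueue_locked().  Each of the 32 run
 * queues owns one bit in a 32-bit word; the bit is set while the queue is
 * non-empty, so the best (lowest-index) runnable queue can be located with a
 * single bit scan.  __builtin_ctz stands in for the kernel's bit-scan helper.
 */
#if 0
#include <stdint.h>

static inline void
rqbits_set(uint32_t *which, int pri)
{
	*which |= 1u << pri;		/* queue <pri> became non-empty */
}

static inline void
rqbits_clear(uint32_t *which, int pri)
{
	*which &= ~(1u << pri);		/* queue <pri> is now empty */
}

/* Return the best non-empty queue index, or -1 if every queue is empty. */
static inline int
rqbits_best(uint32_t which)
{
	return (which ? __builtin_ctz(which) : -1);
}
#endif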
1628
52eedfb5
MD
1629/*
1630 * bsd4_setrunqueue_locked()
1631 *
1632 * Add a process whose rqtype and rqindex had previously been calculated
1633 * onto the appropriate run queue. Determine if the addition requires
1634 * a reschedule on a cpu and return the cpuid or -1.
1635 *
1636 * NOTE: Lower priorities are better priorities.
1637 *
1638 * MPSAFE - bsd4_spin must be held exclusively on call
1639 */
1640static void
1641bsd4_setrunqueue_locked(struct lwp *lp)
1642{
1643 struct rq *q;
1644 u_int32_t *which;
1645 int pri;
1646
4643740a
MD
1647 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1648 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
52eedfb5
MD
1649 ++bsd4_runqcount;
1650
1651 pri = lp->lwp_rqindex;
1652
1653 switch(lp->lwp_rqtype) {
1654 case RTP_PRIO_NORMAL:
1655 q = &bsd4_queues[pri];
1656 which = &bsd4_queuebits;
1657 break;
1658 case RTP_PRIO_REALTIME:
1659 case RTP_PRIO_FIFO:
1660 q = &bsd4_rtqueues[pri];
1661 which = &bsd4_rtqueuebits;
1662 break;
1663 case RTP_PRIO_IDLE:
1664 q = &bsd4_idqueues[pri];
1665 which = &bsd4_idqueuebits;
1666 break;
1667 default:
1668 panic("setrunqueue: invalid rtprio type");
1669 /* NOT REACHED */
1670 }
1671
1672 /*
1673 * Add to the correct queue and set the appropriate bit. If no
1674 * lower priority (i.e. better) processes are in the queue then
1675 * we want a reschedule; calculate the best cpu for the job.
1676 *
1677 * Always run reschedules on the LWP's original cpu.
1678 */
1679 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1680 *which |= 1 << pri;
1681}
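/*
 * Illustrative sketch (assumption, not taken from the code shown here): the
 * rqindex consumed by bsd4_setrunqueue_locked() is derived from the lwp's
 * user priority before the lwp is queued, conceptually by folding MAXPRI
 * priority levels into NQS queues of PPQ levels each.
 */
#if 0
#define SKETCH_MAXPRI	128
#define SKETCH_NQS	32
#define SKETCH_PPQ	(SKETCH_MAXPRI / SKETCH_NQS)
#define SKETCH_PRIMASK	(SKETCH_MAXPRI - 1)

static inline int
priority_to_rqindex(int priority)
{
	return ((priority & SKETCH_PRIMASK) / SKETCH_PPQ);	/* 0..31 */
}
#endif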
1682
38b25931
MD
1683/*
1684 * For SMP systems a user scheduler helper thread is created for each
1685 * cpu and is used to allow one cpu to wake up another for the purposes of
c9e9fb21
MD
1686 * scheduling userland threads from setrunqueue().
1687 *
1688 * UP systems do not need the helper since there is only one cpu.
1689 *
1690 * We can't use the idle thread for this because we might block.
1691 * Additionally, doing things this way allows us to HLT idle cpus
1692 * on MP systems.
52eedfb5
MD
1693 *
1694 * MPSAFE
38b25931
MD
1695 */
1696static void
1697sched_thread(void *dummy)
1698{
52eedfb5
MD
1699 globaldata_t gd;
1700 bsd4_pcpu_t dd;
85946b6c 1701 bsd4_pcpu_t tmpdd;
52eedfb5 1702 struct lwp *nlp;
eb501f47 1703 cpumask_t mask;
52eedfb5 1704 int cpuid;
418f19aa 1705 cpumask_t tmpmask;
52eedfb5
MD
1706 int tmpid;
1707
1708 gd = mycpu;
1709 cpuid = gd->gd_cpuid; /* doesn't change */
eb501f47 1710 mask = gd->gd_cpumask; /* doesn't change */
52eedfb5
MD
1711 dd = &bsd4_pcpu[cpuid];
1712
1713 /*
c9e9fb21
MD
1714 * Since we are woken up only when no user processes are scheduled
1715 * on a cpu, we can run at an ultra low priority.
52eedfb5 1716 */
50017724 1717 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
38b25931 1718
90f4cbeb 1719 tsleep(&dd->helper_thread, 0, "sched_thread_sleep", 0);
d6d39bc7 1720
38b25931 1721 for (;;) {
50017724
MD
1722 /*
1723 * We use the LWKT deschedule-interlock trick to avoid racing
1724 * bsd4_rdyprocmask. This means we cannot block through to the
1725 * manual lwkt_switch() call we make below.
1726 */
52eedfb5 1727 crit_enter_gd(gd);
d6d39bc7 1728 tsleep_interlock(&dd->helper_thread, 0);
287a8577 1729 spin_lock(&bsd4_spin);
eb501f47 1730 atomic_set_cpumask(&bsd4_rdyprocmask, mask);
b9eb1c19
MD
1731
1732 clear_user_resched(); /* This satisfies the reschedule request */
1733 dd->rrcount = 0; /* Reset the round-robin counter */
1734
eb501f47 1735 if ((bsd4_curprocmask & mask) == 0) {
b9eb1c19
MD
1736 /*
1737 * No thread is currently scheduled.
1738 */
1739 KKASSERT(dd->uschedcp == NULL);
e28d8b15 1740 if ((nlp = bsd4_chooseproc_locked(NULL)) != NULL) {
d6d39bc7
MC
1741 KTR_COND_LOG(usched_sched_thread_no_process,
1742 nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1743 gd->gd_cpuid,
1744 nlp->lwp_proc->p_pid,
1745 nlp->lwp_thread->td_gd->gd_cpuid);
1746
eb501f47 1747 atomic_set_cpumask(&bsd4_curprocmask, mask);
52eedfb5
MD
1748 dd->upri = nlp->lwp_priority;
1749 dd->uschedcp = nlp;
901ecceb 1750 dd->rrcount = 0; /* reset round robin */
287a8577 1751 spin_unlock(&bsd4_spin);
52eedfb5
MD
1752 lwkt_acquire(nlp->lwp_thread);
1753 lwkt_schedule(nlp->lwp_thread);
1754 } else {
287a8577 1755 spin_unlock(&bsd4_spin);
52eedfb5 1756 }
b9eb1c19 1757 } else if (bsd4_runqcount) {
e28d8b15 1758 if ((nlp = bsd4_chooseproc_locked(dd->uschedcp)) != NULL) {
d6d39bc7
MC
1759 KTR_COND_LOG(usched_sched_thread_process,
1760 nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1761 gd->gd_cpuid,
1762 nlp->lwp_proc->p_pid,
1763 nlp->lwp_thread->td_gd->gd_cpuid);
1764
eb501f47
MD
1765 dd->upri = nlp->lwp_priority;
1766 dd->uschedcp = nlp;
901ecceb 1767 dd->rrcount = 0; /* reset round robin */
eb501f47
MD
1768 spin_unlock(&bsd4_spin);
1769 lwkt_acquire(nlp->lwp_thread);
1770 lwkt_schedule(nlp->lwp_thread);
52eedfb5 1771 } else {
eb501f47
MD
1772 /*
1773 * CHAINING CONDITION TRAIN
1774 *
1775 * We could not deal with the scheduler wakeup
1776 * request on this cpu, so locate a ready scheduler
1777 * with no current lp assignment and chain to it.
1778 *
1779 * This ensures that a wakeup race which fails due
1780 * to the priority test does not leave other unscheduled
1781 * cpus idle when the runqueue is not empty.
1782 */
d6d39bc7 1783 tmpmask = ~bsd4_curprocmask &
901ecceb 1784 bsd4_rdyprocmask & smp_active_mask;
eb501f47
MD
1785 if (tmpmask) {
1786 tmpid = BSFCPUMASK(tmpmask);
85946b6c 1787 tmpdd = &bsd4_pcpu[tmpid];
eb501f47 1788 atomic_clear_cpumask(&bsd4_rdyprocmask,
901ecceb 1789 CPUMASK(tmpid));
eb501f47 1790 spin_unlock(&bsd4_spin);
d6d39bc7 1791 wakeup(&tmpdd->helper_thread);
eb501f47
MD
1792 } else {
1793 spin_unlock(&bsd4_spin);
1794 }
d6d39bc7
MC
1795
1796 KTR_LOG(usched_sched_thread_no_process_found,
901ecceb 1797 gd->gd_cpuid, (unsigned long)tmpmask);
52eedfb5 1798 }
b9eb1c19
MD
1799 } else {
1800 /*
1801 * The runq is empty.
1802 */
287a8577 1803 spin_unlock(&bsd4_spin);
38b25931 1804 }
85946b6c
MD
1805
1806 /*
1807 * We're descheduled unless someone scheduled us. Switch away.
1808 * Exiting the critical section will cause splz() to be called
1809 * for us if interrupts and such are pending.
1810 */
52eedfb5 1811 crit_exit_gd(gd);
901ecceb 1812 tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
38b25931
MD
1813 }
1814}
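/*
 * Illustrative userland analogue (hypothetical names): the helper loop above
 * arms its sleep interlock with tsleep_interlock() *before* advertising the
 * cpu in bsd4_rdyprocmask, so a wakeup() arriving in the window between the
 * advertisement and the final PINTERLOCKED tsleep() is not lost.  The same
 * "publish readiness, then sleep atomically" idea with a pthread mutex and
 * condition variable:
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t helper_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  helper_cv  = PTHREAD_COND_INITIALIZER;
static bool helper_ready;		/* analogue of the rdyprocmask bit */
static bool work_pending;		/* set by the waker */

static void
helper_wait_for_work(void)
{
	pthread_mutex_lock(&helper_mtx);
	helper_ready = true;		/* advertise under the lock */
	while (!work_pending)		/* wait releases the lock atomically */
		pthread_cond_wait(&helper_cv, &helper_mtx);
	work_pending = false;
	helper_ready = false;
	pthread_mutex_unlock(&helper_mtx);
}

static void
kick_helper_analogue(void)
{
	pthread_mutex_lock(&helper_mtx);
	work_pending = true;
	if (helper_ready)
		pthread_cond_signal(&helper_cv);
	pthread_mutex_unlock(&helper_mtx);
}
#endif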
1815
d6d39bc7
MC
1816 /* sysctl handler for the stick_to_level parameter */
1817static int
1818sysctl_usched_bsd4_stick_to_level(SYSCTL_HANDLER_ARGS)
1819{
1820 int error, new_val;
1821
1822 new_val = usched_bsd4_stick_to_level;
1823
1824 error = sysctl_handle_int(oidp, &new_val, 0, req);
1825 if (error != 0 || req->newptr == NULL)
1826 return (error);
901ecceb 1827 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
d6d39bc7
MC
1828 return (EINVAL);
1829 usched_bsd4_stick_to_level = new_val;
1830 return (0);
1831}
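/*
 * Illustrative userland sketch (hypothetical program): the handler above
 * rejects values outside [0, cpu_topology_levels_number - 1].  Once the
 * sysctl tree is registered below, the knob is reachable from userland as
 * kern.usched_bsd4.stick_to_level, e.g. via sysctlbyname(3) (root is
 * required to set it):
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int level = 2;		/* hypothetical desired topology level */

	if (sysctlbyname("kern.usched_bsd4.stick_to_level",
	    NULL, NULL, &level, sizeof(level)) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("stick_to_level set to %d\n", level);
	return (0);
}
#endif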
1832
38b25931
MD
1833/*
1834 * Setup our scheduler helpers. Note that curprocmask bit 0 has already
1835 * been cleared by rqinit() and we should not mess with it further.
1836 */
1837static void
1838sched_thread_cpu_init(void)
1839{
d6d39bc7
MC
1840 int i;
1841 int cpuid;
1842 int smt_not_supported = 0;
1843 int cache_coherent_not_supported = 0;
901ecceb 1844
d6d39bc7
MC
1845 if (bootverbose)
1846 kprintf("Start scheduler helpers on cpus:\n");
38b25931 1847
d6d39bc7 1848 sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
901ecceb
MD
1849 usched_bsd4_sysctl_tree =
1850 SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
1851 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1852 "usched_bsd4", CTLFLAG_RD, 0, "");
38b25931 1853
d6d39bc7
MC
1854 for (i = 0; i < ncpus; ++i) {
1855 bsd4_pcpu_t dd = &bsd4_pcpu[i];
1856 cpumask_t mask = CPUMASK(i);
38b25931 1857
d6d39bc7
MC
1858 if ((mask & smp_active_mask) == 0)
1859 continue;
38b25931 1860
d6d39bc7 1861 dd->cpunode = get_cpu_node_by_cpuid(i);
38b25931 1862
d6d39bc7
MC
1863 if (dd->cpunode == NULL) {
1864 smt_not_supported = 1;
1865 cache_coherent_not_supported = 1;
1866 if (bootverbose)
901ecceb
MD
1867 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1868 "found for cpu\n", i);
d6d39bc7 1869 } else {
d6d39bc7 1870 switch (dd->cpunode->type) {
901ecceb
MD
1871 case THREAD_LEVEL:
1872 if (bootverbose)
1873 kprintf ("\tcpu%d - HyperThreading "
1874 "available. Core siblings: ",
1875 i);
1876 break;
1877 case CORE_LEVEL:
1878 smt_not_supported = 1;
1879
1880 if (bootverbose)
1881 kprintf ("\tcpu%d - No HT available, "
1882 "multi-core/physical "
1883 "cpu. Physical siblings: ",
1884 i);
1885 break;
1886 case CHIP_LEVEL:
1887 smt_not_supported = 1;
1888
1889 if (bootverbose)
1890 kprintf ("\tcpu%d - No HT available, "
1891 "single-core/physical cpu. "
1892 "Package Siblings: ",
1893 i);
1894 break;
1895 default:
1896 /* Let's go for safe defaults here */
1897 smt_not_supported = 1;
1898 cache_coherent_not_supported = 1;
1899 if (bootverbose)
1900 kprintf ("\tcpu%d - Unknown cpunode->"
1901 "type=%u. Siblings: ",
1902 i,
1903 (u_int)dd->cpunode->type);
1904 break;
d6d39bc7
MC
1905 }
1906
1907 if (bootverbose) {
1908 if (dd->cpunode->parent_node != NULL) {
1909 CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1910 kprintf("cpu%d ", cpuid);
1911 kprintf("\n");
1912 } else {
1913 kprintf(" no siblings\n");
1914 }
1915 }
1916 }
1917
1918 lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
901ecceb 1919 0, i, "usched %d", i);
d6d39bc7
MC
1920
1921 /*
1922 * Allow user scheduling on the target cpu. cpu #0 has already
1923 * been enabled in rqinit().
1924 */
1925 if (i)
1926 atomic_clear_cpumask(&bsd4_curprocmask, mask);
1927 atomic_set_cpumask(&bsd4_rdyprocmask, mask);
1928 dd->upri = PRIBASE_NULL;
1929
1930 }
1931
1932 /* usched_bsd4 sysctl configurable parameters */
1933
1934 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1935 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1936 OID_AUTO, "rrinterval", CTLFLAG_RW,
1937 &usched_bsd4_rrinterval, 0, "");
d6d39bc7 1938 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1939 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1940 OID_AUTO, "decay", CTLFLAG_RW,
1941 &usched_bsd4_decay, 0, "Extra decay when not running");
d6d39bc7 1942 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1943 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1944 OID_AUTO, "batch_time", CTLFLAG_RW,
1945 &usched_bsd4_batch_time, 0, "Min batch counter value");
1946 SYSCTL_ADD_LONG(&usched_bsd4_sysctl_ctx,
1947 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1948 OID_AUTO, "kicks", CTLFLAG_RW,
1949 &usched_bsd4_kicks, "Number of kickstarts");
d6d39bc7
MC
1950
1951 /* Add enable/disable option for SMT scheduling if supported */
1952 if (smt_not_supported) {
1953 usched_bsd4_smt = 0;
1954 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1955 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1956 OID_AUTO, "smt", CTLFLAG_RD,
1957 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
d6d39bc7
MC
1958 } else {
1959 usched_bsd4_smt = 1;
1960 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1961 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1962 OID_AUTO, "smt", CTLFLAG_RW,
1963 &usched_bsd4_smt, 0, "Enable SMT scheduling");
d6d39bc7
MC
1964 }
1965
901ecceb
MD
1966 /*
1967 * Add enable/disable option for cache coherent scheduling
1968 * if supported
1969 */
d6d39bc7 1970 if (cache_coherent_not_supported) {
d6d39bc7
MC
1971 usched_bsd4_cache_coherent = 0;
1972 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1973 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1974 OID_AUTO, "cache_coherent", CTLFLAG_RD,
1975 "NOT SUPPORTED", 0,
1976 "Cache coherence NOT SUPPORTED");
d6d39bc7 1977 } else {
d6d39bc7
MC
1978 usched_bsd4_cache_coherent = 1;
1979 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1980 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1981 OID_AUTO, "cache_coherent", CTLFLAG_RW,
1982 &usched_bsd4_cache_coherent, 0,
1983 "Enable/Disable cache coherent scheduling");
d6d39bc7
MC
1984
1985 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1986 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1987 OID_AUTO, "upri_affinity", CTLFLAG_RW,
1988 &usched_bsd4_upri_affinity, 1,
1989 "Number of PPQs in user priority check");
d6d39bc7
MC
1990
1991 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1992 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1993 OID_AUTO, "queue_checks", CTLFLAG_RW,
1994 &usched_bsd4_queue_checks, 5,
1995 "LWPs to check from a queue before giving up");
d6d39bc7
MC
1996
1997 SYSCTL_ADD_PROC(&usched_bsd4_sysctl_ctx,
901ecceb
MD
1998 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
1999 OID_AUTO, "stick_to_level",
2000 CTLTYPE_INT | CTLFLAG_RW,
2001 NULL, sizeof usched_bsd4_stick_to_level,
2002 sysctl_usched_bsd4_stick_to_level, "I",
2003 "Stick a process to this level. See sysctl"
2004 "paremter hw.cpu_topology.level_description");
d6d39bc7 2005 }
38b25931 2006}
ba39e2e0
MD
2007SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2008 sched_thread_cpu_init, NULL)