1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/queue.h>
37 #include <sys/proc.h>
38 #include <sys/rtprio.h>
39 #include <sys/uio.h>
40 #include <sys/sysctl.h>
41 #include <sys/resourcevar.h>
42 #include <sys/spinlock.h>
43 #include <sys/cpu_topology.h>
44 #include <sys/thread2.h>
45 #include <sys/spinlock2.h>
46 #include <sys/mplock2.h>
47
48 #include <sys/ktr.h>
49
50 #include <machine/cpu.h>
51 #include <machine/smp.h>
52
53 /*
54  * Priorities.  Note that with 32 run queues per scheduler each queue
55  * represents four priority levels.
56  */
57
58 #define MAXPRI                  128
59 #define PRIMASK                 (MAXPRI - 1)
60 #define PRIBASE_REALTIME        0
61 #define PRIBASE_NORMAL          MAXPRI
62 #define PRIBASE_IDLE            (MAXPRI * 2)
63 #define PRIBASE_THREAD          (MAXPRI * 3)
64 #define PRIBASE_NULL            (MAXPRI * 4)
65
66 #define NQS     32                      /* 32 run queues. */
67 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
68 #define PPQMASK (PPQ - 1)
69
70 /*
71  * NICEPPQ      - number of nice units per priority queue
72  *
73  * ESTCPUPPQ    - number of estcpu units per priority queue
74  * ESTCPUMAX    - number of estcpu units
75  */
76 #define NICEPPQ         2
77 #define ESTCPUPPQ       512
78 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
79 #define BATCHMAX        (ESTCPUFREQ * 30)
80 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
81
82 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
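/*
 * Derived values, for reference: with MAXPRI = 128 and NQS = 32 the
 * defines above give PPQ = 4 priority levels per queue (PPQMASK = 3),
 * ESTCPUMAX = 512 * 32 = 16384 estcpu units, and BATCHMAX amounts to
 * 30 seconds' worth of scheduler ticks at ESTCPUFREQ ticks per second.
 */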
83
84 TAILQ_HEAD(rq, lwp);
85
86 #define lwp_priority    lwp_usdata.bsd4.priority
87 #define lwp_rqindex     lwp_usdata.bsd4.rqindex
88 #define lwp_estcpu      lwp_usdata.bsd4.estcpu
89 #define lwp_batch       lwp_usdata.bsd4.batch
90 #define lwp_rqtype      lwp_usdata.bsd4.rqtype
91
92 static void bsd4_acquire_curproc(struct lwp *lp);
93 static void bsd4_release_curproc(struct lwp *lp);
94 static void bsd4_select_curproc(globaldata_t gd);
95 static void bsd4_setrunqueue(struct lwp *lp);
96 static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
97                                 sysclock_t cpstamp);
98 static void bsd4_recalculate_estcpu(struct lwp *lp);
99 static void bsd4_resetpriority(struct lwp *lp);
100 static void bsd4_forking(struct lwp *plp, struct lwp *lp);
101 static void bsd4_exiting(struct lwp *lp, struct proc *);
102 static void bsd4_uload_update(struct lwp *lp);
103 static void bsd4_yield(struct lwp *lp);
104
105 #ifdef SMP
106 static void bsd4_need_user_resched_remote(void *dummy);
107 static int bsd4_batchy_looser_pri_test(struct lwp* lp);
108 static struct lwp *bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp);
109 static void bsd4_kick_helper(struct lwp *lp);
110 #endif
111 static struct lwp *bsd4_chooseproc_locked(struct lwp *chklp);
112 static void bsd4_remrunqueue_locked(struct lwp *lp);
113 static void bsd4_setrunqueue_locked(struct lwp *lp);
114
115 struct usched usched_bsd4 = {
116         { NULL },
117         "bsd4", "Original DragonFly Scheduler",
118         NULL,                   /* default registration */
119         NULL,                   /* default deregistration */
120         bsd4_acquire_curproc,
121         bsd4_release_curproc,
122         bsd4_setrunqueue,
123         bsd4_schedulerclock,
124         bsd4_recalculate_estcpu,
125         bsd4_resetpriority,
126         bsd4_forking,
127         bsd4_exiting,
128         bsd4_uload_update,
129         NULL,                   /* setcpumask not supported */
130         bsd4_yield
131 };
132
133 struct usched_bsd4_pcpu {
134         struct thread   helper_thread;
135         short           rrcount;
136         short           upri;
137         struct lwp      *uschedcp;
138         struct lwp      *old_uschedcp;
139 #ifdef SMP
140         cpu_node_t      *cpunode;
141 #endif
142 };
143
144 typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;
145
146 /*
147  * We have NQS (32) run queues per scheduling class.  For the normal
148  * class, there are 128 priorities scaled onto these 32 queues.  New
149  * processes are added to the last entry in each queue, and processes
150  * are selected for running by taking them from the head and maintaining
151  * a simple FIFO arrangement.  Realtime and Idle priority processes have
152  * an explicit 0-31 priority which maps directly onto their class queue
153  * index.  When a queue has something in it, the corresponding bit is
154  * set in the queuebits variable, allowing a single read to determine
155  * the state of all 32 queues and then a ffs() to find the first busy
156  * queue.
157  */
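/*
 * Illustration of the mapping (using the defines above): a normal-class
 * lwp whose priority works out to PRIBASE_NORMAL + 53 lands on queue
 * index (53 & PRIMASK) / PPQ = 13, so bit 13 of bsd4_queuebits stays set
 * while that queue is non-empty and bsfl() on the word returns 13 once
 * the realtime queues have been drained.
 */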
158 static struct rq bsd4_queues[NQS];
159 static struct rq bsd4_rtqueues[NQS];
160 static struct rq bsd4_idqueues[NQS];
161 static u_int32_t bsd4_queuebits;
162 static u_int32_t bsd4_rtqueuebits;
163 static u_int32_t bsd4_idqueuebits;
164 static cpumask_t bsd4_curprocmask = -1; /* currently running a user process */
165 static cpumask_t bsd4_rdyprocmask;      /* ready to accept a user process */
166 static int       bsd4_runqcount;
167 #ifdef SMP
168 static volatile int bsd4_scancpu;
169 #endif
170 static struct spinlock bsd4_spin;
171 static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
172 static struct sysctl_ctx_list usched_bsd4_sysctl_ctx;
173 static struct sysctl_oid *usched_bsd4_sysctl_tree;
174
175 /* Debug info exposed through debug.* sysctl */
176
177 SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD,
178            &bsd4_runqcount, 0,
179            "Number of run queues");
180
181 static int usched_bsd4_debug = -1;
182 SYSCTL_INT(_debug, OID_AUTO, bsd4_scdebug, CTLFLAG_RW,
183            &usched_bsd4_debug, 0,
184            "Print debug information for this pid");
185
186 static int usched_bsd4_pid_debug = -1;
187 SYSCTL_INT(_debug, OID_AUTO, bsd4_pid_debug, CTLFLAG_RW,
188            &usched_bsd4_pid_debug, 0,
189            "Print KTR debug information for this pid");
190
191 /* Tuning usched_bsd4 - configurable through kern.usched_bsd4.* */
192 #ifdef SMP
193 static int usched_bsd4_smt = 0;
194 static int usched_bsd4_cache_coherent = 0;
195 static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */
196 static int usched_bsd4_queue_checks = 5;
197 static int usched_bsd4_stick_to_level = 0;
198 static long usched_bsd4_kicks;
199 #endif
200 static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
201 static int usched_bsd4_decay = 8;
202 static int usched_bsd4_batch_time = 10;
203
204 /* KTR debug printings */
205
206 KTR_INFO_MASTER_EXTERN(usched);
207
208 #if !defined(KTR_USCHED_BSD4)
209 #define KTR_USCHED_BSD4 KTR_ALL
210 #endif
211
212 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_urw, 0,
213     "USCHED_BSD4(bsd4_acquire_curproc in user_resched_wanted "
214     "after release: pid %d, cpuid %d, curr_cpuid %d)",
215     pid_t pid, int cpuid, int curr);
216 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_before_loop, 0,
217     "USCHED_BSD4(bsd4_acquire_curproc before loop: pid %d, cpuid %d, "
218     "curr_cpuid %d)",
219     pid_t pid, int cpuid, int curr);
220 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_not, 0,
221     "USCHED_BSD4(bsd4_acquire_curproc couldn't acquire after "
222     "bsd4_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
223     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
224 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_switch, 0,
225     "USCHED_BSD4(bsd4_acquire_curproc after lwkt_switch: pid %d, "
226     "cpuid %d, curr_cpuid %d)",
227     pid_t pid, int cpuid, int curr);
228
229 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_release_curproc, 0,
230     "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
231     "cpuid %d, curr_cpuid %d)",
232     pid_t pid, int cpuid, int curr);
233
234 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_select_curproc, 0,
235     "USCHED_BSD4(bsd4_select_curproc after select: pid %d, "
236     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
237     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
238
239 #ifdef SMP
240 KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_false, 0,
241     "USCHED_BSD4(batchy_looser_pri_test false: pid %d, "
242     "cpuid %d, verify_mask %lu)",
243     pid_t pid, int cpuid, cpumask_t mask);
244 KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_true, 0,
245     "USCHED_BSD4(batchy_looser_pri_test true: pid %d, "
246     "cpuid %d, verify_mask %lu)",
247     pid_t pid, int cpuid, cpumask_t mask);
248
249 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_smt, 0,
250     "USCHED_BSD4(bsd4_setrunqueue free cpus smt: pid %d, cpuid %d, "
251     "mask %lu, curr_cpuid %d)",
252     pid_t pid, int cpuid, cpumask_t mask, int curr);
253 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_non_smt, 0,
254     "USCHED_BSD4(bsd4_setrunqueue free cpus check non_smt: pid %d, "
255     "cpuid %d, mask %lu, curr_cpuid %d)",
256     pid_t pid, int cpuid, cpumask_t mask, int curr);
257 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_rc, 0,
258     "USCHED_BSD4(bsd4_setrunqueue running cpus check: pid %d, "
259     "cpuid %d, mask %lu, curr_cpuid %d)",
260     pid_t pid, int cpuid, cpumask_t mask, int curr);
261 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found, 0,
262     "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
263     "mask %lu, found_cpuid %d, curr_cpuid %d)",
264     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
265 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_not_found, 0,
266     "USCHED_BSD4(bsd4_setrunqueue not found cpu: pid %d, cpuid %d, "
267     "try_cpuid %d, curr_cpuid %d)",
268     pid_t pid, int cpuid, int try_cpuid, int curr);
269 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found_best_cpuid, 0,
270     "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
271     "mask %lu, found_cpuid %d, curr_cpuid %d)",
272     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
273 #endif
274
275 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc, 0,
276     "USCHED_BSD4(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
277     pid_t pid, int old_cpuid, int curr);
278 #ifdef SMP
279 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc, 0,
280     "USCHED_BSD4(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
281     pid_t pid, int old_cpuid, int curr);
282 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_not_good, 0,
283     "USCHED_BSD4(chooseproc_cc not good: pid %d, old_cpumask %lu, "
284     "sibling_mask %lu, curr_cpumask %lu)",
285     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
286 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_elected, 0,
287     "USCHED_BSD4(chooseproc_cc elected: pid %d, old_cpumask %lu, "
288     "sibling_mask %lu, curr_cpumask: %lu)",
289     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
290
291 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process, 0,
292     "USCHED_BSD4(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
293     int id, pid_t pid, int cpuid);
294 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_process, 0,
295     "USCHED_BSD4(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
296     int id, pid_t pid, int cpuid);
297 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0,
298     "USCHED_BSD4(sched_thread %d no process found; tmpmask %lu)",
299     int id, cpumask_t tmpmask);
300 #endif
301
302 /*
303  * Initialize the run queues at boot time.
304  */
305 static void
306 bsd4_rqinit(void *dummy)
307 {
308         int i;
309
310         spin_init(&bsd4_spin);
311         for (i = 0; i < NQS; i++) {
312                 TAILQ_INIT(&bsd4_queues[i]);
313                 TAILQ_INIT(&bsd4_rtqueues[i]);
314                 TAILQ_INIT(&bsd4_idqueues[i]);
315         }
316         atomic_clear_cpumask(&bsd4_curprocmask, 1);
317 }
318 SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, bsd4_rqinit, NULL)
319
320 /*
321  * BSD4_ACQUIRE_CURPROC
322  *
323  * This function is called when the kernel intends to return to userland.
324  * It is responsible for making the thread the current designated userland
325  * thread for this cpu, blocking if necessary.
326  *
327  * The kernel has already depressed our LWKT priority so we must not switch
328  * until we have either assigned or disposed of the thread.
329  *
330  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
331  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
332  * occur, this function is called only under very controlled circumstances.
333  *
334  * MPSAFE
335  */
336 static void
337 bsd4_acquire_curproc(struct lwp *lp)
338 {
339         globaldata_t gd;
340         bsd4_pcpu_t dd;
341         thread_t td;
342 #if 0
343         struct lwp *olp;
344 #endif
345
346         /*
347          * Make sure we aren't sitting on a tsleep queue.
348          */
349         td = lp->lwp_thread;
350         crit_enter_quick(td);
351         if (td->td_flags & TDF_TSLEEPQ)
352                 tsleep_remove(td);
353         bsd4_recalculate_estcpu(lp);
354
355         /*
356          * If a reschedule was requested give another thread the
357          * driver's seat.
358          */
359         if (user_resched_wanted()) {
360                 clear_user_resched();
361                 bsd4_release_curproc(lp);
362
363                 KTR_COND_LOG(usched_bsd4_acquire_curproc_urw,
364                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
365                     lp->lwp_proc->p_pid,
366                     lp->lwp_thread->td_gd->gd_cpuid,
367                     mycpu->gd_cpuid);
368         }
369
370         /*
371          * Loop until we are the current user thread
372          */
373         gd = mycpu;
374         dd = &bsd4_pcpu[gd->gd_cpuid];
375
376         KTR_COND_LOG(usched_bsd4_acquire_curproc_before_loop,
377             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
378             lp->lwp_proc->p_pid,
379             lp->lwp_thread->td_gd->gd_cpuid,
380             gd->gd_cpuid);
381
382         do {
383                 /*
384                  * Process any pending events and higher priority threads.
385                  */
386                 lwkt_yield();
387
388                 /*
389                  * Become the currently scheduled user thread for this cpu
390                  * if we can do so trivially.
391                  *
392                  * We can steal another thread's current thread designation
393          * on this cpu, since if we are running here that other thread
394          * must not be, so we can safely deschedule it.
395                  */
396                 if (dd->uschedcp == lp) {
397                         /*
398                          * We are already the current lwp (hot path).
399                          */
400                         dd->upri = lp->lwp_priority;
401                 } else if (dd->uschedcp == NULL) {
402                         /*
403                          * We can trivially become the current lwp.
404                          */
405                         atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
406                         dd->uschedcp = lp;
407                         dd->upri = lp->lwp_priority;
408                 } else if (dd->upri > lp->lwp_priority) {
409                         /*
410                          * We can steal the current cpu's lwp designation
411                          * away simply by replacing it.  The other thread
412                          * will stall when it tries to return to userland.
413                          */
414                         dd->uschedcp = lp;
415                         dd->upri = lp->lwp_priority;
416                         /*
417                         lwkt_deschedule(olp->lwp_thread);
418                         bsd4_setrunqueue(olp);
419                         */
420                 } else {
421                         /*
422          * We cannot become the current lwp; place the lp
423                          * on the bsd4 run-queue and deschedule ourselves.
424                          *
425                          * When we are reactivated we will have another
426                          * chance.
427                          */
428                         lwkt_deschedule(lp->lwp_thread);
429
430                         bsd4_setrunqueue(lp);
431
432                         KTR_COND_LOG(usched_bsd4_acquire_curproc_not,
433                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
434                             lp->lwp_proc->p_pid,
435                             lp->lwp_thread->td_gd->gd_cpuid,
436                             dd->uschedcp->lwp_proc->p_pid,
437                             gd->gd_cpuid);
438
439
440                         lwkt_switch();
441
442                         /*
443                          * Reload after a switch or setrunqueue/switch possibly
444                          * moved us to another cpu.
445                          */
446                         gd = mycpu;
447                         dd = &bsd4_pcpu[gd->gd_cpuid];
448
449                         KTR_COND_LOG(usched_bsd4_acquire_curproc_switch,
450                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
451                             lp->lwp_proc->p_pid,
452                             lp->lwp_thread->td_gd->gd_cpuid,
453                             gd->gd_cpuid);
454                 }
455         } while (dd->uschedcp != lp);
456
457         crit_exit_quick(td);
458         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
459 }
460
461 /*
462  * BSD4_RELEASE_CURPROC
463  *
464  * This routine detaches the current thread from the userland scheduler,
465  * usually because the thread needs to run or block in the kernel (at
466  * kernel priority) for a while.
467  *
468  * This routine is also responsible for selecting a new thread to
469  * make the current thread.
470  *
471  * NOTE: This implementation differs from the dummy example in that
472  * bsd4_select_curproc() is able to select the current process, whereas
473  * dummy_select_curproc() is not able to select the current process.
474  * This means we have to NULL out uschedcp.
475  *
476  * Additionally, note that we may already be on a run queue if releasing
477  * via the lwkt_switch() in bsd4_setrunqueue().
478  *
479  * MPSAFE
480  */
481
482 static void
483 bsd4_release_curproc(struct lwp *lp)
484 {
485         globaldata_t gd = mycpu;
486         bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
487
488         if (dd->uschedcp == lp) {
489                 crit_enter();
490                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
491
492                 KTR_COND_LOG(usched_bsd4_release_curproc,
493                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
494                     lp->lwp_proc->p_pid,
495                     lp->lwp_thread->td_gd->gd_cpuid,
496                     gd->gd_cpuid);
497
498                 dd->uschedcp = NULL;    /* don't let lp be selected */
499                 dd->upri = PRIBASE_NULL;
500                 atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
501                 dd->old_uschedcp = lp;  /* used only for KTR debug prints */
502                 bsd4_select_curproc(gd);
503                 crit_exit();
504         }
505 }
506
507 /*
508  * BSD4_SELECT_CURPROC
509  *
510  * Select a new current process for this cpu and clear any pending user
511  * reschedule request.  The cpu currently has no current process.
512  *
513  * This routine is also responsible for equal-priority round-robining,
514  * typically triggered from bsd4_schedulerclock().  In our dummy example
515  * all the 'user' threads are LWKT scheduled all at once and we just
516  * call lwkt_switch().
517  *
518  * The calling process is not on the queue and cannot be selected.
519  *
520  * MPSAFE
521  */
522 static
523 void
524 bsd4_select_curproc(globaldata_t gd)
525 {
526         bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
527         struct lwp *nlp;
528         int cpuid = gd->gd_cpuid;
529
530         crit_enter_gd(gd);
531
532         spin_lock(&bsd4_spin);
533 #ifdef SMP
534         if(usched_bsd4_cache_coherent)
535                 nlp = bsd4_chooseproc_locked_cache_coherent(dd->uschedcp);
536         else
537 #endif
538                 nlp = bsd4_chooseproc_locked(dd->uschedcp);
539
540         if (nlp) {
541
542                 KTR_COND_LOG(usched_bsd4_select_curproc,
543                     nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
544                     nlp->lwp_proc->p_pid,
545                     nlp->lwp_thread->td_gd->gd_cpuid,
546                     dd->old_uschedcp->lwp_proc->p_pid,
547                     dd->old_uschedcp->lwp_thread->td_gd->gd_cpuid,
548                     gd->gd_cpuid);
549
550                 atomic_set_cpumask(&bsd4_curprocmask, CPUMASK(cpuid));
551                 dd->upri = nlp->lwp_priority;
552                 dd->uschedcp = nlp;
553                 dd->rrcount = 0;                /* reset round robin */
554                 spin_unlock(&bsd4_spin);
555 #ifdef SMP
556                 lwkt_acquire(nlp->lwp_thread);
557 #endif
558                 lwkt_schedule(nlp->lwp_thread);
559         } else {
560                 spin_unlock(&bsd4_spin);
561         }
562
563 #if 0
564         } else if (bsd4_runqcount && (bsd4_rdyprocmask & CPUMASK(cpuid))) {
565                 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
566                 spin_unlock(&bsd4_spin);
567                 lwkt_schedule(&dd->helper_thread);
568         } else {
569                 spin_unlock(&bsd4_spin);
570         }
571 #endif
572         crit_exit_gd(gd);
573 }
574 #ifdef SMP
575
576 /*
577  * batchy_looser_pri_test() - determine if a process is batchy or not
578  * relative to the other processes running in the system
579  */
580 static int
581 bsd4_batchy_looser_pri_test(struct lwp* lp)
582 {
583         cpumask_t mask;
584         bsd4_pcpu_t other_dd;
585         int cpu;
586
587         /* Current running processes */
588         mask = bsd4_curprocmask & smp_active_mask
589             & usched_global_cpumask;
590
591         while(mask) {
592                 cpu = BSFCPUMASK(mask);
593                 other_dd = &bsd4_pcpu[cpu];
594                 if (other_dd->upri - lp->lwp_priority > usched_bsd4_upri_affinity * PPQ) {
595
596                         KTR_COND_LOG(usched_batchy_test_false,
597                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
598                             lp->lwp_proc->p_pid,
599                             lp->lwp_thread->td_gd->gd_cpuid,
600                             (unsigned long)mask);
601
602                         return 0;
603                 }
604                 mask &= ~CPUMASK(cpu);
605         }
606
607         KTR_COND_LOG(usched_batchy_test_true,
608             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
609             lp->lwp_proc->p_pid,
610             lp->lwp_thread->td_gd->gd_cpuid,
611             (unsigned long)mask);
612
613         return 1;
614 }
615
616 #endif
617 /*
618  *
619  * BSD4_SETRUNQUEUE
620  *
621  * Place the specified lwp on the user scheduler's run queue.  This routine
622  * must be called with the thread descheduled.  The lwp must be runnable.
623  *
624  * The thread may be the current thread as a special case.
625  *
626  * MPSAFE
627  */
628 static void
629 bsd4_setrunqueue(struct lwp *lp)
630 {
631         globaldata_t gd;
632         bsd4_pcpu_t dd;
633 #ifdef SMP
634         int cpuid;
635         cpumask_t mask;
636         cpumask_t tmpmask;
637 #endif
638
639         /*
640          * First validate the process state relative to the current cpu.
641          * We don't need the spinlock for this, just a critical section.
642          * We are in control of the process.
643          */
644         crit_enter();
645         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
646         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
647             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
648              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
649         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
650
651         /*
652          * Note: gd and dd are relative to the target thread's last cpu,
653          * NOT our current cpu.
654          */
655         gd = lp->lwp_thread->td_gd;
656         dd = &bsd4_pcpu[gd->gd_cpuid];
657
658         /*
659          * This process is not supposed to be scheduled anywhere or assigned
660          * as the current process anywhere.  Assert the condition.
661          */
662         KKASSERT(dd->uschedcp != lp);
663
664 #ifndef SMP
665         /*
666          * If we are not SMP we do not have a scheduler helper to kick
667          * and must directly activate the process if none are scheduled.
668          *
669          * This is really only an issue when bootstrapping init since
670          * the caller in all other cases will be a user process, and
671          * even if released (dd->uschedcp == NULL), that process will
672          * kickstart the scheduler when it returns to user mode from
673          * the kernel.
674          */
675         if (dd->uschedcp == NULL) {
676                 atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
677                 dd->uschedcp = lp;
678                 dd->upri = lp->lwp_priority;
679                 lwkt_schedule(lp->lwp_thread);
680                 crit_exit();
681                 return;
682         }
683 #endif
684
685 #ifdef SMP
686         /*
687          * XXX fixme.  Could be part of a remrunqueue/setrunqueue
688          * operation when the priority is recalculated, so TDF_MIGRATING
689          * may already be set.
690          */
691         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
692                 lwkt_giveaway(lp->lwp_thread);
693 #endif
694
695         /*
696          * We lose control of lp the moment we release the spinlock after
697          * having placed lp on the queue.  i.e. another cpu could pick it
698          * up and it could exit, or its priority could be further adjusted,
699          * or something like that.
700          */
701         spin_lock(&bsd4_spin);
702         bsd4_setrunqueue_locked(lp);
703         lp->lwp_rebal_ticks = sched_ticks;
704
705 #ifdef SMP
706         /*
707          * Kick the scheduler helper on one of the other cpu's
708          * and request a reschedule if appropriate.
709          *
710          * NOTE: We check all cpus whose rdyprocmask is set.  First we
711          *       look for cpus without designated lps, then we look for
712          *       cpus with designated lps with a worse priority than our
713          *       process.
714          */
715         ++bsd4_scancpu;
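        /*
         * The scans below start at the bsd4_scancpu rotor position: the
         * ~(CPUMASK(cpuid) - 1) masking keeps only cpus at or above the
         * rotor, so BSFCPUMASK() picks the first candidate at or after
         * it and wraps to the lowest remaining candidate otherwise.
         * This spreads wakeups around instead of always favoring low
         * cpu ids.
         */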
716
717         if (usched_bsd4_smt) {
718
719                 /*
720                  * SMT heuristic - Try to schedule on a free physical core.
721                  * If no free physical core is found, then choose the one that has
722                  * an interactive thread.
723                  */
724
725                 int best_cpuid = -1;
726                 int min_prio = MAXPRI * MAXPRI;
727                 int sibling;
728
729                 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
730                 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
731                     smp_active_mask & usched_global_cpumask;
732
733                 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_smt,
734                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
735                     lp->lwp_proc->p_pid,
736                     lp->lwp_thread->td_gd->gd_cpuid,
737                     (unsigned long)mask,
738                     mycpu->gd_cpuid);
739
740                 while (mask) {
741                         tmpmask = ~(CPUMASK(cpuid) - 1);
742                         if (mask & tmpmask)
743                                 cpuid = BSFCPUMASK(mask & tmpmask);
744                         else
745                                 cpuid = BSFCPUMASK(mask);
746                         gd = globaldata_find(cpuid);
747                         dd = &bsd4_pcpu[cpuid];
748
749                         if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
750                                 if (dd->cpunode->parent_node->members & ~dd->cpunode->members & mask) {
751
752                                         KTR_COND_LOG(usched_bsd4_setrunqueue_found,
753                                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
754                                             lp->lwp_proc->p_pid,
755                                             lp->lwp_thread->td_gd->gd_cpuid,
756                                             (unsigned long)mask,
757                                             cpuid,
758                                             mycpu->gd_cpuid);
759
760                                         goto found;
761                                 } else {
762                                         sibling = BSFCPUMASK(dd->cpunode->parent_node->members &
763                                             ~dd->cpunode->members);
764                                         if (min_prio > bsd4_pcpu[sibling].upri) {
765                                                 min_prio = bsd4_pcpu[sibling].upri;
766                                                 best_cpuid = cpuid;
767                                         }
768                                 }
769                         }
770                         mask &= ~CPUMASK(cpuid);
771                 }
772
773                 if (best_cpuid != -1) {
774                         cpuid = best_cpuid;
775                         gd = globaldata_find(cpuid);
776                         dd = &bsd4_pcpu[cpuid];
777
778                         KTR_COND_LOG(usched_bsd4_setrunqueue_found_best_cpuid,
779                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
780                             lp->lwp_proc->p_pid,
781                             lp->lwp_thread->td_gd->gd_cpuid,
782                             (unsigned long)mask,
783                             cpuid,
784                             mycpu->gd_cpuid);
785
786                         goto found;
787                 }
788         } else {
789                 /* Fallback to the original heuristic */
790                 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
791                 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
792                        smp_active_mask & usched_global_cpumask;
793
794                 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_non_smt,
795                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
796                     lp->lwp_proc->p_pid,
797                     lp->lwp_thread->td_gd->gd_cpuid,
798                     (unsigned long)mask,
799                     mycpu->gd_cpuid);
800
801                 while (mask) {
802                         tmpmask = ~(CPUMASK(cpuid) - 1);
803                         if (mask & tmpmask)
804                                 cpuid = BSFCPUMASK(mask & tmpmask);
805                         else
806                                 cpuid = BSFCPUMASK(mask);
807                         gd = globaldata_find(cpuid);
808                         dd = &bsd4_pcpu[cpuid];
809
810                         if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
811
812                                 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
813                                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
814                                     lp->lwp_proc->p_pid,
815                                     lp->lwp_thread->td_gd->gd_cpuid,
816                                     (unsigned long)mask,
817                                     cpuid,
818                                     mycpu->gd_cpuid);
819
820                                 goto found;
821                         }
822                         mask &= ~CPUMASK(cpuid);
823                 }
824         }
825
826         /*
827          * Then cpus which might have a currently running lp
828          */
829         mask = bsd4_curprocmask & bsd4_rdyprocmask &
830                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
831
832         KTR_COND_LOG(usched_bsd4_setrunqueue_rc,
833             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
834             lp->lwp_proc->p_pid,
835             lp->lwp_thread->td_gd->gd_cpuid,
836             (unsigned long)mask,
837             mycpu->gd_cpuid);
838
839         while (mask) {
840                 tmpmask = ~(CPUMASK(cpuid) - 1);
841                 if (mask & tmpmask)
842                         cpuid = BSFCPUMASK(mask & tmpmask);
843                 else
844                         cpuid = BSFCPUMASK(mask);
845                 gd = globaldata_find(cpuid);
846                 dd = &bsd4_pcpu[cpuid];
847
848                 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
849
850                         KTR_COND_LOG(usched_bsd4_setrunqueue_found,
851                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
852                             lp->lwp_proc->p_pid,
853                             lp->lwp_thread->td_gd->gd_cpuid,
854                             (unsigned long)mask,
855                             cpuid,
856                             mycpu->gd_cpuid);
857
858                         goto found;
859                 }
860                 mask &= ~CPUMASK(cpuid);
861         }
862
863         /*
864          * If we cannot find a suitable cpu we reload from bsd4_scancpu
865          * and round-robin.  Other cpus will pick up as they release their
866          * current lwps or become ready.
867          *
868          * Avoid a degenerate system lockup case if usched_global_cpumask
869          * is set to 0 or otherwise does not cover lwp_cpumask.
870          *
871          * We only kick the target helper thread in this case, we do not
872          * set the user resched flag because
873          */
874         cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
875         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0) {
876                 cpuid = 0;
877         }
878         gd = globaldata_find(cpuid);
879         dd = &bsd4_pcpu[cpuid];
880
881         KTR_COND_LOG(usched_bsd4_setrunqueue_not_found,
882             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
883             lp->lwp_proc->p_pid,
884             lp->lwp_thread->td_gd->gd_cpuid,
885             cpuid,
886             mycpu->gd_cpuid);
887
888 found:
889         if (gd == mycpu) {
890                 spin_unlock(&bsd4_spin);
891                 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
892                         if (dd->uschedcp == NULL) {
893                                 wakeup_mycpu(&dd->helper_thread);
894                         } else {
895                                 need_user_resched();
896                         }
897                 }
898         } else {
899                 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
900                 spin_unlock(&bsd4_spin);
901                 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
902                         lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
903                 else
904                         wakeup(&dd->helper_thread);
905         }
906 #else
907         /*
908          * Request a reschedule if appropriate.
909          */
910         spin_unlock(&bsd4_spin);
911         if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
912                 need_user_resched();
913         }
914 #endif
915         crit_exit();
916 }
917
918 /*
919  * This routine is called from a systimer IPI.  It MUST be MP-safe and
920  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
921  * each cpu.
922  *
923  * This routine is called on every sched tick.  If the currently running
924  * thread belongs to this scheduler it will be called with a non-NULL lp,
925  * otherwise it will be called with a NULL lp.
926  *
927  * MPSAFE
928  */
929 static
930 void
931 bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
932 {
933         globaldata_t gd = mycpu;
934         bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
935
936         /*
937          * No impl if no lp running.
938          */
939         if (lp == NULL)
940                 return;
941
942         /*
943          * Do we need to round-robin?  We round-robin 10 times a second.
944          * This should only occur for cpu-bound batch processes.
945          */
946         if (++dd->rrcount >= usched_bsd4_rrinterval) {
947                 dd->rrcount = 0;
948                 need_user_resched();
949         }
950
951         /*
952          * Adjust estcpu upward using a real time equivalent calculation.
953          */
954         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
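        /*
         * For scale: this bump is ESTCPUMAX / ESTCPUFREQ (plus one) per
         * tick, at ESTCPUFREQ ticks per second, so a thread that runs
         * continuously accumulates roughly ESTCPUMAX worth of estcpu
         * each second and would saturate quickly if the decay in
         * bsd4_recalculate_estcpu() did not pull it back down.
         */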
955
956         /*
957          * Spinlocks also hold a critical section so there should not be
958          * any active.
959          */
960         KKASSERT(gd->gd_spinlocks_wr == 0);
961
962         bsd4_resetpriority(lp);
963 }
964
965 /*
966  * Called from acquire and from kern_synch's one-second timer (one of the
967  * callout helper threads) with a critical section held.
968  *
969  * Decay p_estcpu based on the number of ticks we haven't been running
970  * and our p_nice.  As the load increases each process observes a larger
971  * number of idle ticks (because other processes are running in them).
972  * This observation leads to a larger correction which tends to make the
973  * system more 'batchy'.
974  *
975  * Note that no recalculation occurs for a process which sleeps and wakes
976  * up in the same tick.  That is, a system doing thousands of context
977  * switches per second will still only do serious estcpu calculations
978  * ESTCPUFREQ times per second.
979  *
980  * MPSAFE
981  */
982 static
983 void
984 bsd4_recalculate_estcpu(struct lwp *lp)
985 {
986         globaldata_t gd = mycpu;
987         sysclock_t cpbase;
988         sysclock_t ttlticks;
989         int estcpu;
990         int decay_factor;
991
992         /*
993          * We have to subtract periodic to get the last schedclock
994          * timeout time, otherwise we would get the upcoming timeout.
995          * Keep in mind that a process can migrate between cpus and
996          * while the scheduler clock should be very close, boundary
997          * conditions could lead to a small negative delta.
998          */
999         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
1000
1001         if (lp->lwp_slptime > 1) {
1002                 /*
1003                  * Too much time has passed, do a coarse correction.
1004                  */
1005                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
1006                 bsd4_resetpriority(lp);
1007                 lp->lwp_cpbase = cpbase;
1008                 lp->lwp_cpticks = 0;
1009                 lp->lwp_batch -= ESTCPUFREQ;
1010                 if (lp->lwp_batch < 0)
1011                         lp->lwp_batch = 0;
1012         } else if (lp->lwp_cpbase != cpbase) {
1013                 /*
1014                  * Adjust estcpu if we are in a different tick.  Don't waste
1015                  * time if we are in the same tick.
1016                  *
1017                  * First calculate the number of ticks in the measurement
1018                  * interval.  The ttlticks calculation can wind up 0 due to
1019                  * a bug in the handling of lwp_slptime  (as yet not found),
1020                  * so make sure we do not get a divide by 0 panic.
1021                  */
1022                 ttlticks = (cpbase - lp->lwp_cpbase) /
1023                            gd->gd_schedclock.periodic;
1024                 if (ttlticks < 0) {
1025                         ttlticks = 0;
1026                         lp->lwp_cpbase = cpbase;
1027                 }
1028                 if (ttlticks == 0)
1029                         return;
1030                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
1031
1032                 /*
1033                  * Calculate the percentage of one cpu used factoring in ncpus
1034                  * and the load and adjust estcpu.  Handle degenerate cases
1035                  * by adding 1 to bsd4_runqcount.
1036                  *
1037                  * estcpu is scaled by ESTCPUMAX.
1038                  *
1039                  * bsd4_runqcount is the excess number of user processes
1040                  * that cannot be immediately scheduled to cpus.  We want
1041                  * to count these as running to avoid range compression
1042                  * in the base calculation (which is the actual percentage
1043                  * of one cpu used).
1044                  */
1045                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
1046                          (bsd4_runqcount + ncpus) / (ncpus * ttlticks);
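                /*
                 * Worked example: on a 4-cpu system with nothing extra
                 * queued (bsd4_runqcount == 0), an lwp that consumed
                 * half of the ttlticks in the interval gets
                 * estcpu = (0.5 * ESTCPUMAX) * (0 + 4) / 4 = ESTCPUMAX / 2,
                 * i.e. exactly the 50% threshold used by the batch
                 * adjustment below.
                 */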
1047
1048                 /*
1049                  * If estcpu is > 50% we become more batch-like
1050                  * If estcpu is <= 50% we become less batch-like
1051                  *
1052                  * It takes 30 cpu seconds to traverse the entire range.
1053                  */
1054                 if (estcpu > ESTCPUMAX / 2) {
1055                         lp->lwp_batch += ttlticks;
1056                         if (lp->lwp_batch > BATCHMAX)
1057                                 lp->lwp_batch = BATCHMAX;
1058                 } else {
1059                         lp->lwp_batch -= ttlticks;
1060                         if (lp->lwp_batch < 0)
1061                                 lp->lwp_batch = 0;
1062                 }
1063
1064                 if (usched_bsd4_debug == lp->lwp_proc->p_pid) {
1065                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
1066                                 lp->lwp_proc->p_pid, lp,
1067                                 estcpu, lp->lwp_estcpu,
1068                                 lp->lwp_batch,
1069                                 lp->lwp_cpticks, ttlticks);
1070                 }
1071
1072                 /*
1073                  * Adjust lp->lwp_estcpu.  The decay factor determines how
1074                  * quickly lwp_estcpu collapses to its realtime calculation.
1075                  * A slower collapse gives us a more accurate number but
1076                  * can cause a cpu hog to eat too much cpu before the
1077                  * scheduler decides to downgrade it.
1078                  *
1079                  * NOTE: p_nice is accounted for in bsd4_resetpriority(),
1080                  *       and not here, but we must still ensure that a
1081                  *       cpu-bound nice -20 process does not completely
1082                  *       override a cpu-bound nice +20 process.
1083                  *
1084                  * NOTE: We must use ESTCPULIM() here to deal with any
1085                  *       overshoot.
1086                  */
1087                 decay_factor = usched_bsd4_decay;
1088                 if (decay_factor < 1)
1089                         decay_factor = 1;
1090                 if (decay_factor > 1024)
1091                         decay_factor = 1024;
1092
1093                 lp->lwp_estcpu = ESTCPULIM(
1094                         (lp->lwp_estcpu * decay_factor + estcpu) /
1095                         (decay_factor + 1));
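                /*
                 * With the default usched_bsd4_decay of 8 this is a
                 * weighted average that moves lwp_estcpu one ninth of
                 * the way toward the instantaneous estcpu on every
                 * recalculation; raising the sysctl weights history
                 * more heavily, lowering it tracks recent behavior
                 * faster.
                 */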
1096
1097                 if (usched_bsd4_debug == lp->lwp_proc->p_pid)
1098                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
1099                 bsd4_resetpriority(lp);
1100                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1101                 lp->lwp_cpticks = 0;
1102         }
1103 }
1104
1105 /*
1106  * Compute the priority of a process when running in user mode.
1107  * Arrange to reschedule if the resulting priority is better
1108  * than that of the current process.
1109  *
1110  * This routine may be called with any process.
1111  *
1112  * This routine is called by fork1() for initial setup with the process
1113  * off the run queue, and also may be called normally with the process on or
1114  * off the run queue.
1115  *
1116  * MPSAFE
1117  */
1118 static void
1119 bsd4_resetpriority(struct lwp *lp)
1120 {
1121         bsd4_pcpu_t dd;
1122         int newpriority;
1123         u_short newrqtype;
1124         int reschedcpu;
1125         int checkpri;
1126         int estcpu;
1127
1128         /*
1129          * Calculate the new priority and queue type
1130          */
1131         crit_enter();
1132         spin_lock(&bsd4_spin);
1133
1134         newrqtype = lp->lwp_rtprio.type;
1135
1136         switch(newrqtype) {
1137         case RTP_PRIO_REALTIME:
1138         case RTP_PRIO_FIFO:
1139                 newpriority = PRIBASE_REALTIME +
1140                              (lp->lwp_rtprio.prio & PRIMASK);
1141                 break;
1142         case RTP_PRIO_NORMAL:
1143                 /*
1144                  * Detune estcpu based on batchiness.  lwp_batch ranges
1145                  * from 0 to  BATCHMAX.  Limit estcpu for the sake of
1146                  * the priority calculation to between 50% and 100%.
1147                  */
1148                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
1149                          (BATCHMAX * 2);
1150
1151                 /*
1152                  * p_nice piece         Adds (0-40) * 2         0-80
1153                  * estcpu               Adds 16384  * 4 / 512   0-128
1154                  */
1155                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1156                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1157                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1158                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1159                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
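                /*
                 * Example with the defines above: a nice 0 lwp whose
                 * detuned estcpu is ESTCPUMAX / 4 (4096) contributes
                 * 40 + 4096 * 4 / 512 = 72 raw units, which the scale
                 * factor of 128 / 210 reduces to 43, yielding
                 * PRIBASE_NORMAL + 43 and run queue index 43 / 4 = 10.
                 */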
1160                 break;
1161         case RTP_PRIO_IDLE:
1162                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1163                 break;
1164         case RTP_PRIO_THREAD:
1165                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1166                 break;
1167         default:
1168                 panic("Bad RTP_PRIO %d", newrqtype);
1169                 /* NOT REACHED */
1170         }
1171
1172         /*
1173          * The newpriority incorporates the queue type so do a simple masked
1174          * check to determine if the process has moved to another queue.  If
1175          * it has, and it is currently on a run queue, then move it.
1176          *
1177          * td_upri has normal sense (higher values are more desirable), so
1178          * negate it.
1179          */
1180         lp->lwp_thread->td_upri = -(newpriority & ~PPQMASK);
1181         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1182                 lp->lwp_priority = newpriority;
1183                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1184                         bsd4_remrunqueue_locked(lp);
1185                         lp->lwp_rqtype = newrqtype;
1186                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1187                         bsd4_setrunqueue_locked(lp);
1188                         checkpri = 1;
1189                 } else {
1190                         lp->lwp_rqtype = newrqtype;
1191                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1192                         checkpri = 0;
1193                 }
1194                 reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
1195         } else {
1196                 lp->lwp_priority = newpriority;
1197                 reschedcpu = -1;
1198                 checkpri = 1;
1199         }
1200
1201         /*
1202          * Determine if we need to reschedule the target cpu.  This only
1203          * occurs if the LWP is already on a scheduler queue, which means
1204          * that idle cpu notification has already occurred.  At most we
1205          * need only issue a need_user_resched() on the appropriate cpu.
1206          *
1207          * The LWP may be owned by a CPU different from the current one,
1208          * in which case dd->uschedcp may be modified without an MP lock
1209          * or a spinlock held.  The worst that happens is that the code
1210          * below causes a spurious need_user_resched() on the target CPU
1211          * and dd->upri to be wrong for a short period of time, both of
1212          * which are harmless.
1213          *
1214          * If checkpri is 0 we are adjusting the priority of the current
1215          * process, possibly higher (less desirable), so ignore the upri
1216          * check which will fail in that case.
1217          */
1218         if (reschedcpu >= 0) {
1219                 dd = &bsd4_pcpu[reschedcpu];
1220                 if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) &&
1221                     (checkpri == 0 ||
1222                      (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1223 #ifdef SMP
1224                         if (reschedcpu == mycpu->gd_cpuid) {
1225                                 spin_unlock(&bsd4_spin);
1226                                 need_user_resched();
1227                         } else {
1228                                 spin_unlock(&bsd4_spin);
1229                                 atomic_clear_cpumask(&bsd4_rdyprocmask,
1230                                                      CPUMASK(reschedcpu));
1231                                 lwkt_send_ipiq(lp->lwp_thread->td_gd,
1232                                                bsd4_need_user_resched_remote,
1233                                                NULL);
1234                         }
1235 #else
1236                         spin_unlock(&bsd4_spin);
1237                         need_user_resched();
1238 #endif
1239                 } else {
1240                         spin_unlock(&bsd4_spin);
1241                 }
1242         } else {
1243                 spin_unlock(&bsd4_spin);
1244         }
1245         crit_exit();
1246 }
1247
1248 /*
1249  * MPSAFE
1250  */
1251 static
1252 void
1253 bsd4_yield(struct lwp *lp)
1254 {
1255 #if 0
1256         /* FUTURE (or something similar) */
1257         switch(lp->lwp_rqtype) {
1258         case RTP_PRIO_NORMAL:
1259                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1260                 break;
1261         default:
1262                 break;
1263         }
1264 #endif
1265         need_user_resched();
1266 }
1267
1268 /*
1269  * Called from fork1() when a new child process is being created.
1270  *
1271  * Give the child process an initial estcpu that is more batchy than
1272  * its parent and dock the parent for the fork (but do not
1273  * reschedule the parent).  This comprises the main part of our batch
1274  * detection heuristic for both parallel forking and sequential execs.
1275  *
1276  * XXX lwp should be "spawning" instead of "forking"
1277  *
1278  * MPSAFE
1279  */
1280 static void
1281 bsd4_forking(struct lwp *plp, struct lwp *lp)
1282 {
1283         /*
1284          * Put the child 4 queue slots (out of 32) higher than the parent
1285          * (less desirable than the parent).
1286          */
1287         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1288
1289         /*
1290          * The batch status of children always starts out centerline
1291          * and will inch-up or inch-down as appropriate.  It takes roughly
1292          * ~15 seconds of >50% cpu to hit the limit.
1293          */
1294         lp->lwp_batch = BATCHMAX / 2;
1295
1296         /*
1297          * Dock the parent a cost for the fork, protecting us from fork
1298          * bombs.  If the parent is forking quickly make the child more
1299          * batchy.
1300          */
1301         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
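        /*
         * In raw units this is ESTCPUPPQ / 16 = 32 estcpu per fork, so
         * on the order of sixteen forks in quick succession cost the
         * parent roughly one full queue (ESTCPUPPQ = 512 estcpu) before
         * the decay catches up.
         */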
1302 }
1303
1304 /*
1305  * Called when a lwp is being removed from this scheduler, typically
1306  * during lwp_exit().
1307  */
1308 static void
1309 bsd4_exiting(struct lwp *lp, struct proc *child_proc)
1310 {
1311 }
1312
1313 static void
1314 bsd4_uload_update(struct lwp *lp)
1315 {
1316 }
1317
1318 /*
1319  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1320  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1321  * has a better or equal priority than the process that would otherwise be
1322  * chosen, NULL is returned.
1323  *
1324  * Until we fix the RUNQ code the chklp test has to be strict or we may
1325  * bounce between processes trying to acquire the current process designation.
1326  *
1327  * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
1328  *          left intact through the entire routine.
1329  */
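/*
 * For instance, with PPQ = 4 a current designation at priority 130 is
 * kept even though a queued lwp sits at 128, because the chklp check in
 * the body requires the queued lwp to be better by at least a full
 * queue (130 < 128 + PPQ).  This is what keeps the designation sticky.
 */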
1330 static
1331 struct lwp *
1332 bsd4_chooseproc_locked(struct lwp *chklp)
1333 {
1334         struct lwp *lp;
1335         struct rq *q;
1336         u_int32_t *which, *which2;
1337         u_int32_t pri;
1338         u_int32_t rtqbits;
1339         u_int32_t tsqbits;
1340         u_int32_t idqbits;
1341         cpumask_t cpumask;
1342
1343         rtqbits = bsd4_rtqueuebits;
1344         tsqbits = bsd4_queuebits;
1345         idqbits = bsd4_idqueuebits;
1346         cpumask = mycpu->gd_cpumask;
1347
1348
1349 #ifdef SMP
1350 again:
1351 #endif
1352         if (rtqbits) {
1353                 pri = bsfl(rtqbits);
1354                 q = &bsd4_rtqueues[pri];
1355                 which = &bsd4_rtqueuebits;
1356                 which2 = &rtqbits;
1357         } else if (tsqbits) {
1358                 pri = bsfl(tsqbits);
1359                 q = &bsd4_queues[pri];
1360                 which = &bsd4_queuebits;
1361                 which2 = &tsqbits;
1362         } else if (idqbits) {
1363                 pri = bsfl(idqbits);
1364                 q = &bsd4_idqueues[pri];
1365                 which = &bsd4_idqueuebits;
1366                 which2 = &idqbits;
1367         } else {
1368                 return NULL;
1369         }
1370         lp = TAILQ_FIRST(q);
1371         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1372
1373 #ifdef SMP
1374         while ((lp->lwp_cpumask & cpumask) == 0) {
1375                 lp = TAILQ_NEXT(lp, lwp_procq);
1376                 if (lp == NULL) {
1377                         *which2 &= ~(1 << pri);
1378                         goto again;
1379                 }
1380         }
1381 #endif
1382
1383         /*
1384          * If the passed lwp <chklp> is reasonably close to the selected
1385          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1386          *
1387  * Note that we must err on the side of <chklp> to avoid bouncing
1388          * between threads in the acquire code.
1389          */
1390         if (chklp) {
1391                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1392                         return(NULL);
1393         }
1394
1395 #ifdef SMP
1396         /*
1397          * If the chosen lwp does not reside on this cpu spend a few
1398          * cycles looking for a better candidate at the same priority level.
1399          * This is a fallback check, setrunqueue() tries to wakeup the
1400          * correct cpu and is our front-line affinity.
1401          */
1402         if (lp->lwp_thread->td_gd != mycpu &&
1403             (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
1404         ) {
1405                 if (chklp->lwp_thread->td_gd == mycpu) {
1406                         lp = chklp;
1407                 }
1408         }
1409 #endif
1410
1411         KTR_COND_LOG(usched_chooseproc,
1412             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1413             lp->lwp_proc->p_pid,
1414             lp->lwp_thread->td_gd->gd_cpuid,
1415             mycpu->gd_cpuid);
1416
1417         TAILQ_REMOVE(q, lp, lwp_procq);
1418         --bsd4_runqcount;
1419         if (TAILQ_EMPTY(q))
1420                 *which &= ~(1 << pri);
1421         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1422         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1423
1424         return lp;
1425 }
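
/*
 * Illustrative sketch (not part of the scheduler): the queue selection above
 * keeps one status bit per run queue and uses bsfl() to find the lowest set
 * bit, i.e. the best-priority non-empty queue.  A stand-alone user-space
 * analogue using the gcc/clang builtin (the names below are hypothetical):
 */
#if 0   /* illustrative only, never compiled */
#include <stdio.h>
#include <stdint.h>

/* Pick the best (lowest-index) non-empty queue from a 32-bit status word. */
static int
pick_queue(uint32_t queuebits)
{
        if (queuebits == 0)
                return (-1);                    /* nothing runnable */
        return (__builtin_ctz(queuebits));      /* same answer as bsfl() */
}

int
main(void)
{
        uint32_t bits = (1U << 7) | (1U << 12);

        printf("best queue: %d\n", pick_queue(bits));   /* prints 7 */
        return (0);
}
#endif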
1426
1427 #ifdef SMP
1428 /*
1429  * chooseproc() - with a cache coherence heuristic.  Try to pull a process
1430  * that has its home on the current CPU.  If the process does not have its
1431  * home here and is a batchy one (see bsd4_batchy_looser_pri_test), we can
1432  * wait for a sched_tick; maybe its home cpu will become free and we can
1433  * pull it in there.  Either way we cannot wait for more than one tick.
1434  * Once that tick has expired we pull the process in, no matter what.
1435  */
1436 static
1437 struct lwp *
1438 bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp)
1439 {
1440         struct lwp *lp;
1441         struct rq *q;
1442         u_int32_t *which, *which2;
1443         u_int32_t pri;
1444         u_int32_t checks;
1445         u_int32_t rtqbits;
1446         u_int32_t tsqbits;
1447         u_int32_t idqbits;
1448         cpumask_t cpumask;
1449
1450         struct lwp *min_level_lwp = NULL;
1451         struct rq *min_q = NULL;
1452         cpumask_t siblings;
1453         cpu_node_t *cpunode = NULL;
1454         u_int32_t min_level = MAXCPU;   /* number of levels < MAXCPU */
1455         u_int32_t *min_which = NULL;
1456         u_int32_t min_pri = 0;
1457         u_int32_t level = 0;
1458
1459         rtqbits = bsd4_rtqueuebits;
1460         tsqbits = bsd4_queuebits;
1461         idqbits = bsd4_idqueuebits;
1462         cpumask = mycpu->gd_cpumask;
1463
1464         /* Get the mask corresponding to the sysctl-configured level */
1465         cpunode = bsd4_pcpu[mycpu->gd_cpuid].cpunode;
1466         level = usched_bsd4_stick_to_level;
1467         while (level) {
1468                 cpunode = cpunode->parent_node;
1469                 level--;
1470         }
1471         /* The cpus which can elect a process */
1472         siblings = cpunode->members;
1473         checks = 0;
1474
1475 again:
1476         if (rtqbits) {
1477                 pri = bsfl(rtqbits);
1478                 q = &bsd4_rtqueues[pri];
1479                 which = &bsd4_rtqueuebits;
1480                 which2 = &rtqbits;
1481         } else if (tsqbits) {
1482                 pri = bsfl(tsqbits);
1483                 q = &bsd4_queues[pri];
1484                 which = &bsd4_queuebits;
1485                 which2 = &tsqbits;
1486         } else if (idqbits) {
1487                 pri = bsfl(idqbits);
1488                 q = &bsd4_idqueues[pri];
1489                 which = &bsd4_idqueuebits;
1490                 which2 = &idqbits;
1491         } else {
1492                 /*
1493                  * No more left and we didn't reach the checks limit.
1494                  */
1495                 bsd4_kick_helper(min_level_lwp);
1496                 return NULL;
1497         }
1498         lp = TAILQ_FIRST(q);
1499         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1500
1501         /*
1502          * Limit the number of checks/queue to a configurable value to
1503          * minimize contention (we are inside a locked region).
1504          */
1505         while (checks < usched_bsd4_queue_checks) {
1506                 if ((lp->lwp_cpumask & cpumask) == 0 ||
1507                     ((siblings & lp->lwp_thread->td_gd->gd_cpumask) == 0 &&
1508                       (lp->lwp_rebal_ticks == sched_ticks ||
1509                        lp->lwp_rebal_ticks == (int)(sched_ticks - 1)) &&
1510                       bsd4_batchy_looser_pri_test(lp))) {
1511
1512                         KTR_COND_LOG(usched_chooseproc_cc_not_good,
1513                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1514                             lp->lwp_proc->p_pid,
1515                             (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1516                             (unsigned long)siblings,
1517                             (unsigned long)cpumask);
1518
1519                         cpunode = bsd4_pcpu[lp->lwp_thread->td_gd->gd_cpuid].cpunode;
1520                         level = 0;
1521                         while (cpunode) {
1522                                 if (cpunode->members & cpumask)
1523                                         break;
1524                                 cpunode = cpunode->parent_node;
1525                                 level++;
1526                         }
1527                         if (level < min_level ||
1528                             (level == min_level && min_level_lwp &&
1529                              lp->lwp_priority < min_level_lwp->lwp_priority)) {
1530                                 bsd4_kick_helper(min_level_lwp);
1531                                 min_level_lwp = lp;
1532                                 min_level = level;
1533                                 min_q = q;
1534                                 min_which = which;
1535                                 min_pri = pri;
1536                         } else {
1537                                 bsd4_kick_helper(lp);
1538                         }
1539                         lp = TAILQ_NEXT(lp, lwp_procq);
1540                         if (lp == NULL) {
1541                                 *which2 &= ~(1 << pri);
1542                                 goto again;
1543                         }
1544                 } else {
1545                         KTR_COND_LOG(usched_chooseproc_cc_elected,
1546                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1547                             lp->lwp_proc->p_pid,
1548                             (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1549                             (unsigned long)siblings,
1550                             (unsigned long)cpumask);
1551
1552                         goto found;
1553                 }
1554                 ++checks;
1555         }
1556
1557         /*
1558          * Checks exhausted, we tried to defer too many threads, so schedule
1559          * the best of the worst.
1560          */
1561         lp = min_level_lwp;
1562         q = min_q;
1563         which = min_which;
1564         pri = min_pri;
1565         KASSERT(lp, ("chooseproc: at least the first lp was good"));
1566
1567 found:
1568
1569         /*
1570          * If the passed lwp <chklp> is reasonably close to the selected
1571          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1572          *
1573  * Note that we must err on the side of <chklp> to avoid bouncing
1574          * between threads in the acquire code.
1575          */
1576         if (chklp) {
1577                 if (chklp->lwp_priority < lp->lwp_priority + PPQ) {
1578                         bsd4_kick_helper(lp);
1579                         return(NULL);
1580                 }
1581         }
1582
1583         KTR_COND_LOG(usched_chooseproc_cc,
1584             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1585             lp->lwp_proc->p_pid,
1586             lp->lwp_thread->td_gd->gd_cpuid,
1587             mycpu->gd_cpuid);
1588
1589         TAILQ_REMOVE(q, lp, lwp_procq);
1590         --bsd4_runqcount;
1591         if (TAILQ_EMPTY(q))
1592                 *which &= ~(1 << pri);
1593         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1594         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1595
1596         return lp;
1597 }
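
/*
 * Illustrative sketch (not part of the scheduler): the "level" computed in
 * the loop above is the number of parent hops up the cpu topology tree until
 * the candidate's node covers the current cpu; a smaller level means a closer
 * cache neighbour.  A minimal user-space model of that walk (the struct and
 * names below are hypothetical, with a flat 64-bit mask standing in for
 * cpumask_t):
 */
#if 0   /* illustrative only, never compiled */
#include <stddef.h>
#include <stdint.h>

struct topo_node {
        struct topo_node *parent;       /* NULL at the root */
        uint64_t          members;      /* cpus covered by this node */
};

/*
 * Count how many levels up from 'node' we must go before 'cpumask'
 * (the current cpu) is covered by the node's member set.
 */
static unsigned int
topo_distance(const struct topo_node *node, uint64_t cpumask)
{
        unsigned int level = 0;

        while (node != NULL) {
                if (node->members & cpumask)
                        break;
                node = node->parent;
                level++;
        }
        return (level);
}
#endif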
1598
1599 /*
1600  * If we aren't willing to schedule a ready process on our cpu, give its
1601  * target cpu a kick rather than wait for the next tick.
1602  *
1603  * Called with bsd4_spin held.
1604  */
1605 static
1606 void
1607 bsd4_kick_helper(struct lwp *lp)
1608 {
1609         globaldata_t gd;
1610         bsd4_pcpu_t dd;
1611
1612         if (lp == NULL)
1613                 return;
1614         gd = lp->lwp_thread->td_gd;
1615         dd = &bsd4_pcpu[gd->gd_cpuid];
1616         if ((smp_active_mask & usched_global_cpumask &
1617             bsd4_rdyprocmask & gd->gd_cpumask) == 0) {
1618                 return;
1619         }
1620         ++usched_bsd4_kicks;
1621         atomic_clear_cpumask(&bsd4_rdyprocmask, gd->gd_cpumask);
1622         if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
1623                 lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
1624         } else {
1625                 wakeup(&dd->helper_thread);
1626         }
1627 }
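
/*
 * Illustrative sketch (not part of the scheduler): the (upri & ~PPQMASK)
 * comparison above deliberately compares priorities at whole-queue
 * granularity (PPQ priority units per queue), so two lwps that map to the
 * same queue slot compare equal and no IPI is sent.  A stand-alone model of
 * that rounding (the helper name below is hypothetical):
 */
#if 0   /* illustrative only, never compiled */
#define MAXPRI  128
#define NQS     32
#define PPQ     (MAXPRI / NQS)          /* 4 priority units per queue */
#define PPQMASK (PPQ - 1)

/* Nonzero if 'a' is strictly worse than 'b' once rounded to queue slots. */
static int
worse_queue_slot(int a, int b)
{
        return ((a & ~PPQMASK) > (b & ~PPQMASK));
}
#endif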
1628
1629 static
1630 void
1631 bsd4_need_user_resched_remote(void *dummy)
1632 {
1633         globaldata_t gd = mycpu;
1634         bsd4_pcpu_t  dd = &bsd4_pcpu[gd->gd_cpuid];
1635
1636         need_user_resched();
1637
1638         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1639         wakeup_mycpu(&dd->helper_thread);
1640 }
1641
1642 #endif
1643
1644 /*
1645  * bsd4_remrunqueue_locked() removes a given process from the run queue
1646  * that it is on, clearing the queue busy bit if it becomes empty.
1647  *
1648  * Note that the user process scheduler is different from the LWKT scheduler.
1649  * The user process scheduler only manages user processes but it uses LWKT
1650  * underneath, and a user process operating in the kernel will often be
1651  * 'released' from our management.
1652  *
1653  * MPSAFE - bsd4_spin must be held exclusively on call
1654  */
1655 static void
1656 bsd4_remrunqueue_locked(struct lwp *lp)
1657 {
1658         struct rq *q;
1659         u_int32_t *which;
1660         u_int8_t pri;
1661
1662         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1663         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1664         --bsd4_runqcount;
1665         KKASSERT(bsd4_runqcount >= 0);
1666
1667         pri = lp->lwp_rqindex;
1668         switch(lp->lwp_rqtype) {
1669         case RTP_PRIO_NORMAL:
1670                 q = &bsd4_queues[pri];
1671                 which = &bsd4_queuebits;
1672                 break;
1673         case RTP_PRIO_REALTIME:
1674         case RTP_PRIO_FIFO:
1675                 q = &bsd4_rtqueues[pri];
1676                 which = &bsd4_rtqueuebits;
1677                 break;
1678         case RTP_PRIO_IDLE:
1679                 q = &bsd4_idqueues[pri];
1680                 which = &bsd4_idqueuebits;
1681                 break;
1682         default:
1683                 panic("remrunqueue: invalid rtprio type");
1684                 /* NOT REACHED */
1685         }
1686         TAILQ_REMOVE(q, lp, lwp_procq);
1687         if (TAILQ_EMPTY(q)) {
1688                 KASSERT((*which & (1 << pri)) != 0,
1689                         ("remrunqueue: remove from empty queue"));
1690                 *which &= ~(1 << pri);
1691         }
1692 }
1693
1694 /*
1695  * bsd4_setrunqueue_locked()
1696  *
1697  * Add a process whose rqtype and rqindex have previously been calculated
1698  * onto the appropriate run queue.  The caller decides whether the addition
1699  * requires a reschedule and on which cpu; this routine only inserts the lwp.
1700  *
1701  * NOTE: Lower priorities are better priorities.
1702  *
1703  * MPSAFE - bsd4_spin must be held exclusively on call
1704  */
1705 static void
1706 bsd4_setrunqueue_locked(struct lwp *lp)
1707 {
1708         struct rq *q;
1709         u_int32_t *which;
1710         int pri;
1711
1712         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1713         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1714         ++bsd4_runqcount;
1715
1716         pri = lp->lwp_rqindex;
1717
1718         switch(lp->lwp_rqtype) {
1719         case RTP_PRIO_NORMAL:
1720                 q = &bsd4_queues[pri];
1721                 which = &bsd4_queuebits;
1722                 break;
1723         case RTP_PRIO_REALTIME:
1724         case RTP_PRIO_FIFO:
1725                 q = &bsd4_rtqueues[pri];
1726                 which = &bsd4_rtqueuebits;
1727                 break;
1728         case RTP_PRIO_IDLE:
1729                 q = &bsd4_idqueues[pri];
1730                 which = &bsd4_idqueuebits;
1731                 break;
1732         default:
1733                 panic("setrunqueue: invalid rtprio type");
1734                 /* NOT REACHED */
1735         }
1736
1737         /*
1738          * Add to the correct queue and set the appropriate bit.  If no
1739          * lower-priority (i.e. better) processes are in the queue then we
1740          * want a reschedule; calculate the best cpu for the job.
1741          *
1742          * Always run reschedules on the LWP's original cpu.
1743          */
1744         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1745         *which |= 1 << pri;
1746 }
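
/*
 * Illustrative sketch (not part of the scheduler): the two helpers above
 * maintain one TAILQ per priority index plus a bitmask whose bit N mirrors
 * "queue N is non-empty".  A stand-alone user-space model of that
 * bookkeeping (the type and function names below are hypothetical):
 */
#if 0   /* illustrative only, never compiled */
#include <sys/queue.h>
#include <stdint.h>

struct task {
        TAILQ_ENTRY(task) link;
        int               rqindex;      /* 0..31 */
};
TAILQ_HEAD(taskq, task);

static struct taskq     queues[32];
static uint32_t         queuebits;      /* bit N set => queues[N] non-empty */

/* Call once before the first enqueue(). */
static void
queues_init(void)
{
        int i;

        for (i = 0; i < 32; ++i)
                TAILQ_INIT(&queues[i]);
}

static void
enqueue(struct task *t)
{
        TAILQ_INSERT_TAIL(&queues[t->rqindex], t, link);
        queuebits |= 1U << t->rqindex;
}

static void
dequeue(struct task *t)
{
        TAILQ_REMOVE(&queues[t->rqindex], t, link);
        if (TAILQ_EMPTY(&queues[t->rqindex]))
                queuebits &= ~(1U << t->rqindex);
}
#endif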
1747
1748 #ifdef SMP
1749
1750 /*
1751  * For SMP systems a user scheduler helper thread is created for each
1752  * cpu and is used to allow one cpu to wake up another for the purposes of
1753  * scheduling userland threads from setrunqueue().
1754  *
1755  * UP systems do not need the helper since there is only one cpu.
1756  *
1757  * We can't use the idle thread for this because we might block.
1758  * Additionally, doing things this way allows us to HLT idle cpus
1759  * on MP systems.
1760  *
1761  * MPSAFE
1762  */
1763 static void
1764 sched_thread(void *dummy)
1765 {
1766     globaldata_t gd;
1767     bsd4_pcpu_t  dd;
1768     bsd4_pcpu_t  tmpdd;
1769     struct lwp *nlp;
1770     cpumask_t mask;
1771     int cpuid;
1772     cpumask_t tmpmask;
1773     int tmpid;
1774
1775     gd = mycpu;
1776     cpuid = gd->gd_cpuid;       /* doesn't change */
1777     mask = gd->gd_cpumask;      /* doesn't change */
1778     dd = &bsd4_pcpu[cpuid];
1779
1780     /*
1781      * Since we are woken up only when no user processes are scheduled
1782      * on a cpu, we can run at an ultra low priority.
1783      */
1784     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1785
1786     tsleep(&dd->helper_thread, 0, "sched_thread_sleep", 0);
1787
1788     for (;;) {
1789         /*
1790          * We use the LWKT deschedule-interlock trick to avoid racing
1791          * bsd4_rdyprocmask.  This means we cannot block through to the
1792          * manual lwkt_switch() call we make below.
1793          */
1794         crit_enter_gd(gd);
1795         tsleep_interlock(&dd->helper_thread, 0);
1796         spin_lock(&bsd4_spin);
1797         atomic_set_cpumask(&bsd4_rdyprocmask, mask);
1798
1799         clear_user_resched();   /* This satisfies the reschedule request */
1800         dd->rrcount = 0;        /* Reset the round-robin counter */
1801
1802         if ((bsd4_curprocmask & mask) == 0) {
1803                 /*
1804                  * No thread is currently scheduled.
1805                  */
1806                 KKASSERT(dd->uschedcp == NULL);
1807                 if ((nlp = bsd4_chooseproc_locked(NULL)) != NULL) {
1808                         KTR_COND_LOG(usched_sched_thread_no_process,
1809                             nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1810                             gd->gd_cpuid,
1811                             nlp->lwp_proc->p_pid,
1812                             nlp->lwp_thread->td_gd->gd_cpuid);
1813
1814                         atomic_set_cpumask(&bsd4_curprocmask, mask);
1815                         dd->upri = nlp->lwp_priority;
1816                         dd->uschedcp = nlp;
1817                         dd->rrcount = 0;        /* reset round robin */
1818                         spin_unlock(&bsd4_spin);
1819                         lwkt_acquire(nlp->lwp_thread);
1820                         lwkt_schedule(nlp->lwp_thread);
1821                 } else {
1822                         spin_unlock(&bsd4_spin);
1823                 }
1824         } else if (bsd4_runqcount) {
1825                 if ((nlp = bsd4_chooseproc_locked(dd->uschedcp)) != NULL) {
1826                         KTR_COND_LOG(usched_sched_thread_process,
1827                             nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1828                             gd->gd_cpuid,
1829                             nlp->lwp_proc->p_pid,
1830                             nlp->lwp_thread->td_gd->gd_cpuid);
1831
1832                         dd->upri = nlp->lwp_priority;
1833                         dd->uschedcp = nlp;
1834                         dd->rrcount = 0;        /* reset round robin */
1835                         spin_unlock(&bsd4_spin);
1836                         lwkt_acquire(nlp->lwp_thread);
1837                         lwkt_schedule(nlp->lwp_thread);
1838                 } else {
1839                         /*
1840                          * CHAINING CONDITION TRAIN
1841                          *
1842                          * We could not deal with the scheduler wakeup
1843                          * request on this cpu, so locate a ready scheduler
1844                          * with no current lp assignment and chain to it.
1845                          *
1846                          * This ensures that a wakeup race which fails due
1847                          * to priority test does not leave other unscheduled
1848                          * cpus idle when the runqueue is not empty.
1849                          */
1850                         tmpmask = ~bsd4_curprocmask &
1851                                   bsd4_rdyprocmask & smp_active_mask;
1852                         if (tmpmask) {
1853                                 tmpid = BSFCPUMASK(tmpmask);
1854                                 tmpdd = &bsd4_pcpu[tmpid];
1855                                 atomic_clear_cpumask(&bsd4_rdyprocmask,
1856                                                      CPUMASK(tmpid));
1857                                 spin_unlock(&bsd4_spin);
1858                                 wakeup(&tmpdd->helper_thread);
1859                         } else {
1860                                 spin_unlock(&bsd4_spin);
1861                         }
1862
1863                         KTR_LOG(usched_sched_thread_no_process_found,
1864                                 gd->gd_cpuid, (unsigned long)tmpmask);
1865                 }
1866         } else {
1867                 /*
1868                  * The runq is empty.
1869                  */
1870                 spin_unlock(&bsd4_spin);
1871         }
1872
1873         /*
1874          * We're descheduled unless someone scheduled us.  Switch away.
1875          * Exiting the critical section will cause splz() to be called
1876          * for us if interrupts and such are pending.
1877          */
1878         crit_exit_gd(gd);
1879         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1880     }
1881 }
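
/*
 * Illustrative sketch (not part of the scheduler): the tsleep_interlock() /
 * tsleep(..., PINTERLOCKED, ...) pairing above closes the lost-wakeup window
 * between deciding to sleep and actually sleeping.  This is not the LWKT
 * mechanism itself; the analogous user-space pattern is the usual mutex plus
 * condition-variable predicate loop (the names below are hypothetical):
 */
#if 0   /* illustrative only, never compiled */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t  helper_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   helper_cond = PTHREAD_COND_INITIALIZER;
static bool             work_ready;

static void
helper_wait(void)
{
        pthread_mutex_lock(&helper_lock);
        while (!work_ready)             /* re-check after every wakeup */
                pthread_cond_wait(&helper_cond, &helper_lock);
        work_ready = false;
        pthread_mutex_unlock(&helper_lock);
}

static void
helper_kick(void)
{
        pthread_mutex_lock(&helper_lock);
        work_ready = true;
        pthread_cond_signal(&helper_cond);
        pthread_mutex_unlock(&helper_lock);
}
#endif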
1882
1883 /* sysctl stick_to_level parameter */
1884 static int
1885 sysctl_usched_bsd4_stick_to_level(SYSCTL_HANDLER_ARGS)
1886 {
1887         int error, new_val;
1888
1889         new_val = usched_bsd4_stick_to_level;
1890
1891         error = sysctl_handle_int(oidp, &new_val, 0, req);
1892         if (error != 0 || req->newptr == NULL)
1893                 return (error);
1894         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1895                 return (EINVAL);
1896         usched_bsd4_stick_to_level = new_val;
1897         return (0);
1898 }
1899
1900 /*
1901  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
1902  * been cleared by rqinit() and we should not mess with it further.
1903  */
1904 static void
1905 sched_thread_cpu_init(void)
1906 {
1907         int i;
1908         int cpuid;
1909         int smt_not_supported = 0;
1910         int cache_coherent_not_supported = 0;
1911
1912         if (bootverbose)
1913                 kprintf("Start scheduler helpers on cpus:\n");
1914
1915         sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
1916         usched_bsd4_sysctl_tree =
1917                 SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
1918                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1919                                 "usched_bsd4", CTLFLAG_RD, 0, "");
1920
1921         for (i = 0; i < ncpus; ++i) {
1922                 bsd4_pcpu_t dd = &bsd4_pcpu[i];
1923                 cpumask_t mask = CPUMASK(i);
1924
1925                 if ((mask & smp_active_mask) == 0)
1926                     continue;
1927
1928                 dd->cpunode = get_cpu_node_by_cpuid(i);
1929
1930                 if (dd->cpunode == NULL) {
1931                         smt_not_supported = 1;
1932                         cache_coherent_not_supported = 1;
1933                         if (bootverbose)
1934                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1935                                          "found for cpu\n", i);
1936                 } else {
1937                         switch (dd->cpunode->type) {
1938                         case THREAD_LEVEL:
1939                                 if (bootverbose)
1940                                         kprintf ("\tcpu%d - HyperThreading "
1941                                                  "available. Core siblings: ",
1942                                                  i);
1943                                 break;
1944                         case CORE_LEVEL:
1945                                 smt_not_supported = 1;
1946
1947                                 if (bootverbose)
1948                                         kprintf ("\tcpu%d - No HT available, "
1949                                                  "multi-core/physical "
1950                                                  "cpu. Physical siblings: ",
1951                                                  i);
1952                                 break;
1953                         case CHIP_LEVEL:
1954                                 smt_not_supported = 1;
1955
1956                                 if (bootverbose)
1957                                         kprintf ("\tcpu%d - No HT available, "
1958                                                  "single-core/physical cpu. "
1959                                                  "Package Siblings: ",
1960                                                  i);
1961                                 break;
1962                         default:
1963                                 /* Let's go for safe defaults here */
1964                                 smt_not_supported = 1;
1965                                 cache_coherent_not_supported = 1;
1966                                 if (bootverbose)
1967                                         kprintf ("\tcpu%d - Unknown cpunode->"
1968                                                  "type=%u. Siblings: ",
1969                                                  i,
1970                                                  (u_int)dd->cpunode->type);
1971                                 break;
1972                         }
1973
1974                         if (bootverbose) {
1975                                 if (dd->cpunode->parent_node != NULL) {
1976                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1977                                                 kprintf("cpu%d ", cpuid);
1978                                         kprintf("\n");
1979                                 } else {
1980                                         kprintf(" no siblings\n");
1981                                 }
1982                         }
1983                 }
1984
1985                 lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
1986                             0, i, "usched %d", i);
1987
1988                 /*
1989                  * Allow user scheduling on the target cpu.  cpu #0 has already
1990                  * been enabled in rqinit().
1991                  */
1992                 if (i)
1993                     atomic_clear_cpumask(&bsd4_curprocmask, mask);
1994                 atomic_set_cpumask(&bsd4_rdyprocmask, mask);
1995                 dd->upri = PRIBASE_NULL;
1996
1997         }
1998
1999         /* usched_bsd4 sysctl configurable parameters */
2000
2001         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2002                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2003                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2004                        &usched_bsd4_rrinterval, 0, "");
2005         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2006                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2007                        OID_AUTO, "decay", CTLFLAG_RW,
2008                        &usched_bsd4_decay, 0, "Extra decay when not running");
2009         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2010                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2011                        OID_AUTO, "batch_time", CTLFLAG_RW,
2012                        &usched_bsd4_batch_time, 0, "Min batch counter value");
2013         SYSCTL_ADD_LONG(&usched_bsd4_sysctl_ctx,
2014                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2015                        OID_AUTO, "kicks", CTLFLAG_RW,
2016                        &usched_bsd4_kicks, "Number of kickstarts");
2017
2018         /* Add enable/disable option for SMT scheduling if supported */
2019         if (smt_not_supported) {
2020                 usched_bsd4_smt = 0;
2021                 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
2022                                   SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2023                                   OID_AUTO, "smt", CTLFLAG_RD,
2024                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2025         } else {
2026                 usched_bsd4_smt = 1;
2027                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2028                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2029                                OID_AUTO, "smt", CTLFLAG_RW,
2030                                &usched_bsd4_smt, 0, "Enable SMT scheduling");
2031         }
2032
2033         /*
2034          * Add enable/disable option for cache coherent scheduling
2035          * if supported
2036          */
2037         if (cache_coherent_not_supported) {
2038                 usched_bsd4_cache_coherent = 0;
2039                 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
2040                                   SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2041                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2042                                   "NOT SUPPORTED", 0,
2043                                   "Cache coherence NOT SUPPORTED");
2044         } else {
2045                 usched_bsd4_cache_coherent = 1;
2046                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2047                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2048                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2049                                &usched_bsd4_cache_coherent, 0,
2050                                "Enable/Disable cache coherent scheduling");
2051
2052                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2053                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2054                                OID_AUTO, "upri_affinity", CTLFLAG_RW,
2055                                &usched_bsd4_upri_affinity, 1,
2056                                "Number of PPQs in user priority check");
2057
2058                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2059                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2060                                OID_AUTO, "queue_checks", CTLFLAG_RW,
2061                                &usched_bsd4_queue_checks, 5,
2062                                "LWPs to check from a queue before giving up");
2063
2064                 SYSCTL_ADD_PROC(&usched_bsd4_sysctl_ctx,
2065                                 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2066                                 OID_AUTO, "stick_to_level",
2067                                 CTLTYPE_INT | CTLFLAG_RW,
2068                                 NULL, sizeof usched_bsd4_stick_to_level,
2069                                 sysctl_usched_bsd4_stick_to_level, "I",
2070                                 "Stick a process to this level.  See the sysctl "
2071                                 "parameter hw.cpu_topology.level_description");
2072         }
2073 }
2074 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2075         sched_thread_cpu_init, NULL)
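
/*
 * Illustrative sketch (not part of the scheduler): the knobs registered above
 * land under kern.usched_bsd4.*.  Assuming the node and oid names created
 * here, a user-space program could read one of them with sysctlbyname(3)
 * roughly as follows (error handling trimmed):
 */
#if 0   /* illustrative only, never compiled */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int val;
        size_t len = sizeof(val);

        if (sysctlbyname("kern.usched_bsd4.rrinterval", &val, &len,
                         NULL, 0) == 0)
                printf("rrinterval: %d\n", val);
        return (0);
}
#endif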
2076
2077 #else /* No SMP options - just add the configurable parameters to sysctl */
2078
2079 static void
2080 sched_sysctl_tree_init(void)
2081 {
2082         sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
2083         usched_bsd4_sysctl_tree =
2084                 SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
2085                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2086                                 "usched_bsd4", CTLFLAG_RD, 0, "");
2087
2088         /* usched_bsd4 sysctl configurable parameters */
2089         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2090                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2091                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2092                        &usched_bsd4_rrinterval, 0, "");
2093         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2094                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2095                        OID_AUTO, "decay", CTLFLAG_RW,
2096                        &usched_bsd4_decay, 0, "Extra decay when not running");
2097         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2098                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2099                        OID_AUTO, "batch_time", CTLFLAG_RW,
2100                        &usched_bsd4_batch_time, 0, "Min batch counter value");
2101 }
2102 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2103         sched_sysctl_tree_init, NULL)
2104 #endif