276b6a5fd2223a00d4947497c7f5c2379843fa71
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
89
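/*
 * Worked example of the mapping (using the defines above): MAXPRI is 128
 * and NQS is 32, so PPQ is 4 priority levels per run queue.  A normal
 * class thread whose computed priority is PRIBASE_NORMAL + 53 lands on
 * run queue index (priority & PRIMASK) / PPQ = 53 / 4 = 13.
 */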
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_batch       lwp_usdata.dfly.batch
97 #define lwp_rqtype      lwp_usdata.dfly.rqtype
98 #define lwp_qcpu        lwp_usdata.dfly.qcpu
99
100 struct usched_dfly_pcpu {
101         struct spinlock spin;
102         struct thread   helper_thread;
103         short           rrcount;
104         short           upri;
105         int             uload;
106         struct lwp      *uschedcp;
107         struct rq       queues[NQS];
108         struct rq       rtqueues[NQS];
109         struct rq       idqueues[NQS];
110         u_int32_t       queuebits;
111         u_int32_t       rtqueuebits;
112         u_int32_t       idqueuebits;
113         int             runqcount;
114         int             cpuid;
115         cpumask_t       cpumask;
116 #ifdef SMP
117         cpu_node_t      *cpunode;
118 #endif
119 };
120
121 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
122
123 static void dfly_acquire_curproc(struct lwp *lp);
124 static void dfly_release_curproc(struct lwp *lp);
125 static void dfly_select_curproc(globaldata_t gd);
126 static void dfly_setrunqueue(struct lwp *lp);
127 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
128 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
129                                 sysclock_t cpstamp);
130 static void dfly_recalculate_estcpu(struct lwp *lp);
131 static void dfly_resetpriority(struct lwp *lp);
132 static void dfly_forking(struct lwp *plp, struct lwp *lp);
133 static void dfly_exiting(struct lwp *lp, struct proc *);
134 static void dfly_uload_update(struct lwp *lp);
135 static void dfly_yield(struct lwp *lp);
136 #ifdef SMP
137 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
138 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
139 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
140 #if 0
141 static void dfly_wakeup_random_helper(dfly_pcpu_t notdd);
142 #endif
143 #endif
144
145 #ifdef SMP
146 static void dfly_need_user_resched_remote(void *dummy);
147 #endif
148 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
149                                         int isremote);
150 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
151 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
152
153 struct usched usched_dfly = {
154         { NULL },
155         "dfly", "Original DragonFly Scheduler",
156         NULL,                   /* default registration */
157         NULL,                   /* default deregistration */
158         dfly_acquire_curproc,
159         dfly_release_curproc,
160         dfly_setrunqueue,
161         dfly_schedulerclock,
162         dfly_recalculate_estcpu,
163         dfly_resetpriority,
164         dfly_forking,
165         dfly_exiting,
166         dfly_uload_update,
167         NULL,                   /* setcpumask not supported */
168         dfly_yield
169 };
170
171 /*
172  * We have NQS (32) run queues per scheduling class.  For the normal
173  * class, there are 128 priorities scaled onto these 32 queues.  New
174  * processes are added to the last entry in each queue, and processes
175  * are selected for running by taking them from the head and maintaining
176  * a simple FIFO arrangement.  Realtime and Idle priority processes have
177  * an explicit 0-31 priority which maps directly onto their class queue
178  * index.  When a queue has something in it, the corresponding bit is
179  * set in the queuebits variable, allowing a single read to determine
180  * the state of all 32 queues and then a ffs() to find the first busy
181  * queue.
182  */
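/*
 * For example, a queuebits value of 0x00000410 means queues 4 and 10 are
 * occupied; a single bsfl(0x00000410) == 4 locates the most urgent busy
 * queue without walking all 32 queues.
 */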
183 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
184 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
185 #ifdef SMP
186 static volatile int dfly_scancpu;
187 #endif
188 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
189 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
190 static struct sysctl_oid *usched_dfly_sysctl_tree;
191
192 /* Debug info exposed through debug.* sysctl */
193
194 static int usched_dfly_debug = -1;
195 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
196            &usched_dfly_debug, 0,
197            "Print debug information for this pid");
198
199 static int usched_dfly_pid_debug = -1;
200 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
201            &usched_dfly_pid_debug, 0,
202            "Print KTR debug information for this pid");
203
204 static int usched_dfly_chooser = 0;
205 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
206            &usched_dfly_chooser, 0,
207            "Print debug information for chooser cpu selection");
208
209 /* Tuning usched_dfly - configurable through kern.usched_dfly.* */
210 #ifdef SMP
211 static int usched_dfly_smt = 0;
212 static int usched_dfly_cache_coherent = 0;
213 static int usched_dfly_weight1 = 25;    /* thread's current cpu */
214 static int usched_dfly_weight2 = 15;    /* synchronous peer's current cpu */
215 static int usched_dfly_weight3 = 10;    /* number of threads on queue */
216 static int usched_dfly_pull_enable = 1; /* allow pulls */
217 #endif
218 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
219 static int usched_dfly_decay = 8;
220 static int usched_dfly_batch_time = 10;
221
222 /* KTR debug printings */
223
224 KTR_INFO_MASTER(usched);
225
226 #if !defined(KTR_USCHED_DFLY)
227 #define KTR_USCHED_DFLY KTR_ALL
228 #endif
229
230 #if 0
231 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
232     "USCHED_DFLY(dfly_acquire_curproc in user_resched_wanted "
233     "after release: pid %d, cpuid %d, curr_cpuid %d)",
234     pid_t pid, int cpuid, int curr);
235 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
236     "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
237     "curr_cpuid %d)",
238     pid_t pid, int cpuid, int curr);
239 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
240     "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
241     "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
242     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
243 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
244     "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
245     "cpuid %d, curr_cpuid %d)",
246     pid_t pid, int cpuid, int curr);
247
248 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
249     "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
250     "cpuid %d, curr_cpuid %d)",
251     pid_t pid, int cpuid, int curr);
252
253 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
254     "USCHED_DFLY(dfly_select_curproc before select: pid %d, "
255     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
256     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
257
258 #ifdef SMP
259 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
260     "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
261     "cpuid %d, verify_mask %lu)",
262     pid_t pid, int cpuid, cpumask_t mask);
263 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
264     "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
265     "cpuid %d, verify_mask %lu)",
266     pid_t pid, int cpuid, cpumask_t mask);
267
268 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
269     "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
270     "mask %lu, curr_cpuid %d)",
271     pid_t pid, int cpuid, cpumask_t mask, int curr);
272 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
273     "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
274     "cpuid %d, mask %lu, curr_cpuid %d)",
275     pid_t pid, int cpuid, cpumask_t mask, int curr);
276 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
277     "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
278     "cpuid %d, mask %lu, curr_cpuid %d)",
279     pid_t pid, int cpuid, cpumask_t mask, int curr);
280 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
281     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
282     "mask %lu, found_cpuid %d, curr_cpuid %d)",
283     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
284 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
285     "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
286     "try_cpuid %d, curr_cpuid %d)",
287     pid_t pid, int cpuid, int try_cpuid, int curr);
288 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
289     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
290     "mask %lu, found_cpuid %d, curr_cpuid %d)",
291     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
292 #endif
293 #endif
294
295 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
296     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
297     pid_t pid, int old_cpuid, int curr);
298 #ifdef SMP
299 #if 0
300 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
301     "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
302     pid_t pid, int old_cpuid, int curr);
303 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
304     "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
305     "sibling_mask %lu, curr_cpumask %lu)",
306     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
307 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
308     "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
309     "sibling_mask %lu, curr_cpumask: %lu)",
310     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
311 #endif
312
313 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
314     "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
315     int id, pid_t pid, int cpuid);
316 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
317     "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
318     int id, pid_t pid, int cpuid);
319 #if 0
320 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
321     "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
322     int id, cpumask_t tmpmask);
323 #endif
324 #endif
325
326 /*
327  * DFLY_ACQUIRE_CURPROC
328  *
329  * This function is called when the kernel intends to return to userland.
330  * It is responsible for making the thread the current designated userland
331  * thread for this cpu, blocking if necessary.
332  *
333  * The kernel has already depressed our LWKT priority so we must not switch
334  * until we have either assigned or disposed of the thread.
335  *
336  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
337  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
338  * occur, this function is called only under very controlled circumstances.
339  */
340 static void
341 dfly_acquire_curproc(struct lwp *lp)
342 {
343         globaldata_t gd;
344         dfly_pcpu_t dd;
345         dfly_pcpu_t rdd;
346         thread_t td;
347
348         /*
349          * Make sure we aren't sitting on a tsleep queue.
350          */
351         td = lp->lwp_thread;
352         crit_enter_quick(td);
353         if (td->td_flags & TDF_TSLEEPQ)
354                 tsleep_remove(td);
355         dfly_recalculate_estcpu(lp);
356
357         /*
358          * If a reschedule was requested give another thread the
359          * driver's seat.
360          */
361         if (user_resched_wanted()) {
362                 clear_user_resched();
363                 dfly_release_curproc(lp);
364         }
365
366         /*
367          * Loop until we are the current user thread
368          */
369         gd = mycpu;
370         dd = &dfly_pcpu[gd->gd_cpuid];
371
372         do {
373                 /*
374                  * Process any pending events and higher priority threads.
375                  */
376                 lwkt_yield();
377
378                 /*
379                  * Become the currently scheduled user thread for this cpu
380                  * if we can do so trivially.
381                  *
382                  * We can steal another thread's current-thread designation
383                  * on this cpu because, if we are running, that other thread
384                  * must not be, so we can safely deschedule it.
385                  */
386                 if (dd->uschedcp == lp) {
387                         /*
388                          * We are already the current lwp (hot path).
389                          */
390                         dd->upri = lp->lwp_priority;
391                 } else if ((rdd = dfly_choose_best_queue(lp)) != dd) {
392                         lwkt_deschedule(lp->lwp_thread);
393                         dfly_setrunqueue_dd(rdd, lp);
394                         lwkt_switch();
395                         gd = mycpu;
396                         dd = &dfly_pcpu[gd->gd_cpuid];
397                 } else if (dd->uschedcp == NULL) {
398                         /*
399                          * We can trivially become the current lwp.
400                          */
401                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
402                         dd->uschedcp = lp;
403                         dd->upri = lp->lwp_priority;
404                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
405                 } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
406                         /*
407                          * We can steal the current cpu's lwp designation
408                          * away simply by replacing it.  The other thread
409                          * will stall when it tries to return to userland,
410                          * possibly rescheduling elsewhere when it calls
411                          * setrunqueue.
412                          */
413                         dd->uschedcp = lp;
414                         dd->upri = lp->lwp_priority;
415                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
416                 } else {
417                         /*
418                          * We cannot become the current lwp, place the lp
419                          * on the run-queue of this or another cpu and
420                          * deschedule ourselves.
421                          *
422                          * When we are reactivated we will have another
423                          * chance.
424                          *
425                          * Reload after a switch or setrunqueue/switch possibly
426                          * moved us to another cpu.
427                          */
428                         lwkt_deschedule(lp->lwp_thread);
429                         dfly_setrunqueue_dd(dd, lp);
430                         lwkt_switch();
431                         gd = mycpu;
432                         dd = &dfly_pcpu[gd->gd_cpuid];
433                 }
434         } while (dd->uschedcp != lp);
435
436         crit_exit_quick(td);
437         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
438 }
439
440 /*
441  * DFLY_RELEASE_CURPROC
442  *
443  * This routine detaches the current thread from the userland scheduler,
444  * usually because the thread needs to run or block in the kernel (at
445  * kernel priority) for a while.
446  *
447  * This routine is also responsible for selecting a new thread to
448  * become the current thread.
449  *
450  * NOTE: This implementation differs from the dummy example in that
451  * dfly_select_curproc() is able to select the current process, whereas
452  * dummy_select_curproc() is not.
453  * This means we have to NULL out uschedcp.
454  *
455  * Additionally, note that we may already be on a run queue if releasing
456  * via the lwkt_switch() in dfly_setrunqueue().
457  */
458 static void
459 dfly_release_curproc(struct lwp *lp)
460 {
461         globaldata_t gd = mycpu;
462         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
463
464         /*
465          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
466          * by wakeup().
467          */
468         lp->lwp_thread->td_wakefromcpu = gd->gd_cpuid;
469
470         if (dd->uschedcp == lp) {
471                 spin_lock(&dd->spin);
472                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
473
474                 dd->uschedcp = NULL;    /* don't let lp be selected */
475                 dd->upri = PRIBASE_NULL;
476                 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
477                 spin_unlock(&dd->spin);
478                 dfly_select_curproc(gd);
479         }
480 }
481
482 /*
483  * DFLY_SELECT_CURPROC
484  *
485  * Select a new current process for this cpu and clear any pending user
486  * reschedule request.  The cpu currently has no current process.
487  *
488  * This routine is also responsible for equal-priority round-robining,
489  * typically triggered from dfly_schedulerclock().  In our dummy example
490  * all the 'user' threads are LWKT scheduled all at once and we just
491  * call lwkt_switch().
492  *
493  * The calling process is not on the queue and cannot be selected.
494  */
495 static
496 void
497 dfly_select_curproc(globaldata_t gd)
498 {
499         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
500         struct lwp *nlp;
501         int cpuid = gd->gd_cpuid;
502
503         crit_enter_gd(gd);
504
505         spin_lock(&dd->spin);
506         nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 1);
507
508         if (nlp) {
509                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
510                 dd->upri = nlp->lwp_priority;
511                 dd->uschedcp = nlp;
512                 dd->rrcount = 0;                /* reset round robin */
513                 spin_unlock(&dd->spin);
514 #ifdef SMP
515                 lwkt_acquire(nlp->lwp_thread);
516 #endif
517                 lwkt_schedule(nlp->lwp_thread);
518         } else {
519                 spin_unlock(&dd->spin);
520         }
521         crit_exit_gd(gd);
522 }
523
524 /*
525  * Place the specified lwp on the user scheduler's run queue.  This routine
526  * must be called with the thread descheduled.  The lwp must be runnable.
527  * It must not be possible for anyone else to explicitly schedule this thread.
528  *
529  * The thread may be the current thread as a special case.
530  */
531 static void
532 dfly_setrunqueue(struct lwp *lp)
533 {
534         dfly_pcpu_t rdd;
535
536         /*
537          * First validate the process LWKT state.
538          */
539         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
540         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
541             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
542              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
543         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
544
545         /*
546          * NOTE: rdd does not necessarily represent the current cpu.
547          *       Instead it represents the cpu the thread was last
548          *       scheduled on.
549          */
550         rdd = &dfly_pcpu[lp->lwp_qcpu];
551
552         /*
553          * This process is not supposed to be scheduled anywhere or assigned
554          * as the current process anywhere.  Assert the condition.
555          */
556         KKASSERT(rdd->uschedcp != lp);
557
558 #ifndef SMP
559         /*
560          * If we are not SMP we do not have a scheduler helper to kick
561          * and must directly activate the process if none are scheduled.
562          *
563          * This is really only an issue when bootstrapping init since
564          * the caller in all other cases will be a user process, and
565          * even if released (rdd->uschedcp == NULL), that process will
566          * kickstart the scheduler when it returns to user mode from
567          * the kernel.
568          *
569          * NOTE: On SMP we can't just set some other cpu's uschedcp.
570          */
571         if (rdd->uschedcp == NULL) {
572                 spin_lock(&rdd->spin);
573                 if (rdd->uschedcp == NULL) {
574                         atomic_set_cpumask(&dfly_curprocmask, 1);
575                         rdd->uschedcp = lp;
576                         rdd->upri = lp->lwp_priority;
577                         spin_unlock(&rdd->spin);
578                         lwkt_schedule(lp->lwp_thread);
579                         return;
580                 }
581                 spin_unlock(&rdd->spin);
582         }
583 #endif
584
585 #ifdef SMP
586         /*
587          * Ok, we have to setrunqueue some target cpu and request a reschedule
588          * if necessary.
589          *
590          * We have to choose the best target cpu.  It might not be the current
591          * target even if the current cpu has no running user thread (for
592          * example, because the current cpu might be a hyperthread and its
593          * sibling has a thread assigned).
594          *
595          * If we just forked it is usually optimal to run the child on the same
596          * cpu just in case the parent decides to wait for it (thus getting
597          * off that cpu).  As long as there is nothing else runnable on the
598          * cpu, that is.  If we did this unconditionally a parent forking
599          * multiple children before waiting (e.g. make -j N) leaves other
600          * cpus idle that could be working.
601          */
602         if (lp->lwp_forked) {
603                 lp->lwp_forked = 0;
604                 if (dfly_pcpu[lp->lwp_qcpu].runqcount)
605                         rdd = dfly_choose_best_queue(lp);
606                 else
607                         rdd = &dfly_pcpu[lp->lwp_qcpu];
608                 /* dfly_wakeup_random_helper(rdd); */
609         } else {
610                 rdd = dfly_choose_best_queue(lp);
611                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
612         }
613 #endif
614         dfly_setrunqueue_dd(rdd, lp);
615 }
616
617 static void
618 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
619 {
620 #ifdef SMP
621         globaldata_t rgd;
622
623         /*
624          * We might be moving the lp to another cpu's run queue, and once
625          * on the runqueue (even if it is our cpu's), another cpu can rip
626          * it away from us.
627          *
628          * TDF_MIGRATING might already be set if this is part of a
629          * remrunqueue+setrunqueue sequence.
630          */
631         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
632                 lwkt_giveaway(lp->lwp_thread);
633
634         rgd = globaldata_find(rdd->cpuid);
635
636         /*
637          * We lose control of the lp the moment we release the spinlock
638          * after having placed it on the queue.  i.e. another cpu could pick
639          * it up, or it could exit, or its priority could be further
640          * adjusted, or something like that.
641          *
642          * WARNING! rdd can point to a foreign cpu!
643          */
644         spin_lock(&rdd->spin);
645         dfly_setrunqueue_locked(rdd, lp);
646
647         if (rgd == mycpu) {
648                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
649                         spin_unlock(&rdd->spin);
650                         if (rdd->uschedcp == NULL) {
651                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
652                                 need_user_resched();
653                         } else {
654                                 need_user_resched();
655                         }
656                 } else {
657                         spin_unlock(&rdd->spin);
658                 }
659         } else {
660                 atomic_clear_cpumask(&dfly_rdyprocmask, rgd->gd_cpumask);
661                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
662                         spin_unlock(&rdd->spin);
663                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
664                                        NULL);
665                 } else {
666                         spin_unlock(&rdd->spin);
667                         wakeup(&rdd->helper_thread);
668                 }
669         }
670 #else
671         /*
672          * Request a reschedule if appropriate.
673          */
674         spin_lock(&rdd->spin);
675         dfly_setrunqueue_locked(rdd, lp);
676         spin_unlock(&rdd->spin);
677         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
678                 need_user_resched();
679         }
680 #endif
681 }
682
683 #if 0
684
685 /*
686  * This wakes up a random helper that might have no work on its cpu to do.
687  * The idea is to improve fork/fork-exec/fork-wait/exec and similar
688  * process-spawning sequences by first scheduling the forked process
689  * on the same cpu as the parent, in case the parent is just going to
690  * wait*().  But if the parent does not wait we want another cpu to pick
691  * the forked process up ASAP.
692  *
693  * The ipi/helper-scheduling sequence typically takes a lot longer to run
694  * than a return-from-procedure-call and the parent then entering a
695  * wait*().  There's a race here that we want the parent to win ONLY if
696  * it is going to wait*().
697  *
698  * If a process sticks around for long enough normal scheduling action
699  * will move it to the right place.
700  */
701 static
702 void
703 dfly_wakeup_random_helper(dfly_pcpu_t notdd)
704 {
705         cpumask_t tmpmask;
706         cpumask_t mask;
707         int cpuid;
708
709         mask = dfly_rdyprocmask & ~dfly_curprocmask & smp_active_mask &
710                usched_global_cpumask & ~notdd->cpumask;
711         ++dfly_scancpu;
712         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
713
714         if (mask) {
715                 tmpmask = ~(CPUMASK(cpuid) - 1);
716                 if (mask & tmpmask)
717                         cpuid = BSFCPUMASK(mask & tmpmask);
718                 else
719                         cpuid = BSFCPUMASK(mask);
720                 atomic_clear_cpumask(&dfly_rdyprocmask, CPUMASK(cpuid));
721                 wakeup(&dfly_pcpu[cpuid].helper_thread);
722         }
723 }
724
725 #endif
726
727 /*
728  * This routine is called from a systimer IPI.  It MUST be MP-safe and
729  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
730  * each cpu.
731  */
732 static
733 void
734 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
735 {
736         globaldata_t gd = mycpu;
737         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
738
739         /*
740          * Do we need to round-robin?  We round-robin 10 times a second.
741          * This should only occur for cpu-bound batch processes.
742          */
743         if (++dd->rrcount >= usched_dfly_rrinterval) {
744                 dd->rrcount = 0;
745                 need_user_resched();
746         }
747
748         /*
749          * Adjust estcpu upward using a real time equivalent calculation.
750          */
751         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
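        /*
         * Rough scale: each tick adds ESTCPUMAX / ESTCPUFREQ (+1) units, so
         * a thread that runs every tick accumulates on the order of ESTCPUMAX
         * per second of cpu before ESTCPULIM() clamps it, since this routine
         * runs ESTCPUFREQ times a second (see the comment above).
         */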
752
753         /*
754          * Spinlocks also hold a critical section so there should not be
755          * any active.
756          */
757         KKASSERT(gd->gd_spinlocks_wr == 0);
758
759         dfly_resetpriority(lp);
760 }
761
762 /*
763  * Called from acquire and from kern_synch's one-second timer (one of the
764  * callout helper threads) with a critical section held.
765  *
766  * Decay p_estcpu based on the number of ticks we haven't been running
767  * and our p_nice.  As the load increases each process observes a larger
768  * number of idle ticks (because other processes are running in them).
769  * This observation leads to a larger correction which tends to make the
770  * system more 'batchy'.
771  *
772  * Note that no recalculation occurs for a process which sleeps and wakes
773  * up in the same tick.  That is, a system doing thousands of context
774  * switches per second will still only do serious estcpu calculations
775  * ESTCPUFREQ times per second.
776  */
777 static
778 void
779 dfly_recalculate_estcpu(struct lwp *lp)
780 {
781         globaldata_t gd = mycpu;
782         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
783         sysclock_t cpbase;
784         sysclock_t ttlticks;
785         int estcpu;
786         int decay_factor;
787
788         /*
789          * We have to subtract periodic to get the last schedclock
790          * timeout time, otherwise we would get the upcoming timeout.
791          * Keep in mind that a process can migrate between cpus and
792          * while the scheduler clock should be very close, boundary
793          * conditions could lead to a small negative delta.
794          */
795         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
796
797         if (lp->lwp_slptime > 1) {
798                 /*
799                  * Too much time has passed, do a coarse correction.
800                  */
801                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
802                 dfly_resetpriority(lp);
803                 lp->lwp_cpbase = cpbase;
804                 lp->lwp_cpticks = 0;
805                 lp->lwp_batch -= ESTCPUFREQ;
806                 if (lp->lwp_batch < 0)
807                         lp->lwp_batch = 0;
808         } else if (lp->lwp_cpbase != cpbase) {
809                 /*
810                  * Adjust estcpu if we are in a different tick.  Don't waste
811                  * time if we are in the same tick.
812                  *
813                  * First calculate the number of ticks in the measurement
814                  * interval.  The ttlticks calculation can wind up 0 due to
815                  * a bug in the handling of lwp_slptime  (as yet not found),
816                  * so make sure we do not get a divide by 0 panic.
817                  */
818                 ttlticks = (cpbase - lp->lwp_cpbase) /
819                            gd->gd_schedclock.periodic;
820                 if (ttlticks < 0) {
821                         ttlticks = 0;
822                         lp->lwp_cpbase = cpbase;
823                 }
824                 if (ttlticks == 0)
825                         return;
826                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
827
828                 /*
829                  * Calculate the percentage of one cpu used factoring in ncpus
830                  * and the load and adjust estcpu.  Handle degenerate cases
831                  * by adding 1 to runqcount.
832                  *
833                  * estcpu is scaled by ESTCPUMAX.
834                  *
835                  * runqcount is the excess number of user processes
836                  * that cannot be immediately scheduled to cpus.  We want
837                  * to count these as running to avoid range compression
838                  * in the base calculation (which is the actual percentage
839                  * of one cpu used).
840                  */
841                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
842                          (dd->runqcount + ncpus) / (ncpus * ttlticks);
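                /*
                 * Example: on a 4-cpu system a thread that consumed every
                 * tick of the interval (lwp_cpticks == ttlticks) with no
                 * backlog (runqcount 0) computes ESTCPUMAX * 4 / 4, i.e.
                 * 100% of one cpu; half the ticks gives ESTCPUMAX / 2.
                 */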
843
844                 /*
845                  * If estcpu is > 50% we become more batch-like
846                  * If estcpu is <= 50% we become less batch-like
847                  *
848                  * It takes 30 cpu seconds to traverse the entire range.
849                  */
850                 if (estcpu > ESTCPUMAX / 2) {
851                         lp->lwp_batch += ttlticks;
852                         if (lp->lwp_batch > BATCHMAX)
853                                 lp->lwp_batch = BATCHMAX;
854                 } else {
855                         lp->lwp_batch -= ttlticks;
856                         if (lp->lwp_batch < 0)
857                                 lp->lwp_batch = 0;
858                 }
859
860                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
861                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
862                                 lp->lwp_proc->p_pid, lp,
863                                 estcpu, lp->lwp_estcpu,
864                                 lp->lwp_batch,
865                                 lp->lwp_cpticks, ttlticks);
866                 }
867
868                 /*
869                  * Adjust lp->lwp_estcpu.  The decay factor determines how
870                  * quickly lwp_estcpu collapses to its realtime calculation.
871                  * A slower collapse gives us a more accurate number but
872                  * can cause a cpu hog to eat too much cpu before the
873                  * scheduler decides to downgrade it.
874                  *
875                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
876                  *       and not here, but we must still ensure that a
877                  *       cpu-bound nice -20 process does not completely
878                  *       override a cpu-bound nice +20 process.
879                  *
880                  * NOTE: We must use ESTCPULIM() here to deal with any
881                  *       overshoot.
882                  */
883                 decay_factor = usched_dfly_decay;
884                 if (decay_factor < 1)
885                         decay_factor = 1;
886                 if (decay_factor > 1024)
887                         decay_factor = 1024;
888
889                 lp->lwp_estcpu = ESTCPULIM(
890                         (lp->lwp_estcpu * decay_factor + estcpu) /
891                         (decay_factor + 1));
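                /*
                 * With the default decay of 8 this is an exponential moving
                 * average, (8 * old + sample) / 9, so a single interval's
                 * sample carries only ~11% of the weight and a sudden cpu
                 * burst takes several intervals to fully register.
                 */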
892
893                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
894                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
895                 dfly_resetpriority(lp);
896                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
897                 lp->lwp_cpticks = 0;
898         }
899 }
900
901 /*
902  * Compute the priority of a process when running in user mode.
903  * Arrange to reschedule if the resulting priority is better
904  * than that of the current process.
905  *
906  * This routine may be called with any process.
907  *
908  * This routine is called by fork1() for initial setup with the process
909  * of the run queue, and also may be called normally with the process on or
910  * off the run queue.
911  */
912 static void
913 dfly_resetpriority(struct lwp *lp)
914 {
915         dfly_pcpu_t rdd;
916         int newpriority;
917         u_short newrqtype;
918         int rcpu;
919         int checkpri;
920         int estcpu;
921
922         crit_enter();
923
924         /*
925          * Lock the scheduler (lp) belongs to.  This can be on a different
926          * cpu.  Handle races.  This loop breaks out with the appropriate
927          * rdd locked.
928          */
929         for (;;) {
930                 rcpu = lp->lwp_qcpu;
931                 cpu_ccfence();
932                 rdd = &dfly_pcpu[rcpu];
933                 spin_lock(&rdd->spin);
934                 if (rcpu == lp->lwp_qcpu)
935                         break;
936                 spin_unlock(&rdd->spin);
937         }
938
939         /*
940          * Calculate the new priority and queue type
941          */
942         newrqtype = lp->lwp_rtprio.type;
943
944         switch(newrqtype) {
945         case RTP_PRIO_REALTIME:
946         case RTP_PRIO_FIFO:
947                 newpriority = PRIBASE_REALTIME +
948                              (lp->lwp_rtprio.prio & PRIMASK);
949                 break;
950         case RTP_PRIO_NORMAL:
951                 /*
952                  * Detune estcpu based on batchiness.  lwp_batch ranges
953                  * from 0 to BATCHMAX.  Limit estcpu for the sake of
954                  * the priority calculation to between 50% and 100%.
955                  */
956                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
957                          (BATCHMAX * 2);
958
959                 /*
960                  * p_nice piece         Adds (0-40) * 2         0-80
961                  * estcpu               Adds 16384  * 4 / 512   0-128
962                  */
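                /*
                 * Worked example with the defines above: a nice +20, fully
                 * cpu-bound thread (detuned estcpu at ESTCPUMAX == 16384)
                 * contributes 40 * 4 / 2 = 80 plus 16384 * 4 / 512 = 128,
                 * and 208 * 128 / 210 = 126, i.e. PRIBASE_NORMAL + 126,
                 * near the least-desirable end of the normal range.
                 */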
963                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
964                 newpriority += estcpu * PPQ / ESTCPUPPQ;
965                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
966                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
967                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
968                 break;
969         case RTP_PRIO_IDLE:
970                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
971                 break;
972         case RTP_PRIO_THREAD:
973                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
974                 break;
975         default:
976                 panic("Bad RTP_PRIO %d", newrqtype);
977                 /* NOT REACHED */
978         }
979
980         /*
981          * The newpriority incorporates the queue type so do a simple masked
982          * check to determine if the process has moved to another queue.  If
983          * it has, and it is currently on a run queue, then move it.
984          *
985          * Since uload is ~PPQMASK masked, no modifications are necessary if
986          * we end up in the same run queue.
987          */
988         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
989                 int delta_uload;
990
991                 /*
992                  * uload can change, calculate the adjustment to reduce
993                  * edge cases since choosers scan the cpu topology without
994                  * locks.
995                  */
996                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
997                         delta_uload =
998                                 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
999                                 ((newpriority & ~PPQMASK) & PRIMASK);
1000                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1001                                        delta_uload);
1002                 }
1003                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1004                         dfly_remrunqueue_locked(rdd, lp);
1005                         lp->lwp_priority = newpriority;
1006                         lp->lwp_rqtype = newrqtype;
1007                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1008                         dfly_setrunqueue_locked(rdd, lp);
1009                         checkpri = 1;
1010                 } else {
1011                         lp->lwp_priority = newpriority;
1012                         lp->lwp_rqtype = newrqtype;
1013                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1014                         checkpri = 0;
1015                 }
1016         } else {
1017                 /*
1018                  * In the same PPQ, uload cannot change.
1019                  */
1020                 lp->lwp_priority = newpriority;
1021                 checkpri = 1;
1022                 rcpu = -1;
1023         }
1024
1025         /*
1026          * Determine if we need to reschedule the target cpu.  This only
1027          * occurs if the LWP is already on a scheduler queue, which means
1028          * that idle cpu notification has already occurred.  At most we
1029          * need only issue a need_user_resched() on the appropriate cpu.
1030          *
1031          * The LWP may be owned by a CPU different from the current one,
1032          * in which case dd->uschedcp may be modified without an MP lock
1033          * or a spinlock held.  The worst that happens is that the code
1034          * below causes a spurious need_user_resched() on the target CPU
1035          * and dd->upri to be wrong for a short period of time, both of
1036          * which are harmless.
1037          *
1038          * If checkpri is 0 we are adjusting the priority of the current
1039          * process, possibly higher (less desirable), so ignore the upri
1040          * check which will fail in that case.
1041          */
1042         if (rcpu >= 0) {
1043                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1044                     (checkpri == 0 ||
1045                      (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1046 #ifdef SMP
1047                         if (rcpu == mycpu->gd_cpuid) {
1048                                 spin_unlock(&rdd->spin);
1049                                 need_user_resched();
1050                         } else {
1051                                 atomic_clear_cpumask(&dfly_rdyprocmask,
1052                                                      CPUMASK(rcpu));
1053                                 spin_unlock(&rdd->spin);
1054                                 lwkt_send_ipiq(globaldata_find(rcpu),
1055                                                dfly_need_user_resched_remote,
1056                                                NULL);
1057                         }
1058 #else
1059                         spin_unlock(&rdd->spin);
1060                         need_user_resched();
1061 #endif
1062                 } else {
1063                         spin_unlock(&rdd->spin);
1064                 }
1065         } else {
1066                 spin_unlock(&rdd->spin);
1067         }
1068         crit_exit();
1069 }
1070
1071 static
1072 void
1073 dfly_yield(struct lwp *lp)
1074 {
1075 #if 0
1076         /* FUTURE (or something similar) */
1077         switch(lp->lwp_rqtype) {
1078         case RTP_PRIO_NORMAL:
1079                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1080                 break;
1081         default:
1082                 break;
1083         }
1084 #endif
1085         need_user_resched();
1086 }
1087
1088 /*
1089  * Called from fork1() when a new child process is being created.
1090  *
1091  * Give the child process an initial estcpu that is more batchy than
1092  * its parent and dock the parent for the fork (but do not
1093  * reschedule the parent).   This comprises the main part of our batch
1094  * detection heuristic for both parallel forking and sequential execs.
1095  *
1096  * XXX lwp should be "spawning" instead of "forking"
1097  */
1098 static void
1099 dfly_forking(struct lwp *plp, struct lwp *lp)
1100 {
1101         /*
1102          * Put the child 4 queue slots (out of 32) higher than the parent
1103          * (less desirable than the parent).
1104          */
1105         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1106         lp->lwp_forked = 1;
1107
1108         /*
1109          * The batch status of children always starts out centerline
1110          * and will inch up or inch down as appropriate.  It takes roughly
1111          * 15 seconds of >50% cpu to hit the limit.
1112          */
1113         lp->lwp_batch = BATCHMAX / 2;
1114
1115         /*
1116          * Dock the parent a cost for the fork, protecting us from fork
1117          * bombs.  If the parent is forking quickly make the child more
1118          * batchy.
1119          */
1120         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1121 }
1122
1123 /*
1124  * Called when a lwp is being removed from this scheduler, typically
1125  * during lwp_exit().
1126  */
1127 static void
1128 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1129 {
1130         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1131
1132         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1133                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1134                 atomic_add_int(&dd->uload,
1135                                -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1136         }
1137 }
1138
1139 /*
1140  * This function cannot block in any way
1141  */
1142 static void
1143 dfly_uload_update(struct lwp *lp)
1144 {
1145         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1146
1147         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1148                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1149                         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1150                         atomic_add_int(&dd->uload,
1151                                    ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1152                 }
1153         } else {
1154                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1155                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1156                         atomic_add_int(&dd->uload,
1157                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1158                 }
1159         }
1160 }
1161
1162 /*
1163  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1164  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1165  * has a better or equal priority than the process that would otherwise be
1166  * chosen, NULL is returned.
1167  *
1168  * Until we fix the RUNQ code the chklp test has to be strict or we may
1169  * bounce between processes trying to acquire the current process designation.
1170  *
1171  * Must be called with dd->spin locked.  The spinlock is left intact through
1172  * the entire routine.
1173  *
1174  * If chklp is NULL this function will dive other cpus' queues looking
1175  * for work if the current queue is empty.
1176  */
1177 static
1178 struct lwp *
1179 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
1180 {
1181 #ifdef SMP
1182         dfly_pcpu_t xdd;
1183 #endif
1184         struct lwp *lp;
1185         struct rq *q;
1186         u_int32_t *which, *which2;
1187         u_int32_t pri;
1188         u_int32_t rtqbits;
1189         u_int32_t tsqbits;
1190         u_int32_t idqbits;
1191
1192         rtqbits = dd->rtqueuebits;
1193         tsqbits = dd->queuebits;
1194         idqbits = dd->idqueuebits;
1195
1196         if (rtqbits) {
1197                 pri = bsfl(rtqbits);
1198                 q = &dd->rtqueues[pri];
1199                 which = &dd->rtqueuebits;
1200                 which2 = &rtqbits;
1201         } else if (tsqbits) {
1202                 pri = bsfl(tsqbits);
1203                 q = &dd->queues[pri];
1204                 which = &dd->queuebits;
1205                 which2 = &tsqbits;
1206         } else if (idqbits) {
1207                 pri = bsfl(idqbits);
1208                 q = &dd->idqueues[pri];
1209                 which = &dd->idqueuebits;
1210                 which2 = &idqbits;
1211         } else
1212 #ifdef SMP
1213         if (isremote || usched_dfly_pull_enable == 0) {
1214                 /*
1215                  * Queue is empty, disallow remote->remote recursion and
1216                  * do not pull if threads are active.
1217                  */
1218                 return (NULL);
1219         } else {
1220                 /*
1221                  * Pull a runnable thread from a remote run queue.  We have
1222                  * to adjust qcpu and uload manually because the lp we return
1223                  * might be assigned directly to uschedcp (setrunqueue might
1224                  * not be called).
1225                  */
1226                 xdd = dfly_choose_worst_queue(dd);
1227                 if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
1228                         lp = dfly_chooseproc_locked(xdd, NULL, 1);
1229                         if (lp) {
1230                                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1231                                         atomic_add_int(&xdd->uload,
1232                                             -((lp->lwp_priority & ~PPQMASK) &
1233                                               PRIMASK));
1234                                 }
1235                                 lp->lwp_qcpu = dd->cpuid;
1236                                 atomic_add_int(&dd->uload,
1237                                     ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1238                                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1239                         }
1240                         spin_unlock(&xdd->spin);
1241                 } else {
1242                         lp = NULL;
1243                 }
1244                 return (lp);
1245         }
1246 #else
1247         {
1248                 return NULL;
1249         }
1250 #endif
1251         lp = TAILQ_FIRST(q);
1252         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1253
1254         /*
1255          * If the passed lwp <chklp> is reasonably close to the selected
1256          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1257          *
1258          * Note that we must error on the side of <chklp> to avoid bouncing
1259          * between threads in the acquire code.
1260          */
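        /*
         * i.e. <chklp> is kept unless the selected lwp's priority is better
         * (numerically lower) by at least a full queue's worth (PPQ).
         */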
1261         if (chklp) {
1262                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1263                         return(NULL);
1264         }
1265
1266         KTR_COND_LOG(usched_chooseproc,
1267             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1268             lp->lwp_proc->p_pid,
1269             lp->lwp_thread->td_gd->gd_cpuid,
1270             mycpu->gd_cpuid);
1271
1272         TAILQ_REMOVE(q, lp, lwp_procq);
1273         --dd->runqcount;
1274         if (TAILQ_EMPTY(q))
1275                 *which &= ~(1 << pri);
1276         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1277         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1278
1279         return lp;
1280 }
1281
1282 #ifdef SMP
1283
1284 /*
1285  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1286  *
1287  * Choose a cpu node to schedule lp on, hopefully nearby its current
1288  * node.  We give the current node a modest advantage for obvious reasons.
1289  *
1290  * We also give the node the thread was woken up FROM a slight advantage
1291  * in order to try to schedule paired threads which synchronize/block waiting
1292  * for each other fairly close to each other.  Similarly in a network setting
1293  * this feature will also attempt to place a user process near the kernel
1294  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1295  * algorithm as it heuristically groups synchronizing processes for locality
1296  * of reference in multi-socket systems.
1297  *
1298  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1299  *
1300  * When the topology is known choose a cpu whose group has, in aggregate,
1301  * the lowest weighted load.
1302  */
1303 static
1304 dfly_pcpu_t
1305 dfly_choose_best_queue(struct lwp *lp)
1306 {
1307         cpumask_t mask;
1308         cpu_node_t *cpup;
1309         cpu_node_t *cpun;
1310         cpu_node_t *cpub;
1311         dfly_pcpu_t dd1 = &dfly_pcpu[lp->lwp_qcpu];
1312         dfly_pcpu_t dd2 = &dfly_pcpu[lp->lwp_thread->td_wakefromcpu];
1313         dfly_pcpu_t rdd;
1314         int cpuid;
1315         int n;
1316         int count;
1317         int load;
1318         int lowest_load;
1319
1320         /*
1321          * When the topology is unknown choose a random cpu that is hopefully
1322          * idle.
1323          */
1324         if (dd1->cpunode == NULL)
1325                 return (dfly_choose_queue_simple(dd1, lp));
1326
1327         /*
1328          * When the topology is known choose a cpu whose group has, in
1329          * aggregate, the lowest weighted load.
1330          */
1331         cpup = root_cpu_node;
1332         rdd = dd1;
1333
1334         while (cpup) {
1335                 /*
1336                  * Degenerate case super-root
1337                  */
1338                 if (cpup->child_node && cpup->child_no == 1) {
1339                         cpup = cpup->child_node;
1340                         continue;
1341                 }
1342
1343                 /*
1344                  * Terminal cpunode
1345                  */
1346                 if (cpup->child_node == NULL) {
1347                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1348                         break;
1349                 }
1350
1351                 cpub = NULL;
1352                 lowest_load = 0x7FFFFFFF;
1353
1354                 for (n = 0; n < cpup->child_no; ++n) {
1355                         /*
1356                          * Accumulate load information for all cpus
1357                          * which are members of this node.
1358                          */
1359                         cpun = &cpup->child_node[n];
1360                         mask = cpun->members & usched_global_cpumask &
1361                                smp_active_mask & lp->lwp_cpumask;
1362                         if (mask == 0)
1363                                 continue;
1364
1365                         /*
1366                          * Compensate if the lp is already accounted for in
1367                          * the aggregate uload for this mask set.  We want
1368                          * to calculate the loads as if lp was not present.
1369                          */
1370                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1371                             CPUMASK(lp->lwp_qcpu) & mask) {
1372                                 load = -((lp->lwp_priority & ~PPQMASK) &
1373                                          PRIMASK);
1374                         } else {
1375                                 load = 0;
1376                         }
1377
1378                         count = 0;
1379                         while (mask) {
1380                                 cpuid = BSFCPUMASK(mask);
1381                                 load += dfly_pcpu[cpuid].uload;
1382                                 load += dfly_pcpu[cpuid].runqcount *
1383                                         usched_dfly_weight3;
1384                                 mask &= ~CPUMASK(cpuid);
1385                                 ++count;
1386                         }
1387                         load /= count;
1388
1389                         /*
1390                          * Give a slight advantage to the cpu groups (lp)
1391                          * belongs to, and a very slight advantage to the
1392                          * cpu groups our synchronous partner belongs to.
1393                          */
1394                         if (cpun->members & dd1->cpumask)
1395                                 load -= usched_dfly_weight1;
1396                         else if (cpun->members & dd2->cpumask)
1397                                 load -= usched_dfly_weight2;
1398
1399                         /*
1400                          * Calculate the best load
1401                          */
1402                         if (cpub == NULL || lowest_load > load ||
1403                             (lowest_load == load &&
1404                              (cpun->members & dd1->cpumask))
1405                         ) {
1406                                 lowest_load = load;
1407                                 cpub = cpun;
1408                         }
1409                 }
1410                 cpup = cpub;
1411         }
1412         if (usched_dfly_chooser)
1413                 kprintf("lp %02d->%02d %s\n",
1414                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1415         return (rdd);
1416 }
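
/*
 * Illustrative sketch (kept under #if 0, not compiled): the per-node load
 * computed by dfly_choose_best_queue() above and dfly_choose_worst_queue()
 * below is essentially the average of each member cpu's uload plus a
 * penalty of usched_dfly_weight3 per runnable thread, before the
 * weight1/weight2 locality biases are applied.  The helper name is
 * hypothetical and exists only to show the aggregation in isolation.
 */
#if 0
static int
dfly_node_weighted_load_sketch(cpumask_t mask)
{
        int load = 0;
        int count = 0;
        int cpuid;

        while (mask) {
                cpuid = BSFCPUMASK(mask);
                load += dfly_pcpu[cpuid].uload;
                load += dfly_pcpu[cpuid].runqcount * usched_dfly_weight3;
                mask &= ~CPUMASK(cpuid);
                ++count;
        }
        if (count == 0)         /* callers guarantee a non-empty mask */
                return (0);
        return (load / count);
}
#endif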
1417
1418 /*
1419  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1420  *
1421  * Choose the worst queue close to dd's cpu node with a non-empty runq.
1422  *
1423  * This is used by the thread chooser when the current cpu's queues are
1424  * empty to steal a thread from another cpu's queue.  We want to offload
1425  * the most heavily-loaded queue.
1426  */
1427 static
1428 dfly_pcpu_t
1429 dfly_choose_worst_queue(dfly_pcpu_t dd)
1430 {
1431         cpumask_t mask;
1432         cpu_node_t *cpup;
1433         cpu_node_t *cpun;
1434         cpu_node_t *cpub;
1435         dfly_pcpu_t rdd;
1436         int cpuid;
1437         int n;
1438         int count;
1439         int load;
1440         int highest_load;
1441
1442         /*
1443          * When the topology is unknown we have no basis for choosing a
1444          * queue to steal from, return NULL.
1445          */
1446         if (dd->cpunode == NULL) {
1447                 return (NULL);
1448         }
1449
1450         /*
1451          * When the topology is known, choose a cpu whose group has, in
1452          * aggregate, the highest weighted load.
1453          */
1454         cpup = root_cpu_node;
1455         rdd = dd;
1456         while (cpup) {
1457                 /*
1458                  * Degenerate case super-root
1459                  */
1460                 if (cpup->child_node && cpup->child_no == 1) {
1461                         cpup = cpup->child_node;
1462                         continue;
1463                 }
1464
1465                 /*
1466                  * Terminal cpunode
1467                  */
1468                 if (cpup->child_node == NULL) {
1469                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1470                         break;
1471                 }
1472
1473                 cpub = NULL;
1474                 highest_load = 0;
1475
1476                 for (n = 0; n < cpup->child_no; ++n) {
1477                         /*
1478                          * Accumulate load information for all cpus
1479                          * which are members of this node.
1480                          */
1481                         cpun = &cpup->child_node[n];
1482                         mask = cpun->members & usched_global_cpumask &
1483                                smp_active_mask;
1484                         if (mask == 0)
1485                                 continue;
1486                         count = 0;
1487                         load = 0;
1488                         while (mask) {
1489                                 cpuid = BSFCPUMASK(mask);
1490                                 load += dfly_pcpu[cpuid].uload;
1491                                 load += dfly_pcpu[cpuid].runqcount *
1492                                         usched_dfly_weight3;
1493                                 mask &= ~CPUMASK(cpuid);
1494                                 ++count;
1495                         }
1496                         load /= count;
1497
1498                         /*
1499                          * Give a slight advantage to nearby cpus.
1500                          */
1501                         if (cpun->members & dd->cpumask)
1502                                 load += usched_dfly_weight1;
1503
1504                         /*
1505                          * The best candidate is the one with the worst
1506                          * (highest) load.  Prefer candidates that are
1507                          * closer to our cpu.
1508                          */
1509                         if (cpub == NULL || highest_load < load ||
1510                              (highest_load == load &&
1511                               (cpun->members & dd->cpumask))
1512                         ) {
1513                                 highest_load = load;
1514                                 cpub = cpun;
1515                         }
1516                 }
1517                 cpup = cpub;
1518         }
1519         return (rdd);
1520 }
1521
1522 static
1523 dfly_pcpu_t
1524 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1525 {
1526         dfly_pcpu_t rdd;
1527         cpumask_t tmpmask;
1528         cpumask_t mask;
1529         int cpuid;
1530
1531         /*
1532          * Fall back to the original heuristic: select a random cpu,
1533          * first checking cpus not currently running a user thread.
1534          */
1535         ++dfly_scancpu;
1536         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1537         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1538                smp_active_mask & usched_global_cpumask;
1539
1540         while (mask) {
1541                 tmpmask = ~(CPUMASK(cpuid) - 1);
1542                 if (mask & tmpmask)
1543                         cpuid = BSFCPUMASK(mask & tmpmask);
1544                 else
1545                         cpuid = BSFCPUMASK(mask);
1546                 rdd = &dfly_pcpu[cpuid];
1547
1548                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1549                         goto found;
1550                 mask &= ~CPUMASK(cpuid);
1551         }
1552
1553         /*
1554          * Then cpus which might have a currently running lp
1555          */
1556         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1557         mask = dfly_curprocmask & dfly_rdyprocmask &
1558                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1559
1560         while (mask) {
1561                 tmpmask = ~(CPUMASK(cpuid) - 1);
1562                 if (mask & tmpmask)
1563                         cpuid = BSFCPUMASK(mask & tmpmask);
1564                 else
1565                         cpuid = BSFCPUMASK(mask);
1566                 rdd = &dfly_pcpu[cpuid];
1567
1568                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1569                         goto found;
1570                 mask &= ~CPUMASK(cpuid);
1571         }
1572
1573         /*
1574          * If we cannot find a suitable cpu we reload from dfly_scancpu
1575          * and round-robin.  Other cpus will pick up as they release their
1576          * current lwps or become ready.
1577          *
1578          * Avoid a degenerate system lockup case if usched_global_cpumask
1579          * is set to 0 or otherwise does not cover lwp_cpumask.
1580          *
1581          * We only kick the target helper thread in this case; we do not
1582          * set the user resched flag.
1583          */
1584         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1585         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1586                 cpuid = 0;
1587         rdd = &dfly_pcpu[cpuid];
1588 found:
1589         return (rdd);
1590 }
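
/*
 * Note on the scan in dfly_choose_queue_simple() above: each pass starts
 * at cpuid = (dfly_scancpu & 0xFFFF) % ncpus and walks upward through the
 * candidate mask (tmpmask selects the bits >= cpuid), wrapping back to the
 * lowest set bit when nothing at or above the starting point qualifies.
 */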
1591
1592 static
1593 void
1594 dfly_need_user_resched_remote(void *dummy)
1595 {
1596         globaldata_t gd = mycpu;
1597         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1598
1599         need_user_resched();
1600
1601         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1602         wakeup_mycpu(&dd->helper_thread);
1603 }
1604
1605 #endif
1606
1607 /*
1608  * dfly_remrunqueue_locked() removes a given process from the run queue
1609  * that it is on, clearing the corresponding queue bit if the queue becomes empty.
1610  *
1611  * Note that the user process scheduler is different from the LWKT scheduler.
1612  * The user process scheduler only manages user processes but it uses LWKT
1613  * underneath, and a user process operating in the kernel will often be
1614  * 'released' from our management.
1615  *
1616  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1617  * to sleep or the lwp is moved to a different runq.
1618  */
1619 static void
1620 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1621 {
1622         struct rq *q;
1623         u_int32_t *which;
1624         u_int8_t pri;
1625
1626         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1627         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1628         --rdd->runqcount;
1629         /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
1630         KKASSERT(rdd->runqcount >= 0);
1631
1632         pri = lp->lwp_rqindex;
1633         switch(lp->lwp_rqtype) {
1634         case RTP_PRIO_NORMAL:
1635                 q = &rdd->queues[pri];
1636                 which = &rdd->queuebits;
1637                 break;
1638         case RTP_PRIO_REALTIME:
1639         case RTP_PRIO_FIFO:
1640                 q = &rdd->rtqueues[pri];
1641                 which = &rdd->rtqueuebits;
1642                 break;
1643         case RTP_PRIO_IDLE:
1644                 q = &rdd->idqueues[pri];
1645                 which = &rdd->idqueuebits;
1646                 break;
1647         default:
1648                 panic("remrunqueue: invalid rtprio type");
1649                 /* NOT REACHED */
1650         }
1651         TAILQ_REMOVE(q, lp, lwp_procq);
1652         if (TAILQ_EMPTY(q)) {
1653                 KASSERT((*which & (1 << pri)) != 0,
1654                         ("remrunqueue: remove from empty queue"));
1655                 *which &= ~(1 << pri);
1656         }
1657 }
1658
1659 /*
1660  * dfly_setrunqueue_locked()
1661  *
1662  * Add a process whose rqtype and rqindex have previously been calculated
1663  * onto the appropriate run queue.   Determine if the addition requires
1664  * a reschedule on a cpu and return the cpuid or -1.
1665  *
1666  * NOTE:          Lower priorities are better priorities.
1667  *
1668  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1669  *                sum of the rough lwp_priority for all running and runnable
1670  *                processes.  Lower priority processes (higher lwp_priority
1671  *                values) actually DO count as more load, not less, because
1672  *                these are the programs which require the most care with
1673  *                regards to cpu selection.
1674  */
1675 static void
1676 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1677 {
1678         struct rq *q;
1679         u_int32_t *which;
1680         int pri;
1681
1682         if (lp->lwp_qcpu != rdd->cpuid) {
1683                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1684                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1685                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1686                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1687                 }
1688                 lp->lwp_qcpu = rdd->cpuid;
1689         }
1690
1691         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1692         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1693         ++rdd->runqcount;
1694         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1695                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1696                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1697                                (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1698         }
1699
1700         pri = lp->lwp_rqindex;
1701
1702         switch(lp->lwp_rqtype) {
1703         case RTP_PRIO_NORMAL:
1704                 q = &rdd->queues[pri];
1705                 which = &rdd->queuebits;
1706                 break;
1707         case RTP_PRIO_REALTIME:
1708         case RTP_PRIO_FIFO:
1709                 q = &rdd->rtqueues[pri];
1710                 which = &rdd->rtqueuebits;
1711                 break;
1712         case RTP_PRIO_IDLE:
1713                 q = &rdd->idqueues[pri];
1714                 which = &rdd->idqueuebits;
1715                 break;
1716         default:
1717                 panic("setrunqueue: invalid rtprio type");
1718                 /* NOT REACHED */
1719         }
1720
1721         /*
1722          * Add to the correct queue and set the appropriate bit.  If no
1723          * lower priority (i.e. better) processes are in the queue then
1724          * we want a reschedule; calculate the best cpu for the job.
1725          *
1726          * Always run reschedules on the LWP's original cpu.
1727          */
1728         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1729         *which |= 1 << pri;
1730 }
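
/*
 * Worked example of the uload bookkeeping above (the priority values are
 * hypothetical): with PPQ == 4 (PPQMASK == 3) and PRIMASK == 127, an
 * lwp_priority of 141 contributes ((141 & ~PPQMASK) & PRIMASK) ==
 * (140 & 127) == 12 to the owning cpu's uload, while a numerically higher
 * (worse) priority such as 200 contributes ((200 & ~PPQMASK) & PRIMASK) ==
 * 72.  The largest contributions thus come from the low-priority programs
 * that need the most care in cpu selection, matching the NOTE ON ULOAD
 * above.
 */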
1731
1732 #ifdef SMP
1733
1734 /*
1735  * For SMP systems a user scheduler helper thread is created for each
1736  * cpu and is used to allow one cpu to wake up another for the purposes of
1737  * scheduling userland threads from setrunqueue().
1738  *
1739  * UP systems do not need the helper since there is only one cpu.
1740  *
1741  * We can't use the idle thread for this because we might block.
1742  * Additionally, doing things this way allows us to HLT idle cpus
1743  * on MP systems.
1744  */
1745 static void
1746 dfly_helper_thread(void *dummy)
1747 {
1748     globaldata_t gd;
1749     dfly_pcpu_t  dd;
1750     struct lwp *nlp;
1751     cpumask_t mask;
1752     int cpuid;
1753
1754     gd = mycpu;
1755     cpuid = gd->gd_cpuid;       /* doesn't change */
1756     mask = gd->gd_cpumask;      /* doesn't change */
1757     dd = &dfly_pcpu[cpuid];
1758
1759     /*
1760      * Since we want to be woken up only when no user processes
1761      * are scheduled on a cpu, run at an ultra low priority.
1762      */
1763     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1764
1765     tsleep(&dd->helper_thread, 0, "schslp", hz);
1766
1767     for (;;) {
1768         /*
1769          * We use the LWKT deschedule-interlock trick to avoid racing
1770          * dfly_rdyprocmask.  This means we cannot block through to the
1771          * manual lwkt_switch() call we make below.
1772          */
1773         crit_enter_gd(gd);
1774         tsleep_interlock(&dd->helper_thread, 0);
1775
1776         spin_lock(&dd->spin);
1777
1778         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1779         clear_user_resched();   /* This satisfies the reschedule request */
1780         dd->rrcount = 0;        /* Reset the round-robin counter */
1781
1782         if ((dfly_curprocmask & mask) == 0) {
1783                 /*
1784                  * No thread is currently scheduled.
1785                  */
1786                 KKASSERT(dd->uschedcp == NULL);
1787                 if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
1788                         KTR_COND_LOG(usched_sched_thread_no_process,
1789                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1790                             gd->gd_cpuid,
1791                             nlp->lwp_proc->p_pid,
1792                             nlp->lwp_thread->td_gd->gd_cpuid);
1793
1794                         atomic_set_cpumask(&dfly_curprocmask, mask);
1795                         dd->upri = nlp->lwp_priority;
1796                         dd->uschedcp = nlp;
1797                         dd->rrcount = 0;        /* reset round robin */
1798                         spin_unlock(&dd->spin);
1799                         lwkt_acquire(nlp->lwp_thread);
1800                         lwkt_schedule(nlp->lwp_thread);
1801                 } else {
1802                         spin_unlock(&dd->spin);
1803                 }
1804         } else if (dd->runqcount) {
1805                 /*
1806                  * Possibly find a better process to schedule.
1807                  */
1808                 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
1809                 if (nlp) {
1810                         KTR_COND_LOG(usched_sched_thread_process,
1811                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1812                             gd->gd_cpuid,
1813                             nlp->lwp_proc->p_pid,
1814                             nlp->lwp_thread->td_gd->gd_cpuid);
1815
1816                         dd->upri = nlp->lwp_priority;
1817                         dd->uschedcp = nlp;
1818                         dd->rrcount = 0;        /* reset round robin */
1819                         spin_unlock(&dd->spin);
1820                         lwkt_acquire(nlp->lwp_thread);
1821                         lwkt_schedule(nlp->lwp_thread);
1822                 } else {
1823                         /*
1824                          * Leave the thread on our run queue.  Another
1825                          * scheduler will try to pull it later.
1826                          */
1827                         spin_unlock(&dd->spin);
1828                 }
1829         } else {
1830                 /*
1831                  * The runq is empty.
1832                  */
1833                 spin_unlock(&dd->spin);
1834         }
1835
1836         /*
1837          * We're descheduled unless someone scheduled us.  Switch away.
1838          * Exiting the critical section will cause splz() to be called
1839          * for us if interrupts and such are pending.
1840          */
1841         crit_exit_gd(gd);
1842         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", hz);
1843     }
1844 }
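
/*
 * Illustrative sketch (kept under #if 0, not compiled): how a remote cpu
 * typically kicks this helper thread after queueing work for it.  The
 * helper is only woken if its bit is still set in dfly_rdyprocmask, and
 * the bit is cleared first to avoid redundant wakeups.  The function name
 * is hypothetical; the real wakeup paths are in the setrunqueue code.
 */
#if 0
static void
dfly_kick_helper_sketch(dfly_pcpu_t rdd)
{
        cpumask_t mask = CPUMASK(rdd->cpuid);

        if (dfly_rdyprocmask & mask) {
                atomic_clear_cpumask(&dfly_rdyprocmask, mask);
                wakeup(&rdd->helper_thread);
        }
}
#endif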
1845
1846 #if 0
1847 static int
1848 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
1849 {
1850         int error, new_val;
1851
1852         new_val = usched_dfly_stick_to_level;
1853
1854         error = sysctl_handle_int(oidp, &new_val, 0, req);
1855         if (error != 0 || req->newptr == NULL)
1856                 return (error);
1857         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1858                 return (EINVAL);
1859         usched_dfly_stick_to_level = new_val;
1860         return (0);
1861 }
1862 #endif
1863
1864 /*
1865  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
1866  * been cleared by rqinit() and we should not mess with it further.
1867  */
1868 static void
1869 dfly_helper_thread_cpu_init(void)
1870 {
1871         int i;
1872         int j;
1873         int cpuid;
1874         int smt_not_supported = 0;
1875         int cache_coherent_not_supported = 0;
1876
1877         if (bootverbose)
1878                 kprintf("Start scheduler helpers on cpus:\n");
1879
1880         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1881         usched_dfly_sysctl_tree =
1882                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1883                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1884                                 "usched_dfly", CTLFLAG_RD, 0, "");
1885
1886         for (i = 0; i < ncpus; ++i) {
1887                 dfly_pcpu_t dd = &dfly_pcpu[i];
1888                 cpumask_t mask = CPUMASK(i);
1889
1890                 if ((mask & smp_active_mask) == 0)
1891                     continue;
1892
1893                 spin_init(&dd->spin);
1894                 dd->cpunode = get_cpu_node_by_cpuid(i);
1895                 dd->cpuid = i;
1896                 dd->cpumask = CPUMASK(i);
1897                 for (j = 0; j < NQS; j++) {
1898                         TAILQ_INIT(&dd->queues[j]);
1899                         TAILQ_INIT(&dd->rtqueues[j]);
1900                         TAILQ_INIT(&dd->idqueues[j]);
1901                 }
1902                 atomic_clear_cpumask(&dfly_curprocmask, 1);
1903
1904                 if (dd->cpunode == NULL) {
1905                         smt_not_supported = 1;
1906                         cache_coherent_not_supported = 1;
1907                         if (bootverbose)
1908                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1909                                          "found for cpu\n", i);
1910                 } else {
1911                         switch (dd->cpunode->type) {
1912                         case THREAD_LEVEL:
1913                                 if (bootverbose)
1914                                         kprintf ("\tcpu%d - HyperThreading "
1915                                                  "available. Core siblings: ",
1916                                                  i);
1917                                 break;
1918                         case CORE_LEVEL:
1919                                 smt_not_supported = 1;
1920
1921                                 if (bootverbose)
1922                                         kprintf ("\tcpu%d - No HT available, "
1923                                                  "multi-core/physical "
1924                                                  "cpu. Physical siblings: ",
1925                                                  i);
1926                                 break;
1927                         case CHIP_LEVEL:
1928                                 smt_not_supported = 1;
1929
1930                                 if (bootverbose)
1931                                         kprintf ("\tcpu%d - No HT available, "
1932                                                  "single-core/physical cpu. "
1933                                                  "Package Siblings: ",
1934                                                  i);
1935                                 break;
1936                         default:
1937                                 /* Let's go for safe defaults here */
1938                                 smt_not_supported = 1;
1939                                 cache_coherent_not_supported = 1;
1940                                 if (bootverbose)
1941                                         kprintf ("\tcpu%d - Unknown cpunode->"
1942                                                  "type=%u. Siblings: ",
1943                                                  i,
1944                                                  (u_int)dd->cpunode->type);
1945                                 break;
1946                         }
1947
1948                         if (bootverbose) {
1949                                 if (dd->cpunode->parent_node != NULL) {
1950                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1951                                                 kprintf("cpu%d ", cpuid);
1952                                         kprintf("\n");
1953                                 } else {
1954                                         kprintf(" no siblings\n");
1955                                 }
1956                         }
1957                 }
1958
1959                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
1960                             0, i, "usched %d", i);
1961
1962                 /*
1963                  * Allow user scheduling on the target cpu.  cpu #0 has already
1964                  * been enabled in rqinit().
1965                  */
1966                 if (i)
1967                     atomic_clear_cpumask(&dfly_curprocmask, mask);
1968                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1969                 dd->upri = PRIBASE_NULL;
1970
1971         }
1972
1973         /* usched_dfly sysctl configurable parameters */
1974
1975         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1976                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1977                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1978                        &usched_dfly_rrinterval, 0, "");
1979         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1980                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1981                        OID_AUTO, "decay", CTLFLAG_RW,
1982                        &usched_dfly_decay, 0, "Extra decay when not running");
1983         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1984                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1985                        OID_AUTO, "batch_time", CTLFLAG_RW,
1986                        &usched_dfly_batch_time, 0, "Min batch counter value");
1987
1988         /* Add enable/disable option for SMT scheduling if supported */
1989         if (smt_not_supported) {
1990                 usched_dfly_smt = 0;
1991                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1992                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1993                                   OID_AUTO, "smt", CTLFLAG_RD,
1994                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
1995         } else {
1996                 usched_dfly_smt = 1;
1997                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1998                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1999                                OID_AUTO, "smt", CTLFLAG_RW,
2000                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2001         }
2002
2003         /*
2004          * Add enable/disable option for cache coherent scheduling
2005          * if supported
2006          */
2007         if (cache_coherent_not_supported) {
2008                 usched_dfly_cache_coherent = 0;
2009                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2010                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2011                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2012                                   "NOT SUPPORTED", 0,
2013                                   "Cache coherence NOT SUPPORTED");
2014         } else {
2015                 usched_dfly_cache_coherent = 1;
2016                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2017                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2018                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2019                                &usched_dfly_cache_coherent, 0,
2020                                "Enable/Disable cache coherent scheduling");
2021
2022                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2023                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2024                                OID_AUTO, "weight1", CTLFLAG_RW,
2025                                &usched_dfly_weight1, 10,
2026                                "Weight selection for current cpu");
2027
2028                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2029                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2030                                OID_AUTO, "weight2", CTLFLAG_RW,
2031                                &usched_dfly_weight2, 5,
2032                                "Weight selection for wakefrom cpu");
2033
2034                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2035                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2036                                OID_AUTO, "weight3", CTLFLAG_RW,
2037                                &usched_dfly_weight3, 50,
2038                                "Weight selection for num threads on queue");
2039
2040                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2041                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2042                                OID_AUTO, "pull_enable", CTLFLAG_RW,
2043                                &usched_dfly_pull_enable, 1,
2044                                "Allow pulls into empty queues");
2045
2046
2047 #if 0
2048                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2049                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2050                                 OID_AUTO, "stick_to_level",
2051                                 CTLTYPE_INT | CTLFLAG_RW,
2052                                 NULL, sizeof usched_dfly_stick_to_level,
2053                                 sysctl_usched_dfly_stick_to_level, "I",
2054                                 "Stick a process to this level.  See sysctl "
2055                                 "parameter hw.cpu_topology.level_description");
2056 #endif
2057         }
2058 }
2059 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2060         dfly_helper_thread_cpu_init, NULL)
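
/*
 * Example (userland, illustrative): the knobs registered above appear
 * under kern.usched_dfly and can be inspected or tuned at runtime, e.g.:
 *
 *      sysctl kern.usched_dfly.rrinterval
 *      sysctl kern.usched_dfly.smt=0
 *
 * On hardware where SMT or cache-coherent scheduling is not detected the
 * corresponding nodes are read-only "NOT SUPPORTED" strings rather than
 * tunable integers.
 */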
2061
2062 #else /* No SMP options - just add the configurable parameters to sysctl */
2063
2064 static void
2065 sched_sysctl_tree_init(void)
2066 {
2067         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2068         usched_dfly_sysctl_tree =
2069                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2070                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2071                                 "usched_dfly", CTLFLAG_RD, 0, "");
2072
2073         /* usched_dfly sysctl configurable parameters */
2074         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2075                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2076                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2077                        &usched_dfly_rrinterval, 0, "");
2078         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2079                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2080                        OID_AUTO, "decay", CTLFLAG_RW,
2081                        &usched_dfly_decay, 0, "Extra decay when not running");
2082         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2083                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2084                        OID_AUTO, "batch_time", CTLFLAG_RW,
2085                        &usched_dfly_batch_time, 0, "Min batch counter value");
2086 }
2087 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2088         sched_sysctl_tree_init, NULL)
2089 #endif