sys/kern/usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
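/*
 * For example: with MAXPRI 128 and NQS 32, PPQ is 4, so a normal-class
 * priority is queued at index (priority & PRIMASK) / PPQ -- a priority
 * of 57 above PRIBASE_NORMAL lands on queue 14 (see dfly_resetpriority()).
 */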
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
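/*
 * For reference: ESTCPUMAX works out to 512 * 32 = 16384 estcpu units
 * (one full sweep of the normal-class queues at ESTCPUPPQ units per
 * queue), and BATCHMAX corresponds to 30 seconds worth of scheduler
 * ticks at ESTCPUFREQ.
 */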
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_rqindex     lwp_usdata.dfly.rqindex
94 #define lwp_estcpu      lwp_usdata.dfly.estcpu
95 #define lwp_batch       lwp_usdata.dfly.batch
96 #define lwp_rqtype      lwp_usdata.dfly.rqtype
97 #define lwp_qcpu        lwp_usdata.dfly.qcpu
98
99 struct usched_dfly_pcpu {
100         struct spinlock spin;
101         struct thread   helper_thread;
102         short           rrcount;
103         short           upri;
104         int             uload;
105         struct lwp      *uschedcp;
106         struct rq       queues[NQS];
107         struct rq       rtqueues[NQS];
108         struct rq       idqueues[NQS];
109         u_int32_t       queuebits;
110         u_int32_t       rtqueuebits;
111         u_int32_t       idqueuebits;
112         int             runqcount;
113         int             cpuid;
114         cpumask_t       cpumask;
115 #ifdef SMP
116         cpu_node_t      *cpunode;
117 #endif
118 };
119
120 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
121
122 static void dfly_acquire_curproc(struct lwp *lp);
123 static void dfly_release_curproc(struct lwp *lp);
124 static void dfly_select_curproc(globaldata_t gd);
125 static void dfly_setrunqueue(struct lwp *lp);
126 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
127                                 sysclock_t cpstamp);
128 static void dfly_recalculate_estcpu(struct lwp *lp);
129 static void dfly_resetpriority(struct lwp *lp);
130 static void dfly_forking(struct lwp *plp, struct lwp *lp);
131 static void dfly_exiting(struct lwp *lp, struct proc *);
132 static void dfly_uload_update(struct lwp *lp);
133 static void dfly_yield(struct lwp *lp);
134 #ifdef SMP
135 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
136 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
137 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
138 #endif
139
140 #ifdef SMP
141 static void dfly_need_user_resched_remote(void *dummy);
142 #endif
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
144                                         int isremote);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147
148 struct usched usched_dfly = {
149         { NULL },
150         "dfly", "Original DragonFly Scheduler",
151         NULL,                   /* default registration */
152         NULL,                   /* default deregistration */
153         dfly_acquire_curproc,
154         dfly_release_curproc,
155         dfly_setrunqueue,
156         dfly_schedulerclock,
157         dfly_recalculate_estcpu,
158         dfly_resetpriority,
159         dfly_forking,
160         dfly_exiting,
161         dfly_uload_update,
162         NULL,                   /* setcpumask not supported */
163         dfly_yield
164 };
165
166 /*
167  * We have NQS (32) run queues per scheduling class.  For the normal
168  * class, there are 128 priorities scaled onto these 32 queues.  New
169  * processes are added to the last entry in each queue, and processes
170  * are selected for running by taking them from the head and maintaining
171  * a simple FIFO arrangement.  Realtime and Idle priority processes have
172  * an explicit 0-31 priority which maps directly onto their class queue
173  * index.  When a queue has something in it, the corresponding bit is
174  * set in the queuebits variable, allowing a single read to determine
175  * the state of all 32 queues and then a ffs() to find the first busy
176  * queue.
177  */
178 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
179 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
180 #ifdef SMP
181 static volatile int dfly_scancpu;
182 /*static struct spinlock dfly_spin = SPINLOCK_INITIALIZER(dfly_spin);*/
183 #endif
184 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
185 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
186 static struct sysctl_oid *usched_dfly_sysctl_tree;
187
188 /* Debug info exposed through debug.* sysctl */
189
190 static int usched_dfly_debug = -1;
191 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
192            &usched_dfly_debug, 0,
193            "Print debug information for this pid");
194
195 static int usched_dfly_pid_debug = -1;
196 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
197            &usched_dfly_pid_debug, 0,
198            "Print KTR debug information for this pid");
199
200 static int usched_dfly_chooser = 0;
201 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
202            &usched_dfly_chooser, 0,
203            "Print cpu selection debug information");
204
205 /* Tuning usched_dfly - configurable through kern.usched_dfly.* */
206 #ifdef SMP
207 static int usched_dfly_smt = 0;
208 static int usched_dfly_cache_coherent = 0;
209 static int usched_dfly_weight1 = 10;
210 static int usched_dfly_weight2 = 5;
211 static int usched_dfly_stick_to_level = 0;
212 #endif
213 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
214 static int usched_dfly_decay = 8;
215 static int usched_dfly_batch_time = 10;
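/*
 * Note: (ESTCPUFREQ + 9) / 10 rounds up, so usched_dfly_rrinterval is
 * roughly one tenth of a second worth of scheduler ticks -- the "10 times
 * a second" round-robin described in dfly_schedulerclock().
 */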
216
217 /* KTR debug printouts */
218
219 KTR_INFO_MASTER(usched);
220
221 #if !defined(KTR_USCHED_DFLY)
222 #define KTR_USCHED_DFLY KTR_ALL
223 #endif
224
225 #if 0
226 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
227     "USCHED_DFLY(dfly_acquire_curproc in user_resched_wanted "
228     "after release: pid %d, cpuid %d, curr_cpuid %d)",
229     pid_t pid, int cpuid, int curr);
230 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
231     "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
232     "curr_cpuid %d)",
233     pid_t pid, int cpuid, int curr);
234 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
235     "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
236     "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
237     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
238 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
239     "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
240     "cpuid %d, curr_cpuid %d)",
241     pid_t pid, int cpuid, int curr);
242
243 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
244     "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
245     "cpuid %d, curr_cpuid %d)",
246     pid_t pid, int cpuid, int curr);
247
248 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
249     "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
250     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
251     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
252
253 #ifdef SMP
254 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
255     "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
256     "cpuid %d, verify_mask %lu)",
257     pid_t pid, int cpuid, cpumask_t mask);
258 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
259     "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
260     "cpuid %d, verify_mask %lu)",
261     pid_t pid, int cpuid, cpumask_t mask);
262
263 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
264     "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
265     "mask %lu, curr_cpuid %d)",
266     pid_t pid, int cpuid, cpumask_t mask, int curr);
267 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
268     "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
269     "cpuid %d, mask %lu, curr_cpuid %d)",
270     pid_t pid, int cpuid, cpumask_t mask, int curr);
271 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
272     "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
273     "cpuid %d, mask %lu, curr_cpuid %d)",
274     pid_t pid, int cpuid, cpumask_t mask, int curr);
275 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
276     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
277     "mask %lu, found_cpuid %d, curr_cpuid %d)",
278     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
279 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
280     "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
281     "try_cpuid %d, curr_cpuid %d)",
282     pid_t pid, int cpuid, int try_cpuid, int curr);
283 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
284     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
285     "mask %lu, found_cpuid %d, curr_cpuid %d)",
286     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
287 #endif
288 #endif
289
290 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
291     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
292     pid_t pid, int old_cpuid, int curr);
293 #ifdef SMP
294 #if 0
295 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
296     "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
297     pid_t pid, int old_cpuid, int curr);
298 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
299     "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
300     "sibling_mask %lu, curr_cpumask %lu)",
301     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
302 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
303     "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
304     "sibling_mask %lu, curr_cpumask: %lu)",
305     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
306 #endif
307
308 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
309     "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
310     int id, pid_t pid, int cpuid);
311 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
312     "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
313     int id, pid_t pid, int cpuid);
314 #if 0
315 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
316     "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
317     int id, cpumask_t tmpmask);
318 #endif
319 #endif
320
321 /*
322  * DFLY_ACQUIRE_CURPROC
323  *
324  * This function is called when the kernel intends to return to userland.
325  * It is responsible for making the thread the current designated userland
326  * thread for this cpu, blocking if necessary.
327  *
328  * The kernel has already depressed our LWKT priority so we must not switch
329  * until we have either assigned or disposed of the thread.
330  *
331  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
332  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
333  * occur, this function is called only under very controlled circumstances.
334  */
335 static void
336 dfly_acquire_curproc(struct lwp *lp)
337 {
338         globaldata_t gd;
339         dfly_pcpu_t dd;
340         thread_t td;
341
342         /*
343          * Make sure we aren't sitting on a tsleep queue.
344          */
345         td = lp->lwp_thread;
346         crit_enter_quick(td);
347         if (td->td_flags & TDF_TSLEEPQ)
348                 tsleep_remove(td);
349         dfly_recalculate_estcpu(lp);
350
351         /*
352          * If a reschedule was requested give another thread the
353          * driver's seat.
354          */
355         if (user_resched_wanted()) {
356                 clear_user_resched();
357                 dfly_release_curproc(lp);
358         }
359
360         /*
361          * Loop until we are the current user thread
362          */
363         gd = mycpu;
364         dd = &dfly_pcpu[gd->gd_cpuid];
365
366         do {
367                 /*
368                  * Process any pending events and higher priority threads.
369                  */
370                 lwkt_yield();
371
372                 /*
373                  * Become the currently scheduled user thread for this cpu
374                  * if we can do so trivially.
375                  *
376                  * We can steal another thread's current-thread designation
377                  * on this cpu because if we are running, that other thread
378                  * cannot be, so we can safely deschedule it.
379                  */
380                 if (dd->uschedcp == lp) {
381                         /*
382                          * We are already the current lwp (hot path).
383                          */
384                         dd->upri = lp->lwp_priority;
385                 } else if (dd->uschedcp == NULL) {
386                         /*
387                          * We can trivially become the current lwp.
388                          */
389                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
390                         dd->uschedcp = lp;
391                         dd->upri = lp->lwp_priority;
392                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
393                 } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
394                         /*
395                          * We can steal the current cpu's lwp designation
396                          * away simply by replacing it.  The other thread
397                          * will stall when it tries to return to userland,
398                          * possibly rescheduling elsewhere when it calls
399                          * setrunqueue.
400                          */
401                         dd->uschedcp = lp;
402                         dd->upri = lp->lwp_priority;
403                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
404                 } else {
405                         /*
406                          * We cannot become the current lwp, place the lp
407                          * on the run-queue of this or another cpu and
408                          * deschedule ourselves.
409                          *
410                          * When we are reactivated we will have another
411                          * chance.
412                          */
413                         lwkt_deschedule(lp->lwp_thread);
414                         dfly_setrunqueue(lp);
415
416                         /*
417                          * Reload after a switch or setrunqueue/switch possibly
418                          * moved us to another cpu.
419                          */
420                         lwkt_switch();
421                         gd = mycpu;
422                         dd = &dfly_pcpu[gd->gd_cpuid];
423                 }
424         } while (dd->uschedcp != lp);
425
426         crit_exit_quick(td);
427         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
428 }
429
430 /*
431  * DFLY_RELEASE_CURPROC
432  *
433  * This routine detaches the current thread from the userland scheduler,
434  * usually because the thread needs to run or block in the kernel (at
435  * kernel priority) for a while.
436  *
437  * This routine is also responsible for selecting a new thread to
438  * make the current thread.
439  *
440  * NOTE: This implementation differs from the dummy example in that
441  * dfly_select_curproc() is able to select the current process, whereas
442  * dummy_select_curproc() is not able to select the current process.
443  * This means we have to NULL out uschedcp.
444  *
445  * Additionally, note that we may already be on a run queue if releasing
446  * via the lwkt_switch() in dfly_setrunqueue().
447  */
448
449 static void
450 dfly_release_curproc(struct lwp *lp)
451 {
452         globaldata_t gd = mycpu;
453         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
454
455         /*
456          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
457          * by wakeup().
458          */
459         lp->lwp_thread->td_wakefromcpu = gd->gd_cpuid;
460
461         if (dd->uschedcp == lp) {
462                 crit_enter();
463                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
464
465                 dd->uschedcp = NULL;    /* don't let lp be selected */
466                 dd->upri = PRIBASE_NULL;
467                 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
468                 dfly_select_curproc(gd);
469                 crit_exit();
470         }
471 }
472
473 /*
474  * DFLY_SELECT_CURPROC
475  *
476  * Select a new current process for this cpu and clear any pending user
477  * reschedule request.  The cpu currently has no current process.
478  *
479  * This routine is also responsible for equal-priority round-robining,
480  * typically triggered from dfly_schedulerclock().  In our dummy example
481  * all the 'user' threads are LWKT scheduled all at once and we just
482  * call lwkt_switch().
483  *
484  * The calling process is not on the queue and cannot be selected.
485  */
486 static
487 void
488 dfly_select_curproc(globaldata_t gd)
489 {
490         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
491         struct lwp *nlp;
492         int cpuid = gd->gd_cpuid;
493
494         crit_enter_gd(gd);
495
496         /*spin_lock(&dfly_spin);*/
497         spin_lock(&dd->spin);
498         nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
499
500         if (nlp) {
501                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
502                 dd->upri = nlp->lwp_priority;
503                 dd->uschedcp = nlp;
504                 dd->rrcount = 0;                /* reset round robin */
505                 spin_unlock(&dd->spin);
506                 /*spin_unlock(&dfly_spin);*/
507 #ifdef SMP
508                 lwkt_acquire(nlp->lwp_thread);
509 #endif
510                 lwkt_schedule(nlp->lwp_thread);
511         } else {
512                 spin_unlock(&dd->spin);
513                 /*spin_unlock(&dfly_spin);*/
514         }
515         crit_exit_gd(gd);
516 }
517
518 /*
519  * Place the specified lwp on the user scheduler's run queue.  This routine
520  * must be called with the thread descheduled.  The lwp must be runnable.
521  * It must not be possible for anyone else to explicitly schedule this thread.
522  *
523  * The thread may be the current thread as a special case.
524  */
525 static void
526 dfly_setrunqueue(struct lwp *lp)
527 {
528 #ifdef SMP
529         globaldata_t rgd;
530 #endif
531         dfly_pcpu_t rdd;
532
533         /*
534          * First validate the process LWKT state.
535          */
536         crit_enter();
537         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
538         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
539             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
540              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
541         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
542
543         /*
544          * NOTE: rdd does not necessarily represent the current cpu.
545          *       Instead it represents the cpu the thread was last
546          *       scheduled on.
547          */
548         rdd = &dfly_pcpu[lp->lwp_qcpu];
549
550         /*
551          * This process is not supposed to be scheduled anywhere or assigned
552          * as the current process anywhere.  Assert the condition.
553          */
554         KKASSERT(rdd->uschedcp != lp);
555
556 #ifndef SMP
557         /*
558          * If we are not SMP we do not have a scheduler helper to kick
559          * and must directly activate the process if none are scheduled.
560          *
561          * This is really only an issue when bootstrapping init since
562          * the caller in all other cases will be a user process, and
563          * even if released (rdd->uschedcp == NULL), that process will
564          * kickstart the scheduler when it returns to user mode from
565          * the kernel.
566          *
567          * NOTE: On SMP we can't just set some other cpu's uschedcp.
568          */
569         if (rdd->uschedcp == NULL) {
570                 spin_lock(&rdd->spin);
571                 if (rdd->uschedcp == NULL) {
572                         atomic_set_cpumask(&dfly_curprocmask, 1);
573                         rdd->uschedcp = lp;
574                         rdd->upri = lp->lwp_priority;
575                         spin_unlock(&rdd->spin);
576                         lwkt_schedule(lp->lwp_thread);
577                         crit_exit();
578                         return;
579                 }
580                 spin_unlock(&rdd->spin);
581         }
582 #endif
583
584 #ifdef SMP
585         /*
586          * XXX fixme.  Could be part of a remrunqueue/setrunqueue
587          * operation when the priority is recalculated, so TDF_MIGRATING
588          * may already be set.
589          */
590         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
591                 lwkt_giveaway(lp->lwp_thread);
592 #endif
593
594 #ifdef SMP
595         /*
596          * Ok, we have to setrunqueue some target cpu and request a reschedule
597          * if necessary.
598          *
599          * We have to choose the best target cpu.  It might not be the current
600          * target even if the current cpu has no running user thread (for
601          * example, because the current cpu might be a hyperthread and its
602          * sibling has a thread assigned).
603          */
604         /*spin_lock(&dfly_spin);*/
605         rdd = dfly_choose_best_queue(lp);
606         rgd = globaldata_find(rdd->cpuid);
607
608         /*
609          * We lose control of lp the moment we release the spinlock after
610          * having placed lp on the queue.  i.e. another cpu could pick it
611          * up and it could exit, or its priority could be further adjusted,
612          * or something like that.
613          *
614          * WARNING! rdd can point to a foreign cpu!
615          */
616         spin_lock(&rdd->spin);
617         dfly_setrunqueue_locked(rdd, lp);
618         /*spin_unlock(&dfly_spin);*/
619
620         if (rgd == mycpu) {
621                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
622                         spin_unlock(&rdd->spin);
623                         if (rdd->uschedcp == NULL) {
624                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
625                                 need_user_resched();
626                         } else {
627                                 need_user_resched();
628                         }
629                 } else {
630                         spin_unlock(&rdd->spin);
631                 }
632         } else {
633                 atomic_clear_cpumask(&dfly_rdyprocmask, rgd->gd_cpumask);
634                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
635                         spin_unlock(&rdd->spin);
636                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
637                                        NULL);
638                 } else {
639                         spin_unlock(&rdd->spin);
640                         wakeup(&rdd->helper_thread);
641                 }
642         }
643 #else
644         /*
645          * Request a reschedule if appropriate.
646          */
647         spin_lock(&rdd->spin);
648         dfly_setrunqueue_locked(rdd, lp);
649         spin_unlock(&rdd->spin);
650         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
651                 need_user_resched();
652         }
653 #endif
654         crit_exit();
655 }
656
657 /*
658  * This routine is called from a systimer IPI.  It MUST be MP-safe and
659  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
660  * each cpu.
661  */
662 static
663 void
664 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
665 {
666         globaldata_t gd = mycpu;
667         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
668
669         /*
670          * Do we need to round-robin?  We round-robin 10 times a second.
671          * This should only occur for cpu-bound batch processes.
672          */
673         if (++dd->rrcount >= usched_dfly_rrinterval) {
674                 dd->rrcount = 0;
675                 need_user_resched();
676         }
677
678         /*
679          * Adjust estcpu upward using a real time equivalent calculation.
680          */
681         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
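        /*
         * Each tick adds ESTCPUMAX / ESTCPUFREQ (+1) units, so roughly one
         * second of continuous cpu saturates lwp_estcpu at ESTCPUMAX unless
         * dfly_recalculate_estcpu() decays it back down.
         */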
682
683         /*
684          * Spinlocks also hold a critical section, so none should be
685          * active here.
686          */
687         KKASSERT(gd->gd_spinlocks_wr == 0);
688
689         dfly_resetpriority(lp);
690 }
691
692 /*
693  * Called from acquire and from kern_synch's one-second timer (one of the
694  * callout helper threads) with a critical section held.
695  *
696  * Decay p_estcpu based on the number of ticks we haven't been running
697  * and our p_nice.  As the load increases each process observes a larger
698  * number of idle ticks (because other processes are running in them).
699  * This observation leads to a larger correction which tends to make the
700  * system more 'batchy'.
701  *
702  * Note that no recalculation occurs for a process which sleeps and wakes
703  * up in the same tick.  That is, a system doing thousands of context
704  * switches per second will still only do serious estcpu calculations
705  * ESTCPUFREQ times per second.
706  */
707 static
708 void
709 dfly_recalculate_estcpu(struct lwp *lp)
710 {
711         globaldata_t gd = mycpu;
712         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
713         sysclock_t cpbase;
714         sysclock_t ttlticks;
715         int estcpu;
716         int decay_factor;
717
718         /*
719          * We have to subtract periodic to get the last schedclock
720          * timeout time, otherwise we would get the upcoming timeout.
721          * Keep in mind that a process can migrate between cpus and
722          * while the scheduler clock should be very close, boundary
723          * conditions could lead to a small negative delta.
724          */
725         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
726
727         if (lp->lwp_slptime > 1) {
728                 /*
729                  * Too much time has passed, do a coarse correction.
730                  */
731                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
732                 dfly_resetpriority(lp);
733                 lp->lwp_cpbase = cpbase;
734                 lp->lwp_cpticks = 0;
735                 lp->lwp_batch -= ESTCPUFREQ;
736                 if (lp->lwp_batch < 0)
737                         lp->lwp_batch = 0;
738         } else if (lp->lwp_cpbase != cpbase) {
739                 /*
740                  * Adjust estcpu if we are in a different tick.  Don't waste
741                  * time if we are in the same tick.
742                  *
743                  * First calculate the number of ticks in the measurement
744                  * interval.  The ttlticks calculation can wind up 0 due to
745                  * a bug in the handling of lwp_slptime  (as yet not found),
746                  * so make sure we do not get a divide by 0 panic.
747                  */
748                 ttlticks = (cpbase - lp->lwp_cpbase) /
749                            gd->gd_schedclock.periodic;
750                 if (ttlticks < 0) {
751                         ttlticks = 0;
752                         lp->lwp_cpbase = cpbase;
753                 }
754                 if (ttlticks == 0)
755                         return;
756                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
757
758                 /*
759                  * Calculate the percentage of one cpu used factoring in ncpus
760                  * and the load and adjust estcpu.  Handle degenerate cases
761                  * by adding 1 to runqcount.
762                  *
763                  * estcpu is scaled by ESTCPUMAX.
764                  *
765                  * runqcount is the excess number of user processes
766                  * that cannot be immediately scheduled to cpus.  We want
767                  * to count these as running to avoid range compression
768                  * in the base calculation (which is the actual percentage
769                  * of one cpu used).
770                  */
771                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
772                          (dd->runqcount + ncpus) / (ncpus * ttlticks);
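                /*
                 * For example, a thread that consumed every tick of the
                 * interval on an otherwise idle system (lwp_cpticks ==
                 * ttlticks, runqcount == 0) works out to exactly ESTCPUMAX,
                 * i.e. 100% of one cpu.
                 */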
773
774                 /*
775                  * If estcpu is > 50% we become more batch-like
776                  * If estcpu is <= 50% we become less batch-like
777                  *
778                  * It takes 30 cpu seconds to traverse the entire range.
779                  */
780                 if (estcpu > ESTCPUMAX / 2) {
781                         lp->lwp_batch += ttlticks;
782                         if (lp->lwp_batch > BATCHMAX)
783                                 lp->lwp_batch = BATCHMAX;
784                 } else {
785                         lp->lwp_batch -= ttlticks;
786                         if (lp->lwp_batch < 0)
787                                 lp->lwp_batch = 0;
788                 }
789
790                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
791                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
792                                 lp->lwp_proc->p_pid, lp,
793                                 estcpu, lp->lwp_estcpu,
794                                 lp->lwp_batch,
795                                 lp->lwp_cpticks, ttlticks);
796                 }
797
798                 /*
799                  * Adjust lp->lwp_estcpu.  The decay factor determines how
800                  * quickly lwp_estcpu collapses to its realtime calculation.
801                  * A slower collapse gives us a more accurate number but
802                  * can cause a cpu hog to eat too much cpu before the
803                  * scheduler decides to downgrade it.
804                  *
805                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
806                  *       and not here, but we must still ensure that a
807                  *       cpu-bound nice -20 process does not completely
808                  *       override a cpu-bound nice +20 process.
809                  *
810                  * NOTE: We must use ESTCPULIM() here to deal with any
811                  *       overshoot.
812                  */
813                 decay_factor = usched_dfly_decay;
814                 if (decay_factor < 1)
815                         decay_factor = 1;
816                 if (decay_factor > 1024)
817                         decay_factor = 1024;
818
819                 lp->lwp_estcpu = ESTCPULIM(
820                         (lp->lwp_estcpu * decay_factor + estcpu) /
821                         (decay_factor + 1));
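                /*
                 * With the default decay factor of 8 this is a moving
                 * average that gives the new sample a 1/9 weight, so
                 * lwp_estcpu converges on the instantaneous figure over
                 * several updates instead of jumping to it.
                 */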
822
823                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
824                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
825                 dfly_resetpriority(lp);
826                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
827                 lp->lwp_cpticks = 0;
828         }
829 }
830
831 /*
832  * Compute the priority of a process when running in user mode.
833  * Arrange to reschedule if the resulting priority is better
834  * than that of the current process.
835  *
836  * This routine may be called with any process.
837  *
838  * This routine is called by fork1() for initial setup with the process
839  * off the run queue, and also may be called normally with the process on or
840  * off the run queue.
841  */
842 static void
843 dfly_resetpriority(struct lwp *lp)
844 {
845         dfly_pcpu_t rdd;
846         int newpriority;
847         u_short newrqtype;
848         int rcpu;
849         int checkpri;
850         int estcpu;
851
852         crit_enter();
853
854         /*
855          * Lock the scheduler (lp) belongs to.  This can be on a different
856          * cpu.  Handle races.  This loop breaks out with the appropriate
857          * rdd locked.
858          */
859         for (;;) {
860                 rcpu = lp->lwp_qcpu;
861                 rdd = &dfly_pcpu[rcpu];
862                 spin_lock(&rdd->spin);
863                 if (rcpu == lp->lwp_qcpu)
864                         break;
865                 spin_unlock(&rdd->spin);
866         }
867
868         /*
869          * Calculate the new priority and queue type
870          */
871         newrqtype = lp->lwp_rtprio.type;
872
873         switch(newrqtype) {
874         case RTP_PRIO_REALTIME:
875         case RTP_PRIO_FIFO:
876                 newpriority = PRIBASE_REALTIME +
877                              (lp->lwp_rtprio.prio & PRIMASK);
878                 break;
879         case RTP_PRIO_NORMAL:
880                 /*
881                  * Detune estcpu based on batchiness.  lwp_batch ranges
882                  * from 0 to  BATCHMAX.  Limit estcpu for the sake of
883                  * the priority calculation to between 50% and 100%.
884                  */
885                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
886                          (BATCHMAX * 2);
887
888                 /*
889                  * p_nice piece         Adds (0-40) * 2         0-80
890                  * estcpu               Adds 16384  * 4 / 512   0-128
891                  */
892                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
893                 newpriority += estcpu * PPQ / ESTCPUPPQ;
894                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
895                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
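                /*
                 * With the defaults the divisor works out to
                 * 41 * 4 / 2 + 16384 * 4 / 512 = 210, so the raw 0-208 sum
                 * is scaled by 128/210 and the result always fits under
                 * PRIMASK.
                 */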
896                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
897                 break;
898         case RTP_PRIO_IDLE:
899                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
900                 break;
901         case RTP_PRIO_THREAD:
902                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
903                 break;
904         default:
905                 panic("Bad RTP_PRIO %d", newrqtype);
906                 /* NOT REACHED */
907         }
908
909         /*
910          * The newpriority incorporates the queue type so do a simple masked
911          * check to determine if the process has moved to another queue.  If
912          * it has, and it is currently on a run queue, then move it.
913          *
914          * Since uload is ~PPQMASK masked, no modifications are necessary if
915          * we end up in the same run queue.
916          */
917         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
918                 int delta_uload;
919
920                 /*
921                  * uload can change, calculate the adjustment to reduce
922                  * edge cases since choosers scan the cpu topology without
923                  * locks.
924                  */
925                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
926                         delta_uload =
927                                 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
928                                 ((newpriority & ~PPQMASK) & PRIMASK);
929                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
930                                        delta_uload);
931                 }
932                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
933                         dfly_remrunqueue_locked(rdd, lp);
934                         lp->lwp_priority = newpriority;
935                         lp->lwp_rqtype = newrqtype;
936                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
937                         dfly_setrunqueue_locked(rdd, lp);
938                         checkpri = 1;
939                 } else {
940                         lp->lwp_priority = newpriority;
941                         lp->lwp_rqtype = newrqtype;
942                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
943                         checkpri = 0;
944                 }
945         } else {
946                 /*
947                  * In the same PPQ, uload cannot change.
948                  */
949                 lp->lwp_priority = newpriority;
950                 checkpri = 1;
951                 rcpu = -1;
952         }
953
954         /*
955          * Determine if we need to reschedule the target cpu.  This only
956          * occurs if the LWP is already on a scheduler queue, which means
957          * that idle cpu notification has already occurred.  At most we
958          * need only issue a need_user_resched() on the appropriate cpu.
959          *
960          * The LWP may be owned by a CPU different from the current one,
961          * in which case dd->uschedcp may be modified without an MP lock
962          * or a spinlock held.  The worst that happens is that the code
963          * below causes a spurious need_user_resched() on the target CPU
964          * and dd->upri to be wrong for a short period of time, both of
965          * which are harmless.
966          *
967          * If checkpri is 0 we are adjusting the priority of the current
968          * process, possibly higher (less desirable), so ignore the upri
969          * check which will fail in that case.
970          */
971         if (rcpu >= 0) {
972                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
973                     (checkpri == 0 ||
974                      (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
975 #ifdef SMP
976                         if (rcpu == mycpu->gd_cpuid) {
977                                 spin_unlock(&rdd->spin);
978                                 need_user_resched();
979                         } else {
980                                 atomic_clear_cpumask(&dfly_rdyprocmask,
981                                                      CPUMASK(rcpu));
982                                 spin_unlock(&rdd->spin);
983                                 lwkt_send_ipiq(globaldata_find(rcpu),
984                                                dfly_need_user_resched_remote,
985                                                NULL);
986                         }
987 #else
988                         spin_unlock(&rdd->spin);
989                         need_user_resched();
990 #endif
991                 } else {
992                         spin_unlock(&rdd->spin);
993                 }
994         } else {
995                 spin_unlock(&rdd->spin);
996         }
997         crit_exit();
998 }
999
1000 static
1001 void
1002 dfly_yield(struct lwp *lp)
1003 {
1004 #if 0
1005         /* FUTURE (or something similar) */
1006         switch(lp->lwp_rqtype) {
1007         case RTP_PRIO_NORMAL:
1008                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1009                 break;
1010         default:
1011                 break;
1012         }
1013 #endif
1014         need_user_resched();
1015 }
1016
1017 /*
1018  * Called from fork1() when a new child process is being created.
1019  *
1020  * Give the child process an initial estcpu that is more batchy than
1021  * its parent and dock the parent for the fork (but do not
1022  * reschedule the parent).   This comprises the main part of our batch
1023  * detection heuristic for both parallel forking and sequential execs.
1024  *
1025  * XXX lwp should be "spawning" instead of "forking"
1026  */
1027 static void
1028 dfly_forking(struct lwp *plp, struct lwp *lp)
1029 {
1030         /*
1031          * Put the child 4 queue slots (out of 32) higher than the parent
1032          * (less desirable than the parent).
1033          */
1034         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1035
1036         /*
1037          * The batch status of children always starts out centerline
1038          * and will inch up or inch down as appropriate.  It takes roughly
1039          * 15 seconds of >50% cpu to hit the limit.
1040          */
1041         lp->lwp_batch = BATCHMAX / 2;
1042
1043         /*
1044          * Dock the parent a cost for the fork, protecting us from fork
1045          * bombs.  If the parent is forking quickly make the child more
1046          * batchy.
1047          */
1048         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
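        /*
         * ESTCPUPPQ / 16 is 32 estcpu units per fork, so roughly 16 forks
         * in quick succession cost the parent one full priority queue.
         */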
1049 }
1050
1051 /*
1052  * Called when a lwp is being removed from this scheduler, typically
1053  * during lwp_exit().
1054  */
1055 static void
1056 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1057 {
1058         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1059
1060         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1061                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1062                 atomic_add_int(&dd->uload,
1063                                -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1064         }
1065 }
1066
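/*
 * Keep this cpu's aggregate user load (uload) in sync with whether lp is
 * currently on an LWKT run queue.  uload sums the masked priorities of
 * runnable user threads and is what dfly_choose_best_queue() and
 * dfly_choose_worst_queue() compare when balancing cpus.
 */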
1067 static void
1068 dfly_uload_update(struct lwp *lp)
1069 {
1070         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1071
1072         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1073                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1074                         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1075                         atomic_add_int(&dd->uload,
1076                                    ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1077                 }
1078         } else {
1079                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1080                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1081                         atomic_add_int(&dd->uload,
1082                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1083                 }
1084         }
1085 }
1086
1087 /*
1088  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1089  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1090  * has a better or equal priority than the process that would otherwise be
1091  * chosen, NULL is returned.
1092  *
1093  * Until we fix the RUNQ code the chklp test has to be strict or we may
1094  * bounce between processes trying to acquire the current process designation.
1095  *
1096  * Must be called with dd->spin exclusively held.  The spinlock is
1097  * left intact through the entire routine.
1098  *
1099  * If chklp is NULL this function will dive into other cpus' queues looking
1100  * for work if the current queue is empty.
1101  */
1102 static
1103 struct lwp *
1104 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
1105 {
1106 #ifdef SMP
1107         dfly_pcpu_t xdd;
1108 #endif
1109         struct lwp *lp;
1110         struct rq *q;
1111         u_int32_t *which, *which2;
1112         u_int32_t pri;
1113         u_int32_t rtqbits;
1114         u_int32_t tsqbits;
1115         u_int32_t idqbits;
1116
1117         rtqbits = dd->rtqueuebits;
1118         tsqbits = dd->queuebits;
1119         idqbits = dd->idqueuebits;
1120
1121         if (rtqbits) {
1122                 pri = bsfl(rtqbits);
1123                 q = &dd->rtqueues[pri];
1124                 which = &dd->rtqueuebits;
1125                 which2 = &rtqbits;
1126         } else if (tsqbits) {
1127                 pri = bsfl(tsqbits);
1128                 q = &dd->queues[pri];
1129                 which = &dd->queuebits;
1130                 which2 = &tsqbits;
1131         } else if (idqbits) {
1132                 pri = bsfl(idqbits);
1133                 q = &dd->idqueues[pri];
1134                 which = &dd->idqueuebits;
1135                 which2 = &idqbits;
1136         } else
1137 #ifdef SMP
1138         if (isremote) {
1139                 /*
1140                  * Disallow remote->remote recursion
1141                  */
1142                 return (NULL);
1143         } else {
1144                 /*
1145                  * Pull a runnable thread from a remote run queue.  We have
1146                  * to adjust qcpu and uload manually because the lp we return
1147                  * might be assigned directly to uschedcp (setrunqueue might
1148                  * not be called).
1149                  */
1150                 xdd = dfly_choose_worst_queue(dd);
1151                 if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
1152                         lp = dfly_chooseproc_locked(xdd, NULL, 1);
1153                         if (lp) {
1154                                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1155                                         atomic_add_int(&xdd->uload,
1156                                             -((lp->lwp_priority & ~PPQMASK) &
1157                                               PRIMASK));
1158                                 }
1159                                 lp->lwp_qcpu = dd->cpuid;
1160                                 atomic_add_int(&dd->uload,
1161                                     ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1162                                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1163                         }
1164                         spin_unlock(&xdd->spin);
1165                 } else {
1166                         lp = NULL;
1167                 }
1168                 return (lp);
1169         }
1170 #else
1171         {
1172                 return NULL;
1173         }
1174 #endif
1175         lp = TAILQ_FIRST(q);
1176         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1177
1178         /*
1179          * If the passed lwp <chklp> is reasonably close to the selected
1180          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1181          *
1182          * Note that we must err on the side of <chklp> to avoid bouncing
1183          * between threads in the acquire code.
1184          */
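        /*
         * The PPQ of slop below means lp must be at least one full queue
         * (four priority levels) better than chklp before it displaces it.
         */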
1185         if (chklp) {
1186                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1187                         return(NULL);
1188         }
1189
1190         KTR_COND_LOG(usched_chooseproc,
1191             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1192             lp->lwp_proc->p_pid,
1193             lp->lwp_thread->td_gd->gd_cpuid,
1194             mycpu->gd_cpuid);
1195
1196         TAILQ_REMOVE(q, lp, lwp_procq);
1197         --dd->runqcount;
1198         if (TAILQ_EMPTY(q))
1199                 *which &= ~(1 << pri);
1200         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1201         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1202
1203         return lp;
1204 }
1205
1206 #ifdef SMP
1207
1208 /*
1209  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1210  *
1211  * Choose a cpu node to schedule lp on, hopefully near its current
1212  * node.  We give the current node a modest advantage for obvious reasons.
1213  *
1214  * We also give the node the thread was woken up FROM a slight advantage
1215  * in order to try to schedule paired threads which synchronize/block waiting
1216  * for each other fairly close to each other.  Similarly in a network setting
1217  * this feature will also attempt to place a user process near the kernel
1218  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1219  * algorithm as it heuristically groups synchronizing processes for locality
1220  * of reference in multi-socket systems.
1221  *
1222  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1223  *
1224  * When the topology is known choose a cpu whose group has, in
1225  * aggregate, the lowest weighted load.
1226  */
1227 static
1228 dfly_pcpu_t
1229 dfly_choose_best_queue(struct lwp *lp)
1230 {
1231         cpumask_t mask;
1232         cpu_node_t *cpup;
1233         cpu_node_t *cpun;
1234         cpu_node_t *cpub;
1235         dfly_pcpu_t dd1 = &dfly_pcpu[lp->lwp_qcpu];
1236         dfly_pcpu_t dd2 = &dfly_pcpu[lp->lwp_thread->td_wakefromcpu];
1237         dfly_pcpu_t rdd;
1238         int cpuid;
1239         int n;
1240         int load;
1241         int lowest_load;
1242         int level;
1243
1244         /*
1245          * When the topology is unknown choose a random cpu that is hopefully
1246          * idle.
1247          */
1248         if (dd1->cpunode == NULL)
1249                 return (dfly_choose_queue_simple(dd1, lp));
1250
1251         /*
1252          * When the topology is known choose a cpu whose group has, in
1253          * aggregate, the lowest weighted load.
1254          */
1255         cpup = root_cpu_node;
1256         rdd = dd1;
1257         level = cpu_topology_levels_number;
1258
1259         while (cpup) {
1260                 /*
1261                  * Degenerate case super-root
1262                  */
1263                 if (cpup->child_node && cpup->child_no == 1) {
1264                         cpup = cpup->child_node;
1265                         --level;
1266                         continue;
1267                 }
1268
1269                 /*
1270                  * Terminal cpunode
1271                  */
1272                 if (cpup->child_node == NULL) {
1273                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1274                         break;
1275                 }
1276
1277                 cpub = NULL;
1278                 lowest_load = 0x7FFFFFFF;
1279
1280                 for (n = 0; n < cpup->child_no; ++n) {
1281                         /*
1282                          * Accumulate load information for all cpus
1283                          * which are members of this node.
1284                          */
1285                         cpun = &cpup->child_node[n];
1286                         mask = cpun->members & usched_global_cpumask &
1287                                smp_active_mask & lp->lwp_cpumask;
1288                         if (mask == 0)
1289                                 continue;
1290                         load = 0;
1291                         while (mask) {
1292                                 cpuid = BSFCPUMASK(mask);
1293                                 load += dfly_pcpu[cpuid].uload;
1294                                 mask &= ~CPUMASK(cpuid);
1295                         }
1296
1297                         /*
1298                          * Give a slight advantage to nearby cpus.
1299                          */
1300                         if (cpun->members & dd1->cpumask)
1301                                 load -= PPQ * level * usched_dfly_weight1 / 10;
1302                         else if (cpun->members & dd2->cpumask)
1303                                 load -= PPQ * level * usched_dfly_weight2 / 10;
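                        /*
                         * With the default weight1 = 10 and weight2 = 5
                         * these bonuses are PPQ * level and PPQ * level / 2
                         * respectively -- enough to break near-ties toward
                         * nearby nodes without masking real load.
                         */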
1304
1305                         /*
1306                          * Calculate the best load
1307                          */
1308                         if (cpub == NULL || lowest_load > load ||
1309                             (lowest_load == load &&
1310                              (cpun->members & dd1->cpumask))
1311                         ) {
1312                                 lowest_load = load;
1313                                 cpub = cpun;
1314                         }
1315                 }
1316                 cpup = cpub;
1317                 --level;
1318         }
1319         if (usched_dfly_chooser)
1320                 kprintf("lp %02d->%02d %s\n",
1321                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1322         return (rdd);
1323 }
1324
1325 /*
1326  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1327  *
1328  * Choose the worst queue close to dd's cpu node with a non-empty runq.
1329  *
1330  * This is used by the thread chooser when the current cpu's queues are
1331  * empty to steal a thread from another cpu's queue.  We want to offload
1332  * the most heavily-loaded queue.
1333  */
1334 static
1335 dfly_pcpu_t
1336 dfly_choose_worst_queue(dfly_pcpu_t dd)
1337 {
1338         cpumask_t mask;
1339         cpu_node_t *cpup;
1340         cpu_node_t *cpun;
1341         cpu_node_t *cpub;
1342         dfly_pcpu_t rdd;
1343         int cpuid;
1344         int n;
1345         int load;
1346         int highest_load;
1347         int uloadok;
1348         int level;
1349
1350         /*
1351          * When the topology is unknown we have no basis for picking a
1352          * queue to steal from, so give up.
1353          */
1354         if (dd->cpunode == NULL) {
1355                 return (NULL);
1356         }
1357
1358         /*
1359          * When the topology is known choose the cpu whose group has, in
1360          * aggregate, the highest weighted load.
1361          */
1362         cpup = root_cpu_node;
1363         rdd = dd;
1364         level = cpu_topology_levels_number;
1365         while (cpup) {
1366                 /*
1367                  * Degenerate case super-root
1368                  */
1369                 if (cpup->child_node && cpup->child_no == 1) {
1370                         cpup = cpup->child_node;
1371                         --level;
1372                         continue;
1373                 }
1374
1375                 /*
1376                  * Terminal cpunode
1377                  */
1378                 if (cpup->child_node == NULL) {
1379                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1380                         break;
1381                 }
1382
1383                 cpub = NULL;
1384                 highest_load = 0;
1385
1386                 for (n = 0; n < cpup->child_no; ++n) {
1387                         /*
1388                          * Accumulate load information for all cpus
1389                          * which are members of this node.
1390                          */
1391                         cpun = &cpup->child_node[n];
1392                         mask = cpun->members & usched_global_cpumask &
1393                                smp_active_mask;
1394                         if (mask == 0)
1395                                 continue;
1396                         load = 0;
1397                         uloadok = 0;
1398                         while (mask) {
1399                                 cpuid = BSFCPUMASK(mask);
1400                                 load += dfly_pcpu[cpuid].uload;
1401                                 if (dfly_pcpu[cpuid].uload)
1402                                         uloadok = 1;
1403                                 mask &= ~CPUMASK(cpuid);
1404                         }
1405
1406                         /*
1407                          * Give a slight advantage to nearby cpus.
1408                          */
1409                         if (cpun->members & dd->cpumask)
1410                                 load += PPQ * level;
1411
1412                         /*
1413                          * The best candidate is the one with the worst
1414                          * (highest) load.  Prefer candidates that are
1415                          * closer to our cpu.
1416                          */
1417                         if (uloadok &&
1418                             (cpub == NULL || highest_load < load ||
1419                              (highest_load == load &&
1420                               (cpun->members & dd->cpumask)))
1421                         ) {
1422                                 highest_load = load;
1423                                 cpub = cpun;
1424                         }
1425                 }
1426                 cpup = cpub;
1427                 --level;
1428         }
1429         return (rdd);
1430 }
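
/*
 * Note on the bias above (illustrative): with level = 1 the child node
 * containing dd's cpu has PPQ extra units added to its aggregate load.
 * Because the highest load wins in this function, otherwise-equal
 * candidates that are topologically close to dd are preferred as pull
 * sources.
 */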
1431
1432 static
1433 dfly_pcpu_t
1434 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1435 {
1436         dfly_pcpu_t rdd;
1437         cpumask_t tmpmask;
1438         cpumask_t mask;
1439         int cpuid;
1440
1441         /*
1442          * Fall back to the original heuristic: select a semi-random cpu,
1443          * first checking cpus not currently running a user thread.
1444          */
1445         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1446         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1447                smp_active_mask & usched_global_cpumask;
1448
1449         while (mask) {
1450                 tmpmask = ~(CPUMASK(cpuid) - 1);
1451                 if (mask & tmpmask)
1452                         cpuid = BSFCPUMASK(mask & tmpmask);
1453                 else
1454                         cpuid = BSFCPUMASK(mask);
1455                 rdd = &dfly_pcpu[cpuid];
1456
1457                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1458                         goto found;
1459                 mask &= ~CPUMASK(cpuid);
1460         }
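
        /*
         * Scan illustration (hypothetical value): if the hash above yields
         * cpuid = 5, ~(CPUMASK(5) - 1) masks off cpus 0-4, so BSFCPUMASK()
         * first returns the lowest ready cpu >= 5; once none remain the
         * scan wraps around to the lowest ready cpu overall.
         */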
1461
1462         /*
1463          * Then cpus which might have a currently running lp
1464          */
1465         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1466         mask = dfly_curprocmask & dfly_rdyprocmask &
1467                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1468
1469         while (mask) {
1470                 tmpmask = ~(CPUMASK(cpuid) - 1);
1471                 if (mask & tmpmask)
1472                         cpuid = BSFCPUMASK(mask & tmpmask);
1473                 else
1474                         cpuid = BSFCPUMASK(mask);
1475                 rdd = &dfly_pcpu[cpuid];
1476
1477                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1478                         goto found;
1479                 mask &= ~CPUMASK(cpuid);
1480         }
1481
1482         /*
1483          * If we cannot find a suitable cpu we reload from dfly_scancpu
1484          * and round-robin.  Other cpus will pick up as they release their
1485          * current lwps or become ready.
1486          *
1487          * Avoid a degenerate system lockup case if usched_global_cpumask
1488          * is set to 0 or otherwise does not cover lwp_cpumask.
1489          *
1490          * We only kick the target helper thread in this case; we do not
1491          * set the user resched flag here.
1492          */
1493         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1494         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1495                 cpuid = 0;
1496         rdd = &dfly_pcpu[cpuid];
1497 found:
1498         return (rdd);
1499 }
1500
1501 static
1502 void
1503 dfly_need_user_resched_remote(void *dummy)
1504 {
1505         globaldata_t gd = mycpu;
1506         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1507
1508         need_user_resched();
1509
1510         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1511         wakeup_mycpu(&dd->helper_thread);
1512 }
1513
1514 #endif
1515
1516 /*
1517  * dfly_remrunqueue_locked() removes a given process from the run queue
1518  * that it is on, clearing the queue's bitmask bit if it becomes empty.
1519  *
1520  * Note that the user process scheduler is different from the LWKT scheduler.
1521  * The user process scheduler only manages user processes but it uses LWKT
1522  * underneath, and a user process operating in the kernel will often be
1523  * 'released' from our management.
1524  *
1525  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1526  * to sleep or the lwp is moved to a different runq.
1527  */
1528 static void
1529 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1530 {
1531         struct rq *q;
1532         u_int32_t *which;
1533         u_int8_t pri;
1534
1535         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1536         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1537         --rdd->runqcount;
1538         /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
1539         KKASSERT(rdd->runqcount >= 0);
1540
1541         pri = lp->lwp_rqindex;
1542         switch(lp->lwp_rqtype) {
1543         case RTP_PRIO_NORMAL:
1544                 q = &rdd->queues[pri];
1545                 which = &rdd->queuebits;
1546                 break;
1547         case RTP_PRIO_REALTIME:
1548         case RTP_PRIO_FIFO:
1549                 q = &rdd->rtqueues[pri];
1550                 which = &rdd->rtqueuebits;
1551                 break;
1552         case RTP_PRIO_IDLE:
1553                 q = &rdd->idqueues[pri];
1554                 which = &rdd->idqueuebits;
1555                 break;
1556         default:
1557                 panic("remrunqueue: invalid rtprio type");
1558                 /* NOT REACHED */
1559         }
1560         TAILQ_REMOVE(q, lp, lwp_procq);
1561         if (TAILQ_EMPTY(q)) {
1562                 KASSERT((*which & (1 << pri)) != 0,
1563                         ("remrunqueue: remove from empty queue"));
1564                 *which &= ~(1 << pri);
1565         }
1566 }
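
/*
 * Bookkeeping example (hypothetical index): if lwp_rqindex is 3 and the
 * removal above empties rdd->queues[3], bit 3 (1 << 3) is cleared in
 * rdd->queuebits, keeping the non-empty-queue bitmap consistent.
 */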
1567
1568 /*
1569  * dfly_setrunqueue_locked()
1570  *
1571  * Add a process whose rqtype and rqindex have previously been calculated
1572  * onto the appropriate run queue.   Determine if the addition requires
1573  * a reschedule on a cpu and return the cpuid or -1.
1574  *
1575  * NOTE:          Lower priorities are better priorities.
1576  *
1577  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1578  *                sum of the rough lwp_priority for all running and runnable
1579  *                processes.  Lower priority processes (higher lwp_priority
1580  *                values) actually DO count as more load, not less, because
1581  *                these are the programs which require the most care with
1582  *                regard to cpu selection.
1583  */
1584 static void
1585 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1586 {
1587         struct rq *q;
1588         u_int32_t *which;
1589         int pri;
1590
1591         if (lp->lwp_qcpu != rdd->cpuid) {
1592                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1593                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1594                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1595                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1596                 }
1597                 lp->lwp_qcpu = rdd->cpuid;
1598         }
1599
1600         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1601         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1602         ++rdd->runqcount;
1603         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1604                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1605                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1606                                (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1607         }
1608
1609         pri = lp->lwp_rqindex;
1610
1611         switch(lp->lwp_rqtype) {
1612         case RTP_PRIO_NORMAL:
1613                 q = &rdd->queues[pri];
1614                 which = &rdd->queuebits;
1615                 break;
1616         case RTP_PRIO_REALTIME:
1617         case RTP_PRIO_FIFO:
1618                 q = &rdd->rtqueues[pri];
1619                 which = &rdd->rtqueuebits;
1620                 break;
1621         case RTP_PRIO_IDLE:
1622                 q = &rdd->idqueues[pri];
1623                 which = &rdd->idqueuebits;
1624                 break;
1625         default:
1626                 panic("setrunqueue: invalid rtprio type");
1627                 /* NOT REACHED */
1628         }
1629
1630         /*
1631          * Add to the correct queue and set the appropriate bit.  If no
1632          * lower priority (i.e. better) processes are in the queue then
1633          * we want a reschedule, calculate the best cpu for the job.
1634          *
1635          * Always run reschedules on the LWP's original cpu.
1636          */
1637         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1638         *which |= 1 << pri;
1639 }
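
/*
 * uload accounting sketch (illustrative values): each lwp contributes
 * (lwp_priority & ~PPQMASK) & PRIMASK, i.e. its priority rounded down to
 * a queue boundary.  Assuming PPQ = 4, a priority of 57 contributes 56
 * units; requeueing the lwp on another cpu subtracts that amount from the
 * old cpu's uload (above) and adds it to the new cpu's uload.
 */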
1640
1641 #ifdef SMP
1642
1643 /*
1644  * For SMP systems a user scheduler helper thread is created for each
1645  * cpu and is used to allow one cpu to wakeup another for the purposes of
1646  * scheduling userland threads from setrunqueue().
1647  *
1648  * UP systems do not need the helper since there is only one cpu.
1649  *
1650  * We can't use the idle thread for this because we might block.
1651  * Additionally, doing things this way allows us to HLT idle cpus
1652  * on MP systems.
1653  */
1654 static void
1655 dfly_helper_thread(void *dummy)
1656 {
1657     globaldata_t gd;
1658     dfly_pcpu_t  dd;
1659     struct lwp *nlp;
1660     cpumask_t mask;
1661     int cpuid;
1662
1663     gd = mycpu;
1664     cpuid = gd->gd_cpuid;       /* doesn't change */
1665     mask = gd->gd_cpumask;      /* doesn't change */
1666     dd = &dfly_pcpu[cpuid];
1667
1668     /*
1669      * Since we only want to be woken up when no user processes
1670      * are scheduled on a cpu, run at an ultra low priority.
1671      */
1672     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1673
1674     tsleep(&dd->helper_thread, 0, "schslp", 0);
1675
1676     for (;;) {
1677         /*
1678          * We use the LWKT deschedule-interlock trick to avoid racing
1679          * dfly_rdyprocmask.  This means we cannot block through to the
1680          * manual lwkt_switch() call we make below.
1681          */
1682         crit_enter_gd(gd);
1683         tsleep_interlock(&dd->helper_thread, 0);
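        /*
         * NOTE: the PINTERLOCKED tsleep() at the bottom of this loop pairs
         * with the interlock registered here, so a wakeup on
         * &dd->helper_thread between now and then is not lost.
         */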
1684
1685         /*spin_lock(&dfly_spin);*/
1686         spin_lock(&dd->spin);
1687
1688         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1689         clear_user_resched();   /* This satisfies the reschedule request */
1690         dd->rrcount = 0;        /* Reset the round-robin counter */
1691
1692         if ((dfly_curprocmask & mask) == 0) {
1693                 /*
1694                  * No thread is currently scheduled.
1695                  */
1696                 KKASSERT(dd->uschedcp == NULL);
1697                 if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
1698                         KTR_COND_LOG(usched_sched_thread_no_process,
1699                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1700                             gd->gd_cpuid,
1701                             nlp->lwp_proc->p_pid,
1702                             nlp->lwp_thread->td_gd->gd_cpuid);
1703
1704                         atomic_set_cpumask(&dfly_curprocmask, mask);
1705                         dd->upri = nlp->lwp_priority;
1706                         dd->uschedcp = nlp;
1707                         dd->rrcount = 0;        /* reset round robin */
1708                         spin_unlock(&dd->spin);
1709                         /*spin_unlock(&dfly_spin);*/
1710                         lwkt_acquire(nlp->lwp_thread);
1711                         lwkt_schedule(nlp->lwp_thread);
1712                 } else {
1713                         spin_unlock(&dd->spin);
1714                         /*spin_unlock(&dfly_spin);*/
1715                 }
1716         } else if (dd->runqcount) {
1717                 /*
1718                  * Possibly find a better process to schedule.
1719                  */
1720                 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
1721                 if (nlp) {
1722                         KTR_COND_LOG(usched_sched_thread_process,
1723                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1724                             gd->gd_cpuid,
1725                             nlp->lwp_proc->p_pid,
1726                             nlp->lwp_thread->td_gd->gd_cpuid);
1727
1728                         dd->upri = nlp->lwp_priority;
1729                         dd->uschedcp = nlp;
1730                         dd->rrcount = 0;        /* reset round robin */
1731                         spin_unlock(&dd->spin);
1732                         /*spin_unlock(&dfly_spin);*/
1733                         lwkt_acquire(nlp->lwp_thread);
1734                         lwkt_schedule(nlp->lwp_thread);
1735                 } else {
1736                         /*
1737                          * Leave the thread on our run queue.  Another
1738                          * scheduler will try to pull it later.
1739                          */
1740                         spin_unlock(&dd->spin);
1741                         /*spin_unlock(&dfly_spin);*/
1742                 }
1743         } else {
1744                 /*
1745                  * The runq is empty.
1746                  */
1747                 spin_unlock(&dd->spin);
1748                 /*spin_unlock(&dfly_spin);*/
1749         }
1750
1751         /*
1752          * We're descheduled unless someone scheduled us.  Switch away.
1753          * Exiting the critical section will cause splz() to be called
1754          * for us if interrupts and such are pending.
1755          */
1756         crit_exit_gd(gd);
1757         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1758     }
1759 }
1760
1761 /* sysctl stick_to_level parameter */
1762 static int
1763 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
1764 {
1765         int error, new_val;
1766
1767         new_val = usched_dfly_stick_to_level;
1768
1769         error = sysctl_handle_int(oidp, &new_val, 0, req);
1770         if (error != 0 || req->newptr == NULL)
1771                 return (error);
1772         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1773                 return (EINVAL);
1774         usched_dfly_stick_to_level = new_val;
1775         return (0);
1776 }
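
/*
 * Usage example (from userland), assuming the sysctl node registered in
 * dfly_helper_thread_cpu_init() below:
 *
 *      sysctl kern.usched_dfly.stick_to_level=1
 *
 * Values outside the range 0 .. cpu_topology_levels_number - 1 are
 * rejected with EINVAL by the handler above.
 */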
1777
1778 /*
1779  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
1780  * been cleared by rqinit() and we should not mess with it further.
1781  */
1782 static void
1783 dfly_helper_thread_cpu_init(void)
1784 {
1785         int i;
1786         int j;
1787         int cpuid;
1788         int smt_not_supported = 0;
1789         int cache_coherent_not_supported = 0;
1790
1791         if (bootverbose)
1792                 kprintf("Start scheduler helpers on cpus:\n");
1793
1794         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1795         usched_dfly_sysctl_tree =
1796                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1797                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1798                                 "usched_dfly", CTLFLAG_RD, 0, "");
1799
1800         for (i = 0; i < ncpus; ++i) {
1801                 dfly_pcpu_t dd = &dfly_pcpu[i];
1802                 cpumask_t mask = CPUMASK(i);
1803
1804                 if ((mask & smp_active_mask) == 0)
1805                     continue;
1806
1807                 spin_init(&dd->spin);
1808                 dd->cpunode = get_cpu_node_by_cpuid(i);
1809                 dd->cpuid = i;
1810                 dd->cpumask = CPUMASK(i);
1811                 for (j = 0; j < NQS; j++) {
1812                         TAILQ_INIT(&dd->queues[j]);
1813                         TAILQ_INIT(&dd->rtqueues[j]);
1814                         TAILQ_INIT(&dd->idqueues[j]);
1815                 }
1816                 atomic_clear_cpumask(&dfly_curprocmask, 1);
1817
1818                 if (dd->cpunode == NULL) {
1819                         smt_not_supported = 1;
1820                         cache_coherent_not_supported = 1;
1821                         if (bootverbose)
1822                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1823                                          "found for cpu\n", i);
1824                 } else {
1825                         switch (dd->cpunode->type) {
1826                         case THREAD_LEVEL:
1827                                 if (bootverbose)
1828                                         kprintf ("\tcpu%d - HyperThreading "
1829                                                  "available. Core siblings: ",
1830                                                  i);
1831                                 break;
1832                         case CORE_LEVEL:
1833                                 smt_not_supported = 1;
1834
1835                                 if (bootverbose)
1836                                         kprintf ("\tcpu%d - No HT available, "
1837                                                  "multi-core/physical "
1838                                                  "cpu. Physical siblings: ",
1839                                                  i);
1840                                 break;
1841                         case CHIP_LEVEL:
1842                                 smt_not_supported = 1;
1843
1844                                 if (bootverbose)
1845                                         kprintf ("\tcpu%d - No HT available, "
1846                                                  "single-core/physical cpu. "
1847                                                  "Package Siblings: ",
1848                                                  i);
1849                                 break;
1850                         default:
1851                                 /* Let's go for safe defaults here */
1852                                 smt_not_supported = 1;
1853                                 cache_coherent_not_supported = 1;
1854                                 if (bootverbose)
1855                                         kprintf ("\tcpu%d - Unknown cpunode->"
1856                                                  "type=%u. Siblings: ",
1857                                                  i,
1858                                                  (u_int)dd->cpunode->type);
1859                                 break;
1860                         }
1861
1862                         if (bootverbose) {
1863                                 if (dd->cpunode->parent_node != NULL) {
1864                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1865                                                 kprintf("cpu%d ", cpuid);
1866                                         kprintf("\n");
1867                                 } else {
1868                                         kprintf(" no siblings\n");
1869                                 }
1870                         }
1871                 }
1872
1873                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
1874                             0, i, "usched %d", i);
1875
1876                 /*
1877                  * Allow user scheduling on the target cpu.  cpu #0 has already
1878                  * been enabled in rqinit().
1879                  */
1880                 if (i)
1881                     atomic_clear_cpumask(&dfly_curprocmask, mask);
1882                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1883                 dd->upri = PRIBASE_NULL;
1884
1885         }
1886
1887         /* usched_dfly sysctl configurable parameters */
1888
1889         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1890                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1891                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1892                        &usched_dfly_rrinterval, 0, "");
1893         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1894                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1895                        OID_AUTO, "decay", CTLFLAG_RW,
1896                        &usched_dfly_decay, 0, "Extra decay when not running");
1897         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1898                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1899                        OID_AUTO, "batch_time", CTLFLAG_RW,
1900                        &usched_dfly_batch_time, 0, "Min batch counter value");
1901
1902         /* Add enable/disable option for SMT scheduling if supported */
1903         if (smt_not_supported) {
1904                 usched_dfly_smt = 0;
1905                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1906                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1907                                   OID_AUTO, "smt", CTLFLAG_RD,
1908                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
1909         } else {
1910                 usched_dfly_smt = 1;
1911                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1912                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1913                                OID_AUTO, "smt", CTLFLAG_RW,
1914                                &usched_dfly_smt, 0, "Enable SMT scheduling");
1915         }
1916
1917         /*
1918          * Add enable/disable option for cache coherent scheduling
1919          * if supported
1920          */
1921         if (cache_coherent_not_supported) {
1922                 usched_dfly_cache_coherent = 0;
1923                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1924                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1925                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
1926                                   "NOT SUPPORTED", 0,
1927                                   "Cache coherence NOT SUPPORTED");
1928         } else {
1929                 usched_dfly_cache_coherent = 1;
1930                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1931                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1932                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
1933                                &usched_dfly_cache_coherent, 0,
1934                                "Enable/Disable cache coherent scheduling");
1935
1936                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1937                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1938                                OID_AUTO, "weight1", CTLFLAG_RW,
1939                                &usched_dfly_weight1, 10,
1940                                "Weight selection for current cpu");
1941
1942                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1943                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1944                                OID_AUTO, "weight2", CTLFLAG_RW,
1945                                &usched_dfly_weight2, 5,
1946                                "Weight selection for wakefrom cpu");
1947
1948                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
1949                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1950                                 OID_AUTO, "stick_to_level",
1951                                 CTLTYPE_INT | CTLFLAG_RW,
1952                                 NULL, sizeof usched_dfly_stick_to_level,
1953                                 sysctl_usched_dfly_stick_to_level, "I",
1954                                 "Stick a process to this level. See sysctl "
1955                                 "parameter hw.cpu_topology.level_description");
1956         }
1957 }
1958 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
1959         dfly_helper_thread_cpu_init, NULL)
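
/*
 * The knobs registered above can be inspected at runtime with, e.g.:
 *
 *      sysctl kern.usched_dfly
 *
 * which lists rrinterval, decay, batch_time and, when the topology
 * supports them, the smt, cache_coherent, weight1, weight2 and
 * stick_to_level controls.
 */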
1960
1961 #else /* No SMP options - just add the configurable parameters to sysctl */
1962
1963 static void
1964 sched_sysctl_tree_init(void)
1965 {
1966         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1967         usched_dfly_sysctl_tree =
1968                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1969                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1970                                 "usched_dfly", CTLFLAG_RD, 0, "");
1971
1972         /* usched_dfly sysctl configurable parameters */
1973         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1974                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1975                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1976                        &usched_dfly_rrinterval, 0, "");
1977         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1978                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1979                        OID_AUTO, "decay", CTLFLAG_RW,
1980                        &usched_dfly_decay, 0, "Extra decay when not running");
1981         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1982                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1983                        OID_AUTO, "batch_time", CTLFLAG_RW,
1984                        &usched_dfly_batch_time, 0, "Min batch counter value");
1985 }
1986 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
1987         sched_sysctl_tree_init, NULL)
1988 #endif