kernel - usched_dfly revamp
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
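/*
 * Example: with MAXPRI = 128 and NQS = 32, PPQ works out to 4, so a
 * normal-class priority p selects run queue (p & PRIMASK) / PPQ --
 * priorities 0-3 share queue 0, 4-7 share queue 1, and so on.
 * Likewise ESTCPUMAX works out to 512 * 32 = 16384 estcpu units.
 */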
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_batch       lwp_usdata.dfly.batch
97 #define lwp_rqtype      lwp_usdata.dfly.rqtype
98 #define lwp_qcpu        lwp_usdata.dfly.qcpu
99
100 struct usched_dfly_pcpu {
101         struct spinlock spin;
102         struct thread   helper_thread;
103         short           rrcount;
104         short           upri;
105         int             uload;
106         int             ucount;
107         struct lwp      *uschedcp;
108         struct rq       queues[NQS];
109         struct rq       rtqueues[NQS];
110         struct rq       idqueues[NQS];
111         u_int32_t       queuebits;
112         u_int32_t       rtqueuebits;
113         u_int32_t       idqueuebits;
114         int             runqcount;
115         int             cpuid;
116         cpumask_t       cpumask;
117 #ifdef SMP
118         cpu_node_t      *cpunode;
119 #endif
120 };
121
122 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
123
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
130                                 sysclock_t cpstamp);
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
137 #ifdef SMP
138 static void dfly_changeqcpu_locked(struct lwp *lp,
139                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
140 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
141 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
142 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
143 #endif
144
145 #ifdef SMP
146 static void dfly_need_user_resched_remote(void *dummy);
147 #endif
148 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
149                                           struct lwp *chklp, int worst);
150 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
151 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
152
153 struct usched usched_dfly = {
154         { NULL },
155         "dfly", "Original DragonFly Scheduler",
156         NULL,                   /* default registration */
157         NULL,                   /* default deregistration */
158         dfly_acquire_curproc,
159         dfly_release_curproc,
160         dfly_setrunqueue,
161         dfly_schedulerclock,
162         dfly_recalculate_estcpu,
163         dfly_resetpriority,
164         dfly_forking,
165         dfly_exiting,
166         dfly_uload_update,
167         NULL,                   /* setcpumask not supported */
168         dfly_yield
169 };
170
171 /*
172  * We have NQS (32) run queues per scheduling class.  For the normal
173  * class, there are 128 priorities scaled onto these 32 queues.  New
174  * processes are added to the last entry in each queue, and processes
175  * are selected for running by taking them from the head and maintaining
176  * a simple FIFO arrangement.  Realtime and Idle priority processes have
177  * an explicit 0-31 priority which maps directly onto their class queue
178  * index.  When a queue has something in it, the corresponding bit is
179  * set in the queuebits variable, allowing a single read to determine
180  * the state of all 32 queues and then a ffs() to find the first busy
181  * queue.
182  */
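/*
 * A minimal illustrative sketch (kept under #if 0, not compiled, helper
 * name invented for illustration) of how a queuebits word is consumed:
 * bsfl() on the bitmask yields the index of the first (best priority)
 * non-empty queue, mirroring what dfly_chooseproc_locked() does further
 * below for the normal class.
 */
#if 0
static struct lwp *
example_pick_first_normal(dfly_pcpu_t dd)
{
	u_int32_t pri;

	if (dd->queuebits == 0)
		return (NULL);			/* no runnable normal lwps */
	pri = bsfl(dd->queuebits);		/* lowest set bit = best queue */
	return (TAILQ_FIRST(&dd->queues[pri]));
}
#endif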
183 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
184 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
185 #ifdef SMP
186 static volatile int dfly_scancpu;
187 #endif
188 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
189 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
190 static struct sysctl_oid *usched_dfly_sysctl_tree;
191
192 /* Debug info exposed through debug.* sysctl */
193
194 static int usched_dfly_debug = -1;
195 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
196            &usched_dfly_debug, 0,
197            "Print debug information for this pid");
198
199 static int usched_dfly_pid_debug = -1;
200 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
201            &usched_dfly_pid_debug, 0,
202            "Print KTR debug information for this pid");
203
204 static int usched_dfly_chooser = 0;
205 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
206            &usched_dfly_chooser, 0,
207            "Print KTR debug information for this pid");
208
209 /*
210  * Tuning usched_dfly - configurable through kern.usched_dfly.
211  *
212  * weight1 - Tries to keep threads on their current cpu.  If you
213  *           make this value too large the scheduler will not be
214  *           able to load-balance large loads.
215  *
216  * weight2 - If non-zero, detects thread pairs undergoing synchronous
217  *           communications and tries to move them closer together.
218  *           Behavior is adjusted by bit 4 of features (0x10).
219  *
220  *           WARNING!  Weight2 is a ridiculously sensitive parameter,
221  *           a small value is recommended.
222  *
223  * weight3 - Weighting based on the number of recently runnable threads
224  *           on the userland scheduling queue (ignoring their loads).
225  *           A nominal value here prevents high-priority (low-load)
226  *           threads from accumulating on one cpu core when other
227  *           cores are available.
228  *
229  *           This value should be left fairly small relative to weight1
230  *           and weight4.
231  *
232  * weight4 - Weighting based on other cpu queues being available
233  *           or running processes with higher lwp_priority's.
234  *
235  *           This allows a thread to migrate to another nearby cpu if it
236  *           is unable to run on the current cpu based on the other cpu
237  *           being idle or running a lower priority (higher lwp_priority)
238  *           thread.  This value should be large enough to override weight1.
239  *
240  * features - These flags can be set or cleared to enable or disable various
241  *            features.
242  *
243  *            0x01      Enable idle-cpu pulling                 (default)
244  *            0x02      Enable proactive pushing                (default)
245  *            0x04      Enable rebalancing rover                (default)
246  *            0x08      Enable more proactive pushing           (default)
247  *            0x10      (flip weight2 limit on same cpu)        (default)
248  *            0x20      choose best cpu for forked process
249  *            0x40      choose current cpu for forked process
250  *            0x80      choose random cpu for forked process    (default)
251  */
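/*
 * These knobs are exported as sysctls under kern.usched_dfly (per the
 * comment above) and can be adjusted at run-time, e.g. via something
 * like "sysctl kern.usched_dfly.weight2=10" or
 * "sysctl kern.usched_dfly.features=0x8f".  The values below are the
 * compiled-in defaults.
 */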
252 #ifdef SMP
253 static int usched_dfly_smt = 0;
254 static int usched_dfly_cache_coherent = 0;
255 static int usched_dfly_weight1 = 30;    /* keep thread on current cpu */
256 static int usched_dfly_weight2 = 15;    /* synchronous peer's current cpu */
257 static int usched_dfly_weight3 = 10;    /* number of threads on queue */
258 static int usched_dfly_weight4 = 50;    /* availability of idle cores */
259 static int usched_dfly_features = 0x8F; /* allow pulls */
260 #endif
261 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
262 static int usched_dfly_decay = 8;
263 static int usched_dfly_batch_time = 10;
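/*
 * NOTE: dfly_schedulerclock() fires ESTCPUFREQ times per second on each
 * cpu, so an rrinterval of (ESTCPUFREQ + 9) / 10 ticks gives the ~10
 * round-robins per second described in that function.
 */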
264
265 /* KTR debug printings */
266
267 KTR_INFO_MASTER(usched);
268
269 #if !defined(KTR_USCHED_DFLY)
270 #define KTR_USCHED_DFLY KTR_ALL
271 #endif
272
273 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
274     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
275     pid_t pid, int old_cpuid, int curr);
276
277 /*
278  * This function is called when the kernel intends to return to userland.
279  * It is responsible for making the thread the current designated userland
280  * thread for this cpu, blocking if necessary.
281  *
282  * The kernel has already depressed our LWKT priority so we must not switch
283  * until we have either assigned or disposed of the thread.
284  *
285  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
286  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
287  * occur, this function is called only under very controlled circumstances.
288  */
289 static void
290 dfly_acquire_curproc(struct lwp *lp)
291 {
292         globaldata_t gd;
293         dfly_pcpu_t dd;
294 #ifdef SMP
295         dfly_pcpu_t rdd;
296 #endif
297         thread_t td;
298         int force_resched;
299
300         /*
301          * Make sure we aren't sitting on a tsleep queue.
302          */
303         td = lp->lwp_thread;
304         crit_enter_quick(td);
305         if (td->td_flags & TDF_TSLEEPQ)
306                 tsleep_remove(td);
307         dfly_recalculate_estcpu(lp);
308
309         gd = mycpu;
310         dd = &dfly_pcpu[gd->gd_cpuid];
311
312         /*
313          * Process any pending interrupts/ipi's, then handle reschedule
314          * requests.  dfly_release_curproc() will try to assign a new
315          * uschedcp that isn't us and otherwise NULL it out.
316          */
317         force_resched = 0;
318         if (user_resched_wanted()) {
319                 if (dd->uschedcp == lp)
320                         force_resched = 1;
321                 clear_user_resched();
322                 dfly_release_curproc(lp);
323         }
324
325         /*
326          * Loop until we are the current user thread.
327          *
328          * NOTE: dd spinlock not held at top of loop.
329          */
330         if (dd->uschedcp == lp)
331                 lwkt_yield_quick();
332
333         while (dd->uschedcp != lp) {
334                 lwkt_yield_quick();
335
336                 spin_lock(&dd->spin);
337
338                 /*
339                  * We are not or are no longer the current lwp and a forced
340                  * reschedule was requested.  Figure out the best cpu to
341                  * run on (our current cpu will be given significant weight).
342                  *
343                  * (if a reschedule was not requested we want to move this
344                  *  step after the uschedcp tests).
345                  */
346 #ifdef SMP
347                 if (force_resched &&
348                     (usched_dfly_features & 0x08) &&
349                     (rdd = dfly_choose_best_queue(lp)) != dd) {
350                         dfly_changeqcpu_locked(lp, dd, rdd);
351                         spin_unlock(&dd->spin);
352                         lwkt_deschedule(lp->lwp_thread);
353                         dfly_setrunqueue_dd(rdd, lp);
354                         lwkt_switch();
355                         gd = mycpu;
356                         dd = &dfly_pcpu[gd->gd_cpuid];
357                         continue;
358                 }
359 #endif
360
361                 /*
362                  * Either no reschedule was requested or the best queue was
363                  * dd, and no current process has been selected.  We can
364                  * trivially become the current lwp on the current cpu.
365                  */
366                 if (dd->uschedcp == NULL) {
367                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
368                         dd->uschedcp = lp;
369                         dd->upri = lp->lwp_priority;
370                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
371                         spin_unlock(&dd->spin);
372                         break;
373                 }
374
375                 /*
376                  * Can we steal the current designated user thread?
377                  *
378          * If we do, the other thread will stall when it tries to
379                  * return to userland, possibly rescheduling elsewhere.
380                  *
381                  * It is important to do a masked test to avoid the edge
382                  * case where two near-equal-priority threads are constantly
383                  * interrupting each other.
384                  */
385                 if (dd->uschedcp &&
386                    (dd->upri & ~PPQMASK) >
387                    (lp->lwp_priority & ~PPQMASK)) {
388                         dd->uschedcp = lp;
389                         dd->upri = lp->lwp_priority;
390                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
391                         spin_unlock(&dd->spin);
392                         break;
393                 }
394
395 #ifdef SMP
396                 /*
397                  * We are not the current lwp, figure out the best cpu
398                  * to run on (our current cpu will be given significant
399                  * weight).  Loop on cpu change.
400                  */
401                 if ((usched_dfly_features & 0x02) &&
402                     force_resched == 0 &&
403                     (rdd = dfly_choose_best_queue(lp)) != dd) {
404                         dfly_changeqcpu_locked(lp, dd, rdd);
405                         spin_unlock(&dd->spin);
406                         lwkt_deschedule(lp->lwp_thread);
407                         dfly_setrunqueue_dd(rdd, lp);
408                         lwkt_switch();
409                         gd = mycpu;
410                         dd = &dfly_pcpu[gd->gd_cpuid];
411                         continue;
412                 }
413 #endif
414
415                 /*
416                  * We cannot become the current lwp, place the lp on the
417                  * run-queue of this or another cpu and deschedule ourselves.
418                  *
419                  * When we are reactivated we will have another chance.
420                  *
421                  * Reload after a switch or setrunqueue/switch possibly
422                  * moved us to another cpu.
423                  */
424                 spin_unlock(&dd->spin);
425                 lwkt_deschedule(lp->lwp_thread);
426                 dfly_setrunqueue_dd(dd, lp);
427                 lwkt_switch();
428                 gd = mycpu;
429                 dd = &dfly_pcpu[gd->gd_cpuid];
430         }
431
432         /*
433          * Make sure upri is synchronized, then yield to LWKT threads as
434          * needed before returning.  This could result in another reschedule.
435          * XXX
436          */
437         crit_exit_quick(td);
438
439         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
440 }
441
442 /*
443  * DFLY_RELEASE_CURPROC
444  *
445  * This routine detaches the current thread from the userland scheduler,
446  * usually because the thread needs to run or block in the kernel (at
447  * kernel priority) for a while.
448  *
449  * This routine is also responsible for selecting a new thread to
450  * make the current thread.
451  *
452  * NOTE: This implementation differs from the dummy example in that
453  * dfly_select_curproc() is able to select the current process, whereas
454  * dummy_select_curproc() is not.  This means we have to NULL out
455  * uschedcp.
456  *
457  * Additionally, note that we may already be on a run queue if releasing
458  * via the lwkt_switch() in dfly_setrunqueue().
459  */
460 static void
461 dfly_release_curproc(struct lwp *lp)
462 {
463         globaldata_t gd = mycpu;
464         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
465
466         /*
467          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
468          * by wakeup().
469          */
470         if (dd->uschedcp == lp) {
471                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
472                 spin_lock(&dd->spin);
473                 if (dd->uschedcp == lp) {
474                         dd->uschedcp = NULL;    /* don't let lp be selected */
475                         dd->upri = PRIBASE_NULL;
476                         atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
477                         spin_unlock(&dd->spin);
478                         dfly_select_curproc(gd);
479                 } else {
480                         spin_unlock(&dd->spin);
481                 }
482         }
483 }
484
485 /*
486  * DFLY_SELECT_CURPROC
487  *
488  * Select a new current process for this cpu and clear any pending user
489  * reschedule request.  The cpu currently has no current process.
490  *
491  * This routine is also responsible for equal-priority round-robining,
492  * typically triggered from dfly_schedulerclock().  In our dummy example
493  * all the 'user' threads are LWKT scheduled all at once and we just
494  * call lwkt_switch().
495  *
496  * The calling process is not on the queue and cannot be selected.
497  */
498 static
499 void
500 dfly_select_curproc(globaldata_t gd)
501 {
502         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
503         struct lwp *nlp;
504         int cpuid = gd->gd_cpuid;
505
506         crit_enter_gd(gd);
507
508         spin_lock(&dd->spin);
509         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
510
511         if (nlp) {
512                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
513                 dd->upri = nlp->lwp_priority;
514                 dd->uschedcp = nlp;
515                 dd->rrcount = 0;                /* reset round robin */
516                 spin_unlock(&dd->spin);
517 #ifdef SMP
518                 lwkt_acquire(nlp->lwp_thread);
519 #endif
520                 lwkt_schedule(nlp->lwp_thread);
521         } else {
522                 spin_unlock(&dd->spin);
523         }
524         crit_exit_gd(gd);
525 }
526
527 /*
528  * Place the specified lwp on the user scheduler's run queue.  This routine
529  * must be called with the thread descheduled.  The lwp must be runnable.
530  * It must not be possible for anyone else to explicitly schedule this thread.
531  *
532  * The thread may be the current thread as a special case.
533  */
534 static void
535 dfly_setrunqueue(struct lwp *lp)
536 {
537         dfly_pcpu_t dd;
538         dfly_pcpu_t rdd;
539
540         /*
541          * First validate the process LWKT state.
542          */
543         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
544         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
545             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
546              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
547         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
548
549         /*
550          * NOTE: dd/rdd do not necessarily represent the current cpu.
551          *       Instead they may represent the cpu the thread was last
552          *       scheduled on or inherited by its parent.
553          */
554         dd = &dfly_pcpu[lp->lwp_qcpu];
555         rdd = dd;
556
557         /*
558          * This process is not supposed to be scheduled anywhere or assigned
559          * as the current process anywhere.  Assert the condition.
560          */
561         KKASSERT(rdd->uschedcp != lp);
562
563 #ifndef SMP
564         /*
565          * If we are not SMP we do not have a scheduler helper to kick
566          * and must directly activate the process if none are scheduled.
567          *
568          * This is really only an issue when bootstrapping init since
569          * the caller in all other cases will be a user process, and
570          * even if released (rdd->uschedcp == NULL), that process will
571          * kickstart the scheduler when it returns to user mode from
572          * the kernel.
573          *
574          * NOTE: On SMP we can't just set some other cpu's uschedcp.
575          */
576         if (rdd->uschedcp == NULL) {
577                 spin_lock(&rdd->spin);
578                 if (rdd->uschedcp == NULL) {
579                         atomic_set_cpumask(&dfly_curprocmask, 1);
580                         rdd->uschedcp = lp;
581                         rdd->upri = lp->lwp_priority;
582                         spin_unlock(&rdd->spin);
583                         lwkt_schedule(lp->lwp_thread);
584                         return;
585                 }
586                 spin_unlock(&rdd->spin);
587         }
588 #endif
589
590 #ifdef SMP
591         /*
592          * Ok, we have to setrunqueue some target cpu and request a reschedule
593          * if necessary.
594          *
595          * We have to choose the best target cpu.  It might not be the current
596          * target even if the current cpu has no running user thread (for
597          * example, because the current cpu might be a hyperthread and its
598          * sibling has a thread assigned).
599          *
600          * If we just forked it is most optimal to run the child on the same
601          * cpu just in case the parent decides to wait for it (thus getting
602          * off that cpu).  As long as there is nothing else runnable on the
603          * cpu, that is.  If we did this unconditionally a parent forking
604          * multiple children before waiting (e.g. make -j N) leaves other
605          * cpus idle that could be working.
606          */
607         if (lp->lwp_forked) {
608                 lp->lwp_forked = 0;
609                 if (usched_dfly_features & 0x20)
610                         rdd = dfly_choose_best_queue(lp);
611                 else if (usched_dfly_features & 0x40)
612                         rdd = &dfly_pcpu[lp->lwp_qcpu];
613                 else if (usched_dfly_features & 0x80)
614                         rdd = dfly_choose_queue_simple(rdd, lp);
615                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
616                         rdd = dfly_choose_best_queue(lp);
617                 else
618                         rdd = &dfly_pcpu[lp->lwp_qcpu];
619         } else {
620                 rdd = dfly_choose_best_queue(lp);
621                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
622         }
623         if (lp->lwp_qcpu != rdd->cpuid) {
624                 spin_lock(&dd->spin);
625                 dfly_changeqcpu_locked(lp, dd, rdd);
626                 spin_unlock(&dd->spin);
627         }
628 #endif
629         dfly_setrunqueue_dd(rdd, lp);
630 }
631
632 #ifdef SMP
633
634 /*
635  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
636  * spin-locked on-call.  rdd does not have to be.
637  */
638 static void
639 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
640 {
641         if (lp->lwp_qcpu != rdd->cpuid) {
642                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
643                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
644                         atomic_add_int(&dd->uload,
645                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
646                         atomic_add_int(&dd->ucount, -1);
647                 }
648                 lp->lwp_qcpu = rdd->cpuid;
649         }
650 }
651
652 #endif
653
654 /*
655  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
656  * also performs all necessary ancillary notification actions.
657  */
658 static void
659 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
660 {
661 #ifdef SMP
662         globaldata_t rgd;
663
664         /*
665          * We might be moving the lp to another cpu's run queue, and once
666          * on the runqueue (even if it is our cpu's), another cpu can rip
667          * it away from us.
668          *
669          * TDF_MIGRATING might already be set if this is part of a
670          * remrunqueue+setrunqueue sequence.
671          */
672         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
673                 lwkt_giveaway(lp->lwp_thread);
674
675         rgd = globaldata_find(rdd->cpuid);
676
677         /*
678          * We lose control of the lp the moment we release the spinlock
679          * after having placed it on the queue.  i.e. another cpu could pick
680          * it up, or it could exit, or its priority could be further
681          * adjusted, or something like that.
682          *
683          * WARNING! rdd can point to a foreign cpu!
684          */
685         spin_lock(&rdd->spin);
686         dfly_setrunqueue_locked(rdd, lp);
687
688         if (rgd == mycpu) {
689                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
690                         spin_unlock(&rdd->spin);
691                         if (rdd->uschedcp == NULL) {
692                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
693                                 need_user_resched();
694                         } else {
695                                 need_user_resched();
696                         }
697                 } else {
698                         spin_unlock(&rdd->spin);
699                 }
700         } else {
701                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
702                         spin_unlock(&rdd->spin);
703                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
704                                        NULL);
705                 } else if (dfly_rdyprocmask & rgd->gd_cpumask) {
706                         atomic_clear_cpumask(&dfly_rdyprocmask,
707                                              rgd->gd_cpumask);
708                         spin_unlock(&rdd->spin);
709                         wakeup(&rdd->helper_thread);
710                 } else {
711                         spin_unlock(&rdd->spin);
712                 }
713         }
714 #else
715         /*
716          * Request a reschedule if appropriate.
717          */
718         spin_lock(&rdd->spin);
719         dfly_setrunqueue_locked(rdd, lp);
720         spin_unlock(&rdd->spin);
721         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
722                 need_user_resched();
723         }
724 #endif
725 }
726
727 /*
728  * This routine is called from a systimer IPI.  It MUST be MP-safe and
729  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
730  * each cpu.
731  */
732 static
733 void
734 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
735 {
736         globaldata_t gd = mycpu;
737         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
738
739         /*
740          * Spinlocks also hold a critical section so there should not be
741          * any active.
742          */
743         KKASSERT(gd->gd_spinlocks_wr == 0);
744
745         if (lp == NULL)
746                 return;
747
748         /*
749          * Do we need to round-robin?  We round-robin 10 times a second.
750          * This should only occur for cpu-bound batch processes.
751          */
752         if (++dd->rrcount >= usched_dfly_rrinterval) {
753                 lp->lwp_thread->td_wakefromcpu = -1;
754                 dd->rrcount = 0;
755                 need_user_resched();
756         }
757
758         /*
759          * Adjust estcpu upward using a real time equivalent calculation,
760          * and recalculate lp's priority.
761          */
762         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
763         dfly_resetpriority(lp);
764
765         /*
766          * Rebalance cpus on each scheduler tick.  Each cpu in turn will
767          * calculate the worst queue and, if sufficiently loaded, will
768          * pull a process from that queue into our current queue.
769          *
770          * This tries to avoid always moving the same thread. XXX
771          */
772 #ifdef SMP
773         if ((usched_dfly_features & 0x04) &&
774             ((uint16_t)sched_ticks % ncpus) == gd->gd_cpuid) {
775                 /*
776                  * Our cpu is up.
777                  */
778                 struct lwp *nlp;
779                 dfly_pcpu_t rdd;
780
781                 /*
782                  * We have to choose the worst thread in the worst queue
783                  * because it likely finished its batch on that cpu and is
784                  * now waiting for cpu again.
785                  */
786                 rdd = dfly_choose_worst_queue(dd);
787                 if (rdd) {
788                         spin_lock(&dd->spin);
789                         if (spin_trylock(&rdd->spin)) {
790                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
791                                 spin_unlock(&rdd->spin);
792                                 if (nlp == NULL)
793                                         spin_unlock(&dd->spin);
794                         } else {
795                                 spin_unlock(&dd->spin);
796                                 nlp = NULL;
797                         }
798                 } else {
799                         nlp = NULL;
800                 }
801                 /* dd->spin held if nlp != NULL */
802
803                 /*
804                  * Either schedule it or add it to our queue.
805                  */
806                 if (nlp &&
807                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
808                         atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
809                         dd->upri = nlp->lwp_priority;
810                         dd->uschedcp = nlp;
811                         dd->rrcount = 0;        /* reset round robin */
812                         spin_unlock(&dd->spin);
813                         lwkt_acquire(nlp->lwp_thread);
814                         lwkt_schedule(nlp->lwp_thread);
815                 } else if (nlp) {
816                         dfly_setrunqueue_locked(dd, nlp);
817                         spin_unlock(&dd->spin);
818                 }
819         }
820 #endif
821 }
822
823 /*
824  * Called from acquire and from kern_synch's one-second timer (one of the
825  * callout helper threads) with a critical section held.
826  *
827  * Decay p_estcpu based on the number of ticks we haven't been running
828  * and our p_nice.  As the load increases each process observes a larger
829  * number of idle ticks (because other processes are running in them).
830  * This observation leads to a larger correction which tends to make the
831  * system more 'batchy'.
832  *
833  * Note that no recalculation occurs for a process which sleeps and wakes
834  * up in the same tick.  That is, a system doing thousands of context
835  * switches per second will still only do serious estcpu calculations
836  * ESTCPUFREQ times per second.
837  */
838 static
839 void
840 dfly_recalculate_estcpu(struct lwp *lp)
841 {
842         globaldata_t gd = mycpu;
843         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
844         sysclock_t cpbase;
845         sysclock_t ttlticks;
846         int estcpu;
847         int decay_factor;
848
849         /*
850          * We have to subtract periodic to get the last schedclock
851          * timeout time, otherwise we would get the upcoming timeout.
852          * Keep in mind that a process can migrate between cpus and
853          * while the scheduler clock should be very close, boundary
854          * conditions could lead to a small negative delta.
855          */
856         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
857
858         if (lp->lwp_slptime > 1) {
859                 /*
860                  * Too much time has passed, do a coarse correction.
861                  */
862                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
863                 dfly_resetpriority(lp);
864                 lp->lwp_cpbase = cpbase;
865                 lp->lwp_cpticks = 0;
866                 lp->lwp_batch -= ESTCPUFREQ;
867                 if (lp->lwp_batch < 0)
868                         lp->lwp_batch = 0;
869         } else if (lp->lwp_cpbase != cpbase) {
870                 /*
871                  * Adjust estcpu if we are in a different tick.  Don't waste
872                  * time if we are in the same tick.
873                  *
874                  * First calculate the number of ticks in the measurement
875                  * interval.  The ttlticks calculation can wind up 0 due to
876                  * a bug in the handling of lwp_slptime (as yet not found),
877                  * so make sure we do not get a divide by 0 panic.
878                  */
879                 ttlticks = (cpbase - lp->lwp_cpbase) /
880                            gd->gd_schedclock.periodic;
881                 if (ttlticks < 0) {
882                         ttlticks = 0;
883                         lp->lwp_cpbase = cpbase;
884                 }
885                 if (ttlticks == 0)
886                         return;
887                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
888
889                 /*
890                  * Calculate the percentage of one cpu used factoring in ncpus
891                  * and the load and adjust estcpu.  Handle degenerate cases
892                  * by adding 1 to runqcount.
893                  *
894                  * estcpu is scaled by ESTCPUMAX.
895                  *
896                  * runqcount is the excess number of user processes
897                  * that cannot be immediately scheduled to cpus.  We want
898                  * to count these as running to avoid range compression
899                  * in the base calculation (which is the actual percentage
900                  * of one cpu used).
901                  */
902                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
903                          (dd->runqcount + ncpus) / (ncpus * ttlticks);
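                /*
                 * Example: a fully cpu-bound thread on an otherwise
                 * unloaded system has lwp_cpticks == ttlticks and
                 * runqcount == 0, so the expression above evaluates to
                 * exactly ESTCPUMAX (100% of one cpu).
                 */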
904
905                 /*
906                  * If estcpu is > 50% we become more batch-like
907                  * If estcpu is <= 50% we become less batch-like
908                  *
909                  * It takes 30 cpu seconds to traverse the entire range.
910                  */
911                 if (estcpu > ESTCPUMAX / 2) {
912                         lp->lwp_batch += ttlticks;
913                         if (lp->lwp_batch > BATCHMAX)
914                                 lp->lwp_batch = BATCHMAX;
915                 } else {
916                         lp->lwp_batch -= ttlticks;
917                         if (lp->lwp_batch < 0)
918                                 lp->lwp_batch = 0;
919                 }
920
921                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
922                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
923                                 lp->lwp_proc->p_pid, lp,
924                                 estcpu, lp->lwp_estcpu,
925                                 lp->lwp_batch,
926                                 lp->lwp_cpticks, ttlticks);
927                 }
928
929                 /*
930                  * Adjust lp->lwp_estcpu.  The decay factor determines how
931                  * quickly lwp_estcpu collapses to its realtime calculation.
932                  * A slower collapse gives us a more accurate number but
933                  * can cause a cpu hog to eat too much cpu before the
934                  * scheduler decides to downgrade it.
935                  *
936                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
937                  *       and not here, but we must still ensure that a
938                  *       cpu-bound nice -20 process does not completely
939                  *       override a cpu-bound nice +20 process.
940                  *
941                  * NOTE: We must use ESTCPULIM() here to deal with any
942                  *       overshoot.
943                  */
944                 decay_factor = usched_dfly_decay;
945                 if (decay_factor < 1)
946                         decay_factor = 1;
947                 if (decay_factor > 1024)
948                         decay_factor = 1024;
949
950                 lp->lwp_estcpu = ESTCPULIM(
951                         (lp->lwp_estcpu * decay_factor + estcpu) /
952                         (decay_factor + 1));
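                /*
                 * With the default usched_dfly_decay of 8 the update
                 * above is (old * 8 + estcpu) / 9, i.e. roughly one
                 * ninth of the new sample is blended in on each pass.
                 */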
953
954                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
955                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
956                 dfly_resetpriority(lp);
957                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
958                 lp->lwp_cpticks = 0;
959         }
960 }
961
962 /*
963  * Compute the priority of a process when running in user mode.
964  * Arrange to reschedule if the resulting priority is better
965  * than that of the current process.
966  *
967  * This routine may be called with any process.
968  *
969  * This routine is called by fork1() for initial setup with the process
970  * off the run queue, and also may be called normally with the process on
971  * or off the run queue.
972  */
973 static void
974 dfly_resetpriority(struct lwp *lp)
975 {
976         dfly_pcpu_t rdd;
977         int newpriority;
978         u_short newrqtype;
979         int rcpu;
980         int checkpri;
981         int estcpu;
982
983         crit_enter();
984
985         /*
986          * Lock the scheduler that (lp) belongs to.  This can be on a different
987          * cpu.  Handle races.  This loop breaks out with the appropriate
988          * rdd locked.
989          */
990         for (;;) {
991                 rcpu = lp->lwp_qcpu;
992                 cpu_ccfence();
993                 rdd = &dfly_pcpu[rcpu];
994                 spin_lock(&rdd->spin);
995                 if (rcpu == lp->lwp_qcpu)
996                         break;
997                 spin_unlock(&rdd->spin);
998         }
999
1000         /*
1001          * Calculate the new priority and queue type
1002          */
1003         newrqtype = lp->lwp_rtprio.type;
1004
1005         switch(newrqtype) {
1006         case RTP_PRIO_REALTIME:
1007         case RTP_PRIO_FIFO:
1008                 newpriority = PRIBASE_REALTIME +
1009                              (lp->lwp_rtprio.prio & PRIMASK);
1010                 break;
1011         case RTP_PRIO_NORMAL:
1012                 /*
1013                  * Detune estcpu based on batchiness.  lwp_batch ranges
1014                  * from 0 to BATCHMAX.  Limit estcpu for the sake of
1015                  * the priority calculation to between 50% and 100%.
1016                  */
1017                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
1018                          (BATCHMAX * 2);
1019
1020                 /*
1021                  * p_nice piece         Adds (0-40) * 2         0-80
1022                  * estcpu               Adds 16384  * 4 / 512   0-128
1023                  */
1024                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1025                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1026                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1027                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1028                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
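                /*
                 * Worked example (assuming the usual PRIO_MIN of -20):
                 * nice 0 and a detuned estcpu of 8192 give
                 * 20*2 + 8192*4/512 = 104, which the rescale above maps
                 * to roughly 63, i.e. near the middle of the normal range.
                 */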
1029                 break;
1030         case RTP_PRIO_IDLE:
1031                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1032                 break;
1033         case RTP_PRIO_THREAD:
1034                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1035                 break;
1036         default:
1037                 panic("Bad RTP_PRIO %d", newrqtype);
1038                 /* NOT REACHED */
1039         }
1040
1041         /*
1042          * The newpriority incorporates the queue type so do a simple masked
1043          * check to determine if the process has moved to another queue.  If
1044          * it has, and it is currently on a run queue, then move it.
1045          *
1046          * Since uload is ~PPQMASK masked, no modifications are necessary if
1047          * we end up in the same run queue.
1048          */
1049         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1050                 int delta_uload;
1051
1052                 /*
1053                  * uload can change, calculate the adjustment to reduce
1054                  * edge cases since choosers scan the cpu topology without
1055                  * locks.
1056                  */
1057                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1058                         delta_uload =
1059                                 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
1060                                 ((newpriority & ~PPQMASK) & PRIMASK);
1061                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1062                                        delta_uload);
1063                         /* no change in ucount */
1064                 }
1065                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1066                         dfly_remrunqueue_locked(rdd, lp);
1067                         lp->lwp_priority = newpriority;
1068                         lp->lwp_rqtype = newrqtype;
1069                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1070                         dfly_setrunqueue_locked(rdd, lp);
1071                         checkpri = 1;
1072                 } else {
1073                         lp->lwp_priority = newpriority;
1074                         lp->lwp_rqtype = newrqtype;
1075                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1076                         checkpri = 0;
1077                 }
1078         } else {
1079                 /*
1080                  * In the same PPQ, uload cannot change.
1081                  */
1082                 lp->lwp_priority = newpriority;
1083                 checkpri = 1;
1084                 rcpu = -1;
1085         }
1086
1087         /*
1088          * Determine if we need to reschedule the target cpu.  This only
1089          * occurs if the LWP is already on a scheduler queue, which means
1090          * that idle cpu notification has already occurred.  At most we
1091          * need only issue a need_user_resched() on the appropriate cpu.
1092          *
1093          * The LWP may be owned by a CPU different from the current one,
1094          * in which case dd->uschedcp may be modified without an MP lock
1095          * or a spinlock held.  The worst that happens is that the code
1096          * below causes a spurious need_user_resched() on the target CPU
1097          * and dd->upri to be wrong for a short period of time, both of
1098          * which are harmless.
1099          *
1100          * If checkpri is 0 we are adjusting the priority of the current
1101          * process, possibly higher (less desirable), so ignore the upri
1102          * check which will fail in that case.
1103          */
1104         if (rcpu >= 0) {
1105                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1106                     (checkpri == 0 ||
1107                      (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1108 #ifdef SMP
1109                         if (rcpu == mycpu->gd_cpuid) {
1110                                 spin_unlock(&rdd->spin);
1111                                 need_user_resched();
1112                         } else {
1113                                 atomic_clear_cpumask(&dfly_rdyprocmask,
1114                                                      CPUMASK(rcpu));
1115                                 spin_unlock(&rdd->spin);
1116                                 lwkt_send_ipiq(globaldata_find(rcpu),
1117                                                dfly_need_user_resched_remote,
1118                                                NULL);
1119                         }
1120 #else
1121                         spin_unlock(&rdd->spin);
1122                         need_user_resched();
1123 #endif
1124                 } else {
1125                         spin_unlock(&rdd->spin);
1126                 }
1127         } else {
1128                 spin_unlock(&rdd->spin);
1129         }
1130         crit_exit();
1131 }
1132
1133 static
1134 void
1135 dfly_yield(struct lwp *lp)
1136 {
1137 #if 0
1138         /* FUTURE (or something similar) */
1139         switch(lp->lwp_rqtype) {
1140         case RTP_PRIO_NORMAL:
1141                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1142                 break;
1143         default:
1144                 break;
1145         }
1146 #endif
1147         need_user_resched();
1148 }
1149
1150 /*
1151  * Called from fork1() when a new child process is being created.
1152  *
1153  * Give the child process an initial estcpu that is more batchy than
1154  * its parent and dock the parent for the fork (but do not
1155  * reschedule the parent).  This comprises the main part of our batch
1156  * detection heuristic for both parallel forking and sequential execs.
1157  *
1158  * XXX lwp should be "spawning" instead of "forking"
1159  */
1160 static void
1161 dfly_forking(struct lwp *plp, struct lwp *lp)
1162 {
1163         /*
1164          * Put the child 4 queue slots (out of 32) higher than the parent
1165          * (less desirable than the parent).
1166          */
1167         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1168         lp->lwp_forked = 1;
1169
1170         /*
1171          * The batch status of children always starts out centerline
1172          * and will inch-up or inch-down as appropriate.  It takes roughly
1173          * ~15 seconds of >50% cpu to hit the limit.
1174          */
1175         lp->lwp_batch = BATCHMAX / 2;
1176
1177         /*
1178          * Dock the parent a cost for the fork, protecting us from fork
1179          * bombs.  If the parent is forking quickly make the child more
1180          * batchy.
1181          */
1182         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
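        /*
         * Concretely: the child starts out ESTCPUPPQ * 4 (2048) estcpu
         * units above the parent (clamped at ESTCPUMAX) and each fork
         * costs the parent ESTCPUPPQ / 16 (32) units.
         */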
1183 }
1184
1185 /*
1186  * Called when a lwp is being removed from this scheduler, typically
1187  * during lwp_exit().  We have to clean out any ULOAD accounting before
1188  * we can let the lp go.  The dd->spin lock is not needed for uload
1189  * updates.
1190  *
1191  * Scheduler dequeueing has already occurred, no further action in that
1192  * regard is needed.
1193  */
1194 static void
1195 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1196 {
1197         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1198
1199         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1200                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1201                 atomic_add_int(&dd->uload,
1202                                -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1203                 atomic_add_int(&dd->ucount, -1);
1204         }
1205 }
1206
1207 /*
1208  * This function cannot block in any way, but spinlocks are ok.
1209  *
1210  * Update the uload based on the state of the thread (whether it is going
1211  * to sleep or running again).  The uload is meant to be a longer-term
1212  * load and not an instantaneous load.
1213  */
1214 static void
1215 dfly_uload_update(struct lwp *lp)
1216 {
1217         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1218
1219         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1220                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1221                         spin_lock(&dd->spin);
1222                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1223                                 atomic_set_int(&lp->lwp_mpflags,
1224                                                LWP_MP_ULOAD);
1225                                 atomic_add_int(&dd->uload,
1226                                    ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1227                                 atomic_add_int(&dd->ucount, 1);
1228                         }
1229                         spin_unlock(&dd->spin);
1230                 }
1231         } else if (lp->lwp_slptime > 0) {
1232                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1233                         spin_lock(&dd->spin);
1234                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1235                                 atomic_clear_int(&lp->lwp_mpflags,
1236                                                  LWP_MP_ULOAD);
1237                                 atomic_add_int(&dd->uload,
1238                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1239                                 atomic_add_int(&dd->ucount, -1);
1240                         }
1241                         spin_unlock(&dd->spin);
1242                 }
1243         }
1244 }
1245
1246 /*
1247  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1248  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1249  * has a better or equal priority than the process that would otherwise be
1250  * chosen, NULL is returned.
1251  *
1252  * Until we fix the RUNQ code the chklp test has to be strict or we may
1253  * bounce between processes trying to acquire the current process designation.
1254  *
1255  * Must be called with rdd->spin locked.  The spinlock is left intact through
1256  * the entire routine.  dd->spin does not have to be locked.
1257  *
1258  * If worst is non-zero this function finds the worst thread instead of the
1259  * best thread (used by the schedulerclock-based rover).
1260  */
1261 static
1262 struct lwp *
1263 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1264                        struct lwp *chklp, int worst)
1265 {
1266         struct lwp *lp;
1267         struct rq *q;
1268         u_int32_t *which, *which2;
1269         u_int32_t pri;
1270         u_int32_t rtqbits;
1271         u_int32_t tsqbits;
1272         u_int32_t idqbits;
1273
1274         rtqbits = rdd->rtqueuebits;
1275         tsqbits = rdd->queuebits;
1276         idqbits = rdd->idqueuebits;
1277
1278         if (worst) {
1279                 if (idqbits) {
1280                         pri = bsrl(idqbits);
1281                         q = &rdd->idqueues[pri];
1282                         which = &rdd->idqueuebits;
1283                         which2 = &idqbits;
1284                 } else if (tsqbits) {
1285                         pri = bsrl(tsqbits);
1286                         q = &rdd->queues[pri];
1287                         which = &rdd->queuebits;
1288                         which2 = &tsqbits;
1289                 } else if (rtqbits) {
1290                         pri = bsrl(rtqbits);
1291                         q = &rdd->rtqueues[pri];
1292                         which = &rdd->rtqueuebits;
1293                         which2 = &rtqbits;
1294                 } else {
1295                         return (NULL);
1296                 }
1297                 lp = TAILQ_LAST(q, rq);
1298         } else {
1299                 if (rtqbits) {
1300                         pri = bsfl(rtqbits);
1301                         q = &rdd->rtqueues[pri];
1302                         which = &rdd->rtqueuebits;
1303                         which2 = &rtqbits;
1304                 } else if (tsqbits) {
1305                         pri = bsfl(tsqbits);
1306                         q = &rdd->queues[pri];
1307                         which = &rdd->queuebits;
1308                         which2 = &tsqbits;
1309                 } else if (idqbits) {
1310                         pri = bsfl(idqbits);
1311                         q = &rdd->idqueues[pri];
1312                         which = &rdd->idqueuebits;
1313                         which2 = &idqbits;
1314                 } else {
1315                         return (NULL);
1316                 }
1317                 lp = TAILQ_FIRST(q);
1318         }
1319         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1320
1321         /*
1322          * If the passed lwp <chklp> is reasonably close to the selected
1323          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1324          *
1325          * Note that we must err on the side of <chklp> to avoid bouncing
1326          * between threads in the acquire code.
1327          */
1328         if (chklp) {
1329                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1330                         return(NULL);
1331         }
1332
1333         KTR_COND_LOG(usched_chooseproc,
1334             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1335             lp->lwp_proc->p_pid,
1336             lp->lwp_thread->td_gd->gd_cpuid,
1337             mycpu->gd_cpuid);
1338
1339         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1340         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1341         TAILQ_REMOVE(q, lp, lwp_procq);
1342         --rdd->runqcount;
1343         if (TAILQ_EMPTY(q))
1344                 *which &= ~(1 << pri);
1345
1346         /*
1347          * If we are choosing a process from rdd with the intent to
1348          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1349          * is still held.
1350          */
1351         if (rdd != dd) {
1352                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1353                         atomic_add_int(&rdd->uload,
1354                             -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1355                         atomic_add_int(&rdd->ucount, -1);
1356                 }
1357                 lp->lwp_qcpu = dd->cpuid;
1358                 atomic_add_int(&dd->uload,
1359                     ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1360                 atomic_add_int(&dd->ucount, 1);
1361                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1362         }
1363         return lp;
1364 }
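/*
 * Illustrative sketch (not part of the scheduler): the <chklp> test above
 * keeps the currently-held lwp unless the queued one is at least a full
 * priority queue better (lower value == better); with MAXPRI 128 and
 * NQS 32, PPQ is 4.  The helper name and the bare integer priorities
 * below are hypothetical.
 */
#if 0
static int
keep_current_lwp(int chk_pri, int queued_pri)
{
        return (chk_pri < queued_pri + PPQ);    /* non-zero: keep <chklp> */
}
/* keep_current_lwp(20, 18) -> 1 (keep), keep_current_lwp(24, 18) -> 0 */
#endif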
1365
1366 #ifdef SMP
1367
1368 /*
1369  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1370  *
1371  * Choose a cpu node to schedule lp on, hopefully near its current
1372  * node.
1373  *
1374  * We give the current node a modest advantage for obvious reasons.
1375  *
1376  * We also give the node the thread was woken up FROM a slight advantage
1377  * in order to try to schedule paired threads which synchronize/block waiting
1378  * for each other fairly close to each other.  Similarly in a network setting
1379  * this feature will also attempt to place a user process near the kernel
1380  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1381  * algorithm as it heuristically groups synchronizing processes for locality
1382  * of reference in multi-socket systems.
1383  *
1384  * We check against running processes and give a big advantage if there
1385  * are none running.
1386  *
1387  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1388  *
1389  * When the topology is known, choose a cpu whose group has, in aggregate,
1390  * the lowest weighted load.
1391  */
1392 static
1393 dfly_pcpu_t
1394 dfly_choose_best_queue(struct lwp *lp)
1395 {
1396         cpumask_t wakemask;
1397         cpumask_t mask;
1398         cpu_node_t *cpup;
1399         cpu_node_t *cpun;
1400         cpu_node_t *cpub;
1401         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1402         dfly_pcpu_t rdd;
1403         int wakecpu;
1404         int cpuid;
1405         int n;
1406         int count;
1407         int load;
1408         int lowest_load;
1409
1410         /*
1411          * When the topology is unknown choose a random cpu that is hopefully
1412          * idle.
1413          */
1414         if (dd->cpunode == NULL)
1415                 return (dfly_choose_queue_simple(dd, lp));
1416
1417         /*
1418          * Pairing mask
1419          */
1420         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1421                 wakemask = dfly_pcpu[wakecpu].cpumask;
1422         else
1423                 wakemask = 0;
1424
1425          * When the topology is known, choose a cpu whose group has, in
1426          * aggregate, the lowest weighted load.
1427          * aggregate, has the lowest weighted load.
1428          */
1429         cpup = root_cpu_node;
1430         rdd = dd;
1431
1432         while (cpup) {
1433                 /*
1434                  * Degenerate case super-root
1435                  */
1436                 if (cpup->child_node && cpup->child_no == 1) {
1437                         cpup = cpup->child_node;
1438                         continue;
1439                 }
1440
1441                 /*
1442                  * Terminal cpunode
1443                  */
1444                 if (cpup->child_node == NULL) {
1445                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1446                         break;
1447                 }
1448
1449                 cpub = NULL;
1450                 lowest_load = 0x7FFFFFFF;
1451
1452                 for (n = 0; n < cpup->child_no; ++n) {
1453                         /*
1454                          * Accumulate load information for all cpus
1455                          * which are members of this node.
1456                          */
1457                         cpun = &cpup->child_node[n];
1458                         mask = cpun->members & usched_global_cpumask &
1459                                smp_active_mask & lp->lwp_cpumask;
1460                         if (mask == 0)
1461                                 continue;
1462
1463                         count = 0;
1464                         load = 0;
1465
1466                         while (mask) {
1467                                 cpuid = BSFCPUMASK(mask);
1468                                 rdd = &dfly_pcpu[cpuid];
1469                                 load += rdd->uload;
1470                                 load += rdd->ucount * usched_dfly_weight3;
1471
1472                                 if (rdd->uschedcp == NULL &&
1473                                     rdd->runqcount == 0) {
1474                                         load -= usched_dfly_weight4;
1475                                 } else if (rdd->upri > lp->lwp_priority + PPQ) {
1476                                         load -= usched_dfly_weight4 / 2;
1477                                 }
1478                                 mask &= ~CPUMASK(cpuid);
1479                                 ++count;
1480                         }
1481
1482                         /*
1483                          * Compensate if the lp is already accounted for in
1484                          * the aggregate uload for this mask set.  We want
1485                          * to calculate the loads as if lp were not present,
1486                          * otherwise the calculation is bogus.
1487                          */
1488                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1489                             (dd->cpumask & cpun->members)) {
1490                                 load -= ((lp->lwp_priority & ~PPQMASK) &
1491                                          PRIMASK);
1492                                 load -= usched_dfly_weight3;
1493                         }
1494
1495                         load /= count;
1496
1497                         /*
1498                          * Advantage the cpu group (lp) is already on.
1499                          */
1500                         if (cpun->members & dd->cpumask)
1501                                 load -= usched_dfly_weight1;
1502
1503                         /*
1504                          * Advantage the cpu group we want to pair (lp) to,
1505                          * but don't let it go to the exact same cpu as
1506                          * the wakecpu target.
1507                          *
1508                          * We do this by checking whether cpun is a
1509                          * terminal node or not.  All cpun's at the same
1510                          * level will either all be terminal or all not
1511                          * terminal.
1512                          *
1513                          * If it is and we match we disadvantage the load.
1514                          * If it is and we don't match we advantage the load.
1515                          *
1516                          * Also note that we are effectively disadvantaging
1517          * all-but-one by the same amount, so it won't affect
1518                          * the weight1 factor for the all-but-one nodes.
1519                          */
1520                         if (cpun->members & wakemask) {
1521                                 if (cpun->child_node != NULL) {
1522                                         /* advantage */
1523                                         load -= usched_dfly_weight2;
1524                                 } else {
1525                                         if (usched_dfly_features & 0x10)
1526                                                 load += usched_dfly_weight2;
1527                                         else
1528                                                 load -= usched_dfly_weight2;
1529                                 }
1530                         }
1531
1532                         /*
1533                          * Calculate the best load
1534                          */
1535                         if (cpub == NULL || lowest_load > load ||
1536                             (lowest_load == load &&
1537                              (cpun->members & dd->cpumask))
1538                         ) {
1539                                 lowest_load = load;
1540                                 cpub = cpun;
1541                         }
1542                 }
1543                 cpup = cpub;
1544         }
1545         if (usched_dfly_chooser)
1546                 kprintf("lp %02d->%02d %s\n",
1547                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1548         return (rdd);
1549 }
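/*
 * Illustrative sketch (not part of the scheduler): a condensed version of
 * the per-child-node score computed in the loop above.  The struct, the
 * helper name and the numeric weights (stand-ins for usched_dfly_weight3,
 * usched_dfly_weight4 and usched_dfly_weight1, values arbitrary) are
 * assumptions; the wakemask pairing term and the compensation for lp's
 * own uload are left out for brevity.
 */
#if 0
struct node_stat { int uload; int ucount; int idle; };

static int
node_score(const struct node_stat *cpus, int ncpu, int on_this_node)
{
        int load = 0;
        int i;

        for (i = 0; i < ncpu; ++i) {
                load += cpus[i].uload + cpus[i].ucount * 50;    /* weight3 */
                if (cpus[i].idle)
                        load -= 50;                             /* weight4 */
        }
        load /= ncpu;
        if (on_this_node)
                load -= 10;                                     /* weight1 */
        return (load);          /* the child with the lowest score wins */
}
#endif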
1550
1551 /*
1552  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1553  *
1554  * Choose the worst queue close to dd's cpu node with a non-empty runq
1555  * that is NOT dd.  Also require that the moving of the highest-load thread
1556  * from rdd to dd does not cause the uloads to cross each other.
1557  *
1558  * This is used by the thread chooser when the current cpu's queues are
1559  * empty to steal a thread from another cpu's queue.  We want to offload
1560  * the most heavily-loaded queue.
1561  */
1562 static
1563 dfly_pcpu_t
1564 dfly_choose_worst_queue(dfly_pcpu_t dd)
1565 {
1566         cpumask_t mask;
1567         cpu_node_t *cpup;
1568         cpu_node_t *cpun;
1569         cpu_node_t *cpub;
1570         dfly_pcpu_t rdd;
1571         int cpuid;
1572         int n;
1573         int count;
1574         int load;
1575         int pri;
1576         int hpri;
1577         int highest_load;
1578
1579         /*
1580          * When the topology is unknown there is no sensible way to pick a
1581          * remote queue to steal from, so do not steal at all.
1582          */
1583         if (dd->cpunode == NULL) {
1584                 return (NULL);
1585         }
1586
1587          * When the topology is known, choose a cpu whose group has, in
1588          * aggregate, the highest weighted load.
1589          * aggregate, has the lowest weighted load.
1590          */
1591         cpup = root_cpu_node;
1592         rdd = dd;
1593         while (cpup) {
1594                 /*
1595                  * Degenerate case super-root
1596                  */
1597                 if (cpup->child_node && cpup->child_no == 1) {
1598                         cpup = cpup->child_node;
1599                         continue;
1600                 }
1601
1602                 /*
1603                  * Terminal cpunode
1604                  */
1605                 if (cpup->child_node == NULL) {
1606                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1607                         break;
1608                 }
1609
1610                 cpub = NULL;
1611                 highest_load = 0;
1612
1613                 for (n = 0; n < cpup->child_no; ++n) {
1614                         /*
1615                          * Accumulate load information for all cpus
1616                          * which are members of this node.
1617                          */
1618                         cpun = &cpup->child_node[n];
1619                         mask = cpun->members & usched_global_cpumask &
1620                                smp_active_mask;
1621                         if (mask == 0)
1622                                 continue;
1623                         count = 0;
1624                         load = 0;
1625
1626                         while (mask) {
1627                                 cpuid = BSFCPUMASK(mask);
1628                                 rdd = &dfly_pcpu[cpuid];
1629                                 load += rdd->uload;
1630                                 load += rdd->ucount * usched_dfly_weight3;
1631                                 if (rdd->uschedcp == NULL &&
1632                                     rdd->runqcount == 0 &&
1633                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1634                                 ) {
1635                                         load -= usched_dfly_weight4;
1636                                 } else if (rdd->upri > dd->upri + PPQ) {
1637                                         load -= usched_dfly_weight4 / 2;
1638                                 }
1639                                 mask &= ~CPUMASK(cpuid);
1640                                 ++count;
1641                         }
1642                         load /= count;
1643
1644                         /*
1645                          * Prefer candidates which are somewhat closer to
1646                          * our cpu.
1647                          */
1648                         if (dd->cpumask & cpun->members)
1649                                 load += usched_dfly_weight1;
1650
1651                         /*
1652                          * The best candidate is the one with the worst
1653                          * (highest) load.
1654                          */
1655                         if (cpub == NULL || highest_load < load) {
1656                                 highest_load = load;
1657                                 cpub = cpun;
1658                         }
1659                 }
1660                 cpup = cpub;
1661         }
1662
1663         /*
1664          * We never return our own node (dd), and only return a remote
1665          * node if its load is significantly worse than ours (i.e. where
1666          * stealing a thread would be considered reasonable).
1667          *
1668          * This also helps us avoid breaking paired threads apart which
1669          * can have disastrous effects on performance.
1670          */
1671         if (rdd == dd)
1672                 return(NULL);
1673
1674         hpri = 0;
1675         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1676                 hpri = pri;
1677         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1678                 hpri = pri;
1679         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1680                 hpri = pri;
1681         hpri *= PPQ;
1682         if (rdd->uload - hpri < dd->uload + hpri)
1683                 return(NULL);
1684         return (rdd);
1685 }
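/*
 * Illustrative sketch (not part of the scheduler): the final steal test
 * above.  hpri approximates the uload contribution of the hottest queued
 * priority band on the remote cpu; a steal is only accepted when moving
 * that band cannot make the remote cpu lighter than us.  The helper name
 * and the sample numbers are hypothetical.
 */
#if 0
static int
steal_is_reasonable(int remote_uload, int local_uload, int highest_rqindex)
{
        int hpri = highest_rqindex * PPQ;

        return (remote_uload - hpri >= local_uload + hpri);
}
/* steal_is_reasonable(400, 100, 25) -> 1, steal_is_reasonable(280, 100, 25) -> 0 */
#endif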
1686
1687 static
1688 dfly_pcpu_t
1689 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1690 {
1691         dfly_pcpu_t rdd;
1692         cpumask_t tmpmask;
1693         cpumask_t mask;
1694         int cpuid;
1695
1696         /*
1697          * Fall back to the original heuristic: select a random cpu,
1698          * first checking cpus not currently running a user thread.
1699          */
1700         ++dfly_scancpu;
1701         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1702         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1703                smp_active_mask & usched_global_cpumask;
1704
1705         while (mask) {
1706                 tmpmask = ~(CPUMASK(cpuid) - 1);
1707                 if (mask & tmpmask)
1708                         cpuid = BSFCPUMASK(mask & tmpmask);
1709                 else
1710                         cpuid = BSFCPUMASK(mask);
1711                 rdd = &dfly_pcpu[cpuid];
1712
1713                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1714                         goto found;
1715                 mask &= ~CPUMASK(cpuid);
1716         }
1717
1718         /*
1719          * Then cpus which might have a currently running lp
1720          */
1721         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1722         mask = dfly_curprocmask & dfly_rdyprocmask &
1723                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1724
1725         while (mask) {
1726                 tmpmask = ~(CPUMASK(cpuid) - 1);
1727                 if (mask & tmpmask)
1728                         cpuid = BSFCPUMASK(mask & tmpmask);
1729                 else
1730                         cpuid = BSFCPUMASK(mask);
1731                 rdd = &dfly_pcpu[cpuid];
1732
1733                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1734                         goto found;
1735                 mask &= ~CPUMASK(cpuid);
1736         }
1737
1738         /*
1739          * If we cannot find a suitable cpu we reload from dfly_scancpu
1740          * and round-robin.  Other cpus will pick up as they release their
1741          * current lwps or become ready.
1742          *
1743          * Avoid a degenerate system lockup case if usched_global_cpumask
1744          * is set to 0 or otherwise does not cover lwp_cpumask.
1745          *
1746          * We only kick the target helper thread in this case; we do not
1747          * set the user resched flag ourselves.
1748          */
1749         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1750         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1751                 cpuid = 0;
1752         rdd = &dfly_pcpu[cpuid];
1753 found:
1754         return (rdd);
1755 }
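/*
 * Illustrative sketch (not part of the scheduler): the rotor scan used in
 * both loops above.  ~(CPUMASK(cpuid) - 1) is a mask of all cpus at or
 * above the rotor position, so candidates are scanned starting there and
 * the scan wraps to the lowest set bit otherwise.  The helper name and
 * the sample mask are hypothetical.
 */
#if 0
static int
rotor_first_cpu(cpumask_t candidates, int rotor)
{
        cpumask_t tmpmask = ~(CPUMASK(rotor) - 1);      /* cpus >= rotor */

        if (candidates & tmpmask)
                return (BSFCPUMASK(candidates & tmpmask));
        return (BSFCPUMASK(candidates));                /* wrap around */
}
/* rotor_first_cpu(0x13, 5) -> 0 (cpus 0, 1, 4 set; none >= 5, so wrap) */
#endif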
1756
1757 static
1758 void
1759 dfly_need_user_resched_remote(void *dummy)
1760 {
1761         globaldata_t gd = mycpu;
1762         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1763
1764         need_user_resched();
1765
1766         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1767         wakeup_mycpu(&dd->helper_thread);
1768 }
1769
1770 #endif
1771
1772 /*
1773  * dfly_remrunqueue_locked() removes a given process from the run queue
1774  * that it is on, clearing the queue busy bit if it becomes empty.
1775  *
1776  * Note that the user process scheduler is different from the LWKT scheduler.
1777  * The user process scheduler only manages user processes but it uses LWKT
1778  * underneath, and a user process operating in the kernel will often be
1779  * 'released' from our management.
1780  *
1781  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1782  * to sleep or the lwp is moved to a different runq.
1783  */
1784 static void
1785 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1786 {
1787         struct rq *q;
1788         u_int32_t *which;
1789         u_int8_t pri;
1790
1791         KKASSERT(rdd->runqcount >= 0);
1792
1793         pri = lp->lwp_rqindex;
1794
1795         switch(lp->lwp_rqtype) {
1796         case RTP_PRIO_NORMAL:
1797                 q = &rdd->queues[pri];
1798                 which = &rdd->queuebits;
1799                 break;
1800         case RTP_PRIO_REALTIME:
1801         case RTP_PRIO_FIFO:
1802                 q = &rdd->rtqueues[pri];
1803                 which = &rdd->rtqueuebits;
1804                 break;
1805         case RTP_PRIO_IDLE:
1806                 q = &rdd->idqueues[pri];
1807                 which = &rdd->idqueuebits;
1808                 break;
1809         default:
1810                 panic("remrunqueue: invalid rtprio type");
1811                 /* NOT REACHED */
1812         }
1813         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1814         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1815         TAILQ_REMOVE(q, lp, lwp_procq);
1816         --rdd->runqcount;
1817         if (TAILQ_EMPTY(q)) {
1818                 KASSERT((*which & (1 << pri)) != 0,
1819                         ("remrunqueue: remove from empty queue"));
1820                 *which &= ~(1 << pri);
1821         }
1822 }
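/*
 * Illustrative sketch (not part of the scheduler): the queuebits
 * bookkeeping used here and in dfly_chooseproc_locked().  Each of the NQS
 * queues owns one bit which is set while the queue is non-empty, so
 * bsfl()/bsrl() locate the best/worst occupied queue without walking all
 * 32 of them.  The function and the queue indices below are made up.
 */
#if 0
static void
queuebits_example(void)
{
        u_int32_t qbits = 0;

        qbits |= 1 << 5;        /* queue 5 becomes non-empty */
        qbits |= 1 << 12;       /* queue 12 becomes non-empty */
        /* bsfl(qbits) == 5 (best), bsrl(qbits) == 12 (worst) */
        qbits &= ~(1 << 5);     /* last lwp removed, clear queue 5's bit */
        /* bsfl(qbits) == 12 */
}
#endif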
1823
1824 /*
1825  * dfly_setrunqueue_locked()
1826  *
1827  * Add a process whose rqtype and rqindex had previously been calculated
1828  * onto the appropriate run queue.   Determine if the addition requires
1829  * a reschedule on a cpu and return the cpuid or -1.
1830  *
1831  * NOTE:          Lower priorities are better priorities.
1832  *
1833  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1834  *                sum of the rough lwp_priority for all running and runnable
1835  *                processes.  Lower priority processes (higher lwp_priority
1836  *                values) actually DO count as more load, not less, because
1837  *                these are the programs which require the most care with
1838  *                regard to cpu selection.
1839  */
1840 static void
1841 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1842 {
1843         struct rq *q;
1844         u_int32_t *which;
1845         int pri;
1846
1847         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1848
1849         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1850                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1851                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1852                                (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1853                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1854         }
1855
1856         pri = lp->lwp_rqindex;
1857
1858         switch(lp->lwp_rqtype) {
1859         case RTP_PRIO_NORMAL:
1860                 q = &rdd->queues[pri];
1861                 which = &rdd->queuebits;
1862                 break;
1863         case RTP_PRIO_REALTIME:
1864         case RTP_PRIO_FIFO:
1865                 q = &rdd->rtqueues[pri];
1866                 which = &rdd->rtqueuebits;
1867                 break;
1868         case RTP_PRIO_IDLE:
1869                 q = &rdd->idqueues[pri];
1870                 which = &rdd->idqueuebits;
1871                 break;
1872         default:
1873                 panic("setrunqueue: invalid rtprio type");
1874                 /* NOT REACHED */
1875         }
1876
1877         /*
1878          * Add to the correct queue and set the appropriate bit.  If no
1879          * lower priority (i.e. better) processes are in the queue then
1880          * we want a reschedule; calculate the best cpu for the job.
1881          *
1882          * Always run reschedules on the LWP's original cpu.
1883          */
1884         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1885         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1886         ++rdd->runqcount;
1887         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1888         *which |= 1 << pri;
1889 }
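/*
 * Illustrative sketch (not part of the scheduler): the per-lwp uload
 * contribution added above.  The priority is rounded down to its queue
 * boundary (PPQMASK) and clamped to the scheduler range (PRIMASK), so
 * numerically higher lwp_priority values (lower scheduling priority) add
 * more load, matching the NOTE ON ULOAD above.  The helper name and the
 * sample priorities are hypothetical.
 */
#if 0
static int
uload_contribution(int lwp_priority)
{
        return ((lwp_priority & ~PPQMASK) & PRIMASK);
}
/* uload_contribution(77) -> 76, uload_contribution(130) -> 0 */
#endif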
1890
1891 #ifdef SMP
1892
1893 /*
1894  * For SMP systems a user scheduler helper thread is created for each
1895  * cpu and is used to allow one cpu to wakeup another for the purposes of
1896  * scheduling userland threads from setrunqueue().
1897  *
1898  * UP systems do not need the helper since there is only one cpu.
1899  *
1900  * We can't use the idle thread for this because we might block.
1901  * Additionally, doing things this way allows us to HLT idle cpus
1902  * on MP systems.
1903  */
1904 static void
1905 dfly_helper_thread(void *dummy)
1906 {
1907     globaldata_t gd;
1908     dfly_pcpu_t dd;
1909     dfly_pcpu_t rdd;
1910     struct lwp *nlp;
1911     cpumask_t mask;
1912     int cpuid;
1913
1914     gd = mycpu;
1915     cpuid = gd->gd_cpuid;       /* doesn't change */
1916     mask = gd->gd_cpumask;      /* doesn't change */
1917     dd = &dfly_pcpu[cpuid];
1918
1919     /*
1920      * Since we only want to be woken up when no user processes
1921      * are scheduled on a cpu, run at an ultra low priority.
1922      */
1923     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1924
1925     tsleep(&dd->helper_thread, 0, "schslp", 0);
1926
1927     for (;;) {
1928         /*
1929          * We use the LWKT deschedule-interlock trick to avoid racing
1930          * dfly_rdyprocmask.  This means we cannot block anywhere between
1931          * here and the tsleep() at the bottom of the loop.
1932          */
1933         crit_enter_gd(gd);
1934         tsleep_interlock(&dd->helper_thread, 0);
1935
1936         spin_lock(&dd->spin);
1937
1938         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1939         clear_user_resched();   /* This satisfies the reschedule request */
1940         dd->rrcount = 0;        /* Reset the round-robin counter */
1941
1942         if (dd->runqcount || dd->uschedcp != NULL) {
1943                 /*
1944                  * Threads are available.  A thread may or may not be
1945                  * currently scheduled.  Get the best thread already queued
1946                  * to this cpu.
1947                  */
1948                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
1949                 if (nlp) {
1950                         atomic_set_cpumask(&dfly_curprocmask, mask);
1951                         dd->upri = nlp->lwp_priority;
1952                         dd->uschedcp = nlp;
1953                         dd->rrcount = 0;        /* reset round robin */
1954                         spin_unlock(&dd->spin);
1955                         lwkt_acquire(nlp->lwp_thread);
1956                         lwkt_schedule(nlp->lwp_thread);
1957                 } else {
1958                         /*
1959                          * This situation should not occur because we had
1960                          * at least one thread available.
1961                          */
1962                         spin_unlock(&dd->spin);
1963                 }
1964         } else if (usched_dfly_features & 0x01) {
1965                 /*
1966                  * This cpu is devoid of runnable threads, steal a thread
1967                  * from another cpu.  Since we're stealing, might as well
1968                  * load balance at the same time.
1969                  *
1970                  * NOTE! This function only returns a non-NULL rdd when
1971                  *       another cpu's queue is obviously overloaded.  We
1972                  *       do not want to perform the type of rebalancing
1973                  *       the schedclock does here because it would result
1974                  *       in insane process pulling when 'steady' state is
1975                  *       partially unbalanced (e.g. 6 runnables and only
1976                  *       4 cores).
1977                  */
1978                 rdd = dfly_choose_worst_queue(dd);
1979                 if (rdd && spin_trylock(&rdd->spin)) {
1980                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 0);
1981                         spin_unlock(&rdd->spin);
1982                 } else {
1983                         nlp = NULL;
1984                 }
1985                 if (nlp) {
1986                         atomic_set_cpumask(&dfly_curprocmask, mask);
1987                         dd->upri = nlp->lwp_priority;
1988                         dd->uschedcp = nlp;
1989                         dd->rrcount = 0;        /* reset round robin */
1990                         spin_unlock(&dd->spin);
1991                         lwkt_acquire(nlp->lwp_thread);
1992                         lwkt_schedule(nlp->lwp_thread);
1993                 } else {
1994                         /*
1995                          * Leave the thread on our run queue.  Another
1996                          * scheduler will try to pull it later.
1997                          */
1998                         spin_unlock(&dd->spin);
1999                 }
2000         } else {
2001                 /*
2002                  * devoid of runnable threads and not allowed to steal
2003                  * any.
2004                  */
2005                 spin_unlock(&dd->spin);
2006         }
2007
2008         /*
2009          * We're descheduled unless someone scheduled us.  Switch away.
2010          * Exiting the critical section will cause splz() to be called
2011          * for us if interrupts and such are pending.
2012          */
2013         crit_exit_gd(gd);
2014         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2015     }
2016 }
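/*
 * Illustrative sketch (not part of the scheduler): the deschedule-interlock
 * pattern the helper loop relies on.  The interlock arms the sleep before
 * the spinlocked section; a wakeup arriving in between is remembered, so
 * the later PINTERLOCKED tsleep() returns immediately instead of losing
 * the wakeup.
 */
#if 0
        tsleep_interlock(&dd->helper_thread, 0);        /* arm the sleep */
        /* ... examine the queues, a wakeup may arrive here ... */
        tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
#endif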
2017
2018 #if 0
2019 static int
2020 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2021 {
2022         int error, new_val;
2023
2024         new_val = usched_dfly_stick_to_level;
2025
2026         error = sysctl_handle_int(oidp, &new_val, 0, req);
2027         if (error != 0 || req->newptr == NULL)
2028                 return (error);
2029         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2030                 return (EINVAL);
2031         usched_dfly_stick_to_level = new_val;
2032         return (0);
2033 }
2034 #endif
2035
2036 /*
2037  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
2038  * been cleared by rqinit() and we should not mess with it further.
2039  */
2040 static void
2041 dfly_helper_thread_cpu_init(void)
2042 {
2043         int i;
2044         int j;
2045         int cpuid;
2046         int smt_not_supported = 0;
2047         int cache_coherent_not_supported = 0;
2048
2049         if (bootverbose)
2050                 kprintf("Start scheduler helpers on cpus:\n");
2051
2052         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2053         usched_dfly_sysctl_tree =
2054                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2055                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2056                                 "usched_dfly", CTLFLAG_RD, 0, "");
2057
2058         for (i = 0; i < ncpus; ++i) {
2059                 dfly_pcpu_t dd = &dfly_pcpu[i];
2060                 cpumask_t mask = CPUMASK(i);
2061
2062                 if ((mask & smp_active_mask) == 0)
2063                     continue;
2064
2065                 spin_init(&dd->spin);
2066                 dd->cpunode = get_cpu_node_by_cpuid(i);
2067                 dd->cpuid = i;
2068                 dd->cpumask = CPUMASK(i);
2069                 for (j = 0; j < NQS; j++) {
2070                         TAILQ_INIT(&dd->queues[j]);
2071                         TAILQ_INIT(&dd->rtqueues[j]);
2072                         TAILQ_INIT(&dd->idqueues[j]);
2073                 }
2074                 atomic_clear_cpumask(&dfly_curprocmask, 1);
2075
2076                 if (dd->cpunode == NULL) {
2077                         smt_not_supported = 1;
2078                         cache_coherent_not_supported = 1;
2079                         if (bootverbose)
2080                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2081                                          "found for cpu\n", i);
2082                 } else {
2083                         switch (dd->cpunode->type) {
2084                         case THREAD_LEVEL:
2085                                 if (bootverbose)
2086                                         kprintf ("\tcpu%d - HyperThreading "
2087                                                  "available. Core siblings: ",
2088                                                  i);
2089                                 break;
2090                         case CORE_LEVEL:
2091                                 smt_not_supported = 1;
2092
2093                                 if (bootverbose)
2094                                         kprintf ("\tcpu%d - No HT available, "
2095                                                  "multi-core/physical "
2096                                                  "cpu. Physical siblings: ",
2097                                                  i);
2098                                 break;
2099                         case CHIP_LEVEL:
2100                                 smt_not_supported = 1;
2101
2102                                 if (bootverbose)
2103                                         kprintf ("\tcpu%d - No HT available, "
2104                                                  "single-core/physical cpu. "
2105                                                  "Package Siblings: ",
2106                                                  i);
2107                                 break;
2108                         default:
2109                                 /* Let's go for safe defaults here */
2110                                 smt_not_supported = 1;
2111                                 cache_coherent_not_supported = 1;
2112                                 if (bootverbose)
2113                                         kprintf ("\tcpu%d - Unknown cpunode->"
2114                                                  "type=%u. Siblings: ",
2115                                                  i,
2116                                                  (u_int)dd->cpunode->type);
2117                                 break;
2118                         }
2119
2120                         if (bootverbose) {
2121                                 if (dd->cpunode->parent_node != NULL) {
2122                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2123                                                 kprintf("cpu%d ", cpuid);
2124                                         kprintf("\n");
2125                                 } else {
2126                                         kprintf(" no siblings\n");
2127                                 }
2128                         }
2129                 }
2130
2131                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2132                             0, i, "usched %d", i);
2133
2134                 /*
2135                  * Allow user scheduling on the target cpu.  cpu #0 has already
2136                  * been enabled in rqinit().
2137                  */
2138                 if (i)
2139                     atomic_clear_cpumask(&dfly_curprocmask, mask);
2140                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2141                 dd->upri = PRIBASE_NULL;
2142
2143         }
2144
2145         /* usched_dfly sysctl configurable parameters */
2146
2147         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2148                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2149                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2150                        &usched_dfly_rrinterval, 0, "");
2151         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2152                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2153                        OID_AUTO, "decay", CTLFLAG_RW,
2154                        &usched_dfly_decay, 0, "Extra decay when not running");
2155         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2156                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2157                        OID_AUTO, "batch_time", CTLFLAG_RW,
2158                        &usched_dfly_batch_time, 0, "Min batch counter value");
2159
2160         /* Add enable/disable option for SMT scheduling if supported */
2161         if (smt_not_supported) {
2162                 usched_dfly_smt = 0;
2163                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2164                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2165                                   OID_AUTO, "smt", CTLFLAG_RD,
2166                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2167         } else {
2168                 usched_dfly_smt = 1;
2169                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2170                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2171                                OID_AUTO, "smt", CTLFLAG_RW,
2172                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2173         }
2174
2175         /*
2176          * Add enable/disable option for cache coherent scheduling
2177          * if supported
2178          */
2179         if (cache_coherent_not_supported) {
2180                 usched_dfly_cache_coherent = 0;
2181                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2182                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2183                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2184                                   "NOT SUPPORTED", 0,
2185                                   "Cache coherence NOT SUPPORTED");
2186         } else {
2187                 usched_dfly_cache_coherent = 1;
2188                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2189                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2190                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2191                                &usched_dfly_cache_coherent, 0,
2192                                "Enable/Disable cache coherent scheduling");
2193
2194                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2195                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2196                                OID_AUTO, "weight1", CTLFLAG_RW,
2197                                &usched_dfly_weight1, 10,
2198                                "Weight selection for current cpu");
2199
2200                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2201                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2202                                OID_AUTO, "weight2", CTLFLAG_RW,
2203                                &usched_dfly_weight2, 5,
2204                                "Weight selection for wakefrom cpu");
2205
2206                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2207                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2208                                OID_AUTO, "weight3", CTLFLAG_RW,
2209                                &usched_dfly_weight3, 50,
2210                                "Weight selection for num threads on queue");
2211
2212                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2213                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2214                                OID_AUTO, "weight4", CTLFLAG_RW,
2215                                &usched_dfly_weight4, 50,
2216                                "Availability of other idle cpus");
2217
2218                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2219                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2220                                OID_AUTO, "features", CTLFLAG_RW,
2221                                &usched_dfly_features, 15,
2222                                "Allow pulls into empty queues");
2223
2224
2225 #if 0
2226                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2227                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2228                                 OID_AUTO, "stick_to_level",
2229                                 CTLTYPE_INT | CTLFLAG_RW,
2230                                 NULL, sizeof usched_dfly_stick_to_level,
2231                                 sysctl_usched_dfly_stick_to_level, "I",
2232                                 "Stick a process to this level. See sysctl "
2233                                 "parameter hw.cpu_topology.level_description");
2234 #endif
2235         }
2236 }
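/*
 * Note: the knobs registered above appear under the kern.usched_dfly
 * sysctl tree (e.g. kern.usched_dfly.weight1, kern.usched_dfly.smt) and,
 * where marked CTLFLAG_RW, can be tuned at runtime with sysctl(8).
 */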
2237 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2238         dfly_helper_thread_cpu_init, NULL)
2239
2240 #else /* No SMP options - just add the configurable parameters to sysctl */
2241
2242 static void
2243 sched_sysctl_tree_init(void)
2244 {
2245         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2246         usched_dfly_sysctl_tree =
2247                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2248                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2249                                 "usched_dfly", CTLFLAG_RD, 0, "");
2250
2251         /* usched_dfly sysctl configurable parameters */
2252         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2253                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2254                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2255                        &usched_dfly_rrinterval, 0, "");
2256         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2257                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2258                        OID_AUTO, "decay", CTLFLAG_RW,
2259                        &usched_dfly_decay, 0, "Extra decay when not running");
2260         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2261                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2262                        OID_AUTO, "batch_time", CTLFLAG_RW,
2263                        &usched_dfly_batch_time, 0, "Min batch counter value");
2264 }
2265 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2266         sched_sysctl_tree_init, NULL)
2267 #endif