1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_estfast     lwp_usdata.dfly.estfast
97 #define lwp_uload       lwp_usdata.dfly.uload
98 #define lwp_rqtype      lwp_usdata.dfly.rqtype
99 #define lwp_qcpu        lwp_usdata.dfly.qcpu
100 #define lwp_rrcount     lwp_usdata.dfly.rrcount
101
102 struct usched_dfly_pcpu {
103         struct spinlock spin;
104 #ifdef SMP
105         struct thread   helper_thread;
106 #else
107         struct thread   helper_thread_UNUSED;   /* field unused */
108 #endif
109         short           unusde01;
110         short           upri;
111         int             uload;
112         int             ucount;
113         struct lwp      *uschedcp;
114         struct rq       queues[NQS];
115         struct rq       rtqueues[NQS];
116         struct rq       idqueues[NQS];
117         u_int32_t       queuebits;
118         u_int32_t       rtqueuebits;
119         u_int32_t       idqueuebits;
120         int             runqcount;
121         int             cpuid;
122         cpumask_t       cpumask;
123 #ifdef SMP
124         cpu_node_t      *cpunode;
125 #endif
126 };
127
128 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
129
130 static void dfly_acquire_curproc(struct lwp *lp);
131 static void dfly_release_curproc(struct lwp *lp);
132 static void dfly_select_curproc(globaldata_t gd);
133 static void dfly_setrunqueue(struct lwp *lp);
134 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
135 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
136                                 sysclock_t cpstamp);
137 static void dfly_recalculate_estcpu(struct lwp *lp);
138 static void dfly_resetpriority(struct lwp *lp);
139 static void dfly_forking(struct lwp *plp, struct lwp *lp);
140 static void dfly_exiting(struct lwp *lp, struct proc *);
141 static void dfly_uload_update(struct lwp *lp);
142 static void dfly_yield(struct lwp *lp);
143 #ifdef SMP
144 static void dfly_changeqcpu_locked(struct lwp *lp,
145                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
146 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
147 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
148 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
149 #endif
150
151 #ifdef SMP
152 static void dfly_need_user_resched_remote(void *dummy);
153 #endif
154 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
155                                           struct lwp *chklp, int worst);
156 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
157 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
158
159 struct usched usched_dfly = {
160         { NULL },
161         "dfly", "Original DragonFly Scheduler",
162         NULL,                   /* default registration */
163         NULL,                   /* default deregistration */
164         dfly_acquire_curproc,
165         dfly_release_curproc,
166         dfly_setrunqueue,
167         dfly_schedulerclock,
168         dfly_recalculate_estcpu,
169         dfly_resetpriority,
170         dfly_forking,
171         dfly_exiting,
172         dfly_uload_update,
173         NULL,                   /* setcpumask not supported */
174         dfly_yield
175 };
176
177 /*
178  * We have NQS (32) run queues per scheduling class.  For the normal
179  * class, there are 128 priorities scaled onto these 32 queues.  New
180  * processes are added to the last entry in each queue, and processes
181  * are selected for running by taking them from the head and maintaining
182  * a simple FIFO arrangement.  Realtime and Idle priority processes have
183  * an explicit 0-31 priority which maps directly onto their class queue
184  * index.  When a queue has something in it, the corresponding bit is
185  * set in the queuebits variable, allowing a single read to determine
186  * the state of all 32 queues and then a ffs() to find the first busy
187  * queue.
188  */
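
/*
 * Illustrative sketch, not part of the scheduler: how a 32-bit queuebits
 * word plus a find-first-set operation locates the first non-empty queue
 * as described above.  Bit i is set iff queue i has something in it, so a
 * single word read plus one bit-scan replaces walking all 32 queues.  The
 * helper name and the use of the gcc builtin are assumptions for
 * illustration; the real code uses the bsfl()/bsrl() primitives seen in
 * dfly_chooseproc_locked() below.
 */
static __inline int
example_first_nonempty_queue(u_int32_t queuebits)
{
        if (queuebits == 0)
                return (-1);                    /* all 32 queues are empty */
        return (__builtin_ctz(queuebits));      /* index of lowest set bit */
}
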
189 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
190 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
191 #ifdef SMP
192 static volatile int dfly_scancpu;
193 #endif
194 static volatile int dfly_ucount;        /* total running on whole system */
195 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
196 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
197 static struct sysctl_oid *usched_dfly_sysctl_tree;
198
199 /* Debug info exposed through debug.* sysctl */
200
201 static int usched_dfly_debug = -1;
202 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
203            &usched_dfly_debug, 0,
204            "Print debug information for this pid");
205
206 static int usched_dfly_pid_debug = -1;
207 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
208            &usched_dfly_pid_debug, 0,
209            "Print KTR debug information for this pid");
210
211 static int usched_dfly_chooser = 0;
212 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
213            &usched_dfly_chooser, 0,
214            "Print KTR debug information for this pid");
215
216 /*
217  * Tuning usched_dfly - configurable through kern.usched_dfly.
218  *
219  * weight1 - Tries to keep threads on their current cpu.  If you
220  *           make this value too large the scheduler will not be
221  *           able to load-balance large loads.
222  *
223  * weight2 - If non-zero, detects thread pairs undergoing synchronous
224  *           communications and tries to move them closer together.
225  *           Behavior is adjusted by bit 4 of features (0x10).
226  *
227  *           WARNING!  Weight2 is a ridiculously sensitive parameter,
228  *           a small value is recommended.
229  *
230  * weight3 - Weighting based on the number of recently runnable threads
231  *           on the userland scheduling queue (ignoring their loads).
232  *           A nominal value here prevents high-priority (low-load)
233  *           threads from accumulating on one cpu core when other
234  *           cores are available.
235  *
236  *           This value should be left fairly small relative to weight1
237  *           and weight4.
238  *
239  * weight4 - Weighting based on other cpu queues being available
240  *           or running processes with higher lwp_priority's.
241  *
242  *           This allows a thread to migrate to another nearby cpu if it
243  *           is unable to run on the current cpu based on the other cpu
244  *           being idle or running a lower priority (higher lwp_priority)
245  *           thread.  This value should be large enough to override weight1
246  *
247  * features - These flags can be set or cleared to enable or disable various
248  *            features.
249  *
250  *            0x01      Enable idle-cpu pulling                 (default)
251  *            0x02      Enable proactive pushing                (default)
252  *            0x04      Enable rebalancing rover                (default)
253  *            0x08      Enable more proactive pushing           (default)
254  *            0x10      (flip weight2 limit on same cpu)        (default)
255  *            0x20      choose best cpu for forked process
256  *            0x40      choose current cpu for forked process
257  *            0x80      choose random cpu for forked process    (default)
258  */
259 #ifdef SMP
260 static int usched_dfly_smt = 0;
261 static int usched_dfly_cache_coherent = 0;
262 static int usched_dfly_weight1 = 200;   /* keep thread on current cpu */
263 static int usched_dfly_weight2 = 180;   /* synchronous peer's current cpu */
264 static int usched_dfly_weight3 = 40;    /* number of threads on queue */
265 static int usched_dfly_weight4 = 160;   /* availability of idle cores */
266 static int usched_dfly_features = 0x8F; /* allow pulls */
267 #endif
268 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
269 static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
270 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
271 static int usched_dfly_decay = 8;
272
273 /* KTR debug printings */
274
275 KTR_INFO_MASTER(usched);
276
277 #if !defined(KTR_USCHED_DFLY)
278 #define KTR_USCHED_DFLY KTR_ALL
279 #endif
280
281 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
282     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
283     pid_t pid, int old_cpuid, int curr);
284
285 /*
286  * This function is called when the kernel intends to return to userland.
287  * It is responsible for making the thread the current designated userland
288  * thread for this cpu, blocking if necessary.
289  *
290  * The kernel will not depress our LWKT priority until after we return,
291  * in case we have to shove over to another cpu.
292  *
293  * We must determine our thread's disposition before we switch away.  This
294  * is very sensitive code.
295  *
296  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
297  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
298  * occur, this function is called only under very controlled circumstances.
299  */
300 static void
301 dfly_acquire_curproc(struct lwp *lp)
302 {
303         globaldata_t gd;
304         dfly_pcpu_t dd;
305 #ifdef SMP
306         dfly_pcpu_t rdd;
307 #endif
308         thread_t td;
309         int force_resched;
310
311         /*
312          * Make sure we aren't sitting on a tsleep queue.
313          */
314         td = lp->lwp_thread;
315         crit_enter_quick(td);
316         if (td->td_flags & TDF_TSLEEPQ)
317                 tsleep_remove(td);
318         dfly_recalculate_estcpu(lp);
319
320         gd = mycpu;
321         dd = &dfly_pcpu[gd->gd_cpuid];
322
323         /*
324          * Process any pending interrupts/ipi's, then handle reschedule
325          * requests.  dfly_release_curproc() will try to assign a new
326          * uschedcp that isn't us and otherwise NULL it out.
327          */
328         force_resched = 0;
329         if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
330             lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
331                 force_resched = 1;
332         }
333
334         if (user_resched_wanted()) {
335                 if (dd->uschedcp == lp)
336                         force_resched = 1;
337                 clear_user_resched();
338                 dfly_release_curproc(lp);
339         }
340
341         /*
342          * Loop until we are the current user thread.
343          *
344          * NOTE: dd spinlock not held at top of loop.
345          */
346         if (dd->uschedcp == lp)
347                 lwkt_yield_quick();
348
349         while (dd->uschedcp != lp) {
350                 lwkt_yield_quick();
351
352                 spin_lock(&dd->spin);
353
354                 /*
355                  * We are not or are no longer the current lwp and a forced
356                  * reschedule was requested.  Figure out the best cpu to
357                  * run on (our current cpu will be given significant weight).
358                  *
359                  * (if a reschedule was not requested we want to move this
360                  *  step after the uschedcp tests).
361                  */
362 #ifdef SMP
363                 if (force_resched &&
364                     (usched_dfly_features & 0x08) &&
365                     (rdd = dfly_choose_best_queue(lp)) != dd) {
366                         dfly_changeqcpu_locked(lp, dd, rdd);
367                         spin_unlock(&dd->spin);
368                         lwkt_deschedule(lp->lwp_thread);
369                         dfly_setrunqueue_dd(rdd, lp);
370                         lwkt_switch();
371                         gd = mycpu;
372                         dd = &dfly_pcpu[gd->gd_cpuid];
373                         continue;
374                 }
375 #endif
376
377                 /*
378                  * Either no reschedule was requested or the best queue was
379                  * dd, and no current process has been selected.  We can
380                  * trivially become the current lwp on the current cpu.
381                  */
382                 if (dd->uschedcp == NULL) {
383                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
384                         dd->uschedcp = lp;
385                         dd->upri = lp->lwp_priority;
386                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
387                         spin_unlock(&dd->spin);
388                         break;
389                 }
390
391                 /*
392                  * Can we steal the current designated user thread?
393                  *
394                  * If we do the other thread will stall when it tries to
395                  * return to userland, possibly rescheduling elsewhere.
396                  *
397                  * It is important to do a masked test to avoid the edge
398                  * case where two near-equal-priority threads are constantly
399                  * interrupting each other.
400                  *
401                  * In the exact match case another thread has already gained
402                  * uschedcp and lowered its priority, if we steal it the
403                  * other thread will stay stuck on the LWKT runq and not
404                  * push to another cpu.  So don't steal on equal-priority even
405                  * though it might appear to be more beneficial due to not
406                  * having to switch back to the other thread's context.
407                  *
408                  * usched_dfly_fast_resched requires that two threads be
409                  * significantly far apart in priority in order to interrupt.
410                  *
411                  * If better but not sufficiently far apart, the current
412                  * uschedcp will be interrupted at the next scheduler clock.
413                  */
414                 if (dd->uschedcp &&
415                    (dd->upri & ~PPQMASK) >
416                    (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
417                         dd->uschedcp = lp;
418                         dd->upri = lp->lwp_priority;
419                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
420                         spin_unlock(&dd->spin);
421                         break;
422                 }
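                /*
                 * Worked example of the masked test above (illustrative
                 * numbers, with PPQMASK = 3 and the default
                 * usched_dfly_fast_resched of 0): upri 156 vs lwp_priority
                 * 152 gives 156 > 152, so we steal; upri 154 vs 152 masks
                 * to 152 > 152, which is false, so two threads within the
                 * same 4-level queue never steal from each other.
                 */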
423 #ifdef SMP
424                 /*
425                  * We are not the current lwp, figure out the best cpu
426                  * to run on (our current cpu will be given significant
427                  * weight).  Loop on cpu change.
428                  */
429                 if ((usched_dfly_features & 0x02) &&
430                     force_resched == 0 &&
431                     (rdd = dfly_choose_best_queue(lp)) != dd) {
432                         dfly_changeqcpu_locked(lp, dd, rdd);
433                         spin_unlock(&dd->spin);
434                         lwkt_deschedule(lp->lwp_thread);
435                         dfly_setrunqueue_dd(rdd, lp);
436                         lwkt_switch();
437                         gd = mycpu;
438                         dd = &dfly_pcpu[gd->gd_cpuid];
439                         continue;
440                 }
441 #endif
442
443                 /*
444                  * We cannot become the current lwp, place the lp on the
445                  * run-queue of this or another cpu and deschedule ourselves.
446                  *
447                  * When we are reactivated we will have another chance.
448                  *
449                  * Reload after a switch or setrunqueue/switch possibly
450                  * moved us to another cpu.
451                  */
452                 spin_unlock(&dd->spin);
453                 lwkt_deschedule(lp->lwp_thread);
454                 dfly_setrunqueue_dd(dd, lp);
455                 lwkt_switch();
456                 gd = mycpu;
457                 dd = &dfly_pcpu[gd->gd_cpuid];
458         }
459
460         /*
461          * Make sure upri is synchronized, then yield to LWKT threads as
462          * needed before returning.  This could result in another reschedule.
463          * XXX
464          */
465         crit_exit_quick(td);
466
467         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
468 }
469
470 /*
471  * DFLY_RELEASE_CURPROC
472  *
473  * This routine detaches the current thread from the userland scheduler,
474  * usually because the thread needs to run or block in the kernel (at
475  * kernel priority) for a while.
476  *
477  * This routine is also responsible for selecting a new thread to
478  * make the current thread.
479  *
480  * NOTE: This implementation differs from the dummy example in that
481  * dfly_select_curproc() is able to select the current process, whereas
482  * dummy_select_curproc() is not able to select the current process.
483  * This means we have to NULL out uschedcp.
484  *
485  * Additionally, note that we may already be on a run queue if releasing
486  * via the lwkt_switch() in dfly_setrunqueue().
487  */
488 static void
489 dfly_release_curproc(struct lwp *lp)
490 {
491         globaldata_t gd = mycpu;
492         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
493
494         /*
495          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
496          * by wakeup().
497          */
498         if (dd->uschedcp == lp) {
499                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
500                 spin_lock(&dd->spin);
501                 if (dd->uschedcp == lp) {
502                         dd->uschedcp = NULL;    /* don't let lp be selected */
503                         dd->upri = PRIBASE_NULL;
504                         atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
505                         spin_unlock(&dd->spin);
506                         dfly_select_curproc(gd);
507                 } else {
508                         spin_unlock(&dd->spin);
509                 }
510         }
511 }
512
513 /*
514  * DFLY_SELECT_CURPROC
515  *
516  * Select a new current process for this cpu and clear any pending user
517  * reschedule request.  The cpu currently has no current process.
518  *
519  * This routine is also responsible for equal-priority round-robining,
520  * typically triggered from dfly_schedulerclock().  In our dummy example
521  * all the 'user' threads are LWKT scheduled all at once and we just
522  * call lwkt_switch().
523  *
524  * The calling process is not on the queue and cannot be selected.
525  */
526 static
527 void
528 dfly_select_curproc(globaldata_t gd)
529 {
530         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
531         struct lwp *nlp;
532         int cpuid = gd->gd_cpuid;
533
534         crit_enter_gd(gd);
535
536         spin_lock(&dd->spin);
537         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
538
539         if (nlp) {
540                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
541                 dd->upri = nlp->lwp_priority;
542                 dd->uschedcp = nlp;
543 #if 0
544                 dd->rrcount = 0;                /* reset round robin */
545 #endif
546                 spin_unlock(&dd->spin);
547 #ifdef SMP
548                 lwkt_acquire(nlp->lwp_thread);
549 #endif
550                 lwkt_schedule(nlp->lwp_thread);
551         } else {
552                 spin_unlock(&dd->spin);
553         }
554         crit_exit_gd(gd);
555 }
556
557 /*
558  * Place the specified lwp on the user scheduler's run queue.  This routine
559  * must be called with the thread descheduled.  The lwp must be runnable.
560  * It must not be possible for anyone else to explicitly schedule this thread.
561  *
562  * The thread may be the current thread as a special case.
563  */
564 static void
565 dfly_setrunqueue(struct lwp *lp)
566 {
567         dfly_pcpu_t dd;
568         dfly_pcpu_t rdd;
569
570         /*
571          * First validate the process LWKT state.
572          */
573         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
574         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
575             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
576              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
577         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
578
579         /*
580          * NOTE: dd/rdd do not necessarily represent the current cpu.
581          *       Instead they may represent the cpu the thread was last
582          *       scheduled on or inherited by its parent.
583          */
584         dd = &dfly_pcpu[lp->lwp_qcpu];
585         rdd = dd;
586
587         /*
588          * This process is not supposed to be scheduled anywhere or assigned
589          * as the current process anywhere.  Assert the condition.
590          */
591         KKASSERT(rdd->uschedcp != lp);
592
593 #ifndef SMP
594         /*
595          * If we are not SMP we do not have a scheduler helper to kick
596          * and must directly activate the process if none are scheduled.
597          *
598          * This is really only an issue when bootstrapping init since
599          * the caller in all other cases will be a user process, and
600          * even if released (rdd->uschedcp == NULL), that process will
601          * kickstart the scheduler when it returns to user mode from
602          * the kernel.
603          *
604          * NOTE: On SMP we can't just set some other cpu's uschedcp.
605          */
606         if (rdd->uschedcp == NULL) {
607                 spin_lock(&rdd->spin);
608                 if (rdd->uschedcp == NULL) {
609                         atomic_set_cpumask(&dfly_curprocmask, 1);
610                         rdd->uschedcp = lp;
611                         rdd->upri = lp->lwp_priority;
612                         spin_unlock(&rdd->spin);
613                         lwkt_schedule(lp->lwp_thread);
614                         return;
615                 }
616                 spin_unlock(&rdd->spin);
617         }
618 #endif
619
620 #ifdef SMP
621         /*
622          * Ok, we have to setrunqueue some target cpu and request a reschedule
623          * if necessary.
624          *
625          * We have to choose the best target cpu.  It might not be the current
626          * target even if the current cpu has no running user thread (for
627          * example, because the current cpu might be a hyperthread and its
628          * sibling has a thread assigned).
629          *
630          * If we just forked it is most optimal to run the child on the same
631          * cpu just in case the parent decides to wait for it (thus getting
632          * off that cpu).  As long as there is nothing else runnable on the
633          * cpu, that is.  If we did this unconditionally a parent forking
634          * multiple children before waiting (e.g. make -j N) leaves other
635          * cpus idle that could be working.
636          */
637         if (lp->lwp_forked) {
638                 lp->lwp_forked = 0;
639                 if (usched_dfly_features & 0x20)
640                         rdd = dfly_choose_best_queue(lp);
641                 else if (usched_dfly_features & 0x40)
642                         rdd = &dfly_pcpu[lp->lwp_qcpu];
643                 else if (usched_dfly_features & 0x80)
644                         rdd = dfly_choose_queue_simple(rdd, lp);
645                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
646                         rdd = dfly_choose_best_queue(lp);
647                 else
648                         rdd = &dfly_pcpu[lp->lwp_qcpu];
649         } else {
650                 rdd = dfly_choose_best_queue(lp);
651                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
652         }
653         if (lp->lwp_qcpu != rdd->cpuid) {
654                 spin_lock(&dd->spin);
655                 dfly_changeqcpu_locked(lp, dd, rdd);
656                 spin_unlock(&dd->spin);
657         }
658 #endif
659         dfly_setrunqueue_dd(rdd, lp);
660 }
661
662 #ifdef SMP
663
664 /*
665  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
666  * spin-locked on-call.  rdd does not have to be.
667  */
668 static void
669 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
670 {
671         if (lp->lwp_qcpu != rdd->cpuid) {
672                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
673                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
674                         atomic_add_int(&dd->uload, -lp->lwp_uload);
675                         atomic_add_int(&dd->ucount, -1);
676                         atomic_add_int(&dfly_ucount, -1);
677                 }
678                 lp->lwp_qcpu = rdd->cpuid;
679         }
680 }
681
682 #endif
683
684 /*
685  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
686  * also performs all necessary ancillary notification actions.
687  */
688 static void
689 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
690 {
691 #ifdef SMP
692         globaldata_t rgd;
693
694         /*
695          * We might be moving the lp to another cpu's run queue, and once
696          * on the runqueue (even if it is our cpu's), another cpu can rip
697          * it away from us.
698          *
699          * TDF_MIGRATING might already be set if this is part of a
700          * remrunqueue+setrunqueue sequence.
701          */
702         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
703                 lwkt_giveaway(lp->lwp_thread);
704
705         rgd = globaldata_find(rdd->cpuid);
706
707         /*
708          * We lose control of the lp the moment we release the spinlock
709          * after having placed it on the queue.  i.e. another cpu could pick
710          * it up, or it could exit, or its priority could be further
711          * adjusted, or something like that.
712          *
713          * WARNING! rdd can point to a foreign cpu!
714          */
715         spin_lock(&rdd->spin);
716         dfly_setrunqueue_locked(rdd, lp);
717
718         /*
719          * Potentially interrupt the currently-running thread
720          */
721         if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
722                 /*
723                  * Currently running thread is better or same, do not
724                  * interrupt.
725                  */
726                 spin_unlock(&rdd->spin);
727         } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
728                    usched_dfly_fast_resched) {
729                 /*
730                  * Currently running thread is not better, but not so bad
731                  * that we need to interrupt it.  Let it run for one more
732                  * scheduler tick.
733                  */
734                 if (rdd->uschedcp &&
735                     rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
736                         rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
737                 }
738                 spin_unlock(&rdd->spin);
739         } else if (rgd == mycpu) {
740                 /*
741                  * We should interrupt the currently running thread, which
742                  * is on the current cpu.
743                  */
744                 spin_unlock(&rdd->spin);
745                 if (rdd->uschedcp == NULL) {
746                         wakeup_mycpu(&rdd->helper_thread); /* XXX */
747                         need_user_resched();
748                 } else {
749                         need_user_resched();
750                 }
751         } else {
752                 /*
753                  * We should interrupt the currently running thread, which
754                  * is on a different cpu.
755                  */
756                 spin_unlock(&rdd->spin);
757                 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
758         }
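        /*
         * Note on the chain above, assuming the default tunables: with
         * usched_dfly_fast_resched left at 0 the second test can never
         * succeed once the first has failed, so by default a newly queued
         * better-priority thread forces an immediate user reschedule; the
         * "one more scheduler tick" grace path only applies when
         * fast_resched is raised above 0.
         */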
759 #else
760         /*
761          * Request a reschedule if appropriate.
762          */
763         spin_lock(&rdd->spin);
764         dfly_setrunqueue_locked(rdd, lp);
765         spin_unlock(&rdd->spin);
766         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
767                 need_user_resched();
768         }
769 #endif
770 }
771
772 /*
773  * This routine is called from a systimer IPI.  It MUST be MP-safe and
774  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
775  * each cpu.
776  */
777 static
778 void
779 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
780 {
781         globaldata_t gd = mycpu;
782 #ifdef SMP
783         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
784 #endif
785
786         /*
787          * Spinlocks also hold a critical section so there should not be
788          * any active.
789          */
790         KKASSERT(gd->gd_spinlocks == 0);
791
792         if (lp == NULL)
793                 return;
794
795         /*
796          * Do we need to round-robin?  We round-robin 10 times a second.
797          * This should only occur for cpu-bound batch processes.
798          */
799         if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
800                 lp->lwp_thread->td_wakefromcpu = -1;
801                 need_user_resched();
802         }
803
804         /*
805          * Adjust estcpu upward using a real time equivalent calculation,
806          * and recalculate lp's priority.
807          */
808         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
809         dfly_resetpriority(lp);
810
811         /*
812          * Rebalance two cpus every 8 ticks, pulling the worst thread
813          * from the worst cpu's queue into a rotating cpu number.
814          *
815          * This mechanic is needed because the push algorithms can
816          * steady-state in a non-optimal configuration.  We need to mix it
817          * up a little, even if it means breaking up a paired thread, so
818          * the push algorithms can rebalance the degenerate conditions.
819          * This portion of the algorithm exists to ensure stability at the
820          * selected weightings.
821          *
822          * Because we might be breaking up optimal conditions we do not want
823          * to execute this too quickly, hence we only rebalance approximately
824          * ~7-8 times per second.  The pushes, on the other hand, are capable
825          * of moving threads to other cpus at a much higher rate.
826          *
827          * We choose the most heavily loaded thread from the worst queue
828          * in order to ensure that multiple heavy-weight threads on the same
829          * queue get broken up, and also because these threads are the most
830          * likely to be able to remain in place.  Hopefully then any pairings,
831          * if applicable, migrate to where these threads are.
832          */
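        /*
         * Worked example of the gating test below (illustrative, assuming
         * ncpus == 4): at sched_ticks 64, (64 & 7) == 0 and 64 / 8 % 4 == 0,
         * so cpu 0 runs the rover; at tick 72 the rotor is 9 % 4 == 1
         * (cpu 1), at tick 80 it is cpu 2, and so on.  The rebalancing duty
         * thus rotates through the cpus, one rebalance every 8 ticks.
         */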
833 #ifdef SMP
834         if ((usched_dfly_features & 0x04) &&
835             ((u_int)sched_ticks & 7) == 0 &&
836             (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
837                 /*
838                  * Our cpu is up.
839                  */
840                 struct lwp *nlp;
841                 dfly_pcpu_t rdd;
842
843                 rdd = dfly_choose_worst_queue(dd);
844                 if (rdd) {
845                         spin_lock(&dd->spin);
846                         if (spin_trylock(&rdd->spin)) {
847                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
848                                 spin_unlock(&rdd->spin);
849                                 if (nlp == NULL)
850                                         spin_unlock(&dd->spin);
851                         } else {
852                                 spin_unlock(&dd->spin);
853                                 nlp = NULL;
854                         }
855                 } else {
856                         nlp = NULL;
857                 }
858                 /* dd->spin held if nlp != NULL */
859
860                 /*
861                  * Either schedule it or add it to our queue.
862                  */
863                 if (nlp &&
864                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
865                         atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
866                         dd->upri = nlp->lwp_priority;
867                         dd->uschedcp = nlp;
868 #if 0
869                         dd->rrcount = 0;        /* reset round robin */
870 #endif
871                         spin_unlock(&dd->spin);
872                         lwkt_acquire(nlp->lwp_thread);
873                         lwkt_schedule(nlp->lwp_thread);
874                 } else if (nlp) {
875                         dfly_setrunqueue_locked(dd, nlp);
876                         spin_unlock(&dd->spin);
877                 }
878         }
879 #endif
880 }
881
882 /*
883  * Called from acquire and from kern_synch's one-second timer (one of the
884  * callout helper threads) with a critical section held.
885  *
886  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
887  * overall system load.
888  *
889  * Note that no recalculation occurs for a process which sleeps and wakes
890  * up in the same tick.  That is, a system doing thousands of context
891  * switches per second will still only do serious estcpu calculations
892  * ESTCPUFREQ times per second.
893  */
894 static
895 void
896 dfly_recalculate_estcpu(struct lwp *lp)
897 {
898         globaldata_t gd = mycpu;
899         sysclock_t cpbase;
900         sysclock_t ttlticks;
901         int estcpu;
902         int decay_factor;
903         int ucount;
904
905         /*
906          * We have to subtract periodic to get the last schedclock
907          * timeout time, otherwise we would get the upcoming timeout.
908          * Keep in mind that a process can migrate between cpus and
909          * while the scheduler clock should be very close, boundary
910          * conditions could lead to a small negative delta.
911          */
912         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
913
914         if (lp->lwp_slptime > 1) {
915                 /*
916                  * Too much time has passed, do a coarse correction.
917                  */
918                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
919                 dfly_resetpriority(lp);
920                 lp->lwp_cpbase = cpbase;
921                 lp->lwp_cpticks = 0;
922                 lp->lwp_estfast = 0;
923         } else if (lp->lwp_cpbase != cpbase) {
924                 /*
925                  * Adjust estcpu if we are in a different tick.  Don't waste
926                  * time if we are in the same tick.
927                  *
928                  * First calculate the number of ticks in the measurement
929                  * interval.  The ttlticks calculation can wind up 0 due to
930                  * a bug in the handling of lwp_slptime  (as yet not found),
931                  * so make sure we do not get a divide by 0 panic.
932                  */
933                 ttlticks = (cpbase - lp->lwp_cpbase) /
934                            gd->gd_schedclock.periodic;
935                 if ((ssysclock_t)ttlticks < 0) {
936                         ttlticks = 0;
937                         lp->lwp_cpbase = cpbase;
938                 }
939                 if (ttlticks == 0)
940                         return;
941                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
942
943                 /*
944                  * Calculate the percentage of one cpu being used then
945                  * compensate for any system load in excess of ncpus.
946                  *
947                  * For example, if we have 8 cores and 16 running cpu-bound
948                  * processes then all things being equal each process will
949                  * get 50% of one cpu.  We need to pump this value back
950                  * up to 100% so the estcpu calculation properly adjusts
951                  * the process's dynamic priority.
952                  *
953                  * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
954                  */
955                 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
956                 ucount = dfly_ucount;
957                 if (ucount > ncpus) {
958                         estcpu += estcpu * (ucount - ncpus) / ncpus;
959                 }
960
961                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
962                         kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
963                                 lp->lwp_proc->p_pid, lp,
964                                 estcpu, lp->lwp_estcpu,
965                                 lp->lwp_cpticks, ttlticks);
966                 }
967
968                 /*
969                  * Adjust lp->lwp_estcpu.  The decay factor determines how
970                  * quickly lwp_estcpu collapses to its realtime calculation.
971                  * A slower collapse gives us a more accurate number over
972                  * the long term but can create problems with bursty threads
973                  * or threads which become cpu hogs.
974                  *
975                  * To solve this problem, newly started lwps and lwps which
976                  * are restarting after having been asleep for a while are
977                  * given a much, much faster decay in order to quickly
978                  * detect whether they become cpu-bound.
979                  *
980                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
981                  *       and not here, but we must still ensure that a
982                  *       cpu-bound nice -20 process does not completely
983                  *       override a cpu-bound nice +20 process.
984                  *
985                  * NOTE: We must use ESTCPULIM() here to deal with any
986                  *       overshoot.
987                  */
988                 decay_factor = usched_dfly_decay;
989                 if (decay_factor < 1)
990                         decay_factor = 1;
991                 if (decay_factor > 1024)
992                         decay_factor = 1024;
993
994                 if (lp->lwp_estfast < usched_dfly_decay) {
995                         ++lp->lwp_estfast;
996                         lp->lwp_estcpu = ESTCPULIM(
997                                 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
998                                 (lp->lwp_estfast + 1));
999                 } else {
1000                         lp->lwp_estcpu = ESTCPULIM(
1001                                 (lp->lwp_estcpu * decay_factor + estcpu) /
1002                                 (decay_factor + 1));
1003                 }
1004
1005                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1006                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
1007                 dfly_resetpriority(lp);
1008                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1009                 lp->lwp_cpticks = 0;
1010         }
1011 }
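
/*
 * Illustrative sketch, not part of the scheduler: the estcpu update above
 * is a weighted running average, new = (old * N + sample) / (N + 1), where
 * N is either the small but growing lwp_estfast (fast convergence right
 * after a fork or a long sleep) or the fixed usched_dfly_decay (slow,
 * stable convergence).  The helper below is an assumption for illustration
 * only; it simply restates that recurrence with the same ESTCPULIM clamp,
 * for a non-negative weight.
 */
static __inline int
example_estcpu_blend(int old_estcpu, int sample, int weight)
{
        return (ESTCPULIM((old_estcpu * weight + sample) / (weight + 1)));
}
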
1012
1013 /*
1014  * Compute the priority of a process when running in user mode.
1015  * Arrange to reschedule if the resulting priority is better
1016  * than that of the current process.
1017  *
1018  * This routine may be called with any process.
1019  *
1020  * This routine is called by fork1() for initial setup with the process
1021  * of the run queue, and also may be called normally with the process on or
1022  * off the run queue.
1023  */
1024 static void
1025 dfly_resetpriority(struct lwp *lp)
1026 {
1027         dfly_pcpu_t rdd;
1028         int newpriority;
1029         u_short newrqtype;
1030         int rcpu;
1031         int checkpri;
1032         int estcpu;
1033         int delta_uload;
1034
1035         crit_enter();
1036
1037         /*
1038          * Lock the scheduler (lp) belongs to.  This can be on a different
1039          * cpu.  Handle races.  This loop breaks out with the appropriate
1040          * rdd locked.
1041          */
1042         for (;;) {
1043                 rcpu = lp->lwp_qcpu;
1044                 cpu_ccfence();
1045                 rdd = &dfly_pcpu[rcpu];
1046                 spin_lock(&rdd->spin);
1047                 if (rcpu == lp->lwp_qcpu)
1048                         break;
1049                 spin_unlock(&rdd->spin);
1050         }
1051
1052         /*
1053          * Calculate the new priority and queue type
1054          */
1055         newrqtype = lp->lwp_rtprio.type;
1056
1057         switch(newrqtype) {
1058         case RTP_PRIO_REALTIME:
1059         case RTP_PRIO_FIFO:
1060                 newpriority = PRIBASE_REALTIME +
1061                              (lp->lwp_rtprio.prio & PRIMASK);
1062                 break;
1063         case RTP_PRIO_NORMAL:
1064                 /*
1065                  *
1066                  */
1067                 estcpu = lp->lwp_estcpu;
1068
1069                 /*
1070                  * p_nice piece         Adds (0-40) * 2         0-80
1071                  * estcpu               Adds 16384  * 4 / 512   0-128
1072                  */
1073                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1074                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1075                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1076                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1077                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1078                 break;
1079         case RTP_PRIO_IDLE:
1080                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1081                 break;
1082         case RTP_PRIO_THREAD:
1083                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1084                 break;
1085         default:
1086                 panic("Bad RTP_PRIO %d", newrqtype);
1087                 /* NOT REACHED */
1088         }
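        /*
         * Worked example of the RTP_PRIO_NORMAL case above (illustrative
         * numbers): with nice 0 and estcpu 0 the pieces sum to 40 + 0,
         * which scales by MAXPRI / (82 + 128) to 24, giving lwp_priority
         * 128 + 24 = 152 and run queue index 6.  A cpu hog at half of
         * ESTCPUMAX (estcpu 8192) sums to 40 + 64 = 104, scaling to 63,
         * for priority 191 and queue index 15, a much less desirable slot.
         */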
1089
1090         /*
1091          * The LWKT scheduler doesn't dive into usched structures, give it a hint
1092          * on the relative priority of user threads running in the kernel.
1093          * The LWKT scheduler will always ensure that a user thread running
1094          * in the kernel will get cpu some time, regardless of its upri,
1095          * but can decide not to instantly switch from one kernel or user
1096          * mode user thread to a kernel-mode user thread when it has a less
1097          * desirable user priority.
1098          *
1099          * td_upri has normal sense (higher values are more desirable), so
1100          * negate it.
1101          */
1102         lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1103
1104         /*
1105          * The newpriority incorporates the queue type so do a simple masked
1106          * check to determine if the process has moved to another queue.  If
1107          * it has, and it is currently on a run queue, then move it.
1108          *
1109          * Since uload is ~PPQMASK masked, no modifications are necessary if
1110          * we end up in the same run queue.
1111          */
1112         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1113                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1114                         dfly_remrunqueue_locked(rdd, lp);
1115                         lp->lwp_priority = newpriority;
1116                         lp->lwp_rqtype = newrqtype;
1117                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1118                         dfly_setrunqueue_locked(rdd, lp);
1119                         checkpri = 1;
1120                 } else {
1121                         lp->lwp_priority = newpriority;
1122                         lp->lwp_rqtype = newrqtype;
1123                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1124                         checkpri = 0;
1125                 }
1126         } else {
1127                 /*
1128                  * In the same PPQ, uload cannot change.
1129                  */
1130                 lp->lwp_priority = newpriority;
1131                 checkpri = 1;
1132                 rcpu = -1;
1133         }
1134
1135         /*
1136          * Adjust effective load.
1137          *
1138          * Calculate load then scale up or down geometrically based on p_nice.
1139          * Processes niced up (positive) are less important, and processes
1140          * niced downward (negative) are more important.  The higher the uload,
1141          * the more important the thread.
1142          */
1143         /* 0-511, 0-100% cpu */
1144         delta_uload = lp->lwp_estcpu / NQS;
1145         delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1146
1147
1148         delta_uload -= lp->lwp_uload;
1149         lp->lwp_uload += delta_uload;
1150         if (lp->lwp_mpflags & LWP_MP_ULOAD)
1151                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1152
1153         /*
1154          * Determine if we need to reschedule the target cpu.  This only
1155          * occurs if the LWP is already on a scheduler queue, which means
1156          * that idle cpu notification has already occurred.  At most we
1157          * need only issue a need_user_resched() on the appropriate cpu.
1158          *
1159          * The LWP may be owned by a CPU different from the current one,
1160          * in which case dd->uschedcp may be modified without an MP lock
1161          * or a spinlock held.  The worst that happens is that the code
1162          * below causes a spurious need_user_resched() on the target CPU
1163          * and dd->pri to be wrong for a short period of time, both of
1164          * and dd->upri to be wrong for a short period of time, both of
1165          *
1166          * If checkpri is 0 we are adjusting the priority of the current
1167          * process, possibly higher (less desirable), so ignore the upri
1168          * check which will fail in that case.
1169          */
1170         if (rcpu >= 0) {
1171                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1172                     (checkpri == 0 ||
1173                      (rdd->upri & ~PRIMASK) >
1174                      (lp->lwp_priority & ~PRIMASK))) {
1175 #ifdef SMP
1176                         if (rcpu == mycpu->gd_cpuid) {
1177                                 spin_unlock(&rdd->spin);
1178                                 need_user_resched();
1179                         } else {
1180                                 spin_unlock(&rdd->spin);
1181                                 lwkt_send_ipiq(globaldata_find(rcpu),
1182                                                dfly_need_user_resched_remote,
1183                                                NULL);
1184                         }
1185 #else
1186                         spin_unlock(&rdd->spin);
1187                         need_user_resched();
1188 #endif
1189                 } else {
1190                         spin_unlock(&rdd->spin);
1191                 }
1192         } else {
1193                 spin_unlock(&rdd->spin);
1194         }
1195         crit_exit();
1196 }
1197
1198 static
1199 void
1200 dfly_yield(struct lwp *lp)
1201 {
1202 #if 0
1203         /* FUTURE (or something similar) */
1204         switch(lp->lwp_rqtype) {
1205         case RTP_PRIO_NORMAL:
1206                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1207                 break;
1208         default:
1209                 break;
1210         }
1211 #endif
1212         need_user_resched();
1213 }
1214
1215 /*
1216  * Called from fork1() when a new child process is being created.
1217  *
1218  * Give the child process an initial estcpu that is more batchy than
1219  * its parent and dock the parent for the fork (but do not
1220  * reschedule the parent).
1221  *
1222  * fast
1223  *
1224  * XXX lwp should be "spawning" instead of "forking"
1225  */
1226 static void
1227 dfly_forking(struct lwp *plp, struct lwp *lp)
1228 {
1229         /*
1230          * Put the child 4 queue slots (out of 32) higher than the parent
1231          * (less desireable than the parent).
1232          * (less desirable than the parent).
1233         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1234         lp->lwp_forked = 1;
1235         lp->lwp_estfast = 0;
1236
1237         /*
1238          * Dock the parent a cost for the fork, protecting us from fork
1239          * bombs.  If the parent is forking quickly make the child more
1240          * batchy.
1241          */
1242         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1243 }
1244
1245 /*
1246  * Called when a lwp is being removed from this scheduler, typically
1247  * during lwp_exit().  We have to clean out any ULOAD accounting before
1248  * we can let the lp go.  The dd->spin lock is not needed for uload
1249  * updates.
1250  *
1251  * Scheduler dequeueing has already occurred, no further action in that
1252  * regard is needed.
1253  */
1254 static void
1255 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1256 {
1257         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1258
1259         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1260                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1261                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1262                 atomic_add_int(&dd->ucount, -1);
1263                 atomic_add_int(&dfly_ucount, -1);
1264         }
1265 }
1266
1267 /*
1268  * This function cannot block in any way, but spinlocks are ok.
1269  *
1270  * Update the uload based on the state of the thread (whether it is going
1271  * to sleep or running again).  The uload is meant to be a longer-term
1272  * load and not an instantaneous load.
1273  */
1274 static void
1275 dfly_uload_update(struct lwp *lp)
1276 {
1277         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1278
1279         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1280                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1281                         spin_lock(&dd->spin);
1282                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1283                                 atomic_set_int(&lp->lwp_mpflags,
1284                                                LWP_MP_ULOAD);
1285                                 atomic_add_int(&dd->uload, lp->lwp_uload);
1286                                 atomic_add_int(&dd->ucount, 1);
1287                                 atomic_add_int(&dfly_ucount, 1);
1288                         }
1289                         spin_unlock(&dd->spin);
1290                 }
1291         } else if (lp->lwp_slptime > 0) {
1292                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1293                         spin_lock(&dd->spin);
1294                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1295                                 atomic_clear_int(&lp->lwp_mpflags,
1296                                                  LWP_MP_ULOAD);
1297                                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1298                                 atomic_add_int(&dd->ucount, -1);
1299                                 atomic_add_int(&dfly_ucount, -1);
1300                         }
1301                         spin_unlock(&dd->spin);
1302                 }
1303         }
1304 }
1305
1306 /*
1307  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1308  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1309  * has a better or equal priority than the process that would otherwise be
1310  * chosen, NULL is returned.
1311  *
1312  * Until we fix the RUNQ code the chklp test has to be strict or we may
1313  * bounce between processes trying to acquire the current process designation.
1314  *
1315  * Must be called with rdd->spin locked.  The spinlock is left intact through
1316  * the entire routine.  dd->spin does not have to be locked.
1317  *
1318  * If worst is non-zero this function finds the worst thread instead of the
1319  * best thread (used by the schedulerclock-based rover).
1320  */
1321 static
1322 struct lwp *
1323 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1324                        struct lwp *chklp, int worst)
1325 {
1326         struct lwp *lp;
1327         struct rq *q;
1328         u_int32_t *which, *which2;
1329         u_int32_t pri;
1330         u_int32_t rtqbits;
1331         u_int32_t tsqbits;
1332         u_int32_t idqbits;
1333
1334         rtqbits = rdd->rtqueuebits;
1335         tsqbits = rdd->queuebits;
1336         idqbits = rdd->idqueuebits;
1337
1338         if (worst) {
1339                 if (idqbits) {
1340                         pri = bsrl(idqbits);
1341                         q = &rdd->idqueues[pri];
1342                         which = &rdd->idqueuebits;
1343                         which2 = &idqbits;
1344                 } else if (tsqbits) {
1345                         pri = bsrl(tsqbits);
1346                         q = &rdd->queues[pri];
1347                         which = &rdd->queuebits;
1348                         which2 = &tsqbits;
1349                 } else if (rtqbits) {
1350                         pri = bsrl(rtqbits);
1351                         q = &rdd->rtqueues[pri];
1352                         which = &rdd->rtqueuebits;
1353                         which2 = &rtqbits;
1354                 } else {
1355                         return (NULL);
1356                 }
1357                 lp = TAILQ_LAST(q, rq);
1358         } else {
1359                 if (rtqbits) {
1360                         pri = bsfl(rtqbits);
1361                         q = &rdd->rtqueues[pri];
1362                         which = &rdd->rtqueuebits;
1363                         which2 = &rtqbits;
1364                 } else if (tsqbits) {
1365                         pri = bsfl(tsqbits);
1366                         q = &rdd->queues[pri];
1367                         which = &rdd->queuebits;
1368                         which2 = &tsqbits;
1369                 } else if (idqbits) {
1370                         pri = bsfl(idqbits);
1371                         q = &rdd->idqueues[pri];
1372                         which = &rdd->idqueuebits;
1373                         which2 = &idqbits;
1374                 } else {
1375                         return (NULL);
1376                 }
1377                 lp = TAILQ_FIRST(q);
1378         }
1379         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1380
1381         /*
1382          * If the passed lwp <chklp> is reasonably close to the selected
1383          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1384          *
1385  * Note that we must err on the side of <chklp> to avoid bouncing
1386          * between threads in the acquire code.
1387          */
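        /*
         * In other words: keep <chklp> unless <lp> is better by at least
         * one full queue's worth of priority (PPQ units); smaller
         * differences are not worth the switch.
         */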
1388         if (chklp) {
1389                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1390                         return(NULL);
1391         }
1392
1393         KTR_COND_LOG(usched_chooseproc,
1394             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1395             lp->lwp_proc->p_pid,
1396             lp->lwp_thread->td_gd->gd_cpuid,
1397             mycpu->gd_cpuid);
1398
1399         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1400         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1401         TAILQ_REMOVE(q, lp, lwp_procq);
1402         --rdd->runqcount;
1403         if (TAILQ_EMPTY(q))
1404                 *which &= ~(1 << pri);
1405
1406         /*
1407          * If we are choosing a process from rdd with the intent to
1408          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1409          * is still held.
1410          */
1411         if (rdd != dd) {
1412                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1413                         atomic_add_int(&rdd->uload, -lp->lwp_uload);
1414                         atomic_add_int(&rdd->ucount, -1);
1415                         atomic_add_int(&dfly_ucount, -1);
1416                 }
1417                 lp->lwp_qcpu = dd->cpuid;
1418                 atomic_add_int(&dd->uload, lp->lwp_uload);
1419                 atomic_add_int(&dd->ucount, 1);
1420                 atomic_add_int(&dfly_ucount, 1);
1421                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1422         }
1423         return lp;
1424 }
1425
1426 #ifdef SMP
1427
1428 /*
1429  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1430  *
1431  * Choose a cpu node to schedule lp on, hopefully nearby its current
1432  * node.
1433  *
1434  * We give the current node a modest advantage for obvious reasons.
1435  *
1436  * We also give the node the thread was woken up FROM a slight advantage
1437  * in order to try to schedule paired threads which synchronize/block waiting
1438  * for each other fairly close to each other.  Similarly in a network setting
1439  * this feature will also attempt to place a user process near the kernel
1440  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1441  * algorithm as it heuristically groups synchronizing processes for locality
1442  * of reference in multi-socket systems.
1443  *
1444  * We check against running processes and give a big advantage if there
1445  * are none running.
1446  *
1447  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1448  *
1449  * When the topology is known choose a cpu whose group has, in aggregate,
1450  * the lowest weighted load.
1451  */
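/*
 * Rough sketch of the weighting, using the weight1..weight4 knobs
 * registered as sysctls in usched_dfly_cpu_init() below:
 *
 *      load  = sum(uload) + ucount * weight3        (per candidate group)
 *      load -= weight4 for each completely idle cpu in the group
 *      load /= number of cpus considered in the group
 *      load -= weight1 if lp is already on this group
 *      load +/- weight2 for the group containing the wakeup source
 *
 * The group with the lowest adjusted load wins, with ties broken in
 * favor of the group lp is already on.
 */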
1452 static
1453 dfly_pcpu_t
1454 dfly_choose_best_queue(struct lwp *lp)
1455 {
1456         cpumask_t wakemask;
1457         cpumask_t mask;
1458         cpu_node_t *cpup;
1459         cpu_node_t *cpun;
1460         cpu_node_t *cpub;
1461         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1462         dfly_pcpu_t rdd;
1463         int wakecpu;
1464         int cpuid;
1465         int n;
1466         int count;
1467         int load;
1468         int lowest_load;
1469
1470         /*
1471          * When the topology is unknown choose a random cpu that is hopefully
1472          * idle.
1473          */
1474         if (dd->cpunode == NULL)
1475                 return (dfly_choose_queue_simple(dd, lp));
1476
1477         /*
1478          * Pairing mask
1479          */
1480         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1481                 wakemask = dfly_pcpu[wakecpu].cpumask;
1482         else
1483                 wakemask = 0;
1484
1485         /*
1486          * When the topology is known choose a cpu whose group has, in
1487          * aggregate, the lowest weighted load.
1488          */
1489         cpup = root_cpu_node;
1490         rdd = dd;
1491
1492         while (cpup) {
1493                 /*
1494                  * Degenerate case super-root
1495                  */
1496                 if (cpup->child_node && cpup->child_no == 1) {
1497                         cpup = cpup->child_node;
1498                         continue;
1499                 }
1500
1501                 /*
1502                  * Terminal cpunode
1503                  */
1504                 if (cpup->child_node == NULL) {
1505                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1506                         break;
1507                 }
1508
1509                 cpub = NULL;
1510                 lowest_load = 0x7FFFFFFF;
1511
1512                 for (n = 0; n < cpup->child_no; ++n) {
1513                         /*
1514                          * Accumulate load information for all cpus
1515                          * which are members of this node.
1516                          */
1517                         cpun = &cpup->child_node[n];
1518                         mask = cpun->members & usched_global_cpumask &
1519                                smp_active_mask & lp->lwp_cpumask;
1520                         if (mask == 0)
1521                                 continue;
1522
1523                         count = 0;
1524                         load = 0;
1525
1526                         while (mask) {
1527                                 cpuid = BSFCPUMASK(mask);
1528                                 rdd = &dfly_pcpu[cpuid];
1529                                 load += rdd->uload;
1530                                 load += rdd->ucount * usched_dfly_weight3;
1531
1532                                 if (rdd->uschedcp == NULL &&
1533                                     rdd->runqcount == 0 &&
1534                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1535                                 ) {
1536                                         load -= usched_dfly_weight4;
1537                                 }
1538 #if 0
1539                                 else if (rdd->upri > lp->lwp_priority + PPQ) {
1540                                         load -= usched_dfly_weight4 / 2;
1541                                 }
1542 #endif
1543                                 mask &= ~CPUMASK(cpuid);
1544                                 ++count;
1545                         }
1546
1547                         /*
1548                          * Compensate if the lp is already accounted for in
1549                          * the aggregate uload for this mask set.  We want
1550                          * to calculate the loads as if lp were not present,
1551                          * otherwise the calculation is bogus.
1552                          */
1553                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1554                             (dd->cpumask & cpun->members)) {
1555                                 load -= lp->lwp_uload;
1556                                 load -= usched_dfly_weight3;
1557                         }
1558
1559                         load /= count;
1560
1561                         /*
1562                          * Advantage the cpu group (lp) is already on.
1563                          */
1564                         if (cpun->members & dd->cpumask)
1565                                 load -= usched_dfly_weight1;
1566
1567                         /*
1568                          * Advantage the cpu group we want to pair (lp) to,
1569                          * but don't let it go to the exact same cpu as
1570                          * the wakecpu target.
1571                          *
1572                          * We do this by checking whether cpun is a
1573                          * terminal node or not.  All cpun's at the same
1574                          * level will either all be terminal or all not
1575                          * terminal.
1576                          *
1577                          * If it is and we match we disadvantage the load.
1578                          * If it is and we don't match we advantage the load.
1579                          *
1580                          * Also note that we are effectively disadvantaging
1581                          * all-but-one by the same amount, so it won't affect
1582                          * the weight1 factor for the all-but-one nodes.
1583                          */
1584                         if (cpun->members & wakemask) {
1585                                 if (cpun->child_node != NULL) {
1586                                         /* advantage */
1587                                         load -= usched_dfly_weight2;
1588                                 } else {
1589                                         if (usched_dfly_features & 0x10)
1590                                                 load += usched_dfly_weight2;
1591                                         else
1592                                                 load -= usched_dfly_weight2;
1593                                 }
1594                         }
1595
1596                         /*
1597                          * Calculate the best load
1598                          */
1599                         if (cpub == NULL || lowest_load > load ||
1600                             (lowest_load == load &&
1601                              (cpun->members & dd->cpumask))
1602                         ) {
1603                                 lowest_load = load;
1604                                 cpub = cpun;
1605                         }
1606                 }
1607                 cpup = cpub;
1608         }
1609         if (usched_dfly_chooser)
1610                 kprintf("lp %02d->%02d %s\n",
1611                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1612         return (rdd);
1613 }
1614
1615 /*
1616  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1617  *
1618  * Choose the worst queue close to dd's cpu node with a non-empty runq
1619  * that is NOT dd.  Also require that the moving of the highest-load thread
1620  * from rdd to dd does not cause the uloads to cross each other.
1621  *
1622  * This is used by the thread chooser when the current cpu's queues are
1623  * empty to steal a thread from another cpu's queue.  We want to offload
1624  * the most heavily-loaded queue.
1625  */
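/*
 * The per-group load metric mirrors dfly_choose_best_queue(), but here
 * the locality bias (weight1) is added rather than subtracted: groups
 * near our own cpu appear more loaded and are therefore preferred as
 * steal victims, which keeps pulled threads close to dd.
 */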
1626 static
1627 dfly_pcpu_t
1628 dfly_choose_worst_queue(dfly_pcpu_t dd)
1629 {
1630         cpumask_t mask;
1631         cpu_node_t *cpup;
1632         cpu_node_t *cpun;
1633         cpu_node_t *cpub;
1634         dfly_pcpu_t rdd;
1635         int cpuid;
1636         int n;
1637         int count;
1638         int load;
1639 #if 0
1640         int pri;
1641         int hpri;
1642 #endif
1643         int highest_load;
1644
1645         /*
1646          * When the topology is unknown there is no reasonable way to
1647          * pick a worst remote queue, so do not steal anything.
1648          */
1649         if (dd->cpunode == NULL) {
1650                 return (NULL);
1651         }
1652
1653         /*
1654          * When the topology is known choose a cpu whose group has, in
1655          * aggregate, the highest weighted load.
1656          */
1657         cpup = root_cpu_node;
1658         rdd = dd;
1659         while (cpup) {
1660                 /*
1661                  * Degenerate case super-root
1662                  */
1663                 if (cpup->child_node && cpup->child_no == 1) {
1664                         cpup = cpup->child_node;
1665                         continue;
1666                 }
1667
1668                 /*
1669                  * Terminal cpunode
1670                  */
1671                 if (cpup->child_node == NULL) {
1672                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1673                         break;
1674                 }
1675
1676                 cpub = NULL;
1677                 highest_load = 0;
1678
1679                 for (n = 0; n < cpup->child_no; ++n) {
1680                         /*
1681                          * Accumulate load information for all cpus
1682                          * which are members of this node.
1683                          */
1684                         cpun = &cpup->child_node[n];
1685                         mask = cpun->members & usched_global_cpumask &
1686                                smp_active_mask;
1687                         if (mask == 0)
1688                                 continue;
1689                         count = 0;
1690                         load = 0;
1691
1692                         while (mask) {
1693                                 cpuid = BSFCPUMASK(mask);
1694                                 rdd = &dfly_pcpu[cpuid];
1695                                 load += rdd->uload;
1696                                 load += rdd->ucount * usched_dfly_weight3;
1697                                 if (rdd->uschedcp == NULL &&
1698                                     rdd->runqcount == 0 &&
1699                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1700                                 ) {
1701                                         load -= usched_dfly_weight4;
1702                                 }
1703 #if 0
1704                                 else if (rdd->upri > dd->upri + PPQ) {
1705                                         load -= usched_dfly_weight4 / 2;
1706                                 }
1707 #endif
1708                                 mask &= ~CPUMASK(cpuid);
1709                                 ++count;
1710                         }
1711                         load /= count;
1712
1713                         /*
1714                          * Prefer candidates which are somewhat closer to
1715                          * our cpu.
1716                          */
1717                         if (dd->cpumask & cpun->members)
1718                                 load += usched_dfly_weight1;
1719
1720                         /*
1721                          * The best candidate is the one with the worst
1722                          * (highest) load.
1723                          */
1724                         if (cpub == NULL || highest_load < load) {
1725                                 highest_load = load;
1726                                 cpub = cpun;
1727                         }
1728                 }
1729                 cpup = cpub;
1730         }
1731
1732         /*
1733          * We never return our own node (dd), and only return a remote
1734          * node if its load is significantly worse than ours (i.e. where
1735          * stealing a thread would be considered reasonable).
1736          *
1737          * This also helps us avoid breaking paired threads apart which
1738          * can have disastrous effects on performance.
1739          */
1740         if (rdd == dd)
1741                 return(NULL);
1742
1743 #if 0
1744         hpri = 0;
1745         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1746                 hpri = pri;
1747         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1748                 hpri = pri;
1749         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1750                 hpri = pri;
1751         hpri *= PPQ;
1752         if (rdd->uload - hpri < dd->uload + hpri)
1753                 return(NULL);
1754 #endif
1755         return (rdd);
1756 }
1757
1758 static
1759 dfly_pcpu_t
1760 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1761 {
1762         dfly_pcpu_t rdd;
1763         cpumask_t tmpmask;
1764         cpumask_t mask;
1765         int cpuid;
1766
1767         /*
1768          * Fall back to the original heuristic: select a pseudo-random cpu,
1769          * first checking cpus not currently running a user thread.
1770          */
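        /*
         * dfly_scancpu provides a rotating starting point so successive
         * calls do not all hammer the same cpu.  The ~(CPUMASK(cpuid) - 1)
         * test below first considers candidate cpus at or above the
         * starting index and only then wraps around to the lowest
         * remaining candidate.
         */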
1771         ++dfly_scancpu;
1772         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1773         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1774                smp_active_mask & usched_global_cpumask;
1775
1776         while (mask) {
1777                 tmpmask = ~(CPUMASK(cpuid) - 1);
1778                 if (mask & tmpmask)
1779                         cpuid = BSFCPUMASK(mask & tmpmask);
1780                 else
1781                         cpuid = BSFCPUMASK(mask);
1782                 rdd = &dfly_pcpu[cpuid];
1783
1784                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1785                         goto found;
1786                 mask &= ~CPUMASK(cpuid);
1787         }
1788
1789         /*
1790          * Then cpus which might have a currently running lp
1791          */
1792         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1793         mask = dfly_curprocmask & dfly_rdyprocmask &
1794                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1795
1796         while (mask) {
1797                 tmpmask = ~(CPUMASK(cpuid) - 1);
1798                 if (mask & tmpmask)
1799                         cpuid = BSFCPUMASK(mask & tmpmask);
1800                 else
1801                         cpuid = BSFCPUMASK(mask);
1802                 rdd = &dfly_pcpu[cpuid];
1803
1804                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1805                         goto found;
1806                 mask &= ~CPUMASK(cpuid);
1807         }
1808
1809         /*
1810          * If we cannot find a suitable cpu we reload from dfly_scancpu
1811          * and round-robin.  Other cpus will pick up as they release their
1812          * current lwps or become ready.
1813          *
1814          * Avoid a degenerate system lockup case if usched_global_cpumask
1815          * is set to 0 or otherwise does not cover lwp_cpumask.
1816          *
1817          * We only kick the target helper thread in this case; we do not
1818          * set the user resched flag.
1819          */
1820         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1821         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1822                 cpuid = 0;
1823         rdd = &dfly_pcpu[cpuid];
1824 found:
1825         return (rdd);
1826 }
1827
1828 static
1829 void
1830 dfly_need_user_resched_remote(void *dummy)
1831 {
1832         globaldata_t gd = mycpu;
1833         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1834
1835         /*
1836          * Flag reschedule needed
1837          */
1838         need_user_resched();
1839
1840         /*
1841          * If no user thread is currently running we need to kick the helper
1842          * on our cpu to recover.  Otherwise the cpu will never schedule
1843          * anything again.
1844          *
1845          * We cannot schedule the process ourselves because this is an
1846          * IPI callback and we cannot acquire spinlocks in an IPI callback.
1847          *
1848          * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1849          */
1850         if (dd->uschedcp == NULL && (dfly_rdyprocmask & gd->gd_cpumask)) {
1851                 atomic_clear_cpumask(&dfly_rdyprocmask, gd->gd_cpumask);
1852                 wakeup_mycpu(&dd->helper_thread);
1853         }
1854 }
1855
1856 #endif
1857
1858 /*
1859  * dfly_remrunqueue_locked() removes a given process from the run queue
1860  * that it is on, clearing the queue busy bit if it becomes empty.
1861  *
1862  * Note that the user process scheduler is different from the LWKT scheduler.
1863  * The user process scheduler only manages user processes but it uses LWKT
1864  * underneath, and a user process operating in the kernel will often be
1865  * 'released' from our management.
1866  *
1867  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1868  * to sleep or the lwp is moved to a different runq.
1869  */
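/*
 * The rqtype switch below selects one of the three per-cpu queue arrays
 * (normal, realtime/fifo, idle).  Clearing the occupancy bit when a queue
 * drains keeps the bsfl()/bsrl() scans in dfly_chooseproc_locked() cheap.
 */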
1870 static void
1871 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1872 {
1873         struct rq *q;
1874         u_int32_t *which;
1875         u_int8_t pri;
1876
1877         KKASSERT(rdd->runqcount >= 0);
1878
1879         pri = lp->lwp_rqindex;
1880
1881         switch(lp->lwp_rqtype) {
1882         case RTP_PRIO_NORMAL:
1883                 q = &rdd->queues[pri];
1884                 which = &rdd->queuebits;
1885                 break;
1886         case RTP_PRIO_REALTIME:
1887         case RTP_PRIO_FIFO:
1888                 q = &rdd->rtqueues[pri];
1889                 which = &rdd->rtqueuebits;
1890                 break;
1891         case RTP_PRIO_IDLE:
1892                 q = &rdd->idqueues[pri];
1893                 which = &rdd->idqueuebits;
1894                 break;
1895         default:
1896                 panic("remrunqueue: invalid rtprio type");
1897                 /* NOT REACHED */
1898         }
1899         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1900         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1901         TAILQ_REMOVE(q, lp, lwp_procq);
1902         --rdd->runqcount;
1903         if (TAILQ_EMPTY(q)) {
1904                 KASSERT((*which & (1 << pri)) != 0,
1905                         ("remrunqueue: remove from empty queue"));
1906                 *which &= ~(1 << pri);
1907         }
1908 }
1909
1910 /*
1911  * dfly_setrunqueue_locked()
1912  *
1913  * Add a process whose rqtype and rqindex have previously been calculated
1914  * onto the appropriate run queue.   Determine if the addition requires
1915  * a reschedule on a cpu and return the cpuid or -1.
1916  *
1917  * NOTE:          Lower priorities are better priorities.
1918  *
1919  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1920  *                sum of the rough lwp_priority for all running and runnable
1921  *                processes.  Lower priority processes (higher lwp_priority
1922  *                values) actually DO count as more load, not less, because
1923  *                these are the programs which require the most care with
1924  *                regards to cpu selection.
1925  */
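/*
 * Put differently: a cpu's uload grows both with how many lwps are
 * runnable there and with how cpu-bound (lower priority) they are.
 * dfly_choose_best_queue() and dfly_choose_worst_queue() compare these
 * aggregates when deciding where to place or steal a thread.
 */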
1926 static void
1927 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1928 {
1929         struct rq *q;
1930         u_int32_t *which;
1931         int pri;
1932
1933         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1934
1935         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1936                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1937                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1938                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1939                 atomic_add_int(&dfly_ucount, 1);
1940         }
1941
1942         pri = lp->lwp_rqindex;
1943
1944         switch(lp->lwp_rqtype) {
1945         case RTP_PRIO_NORMAL:
1946                 q = &rdd->queues[pri];
1947                 which = &rdd->queuebits;
1948                 break;
1949         case RTP_PRIO_REALTIME:
1950         case RTP_PRIO_FIFO:
1951                 q = &rdd->rtqueues[pri];
1952                 which = &rdd->rtqueuebits;
1953                 break;
1954         case RTP_PRIO_IDLE:
1955                 q = &rdd->idqueues[pri];
1956                 which = &rdd->idqueuebits;
1957                 break;
1958         default:
1959                 panic("setrunqueue: invalid rtprio type");
1960                 /* NOT REACHED */
1961         }
1962
1963         /*
1964          * Place us on the selected queue.  Determine if we should be
1965          * placed at the head of the queue or at the end.
1966          *
1967          * We are placed at the tail if our round-robin count has expired,
1968          * or is about to expire and the system thinks it's a good place to
1969          * round-robin, or there is already a next thread on the queue
1970          * (it might be trying to pick up where it left off and we don't
1971          * want to interfere).
1972          */
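        /*
         * Otherwise we are inserted at the head so the thread can resume
         * quickly with whatever round-robin quantum it has left.
         */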
1973         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1974         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1975         ++rdd->runqcount;
1976
1977         if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
1978             (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
1979              (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC)) ||
1980             !TAILQ_EMPTY(q)
1981         ) {
1982                 atomic_clear_int(&lp->lwp_thread->td_mpflags,
1983                                  TDF_MP_BATCH_DEMARC);
1984                 lp->lwp_rrcount = 0;
1985                 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1986         } else {
1987                 if (TAILQ_EMPTY(q))
1988                         lp->lwp_rrcount = 0;
1989                 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
1990         }
1991         *which |= 1 << pri;
1992 }
1993
1994 #ifdef SMP
1995
1996 /*
1997  * For SMP systems a user scheduler helper thread is created for each
1998  * cpu and is used to allow one cpu to wakeup another for the purposes of
1999  * scheduling userland threads from setrunqueue().
2000  *
2001  * UP systems do not need the helper since there is only one cpu.
2002  *
2003  * We can't use the idle thread for this because we might block.
2004  * Additionally, doing things this way allows us to HLT idle cpus
2005  * on MP systems.
2006  */
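/*
 * The helper sleeps on &dd->helper_thread and is woken from the
 * setrunqueue/resched paths (see dfly_need_user_resched_remote()) when
 * this cpu has no current user thread.  It runs at TDPRI_USER_SCHEDULER
 * so it does not compete with real work.
 */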
2007 static void
2008 dfly_helper_thread(void *dummy)
2009 {
2010     globaldata_t gd;
2011     dfly_pcpu_t dd;
2012     dfly_pcpu_t rdd;
2013     struct lwp *nlp;
2014     cpumask_t mask;
2015     int cpuid;
2016
2017     gd = mycpu;
2018     cpuid = gd->gd_cpuid;       /* doesn't change */
2019     mask = gd->gd_cpumask;      /* doesn't change */
2020     dd = &dfly_pcpu[cpuid];
2021
2022     /*
2023      * Since we only want to be woken up when no user processes
2024      * are scheduled on a cpu, run at an ultra low priority.
2025      */
2026     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2027
2028     tsleep(&dd->helper_thread, 0, "schslp", 0);
2029
2030     for (;;) {
2031         /*
2032          * We use the LWKT deschedule-interlock trick to avoid racing
2033          * dfly_rdyprocmask.  This means we cannot block through to the
2034          * interlocked tsleep() call we make at the bottom of the loop.
2035          */
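        /*
         * tsleep_interlock() registers our intent to sleep before we
         * publish ourselves in dfly_rdyprocmask; a wakeup arriving between
         * that point and the PINTERLOCKED tsleep() at the bottom of the
         * loop is therefore not lost.
         */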
2036         crit_enter_gd(gd);
2037         tsleep_interlock(&dd->helper_thread, 0);
2038
2039         spin_lock(&dd->spin);
2040
2041         atomic_set_cpumask(&dfly_rdyprocmask, mask);
2042         clear_user_resched();   /* This satisfies the reschedule request */
2043 #if 0
2044         dd->rrcount = 0;        /* Reset the round-robin counter */
2045 #endif
2046
2047         if (dd->runqcount || dd->uschedcp != NULL) {
2048                 /*
2049                  * Threads are available.  A thread may or may not be
2050                  * currently scheduled.  Get the best thread already queued
2051                  * to this cpu.
2052                  */
2053                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2054                 if (nlp) {
2055                         atomic_set_cpumask(&dfly_curprocmask, mask);
2056                         dd->upri = nlp->lwp_priority;
2057                         dd->uschedcp = nlp;
2058 #if 0
2059                         dd->rrcount = 0;        /* reset round robin */
2060 #endif
2061                         spin_unlock(&dd->spin);
2062                         lwkt_acquire(nlp->lwp_thread);
2063                         lwkt_schedule(nlp->lwp_thread);
2064                 } else {
2065                         /*
2066                          * This situation should not occur because we had
2067                          * at least one thread available.
2068                          */
2069                         spin_unlock(&dd->spin);
2070                 }
2071         } else if (usched_dfly_features & 0x01) {
2072                 /*
2073                  * This cpu is devoid of runnable threads, steal a thread
2074                  * from another cpu.  Since we're stealing, might as well
2075                  * load balance at the same time.
2076                  *
2077                  * We choose the highest-loaded thread from the worst queue.
2078                  *
2079                  * NOTE! This function only returns a non-NULL rdd when
2080                  *       another cpu's queue is obviously overloaded.  We
2081                  *       do not want to perform the type of rebalancing
2082                  *       the schedclock does here because it would result
2083                  *       in insane process pulling when 'steady' state is
2084                  *       partially unbalanced (e.g. 6 runnables and only
2085                  *       4 cores).
2086                  */
2087                 rdd = dfly_choose_worst_queue(dd);
2088                 if (rdd && spin_trylock(&rdd->spin)) {
2089                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2090                         spin_unlock(&rdd->spin);
2091                 } else {
2092                         nlp = NULL;
2093                 }
2094                 if (nlp) {
2095                         atomic_set_cpumask(&dfly_curprocmask, mask);
2096                         dd->upri = nlp->lwp_priority;
2097                         dd->uschedcp = nlp;
2098 #if 0
2099                         dd->rrcount = 0;        /* reset round robin */
2100 #endif
2101                         spin_unlock(&dd->spin);
2102                         lwkt_acquire(nlp->lwp_thread);
2103                         lwkt_schedule(nlp->lwp_thread);
2104                 } else {
2105                         /*
2106                          * Leave the thread on our run queue.  Another
2107                          * scheduler will try to pull it later.
2108                          */
2109                         spin_unlock(&dd->spin);
2110                 }
2111         } else {
2112                 /*
2113                  * devoid of runnable threads and not allowed to steal
2114                  * any.
2115                  */
2116                 spin_unlock(&dd->spin);
2117         }
2118
2119         /*
2120          * We're descheduled unless someone scheduled us.  Switch away.
2121          * Exiting the critical section will cause splz() to be called
2122          * for us if interrupts and such are pending.
2123          */
2124         crit_exit_gd(gd);
2125         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2126     }
2127 }
2128
2129 #if 0
2130 static int
2131 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2132 {
2133         int error, new_val;
2134
2135         new_val = usched_dfly_stick_to_level;
2136
2137         error = sysctl_handle_int(oidp, &new_val, 0, req);
2138         if (error != 0 || req->newptr == NULL)
2139                 return (error);
2140         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2141                 return (EINVAL);
2142         usched_dfly_stick_to_level = new_val;
2143         return (0);
2144 }
2145 #endif
2146
2147 #endif
2148
2149 /*
2150  * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2151  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2152  * we should not mess with it further.
2153  */
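/*
 * Besides the per-cpu spinlocks, queue arrays and (on SMP) helper threads,
 * this also creates the kern.usched_dfly sysctl tree that exposes the
 * weight/feature knobs referenced by the placement heuristics above.
 */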
2154 static void
2155 usched_dfly_cpu_init(void)
2156 {
2157         int i;
2158         int j;
2159 #ifdef SMP
2160         int cpuid;
2161         int smt_not_supported = 0;
2162         int cache_coherent_not_supported = 0;
2163 #endif
2164
2165         if (bootverbose)
2166                 kprintf("Start scheduler helpers on cpus:\n");
2167
2168         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2169         usched_dfly_sysctl_tree =
2170                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2171                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2172                                 "usched_dfly", CTLFLAG_RD, 0, "");
2173
2174         for (i = 0; i < ncpus; ++i) {
2175                 dfly_pcpu_t dd = &dfly_pcpu[i];
2176                 cpumask_t mask = CPUMASK(i);
2177
2178                 if ((mask & smp_active_mask) == 0)
2179                     continue;
2180
2181                 spin_init(&dd->spin);
2182 #ifdef SMP
2183                 dd->cpunode = get_cpu_node_by_cpuid(i);
2184 #endif
2185                 dd->cpuid = i;
2186                 dd->cpumask = CPUMASK(i);
2187                 for (j = 0; j < NQS; j++) {
2188                         TAILQ_INIT(&dd->queues[j]);
2189                         TAILQ_INIT(&dd->rtqueues[j]);
2190                         TAILQ_INIT(&dd->idqueues[j]);
2191                 }
2192                 atomic_clear_cpumask(&dfly_curprocmask, 1);
2193
2194 #ifdef SMP
2195                 if (dd->cpunode == NULL) {
2196                         smt_not_supported = 1;
2197                         cache_coherent_not_supported = 1;
2198                         if (bootverbose)
2199                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2200                                          "found for cpu\n", i);
2201                 } else {
2202                         switch (dd->cpunode->type) {
2203                         case THREAD_LEVEL:
2204                                 if (bootverbose)
2205                                         kprintf ("\tcpu%d - HyperThreading "
2206                                                  "available. Core siblings: ",
2207                                                  i);
2208                                 break;
2209                         case CORE_LEVEL:
2210                                 smt_not_supported = 1;
2211
2212                                 if (bootverbose)
2213                                         kprintf ("\tcpu%d - No HT available, "
2214                                                  "multi-core/physical "
2215                                                  "cpu. Physical siblings: ",
2216                                                  i);
2217                                 break;
2218                         case CHIP_LEVEL:
2219                                 smt_not_supported = 1;
2220
2221                                 if (bootverbose)
2222                                         kprintf ("\tcpu%d - No HT available, "
2223                                                  "single-core/physical cpu. "
2224                                                  "Package Siblings: ",
2225                                                  i);
2226                                 break;
2227                         default:
2228                                 /* Let's go for safe defaults here */
2229                                 smt_not_supported = 1;
2230                                 cache_coherent_not_supported = 1;
2231                                 if (bootverbose)
2232                                         kprintf ("\tcpu%d - Unknown cpunode->"
2233                                                  "type=%u. Siblings: ",
2234                                                  i,
2235                                                  (u_int)dd->cpunode->type);
2236                                 break;
2237                         }
2238
2239                         if (bootverbose) {
2240                                 if (dd->cpunode->parent_node != NULL) {
2241                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2242                                                 kprintf("cpu%d ", cpuid);
2243                                         kprintf("\n");
2244                                 } else {
2245                                         kprintf(" no siblings\n");
2246                                 }
2247                         }
2248                 }
2249
2250                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2251                             0, i, "usched %d", i);
2252 #endif
2253
2254                 /*
2255                  * Allow user scheduling on the target cpu.  cpu #0 has already
2256                  * been enabled in rqinit().
2257                  */
2258                 if (i)
2259                     atomic_clear_cpumask(&dfly_curprocmask, mask);
2260                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2261                 dd->upri = PRIBASE_NULL;
2262
2263         }
2264
2265         /* usched_dfly sysctl configurable parameters */
2266
2267         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2268                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2269                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2270                        &usched_dfly_rrinterval, 0, "");
2271         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2272                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2273                        OID_AUTO, "decay", CTLFLAG_RW,
2274                        &usched_dfly_decay, 0, "Extra decay when not running");
2275
2276 #ifdef SMP
2277         /* Add enable/disable option for SMT scheduling if supported */
2278         if (smt_not_supported) {
2279                 usched_dfly_smt = 0;
2280                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2281                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2282                                   OID_AUTO, "smt", CTLFLAG_RD,
2283                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2284         } else {
2285                 usched_dfly_smt = 1;
2286                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2287                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2288                                OID_AUTO, "smt", CTLFLAG_RW,
2289                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2290         }
2291
2292         /*
2293          * Add enable/disable option for cache coherent scheduling
2294          * if supported
2295          */
2296         if (cache_coherent_not_supported) {
2297                 usched_dfly_cache_coherent = 0;
2298                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2299                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2300                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2301                                   "NOT SUPPORTED", 0,
2302                                   "Cache coherence NOT SUPPORTED");
2303         } else {
2304                 usched_dfly_cache_coherent = 1;
2305                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2306                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2307                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2308                                &usched_dfly_cache_coherent, 0,
2309                                "Enable/Disable cache coherent scheduling");
2310
2311                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2312                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2313                                OID_AUTO, "weight1", CTLFLAG_RW,
2314                                &usched_dfly_weight1, 200,
2315                                "Weight selection for current cpu");
2316
2317                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2318                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2319                                OID_AUTO, "weight2", CTLFLAG_RW,
2320                                &usched_dfly_weight2, 180,
2321                                "Weight selection for wakefrom cpu");
2322
2323                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2324                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2325                                OID_AUTO, "weight3", CTLFLAG_RW,
2326                                &usched_dfly_weight3, 40,
2327                                "Weight selection for num threads on queue");
2328
2329                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2330                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2331                                OID_AUTO, "weight4", CTLFLAG_RW,
2332                                &usched_dfly_weight4, 160,
2333                                "Availability of other idle cpus");
2334
2335                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2336                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2337                                OID_AUTO, "fast_resched", CTLFLAG_RW,
2338                                &usched_dfly_fast_resched, 0,
2339                        "Priority delta for fast remote reschedule");
2340
2341                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2342                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2343                                OID_AUTO, "features", CTLFLAG_RW,
2344                                &usched_dfly_features, 0x8F,
2345                                "Allow pulls into empty queues");
2346
2347                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2348                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2349                                OID_AUTO, "swmask", CTLFLAG_RW,
2350                                &usched_dfly_swmask, ~PPQMASK,
2351                                "Queue mask to force thread switch");
2352
2353 #if 0
2354                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2355                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2356                                 OID_AUTO, "stick_to_level",
2357                                 CTLTYPE_INT | CTLFLAG_RW,
2358                                 NULL, sizeof usched_dfly_stick_to_level,
2359                                 sysctl_usched_dfly_stick_to_level, "I",
2360                                 "Stick a process to this level. See sysctl "
2361                                 "parameter hw.cpu_topology.level_description");
2362 #endif
2363         }
2364 #endif /* SMP */
2365 }
2366 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2367         usched_dfly_cpu_init, NULL)