32a7511445de19bcc64e90b4467084aeddbae82b
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_estfast     lwp_usdata.dfly.estfast
97 #define lwp_uload       lwp_usdata.dfly.uload
98 #define lwp_rqtype      lwp_usdata.dfly.rqtype
99 #define lwp_qcpu        lwp_usdata.dfly.qcpu
100 #define lwp_rrcount     lwp_usdata.dfly.rrcount
101
102 struct usched_dfly_pcpu {
103         struct spinlock spin;
104         struct thread   helper_thread;
105         short           unusde01;
106         short           upri;
107         int             uload;
108         int             ucount;
109         struct lwp      *uschedcp;
110         struct rq       queues[NQS];
111         struct rq       rtqueues[NQS];
112         struct rq       idqueues[NQS];
113         u_int32_t       queuebits;
114         u_int32_t       rtqueuebits;
115         u_int32_t       idqueuebits;
116         int             runqcount;
117         int             cpuid;
118         cpumask_t       cpumask;
119         cpu_node_t      *cpunode;
120 };
121
122 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
123
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
130                                 sysclock_t cpstamp);
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
137 static void dfly_changeqcpu_locked(struct lwp *lp,
138                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
139 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
140 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
141 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
142 static void dfly_need_user_resched_remote(void *dummy);
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
144                                           struct lwp *chklp, int worst);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147 static void dfly_changedcpu(struct lwp *lp);
148
149 struct usched usched_dfly = {
150         { NULL },
151         "dfly", "Original DragonFly Scheduler",
152         NULL,                   /* default registration */
153         NULL,                   /* default deregistration */
154         dfly_acquire_curproc,
155         dfly_release_curproc,
156         dfly_setrunqueue,
157         dfly_schedulerclock,
158         dfly_recalculate_estcpu,
159         dfly_resetpriority,
160         dfly_forking,
161         dfly_exiting,
162         dfly_uload_update,
163         NULL,                   /* setcpumask not supported */
164         dfly_yield,
165         dfly_changedcpu
166 };
167
168 /*
169  * We have NQS (32) run queues per scheduling class.  For the normal
170  * class, there are 128 priorities scaled onto these 32 queues.  New
171  * processes are added to the last entry in each queue, and processes
172  * are selected for running by taking them from the head and maintaining
173  * a simple FIFO arrangement.  Realtime and Idle priority processes have
174  * and explicit 0-31 priority which maps directly onto their class queue
175  * index.  When a queue has something in it, the corresponding bit is
176  * set in the queuebits variable, allowing a single read to determine
177  * the state of all 32 queues and then a ffs() to find the first busy
178  * queue.
179  */
180 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
181 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
182 static volatile int dfly_scancpu;
183 static volatile int dfly_ucount;        /* total running on whole system */
184 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
185 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
186 static struct sysctl_oid *usched_dfly_sysctl_tree;
187
188 /* Debug info exposed through debug.* sysctl */
189
190 static int usched_dfly_debug = -1;
191 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
192            &usched_dfly_debug, 0,
193            "Print debug information for this pid");
194
195 static int usched_dfly_pid_debug = -1;
196 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
197            &usched_dfly_pid_debug, 0,
198            "Print KTR debug information for this pid");
199
200 static int usched_dfly_chooser = 0;
201 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
202            &usched_dfly_chooser, 0,
203            "Print KTR debug information for this pid");
204
205 /*
206  * Tunning usched_dfly - configurable through kern.usched_dfly.
207  *
208  * weight1 - Tries to keep threads on their current cpu.  If you
209  *           make this value too large the scheduler will not be
210  *           able to load-balance large loads.
211  *
212  * weight2 - If non-zero, detects thread pairs undergoing synchronous
213  *           communications and tries to move them closer together.
214  *           Behavior is adjusted by bit 4 of features (0x10).
215  *
216  *           WARNING!  Weight2 is a ridiculously sensitive parameter,
217  *           a small value is recommended.
218  *
219  * weight3 - Weighting based on the number of recently runnable threads
220  *           on the userland scheduling queue (ignoring their loads).
221  *           A nominal value here prevents high-priority (low-load)
222  *           threads from accumulating on one cpu core when other
223  *           cores are available.
224  *
225  *           This value should be left fairly small relative to weight1
226  *           and weight4.
227  *
228  * weight4 - Weighting based on other cpu queues being available
229  *           or running processes with higher lwp_priority's.
230  *
231  *           This allows a thread to migrate to another nearby cpu if it
232  *           is unable to run on the current cpu based on the other cpu
233  *           being idle or running a lower priority (higher lwp_priority)
234  *           thread.  This value should be large enough to override weight1
235  *
236  * features - These flags can be set or cleared to enable or disable various
237  *            features.
238  *
239  *            0x01      Enable idle-cpu pulling                 (default)
240  *            0x02      Enable proactive pushing                (default)
241  *            0x04      Enable rebalancing rover                (default)
242  *            0x08      Enable more proactive pushing           (default)
243  *            0x10      (flip weight2 limit on same cpu)        (default)
244  *            0x20      choose best cpu for forked process
245  *            0x40      choose current cpu for forked process
246  *            0x80      choose random cpu for forked process    (default)
247  */
248 static int usched_dfly_smt = 0;
249 static int usched_dfly_cache_coherent = 0;
250 static int usched_dfly_weight1 = 200;   /* keep thread on current cpu */
251 static int usched_dfly_weight2 = 180;   /* synchronous peer's current cpu */
252 static int usched_dfly_weight3 = 40;    /* number of threads on queue */
253 static int usched_dfly_weight4 = 160;   /* availability of idle cores */
254 static int usched_dfly_features = 0x8F; /* allow pulls */
255 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
256 static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
257 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
258 static int usched_dfly_decay = 8;
259
260 /* KTR debug printings */
261
262 KTR_INFO_MASTER(usched);
263
264 #if !defined(KTR_USCHED_DFLY)
265 #define KTR_USCHED_DFLY KTR_ALL
266 #endif
267
268 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
269     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
270     pid_t pid, int old_cpuid, int curr);
271
272 /*
273  * This function is called when the kernel intends to return to userland.
274  * It is responsible for making the thread the current designated userland
275  * thread for this cpu, blocking if necessary.
276  *
277  * The kernel will not depress our LWKT priority until after we return,
278  * in case we have to shove over to another cpu.
279  *
280  * We must determine our thread's disposition before we switch away.  This
281  * is very sensitive code.
282  *
283  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
284  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
285  * occur, this function is called only under very controlled circumstances.
286  */
287 static void
288 dfly_acquire_curproc(struct lwp *lp)
289 {
290         globaldata_t gd;
291         dfly_pcpu_t dd;
292         dfly_pcpu_t rdd;
293         thread_t td;
294         int force_resched;
295
296         /*
297          * Make sure we aren't sitting on a tsleep queue.
298          */
299         td = lp->lwp_thread;
300         crit_enter_quick(td);
301         if (td->td_flags & TDF_TSLEEPQ)
302                 tsleep_remove(td);
303         dfly_recalculate_estcpu(lp);
304
305         gd = mycpu;
306         dd = &dfly_pcpu[gd->gd_cpuid];
307
308         /*
309          * Process any pending interrupts/ipi's, then handle reschedule
310          * requests.  dfly_release_curproc() will try to assign a new
311          * uschedcp that isn't us and otherwise NULL it out.
312          */
313         force_resched = 0;
314         if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
315             lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
316                 force_resched = 1;
317         }
318
319         if (user_resched_wanted()) {
320                 if (dd->uschedcp == lp)
321                         force_resched = 1;
322                 clear_user_resched();
323                 dfly_release_curproc(lp);
324         }
325
326         /*
327          * Loop until we are the current user thread.
328          *
329          * NOTE: dd spinlock not held at top of loop.
330          */
331         if (dd->uschedcp == lp)
332                 lwkt_yield_quick();
333
334         while (dd->uschedcp != lp) {
335                 lwkt_yield_quick();
336
337                 spin_lock(&dd->spin);
338
339                 /*
340                  * We are not or are no longer the current lwp and a forced
341                  * reschedule was requested.  Figure out the best cpu to
342                  * run on (our current cpu will be given significant weight).
343                  *
344                  * (if a reschedule was not requested we want to move this
345                  *  step after the uschedcp tests).
346                  */
347                 if (force_resched &&
348                     (usched_dfly_features & 0x08) &&
349                     (rdd = dfly_choose_best_queue(lp)) != dd) {
350                         dfly_changeqcpu_locked(lp, dd, rdd);
351                         spin_unlock(&dd->spin);
352                         lwkt_deschedule(lp->lwp_thread);
353                         dfly_setrunqueue_dd(rdd, lp);
354                         lwkt_switch();
355                         gd = mycpu;
356                         dd = &dfly_pcpu[gd->gd_cpuid];
357                         continue;
358                 }
359
360                 /*
361                  * Either no reschedule was requested or the best queue was
362                  * dd, and no current process has been selected.  We can
363                  * trivially become the current lwp on the current cpu.
364                  */
365                 if (dd->uschedcp == NULL) {
366                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
367                         dd->uschedcp = lp;
368                         dd->upri = lp->lwp_priority;
369                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
370                         spin_unlock(&dd->spin);
371                         break;
372                 }
373
374                 /*
375                  * Can we steal the current designated user thread?
376                  *
377                  * If we do the other thread will stall when it tries to
378                  * return to userland, possibly rescheduling elsewhere.
379                  *
380                  * It is important to do a masked test to avoid the edge
381                  * case where two near-equal-priority threads are constantly
382                  * interrupting each other.
383                  *
384                  * In the exact match case another thread has already gained
385                  * uschedcp and lowered its priority, if we steal it the
386                  * other thread will stay stuck on the LWKT runq and not
387                  * push to another cpu.  So don't steal on equal-priority even
388                  * though it might appear to be more beneficial due to not
389                  * having to switch back to the other thread's context.
390                  *
391                  * usched_dfly_fast_resched requires that two threads be
392                  * significantly far apart in priority in order to interrupt.
393                  *
394                  * If better but not sufficiently far apart, the current
395                  * uschedcp will be interrupted at the next scheduler clock.
396                  */
397                 if (dd->uschedcp &&
398                    (dd->upri & ~PPQMASK) >
399                    (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
400                         dd->uschedcp = lp;
401                         dd->upri = lp->lwp_priority;
402                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
403                         spin_unlock(&dd->spin);
404                         break;
405                 }
406                 /*
407                  * We are not the current lwp, figure out the best cpu
408                  * to run on (our current cpu will be given significant
409                  * weight).  Loop on cpu change.
410                  */
411                 if ((usched_dfly_features & 0x02) &&
412                     force_resched == 0 &&
413                     (rdd = dfly_choose_best_queue(lp)) != dd) {
414                         dfly_changeqcpu_locked(lp, dd, rdd);
415                         spin_unlock(&dd->spin);
416                         lwkt_deschedule(lp->lwp_thread);
417                         dfly_setrunqueue_dd(rdd, lp);
418                         lwkt_switch();
419                         gd = mycpu;
420                         dd = &dfly_pcpu[gd->gd_cpuid];
421                         continue;
422                 }
423
424                 /*
425                  * We cannot become the current lwp, place the lp on the
426                  * run-queue of this or another cpu and deschedule ourselves.
427                  *
428                  * When we are reactivated we will have another chance.
429                  *
430                  * Reload after a switch or setrunqueue/switch possibly
431                  * moved us to another cpu.
432                  */
433                 spin_unlock(&dd->spin);
434                 lwkt_deschedule(lp->lwp_thread);
435                 dfly_setrunqueue_dd(dd, lp);
436                 lwkt_switch();
437                 gd = mycpu;
438                 dd = &dfly_pcpu[gd->gd_cpuid];
439         }
440
441         /*
442          * Make sure upri is synchronized, then yield to LWKT threads as
443          * needed before returning.  This could result in another reschedule.
444          * XXX
445          */
446         crit_exit_quick(td);
447
448         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
449 }
450
451 /*
452  * DFLY_RELEASE_CURPROC
453  *
454  * This routine detaches the current thread from the userland scheduler,
455  * usually because the thread needs to run or block in the kernel (at
456  * kernel priority) for a while.
457  *
458  * This routine is also responsible for selecting a new thread to
459  * make the current thread.
460  *
461  * NOTE: This implementation differs from the dummy example in that
462  * dfly_select_curproc() is able to select the current process, whereas
463  * dummy_select_curproc() is not able to select the current process.
464  * This means we have to NULL out uschedcp.
465  *
466  * Additionally, note that we may already be on a run queue if releasing
467  * via the lwkt_switch() in dfly_setrunqueue().
468  */
469 static void
470 dfly_release_curproc(struct lwp *lp)
471 {
472         globaldata_t gd = mycpu;
473         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
474
475         /*
476          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
477          * by wakeup().
478          */
479         if (dd->uschedcp == lp) {
480                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
481                 spin_lock(&dd->spin);
482                 if (dd->uschedcp == lp) {
483                         dd->uschedcp = NULL;    /* don't let lp be selected */
484                         dd->upri = PRIBASE_NULL;
485                         atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
486                         spin_unlock(&dd->spin);
487                         dfly_select_curproc(gd);
488                 } else {
489                         spin_unlock(&dd->spin);
490                 }
491         }
492 }
493
494 /*
495  * DFLY_SELECT_CURPROC
496  *
497  * Select a new current process for this cpu and clear any pending user
498  * reschedule request.  The cpu currently has no current process.
499  *
500  * This routine is also responsible for equal-priority round-robining,
501  * typically triggered from dfly_schedulerclock().  In our dummy example
502  * all the 'user' threads are LWKT scheduled all at once and we just
503  * call lwkt_switch().
504  *
505  * The calling process is not on the queue and cannot be selected.
506  */
507 static
508 void
509 dfly_select_curproc(globaldata_t gd)
510 {
511         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
512         struct lwp *nlp;
513         int cpuid = gd->gd_cpuid;
514
515         crit_enter_gd(gd);
516
517         spin_lock(&dd->spin);
518         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
519
520         if (nlp) {
521                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
522                 dd->upri = nlp->lwp_priority;
523                 dd->uschedcp = nlp;
524 #if 0
525                 dd->rrcount = 0;                /* reset round robin */
526 #endif
527                 spin_unlock(&dd->spin);
528                 lwkt_acquire(nlp->lwp_thread);
529                 lwkt_schedule(nlp->lwp_thread);
530         } else {
531                 spin_unlock(&dd->spin);
532         }
533         crit_exit_gd(gd);
534 }
535
536 /*
537  * Place the specified lwp on the user scheduler's run queue.  This routine
538  * must be called with the thread descheduled.  The lwp must be runnable.
539  * It must not be possible for anyone else to explicitly schedule this thread.
540  *
541  * The thread may be the current thread as a special case.
542  */
543 static void
544 dfly_setrunqueue(struct lwp *lp)
545 {
546         dfly_pcpu_t dd;
547         dfly_pcpu_t rdd;
548
549         /*
550          * First validate the process LWKT state.
551          */
552         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
553         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
554             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
555              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
556         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
557
558         /*
559          * NOTE: dd/rdd do not necessarily represent the current cpu.
560          *       Instead they may represent the cpu the thread was last
561          *       scheduled on or inherited by its parent.
562          */
563         dd = &dfly_pcpu[lp->lwp_qcpu];
564         rdd = dd;
565
566         /*
567          * This process is not supposed to be scheduled anywhere or assigned
568          * as the current process anywhere.  Assert the condition.
569          */
570         KKASSERT(rdd->uschedcp != lp);
571
572         /*
573          * Ok, we have to setrunqueue some target cpu and request a reschedule
574          * if necessary.
575          *
576          * We have to choose the best target cpu.  It might not be the current
577          * target even if the current cpu has no running user thread (for
578          * example, because the current cpu might be a hyperthread and its
579          * sibling has a thread assigned).
580          *
581          * If we just forked it is most optimal to run the child on the same
582          * cpu just in case the parent decides to wait for it (thus getting
583          * off that cpu).  As long as there is nothing else runnable on the
584          * cpu, that is.  If we did this unconditionally a parent forking
585          * multiple children before waiting (e.g. make -j N) leaves other
586          * cpus idle that could be working.
587          */
588         if (lp->lwp_forked) {
589                 lp->lwp_forked = 0;
590                 if (usched_dfly_features & 0x20)
591                         rdd = dfly_choose_best_queue(lp);
592                 else if (usched_dfly_features & 0x40)
593                         rdd = &dfly_pcpu[lp->lwp_qcpu];
594                 else if (usched_dfly_features & 0x80)
595                         rdd = dfly_choose_queue_simple(rdd, lp);
596                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
597                         rdd = dfly_choose_best_queue(lp);
598                 else
599                         rdd = &dfly_pcpu[lp->lwp_qcpu];
600         } else {
601                 rdd = dfly_choose_best_queue(lp);
602                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
603         }
604         if (lp->lwp_qcpu != rdd->cpuid) {
605                 spin_lock(&dd->spin);
606                 dfly_changeqcpu_locked(lp, dd, rdd);
607                 spin_unlock(&dd->spin);
608         }
609         dfly_setrunqueue_dd(rdd, lp);
610 }
611
612 /*
613  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
614  * spin-locked on-call.  rdd does not have to be.
615  */
616 static void
617 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
618 {
619         if (lp->lwp_qcpu != rdd->cpuid) {
620                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
621                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
622                         atomic_add_int(&dd->uload, -lp->lwp_uload);
623                         atomic_add_int(&dd->ucount, -1);
624                         atomic_add_int(&dfly_ucount, -1);
625                 }
626                 lp->lwp_qcpu = rdd->cpuid;
627         }
628 }
629
630 /*
631  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
632  * also performs all necessary ancillary notification actions.
633  */
634 static void
635 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
636 {
637         globaldata_t rgd;
638
639         /*
640          * We might be moving the lp to another cpu's run queue, and once
641          * on the runqueue (even if it is our cpu's), another cpu can rip
642          * it away from us.
643          *
644          * TDF_MIGRATING might already be set if this is part of a
645          * remrunqueue+setrunqueue sequence.
646          */
647         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
648                 lwkt_giveaway(lp->lwp_thread);
649
650         rgd = globaldata_find(rdd->cpuid);
651
652         /*
653          * We lose control of the lp the moment we release the spinlock
654          * after having placed it on the queue.  i.e. another cpu could pick
655          * it up, or it could exit, or its priority could be further
656          * adjusted, or something like that.
657          *
658          * WARNING! rdd can point to a foreign cpu!
659          */
660         spin_lock(&rdd->spin);
661         dfly_setrunqueue_locked(rdd, lp);
662
663         /*
664          * Potentially interrupt the currently-running thread
665          */
666         if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
667                 /*
668                  * Currently running thread is better or same, do not
669                  * interrupt.
670                  */
671                 spin_unlock(&rdd->spin);
672         } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
673                    usched_dfly_fast_resched) {
674                 /*
675                  * Currently running thread is not better, but not so bad
676                  * that we need to interrupt it.  Let it run for one more
677                  * scheduler tick.
678                  */
679                 if (rdd->uschedcp &&
680                     rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
681                         rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
682                 }
683                 spin_unlock(&rdd->spin);
684         } else if (rgd == mycpu) {
685                 /*
686                  * We should interrupt the currently running thread, which
687                  * is on the current cpu.
688                  */
689                 spin_unlock(&rdd->spin);
690                 if (rdd->uschedcp == NULL) {
691                         wakeup_mycpu(&rdd->helper_thread); /* XXX */
692                         need_user_resched();
693                 } else {
694                         need_user_resched();
695                 }
696         } else {
697                 /*
698                  * We should interrupt the currently running thread, which
699                  * is on a different cpu.
700                  */
701                 spin_unlock(&rdd->spin);
702                 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
703         }
704 }
705
706 /*
707  * This routine is called from a systimer IPI.  It MUST be MP-safe and
708  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
709  * each cpu.
710  */
711 static
712 void
713 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
714 {
715         globaldata_t gd = mycpu;
716         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
717
718         /*
719          * Spinlocks also hold a critical section so there should not be
720          * any active.
721          */
722         KKASSERT(gd->gd_spinlocks == 0);
723
724         if (lp == NULL)
725                 return;
726
727         /*
728          * Do we need to round-robin?  We round-robin 10 times a second.
729          * This should only occur for cpu-bound batch processes.
730          */
731         if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
732                 lp->lwp_thread->td_wakefromcpu = -1;
733                 need_user_resched();
734         }
735
736         /*
737          * Adjust estcpu upward using a real time equivalent calculation,
738          * and recalculate lp's priority.
739          */
740         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
741         dfly_resetpriority(lp);
742
743         /*
744          * Rebalance two cpus every 8 ticks, pulling the worst thread
745          * from the worst cpu's queue into a rotating cpu number.
746          *
747          * This mechanic is needed because the push algorithms can
748          * steady-state in an non-optimal configuration.  We need to mix it
749          * up a little, even if it means breaking up a paired thread, so
750          * the push algorithms can rebalance the degenerate conditions.
751          * This portion of the algorithm exists to ensure stability at the
752          * selected weightings.
753          *
754          * Because we might be breaking up optimal conditions we do not want
755          * to execute this too quickly, hence we only rebalance approximately
756          * ~7-8 times per second.  The push's, on the otherhand, are capable
757          * moving threads to other cpus at a much higher rate.
758          *
759          * We choose the most heavily loaded thread from the worst queue
760          * in order to ensure that multiple heavy-weight threads on the same
761          * queue get broken up, and also because these threads are the most
762          * likely to be able to remain in place.  Hopefully then any pairings,
763          * if applicable, migrate to where these threads are.
764          */
765         if ((usched_dfly_features & 0x04) &&
766             ((u_int)sched_ticks & 7) == 0 &&
767             (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
768                 /*
769                  * Our cpu is up.
770                  */
771                 struct lwp *nlp;
772                 dfly_pcpu_t rdd;
773
774                 rdd = dfly_choose_worst_queue(dd);
775                 if (rdd) {
776                         spin_lock(&dd->spin);
777                         if (spin_trylock(&rdd->spin)) {
778                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
779                                 spin_unlock(&rdd->spin);
780                                 if (nlp == NULL)
781                                         spin_unlock(&dd->spin);
782                         } else {
783                                 spin_unlock(&dd->spin);
784                                 nlp = NULL;
785                         }
786                 } else {
787                         nlp = NULL;
788                 }
789                 /* dd->spin held if nlp != NULL */
790
791                 /*
792                  * Either schedule it or add it to our queue.
793                  */
794                 if (nlp &&
795                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
796                         atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
797                         dd->upri = nlp->lwp_priority;
798                         dd->uschedcp = nlp;
799 #if 0
800                         dd->rrcount = 0;        /* reset round robin */
801 #endif
802                         spin_unlock(&dd->spin);
803                         lwkt_acquire(nlp->lwp_thread);
804                         lwkt_schedule(nlp->lwp_thread);
805                 } else if (nlp) {
806                         dfly_setrunqueue_locked(dd, nlp);
807                         spin_unlock(&dd->spin);
808                 }
809         }
810 }
811
812 /*
813  * Called from acquire and from kern_synch's one-second timer (one of the
814  * callout helper threads) with a critical section held.
815  *
816  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
817  * overall system load.
818  *
819  * Note that no recalculation occurs for a process which sleeps and wakes
820  * up in the same tick.  That is, a system doing thousands of context
821  * switches per second will still only do serious estcpu calculations
822  * ESTCPUFREQ times per second.
823  */
824 static
825 void
826 dfly_recalculate_estcpu(struct lwp *lp)
827 {
828         globaldata_t gd = mycpu;
829         sysclock_t cpbase;
830         sysclock_t ttlticks;
831         int estcpu;
832         int decay_factor;
833         int ucount;
834
835         /*
836          * We have to subtract periodic to get the last schedclock
837          * timeout time, otherwise we would get the upcoming timeout.
838          * Keep in mind that a process can migrate between cpus and
839          * while the scheduler clock should be very close, boundary
840          * conditions could lead to a small negative delta.
841          */
842         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
843
844         if (lp->lwp_slptime > 1) {
845                 /*
846                  * Too much time has passed, do a coarse correction.
847                  */
848                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
849                 dfly_resetpriority(lp);
850                 lp->lwp_cpbase = cpbase;
851                 lp->lwp_cpticks = 0;
852                 lp->lwp_estfast = 0;
853         } else if (lp->lwp_cpbase != cpbase) {
854                 /*
855                  * Adjust estcpu if we are in a different tick.  Don't waste
856                  * time if we are in the same tick.
857                  *
858                  * First calculate the number of ticks in the measurement
859                  * interval.  The ttlticks calculation can wind up 0 due to
860                  * a bug in the handling of lwp_slptime  (as yet not found),
861                  * so make sure we do not get a divide by 0 panic.
862                  */
863                 ttlticks = (cpbase - lp->lwp_cpbase) /
864                            gd->gd_schedclock.periodic;
865                 if ((ssysclock_t)ttlticks < 0) {
866                         ttlticks = 0;
867                         lp->lwp_cpbase = cpbase;
868                 }
869                 if (ttlticks == 0)
870                         return;
871                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
872
873                 /*
874                  * Calculate the percentage of one cpu being used then
875                  * compensate for any system load in excess of ncpus.
876                  *
877                  * For example, if we have 8 cores and 16 running cpu-bound
878                  * processes then all things being equal each process will
879                  * get 50% of one cpu.  We need to pump this value back
880                  * up to 100% so the estcpu calculation properly adjusts
881                  * the process's dynamic priority.
882                  *
883                  * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
884                  */
885                 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
886                 ucount = dfly_ucount;
887                 if (ucount > ncpus) {
888                         estcpu += estcpu * (ucount - ncpus) / ncpus;
889                 }
890
891                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
892                         kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
893                                 lp->lwp_proc->p_pid, lp,
894                                 estcpu, lp->lwp_estcpu,
895                                 lp->lwp_cpticks, ttlticks);
896                 }
897
898                 /*
899                  * Adjust lp->lwp_esetcpu.  The decay factor determines how
900                  * quickly lwp_estcpu collapses to its realtime calculation.
901                  * A slower collapse gives us a more accurate number over
902                  * the long term but can create problems with bursty threads
903                  * or threads which become cpu hogs.
904                  *
905                  * To solve this problem, newly started lwps and lwps which
906                  * are restarting after having been asleep for a while are
907                  * given a much, much faster decay in order to quickly
908                  * detect whether they become cpu-bound.
909                  *
910                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
911                  *       and not here, but we must still ensure that a
912                  *       cpu-bound nice -20 process does not completely
913                  *       override a cpu-bound nice +20 process.
914                  *
915                  * NOTE: We must use ESTCPULIM() here to deal with any
916                  *       overshoot.
917                  */
918                 decay_factor = usched_dfly_decay;
919                 if (decay_factor < 1)
920                         decay_factor = 1;
921                 if (decay_factor > 1024)
922                         decay_factor = 1024;
923
924                 if (lp->lwp_estfast < usched_dfly_decay) {
925                         ++lp->lwp_estfast;
926                         lp->lwp_estcpu = ESTCPULIM(
927                                 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
928                                 (lp->lwp_estfast + 1));
929                 } else {
930                         lp->lwp_estcpu = ESTCPULIM(
931                                 (lp->lwp_estcpu * decay_factor + estcpu) /
932                                 (decay_factor + 1));
933                 }
934
935                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
936                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
937                 dfly_resetpriority(lp);
938                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
939                 lp->lwp_cpticks = 0;
940         }
941 }
942
943 /*
944  * Compute the priority of a process when running in user mode.
945  * Arrange to reschedule if the resulting priority is better
946  * than that of the current process.
947  *
948  * This routine may be called with any process.
949  *
950  * This routine is called by fork1() for initial setup with the process
951  * of the run queue, and also may be called normally with the process on or
952  * off the run queue.
953  */
954 static void
955 dfly_resetpriority(struct lwp *lp)
956 {
957         dfly_pcpu_t rdd;
958         int newpriority;
959         u_short newrqtype;
960         int rcpu;
961         int checkpri;
962         int estcpu;
963         int delta_uload;
964
965         crit_enter();
966
967         /*
968          * Lock the scheduler (lp) belongs to.  This can be on a different
969          * cpu.  Handle races.  This loop breaks out with the appropriate
970          * rdd locked.
971          */
972         for (;;) {
973                 rcpu = lp->lwp_qcpu;
974                 cpu_ccfence();
975                 rdd = &dfly_pcpu[rcpu];
976                 spin_lock(&rdd->spin);
977                 if (rcpu == lp->lwp_qcpu)
978                         break;
979                 spin_unlock(&rdd->spin);
980         }
981
982         /*
983          * Calculate the new priority and queue type
984          */
985         newrqtype = lp->lwp_rtprio.type;
986
987         switch(newrqtype) {
988         case RTP_PRIO_REALTIME:
989         case RTP_PRIO_FIFO:
990                 newpriority = PRIBASE_REALTIME +
991                              (lp->lwp_rtprio.prio & PRIMASK);
992                 break;
993         case RTP_PRIO_NORMAL:
994                 /*
995                  *
996                  */
997                 estcpu = lp->lwp_estcpu;
998
999                 /*
1000                  * p_nice piece         Adds (0-40) * 2         0-80
1001                  * estcpu               Adds 16384  * 4 / 512   0-128
1002                  */
1003                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1004                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1005                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1006                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1007                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1008                 break;
1009         case RTP_PRIO_IDLE:
1010                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1011                 break;
1012         case RTP_PRIO_THREAD:
1013                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1014                 break;
1015         default:
1016                 panic("Bad RTP_PRIO %d", newrqtype);
1017                 /* NOT REACHED */
1018         }
1019
1020         /*
1021          * The LWKT scheduler doesn't dive usched structures, give it a hint
1022          * on the relative priority of user threads running in the kernel.
1023          * The LWKT scheduler will always ensure that a user thread running
1024          * in the kernel will get cpu some time, regardless of its upri,
1025          * but can decide not to instantly switch from one kernel or user
1026          * mode user thread to a kernel-mode user thread when it has a less
1027          * desireable user priority.
1028          *
1029          * td_upri has normal sense (higher values are more desireable), so
1030          * negate it.
1031          */
1032         lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1033
1034         /*
1035          * The newpriority incorporates the queue type so do a simple masked
1036          * check to determine if the process has moved to another queue.  If
1037          * it has, and it is currently on a run queue, then move it.
1038          *
1039          * Since uload is ~PPQMASK masked, no modifications are necessary if
1040          * we end up in the same run queue.
1041          */
1042         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1043                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1044                         dfly_remrunqueue_locked(rdd, lp);
1045                         lp->lwp_priority = newpriority;
1046                         lp->lwp_rqtype = newrqtype;
1047                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1048                         dfly_setrunqueue_locked(rdd, lp);
1049                         checkpri = 1;
1050                 } else {
1051                         lp->lwp_priority = newpriority;
1052                         lp->lwp_rqtype = newrqtype;
1053                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1054                         checkpri = 0;
1055                 }
1056         } else {
1057                 /*
1058                  * In the same PPQ, uload cannot change.
1059                  */
1060                 lp->lwp_priority = newpriority;
1061                 checkpri = 1;
1062                 rcpu = -1;
1063         }
1064
1065         /*
1066          * Adjust effective load.
1067          *
1068          * Calculate load then scale up or down geometrically based on p_nice.
1069          * Processes niced up (positive) are less important, and processes
1070          * niced downard (negative) are more important.  The higher the uload,
1071          * the more important the thread.
1072          */
1073         /* 0-511, 0-100% cpu */
1074         delta_uload = lp->lwp_estcpu / NQS;
1075         delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1076
1077
1078         delta_uload -= lp->lwp_uload;
1079         lp->lwp_uload += delta_uload;
1080         if (lp->lwp_mpflags & LWP_MP_ULOAD)
1081                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1082
1083         /*
1084          * Determine if we need to reschedule the target cpu.  This only
1085          * occurs if the LWP is already on a scheduler queue, which means
1086          * that idle cpu notification has already occured.  At most we
1087          * need only issue a need_user_resched() on the appropriate cpu.
1088          *
1089          * The LWP may be owned by a CPU different from the current one,
1090          * in which case dd->uschedcp may be modified without an MP lock
1091          * or a spinlock held.  The worst that happens is that the code
1092          * below causes a spurious need_user_resched() on the target CPU
1093          * and dd->pri to be wrong for a short period of time, both of
1094          * which are harmless.
1095          *
1096          * If checkpri is 0 we are adjusting the priority of the current
1097          * process, possibly higher (less desireable), so ignore the upri
1098          * check which will fail in that case.
1099          */
1100         if (rcpu >= 0) {
1101                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1102                     (checkpri == 0 ||
1103                      (rdd->upri & ~PRIMASK) >
1104                      (lp->lwp_priority & ~PRIMASK))) {
1105                         if (rcpu == mycpu->gd_cpuid) {
1106                                 spin_unlock(&rdd->spin);
1107                                 need_user_resched();
1108                         } else {
1109                                 spin_unlock(&rdd->spin);
1110                                 lwkt_send_ipiq(globaldata_find(rcpu),
1111                                                dfly_need_user_resched_remote,
1112                                                NULL);
1113                         }
1114                 } else {
1115                         spin_unlock(&rdd->spin);
1116                 }
1117         } else {
1118                 spin_unlock(&rdd->spin);
1119         }
1120         crit_exit();
1121 }
1122
1123 static
1124 void
1125 dfly_yield(struct lwp *lp)
1126 {
1127 #if 0
1128         /* FUTURE (or something similar) */
1129         switch(lp->lwp_rqtype) {
1130         case RTP_PRIO_NORMAL:
1131                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1132                 break;
1133         default:
1134                 break;
1135         }
1136 #endif
1137         need_user_resched();
1138 }
1139
1140 /*
1141  * Thread was forcefully migrated to another cpu.  Normally forced migrations
1142  * are used for iterations and the kernel returns to the original cpu before
1143  * returning and this is not needed.  However, if the kernel migrates a
1144  * thread to another cpu and wants to leave it there, it has to call this
1145  * scheduler helper.
1146  *
1147  * Note that the lwkt_migratecpu() function also released the thread, so
1148  * we don't have to worry about that.
1149  */
1150 static
1151 void
1152 dfly_changedcpu(struct lwp *lp)
1153 {
1154         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1155         dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1156
1157         if (dd != rdd) {
1158                 spin_lock(&dd->spin);
1159                 dfly_changeqcpu_locked(lp, dd, rdd);
1160                 spin_unlock(&dd->spin);
1161         }
1162 }
1163
1164 /*
1165  * Called from fork1() when a new child process is being created.
1166  *
1167  * Give the child process an initial estcpu that is more batch then
1168  * its parent and dock the parent for the fork (but do not
1169  * reschedule the parent).
1170  *
1171  * fast
1172  *
1173  * XXX lwp should be "spawning" instead of "forking"
1174  */
1175 static void
1176 dfly_forking(struct lwp *plp, struct lwp *lp)
1177 {
1178         /*
1179          * Put the child 4 queue slots (out of 32) higher than the parent
1180          * (less desireable than the parent).
1181          */
1182         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1183         lp->lwp_forked = 1;
1184         lp->lwp_estfast = 0;
1185
1186         /*
1187          * Dock the parent a cost for the fork, protecting us from fork
1188          * bombs.  If the parent is forking quickly make the child more
1189          * batchy.
1190          */
1191         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1192 }
1193
1194 /*
1195  * Called when a lwp is being removed from this scheduler, typically
1196  * during lwp_exit().  We have to clean out any ULOAD accounting before
1197  * we can let the lp go.  The dd->spin lock is not needed for uload
1198  * updates.
1199  *
1200  * Scheduler dequeueing has already occurred, no further action in that
1201  * regard is needed.
1202  */
1203 static void
1204 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1205 {
1206         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1207
1208         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1209                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1210                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1211                 atomic_add_int(&dd->ucount, -1);
1212                 atomic_add_int(&dfly_ucount, -1);
1213         }
1214 }
1215
1216 /*
1217  * This function cannot block in any way, but spinlocks are ok.
1218  *
1219  * Update the uload based on the state of the thread (whether it is going
1220  * to sleep or running again).  The uload is meant to be a longer-term
1221  * load and not an instantanious load.
1222  */
1223 static void
1224 dfly_uload_update(struct lwp *lp)
1225 {
1226         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1227
1228         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1229                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1230                         spin_lock(&dd->spin);
1231                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1232                                 atomic_set_int(&lp->lwp_mpflags,
1233                                                LWP_MP_ULOAD);
1234                                 atomic_add_int(&dd->uload, lp->lwp_uload);
1235                                 atomic_add_int(&dd->ucount, 1);
1236                                 atomic_add_int(&dfly_ucount, 1);
1237                         }
1238                         spin_unlock(&dd->spin);
1239                 }
1240         } else if (lp->lwp_slptime > 0) {
1241                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1242                         spin_lock(&dd->spin);
1243                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1244                                 atomic_clear_int(&lp->lwp_mpflags,
1245                                                  LWP_MP_ULOAD);
1246                                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1247                                 atomic_add_int(&dd->ucount, -1);
1248                                 atomic_add_int(&dfly_ucount, -1);
1249                         }
1250                         spin_unlock(&dd->spin);
1251                 }
1252         }
1253 }
1254
1255 /*
1256  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1257  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1258  * has a better or equal priority then the process that would otherwise be
1259  * chosen, NULL is returned.
1260  *
1261  * Until we fix the RUNQ code the chklp test has to be strict or we may
1262  * bounce between processes trying to acquire the current process designation.
1263  *
1264  * Must be called with rdd->spin locked.  The spinlock is left intact through
1265  * the entire routine.  dd->spin does not have to be locked.
1266  *
1267  * If worst is non-zero this function finds the worst thread instead of the
1268  * best thread (used by the schedulerclock-based rover).
1269  */
1270 static
1271 struct lwp *
1272 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1273                        struct lwp *chklp, int worst)
1274 {
1275         struct lwp *lp;
1276         struct rq *q;
1277         u_int32_t *which;
1278         u_int32_t pri;
1279         u_int32_t rtqbits;
1280         u_int32_t tsqbits;
1281         u_int32_t idqbits;
1282
1283         rtqbits = rdd->rtqueuebits;
1284         tsqbits = rdd->queuebits;
1285         idqbits = rdd->idqueuebits;
1286
1287         if (worst) {
1288                 if (idqbits) {
1289                         pri = bsrl(idqbits);
1290                         q = &rdd->idqueues[pri];
1291                         which = &rdd->idqueuebits;
1292                 } else if (tsqbits) {
1293                         pri = bsrl(tsqbits);
1294                         q = &rdd->queues[pri];
1295                         which = &rdd->queuebits;
1296                 } else if (rtqbits) {
1297                         pri = bsrl(rtqbits);
1298                         q = &rdd->rtqueues[pri];
1299                         which = &rdd->rtqueuebits;
1300                 } else {
1301                         return (NULL);
1302                 }
1303                 lp = TAILQ_LAST(q, rq);
1304         } else {
1305                 if (rtqbits) {
1306                         pri = bsfl(rtqbits);
1307                         q = &rdd->rtqueues[pri];
1308                         which = &rdd->rtqueuebits;
1309                 } else if (tsqbits) {
1310                         pri = bsfl(tsqbits);
1311                         q = &rdd->queues[pri];
1312                         which = &rdd->queuebits;
1313                 } else if (idqbits) {
1314                         pri = bsfl(idqbits);
1315                         q = &rdd->idqueues[pri];
1316                         which = &rdd->idqueuebits;
1317                 } else {
1318                         return (NULL);
1319                 }
1320                 lp = TAILQ_FIRST(q);
1321         }
1322         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1323
1324         /*
1325          * If the passed lwp <chklp> is reasonably close to the selected
1326          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1327          *
1328          * Note that we must error on the side of <chklp> to avoid bouncing
1329          * between threads in the acquire code.
1330          */
1331         if (chklp) {
1332                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1333                         return(NULL);
1334         }
1335
1336         KTR_COND_LOG(usched_chooseproc,
1337             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1338             lp->lwp_proc->p_pid,
1339             lp->lwp_thread->td_gd->gd_cpuid,
1340             mycpu->gd_cpuid);
1341
1342         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1343         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1344         TAILQ_REMOVE(q, lp, lwp_procq);
1345         --rdd->runqcount;
1346         if (TAILQ_EMPTY(q))
1347                 *which &= ~(1 << pri);
1348
1349         /*
1350          * If we are choosing a process from rdd with the intent to
1351          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1352          * is still held.
1353          */
1354         if (rdd != dd) {
1355                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1356                         atomic_add_int(&rdd->uload, -lp->lwp_uload);
1357                         atomic_add_int(&rdd->ucount, -1);
1358                         atomic_add_int(&dfly_ucount, -1);
1359                 }
1360                 lp->lwp_qcpu = dd->cpuid;
1361                 atomic_add_int(&dd->uload, lp->lwp_uload);
1362                 atomic_add_int(&dd->ucount, 1);
1363                 atomic_add_int(&dfly_ucount, 1);
1364                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1365         }
1366         return lp;
1367 }
1368
1369 /*
1370  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1371  *
1372  * Choose a cpu node to schedule lp on, hopefully nearby its current
1373  * node.
1374  *
1375  * We give the current node a modest advantage for obvious reasons.
1376  *
1377  * We also give the node the thread was woken up FROM a slight advantage
1378  * in order to try to schedule paired threads which synchronize/block waiting
1379  * for each other fairly close to each other.  Similarly in a network setting
1380  * this feature will also attempt to place a user process near the kernel
1381  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1382  * algorithm as it heuristically groups synchronizing processes for locality
1383  * of reference in multi-socket systems.
1384  *
1385  * We check against running processes and give a big advantage if there
1386  * are none running.
1387  *
1388  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1389  *
1390  * When the topology is known choose a cpu whos group has, in aggregate,
1391  * has the lowest weighted load.
1392  */
1393 static
1394 dfly_pcpu_t
1395 dfly_choose_best_queue(struct lwp *lp)
1396 {
1397         cpumask_t wakemask;
1398         cpumask_t mask;
1399         cpu_node_t *cpup;
1400         cpu_node_t *cpun;
1401         cpu_node_t *cpub;
1402         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1403         dfly_pcpu_t rdd;
1404         int wakecpu;
1405         int cpuid;
1406         int n;
1407         int count;
1408         int load;
1409         int lowest_load;
1410
1411         /*
1412          * When the topology is unknown choose a random cpu that is hopefully
1413          * idle.
1414          */
1415         if (dd->cpunode == NULL)
1416                 return (dfly_choose_queue_simple(dd, lp));
1417
1418         /*
1419          * Pairing mask
1420          */
1421         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1422                 wakemask = dfly_pcpu[wakecpu].cpumask;
1423         else
1424                 wakemask = 0;
1425
1426         /*
1427          * When the topology is known choose a cpu whos group has, in
1428          * aggregate, has the lowest weighted load.
1429          */
1430         cpup = root_cpu_node;
1431         rdd = dd;
1432
1433         while (cpup) {
1434                 /*
1435                  * Degenerate case super-root
1436                  */
1437                 if (cpup->child_node && cpup->child_no == 1) {
1438                         cpup = cpup->child_node;
1439                         continue;
1440                 }
1441
1442                 /*
1443                  * Terminal cpunode
1444                  */
1445                 if (cpup->child_node == NULL) {
1446                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1447                         break;
1448                 }
1449
1450                 cpub = NULL;
1451                 lowest_load = 0x7FFFFFFF;
1452
1453                 for (n = 0; n < cpup->child_no; ++n) {
1454                         /*
1455                          * Accumulate load information for all cpus
1456                          * which are members of this node.
1457                          */
1458                         cpun = &cpup->child_node[n];
1459                         mask = cpun->members & usched_global_cpumask &
1460                                smp_active_mask & lp->lwp_cpumask;
1461                         if (mask == 0)
1462                                 continue;
1463
1464                         count = 0;
1465                         load = 0;
1466
1467                         while (mask) {
1468                                 cpuid = BSFCPUMASK(mask);
1469                                 rdd = &dfly_pcpu[cpuid];
1470                                 load += rdd->uload;
1471                                 load += rdd->ucount * usched_dfly_weight3;
1472
1473                                 if (rdd->uschedcp == NULL &&
1474                                     rdd->runqcount == 0 &&
1475                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1476                                 ) {
1477                                         load -= usched_dfly_weight4;
1478                                 }
1479 #if 0
1480                                 else if (rdd->upri > lp->lwp_priority + PPQ) {
1481                                         load -= usched_dfly_weight4 / 2;
1482                                 }
1483 #endif
1484                                 mask &= ~CPUMASK(cpuid);
1485                                 ++count;
1486                         }
1487
1488                         /*
1489                          * Compensate if the lp is already accounted for in
1490                          * the aggregate uload for this mask set.  We want
1491                          * to calculate the loads as if lp were not present,
1492                          * otherwise the calculation is bogus.
1493                          */
1494                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1495                             (dd->cpumask & cpun->members)) {
1496                                 load -= lp->lwp_uload;
1497                                 load -= usched_dfly_weight3;
1498                         }
1499
1500                         load /= count;
1501
1502                         /*
1503                          * Advantage the cpu group (lp) is already on.
1504                          */
1505                         if (cpun->members & dd->cpumask)
1506                                 load -= usched_dfly_weight1;
1507
1508                         /*
1509                          * Advantage the cpu group we want to pair (lp) to,
1510                          * but don't let it go to the exact same cpu as
1511                          * the wakecpu target.
1512                          *
1513                          * We do this by checking whether cpun is a
1514                          * terminal node or not.  All cpun's at the same
1515                          * level will either all be terminal or all not
1516                          * terminal.
1517                          *
1518                          * If it is and we match we disadvantage the load.
1519                          * If it is and we don't match we advantage the load.
1520                          *
1521                          * Also note that we are effectively disadvantaging
1522                          * all-but-one by the same amount, so it won't effect
1523                          * the weight1 factor for the all-but-one nodes.
1524                          */
1525                         if (cpun->members & wakemask) {
1526                                 if (cpun->child_node != NULL) {
1527                                         /* advantage */
1528                                         load -= usched_dfly_weight2;
1529                                 } else {
1530                                         if (usched_dfly_features & 0x10)
1531                                                 load += usched_dfly_weight2;
1532                                         else
1533                                                 load -= usched_dfly_weight2;
1534                                 }
1535                         }
1536
1537                         /*
1538                          * Calculate the best load
1539                          */
1540                         if (cpub == NULL || lowest_load > load ||
1541                             (lowest_load == load &&
1542                              (cpun->members & dd->cpumask))
1543                         ) {
1544                                 lowest_load = load;
1545                                 cpub = cpun;
1546                         }
1547                 }
1548                 cpup = cpub;
1549         }
1550         if (usched_dfly_chooser)
1551                 kprintf("lp %02d->%02d %s\n",
1552                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1553         return (rdd);
1554 }
1555
1556 /*
1557  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1558  *
1559  * Choose the worst queue close to dd's cpu node with a non-empty runq
1560  * that is NOT dd.  Also require that the moving of the highest-load thread
1561  * from rdd to dd does not cause the uload's to cross each other.
1562  *
1563  * This is used by the thread chooser when the current cpu's queues are
1564  * empty to steal a thread from another cpu's queue.  We want to offload
1565  * the most heavily-loaded queue.
1566  */
1567 static
1568 dfly_pcpu_t
1569 dfly_choose_worst_queue(dfly_pcpu_t dd)
1570 {
1571         cpumask_t mask;
1572         cpu_node_t *cpup;
1573         cpu_node_t *cpun;
1574         cpu_node_t *cpub;
1575         dfly_pcpu_t rdd;
1576         int cpuid;
1577         int n;
1578         int count;
1579         int load;
1580 #if 0
1581         int pri;
1582         int hpri;
1583 #endif
1584         int highest_load;
1585
1586         /*
1587          * When the topology is unknown choose a random cpu that is hopefully
1588          * idle.
1589          */
1590         if (dd->cpunode == NULL) {
1591                 return (NULL);
1592         }
1593
1594         /*
1595          * When the topology is known choose a cpu whos group has, in
1596          * aggregate, has the lowest weighted load.
1597          */
1598         cpup = root_cpu_node;
1599         rdd = dd;
1600         while (cpup) {
1601                 /*
1602                  * Degenerate case super-root
1603                  */
1604                 if (cpup->child_node && cpup->child_no == 1) {
1605                         cpup = cpup->child_node;
1606                         continue;
1607                 }
1608
1609                 /*
1610                  * Terminal cpunode
1611                  */
1612                 if (cpup->child_node == NULL) {
1613                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1614                         break;
1615                 }
1616
1617                 cpub = NULL;
1618                 highest_load = 0;
1619
1620                 for (n = 0; n < cpup->child_no; ++n) {
1621                         /*
1622                          * Accumulate load information for all cpus
1623                          * which are members of this node.
1624                          */
1625                         cpun = &cpup->child_node[n];
1626                         mask = cpun->members & usched_global_cpumask &
1627                                smp_active_mask;
1628                         if (mask == 0)
1629                                 continue;
1630                         count = 0;
1631                         load = 0;
1632
1633                         while (mask) {
1634                                 cpuid = BSFCPUMASK(mask);
1635                                 rdd = &dfly_pcpu[cpuid];
1636                                 load += rdd->uload;
1637                                 load += rdd->ucount * usched_dfly_weight3;
1638                                 if (rdd->uschedcp == NULL &&
1639                                     rdd->runqcount == 0 &&
1640                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1641                                 ) {
1642                                         load -= usched_dfly_weight4;
1643                                 }
1644 #if 0
1645                                 else if (rdd->upri > dd->upri + PPQ) {
1646                                         load -= usched_dfly_weight4 / 2;
1647                                 }
1648 #endif
1649                                 mask &= ~CPUMASK(cpuid);
1650                                 ++count;
1651                         }
1652                         load /= count;
1653
1654                         /*
1655                          * Prefer candidates which are somewhat closer to
1656                          * our cpu.
1657                          */
1658                         if (dd->cpumask & cpun->members)
1659                                 load += usched_dfly_weight1;
1660
1661                         /*
1662                          * The best candidate is the one with the worst
1663                          * (highest) load.
1664                          */
1665                         if (cpub == NULL || highest_load < load) {
1666                                 highest_load = load;
1667                                 cpub = cpun;
1668                         }
1669                 }
1670                 cpup = cpub;
1671         }
1672
1673         /*
1674          * We never return our own node (dd), and only return a remote
1675          * node if it's load is significantly worse than ours (i.e. where
1676          * stealing a thread would be considered reasonable).
1677          *
1678          * This also helps us avoid breaking paired threads apart which
1679          * can have disastrous effects on performance.
1680          */
1681         if (rdd == dd)
1682                 return(NULL);
1683
1684 #if 0
1685         hpri = 0;
1686         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1687                 hpri = pri;
1688         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1689                 hpri = pri;
1690         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1691                 hpri = pri;
1692         hpri *= PPQ;
1693         if (rdd->uload - hpri < dd->uload + hpri)
1694                 return(NULL);
1695 #endif
1696         return (rdd);
1697 }
1698
1699 static
1700 dfly_pcpu_t
1701 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1702 {
1703         dfly_pcpu_t rdd;
1704         cpumask_t tmpmask;
1705         cpumask_t mask;
1706         int cpuid;
1707
1708         /*
1709          * Fallback to the original heuristic, select random cpu,
1710          * first checking cpus not currently running a user thread.
1711          */
1712         ++dfly_scancpu;
1713         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1714         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1715                smp_active_mask & usched_global_cpumask;
1716
1717         while (mask) {
1718                 tmpmask = ~(CPUMASK(cpuid) - 1);
1719                 if (mask & tmpmask)
1720                         cpuid = BSFCPUMASK(mask & tmpmask);
1721                 else
1722                         cpuid = BSFCPUMASK(mask);
1723                 rdd = &dfly_pcpu[cpuid];
1724
1725                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1726                         goto found;
1727                 mask &= ~CPUMASK(cpuid);
1728         }
1729
1730         /*
1731          * Then cpus which might have a currently running lp
1732          */
1733         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1734         mask = dfly_curprocmask & dfly_rdyprocmask &
1735                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1736
1737         while (mask) {
1738                 tmpmask = ~(CPUMASK(cpuid) - 1);
1739                 if (mask & tmpmask)
1740                         cpuid = BSFCPUMASK(mask & tmpmask);
1741                 else
1742                         cpuid = BSFCPUMASK(mask);
1743                 rdd = &dfly_pcpu[cpuid];
1744
1745                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1746                         goto found;
1747                 mask &= ~CPUMASK(cpuid);
1748         }
1749
1750         /*
1751          * If we cannot find a suitable cpu we reload from dfly_scancpu
1752          * and round-robin.  Other cpus will pickup as they release their
1753          * current lwps or become ready.
1754          *
1755          * Avoid a degenerate system lockup case if usched_global_cpumask
1756          * is set to 0 or otherwise does not cover lwp_cpumask.
1757          *
1758          * We only kick the target helper thread in this case, we do not
1759          * set the user resched flag because
1760          */
1761         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1762         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1763                 cpuid = 0;
1764         rdd = &dfly_pcpu[cpuid];
1765 found:
1766         return (rdd);
1767 }
1768
1769 static
1770 void
1771 dfly_need_user_resched_remote(void *dummy)
1772 {
1773         globaldata_t gd = mycpu;
1774         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1775
1776         /*
1777          * Flag reschedule needed
1778          */
1779         need_user_resched();
1780
1781         /*
1782          * If no user thread is currently running we need to kick the helper
1783          * on our cpu to recover.  Otherwise the cpu will never schedule
1784          * anything again.
1785          *
1786          * We cannot schedule the process ourselves because this is an
1787          * IPI callback and we cannot acquire spinlocks in an IPI callback.
1788          *
1789          * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1790          */
1791         if (dd->uschedcp == NULL && (dfly_rdyprocmask & gd->gd_cpumask)) {
1792                 atomic_clear_cpumask(&dfly_rdyprocmask, gd->gd_cpumask);
1793                 wakeup_mycpu(&dd->helper_thread);
1794         }
1795 }
1796
1797 /*
1798  * dfly_remrunqueue_locked() removes a given process from the run queue
1799  * that it is on, clearing the queue busy bit if it becomes empty.
1800  *
1801  * Note that user process scheduler is different from the LWKT schedule.
1802  * The user process scheduler only manages user processes but it uses LWKT
1803  * underneath, and a user process operating in the kernel will often be
1804  * 'released' from our management.
1805  *
1806  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1807  * to sleep or the lwp is moved to a different runq.
1808  */
1809 static void
1810 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1811 {
1812         struct rq *q;
1813         u_int32_t *which;
1814         u_int8_t pri;
1815
1816         KKASSERT(rdd->runqcount >= 0);
1817
1818         pri = lp->lwp_rqindex;
1819
1820         switch(lp->lwp_rqtype) {
1821         case RTP_PRIO_NORMAL:
1822                 q = &rdd->queues[pri];
1823                 which = &rdd->queuebits;
1824                 break;
1825         case RTP_PRIO_REALTIME:
1826         case RTP_PRIO_FIFO:
1827                 q = &rdd->rtqueues[pri];
1828                 which = &rdd->rtqueuebits;
1829                 break;
1830         case RTP_PRIO_IDLE:
1831                 q = &rdd->idqueues[pri];
1832                 which = &rdd->idqueuebits;
1833                 break;
1834         default:
1835                 panic("remrunqueue: invalid rtprio type");
1836                 /* NOT REACHED */
1837         }
1838         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1839         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1840         TAILQ_REMOVE(q, lp, lwp_procq);
1841         --rdd->runqcount;
1842         if (TAILQ_EMPTY(q)) {
1843                 KASSERT((*which & (1 << pri)) != 0,
1844                         ("remrunqueue: remove from empty queue"));
1845                 *which &= ~(1 << pri);
1846         }
1847 }
1848
1849 /*
1850  * dfly_setrunqueue_locked()
1851  *
1852  * Add a process whos rqtype and rqindex had previously been calculated
1853  * onto the appropriate run queue.   Determine if the addition requires
1854  * a reschedule on a cpu and return the cpuid or -1.
1855  *
1856  * NOTE:          Lower priorities are better priorities.
1857  *
1858  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1859  *                sum of the rough lwp_priority for all running and runnable
1860  *                processes.  Lower priority processes (higher lwp_priority
1861  *                values) actually DO count as more load, not less, because
1862  *                these are the programs which require the most care with
1863  *                regards to cpu selection.
1864  */
1865 static void
1866 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1867 {
1868         struct rq *q;
1869         u_int32_t *which;
1870         int pri;
1871
1872         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1873
1874         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1875                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1876                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1877                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1878                 atomic_add_int(&dfly_ucount, 1);
1879         }
1880
1881         pri = lp->lwp_rqindex;
1882
1883         switch(lp->lwp_rqtype) {
1884         case RTP_PRIO_NORMAL:
1885                 q = &rdd->queues[pri];
1886                 which = &rdd->queuebits;
1887                 break;
1888         case RTP_PRIO_REALTIME:
1889         case RTP_PRIO_FIFO:
1890                 q = &rdd->rtqueues[pri];
1891                 which = &rdd->rtqueuebits;
1892                 break;
1893         case RTP_PRIO_IDLE:
1894                 q = &rdd->idqueues[pri];
1895                 which = &rdd->idqueuebits;
1896                 break;
1897         default:
1898                 panic("remrunqueue: invalid rtprio type");
1899                 /* NOT REACHED */
1900         }
1901
1902         /*
1903          * Place us on the selected queue.  Determine if we should be
1904          * placed at the head of the queue or at the end.
1905          *
1906          * We are placed at the tail if our round-robin count has expired,
1907          * or is about to expire and the system thinks its a good place to
1908          * round-robin, or there is already a next thread on the queue
1909          * (it might be trying to pick up where it left off and we don't
1910          * want to interfere).
1911          */
1912         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1913         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1914         ++rdd->runqcount;
1915
1916         if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
1917             (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
1918              (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC)) ||
1919             !TAILQ_EMPTY(q)
1920         ) {
1921                 atomic_clear_int(&lp->lwp_thread->td_mpflags,
1922                                  TDF_MP_BATCH_DEMARC);
1923                 lp->lwp_rrcount = 0;
1924                 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1925         } else {
1926                 if (TAILQ_EMPTY(q))
1927                         lp->lwp_rrcount = 0;
1928                 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
1929         }
1930         *which |= 1 << pri;
1931 }
1932
1933 /*
1934  * For SMP systems a user scheduler helper thread is created for each
1935  * cpu and is used to allow one cpu to wakeup another for the purposes of
1936  * scheduling userland threads from setrunqueue().
1937  *
1938  * UP systems do not need the helper since there is only one cpu.
1939  *
1940  * We can't use the idle thread for this because we might block.
1941  * Additionally, doing things this way allows us to HLT idle cpus
1942  * on MP systems.
1943  */
1944 static void
1945 dfly_helper_thread(void *dummy)
1946 {
1947     globaldata_t gd;
1948     dfly_pcpu_t dd;
1949     dfly_pcpu_t rdd;
1950     struct lwp *nlp;
1951     cpumask_t mask;
1952     int cpuid;
1953
1954     gd = mycpu;
1955     cpuid = gd->gd_cpuid;       /* doesn't change */
1956     mask = gd->gd_cpumask;      /* doesn't change */
1957     dd = &dfly_pcpu[cpuid];
1958
1959     /*
1960      * Since we only want to be woken up only when no user processes
1961      * are scheduled on a cpu, run at an ultra low priority.
1962      */
1963     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1964
1965     tsleep(&dd->helper_thread, 0, "schslp", 0);
1966
1967     for (;;) {
1968         /*
1969          * We use the LWKT deschedule-interlock trick to avoid racing
1970          * dfly_rdyprocmask.  This means we cannot block through to the
1971          * manual lwkt_switch() call we make below.
1972          */
1973         crit_enter_gd(gd);
1974         tsleep_interlock(&dd->helper_thread, 0);
1975
1976         spin_lock(&dd->spin);
1977
1978         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1979         clear_user_resched();   /* This satisfied the reschedule request */
1980 #if 0
1981         dd->rrcount = 0;        /* Reset the round-robin counter */
1982 #endif
1983
1984         if (dd->runqcount || dd->uschedcp != NULL) {
1985                 /*
1986                  * Threads are available.  A thread may or may not be
1987                  * currently scheduled.  Get the best thread already queued
1988                  * to this cpu.
1989                  */
1990                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
1991                 if (nlp) {
1992                         atomic_set_cpumask(&dfly_curprocmask, mask);
1993                         dd->upri = nlp->lwp_priority;
1994                         dd->uschedcp = nlp;
1995 #if 0
1996                         dd->rrcount = 0;        /* reset round robin */
1997 #endif
1998                         spin_unlock(&dd->spin);
1999                         lwkt_acquire(nlp->lwp_thread);
2000                         lwkt_schedule(nlp->lwp_thread);
2001                 } else {
2002                         /*
2003                          * This situation should not occur because we had
2004                          * at least one thread available.
2005                          */
2006                         spin_unlock(&dd->spin);
2007                 }
2008         } else if (usched_dfly_features & 0x01) {
2009                 /*
2010                  * This cpu is devoid of runnable threads, steal a thread
2011                  * from another cpu.  Since we're stealing, might as well
2012                  * load balance at the same time.
2013                  *
2014                  * We choose the highest-loaded thread from the worst queue.
2015                  *
2016                  * NOTE! This function only returns a non-NULL rdd when
2017                  *       another cpu's queue is obviously overloaded.  We
2018                  *       do not want to perform the type of rebalancing
2019                  *       the schedclock does here because it would result
2020                  *       in insane process pulling when 'steady' state is
2021                  *       partially unbalanced (e.g. 6 runnables and only
2022                  *       4 cores).
2023                  */
2024                 rdd = dfly_choose_worst_queue(dd);
2025                 if (rdd && spin_trylock(&rdd->spin)) {
2026                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2027                         spin_unlock(&rdd->spin);
2028                 } else {
2029                         nlp = NULL;
2030                 }
2031                 if (nlp) {
2032                         atomic_set_cpumask(&dfly_curprocmask, mask);
2033                         dd->upri = nlp->lwp_priority;
2034                         dd->uschedcp = nlp;
2035 #if 0
2036                         dd->rrcount = 0;        /* reset round robin */
2037 #endif
2038                         spin_unlock(&dd->spin);
2039                         lwkt_acquire(nlp->lwp_thread);
2040                         lwkt_schedule(nlp->lwp_thread);
2041                 } else {
2042                         /*
2043                          * Leave the thread on our run queue.  Another
2044                          * scheduler will try to pull it later.
2045                          */
2046                         spin_unlock(&dd->spin);
2047                 }
2048         } else {
2049                 /*
2050                  * devoid of runnable threads and not allowed to steal
2051                  * any.
2052                  */
2053                 spin_unlock(&dd->spin);
2054         }
2055
2056         /*
2057          * We're descheduled unless someone scheduled us.  Switch away.
2058          * Exiting the critical section will cause splz() to be called
2059          * for us if interrupts and such are pending.
2060          */
2061         crit_exit_gd(gd);
2062         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2063     }
2064 }
2065
2066 #if 0
2067 static int
2068 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2069 {
2070         int error, new_val;
2071
2072         new_val = usched_dfly_stick_to_level;
2073
2074         error = sysctl_handle_int(oidp, &new_val, 0, req);
2075         if (error != 0 || req->newptr == NULL)
2076                 return (error);
2077         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2078                 return (EINVAL);
2079         usched_dfly_stick_to_level = new_val;
2080         return (0);
2081 }
2082 #endif
2083
2084 /*
2085  * Setup the queues and scheduler helpers (scheduler helpers are SMP only).
2086  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2087  * we should not mess with it further.
2088  */
2089 static void
2090 usched_dfly_cpu_init(void)
2091 {
2092         int i;
2093         int j;
2094         int cpuid;
2095         int smt_not_supported = 0;
2096         int cache_coherent_not_supported = 0;
2097
2098         if (bootverbose)
2099                 kprintf("Start scheduler helpers on cpus:\n");
2100
2101         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2102         usched_dfly_sysctl_tree =
2103                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2104                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2105                                 "usched_dfly", CTLFLAG_RD, 0, "");
2106
2107         for (i = 0; i < ncpus; ++i) {
2108                 dfly_pcpu_t dd = &dfly_pcpu[i];
2109                 cpumask_t mask = CPUMASK(i);
2110
2111                 if ((mask & smp_active_mask) == 0)
2112                     continue;
2113
2114                 spin_init(&dd->spin);
2115                 dd->cpunode = get_cpu_node_by_cpuid(i);
2116                 dd->cpuid = i;
2117                 dd->cpumask = CPUMASK(i);
2118                 for (j = 0; j < NQS; j++) {
2119                         TAILQ_INIT(&dd->queues[j]);
2120                         TAILQ_INIT(&dd->rtqueues[j]);
2121                         TAILQ_INIT(&dd->idqueues[j]);
2122                 }
2123                 atomic_clear_cpumask(&dfly_curprocmask, 1);
2124
2125                 if (dd->cpunode == NULL) {
2126                         smt_not_supported = 1;
2127                         cache_coherent_not_supported = 1;
2128                         if (bootverbose)
2129                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2130                                          "found for cpu\n", i);
2131                 } else {
2132                         switch (dd->cpunode->type) {
2133                         case THREAD_LEVEL:
2134                                 if (bootverbose)
2135                                         kprintf ("\tcpu%d - HyperThreading "
2136                                                  "available. Core siblings: ",
2137                                                  i);
2138                                 break;
2139                         case CORE_LEVEL:
2140                                 smt_not_supported = 1;
2141
2142                                 if (bootverbose)
2143                                         kprintf ("\tcpu%d - No HT available, "
2144                                                  "multi-core/physical "
2145                                                  "cpu. Physical siblings: ",
2146                                                  i);
2147                                 break;
2148                         case CHIP_LEVEL:
2149                                 smt_not_supported = 1;
2150
2151                                 if (bootverbose)
2152                                         kprintf ("\tcpu%d - No HT available, "
2153                                                  "single-core/physical cpu. "
2154                                                  "Package Siblings: ",
2155                                                  i);
2156                                 break;
2157                         default:
2158                                 /* Let's go for safe defaults here */
2159                                 smt_not_supported = 1;
2160                                 cache_coherent_not_supported = 1;
2161                                 if (bootverbose)
2162                                         kprintf ("\tcpu%d - Unknown cpunode->"
2163                                                  "type=%u. Siblings: ",
2164                                                  i,
2165                                                  (u_int)dd->cpunode->type);
2166                                 break;
2167                         }
2168
2169                         if (bootverbose) {
2170                                 if (dd->cpunode->parent_node != NULL) {
2171                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2172                                                 kprintf("cpu%d ", cpuid);
2173                                         kprintf("\n");
2174                                 } else {
2175                                         kprintf(" no siblings\n");
2176                                 }
2177                         }
2178                 }
2179
2180                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2181                             0, i, "usched %d", i);
2182
2183                 /*
2184                  * Allow user scheduling on the target cpu.  cpu #0 has already
2185                  * been enabled in rqinit().
2186                  */
2187                 if (i)
2188                     atomic_clear_cpumask(&dfly_curprocmask, mask);
2189                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2190                 dd->upri = PRIBASE_NULL;
2191
2192         }
2193
2194         /* usched_dfly sysctl configurable parameters */
2195
2196         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2197                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2198                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2199                        &usched_dfly_rrinterval, 0, "");
2200         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2201                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2202                        OID_AUTO, "decay", CTLFLAG_RW,
2203                        &usched_dfly_decay, 0, "Extra decay when not running");
2204
2205         /* Add enable/disable option for SMT scheduling if supported */
2206         if (smt_not_supported) {
2207                 usched_dfly_smt = 0;
2208                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2209                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2210                                   OID_AUTO, "smt", CTLFLAG_RD,
2211                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2212         } else {
2213                 usched_dfly_smt = 1;
2214                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2215                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2216                                OID_AUTO, "smt", CTLFLAG_RW,
2217                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2218         }
2219
2220         /*
2221          * Add enable/disable option for cache coherent scheduling
2222          * if supported
2223          */
2224         if (cache_coherent_not_supported) {
2225                 usched_dfly_cache_coherent = 0;
2226                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2227                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2228                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2229                                   "NOT SUPPORTED", 0,
2230                                   "Cache coherence NOT SUPPORTED");
2231         } else {
2232                 usched_dfly_cache_coherent = 1;
2233                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2234                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2235                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2236                                &usched_dfly_cache_coherent, 0,
2237                                "Enable/Disable cache coherent scheduling");
2238
2239                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2240                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2241                                OID_AUTO, "weight1", CTLFLAG_RW,
2242                                &usched_dfly_weight1, 200,
2243                                "Weight selection for current cpu");
2244
2245                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2246                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2247                                OID_AUTO, "weight2", CTLFLAG_RW,
2248                                &usched_dfly_weight2, 180,
2249                                "Weight selection for wakefrom cpu");
2250
2251                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2252                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2253                                OID_AUTO, "weight3", CTLFLAG_RW,
2254                                &usched_dfly_weight3, 40,
2255                                "Weight selection for num threads on queue");
2256
2257                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2258                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2259                                OID_AUTO, "weight4", CTLFLAG_RW,
2260                                &usched_dfly_weight4, 160,
2261                                "Availability of other idle cpus");
2262
2263                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2264                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2265                                OID_AUTO, "fast_resched", CTLFLAG_RW,
2266                                &usched_dfly_fast_resched, 0,
2267                                "Availability of other idle cpus");
2268
2269                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2270                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2271                                OID_AUTO, "features", CTLFLAG_RW,
2272                                &usched_dfly_features, 0x8F,
2273                                "Allow pulls into empty queues");
2274
2275                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2276                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2277                                OID_AUTO, "swmask", CTLFLAG_RW,
2278                                &usched_dfly_swmask, ~PPQMASK,
2279                                "Queue mask to force thread switch");
2280
2281 #if 0
2282                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2283                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2284                                 OID_AUTO, "stick_to_level",
2285                                 CTLTYPE_INT | CTLFLAG_RW,
2286                                 NULL, sizeof usched_dfly_stick_to_level,
2287                                 sysctl_usched_dfly_stick_to_level, "I",
2288                                 "Stick a process to this level. See sysctl"
2289                                 "paremter hw.cpu_topology.level_description");
2290 #endif
2291         }
2292 }
2293 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2294         usched_dfly_cpu_init, NULL)