sys/kern/usched_dfly.c (dragonfly.git)
1 /*
2  * Copyright (c) 2012-2017 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51
52 #include <sys/ktr.h>
53
54 #include <machine/cpu.h>
55 #include <machine/smp.h>
56
57 /*
58  * Priorities.  Note that with 32 run queues per scheduler each queue
59  * represents four priority levels.
60  */
61
62 int dfly_rebalanced;
63
64 #define MAXPRI                  128
65 #define PRIMASK                 (MAXPRI - 1)
66 #define PRIBASE_REALTIME        0
67 #define PRIBASE_NORMAL          MAXPRI
68 #define PRIBASE_IDLE            (MAXPRI * 2)
69 #define PRIBASE_THREAD          (MAXPRI * 3)
70 #define PRIBASE_NULL            (MAXPRI * 4)
71
72 #define NQS     32                      /* 32 run queues. */
73 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
74 #define PPQMASK (PPQ - 1)
75
76 /*
77  * NICE_QS      - maximum queues nice can shift the process
78  * EST_QS       - maximum queues estcpu can shift the process
79  *
80  * ESTCPUPPQ    - number of estcpu units per priority queue
81  * ESTCPUMAX    - number of estcpu units
82  *
83  * Remember that NICE runs over the whole -20 to +20 range.
84  */
85 #define NICE_QS         24      /* -20 to +20 shift in whole queues */
86 #define EST_QS          20      /* 0-MAX shift in whole queues */
87 #define ESTCPUPPQ       512
88 #define ESTCPUMAX       (ESTCPUPPQ * EST_QS)
89 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
90
91 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
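/*
 * Worked arithmetic (illustrative note, derived from the constants above):
 * ESTCPUMAX = ESTCPUPPQ * EST_QS = 512 * 20 = 10240, so each ESTCPUPPQ (512)
 * units of estcpu shift a thread by one of the 32 run queues, up to EST_QS
 * (20) queues, while nice can shift it by up to NICE_QS (24) queues over its
 * full -20..+20 range.
 */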
92
93 TAILQ_HEAD(rq, lwp);
94
95 #define lwp_priority    lwp_usdata.dfly.priority
96 #define lwp_forked      lwp_usdata.dfly.forked
97 #define lwp_rqindex     lwp_usdata.dfly.rqindex
98 #define lwp_estcpu      lwp_usdata.dfly.estcpu
99 #define lwp_estfast     lwp_usdata.dfly.estfast
100 #define lwp_uload       lwp_usdata.dfly.uload
101 #define lwp_rqtype      lwp_usdata.dfly.rqtype
102 #define lwp_qcpu        lwp_usdata.dfly.qcpu
103 #define lwp_rrcount     lwp_usdata.dfly.rrcount
104
105 static __inline int
106 lptouload(struct lwp *lp)
107 {
108         int uload;
109
110         uload = lp->lwp_estcpu / NQS;
111         uload -= uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
112
113         return uload;
114 }
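/*
 * Worked example for lptouload() above (illustrative, not from the original
 * source): a saturated estcpu of ESTCPUMAX (10240) gives a base of
 * 10240 / NQS = 320.  Nice +20 then subtracts 320 * 20 / 21 = 304, leaving
 * 16; nice 0 leaves 320; nice -20 adds 304, giving 624.  Positive nice
 * values therefore shrink the reported uload and negative values grow it.
 */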
115
116 /*
117  * DFly scheduler pcpu structure.  Note that the pcpu uload field must
118  * be 64-bits to avoid overflowing in the situation where more than 32768
119  * processes are on a single cpu's queue.  Since high-end systems can
120  * easily run 900,000+ processes, we have to deal with it.
121  */
122 struct usched_dfly_pcpu {
123         struct spinlock spin;
124         struct thread   *helper_thread;
125         struct globaldata *gd;
126         u_short         scancpu;
127         short           upri;
128         long            uload;          /* 64-bits to avoid overflow (1) */
129         int             ucount;
130         int             flags;
131         struct lwp      *uschedcp;
132         struct rq       queues[NQS];
133         struct rq       rtqueues[NQS];
134         struct rq       idqueues[NQS];
135         u_int32_t       queuebits;
136         u_int32_t       rtqueuebits;
137         u_int32_t       idqueuebits;
138         int             runqcount;
139         int             cpuid;
140         cpumask_t       cpumask;
141         cpu_node_t      *cpunode;
142 } __cachealign;
143
144 /*
145  * Reflecting bits in the global atomic masks allows us to avoid
146  * a certain degree of global ping-ponging.
147  */
148 #define DFLY_PCPU_RDYMASK       0x0001  /* reflect rdyprocmask */
149 #define DFLY_PCPU_CURMASK       0x0002  /* reflect curprocmask */
150
151 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
152
153 static void dfly_acquire_curproc(struct lwp *lp);
154 static void dfly_release_curproc(struct lwp *lp);
155 static void dfly_select_curproc(globaldata_t gd);
156 static void dfly_setrunqueue(struct lwp *lp);
157 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
158 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
159                                 sysclock_t cpstamp);
160 static void dfly_recalculate_estcpu(struct lwp *lp);
161 static void dfly_resetpriority(struct lwp *lp);
162 static void dfly_forking(struct lwp *plp, struct lwp *lp);
163 static void dfly_exiting(struct lwp *lp, struct proc *);
164 static void dfly_uload_update(struct lwp *lp);
165 static void dfly_yield(struct lwp *lp);
166 static void dfly_changeqcpu_locked(struct lwp *lp,
167                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
168 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
169 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit);
170 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
171 static void dfly_need_user_resched_remote(void *dummy);
172 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
173                                           struct lwp *chklp, int worst);
174 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
175 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
176 static void dfly_changedcpu(struct lwp *lp);
177
178 struct usched usched_dfly = {
179         { NULL },
180         "dfly", "Original DragonFly Scheduler",
181         NULL,                   /* default registration */
182         NULL,                   /* default deregistration */
183         dfly_acquire_curproc,
184         dfly_release_curproc,
185         dfly_setrunqueue,
186         dfly_schedulerclock,
187         dfly_recalculate_estcpu,
188         dfly_resetpriority,
189         dfly_forking,
190         dfly_exiting,
191         dfly_uload_update,
192         NULL,                   /* setcpumask not supported */
193         dfly_yield,
194         dfly_changedcpu
195 };
196
197 /*
198  * We have NQS (32) run queues per scheduling class.  For the normal
199  * class, there are 128 priorities scaled onto these 32 queues.  New
200  * processes are added to the last entry in each queue, and processes
201  * are selected for running by taking them from the head and maintaining
202  * a simple FIFO arrangement.  Realtime and Idle priority processes have
 203  * an explicit 0-31 priority which maps directly onto their class queue
204  * index.  When a queue has something in it, the corresponding bit is
205  * set in the queuebits variable, allowing a single read to determine
206  * the state of all 32 queues and then a ffs() to find the first busy
207  * queue.
208  *
209  * curprocmask is used to publish cpus with assigned curprocs to the rest
210  * of the cpus.  In certain situations curprocmask may leave a bit set
211  * (e.g. a yield or a token-based yield) even though dd->uschedcp is
 212  * NULL'd out temporarily.
213  */
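/*
 * Worked example of the mapping (illustrative): with MAXPRI = 128, NQS = 32
 * and PPQ = 4, the queue index used later is (lwp_priority & PRIMASK) / PPQ,
 * so a normal-class priority of PRIBASE_NORMAL + 53 masks down to 53 and
 * lands in queue 13; bit 13 of that cpu's queuebits word stays set while
 * the queue is non-empty.
 */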
214                                         /* currently running a user process */
215 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
216 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
217 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
218 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
219 static struct sysctl_oid *usched_dfly_sysctl_tree;
220 static struct lock usched_dfly_config_lk = LOCK_INITIALIZER("usdfs", 0, 0);
221
222 /* Debug info exposed through debug.* sysctl */
223
224 static int usched_dfly_debug = -1;
225 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
226            &usched_dfly_debug, 0,
227            "Print debug information for this pid");
228
229 static int usched_dfly_pid_debug = -1;
230 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
231            &usched_dfly_pid_debug, 0,
232            "Print KTR debug information for this pid");
233
234 static int usched_dfly_chooser = 0;
235 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
236            &usched_dfly_chooser, 0,
237            "Print KTR debug information for this pid");
238
239 /*
240  * WARNING!
241  *
242  * The fork bias can have a large effect on the system in the face of a
243  * make -j N or other high-forking applications.
244  *
245  * Larger values are much less invasive vs other things that
246  * might be running in the system, but can cause exec chains
247  * such as those typically generated by make to have higher
248  * latencies in the face of modest load.
249  *
250  * Lower values are more invasive but have reduced latencies
251  * for such exec chains.
252  *
253  *      make -j 10 buildkernel example, build times:
254  *
255  *           +0 3:04
256  *           +1 3:14    -5.2%   <-- default
257  *           +2 3:22    -8.9%
258  *
259  * This issue occurs due to the way the scheduler affinity heuristics work.
260  * There is no way to really 'fix' the affinity heuristics because when it
261  * comes right down to it trying to instantly schedule a process on an
262  * available cpu (even if it will become unavailable a microsecond later)
263  * tends to cause processes to shift around between cpus and sockets too much
264  * and breaks the affinity.
265  *
 266  * NOTE: Heavily concurrent builds typically have enough work in flight
 267  *       that they remain time-efficient even with a higher bias.
268  */
269 static int usched_dfly_forkbias = 1;
270 SYSCTL_INT(_debug, OID_AUTO, dfly_forkbias, CTLFLAG_RW,
271            &usched_dfly_forkbias, 0,
272            "Fork bias for estcpu in whole queues");
273
274 /*
 275  * Tuning usched_dfly - configurable through kern.usched_dfly.
276  *
277  * weight1 - Tries to keep threads on their current cpu.  If you
278  *           make this value too large the scheduler will not be
279  *           able to load-balance large loads.
280  *
281  *           Generally set to a fairly low value, but high enough
282  *           such that estcpu jitter doesn't move threads around.
283  *
284  * weight2 - If non-zero, detects thread pairs undergoing synchronous
285  *           communications and tries to move them closer together.
286  *           The weight advantages the same package and socket and
287  *           disadvantages the same core and same cpu.
288  *
289  *           WARNING!  Weight2 is a ridiculously sensitive parameter,
 290  *           particularly against weight4.  Change the default at your
291  *           peril.
292  *
293  * weight3 - Weighting based on the number of recently runnable threads
294  *           on the userland scheduling queue (ignoring their loads).
295  *
296  *           A nominal value here prevents high-priority (low-load)
297  *           threads from accumulating on one cpu core when other
298  *           cores are available.
299  *
300  *           This value should be left fairly small because low-load
301  *           high priority threads can still be mostly idle and too
302  *           high a value will kick cpu-bound processes off the cpu
303  *           unnecessarily.
304  *
305  * weight4 - Weighting based on availability of other logical cpus running
306  *           less important threads (by upri) than the thread we are trying
307  *           to schedule.
308  *
309  *           This allows a thread to migrate to another nearby cpu if it
310  *           is unable to run on the current cpu based on the other cpu
311  *           being idle or running a less important (higher lwp_priority)
312  *           thread.  This value should be large enough to override weight1,
313  *           but not so large as to override weight2.
314  *
315  *           This parameter generally ensures fairness at the cost of some
 316  *           performance (if set too high).  It should generally be just
317  *           a tad lower than weight2.
318  *
319  * weight5 - Weighting based on the relative amount of ram connected
320  *           to the node a cpu resides on.
321  *
 322  *           This value should remain fairly low to allow asymmetric
 323  *           NUMA nodes to get threads scheduled to them.  Setting a very
 324  *           high level will prevent scheduling on asymmetric NUMA nodes
325  *           with low amounts of directly-attached memory.
326  *
327  *           Note that when testing e.g. N threads on a machine with N
 328  *           cpu cores with asymmetric NUMA nodes, a non-zero value will
329  *           cause some cpu threads on the low-priority NUMA nodes to remain
330  *           idle even when a few process threads are doubled-up on other
 331  *           cpus.  But this is typically preferable because it deschedules
 332  *           low-priority NUMA nodes at lighter loads.
333  *
334  *           Values between 50 and 200 are recommended.  Default is 50.
335  *
336  * weight6 - rdd transfer weight hysteresis.  Defaults to 0, can be increased
 337  *           to improve stability at the cost of more mis-schedules.
338  *
339  * ipc_smt - If enabled, advantage IPC pairing to sibling cpu threads.
340  *           If -1, automatic when load >= 1/2 ncpus (default).
341  *
342  * ipc_same- If enabled, advantage IPC pairing to the same logical cpu.
343  *           If -1, automatic when load >= ncpus (default).
344  *
345  * features - These flags can be set or cleared to enable or disable various
346  *            features.
347  *
348  *            0x01      Enable idle-cpu pulling                 (default)
349  *            0x02      Enable proactive pushing                (default)
350  *            0x04      Enable rebalancing rover                (default)
351  *            0x08      Enable more proactive pushing           (default)
352  *            0x10      (unassigned)
353  *            0x20      choose best cpu for forked process      (default)
354  *            0x40      choose current cpu for forked process
355  *            0x80      choose random cpu for forked process
356  *
357  *           NOTE - The idea behind forking mechanic 0x20 is that most
358  *                  fork()ing is either followed by an exec in the child,
359  *                  or the parent wait*()s.  If the child is short-lived,
360  *                  there is effectively an IPC dependency (td_wakefromcpu
361  *                  is also set in kern_fork.c) and we want to implement
362  *                  the weight2 behavior to reduce IPIs and to reduce CPU
363  *                  cache ping-ponging.
364  */
365 __read_mostly static int usched_dfly_smt = 0;
366 __read_mostly static int usched_dfly_cache_coherent = 0;
367 __read_mostly static int usched_dfly_weight1 = 30;  /* keep thread on cpu */
368 __read_mostly static int usched_dfly_weight2 = 180; /* IPC locality */
369 __read_mostly static int usched_dfly_weight3 = 10;  /* threads on queue */
370 __read_mostly static int usched_dfly_weight4 = 120; /* availability of cores */
371 __read_mostly static int usched_dfly_weight5 = 50;  /* node attached memory */
372 __read_mostly static int usched_dfly_weight6 = 0;   /* rdd transfer weight */
373 __read_mostly static int usched_dfly_features = 0x2f;        /* allow pulls */
374 __read_mostly static int usched_dfly_fast_resched = PPQ / 2; /* delta pri */
375 __read_mostly static int usched_dfly_swmask = ~PPQMASK;      /* allow pulls */
376 __read_mostly static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
377 __read_mostly static int usched_dfly_decay = 8;
378 __read_mostly static int usched_dfly_ipc_smt = -1;  /* IPC auto smt pair */
379 __read_mostly static int usched_dfly_ipc_same = -1; /* IPC auto same log cpu */
380 __read_mostly static long usched_dfly_node_mem;
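/*
 * Worked decoding of the defaults above (illustrative note): the feature
 * mask 0x2f = 0x01 | 0x02 | 0x04 | 0x08 | 0x20, i.e. exactly the entries
 * marked (default) in the feature list: idle-cpu pulling, proactive
 * pushing, the rebalancing rover, more proactive pushing, and choosing
 * the best cpu for a forked process.
 */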
381
382 /* KTR debug printings */
383
384 KTR_INFO_MASTER(usched);
385
386 #if !defined(KTR_USCHED_DFLY)
387 #define KTR_USCHED_DFLY KTR_ALL
388 #endif
389
390 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
391     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
392     pid_t pid, int old_cpuid, int curr);
393
394 /*
395  * This function is called when the kernel intends to return to userland.
396  * It is responsible for making the thread the current designated userland
397  * thread for this cpu, blocking if necessary.
398  *
399  * The kernel will not depress our LWKT priority until after we return,
400  * in case we have to shove over to another cpu.
401  *
402  * We must determine our thread's disposition before we switch away.  This
403  * is very sensitive code.
404  *
405  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
406  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
407  * occur, this function is called only under very controlled circumstances.
408  */
409 static void
410 dfly_acquire_curproc(struct lwp *lp)
411 {
412         globaldata_t gd;
413         dfly_pcpu_t dd;
414         dfly_pcpu_t rdd;
415         thread_t td;
416         int force_resched;
417
418         /*
419          * Make sure we aren't sitting on a tsleep queue.
420          */
421         td = lp->lwp_thread;
422         crit_enter_quick(td);
423         if (td->td_flags & TDF_TSLEEPQ)
424                 tsleep_remove(td);
425         dfly_recalculate_estcpu(lp);
426
427         gd = mycpu;
428         dd = &dfly_pcpu[gd->gd_cpuid];
429
430         /*
431          * Process any pending interrupts/ipi's, then handle reschedule
432          * requests.  dfly_release_curproc() will try to assign a new
433          * uschedcp that isn't us and otherwise NULL it out.
434          */
435         force_resched = 0;
436         if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
437             lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
438                 force_resched = 1;
439         }
440
441         if (user_resched_wanted()) {
442                 if (dd->uschedcp == lp)
443                         force_resched = 1;
444                 clear_user_resched();
445                 dfly_release_curproc(lp);
446         }
447
448         /*
449          * Loop until we are the current user thread.
450          *
451          * NOTE: dd spinlock not held at top of loop.
452          */
453         if (dd->uschedcp == lp)
454                 lwkt_yield_quick();
455
456         while (dd->uschedcp != lp) {
457                 /*
458                  * Do not do a lwkt_yield_quick() here as it will prevent
459                  * the lwp from being placed on the dfly_bsd runqueue for
460                  * one cycle (possibly an entire round-robin), preventing
461                  * it from being scheduled to another cpu.
462                  */
463                 /* lwkt_yield_quick(); */
464
465                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
466                         kprintf(" pid %d acquire curcpu %d (force %d) ",
467                                 lp->lwp_proc->p_pid, gd->gd_cpuid,
468                                 force_resched);
469
470
471                 spin_lock(&dd->spin);
472
473                 /* This lwp is an outcast; force reschedule. */
474                 if (__predict_false(
475                     CPUMASK_TESTBIT(lp->lwp_cpumask, gd->gd_cpuid) == 0) &&
476                     (rdd = dfly_choose_best_queue(lp)) != dd) {
477                         dfly_changeqcpu_locked(lp, dd, rdd);
478                         spin_unlock(&dd->spin);
479                         lwkt_deschedule(lp->lwp_thread);
480                         dfly_setrunqueue_dd(rdd, lp);
481                         lwkt_switch();
482                         gd = mycpu;
483                         dd = &dfly_pcpu[gd->gd_cpuid];
484                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
485                                 kprintf("SEL-A cpu %d\n", gd->gd_cpuid);
486                         continue;
487                 }
488
489                 /*
490                  * We are not or are no longer the current lwp and a forced
491                  * reschedule was requested.  Figure out the best cpu to
492                  * run on (our current cpu will be given significant weight).
493                  *
494                  * Doing this on many cpus simultaneously leads to
495                  * instability so pace the operation.
496                  *
497                  * (if a reschedule was not requested we want to move this
498                  * step after the uschedcp tests).
499                  */
500                 if (force_resched &&
501                    (usched_dfly_features & 0x08) &&
502                    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
503                         if ((rdd = dfly_choose_best_queue(lp)) != dd) {
504                                 dfly_changeqcpu_locked(lp, dd, rdd);
505                                 spin_unlock(&dd->spin);
506                                 lwkt_deschedule(lp->lwp_thread);
507                                 dfly_setrunqueue_dd(rdd, lp);
508                                 lwkt_switch();
509                                 gd = mycpu;
510                                 dd = &dfly_pcpu[gd->gd_cpuid];
511                                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
512                                         kprintf("SEL-B cpu %d\n", gd->gd_cpuid);
513                                 continue;
514                         }
515                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
516                                 kprintf("(SEL-B same cpu) ");
517                 }
518
519                 /*
520                  * Either no reschedule was requested or the best queue was
521                  * dd, and no current process has been selected.  We can
522                  * trivially become the current lwp on the current cpu.
523                  */
524                 if (dd->uschedcp == NULL) {
525                         atomic_clear_int(&lp->lwp_thread->td_mpflags,
526                                          TDF_MP_DIDYIELD);
527                         if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
528                                 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask,
529                                                      gd->gd_cpuid);
530                                 dd->flags |= DFLY_PCPU_CURMASK;
531                         }
532                         dd->uschedcp = lp;
533                         dd->upri = lp->lwp_priority;
534                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
535                         spin_unlock(&dd->spin);
536                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
537                                 kprintf("SEL-C cpu %d (same cpu)\n",
538                                         gd->gd_cpuid);
539                         break;
540                 }
541
542                 /*
543                  * Can we steal the current designated user thread?
544                  *
545                  * If we do the other thread will stall when it tries to
546                  * return to userland, possibly rescheduling elsewhere.
547                  * Set need_user_resched() to get the thread to cycle soonest.
548                  *
549                  * It is important to do a masked test to avoid the edge
550                  * case where two near-equal-priority threads are constantly
551                  * interrupting each other.
552                  *
553                  * In the exact match case another thread has already gained
554                  * uschedcp and lowered its priority, if we steal it the
555                  * other thread will stay stuck on the LWKT runq and not
556                  * push to another cpu.  So don't steal on equal-priority even
557                  * though it might appear to be more beneficial due to not
558                  * having to switch back to the other thread's context.
559                  *
560                  * usched_dfly_fast_resched requires that two threads be
561                  * significantly far apart in priority in order to interrupt.
562                  *
563                  * If better but not sufficiently far apart, the current
564                  * uschedcp will be interrupted at the next scheduler clock.
565                  */
566                 if (dd->uschedcp &&
567                    (dd->upri & ~PPQMASK) >
568                    (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
569                         dd->uschedcp = lp;
570                         dd->upri = lp->lwp_priority;
571                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
572                         need_user_resched();
573                         spin_unlock(&dd->spin);
574                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
575                                 kprintf("SEL-D cpu %d (same cpu)\n",
576                                         gd->gd_cpuid);
577                         break;
578                 }
579
580                 /*
581                  * Requeue us at lwp_priority, which recalculate_estcpu()
582                  * set for us.  Reset the rrcount to force placement
583                  * at the end of the queue.
584                  *
585                  * We used to move ourselves to the worst queue, but
586                  * this creates a fairly serious priority inversion
587                  * problem.
588                  */
589                 if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
590                         spin_unlock(&dd->spin);
591                         lp->lwp_rrcount = usched_dfly_rrinterval;
592                         lp->lwp_rqindex = (lp->lwp_priority & PRIMASK) / PPQ;
593
594                         lwkt_deschedule(lp->lwp_thread);
595                         dfly_setrunqueue_dd(dd, lp);
596                         atomic_clear_int(&lp->lwp_thread->td_mpflags,
597                                          TDF_MP_DIDYIELD);
598                         lwkt_switch();
599                         gd = mycpu;
600                         dd = &dfly_pcpu[gd->gd_cpuid];
601                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
602                                 kprintf("SEL-E cpu %d (requeue)\n",
603                                         gd->gd_cpuid);
604                         continue;
605                 }
606
607                 /*
608                  * We are not the current lwp, figure out the best cpu
609                  * to run on (our current cpu will be given significant
610                  * weight).  Loop on cpu change.
611                  */
612                 if ((usched_dfly_features & 0x02) &&
613                     force_resched == 0 &&
614                     (rdd = dfly_choose_best_queue(lp)) != dd) {
615                         dfly_changeqcpu_locked(lp, dd, rdd);
616                         spin_unlock(&dd->spin);
617                         lwkt_deschedule(lp->lwp_thread);
618                         dfly_setrunqueue_dd(rdd, lp);
619                         lwkt_switch();
620                         gd = mycpu;
621                         dd = &dfly_pcpu[gd->gd_cpuid];
622                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
623                                 kprintf("SEL-F cpu %d (requeue new cpu)\n",
624                                         gd->gd_cpuid);
625                         continue;
626                 }
627
628                 /*
629                  * We cannot become the current lwp, place the lp on the
630                  * run-queue of this or another cpu and deschedule ourselves.
631                  *
632                  * When we are reactivated we will have another chance.
633                  *
634                  * Reload after a switch or setrunqueue/switch possibly
635                  * moved us to another cpu.
636                  */
637                 spin_unlock(&dd->spin);
638                 lwkt_deschedule(lp->lwp_thread);
639                 dfly_setrunqueue_dd(dd, lp);
640                 lwkt_switch();
641                 gd = mycpu;
642                 dd = &dfly_pcpu[gd->gd_cpuid];
643                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
644                         kprintf("SEL-G cpu %d (fallback setrunq)\n",
645                                 gd->gd_cpuid);
646         }
647         if (usched_dfly_debug == lp->lwp_proc->p_pid)
648                 kprintf(" pid %d acquire DONE cpu %d\n",
649                         lp->lwp_proc->p_pid, gd->gd_cpuid);
650
651         /*
652          * Make sure upri is synchronized, then yield to LWKT threads as
653          * needed before returning.  This could result in another reschedule.
654          * XXX
655          */
656         crit_exit_quick(td);
657
658         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
659 }
660
661 /*
662  * DFLY_RELEASE_CURPROC
663  *
664  * This routine detaches the current thread from the userland scheduler,
665  * usually because the thread needs to run or block in the kernel (at
666  * kernel priority) for a while.
667  *
668  * This routine is also responsible for selecting a new thread to
669  * make the current thread.
670  *
671  * NOTE: This implementation differs from the dummy example in that
672  * dfly_select_curproc() is able to select the current process, whereas
673  * dummy_select_curproc() is not able to select the current process.
674  * This means we have to NULL out uschedcp.
675  *
676  * Additionally, note that we may already be on a run queue if releasing
677  * via the lwkt_switch() in dfly_setrunqueue().
678  */
679 static void
680 dfly_release_curproc(struct lwp *lp)
681 {
682         globaldata_t gd = mycpu;
683         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
684
685         /*
686          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
687          * by wakeup().
688          */
689         if (dd->uschedcp == lp) {
690                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
691                 spin_lock(&dd->spin);
692                 if (dd->uschedcp == lp) {
693                         dd->uschedcp = NULL;    /* don't let lp be selected */
694                         dd->upri = PRIBASE_NULL;
695
696                         /*
697                          * We're just going to set it again, avoid the global
698                          * cache line ping-pong.
699                          */
700                         if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0) {
701                                 if (dd->flags & DFLY_PCPU_CURMASK) {
702                                         ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask,
703                                                                gd->gd_cpuid);
704                                         dd->flags &= ~DFLY_PCPU_CURMASK;
705                                 }
706                         }
707                         spin_unlock(&dd->spin);
708                         dfly_select_curproc(gd);
709                 } else {
710                         spin_unlock(&dd->spin);
711                 }
712         }
713 }
714
715 /*
716  * DFLY_SELECT_CURPROC
717  *
718  * Select a new current process for this cpu and clear any pending user
719  * reschedule request.  The cpu currently has no current process.
720  *
721  * This routine is also responsible for equal-priority round-robining,
722  * typically triggered from dfly_schedulerclock().  In our dummy example
723  * all the 'user' threads are LWKT scheduled all at once and we just
724  * call lwkt_switch().
725  *
726  * The calling process is not on the queue and cannot be selected.
727  */
728 static
729 void
730 dfly_select_curproc(globaldata_t gd)
731 {
732         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
733         struct lwp *nlp;
734         int cpuid = gd->gd_cpuid;
735
736         crit_enter_gd(gd);
737
738         spin_lock(&dd->spin);
739         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
740
741         if (nlp) {
742                 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
743                         ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
744                         dd->flags |= DFLY_PCPU_CURMASK;
745                 }
746                 dd->upri = nlp->lwp_priority;
747                 dd->uschedcp = nlp;
748 #if 0
749                 dd->rrcount = 0;                /* reset round robin */
750 #endif
751                 spin_unlock(&dd->spin);
752                 lwkt_acquire(nlp->lwp_thread);
753                 lwkt_schedule(nlp->lwp_thread);
754         } else {
755                 spin_unlock(&dd->spin);
756         }
757         crit_exit_gd(gd);
758 }
759
760 /*
761  * Place the specified lwp on the user scheduler's run queue.  This routine
762  * must be called with the thread descheduled.  The lwp must be runnable.
763  * It must not be possible for anyone else to explicitly schedule this thread.
764  *
765  * The thread may be the current thread as a special case.
766  */
767 static void
768 dfly_setrunqueue(struct lwp *lp)
769 {
770         dfly_pcpu_t dd;
771         dfly_pcpu_t rdd;
772
773         /*
774          * First validate the process LWKT state.
775          */
776         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
777         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
778             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
779              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
780         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
781
782         /*
783          * NOTE: dd/rdd do not necessarily represent the current cpu.
784          *       Instead they may represent the cpu the thread was last
785          *       scheduled on or inherited by its parent.
786          */
787         dd = &dfly_pcpu[lp->lwp_qcpu];
788         rdd = dd;
789
790         /*
791          * This process is not supposed to be scheduled anywhere or assigned
792          * as the current process anywhere.  Assert the condition.
793          */
794         KKASSERT(rdd->uschedcp != lp);
795
796         /*
797          * Ok, we have to setrunqueue some target cpu and request a reschedule
798          * if necessary.
799          *
800          * We have to choose the best target cpu.  It might not be the current
801          * target even if the current cpu has no running user thread (for
802          * example, because the current cpu might be a hyperthread and its
803          * sibling has a thread assigned).
804          *
805          * If we just forked it is most optimal to run the child on the same
806          * cpu just in case the parent decides to wait for it (thus getting
807          * off that cpu).  As long as there is nothing else runnable on the
808          * cpu, that is.  If we did this unconditionally a parent forking
809          * multiple children before waiting (e.g. make -j N) leaves other
810          * cpus idle that could be working.
811          */
812         if (lp->lwp_forked) {
813                 lp->lwp_forked = 0;
814                 if (usched_dfly_features & 0x20)
815                         rdd = dfly_choose_best_queue(lp);
816                 else if (usched_dfly_features & 0x40)
817                         rdd = &dfly_pcpu[lp->lwp_qcpu];
818                 else if (usched_dfly_features & 0x80)
819                         rdd = dfly_choose_queue_simple(rdd, lp);
820                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
821                         rdd = dfly_choose_best_queue(lp);
822                 else
823                         rdd = &dfly_pcpu[lp->lwp_qcpu];
824         } else {
825                 rdd = dfly_choose_best_queue(lp);
826                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
827         }
828         if (lp->lwp_qcpu != rdd->cpuid) {
829                 spin_lock(&dd->spin);
830                 dfly_changeqcpu_locked(lp, dd, rdd);
831                 spin_unlock(&dd->spin);
832         }
833         dfly_setrunqueue_dd(rdd, lp);
834 }
835
836 /*
837  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
838  * spin-locked on-call.  rdd does not have to be.
839  */
840 static void
841 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
842 {
843         if (lp->lwp_qcpu != rdd->cpuid) {
844                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
845                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
846                         atomic_add_long(&dd->uload, -lp->lwp_uload);
847                         atomic_add_int(&dd->ucount, -1);
848                 }
849                 lp->lwp_qcpu = rdd->cpuid;
850         }
851 }
852
853 /*
854  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
855  * also performs all necessary ancillary notification actions.
856  */
857 static void
858 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
859 {
860         globaldata_t rgd;
861
862         /*
863          * We might be moving the lp to another cpu's run queue, and once
864          * on the runqueue (even if it is our cpu's), another cpu can rip
865          * it away from us.
866          *
867          * TDF_MIGRATING might already be set if this is part of a
868          * remrunqueue+setrunqueue sequence.
869          */
870         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
871                 lwkt_giveaway(lp->lwp_thread);
872
873         rgd = rdd->gd;
874
875         /*
876          * We lose control of the lp the moment we release the spinlock
877          * after having placed it on the queue.  i.e. another cpu could pick
878          * it up, or it could exit, or its priority could be further
879          * adjusted, or something like that.
880          *
881          * WARNING! rdd can point to a foreign cpu!
882          */
883         spin_lock(&rdd->spin);
884         dfly_setrunqueue_locked(rdd, lp);
885
886         /*
887          * Potentially interrupt the currently-running thread
888          */
889         if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
890                 /*
891                  * Currently running thread is better or same, do not
892                  * interrupt.
893                  */
894                 spin_unlock(&rdd->spin);
895         } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
896                    usched_dfly_fast_resched) {
897                 /*
898                  * Currently running thread is not better, but not so bad
899                  * that we need to interrupt it.  Let it run for one more
900                  * scheduler tick.
901                  */
902                 if (rdd->uschedcp &&
903                     rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
904                         rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
905                 }
906                 spin_unlock(&rdd->spin);
907         } else if (rgd == mycpu) {
908                 /*
909                  * We should interrupt the currently running thread, which
910                  * is on the current cpu.  However, if DIDYIELD is set we
911                  * round-robin unconditionally and do not interrupt it.
912                  */
913                 spin_unlock(&rdd->spin);
914                 if (rdd->uschedcp == NULL)
915                         wakeup_mycpu(rdd->helper_thread); /* XXX */
916                 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
917                         need_user_resched();
918         } else {
919                 /*
920                  * We should interrupt the currently running thread, which
921                  * is on a different cpu.
922                  */
923                 spin_unlock(&rdd->spin);
924                 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
925         }
926 }
927
928 /*
929  * This routine is called from a systimer IPI.  It MUST be MP-safe and
930  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
931  * each cpu.
932  */
933 static
934 void
935 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
936 {
937         globaldata_t gd = mycpu;
938         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
939
940         /*
941          * Spinlocks also hold a critical section so there should not be
942          * any active.
943          */
944         KKASSERT(gd->gd_spinlocks == 0 || dumping);
945
946         /*
947          * If lp is NULL we might be contended and lwkt_switch() may have
948          * cycled into the idle thread.  Apply the tick to the current
949          * process on this cpu if it is contended.
950          */
951         if (gd->gd_curthread == &gd->gd_idlethread) {
952                 lp = dd->uschedcp;
953                 if (lp && (lp->lwp_thread == NULL ||
954                            lp->lwp_thread->td_contended == 0)) {
955                         lp = NULL;
956                 }
957         }
958
959         /*
960          * Dock thread for tick
961          */
962         if (lp) {
963                 /*
964                  * Do we need to round-robin?  We round-robin 10 times a
965                  * second.  This should only occur for cpu-bound batch
966                  * processes.
967                  */
968                 if (++lp->lwp_rrcount >= usched_dfly_rrinterval)
969                         need_user_resched();
970
971                 /*
972                  * Adjust estcpu upward using a real time equivalent
973                  * calculation, and recalculate lp's priority.  Estcpu
974                  * is increased such that it will cap-out over a period
975                  * of one second.
976                  */
977                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
978                                            ESTCPUMAX / ESTCPUFREQ + 1);
979                 dfly_resetpriority(lp);
980         }
981
982         /*
983          * Rebalance two cpus every 8 ticks, pulling the worst thread
984          * from the worst cpu's queue into a rotating cpu number.
985          * Also require that the moving of the highest-load thread
986          * from rdd to dd does not cause the uload to cross over.
987          *
988          * This mechanic is needed because the push algorithms can
 989          * steady-state in a non-optimal configuration.  We need to mix it
990          * up a little, even if it means breaking up a paired thread, so
991          * the push algorithms can rebalance the degenerate conditions.
992          * This portion of the algorithm exists to ensure stability at the
993          * selected weightings.
994          *
995          * Because we might be breaking up optimal conditions we do not want
 996          * to execute this too quickly, hence we only rebalance approximately
 997          * 7-8 times per second.  The pushes, on the other hand, are capable of
 998          * moving threads to other cpus at a much higher rate.
999          *
1000          * We choose the most heavily loaded thread from the worst queue
1001          * in order to ensure that multiple heavy-weight threads on the same
1002          * queue get broken up, and also because these threads are the most
1003          * likely to be able to remain in place.  Hopefully then any pairings,
1004          * if applicable, migrate to where these threads are.
1005          */
1006         if ((usched_dfly_features & 0x04) &&
1007             ((u_int)sched_ticks & 7) == 0 &&
1008             (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
1009                 /*
1010                  * Our cpu is up.
1011                  */
1012                 struct lwp *nlp;
1013                 dfly_pcpu_t rdd;
1014
1015                 rdd = dfly_choose_worst_queue(dd, 1);
1016                 if (rdd && dd->uload + usched_dfly_weight6 / 2 < rdd->uload) {
1017                         spin_lock(&dd->spin);
1018                         if (spin_trylock(&rdd->spin)) {
1019                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
1020                                 spin_unlock(&rdd->spin);
1021                                 if (nlp == NULL)
1022                                         spin_unlock(&dd->spin);
1023                         } else {
1024                                 spin_unlock(&dd->spin);
1025                                 nlp = NULL;
1026                         }
1027                 } else {
1028                         nlp = NULL;
1029                 }
1030                 /* dd->spin held if nlp != NULL */
1031
1032                 /*
1033                  * Either schedule it or add it to our queue.
1034                  */
1035                 if (nlp &&
1036                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
1037                         if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
1038                                 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask,
1039                                                       dd->cpumask);
1040                                 dd->flags |= DFLY_PCPU_CURMASK;
1041                         }
1042                         dd->upri = nlp->lwp_priority;
1043                         dd->uschedcp = nlp;
1044 #if 0
1045                         dd->rrcount = 0;        /* reset round robin */
1046 #endif
1047                         spin_unlock(&dd->spin);
1048                         lwkt_acquire(nlp->lwp_thread);
1049                         lwkt_schedule(nlp->lwp_thread);
1050                 } else if (nlp) {
1051                         dfly_setrunqueue_locked(dd, nlp);
1052                         spin_unlock(&dd->spin);
1053                 }
1054         }
1055 }
1056
1057 /*
1058  * Called from acquire and from kern_synch's one-second timer (one of the
1059  * callout helper threads) with a critical section held.
1060  *
1061  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
1062  * overall system load.
1063  *
1064  * Note that no recalculation occurs for a process which sleeps and wakes
1065  * up in the same tick.  That is, a system doing thousands of context
1066  * switches per second will still only do serious estcpu calculations
1067  * ESTCPUFREQ times per second.
1068  */
1069 static
1070 void
1071 dfly_recalculate_estcpu(struct lwp *lp)
1072 {
1073         globaldata_t gd = mycpu;
1074         sysclock_t cpbase;
1075         sysclock_t ttlticks;
1076         int estcpu;
1077         int decay_factor;
1078         int ucount;
1079
1080         /*
1081          * We have to subtract periodic to get the last schedclock
1082          * timeout time, otherwise we would get the upcoming timeout.
1083          * Keep in mind that a process can migrate between cpus and
1084          * while the scheduler clock should be very close, boundary
1085          * conditions could lead to a small negative delta.
1086          */
1087         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
1088
1089         if (lp->lwp_slptime > 1) {
1090                 /*
1091                  * Too much time has passed, do a coarse correction.
1092                  */
1093                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
1094                 dfly_resetpriority(lp);
1095                 lp->lwp_cpbase = cpbase;
1096                 lp->lwp_cpticks = 0;
1097                 lp->lwp_estfast = 0;
1098         } else if (lp->lwp_cpbase != cpbase) {
1099                 /*
1100                  * Adjust estcpu if we are in a different tick.  Don't waste
1101                  * time if we are in the same tick.
1102                  *
1103                  * First calculate the number of ticks in the measurement
1104                  * interval.  The ttlticks calculation can wind up 0 due to
 1105                  * a bug in the handling of lwp_slptime (as yet not found),
1106                  * so make sure we do not get a divide by 0 panic.
1107                  */
1108                 ttlticks = (cpbase - lp->lwp_cpbase) /
1109                            gd->gd_schedclock.periodic;
1110                 if ((ssysclock_t)ttlticks < 0) {
1111                         ttlticks = 0;
1112                         lp->lwp_cpbase = cpbase;
1113                 }
1114                 if (ttlticks < 4)
1115                         return;
1116                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
1117
1118                 /*
1119                  * Calculate instant estcpu based percentage of (one) cpu
1120                  * used and exponentially average it into the current
1121                  * lwp_estcpu.
1122                  */
1123                 ucount = dfly_pcpu[lp->lwp_qcpu].ucount;
1124                 estcpu = lp->lwp_cpticks * ESTCPUMAX / ttlticks;
1125
1126                 /*
1127                  * The higher ttlticks gets, the more meaning the calculation
1128                  * has and the smaller our decay_factor in the exponential
1129                  * average.
1130                  *
1131                  * The uload calculation has been removed because it actually
1132                  * makes things worse, causing processes which use less cpu
1133                  * (such as a browser) to be pumped up and treated the same
1134                  * as a cpu-bound process (such as a make).  The same effect
1135                  * can occur with sufficient load without the uload
1136                  * calculation, but occurs less quickly and takes more load.
1137                  * In addition, the less cpu a process uses the smaller the
1138                  * effect of the overload.
1139                  */
1140                 if (ttlticks >= hz)
1141                         decay_factor = 1;
1142                 else
1143                         decay_factor = hz - ttlticks;
1144
1145                 lp->lwp_estcpu = ESTCPULIM(
1146                                 (lp->lwp_estcpu * ttlticks + estcpu) /
1147                                 (ttlticks + 1));
1148                 dfly_resetpriority(lp);
1149                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1150                 lp->lwp_cpticks = 0;
1151         }
1152 }
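/*
 * Worked example of the averaging above (illustrative): a fully cpu-bound
 * thread has lwp_cpticks == ttlticks, so its instantaneous estcpu is
 * ESTCPUMAX (10240); with ttlticks = 4 the new lwp_estcpu becomes
 * (old * 4 + 10240) / 5, i.e. it moves roughly 1/5 of the remaining
 * distance toward saturation on each recalculation.
 */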
1153
1154 /*
1155  * Compute the priority of a process when running in user mode.
1156  * Arrange to reschedule if the resulting priority is better
1157  * than that of the current process.
1158  *
1159  * This routine may be called with any process.
1160  *
1161  * This routine is called by fork1() for initial setup with the process of
1162  * the run queue, and also may be called normally with the process on or
1163  * off the run queue.
1164  */
1165 static void
1166 dfly_resetpriority(struct lwp *lp)
1167 {
1168         dfly_pcpu_t rdd;
1169         int newpriority;
1170         u_short newrqtype;
1171         int rcpu;
1172         int checkpri;
1173         int estcpu;
1174         int delta_uload;
1175
1176         crit_enter();
1177
1178         /*
1179          * Lock the scheduler (lp) belongs to.  This can be on a different
1180          * cpu.  Handle races.  This loop breaks out with the appropriate
1181          * rdd locked.
1182          */
1183         for (;;) {
1184                 rcpu = lp->lwp_qcpu;
1185                 cpu_ccfence();
1186                 rdd = &dfly_pcpu[rcpu];
1187                 spin_lock(&rdd->spin);
1188                 if (rcpu == lp->lwp_qcpu)
1189                         break;
1190                 spin_unlock(&rdd->spin);
1191         }
1192
1193         /*
1194          * Calculate the new priority and queue type
1195          */
1196         newrqtype = lp->lwp_rtprio.type;
1197
1198         switch(newrqtype) {
1199         case RTP_PRIO_REALTIME:
1200         case RTP_PRIO_FIFO:
1201                 newpriority = PRIBASE_REALTIME +
1202                              (lp->lwp_rtprio.prio & PRIMASK);
1203                 break;
1204         case RTP_PRIO_NORMAL:
1205                 /*
1206                  * Calculate the new priority.
1207                  *
1208                  * nice contributes up to NICE_QS queues (typ 32 - full range)
1209                  * estcpu contributes up to EST_QS queues (typ 24)
1210                  *
1211                  * A nice +20 process receives 1/10 cpu vs nice+0.  Niced
 1212                  * processes more than 20 apart may receive no cpu, so cpu
1213                  * bound nice -20 can prevent a nice +5 from getting any
1214                  * cpu.  A nice+0, being in the middle, always gets some cpu
1215                  * no matter what.
1216                  */
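                /*
                 * Illustrative arithmetic (derived from the constants
                 * above, not in the original source): with PRIO_MIN = -20,
                 * PRIO_RANGE = 41, NICE_QS = 24 and PPQ = 4, nice +0
                 * contributes (20 * 96) / 41 = 46 and a saturated estcpu
                 * of ESTCPUMAX adds (10240 * 4) / 512 = 80, for a value of
                 * 126 in the 0-127 range before PRIBASE_NORMAL is added.
                 */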
1217                 estcpu = lp->lwp_estcpu;
1218                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) *
1219                               (NICE_QS * PPQ) / PRIO_RANGE;
1220                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1221                 if (newpriority < 0)
1222                         newpriority = 0;
1223                 if (newpriority >= MAXPRI)
1224                         newpriority = MAXPRI - 1;
1225                 newpriority += PRIBASE_NORMAL;
1226                 break;
1227         case RTP_PRIO_IDLE:
1228                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1229                 break;
1230         case RTP_PRIO_THREAD:
1231                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1232                 break;
1233         default:
1234                 panic("Bad RTP_PRIO %d", newrqtype);
1235                 /* NOT REACHED */
1236         }
1237
1238         /*
 1239          * The LWKT scheduler doesn't dive into usched structures, so give it a hint
1240          * on the relative priority of user threads running in the kernel.
1241          * The LWKT scheduler will always ensure that a user thread running
1242          * in the kernel will get cpu some time, regardless of its upri,
1243          * but can decide not to instantly switch from one kernel or user
1244          * mode user thread to a kernel-mode user thread when it has a less
 1245          * desirable user priority.
1246          *
 1247          * td_upri has normal sense (higher values are more desirable), so
 1248          * negate it (this is a different field from lp->lwp_priority).
1249          */
1250         lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1251
1252         /*
1253          * The newpriority incorporates the queue type so do a simple masked
1254          * check to determine if the process has moved to another queue.  If
1255          * it has, and it is currently on a run queue, then move it.
1256          *
1257          * Since uload is ~PPQMASK masked, no modifications are necessary if
1258          * we end up in the same run queue.
1259          *
1260          * Reset rrcount if moving to a higher-priority queue, otherwise
1261          * retain rrcount.
1262          */
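        /*
         * Example of the masked test below (illustrative, not part of the
         * original source): with PPQ = 4 and PPQMASK = 3, priorities 130
         * and 129 differ only within a queue (130 ^ 129 = 3, & ~3 = 0) so
         * the else path runs, while 130 vs 134 (130 ^ 134 = 4, & ~3 = 4)
         * crosses a queue boundary and takes the requeue path.
         */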
1263         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1264                 if (lp->lwp_priority < newpriority)
1265                         lp->lwp_rrcount = 0;
1266                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1267                         dfly_remrunqueue_locked(rdd, lp);
1268                         lp->lwp_priority = newpriority;
1269                         lp->lwp_rqtype = newrqtype;
1270                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1271                         dfly_setrunqueue_locked(rdd, lp);
1272                         checkpri = 1;
1273                 } else {
1274                         lp->lwp_priority = newpriority;
1275                         lp->lwp_rqtype = newrqtype;
1276                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1277                         checkpri = 0;
1278                 }
1279         } else {
1280                 /*
1281                  * In the same PPQ, uload cannot change.
1282                  */
1283                 lp->lwp_priority = newpriority;
1284                 checkpri = 1;
1285                 rcpu = -1;
1286         }
1287
1288         /*
1289          * Adjust effective load.
1290          *
1291          * Calculate load then scale up or down geometrically based on p_nice.
1292          * Processes niced up (positive) are less important, and processes
1293          * niced downward (negative) are more important.  The higher the uload,
1294          * the more important the thread.
1295          */
1296         /* 0-511, 0-100% cpu */
1297         delta_uload = lptouload(lp);
1298         delta_uload -= lp->lwp_uload;
1299         if (lp->lwp_uload + delta_uload < -32767) {
1300                 delta_uload = -32768 - lp->lwp_uload;
1301         } else if (lp->lwp_uload + delta_uload > 32767) {
1302                 delta_uload = 32767 - lp->lwp_uload;
1303         }
1304         lp->lwp_uload += delta_uload;
1305         if (lp->lwp_mpflags & LWP_MP_ULOAD)
1306                 atomic_add_long(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1307
1308         /*
1309          * Determine if we need to reschedule the target cpu.  This only
1310          * occurs if the LWP is already on a scheduler queue, which means
1311          * that idle cpu notification has already occurred.  At most we
1312          * need only issue a need_user_resched() on the appropriate cpu.
1313          *
1314          * The LWP may be owned by a CPU different from the current one,
1315          * in which case dd->uschedcp may be modified without an MP lock
1316          * or a spinlock held.  The worst that happens is that the code
1317          * below causes a spurious need_user_resched() on the target CPU
1318          * and dd->pri to be wrong for a short period of time, both of
1319          * which are harmless.
1320          *
1321          * If checkpri is 0 we are adjusting the priority of the current
1322          * process, possibly higher (less desirable), so ignore the upri
1323          * check which will fail in that case.
1324          */
1325         if (rcpu >= 0) {
1326                 if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1327                     (checkpri == 0 ||
1328                      (rdd->upri & ~PRIMASK) >
1329                      (lp->lwp_priority & ~PRIMASK))) {
1330                         if (rcpu == mycpu->gd_cpuid) {
1331                                 spin_unlock(&rdd->spin);
1332                                 need_user_resched();
1333                         } else {
1334                                 spin_unlock(&rdd->spin);
1335                                 lwkt_send_ipiq(globaldata_find(rcpu),
1336                                                dfly_need_user_resched_remote,
1337                                                NULL);
1338                         }
1339                 } else {
1340                         spin_unlock(&rdd->spin);
1341                 }
1342         } else {
1343                 spin_unlock(&rdd->spin);
1344         }
1345         crit_exit();
1346 }
1347
1348 static
1349 void
1350 dfly_yield(struct lwp *lp)
1351 {
1352         if (lp->lwp_qcpu != mycpu->gd_cpuid)
1353                 return;
1354         KKASSERT(lp == curthread->td_lwp);
1355
1356         /*
1357          * Don't set need_user_resched() or mess with rrcount or anything;
1358          * the TDF flag will override everything as long as we release.
1359          */
1360         atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1361         dfly_release_curproc(lp);
1362 }
1363
1364 /*
1365  * Thread was forcefully migrated to another cpu.  Normally forced migrations
1366  * are used for iterations and the kernel returns to the original cpu before
1367  * returning, so this is not needed.  However, if the kernel migrates a
1368  * thread to another cpu and wants to leave it there, it has to call this
1369  * scheduler helper.
1370  *
1371  * Note that the lwkt_migratecpu() function also released the thread, so
1372  * we don't have to worry about that.
1373  */
1374 static
1375 void
1376 dfly_changedcpu(struct lwp *lp)
1377 {
1378         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1379         dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1380
1381         if (dd != rdd) {
1382                 spin_lock(&dd->spin);
1383                 dfly_changeqcpu_locked(lp, dd, rdd);
1384                 spin_unlock(&dd->spin);
1385         }
1386 }
1387
1388 /*
1389  * Called from fork1() when a new child process is being created.
1390  *
1391  * Give the child process an initial estcpu that is more batchy than
1392  * its parent and dock the parent for the fork (but do not
1393  * reschedule the parent).
1394  *
1395  * fast
1396  *
1397  * XXX lwp should be "spawning" instead of "forking"
1398  */
1399 static void
1400 dfly_forking(struct lwp *plp, struct lwp *lp)
1401 {
1402         int estcpu;
1403
1404         /*
1405          * Put the child 4 queue slots (out of 32) higher than the parent
1406          * (less desireable than the parent).
1407          * (less desirable than the parent).
1408         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu +
1409                                    ESTCPUPPQ * usched_dfly_forkbias);
1410         lp->lwp_forked = 1;
1411         lp->lwp_estfast = 0;
1412
1413         /*
1414          * Even though the lp will be scheduled specially the first time
1415          * due to lp->lwp_forked, it is important to initialize lwp_qcpu
1416          * to avoid favoring a fixed cpu.
1417          */
1418 #if 0
1419         static uint16_t save_cpu;
1420         lp->lwp_qcpu = ++save_cpu % ncpus;
1421 #else
1422         lp->lwp_qcpu = plp->lwp_qcpu;
1423         if (CPUMASK_TESTBIT(lp->lwp_cpumask, lp->lwp_qcpu) == 0)
1424                 lp->lwp_qcpu = BSFCPUMASK(lp->lwp_cpumask);
1425 #endif
1426
1427         /*
1428          * Dock the parent a cost for the fork, protecting us from fork
1429          * bombs.  If the parent is forking quickly this makes both the
1430          * parent and child more batchy.
1431          */
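        /*
         * Rough scale of the dock below (illustrative note, not from the
         * original source): ESTCPUPPQ estcpu units correspond to one
         * priority queue (see the estcpu * PPQ / ESTCPUPPQ scaling earlier
         * in this file), so each fork charges the parent 1/16 of a queue.
         * A parent that forks roughly sixteen times faster than its estcpu
         * can decay drifts about one whole queue toward batch behaviour,
         * which is what blunts fork bombs.
         */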
1432         estcpu = plp->lwp_estcpu + ESTCPUPPQ / 16;
1433         plp->lwp_estcpu = ESTCPULIM(estcpu);
1434 }
1435
1436 /*
1437  * Called when a lwp is being removed from this scheduler, typically
1438  * during lwp_exit().  We have to clean out any ULOAD accounting before
1439  * we can let the lp go.  The dd->spin lock is not needed for uload
1440  * updates.
1441  *
1442  * Scheduler dequeueing has already occurred, no further action in that
1443  * regard is needed.
1444  */
1445 static void
1446 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1447 {
1448         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1449
1450         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1451                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1452                 atomic_add_long(&dd->uload, -lp->lwp_uload);
1453                 atomic_add_int(&dd->ucount, -1);
1454         }
1455 }
1456
1457 /*
1458  * This function cannot block in any way, but spinlocks are ok.
1459  *
1460  * Update the uload based on the state of the thread (whether it is going
1461  * to sleep or running again).  The uload is meant to be a longer-term
1462  * load and not an instantaneous load.
1463  */
1464 static void
1465 dfly_uload_update(struct lwp *lp)
1466 {
1467         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1468
1469         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1470                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1471                         spin_lock(&dd->spin);
1472                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1473                                 atomic_set_int(&lp->lwp_mpflags,
1474                                                LWP_MP_ULOAD);
1475                                 atomic_add_long(&dd->uload, lp->lwp_uload);
1476                                 atomic_add_int(&dd->ucount, 1);
1477                         }
1478                         spin_unlock(&dd->spin);
1479                 }
1480         } else if (lp->lwp_slptime > 0) {
1481                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1482                         spin_lock(&dd->spin);
1483                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1484                                 atomic_clear_int(&lp->lwp_mpflags,
1485                                                  LWP_MP_ULOAD);
1486                                 atomic_add_long(&dd->uload, -lp->lwp_uload);
1487                                 atomic_add_int(&dd->ucount, -1);
1488                         }
1489                         spin_unlock(&dd->spin);
1490                 }
1491         }
1492 }
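
/*
 * Note on the pattern above (illustrative, not part of the original source):
 * LWP_MP_ULOAD is tested once without dd->spin so the common no-transition
 * case stays cheap, then re-tested after acquiring the spinlock so that two
 * racing updaters cannot both add (or both subtract) the same lwp_uload
 * from dd->uload and dd->ucount.
 */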
1493
1494 /*
1495  * chooseproc() is called when a cpu needs a user process to LWKT schedule;
1496  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1497  * has a better or equal priority than the process that would otherwise be
1498  * chosen, NULL is returned.
1499  *
1500  * Until we fix the RUNQ code the chklp test has to be strict or we may
1501  * bounce between processes trying to acquire the current process designation.
1502  *
1503  * Must be called with rdd->spin locked.  The spinlock is left intact through
1504  * the entire routine.  dd->spin does not have to be locked.
1505  *
1506  * If worst is non-zero this function finds the worst thread instead of the
1507  * best thread (used by the schedulerclock-based rover).
1508  */
1509 static
1510 struct lwp *
1511 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1512                        struct lwp *chklp, int worst)
1513 {
1514         struct lwp *lp;
1515         struct rq *q;
1516         u_int32_t *which;
1517         u_int32_t pri;
1518         u_int32_t rtqbits;
1519         u_int32_t tsqbits;
1520         u_int32_t idqbits;
1521
1522         /*
1523          * Select best or worst process.  Once selected, clear the bit
1524          * in our local variable (idqbits, tsqbits, or rtqbits) just
1525          * in case we have to loop.
1526          */
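        /*
         * Scan-order example (illustrative, not part of the original
         * source): queue bit N corresponds to run queue N and lower
         * indexes are better priorities.  If tsqbits == 0x14 (queues 2
         * and 4 occupied), bsfl() returns 2 so the "best" path services
         * queues[2], while bsrl() returns 4 so the "worst" path (the
         * rover) services queues[4].  Clearing the chosen bit in the
         * local copy lets the loopfar retry skip a queue whose candidates
         * were all rejected by the cpumask check further below.
         */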
1527         rtqbits = rdd->rtqueuebits;
1528         tsqbits = rdd->queuebits;
1529         idqbits = rdd->idqueuebits;
1530
1531 loopfar:
1532         if (worst) {
1533                 if (idqbits) {
1534                         pri = bsrl(idqbits);
1535                         idqbits &= ~(1U << pri);
1536                         q = &rdd->idqueues[pri];
1537                         which = &rdd->idqueuebits;
1538                 } else if (tsqbits) {
1539                         pri = bsrl(tsqbits);
1540                         tsqbits &= ~(1U << pri);
1541                         q = &rdd->queues[pri];
1542                         which = &rdd->queuebits;
1543                 } else if (rtqbits) {
1544                         pri = bsrl(rtqbits);
1545                         rtqbits &= ~(1U << pri);
1546                         q = &rdd->rtqueues[pri];
1547                         which = &rdd->rtqueuebits;
1548                 } else {
1549                         return (NULL);
1550                 }
1551                 lp = TAILQ_LAST(q, rq);
1552         } else {
1553                 if (rtqbits) {
1554                         pri = bsfl(rtqbits);
1555                         rtqbits &= ~(1U << pri);
1556                         q = &rdd->rtqueues[pri];
1557                         which = &rdd->rtqueuebits;
1558                 } else if (tsqbits) {
1559                         pri = bsfl(tsqbits);
1560                         tsqbits &= ~(1U << pri);
1561                         q = &rdd->queues[pri];
1562                         which = &rdd->queuebits;
1563                 } else if (idqbits) {
1564                         pri = bsfl(idqbits);
1565                         idqbits &= ~(1U << pri);
1566                         q = &rdd->idqueues[pri];
1567                         which = &rdd->idqueuebits;
1568                 } else {
1569                         return (NULL);
1570                 }
1571                 lp = TAILQ_FIRST(q);
1572         }
1573         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1574
1575 loopnear:
1576         /*
1577          * If the passed lwp <chklp> is reasonably close to the selected
1578          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1579          *
1580          * Note that we must err on the side of <chklp> to avoid bouncing
1581          * between threads in the acquire code.
1582          */
1583         if (chklp) {
1584                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1585                         return(NULL);
1586         }
1587
1588         /*
1589          * When rdd != dd, we have to make sure that the process we
1590          * are pulling is allowed to run on our cpu.  This alternative
1591          * path is a bit more expensive but it's not considered to be
1592          * in the critical path.
1593          */
1594         if (rdd != dd && CPUMASK_TESTBIT(lp->lwp_cpumask, dd->cpuid) == 0) {
1595                 if (worst)
1596                         lp = TAILQ_PREV(lp, rq, lwp_procq);
1597                 else
1598                         lp = TAILQ_NEXT(lp, lwp_procq);
1599                 if (lp)
1600                         goto loopnear;
1601                 goto loopfar;
1602         }
1603
1604         KTR_COND_LOG(usched_chooseproc,
1605             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1606             lp->lwp_proc->p_pid,
1607             lp->lwp_thread->td_gd->gd_cpuid,
1608             mycpu->gd_cpuid);
1609
1610         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1611         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1612         TAILQ_REMOVE(q, lp, lwp_procq);
1613         --rdd->runqcount;
1614         if (TAILQ_EMPTY(q))
1615                 *which &= ~(1 << pri);
1616
1617         /*
1618          * If we are choosing a process from rdd with the intent to
1619          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1620          * is still held.
1621          */
1622         if (rdd != dd) {
1623                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1624                         atomic_add_long(&rdd->uload, -lp->lwp_uload);
1625                         atomic_add_int(&rdd->ucount, -1);
1626                 }
1627                 lp->lwp_qcpu = dd->cpuid;
1628                 atomic_add_long(&dd->uload, lp->lwp_uload);
1629                 atomic_add_int(&dd->ucount, 1);
1630                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1631         }
1632         return lp;
1633 }
1634
1635 /*
1636  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1637  *
1638  * Choose a cpu node to schedule lp on, hopefully nearby its current
1639  * node.
1640  *
1641  * We give the current node a modest advantage for obvious reasons.
1642  *
1643  * We also give the node the thread was woken up FROM a slight advantage
1644  * in order to try to schedule paired threads which synchronize/block waiting
1645  * for each other fairly close to each other.  Similarly in a network setting
1646  * this feature will also attempt to place a user process near the kernel
1647  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1648  * algorithm as it heuristically groups synchronizing processes for locality
1649  * of reference in multi-socket systems.
1650  *
1651  * We check against running processes and give a big advantage if there
1652  * are none running.
1653  *
1654  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1655  *
1656  * When the topology is known choose a cpu whose group has, in aggregate,
1657  * the lowest weighted load.
1658  */
1659 static
1660 dfly_pcpu_t
1661 dfly_choose_best_queue(struct lwp *lp)
1662 {
1663         cpumask_t wakemask;
1664         cpumask_t mask;
1665         cpu_node_t *cpup;
1666         cpu_node_t *cpun;
1667         cpu_node_t *cpub;
1668         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1669         dfly_pcpu_t rdd;
1670         int wakecpu;
1671         int cpuid;
1672         int n;
1673         int loadav;
1674         long load;
1675         long lowest_load;
1676
1677         /*
1678          * When the topology is unknown choose a random cpu that is hopefully
1679          * idle.
1680          */
1681         if (dd->cpunode == NULL)
1682                 return (dfly_choose_queue_simple(dd, lp));
1683
1684         loadav = (averunnable.ldavg[0] + FSCALE / 2) >> FSHIFT;
1685
1686         /*
1687          * Pairing mask
1688          */
1689         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1690                 wakemask = dfly_pcpu[wakecpu].cpumask;
1691         else
1692                 CPUMASK_ASSZERO(wakemask);
1693
1694         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1695                 kprintf("choosebest wakefromcpu %d:\n",
1696                         lp->lwp_thread->td_wakefromcpu);
1697
1698         /*
1699          * When the topology is known choose a cpu whose group has, in
1700          * aggregate, the lowest weighted load.
1701          */
1702         cpup = root_cpu_node;
1703         rdd = dd;
1704
1705         while (cpup) {
1706                 /*
1707                  * Degenerate case super-root
1708                  */
1709                 if (cpup->child_no == 1) {
1710                         cpup = cpup->child_node[0];
1711                         continue;
1712                 }
1713
1714                 /*
1715                  * Terminal cpunode
1716                  */
1717                 if (cpup->child_no == 0) {
1718                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1719                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1720                                 kprintf("  last cpu %d\n", rdd->cpuid);
1721                         break;
1722                 }
1723
1724                 cpub = NULL;
1725                 lowest_load = 0x7FFFFFFFFFFFFFFFLL;
1726                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1727                         kprintf("  reset lowest_load for scan\n");
1728
1729                 for (n = 0; n < cpup->child_no; ++n) {
1730                         /*
1731                          * Accumulate load information for all cpus
1732                          * which are members of this node.
1733                          */
1734                         int count;
1735
1736                         cpun = cpup->child_node[n];
1737                         mask = cpun->members;
1738                         CPUMASK_ANDMASK(mask, usched_global_cpumask);
1739                         CPUMASK_ANDMASK(mask, smp_active_mask);
1740                         CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1741                         if (CPUMASK_TESTZERO(mask))
1742                                 continue;
1743
1744                         load = 0;
1745                         count = 0;
1746
1747                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1748                                 kprintf("  mask:");
1749                         while (CPUMASK_TESTNZERO(mask)) {
1750                                 cpuid = BSFCPUMASK(mask);
1751                                 rdd = &dfly_pcpu[cpuid];
1752
1753                                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1754                                         kprintf(" %d", cpuid);
1755
1756                                 /*
1757                                  * Cumulative load for members.  Note that
1758                                  * if (lp) is part of the group, lp's
1759                                  * contribution will be backed out later.
1760                                  */
1761                                 load += rdd->uload;
1762                                 load += rdd->ucount *
1763                                         usched_dfly_weight3;
1764
1765                                 /*
1766                                  * If the node is running a less important
1767                                  * thread than our thread, give it an
1768                                  * advantage.  With a high-enough weighting
1769                                  * this can override most other considerations
1770                                  * to provide ultimate priority fairness at
1771                                  * the cost of localization.
1772                                  */
1773                                 if ((rdd->upri & ~PPQMASK) >
1774                                     (lp->lwp_priority & ~PPQMASK)) {
1775                                         load -= usched_dfly_weight4;
1776                                 }
1777
1778 #if 0
1779                                 if (rdd->uschedcp == NULL &&
1780                                     rdd->runqcount == 0 &&
1781                                     rdd->gd->gd_tdrunqcount == 0
1782                                 ) {
1783                                         load += rdd->uload / 2;
1784                                         load += rdd->ucount *
1785                                                 usched_dfly_weight3 / 2;
1786                                 } else {
1787                                         load += rdd->uload;
1788                                         load += rdd->ucount *
1789                                                 usched_dfly_weight3;
1790                                 }
1791 #endif
1792                                 CPUMASK_NANDBIT(mask, cpuid);
1793                                 ++count;
1794                         }
1795
1796                         /*
1797                          * Compensate if the lp is already accounted for in
1798                          * the aggregate uload for this mask set.  We want
1799                          * to calculate the loads as if lp were not present,
1800                          * otherwise the calculation is bogus.
1801                          */
1802                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1803                             CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1804                                 load -= lp->lwp_uload;
1805                                 load -= usched_dfly_weight3;    /* ucount */
1806                         }
1807
1808                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1809                                 kprintf("\n  accum_start c=%d ld=%ld "
1810                                         "cpu=%d ld/cnt=%ld ",
1811                                         count, load, rdd->cpuid,
1812                                         load / count);
1813
1814                         /*
1815                          * load is the aggregate load of count CPUs in the
1816                          * group.  For the weightings to work as intended,
1817                          * we want an average per-cpu load.
1818                          */
1819                         load = load / count;
1820
1821                         /*
1822                          * Advantage the cpu group (lp) is already on.
1823                          */
1824                         if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1825                                 load -= usched_dfly_weight1;
1826
1827                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1828                                 kprintf("B:%ld ", load);
1829
1830                         /*
1831                          * Advantage nodes with more memory
1832                          */
1833                         if (usched_dfly_node_mem) {
1834                                 load -= cpun->phys_mem * usched_dfly_weight5 /
1835                                         usched_dfly_node_mem;
1836                         }
1837
1838                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1839                                 kprintf("C:%ld ", load);
1840
1841                         /*
1842                          * Advantage the cpu group we desire to pair (lp)
1843                          * to, but disadvantage hyperthreads on the same
1844                          * core, or the same thread as the ipc peer.
1845                          *
1846                          * Under very heavy loads it is usually beneficial
1847                          * to set kern.usched_dfly.ipc_smt to 1, and under
1848                          * extreme loads it might be beneficial to also set
1849                          * kern.usched_dfly.ipc_same to 1.
1850                          *
1851                          * load+    disadvantage
1852                          * load-    advantage
1853                          */
1854                         if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1855                                 if (cpun->child_no) {
1856                                         if (cpun->type == CORE_LEVEL &&
1857                                             usched_dfly_ipc_smt < 0 &&
1858                                             loadav >= (ncpus >> 1)) {
1859                                                 /*
1860                                                  * Advantage at higher levels
1861                                                  * of the topology.
1862                                                  */
1863                                                 load -= usched_dfly_weight2;
1864                                         } else if (cpun->type == CORE_LEVEL &&
1865                                                    usched_dfly_ipc_smt == 0) {
1866                                                 /*
1867                                                  * Disadvantage the same core
1868                                                  * when there are hyperthreads.
1869                                                  */
1870                                                 load += usched_dfly_weight2;
1871                                         } else {
1872                                                 /*
1873                                                  * Advantage at higher levels
1874                                                  * of the topology.
1875                                                  */
1876                                                 load -= usched_dfly_weight2;
1877                                         }
1878                                 } else {
1879                                         /*
1880                                          * Disadvantage the last level (core
1881                                          * or hyperthread).  Try to schedule
1882                                          * the ipc
1883                                          */
1884                                         if (usched_dfly_ipc_same < 0 &&
1885                                             loadav >= ncpus) {
1886                                                 load -= usched_dfly_weight2;
1887                                         } else if (usched_dfly_ipc_same) {
1888                                                 load -= usched_dfly_weight2;
1889                                         } else {
1890                                                 load += usched_dfly_weight2;
1891                                         }
1892                                 }
1893 #if 0
1894                                 if (cpun->child_no != 0) {
1895                                         /* advantage */
1896                                         load -= usched_dfly_weight2;
1897                                 } else {
1898                                         /*
1899                                          * 0x10 (disadvantage)
1900                                          * 0x00 (advantage)   - default
1901                                          */
1902                                         if (usched_dfly_features & 0x10)
1903                                                 load += usched_dfly_weight2;
1904                                         else
1905                                                 load -= usched_dfly_weight2;
1906                                 }
1907 #endif
1908                         }
1909
1910                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1911                                 kprintf("D:%ld ", load);
1912
1913                         /*
1914                          * Calculate the best load
1915                          */
1916                         if (cpub == NULL || lowest_load > load ||
1917                             (lowest_load == load &&
1918                              CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1919                         ) {
1920                                 lowest_load = load;
1921                                 cpub = cpun;
1922                         }
1923
1924                         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1925                                 kprintf("low=%ld]\n", lowest_load);
1926                 }
1927                 cpup = cpub;
1928         }
1929         /* Dispatch this outcast to a proper CPU. */
1930         if (__predict_false(CPUMASK_TESTBIT(lp->lwp_cpumask, rdd->cpuid) == 0))
1931                 rdd = &dfly_pcpu[BSFCPUMASK(lp->lwp_cpumask)];
1932         if (usched_dfly_chooser > 0) {
1933                 --usched_dfly_chooser;          /* only N lines */
1934                 kprintf("lp %02d->%02d %s\n",
1935                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1936         }
1937         if (usched_dfly_debug == lp->lwp_proc->p_pid)
1938                 kprintf("final cpu %d\n", rdd->cpuid);
1939         return (rdd);
1940 }
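
/*
 * Summary of the per-node score computed above (illustrative, not part of
 * the original source; weightN refers to the usched_dfly_weightN knobs):
 *
 *	load  = sum of member uload + (member ucount * weight3)
 *	load -= weight4 for each member running a less important thread
 *	load -= lp's own contribution if lp is already accounted in the node
 *	load /= number of members considered         (average per cpu)
 *	load -= weight1 if lp's current cpu belongs to the node
 *	load -= phys_mem scaled by weight5           (favor big-memory nodes)
 *	load +/- weight2 for the wakeup-pairing (ipc) heuristic
 *
 * The child node with the lowest score is descended into until a single
 * cpu remains.
 */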
1941
1942 /*
1943  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1944  *
1945  * Choose the worst queue close to dd's cpu node with a non-empty runq
1946  * that is NOT dd.
1947  *
1948  * This is used by the thread chooser when the current cpu's queues are
1949  * empty to steal a thread from another cpu's queue.  We want to offload
1950  * the most heavily-loaded queue.
1951  *
1952  * However, we do not want to steal from far-away nodes that themselves
1953  * have idle cpus that are more suitable to distribute the far-away
1954  * thread to.
1955  */
1956 static
1957 dfly_pcpu_t
1958 dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit)
1959 {
1960         cpumask_t mask;
1961         cpu_node_t *cpup;
1962         cpu_node_t *cpun;
1963         cpu_node_t *cpub;
1964         dfly_pcpu_t rdd;
1965         int cpuid;
1966         int n;
1967         long load;
1968         long highest_load;
1969 #if 0
1970         int pri;
1971         int hpri;
1972 #endif
1973
1974         /*
1975          * When the topology is unknown we have no load information to
1976          * compare, so do not steal from anyone.
1977          */
1978         if (dd->cpunode == NULL) {
1979                 return (NULL);
1980         }
1981
1982         /*
1983          * When the topology is known choose a cpu whose group has, in
1984          * aggregate, the highest weighted load.
1985          */
1986         cpup = root_cpu_node;
1987         rdd = dd;
1988         while (cpup) {
1989                 /*
1990                  * Degenerate case super-root
1991                  */
1992                 if (cpup->child_no == 1) {
1993                         cpup = cpup->child_node[0];
1994                         continue;
1995                 }
1996
1997                 /*
1998                  * Terminal cpunode
1999                  */
2000                 if (cpup->child_no == 0) {
2001                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
2002                         break;
2003                 }
2004
2005                 cpub = NULL;
2006                 highest_load = -0x7FFFFFFFFFFFFFFFLL;
2007
2008                 for (n = 0; n < cpup->child_no; ++n) {
2009                         /*
2010                          * Accumulate load information for all cpus
2011                          * which are members of this node.
2012                          */
2013                         int count;
2014
2015                         cpun = cpup->child_node[n];
2016                         mask = cpun->members;
2017                         CPUMASK_ANDMASK(mask, usched_global_cpumask);
2018                         CPUMASK_ANDMASK(mask, smp_active_mask);
2019                         if (CPUMASK_TESTZERO(mask))
2020                                 continue;
2021
2022                         load = 0;
2023                         count = 0;
2024
2025                         while (CPUMASK_TESTNZERO(mask)) {
2026                                 cpuid = BSFCPUMASK(mask);
2027                                 rdd = &dfly_pcpu[cpuid];
2028
2029                                 load += rdd->uload;
2030                                 load += rdd->ucount * usched_dfly_weight3;
2031
2032 #if 0
2033                                 if (rdd->uschedcp == NULL &&
2034                                     rdd->runqcount == 0 &&
2035                                     rdd->gd->gd_tdrunqcount == 0
2036                                 ) {
2037                                         load += rdd->uload / 2;
2038                                         load += rdd->ucount *
2039                                                 usched_dfly_weight3 / 2;
2040                                 } else {
2041                                         load += rdd->uload;
2042                                         load += rdd->ucount *
2043                                                 usched_dfly_weight3;
2044                                 }
2045 #endif
2046                                 CPUMASK_NANDBIT(mask, cpuid);
2047                                 ++count;
2048                         }
2049                         load /= count;
2050
2051                         /*
2052                          * Advantage the cpu group (dd) is already on.
2053                          *
2054                          * When choosing the worst queue we reverse the
2055                          * sign, but only count half the weight.
2056                          *
2057                          * weight1 needs to be high enough to be stable,
2058                          * but this can also cause it to be too sticky,
2059                          * so the iterator which rebalances the load sets
2060                          * forceit to ignore it.
2061                          */
2062                         if (forceit == 0 &&
2063                             CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
2064                                 load += usched_dfly_weight1 / 2;
2065                         }
2066
2067                         /*
2068                          * Disadvantage nodes with more memory (same sign).
2069                          */
2070                         if (usched_dfly_node_mem) {
2071                                 load -= cpun->phys_mem * usched_dfly_weight5 /
2072                                         usched_dfly_node_mem;
2073                         }
2074
2075
2076                         /*
2077                          * The best candidate is the one with the worst
2078                          * (highest) load.
2079                          */
2080                         if (cpub == NULL || highest_load < load ||
2081                             (highest_load == load &&
2082                              CPUMASK_TESTMASK(cpun->members, dd->cpumask))) {
2083                                 highest_load = load;
2084                                 cpub = cpun;
2085                         }
2086                 }
2087                 cpup = cpub;
2088         }
2089
2090         /*
2091          * We never return our own node (dd), and only return a remote
2092  * node if its load is significantly worse than ours (i.e. where
2093          * stealing a thread would be considered reasonable).
2094          *
2095          * This also helps us avoid breaking paired threads apart which
2096          * can have disastrous effects on performance.
2097          */
2098         if (rdd == dd)
2099                 return(NULL);
2100
2101 #if 0
2102         hpri = 0;
2103         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
2104                 hpri = pri;
2105         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
2106                 hpri = pri;
2107         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
2108                 hpri = pri;
2109         hpri *= PPQ;
2110         if (rdd->uload - hpri < dd->uload + hpri)
2111                 return(NULL);
2112 #endif
2113         return (rdd);
2114 }
2115
2116 static
2117 dfly_pcpu_t
2118 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
2119 {
2120         dfly_pcpu_t rdd;
2121         cpumask_t tmpmask;
2122         cpumask_t mask;
2123         int cpubase;
2124         int cpuid;
2125
2126         /*
2127          * Fall back to the original heuristic: select a random cpu,
2128          * first checking the cpus not currently running a user thread.
2129          *
2130          * Use cpuid as the base cpu in our scan, first checking
2131          * cpuid...(ncpus-1), then 0...(cpuid-1).  This avoids favoring
2132          * lower-numbered cpus.
2133          */
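        /*
         * Wrap-around example (illustrative, not part of the original
         * source): CPUMASK_ASSBMASK(tmpmask, cpubase) covers cpus
         * 0..cpubase-1, so the inverted copy covers cpubase and up.  With
         * ncpus = 8 and cpubase = 5 the first pass below scans cpus 5-7
         * and the second pass (uninverted mask) scans cpus 0-4, which is
         * how the rotating start point avoids favoring cpu 0.
         */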
2134         ++dd->scancpu;          /* SMP race ok */
2135         mask = dfly_rdyprocmask;
2136         CPUMASK_NANDMASK(mask, dfly_curprocmask);
2137         CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2138         CPUMASK_ANDMASK(mask, smp_active_mask);
2139         CPUMASK_ANDMASK(mask, usched_global_cpumask);
2140
2141         cpubase = (int)(dd->scancpu % ncpus);
2142         CPUMASK_ASSBMASK(tmpmask, cpubase);
2143         CPUMASK_INVMASK(tmpmask);
2144         CPUMASK_ANDMASK(tmpmask, mask);
2145         while (CPUMASK_TESTNZERO(tmpmask)) {
2146                 cpuid = BSFCPUMASK(tmpmask);
2147                 rdd = &dfly_pcpu[cpuid];
2148
2149                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2150                         goto found;
2151                 CPUMASK_NANDBIT(tmpmask, cpuid);
2152         }
2153
2154         CPUMASK_ASSBMASK(tmpmask, cpubase);
2155         CPUMASK_ANDMASK(tmpmask, mask);
2156         while (CPUMASK_TESTNZERO(tmpmask)) {
2157                 cpuid = BSFCPUMASK(tmpmask);
2158                 rdd = &dfly_pcpu[cpuid];
2159
2160                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2161                         goto found;
2162                 CPUMASK_NANDBIT(tmpmask, cpuid);
2163         }
2164
2165         /*
2166          * Then cpus which might have a currently running lp
2167          */
2168         mask = dfly_rdyprocmask;
2169         CPUMASK_ANDMASK(mask, dfly_curprocmask);
2170         CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2171         CPUMASK_ANDMASK(mask, smp_active_mask);
2172         CPUMASK_ANDMASK(mask, usched_global_cpumask);
2173
2174         CPUMASK_ASSBMASK(tmpmask, cpubase);
2175         CPUMASK_INVMASK(tmpmask);
2176         CPUMASK_ANDMASK(tmpmask, mask);
2177         while (CPUMASK_TESTNZERO(tmpmask)) {
2178                 cpuid = BSFCPUMASK(tmpmask);
2179                 rdd = &dfly_pcpu[cpuid];
2180
2181                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2182                         goto found;
2183                 CPUMASK_NANDBIT(tmpmask, cpuid);
2184         }
2185
2186         CPUMASK_ASSBMASK(tmpmask, cpubase);
2187         CPUMASK_ANDMASK(tmpmask, mask);
2188         while (CPUMASK_TESTNZERO(tmpmask)) {
2189                 cpuid = BSFCPUMASK(tmpmask);
2190                 rdd = &dfly_pcpu[cpuid];
2191
2192                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2193                         goto found;
2194                 CPUMASK_NANDBIT(tmpmask, cpuid);
2195         }
2196
2197         /*
2198          * If we cannot find a suitable cpu we round-robin using scancpu.
2199          * Other cpus will pick up as they release their current lwps or
2200          * become ready.
2201          *
2202          * Avoid a degenerate system lockup case if usched_global_cpumask
2203          * is set to 0 or otherwise does not cover lwp_cpumask.
2204          *
2205          * We only kick the target helper thread in this case, we do not
2206          * set the user resched flag because
2207          */
2208         cpuid = cpubase;
2209         if (CPUMASK_TESTBIT(lp->lwp_cpumask, cpuid) == 0)
2210                 cpuid = BSFCPUMASK(lp->lwp_cpumask);
2211         else if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
2212                 cpuid = 0;
2213         rdd = &dfly_pcpu[cpuid];
2214 found:
2215         return (rdd);
2216 }
2217
2218 static
2219 void
2220 dfly_need_user_resched_remote(void *dummy)
2221 {
2222         globaldata_t gd = mycpu;
2223         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
2224
2225         /*
2226          * Flag reschedule needed
2227          */
2228         need_user_resched();
2229
2230         /*
2231          * If no user thread is currently running we need to kick the helper
2232          * on our cpu to recover.  Otherwise the cpu will never schedule
2233          * anything again.
2234          *
2235          * We cannot schedule the process ourselves because this is an
2236          * IPI callback and we cannot acquire spinlocks in an IPI callback.
2237          *
2238          * Call wakeup_mycpu to avoid sending IPIs to other CPUs
2239          */
2240         if (dd->uschedcp == NULL && (dd->flags & DFLY_PCPU_RDYMASK)) {
2241                 ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
2242                 dd->flags &= ~DFLY_PCPU_RDYMASK;
2243                 wakeup_mycpu(dd->helper_thread);
2244         }
2245 }
2246
2247 /*
2248  * dfly_remrunqueue_locked() removes a given process from the run queue
2249  * that it is on, clearing the queue busy bit if it becomes empty.
2250  *
2251  * Note that the user process scheduler is different from the LWKT scheduler.
2252  * The user process scheduler only manages user processes but it uses LWKT
2253  * underneath, and a user process operating in the kernel will often be
2254  * 'released' from our management.
2255  *
2256  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
2257  * to sleep or the lwp is moved to a different runq.
2258  */
2259 static void
2260 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2261 {
2262         struct rq *q;
2263         u_int32_t *which;
2264         u_int8_t pri;
2265
2266         KKASSERT(rdd->runqcount >= 0);
2267
2268         pri = lp->lwp_rqindex;
2269
2270         switch(lp->lwp_rqtype) {
2271         case RTP_PRIO_NORMAL:
2272                 q = &rdd->queues[pri];
2273                 which = &rdd->queuebits;
2274                 break;
2275         case RTP_PRIO_REALTIME:
2276         case RTP_PRIO_FIFO:
2277                 q = &rdd->rtqueues[pri];
2278                 which = &rdd->rtqueuebits;
2279                 break;
2280         case RTP_PRIO_IDLE:
2281                 q = &rdd->idqueues[pri];
2282                 which = &rdd->idqueuebits;
2283                 break;
2284         default:
2285                 panic("remrunqueue: invalid rtprio type");
2286                 /* NOT REACHED */
2287         }
2288         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
2289         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2290         TAILQ_REMOVE(q, lp, lwp_procq);
2291         --rdd->runqcount;
2292         if (TAILQ_EMPTY(q)) {
2293                 KASSERT((*which & (1 << pri)) != 0,
2294                         ("remrunqueue: remove from empty queue"));
2295                 *which &= ~(1 << pri);
2296         }
2297 }
2298
2299 /*
2300  * dfly_setrunqueue_locked()
2301  *
2302  * Add a process whose rqtype and rqindex have previously been calculated
2303  * onto the appropriate run queue.  The caller determines whether the
2304  * addition requires a reschedule on a cpu.
2305  *
2306  * NOTE:          Lower priorities are better priorities.
2307  *
2308  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
2309  *                sum of the rough lwp_priority for all running and runnable
2310  *                processes.  Lower priority processes (higher lwp_priority
2311  *                values) actually DO count as more load, not less, because
2312  *                these are the programs which require the most care with
2313  *                regards to cpu selection.
2314  */
2315 static void
2316 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2317 {
2318         u_int32_t *which;
2319         struct rq *q;
2320         int pri;
2321
2322         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
2323
2324         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
2325                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
2326                 atomic_add_long(&rdd->uload, lp->lwp_uload);
2327                 atomic_add_int(&rdd->ucount, 1);
2328         }
2329
2330         pri = lp->lwp_rqindex;
2331
2332         switch(lp->lwp_rqtype) {
2333         case RTP_PRIO_NORMAL:
2334                 q = &rdd->queues[pri];
2335                 which = &rdd->queuebits;
2336                 break;
2337         case RTP_PRIO_REALTIME:
2338         case RTP_PRIO_FIFO:
2339                 q = &rdd->rtqueues[pri];
2340                 which = &rdd->rtqueuebits;
2341                 break;
2342         case RTP_PRIO_IDLE:
2343                 q = &rdd->idqueues[pri];
2344                 which = &rdd->idqueuebits;
2345                 break;
2346         default:
2347                 panic("setrunqueue: invalid rtprio type");
2348                 /* NOT REACHED */
2349         }
2350
2351         /*
2352          * Place us on the selected queue.  Determine if we should be
2353          * placed at the head of the queue or at the end.
2354          *
2355          * We are placed at the tail if our round-robin count has expired,
2356          * or is about to expire and the system thinks its a good place to
2357          * or is about to expire and the system thinks it's a good place to
2358          * (it might be trying to pick up where it left off and we don't
2359          * want to interfere).
2360          */
2361         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2362         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2363         ++rdd->runqcount;
2364
2365         if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2366             (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2367              (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
2368         ) {
2369                 /*
2370                  * Place on tail
2371                  */
2372                 atomic_clear_int(&lp->lwp_thread->td_mpflags,
2373                                  TDF_MP_BATCH_DEMARC);
2374                 lp->lwp_rrcount = 0;
2375                 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2376         } else {
2377                 /*
2378                  * Retain rrcount and place on head.  Count is retained
2379                  * even if the queue is empty.
2380                  */
2381                 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2382         }
2383         *which |= 1 << pri;
2384 }
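
/*
 * Bitmap bookkeeping note (illustrative, not part of the original source):
 * the *which |= 1 << pri above mirrors the TAILQ insert, and
 * dfly_remrunqueue_locked() clears the bit again when a queue drains.
 * This lets dfly_chooseproc_locked() use bsfl()/bsrl() on three small
 * bitmaps instead of walking 3 * NQS possibly-empty tail queues.
 */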
2385
2386 /*
2387  * For SMP systems a user scheduler helper thread is created for each
2388  * cpu and is used to allow one cpu to wake up another for the purposes of
2389  * scheduling userland threads from setrunqueue().
2390  *
2391  * UP systems do not need the helper since there is only one cpu.
2392  *
2393  * We can't use the idle thread for this because we might block.
2394  * Additionally, doing things this way allows us to HLT idle cpus
2395  * on MP systems.
2396  */
2397 static void
2398 dfly_helper_thread(void *dummy)
2399 {
2400     globaldata_t gd;
2401     dfly_pcpu_t dd;
2402     dfly_pcpu_t rdd;
2403     struct lwp *nlp;
2404     cpumask_t mask;
2405     int cpuid;
2406
2407     gd = mycpu;
2408     cpuid = gd->gd_cpuid;       /* doesn't change */
2409     mask = gd->gd_cpumask;      /* doesn't change */
2410     dd = &dfly_pcpu[cpuid];
2411
2412     /*
2413      * Initial interlock: make sure all dfly_pcpu[] structures have
2414      * been initialized before proceeding.
2415      */
2416     lockmgr(&usched_dfly_config_lk, LK_SHARED);
2417     lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2418
2419     /*
2420      * Since we only want to be woken up when no user processes
2421      * are scheduled on a cpu, run at an ultra low priority.
2422      */
2423     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2424
2425     for (;;) {
2426         /*
2427          * We use the LWKT deschedule-interlock trick to avoid racing
2428          * dfly_rdyprocmask.  This means we cannot block through to the
2429          * manual lwkt_switch() call we make below.
2430          */
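        /*
         * Interlock sketch (illustrative, not part of the original
         * source): tsleep_interlock() registers us on the sleep queue for
         * dd->helper_thread before dfly_rdyprocmask is re-armed and
         * dd->spin is released, so a wakeup sent by another path in that
         * window is not lost; the tsleep(..., PINTERLOCKED, ...) at the
         * bottom of the loop then returns immediately instead of sleeping
         * forever.
         */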
2431         crit_enter_gd(gd);
2432         tsleep_interlock(dd->helper_thread, 0);
2433
2434         spin_lock(&dd->spin);
2435         if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2436                 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2437                 dd->flags |= DFLY_PCPU_RDYMASK;
2438         }
2439         clear_user_resched();   /* This satisfies the reschedule request */
2440 #if 0
2441         dd->rrcount = 0;        /* Reset the round-robin counter */
2442 #endif
2443
2444         if (dd->runqcount || dd->uschedcp != NULL) {
2445                 /*
2446                  * Threads are available.  A thread may or may not be
2447                  * currently scheduled.  Get the best thread already queued
2448                  * to this cpu.
2449                  */
2450                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2451                 if (nlp) {
2452                         if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2453                                 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2454                                 dd->flags |= DFLY_PCPU_CURMASK;
2455                         }
2456                         dd->upri = nlp->lwp_priority;
2457                         dd->uschedcp = nlp;
2458 #if 0
2459                         dd->rrcount = 0;        /* reset round robin */
2460 #endif
2461                         spin_unlock(&dd->spin);
2462                         lwkt_acquire(nlp->lwp_thread);
2463                         lwkt_schedule(nlp->lwp_thread);
2464                 } else {
2465                         /*
2466                          * This situation should not occur because we had
2467                          * at least one thread available.
2468                          */
2469                         spin_unlock(&dd->spin);
2470                 }
2471         } else if (usched_dfly_features & 0x01) {
2472                 /*
2473                  * This cpu is devoid of runnable threads; steal a thread
2474                  * from another cpu.  Since we're stealing, might as well
2475                  * load balance at the same time.
2476                  *
2477                  * We choose the highest-loaded thread from the worst queue.
2478                  *
2479                  * NOTE! This function only returns a non-NULL rdd when
2480                  *       another cpu's queue is obviously overloaded.  We
2481                  *       do not want to perform the type of rebalancing
2482                  *       the schedclock does here because it would result
2483                  *       in insane process pulling when 'steady' state is
2484                  *       partially unbalanced (e.g. 6 runnables and only
2485                  *       4 cores).
2486                  */
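                /*
                 * Steal guard (illustrative note, not from the original
                 * source): even when a worst queue is found we only pull
                 * from it when rdd's load exceeds our own by more than
                 * usched_dfly_weight6, and only via a non-blocking
                 * spin_trylock(), so this cpu does not block on a
                 * contended remote queue and marginal imbalances do not
                 * trigger steals.
                 */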
2487                 rdd = dfly_choose_worst_queue(dd, 0);
2488                 if (rdd && dd->uload + usched_dfly_weight6 < rdd->uload &&
2489                     spin_trylock(&rdd->spin)) {
2490                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2491                         spin_unlock(&rdd->spin);
2492                 } else {
2493                         nlp = NULL;
2494                 }
2495                 if (nlp) {
2496                         if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2497                                 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2498                                 dd->flags |= DFLY_PCPU_CURMASK;
2499                         }
2500                         dd->upri = nlp->lwp_priority;
2501                         dd->uschedcp = nlp;
2502 #if 0
2503                         dd->rrcount = 0;        /* reset round robin */
2504 #endif
2505                         spin_unlock(&dd->spin);
2506                         lwkt_acquire(nlp->lwp_thread);
2507                         lwkt_schedule(nlp->lwp_thread);
2508                 } else {
2509                         /*
2510                          * Leave the thread on our run queue.  Another
2511                          * scheduler will try to pull it later.
2512                          */
2513                         spin_unlock(&dd->spin);
2514                 }
2515         } else {
2516                 /*
2517                  * Devoid of runnable threads and not allowed to steal
2518                  * any.
2519                  */
2520                 spin_unlock(&dd->spin);
2521         }
2522
2523         /*
2524          * We're descheduled unless someone scheduled us.  Switch away.
2525          * Exiting the critical section will cause splz() to be called
2526          * for us if interrupts and such are pending.
2527          */
2528         crit_exit_gd(gd);
2529         tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
2530     }
2531 }
2532
2533 #if 0
2534 static int
2535 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2536 {
2537         int error, new_val;
2538
2539         new_val = usched_dfly_stick_to_level;
2540
2541         error = sysctl_handle_int(oidp, &new_val, 0, req);
2542         if (error != 0 || req->newptr == NULL)
2543                 return (error);
2544         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2545                 return (EINVAL);
2546         usched_dfly_stick_to_level = new_val;
2547         return (0);
2548 }
2549 #endif
2550
2551 /*
2552  * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2553  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2554  * we should not mess with it further.
2555  */
2556 static void
2557 usched_dfly_cpu_init(void)
2558 {
2559         int i;
2560         int j;
2561         int smt_not_supported = 0;
2562         int cache_coherent_not_supported = 0;
2563
2564         if (bootverbose)
2565                 kprintf("Start usched_dfly helpers on cpus:\n");
2566
2567         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2568         usched_dfly_sysctl_tree =
2569                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2570                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2571                                 "usched_dfly", CTLFLAG_RD, 0, "");
2572
2573         usched_dfly_node_mem = get_highest_node_memory();
2574
2575         lockmgr(&usched_dfly_config_lk, LK_EXCLUSIVE);
2576
2577         for (i = 0; i < ncpus; ++i) {
2578                 dfly_pcpu_t dd = &dfly_pcpu[i];
2579                 cpumask_t mask;
2580
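                /* Only set up and start helpers for cpus in smp_active_mask */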
2581                 CPUMASK_ASSBIT(mask, i);
2582                 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2583                     continue;
2584
2585                 spin_init(&dd->spin, "uschedcpuinit");
2586                 dd->cpunode = get_cpu_node_by_cpuid(i);
2587                 dd->cpuid = i;
2588                 dd->gd = globaldata_find(i);
2589                 CPUMASK_ASSBIT(dd->cpumask, i);
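                /* Per-cpu run queues: normal, real-time, and idle priority */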
2590                 for (j = 0; j < NQS; j++) {
2591                         TAILQ_INIT(&dd->queues[j]);
2592                         TAILQ_INIT(&dd->rtqueues[j]);
2593                         TAILQ_INIT(&dd->idqueues[j]);
2594                 }
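                /*
                 * curprocmask bit 0 was already cleared by rqinit(), so
                 * re-clearing it here is a harmless no-op; only cpu 0's
                 * per-cpu CURMASK flag still needs to be brought in sync.
                 */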
2595                 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2596                 if (i == 0)
2597                         dd->flags &= ~DFLY_PCPU_CURMASK;
2598
2599                 if (dd->cpunode == NULL) {
2600                         smt_not_supported = 1;
2601                         cache_coherent_not_supported = 1;
2602                         if (bootverbose)
2603                                 kprintf ("    cpu%d - WARNING: No CPU NODE "
2604                                          "found for cpu\n", i);
2605                 } else {
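                        /*
                         * Classify this cpu by its topology node type to
                         * decide whether SMT scheduling applies and, when
                         * booting verbose, report its siblings.
                         */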
2606                         switch (dd->cpunode->type) {
2607                         case THREAD_LEVEL:
2608                                 if (bootverbose)
2609                                         kprintf ("    cpu%d - HyperThreading "
2610                                                  "available. Core siblings: ",
2611                                                  i);
2612                                 break;
2613                         case CORE_LEVEL:
2614                                 smt_not_supported = 1;
2615
2616                                 if (bootverbose)
2617                                         kprintf ("    cpu%d - No HT available, "
2618                                                  "multi-core/physical "
2619                                                  "cpu. Physical siblings: ",
2620                                                  i);
2621                                 break;
2622                         case CHIP_LEVEL:
2623                                 smt_not_supported = 1;
2624
2625                                 if (bootverbose)
2626                                         kprintf ("    cpu%d - No HT available, "
2627                                                  "single-core/physical cpu. "
2628                                                  "Package siblings: ",
2629                                                  i);
2630                                 break;
2631                         default:
2632                                 /* Unknown node type: fall back to safe defaults */
2633                                 smt_not_supported = 1;
2634                                 cache_coherent_not_supported = 1;
2635                                 if (bootverbose)
2636                                         kprintf ("    cpu%d - Unknown cpunode->"
2637                                                  "type=%u. siblings: ",
2638                                                  i,
2639                                                  (u_int)dd->cpunode->type);
2640                                 break;
2641                         }
2642
2643                         if (bootverbose) {
2644                                 if (dd->cpunode->parent_node != NULL) {
2645                                         kprint_cpuset(&dd->cpunode->
2646                                                         parent_node->members);
2647                                         kprintf("\n");
2648                                 } else {
2649                                         kprintf(" no siblings\n");
2650                                 }
2651                         }
2652                 }
2653
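                /*
                 * Create the per-cpu scheduler helper thread ("usched %d"),
                 * bound to cpu i.
                 */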
2654                 lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2655                             0, i, "usched %d", i);
2656
2657                 /*
2658                  * Allow user scheduling on the target cpu.  cpu #0 has already
2659                  * been enabled in rqinit().
2660                  */
2661                 if (i) {
2662                         ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2663                         dd->flags &= ~DFLY_PCPU_CURMASK;
2664                 }
2665                 if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2666                         ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2667                         dd->flags |= DFLY_PCPU_RDYMASK;
2668                 }
2669                 dd->upri = PRIBASE_NULL;
2670
2671         }
2672
2673         /* usched_dfly sysctl configurable parameters */
2674
2675         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2676                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2677                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2678                        &usched_dfly_rrinterval, 0, "Round-robin interval");
2679         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2680                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2681                        OID_AUTO, "decay", CTLFLAG_RW,
2682                        &usched_dfly_decay, 0, "Extra decay when not running");
2683         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2684                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2685                        OID_AUTO, "ipc_smt", CTLFLAG_RW,
2686                        &usched_dfly_ipc_smt, 0, "Pair IPC on hyper-threads");
2687         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2688                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2689                        OID_AUTO, "ipc_same", CTLFLAG_RW,
2690                        &usched_dfly_ipc_same, 0, "Pair IPC on same thread");
2691
2692         /* Add enable/disable option for SMT scheduling if supported */
2693         if (smt_not_supported) {
2694                 usched_dfly_smt = 0;
2695                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2696                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2697                                   OID_AUTO, "smt", CTLFLAG_RD,
2698                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2699         } else {
2700                 usched_dfly_smt = 1;
2701                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2702                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2703                                OID_AUTO, "smt", CTLFLAG_RW,
2704                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2705         }
2706
2707         /*
2708          * Add enable/disable option for cache coherent scheduling
2709          * if supported
2710          */
2711         if (cache_coherent_not_supported) {
2712                 usched_dfly_cache_coherent = 0;
2713                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2714                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2715                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2716                                   "NOT SUPPORTED", 0,
2717                                   "Cache coherence NOT SUPPORTED");
2718         } else {
2719                 usched_dfly_cache_coherent = 1;
2720                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2721                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2722                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2723                                &usched_dfly_cache_coherent, 0,
2724                                "Enable/Disable cache coherent scheduling");
2725
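                /*
                 * Heuristic weights consulted when choosing a cpu on which
                 * to place or migrate a user thread; see the individual
                 * descriptions below.
                 */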
2726                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2727                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2728                                OID_AUTO, "weight1", CTLFLAG_RW,
2729                                &usched_dfly_weight1, 200,
2730                                "Weight selection for current cpu");
2731
2732                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2733                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2734                                OID_AUTO, "weight2", CTLFLAG_RW,
2735                                &usched_dfly_weight2, 180,
2736                                "Weight selection for wakefrom cpu");
2737
2738                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2739                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2740                                OID_AUTO, "weight3", CTLFLAG_RW,
2741                                &usched_dfly_weight3, 40,
2742                                "Weight selection for num threads on queue");
2743
2744                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2745                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2746                                OID_AUTO, "weight4", CTLFLAG_RW,
2747                                &usched_dfly_weight4, 160,
2748                                "Availability of other idle cpus");
2749
2750                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2751                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2752                                OID_AUTO, "weight5", CTLFLAG_RW,
2753                                &usched_dfly_weight5, 50,
2754                                "Memory attached to node");
2755
2756                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2757                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2758                                OID_AUTO, "weight6", CTLFLAG_RW,
2759                                &usched_dfly_weight6, 150,
2760                                "Transfer weight");
2761
2762                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2763                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2764                                OID_AUTO, "fast_resched", CTLFLAG_RW,
2765                                &usched_dfly_fast_resched, 0,
2766                                "Priority delta required to force a fast resched");
2767
2768                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2769                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2770                                OID_AUTO, "features", CTLFLAG_RW,
2771                                &usched_dfly_features, 0x8F,
2772                                "Allow pulls into empty queues");
2773
2774                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2775                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2776                                OID_AUTO, "swmask", CTLFLAG_RW,
2777                                &usched_dfly_swmask, ~PPQMASK,
2778                                "Queue mask to force thread switch");
2779
2780 #if 0
2781                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2782                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2783                                 OID_AUTO, "stick_to_level",
2784                                 CTLTYPE_INT | CTLFLAG_RW,
2785                                 NULL, sizeof usched_dfly_stick_to_level,
2786                                 sysctl_usched_dfly_stick_to_level, "I",
2787                                 "Stick a process to this level. See sysctl "
2788                                 "parameter hw.cpu_topology.level_description");
2789 #endif
2790         }
2791         lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2792 }
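/*
 * The knobs registered above appear under the kern.usched_dfly sysctl
 * node.  As an illustration only (hypothetical userland code, not part
 * of this file), they can be inspected or tuned via sysctlbyname(3):
 *
 *	int smt;
 *	size_t len = sizeof(smt);
 *
 *	sysctlbyname("kern.usched_dfly.smt", &smt, &len, NULL, 0);
 *	smt = 0;
 *	sysctlbyname("kern.usched_dfly.smt", NULL, NULL, &smt, sizeof(smt));
 *
 * or, equivalently, from the shell: sysctl kern.usched_dfly.smt=0
 */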
2793
2794 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2795         usched_dfly_cpu_init, NULL);