bc676d24894fd1bd1d9cc980c12046960924e2ea
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_estfast     lwp_usdata.dfly.estfast
97 #define lwp_uload       lwp_usdata.dfly.uload
98 #define lwp_rqtype      lwp_usdata.dfly.rqtype
99 #define lwp_qcpu        lwp_usdata.dfly.qcpu
100
101 struct usched_dfly_pcpu {
102         struct spinlock spin;
103         struct thread   helper_thread;
104         short           rrcount;
105         short           upri;
106         int             uload;
107         int             ucount;
108         struct lwp      *uschedcp;
109         struct rq       queues[NQS];
110         struct rq       rtqueues[NQS];
111         struct rq       idqueues[NQS];
112         u_int32_t       queuebits;
113         u_int32_t       rtqueuebits;
114         u_int32_t       idqueuebits;
115         int             runqcount;
116         int             cpuid;
117         cpumask_t       cpumask;
118 #ifdef SMP
119         cpu_node_t      *cpunode;
120 #endif
121 };
122
123 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
124
125 static void dfly_acquire_curproc(struct lwp *lp);
126 static void dfly_release_curproc(struct lwp *lp);
127 static void dfly_select_curproc(globaldata_t gd);
128 static void dfly_setrunqueue(struct lwp *lp);
129 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
130 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
131                                 sysclock_t cpstamp);
132 static void dfly_recalculate_estcpu(struct lwp *lp);
133 static void dfly_resetpriority(struct lwp *lp);
134 static void dfly_forking(struct lwp *plp, struct lwp *lp);
135 static void dfly_exiting(struct lwp *lp, struct proc *);
136 static void dfly_uload_update(struct lwp *lp);
137 static void dfly_yield(struct lwp *lp);
138 #ifdef SMP
139 static void dfly_changeqcpu_locked(struct lwp *lp,
140                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
141 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
142 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
143 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
144 #endif
145
146 #ifdef SMP
147 static void dfly_need_user_resched_remote(void *dummy);
148 #endif
149 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
150                                           struct lwp *chklp, int worst);
151 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
152 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
153
154 struct usched usched_dfly = {
155         { NULL },
156         "dfly", "Original DragonFly Scheduler",
157         NULL,                   /* default registration */
158         NULL,                   /* default deregistration */
159         dfly_acquire_curproc,
160         dfly_release_curproc,
161         dfly_setrunqueue,
162         dfly_schedulerclock,
163         dfly_recalculate_estcpu,
164         dfly_resetpriority,
165         dfly_forking,
166         dfly_exiting,
167         dfly_uload_update,
168         NULL,                   /* setcpumask not supported */
169         dfly_yield
170 };
171
172 /*
173  * We have NQS (32) run queues per scheduling class.  For the normal
174  * class, there are 128 priorities scaled onto these 32 queues.  New
175  * processes are added to the last entry in each queue, and processes
176  * are selected for running by taking them from the head and maintaining
177  * a simple FIFO arrangement.  Realtime and Idle priority processes have
178  * and explicit 0-31 priority which maps directly onto their class queue
179  * index.  When a queue has something in it, the corresponding bit is
180  * set in the queuebits variable, allowing a single read to determine
181  * the state of all 32 queues and then a ffs() to find the first busy
182  * queue.
183  */
184 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
185 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
186 #ifdef SMP
187 static volatile int dfly_scancpu;
188 #endif
189 static volatile int dfly_ucount;        /* total running on whole system */
190 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
191 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
192 static struct sysctl_oid *usched_dfly_sysctl_tree;
193
194 /* Debug info exposed through debug.* sysctl */
195
196 static int usched_dfly_debug = -1;
197 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
198            &usched_dfly_debug, 0,
199            "Print debug information for this pid");
200
201 static int usched_dfly_pid_debug = -1;
202 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
203            &usched_dfly_pid_debug, 0,
204            "Print KTR debug information for this pid");
205
206 static int usched_dfly_chooser = 0;
207 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
208            &usched_dfly_chooser, 0,
209            "Print KTR debug information for this pid");
210
211 /*
212  * Tunning usched_dfly - configurable through kern.usched_dfly.
213  *
214  * weight1 - Tries to keep threads on their current cpu.  If you
215  *           make this value too large the scheduler will not be
216  *           able to load-balance large loads.
217  *
218  * weight2 - If non-zero, detects thread pairs undergoing synchronous
219  *           communications and tries to move them closer together.
220  *           Behavior is adjusted by bit 4 of features (0x10).
221  *
222  *           WARNING!  Weight2 is a ridiculously sensitive parameter,
223  *           a small value is recommended.
224  *
225  * weight3 - Weighting based on the number of recently runnable threads
226  *           on the userland scheduling queue (ignoring their loads).
227  *           A nominal value here prevents high-priority (low-load)
228  *           threads from accumulating on one cpu core when other
229  *           cores are available.
230  *
231  *           This value should be left fairly small relative to weight1
232  *           and weight4.
233  *
234  * weight4 - Weighting based on other cpu queues being available
235  *           or running processes with higher lwp_priority's.
236  *
237  *           This allows a thread to migrate to another nearby cpu if it
238  *           is unable to run on the current cpu based on the other cpu
239  *           being idle or running a lower priority (higher lwp_priority)
240  *           thread.  This value should be large enough to override weight1
241  *
242  * features - These flags can be set or cleared to enable or disable various
243  *            features.
244  *
245  *            0x01      Enable idle-cpu pulling                 (default)
246  *            0x02      Enable proactive pushing                (default)
247  *            0x04      Enable rebalancing rover                (default)
248  *            0x08      Enable more proactive pushing           (default)
249  *            0x10      (flip weight2 limit on same cpu)        (default)
250  *            0x20      choose best cpu for forked process
251  *            0x40      choose current cpu for forked process
252  *            0x80      choose random cpu for forked process    (default)
253  */
254 #ifdef SMP
255 static int usched_dfly_smt = 0;
256 static int usched_dfly_cache_coherent = 0;
257 static int usched_dfly_weight1 = 200;   /* keep thread on current cpu */
258 static int usched_dfly_weight2 = 120;   /* synchronous peer's current cpu */
259 static int usched_dfly_weight3 = 40;    /* number of threads on queue */
260 static int usched_dfly_weight4 = 160;   /* availability of idle cores */
261 static int usched_dfly_features = 0x8F; /* allow pulls */
262 static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
263 #endif
264 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
265 static int usched_dfly_decay = 8;
266
267 /* KTR debug printings */
268
269 KTR_INFO_MASTER(usched);
270
271 #if !defined(KTR_USCHED_DFLY)
272 #define KTR_USCHED_DFLY KTR_ALL
273 #endif
274
275 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
276     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
277     pid_t pid, int old_cpuid, int curr);
278
279 /*
280  * This function is called when the kernel intends to return to userland.
281  * It is responsible for making the thread the current designated userland
282  * thread for this cpu, blocking if necessary.
283  *
284  * The kernel has already depressed our LWKT priority so we must not switch
285  * until we have either assigned or disposed of the thread.
286  *
287  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
288  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
289  * occur, this function is called only under very controlled circumstances.
290  */
291 static void
292 dfly_acquire_curproc(struct lwp *lp)
293 {
294         globaldata_t gd;
295         dfly_pcpu_t dd;
296 #ifdef SMP
297         dfly_pcpu_t rdd;
298 #endif
299         thread_t td;
300         int force_resched;
301
302         /*
303          * Make sure we aren't sitting on a tsleep queue.
304          */
305         td = lp->lwp_thread;
306         crit_enter_quick(td);
307         if (td->td_flags & TDF_TSLEEPQ)
308                 tsleep_remove(td);
309         dfly_recalculate_estcpu(lp);
310
311         gd = mycpu;
312         dd = &dfly_pcpu[gd->gd_cpuid];
313
314         /*
315          * Process any pending interrupts/ipi's, then handle reschedule
316          * requests.  dfly_release_curproc() will try to assign a new
317          * uschedcp that isn't us and otherwise NULL it out.
318          */
319         force_resched = 0;
320         if (user_resched_wanted()) {
321                 if (dd->uschedcp == lp)
322                         force_resched = 1;
323                 clear_user_resched();
324                 dfly_release_curproc(lp);
325         }
326
327         /*
328          * Loop until we are the current user thread.
329          *
330          * NOTE: dd spinlock not held at top of loop.
331          */
332         if (dd->uschedcp == lp)
333                 lwkt_yield_quick();
334
335         while (dd->uschedcp != lp) {
336                 lwkt_yield_quick();
337
338                 spin_lock(&dd->spin);
339
340                 /*
341                  * We are not or are no longer the current lwp and a forced
342                  * reschedule was requested.  Figure out the best cpu to
343                  * run on (our current cpu will be given significant weight).
344                  *
345                  * (if a reschedule was not requested we want to move this
346                  *  step after the uschedcp tests).
347                  */
348 #ifdef SMP
349                 if (force_resched &&
350                     (usched_dfly_features & 0x08) &&
351                     (rdd = dfly_choose_best_queue(lp)) != dd) {
352                         dfly_changeqcpu_locked(lp, dd, rdd);
353                         spin_unlock(&dd->spin);
354                         lwkt_deschedule(lp->lwp_thread);
355                         dfly_setrunqueue_dd(rdd, lp);
356                         lwkt_switch();
357                         gd = mycpu;
358                         dd = &dfly_pcpu[gd->gd_cpuid];
359                         continue;
360                 }
361 #endif
362
363                 /*
364                  * Either no reschedule was requested or the best queue was
365                  * dd, and no current process has been selected.  We can
366                  * trivially become the current lwp on the current cpu.
367                  */
368                 if (dd->uschedcp == NULL) {
369                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
370                         dd->uschedcp = lp;
371                         dd->upri = lp->lwp_priority;
372                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
373                         spin_unlock(&dd->spin);
374                         break;
375                 }
376
377                 /*
378                  * Can we steal the current designated user thread?
379                  *
380                  * If we do the other thread will stall when it tries to
381                  * return to userland, possibly rescheduling elsewhere.
382                  *
383                  * It is important to do a masked test to avoid the edge
384                  * case where two near-equal-priority threads are constantly
385                  * interrupting each other.  Since our context is the one
386                  * that is active NOW, we WANT to steal the uschedcp
387                  * designation and not switch-flap.
388                  */
389                 if (dd->uschedcp &&
390                    (dd->upri & ~PPQMASK) >=
391                    (lp->lwp_priority & ~PPQMASK)) {
392                         dd->uschedcp = lp;
393                         dd->upri = lp->lwp_priority;
394                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
395                         spin_unlock(&dd->spin);
396                         break;
397                 }
398
399 #ifdef SMP
400                 /*
401                  * We are not the current lwp, figure out the best cpu
402                  * to run on (our current cpu will be given significant
403                  * weight).  Loop on cpu change.
404                  */
405                 if ((usched_dfly_features & 0x02) &&
406                     force_resched == 0 &&
407                     (rdd = dfly_choose_best_queue(lp)) != dd) {
408                         dfly_changeqcpu_locked(lp, dd, rdd);
409                         spin_unlock(&dd->spin);
410                         lwkt_deschedule(lp->lwp_thread);
411                         dfly_setrunqueue_dd(rdd, lp);
412                         lwkt_switch();
413                         gd = mycpu;
414                         dd = &dfly_pcpu[gd->gd_cpuid];
415                         continue;
416                 }
417 #endif
418
419                 /*
420                  * We cannot become the current lwp, place the lp on the
421                  * run-queue of this or another cpu and deschedule ourselves.
422                  *
423                  * When we are reactivated we will have another chance.
424                  *
425                  * Reload after a switch or setrunqueue/switch possibly
426                  * moved us to another cpu.
427                  */
428                 spin_unlock(&dd->spin);
429                 lwkt_deschedule(lp->lwp_thread);
430                 dfly_setrunqueue_dd(dd, lp);
431                 lwkt_switch();
432                 gd = mycpu;
433                 dd = &dfly_pcpu[gd->gd_cpuid];
434         }
435
436         /*
437          * Make sure upri is synchronized, then yield to LWKT threads as
438          * needed before returning.  This could result in another reschedule.
439          * XXX
440          */
441         crit_exit_quick(td);
442
443         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
444 }
445
446 /*
447  * DFLY_RELEASE_CURPROC
448  *
449  * This routine detaches the current thread from the userland scheduler,
450  * usually because the thread needs to run or block in the kernel (at
451  * kernel priority) for a while.
452  *
453  * This routine is also responsible for selecting a new thread to
454  * make the current thread.
455  *
456  * NOTE: This implementation differs from the dummy example in that
457  * dfly_select_curproc() is able to select the current process, whereas
458  * dummy_select_curproc() is not able to select the current process.
459  * This means we have to NULL out uschedcp.
460  *
461  * Additionally, note that we may already be on a run queue if releasing
462  * via the lwkt_switch() in dfly_setrunqueue().
463  */
464 static void
465 dfly_release_curproc(struct lwp *lp)
466 {
467         globaldata_t gd = mycpu;
468         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
469
470         /*
471          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
472          * by wakeup().
473          */
474         if (dd->uschedcp == lp) {
475                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
476                 spin_lock(&dd->spin);
477                 if (dd->uschedcp == lp) {
478                         dd->uschedcp = NULL;    /* don't let lp be selected */
479                         dd->upri = PRIBASE_NULL;
480                         atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
481                         spin_unlock(&dd->spin);
482                         dfly_select_curproc(gd);
483                 } else {
484                         spin_unlock(&dd->spin);
485                 }
486         }
487 }
488
489 /*
490  * DFLY_SELECT_CURPROC
491  *
492  * Select a new current process for this cpu and clear any pending user
493  * reschedule request.  The cpu currently has no current process.
494  *
495  * This routine is also responsible for equal-priority round-robining,
496  * typically triggered from dfly_schedulerclock().  In our dummy example
497  * all the 'user' threads are LWKT scheduled all at once and we just
498  * call lwkt_switch().
499  *
500  * The calling process is not on the queue and cannot be selected.
501  */
502 static
503 void
504 dfly_select_curproc(globaldata_t gd)
505 {
506         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
507         struct lwp *nlp;
508         int cpuid = gd->gd_cpuid;
509
510         crit_enter_gd(gd);
511
512         spin_lock(&dd->spin);
513         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
514
515         if (nlp) {
516                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
517                 dd->upri = nlp->lwp_priority;
518                 dd->uschedcp = nlp;
519                 dd->rrcount = 0;                /* reset round robin */
520                 spin_unlock(&dd->spin);
521 #ifdef SMP
522                 lwkt_acquire(nlp->lwp_thread);
523 #endif
524                 lwkt_schedule(nlp->lwp_thread);
525         } else {
526                 spin_unlock(&dd->spin);
527         }
528         crit_exit_gd(gd);
529 }
530
531 /*
532  * Place the specified lwp on the user scheduler's run queue.  This routine
533  * must be called with the thread descheduled.  The lwp must be runnable.
534  * It must not be possible for anyone else to explicitly schedule this thread.
535  *
536  * The thread may be the current thread as a special case.
537  */
538 static void
539 dfly_setrunqueue(struct lwp *lp)
540 {
541         dfly_pcpu_t dd;
542         dfly_pcpu_t rdd;
543
544         /*
545          * First validate the process LWKT state.
546          */
547         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
548         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
549             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
550              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
551         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
552
553         /*
554          * NOTE: dd/rdd do not necessarily represent the current cpu.
555          *       Instead they may represent the cpu the thread was last
556          *       scheduled on or inherited by its parent.
557          */
558         dd = &dfly_pcpu[lp->lwp_qcpu];
559         rdd = dd;
560
561         /*
562          * This process is not supposed to be scheduled anywhere or assigned
563          * as the current process anywhere.  Assert the condition.
564          */
565         KKASSERT(rdd->uschedcp != lp);
566
567 #ifndef SMP
568         /*
569          * If we are not SMP we do not have a scheduler helper to kick
570          * and must directly activate the process if none are scheduled.
571          *
572          * This is really only an issue when bootstrapping init since
573          * the caller in all other cases will be a user process, and
574          * even if released (rdd->uschedcp == NULL), that process will
575          * kickstart the scheduler when it returns to user mode from
576          * the kernel.
577          *
578          * NOTE: On SMP we can't just set some other cpu's uschedcp.
579          */
580         if (rdd->uschedcp == NULL) {
581                 spin_lock(&rdd->spin);
582                 if (rdd->uschedcp == NULL) {
583                         atomic_set_cpumask(&dfly_curprocmask, 1);
584                         rdd->uschedcp = lp;
585                         rdd->upri = lp->lwp_priority;
586                         spin_unlock(&rdd->spin);
587                         lwkt_schedule(lp->lwp_thread);
588                         return;
589                 }
590                 spin_unlock(&rdd->spin);
591         }
592 #endif
593
594 #ifdef SMP
595         /*
596          * Ok, we have to setrunqueue some target cpu and request a reschedule
597          * if necessary.
598          *
599          * We have to choose the best target cpu.  It might not be the current
600          * target even if the current cpu has no running user thread (for
601          * example, because the current cpu might be a hyperthread and its
602          * sibling has a thread assigned).
603          *
604          * If we just forked it is most optimal to run the child on the same
605          * cpu just in case the parent decides to wait for it (thus getting
606          * off that cpu).  As long as there is nothing else runnable on the
607          * cpu, that is.  If we did this unconditionally a parent forking
608          * multiple children before waiting (e.g. make -j N) leaves other
609          * cpus idle that could be working.
610          */
611         if (lp->lwp_forked) {
612                 lp->lwp_forked = 0;
613                 if (usched_dfly_features & 0x20)
614                         rdd = dfly_choose_best_queue(lp);
615                 else if (usched_dfly_features & 0x40)
616                         rdd = &dfly_pcpu[lp->lwp_qcpu];
617                 else if (usched_dfly_features & 0x80)
618                         rdd = dfly_choose_queue_simple(rdd, lp);
619                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
620                         rdd = dfly_choose_best_queue(lp);
621                 else
622                         rdd = &dfly_pcpu[lp->lwp_qcpu];
623         } else {
624                 rdd = dfly_choose_best_queue(lp);
625                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
626         }
627         if (lp->lwp_qcpu != rdd->cpuid) {
628                 spin_lock(&dd->spin);
629                 dfly_changeqcpu_locked(lp, dd, rdd);
630                 spin_unlock(&dd->spin);
631         }
632 #endif
633         dfly_setrunqueue_dd(rdd, lp);
634 }
635
636 #ifdef SMP
637
638 /*
639  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
640  * spin-locked on-call.  rdd does not have to be.
641  */
642 static void
643 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
644 {
645         if (lp->lwp_qcpu != rdd->cpuid) {
646                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
647                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
648                         atomic_add_int(&dd->uload, -lp->lwp_uload);
649                         atomic_add_int(&dd->ucount, -1);
650                         atomic_add_int(&dfly_ucount, -1);
651                 }
652                 lp->lwp_qcpu = rdd->cpuid;
653         }
654 }
655
656 #endif
657
658 /*
659  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
660  * also performs all necessary ancillary notification actions.
661  */
662 static void
663 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
664 {
665 #ifdef SMP
666         globaldata_t rgd;
667
668         /*
669          * We might be moving the lp to another cpu's run queue, and once
670          * on the runqueue (even if it is our cpu's), another cpu can rip
671          * it away from us.
672          *
673          * TDF_MIGRATING might already be set if this is part of a
674          * remrunqueue+setrunqueue sequence.
675          */
676         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
677                 lwkt_giveaway(lp->lwp_thread);
678
679         rgd = globaldata_find(rdd->cpuid);
680
681         /*
682          * We lose control of the lp the moment we release the spinlock
683          * after having placed it on the queue.  i.e. another cpu could pick
684          * it up, or it could exit, or its priority could be further
685          * adjusted, or something like that.
686          *
687          * WARNING! rdd can point to a foreign cpu!
688          */
689         spin_lock(&rdd->spin);
690         dfly_setrunqueue_locked(rdd, lp);
691
692         if (rgd == mycpu) {
693                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
694                         spin_unlock(&rdd->spin);
695                         if (rdd->uschedcp == NULL) {
696                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
697                                 need_user_resched();
698                         } else {
699                                 need_user_resched();
700                         }
701                 } else {
702                         spin_unlock(&rdd->spin);
703                 }
704         } else {
705                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
706                         spin_unlock(&rdd->spin);
707                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
708                                        NULL);
709                 } else if (dfly_rdyprocmask & rgd->gd_cpumask) {
710                         atomic_clear_cpumask(&dfly_rdyprocmask,
711                                              rgd->gd_cpumask);
712                         spin_unlock(&rdd->spin);
713                         wakeup(&rdd->helper_thread);
714                 } else {
715                         spin_unlock(&rdd->spin);
716                 }
717         }
718 #else
719         /*
720          * Request a reschedule if appropriate.
721          */
722         spin_lock(&rdd->spin);
723         dfly_setrunqueue_locked(rdd, lp);
724         spin_unlock(&rdd->spin);
725         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
726                 need_user_resched();
727         }
728 #endif
729 }
730
731 /*
732  * This routine is called from a systimer IPI.  It MUST be MP-safe and
733  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
734  * each cpu.
735  */
736 static
737 void
738 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
739 {
740         globaldata_t gd = mycpu;
741         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
742
743         /*
744          * Spinlocks also hold a critical section so there should not be
745          * any active.
746          */
747         KKASSERT(gd->gd_spinlocks == 0);
748
749         if (lp == NULL)
750                 return;
751
752         /*
753          * Do we need to round-robin?  We round-robin 10 times a second.
754          * This should only occur for cpu-bound batch processes.
755          */
756         if (++dd->rrcount >= usched_dfly_rrinterval) {
757                 lp->lwp_thread->td_wakefromcpu = -1;
758                 dd->rrcount = 0;
759                 need_user_resched();
760         }
761
762         /*
763          * Adjust estcpu upward using a real time equivalent calculation,
764          * and recalculate lp's priority.
765          */
766         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
767         dfly_resetpriority(lp);
768
769         /*
770          * Rebalance two cpus every 8 ticks, pulling the worst thread
771          * from the worst cpu's queue into a rotating cpu number.
772          *
773          * This mechanic is needed because the push algorithms can
774          * steady-state in an non-optimal configuration.  We need to mix it
775          * up a little, even if it means breaking up a paired thread, so
776          * the push algorithms can rebalance the degenerate conditions.
777          * This portion of the algorithm exists to ensure stability at the
778          * selected weightings.
779          *
780          * Because we might be breaking up optimal conditions we do not want
781          * to execute this too quickly, hence we only rebalance approximately
782          * ~7-8 times per second.  The push's, on the otherhand, are capable
783          * moving threads to other cpus at a much higher rate.
784          *
785          * We choose the most heavily loaded thread from the worst queue
786          * in order to ensure that multiple heavy-weight threads on the same
787          * queue get broken up, and also because these threads are the most
788          * likely to be able to remain in place.  Hopefully then any pairings,
789          * if applicable, migrate to where these threads are.
790          */
791 #ifdef SMP
792         if ((usched_dfly_features & 0x04) &&
793             ((u_int)sched_ticks & 7) == 0 &&
794             (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
795                 /*
796                  * Our cpu is up.
797                  */
798                 struct lwp *nlp;
799                 dfly_pcpu_t rdd;
800
801                 rdd = dfly_choose_worst_queue(dd);
802                 if (rdd) {
803                         spin_lock(&dd->spin);
804                         if (spin_trylock(&rdd->spin)) {
805                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
806                                 spin_unlock(&rdd->spin);
807                                 if (nlp == NULL)
808                                         spin_unlock(&dd->spin);
809                         } else {
810                                 spin_unlock(&dd->spin);
811                                 nlp = NULL;
812                         }
813                 } else {
814                         nlp = NULL;
815                 }
816                 /* dd->spin held if nlp != NULL */
817
818                 /*
819                  * Either schedule it or add it to our queue.
820                  */
821                 if (nlp &&
822                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
823                         atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
824                         dd->upri = nlp->lwp_priority;
825                         dd->uschedcp = nlp;
826                         dd->rrcount = 0;        /* reset round robin */
827                         spin_unlock(&dd->spin);
828                         lwkt_acquire(nlp->lwp_thread);
829                         lwkt_schedule(nlp->lwp_thread);
830                 } else if (nlp) {
831                         dfly_setrunqueue_locked(dd, nlp);
832                         spin_unlock(&dd->spin);
833                 }
834         }
835 #endif
836 }
837
838 /*
839  * Called from acquire and from kern_synch's one-second timer (one of the
840  * callout helper threads) with a critical section held.
841  *
842  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
843  * overall system load.
844  *
845  * Note that no recalculation occurs for a process which sleeps and wakes
846  * up in the same tick.  That is, a system doing thousands of context
847  * switches per second will still only do serious estcpu calculations
848  * ESTCPUFREQ times per second.
849  */
850 static
851 void
852 dfly_recalculate_estcpu(struct lwp *lp)
853 {
854         globaldata_t gd = mycpu;
855         sysclock_t cpbase;
856         sysclock_t ttlticks;
857         int estcpu;
858         int decay_factor;
859         int ucount;
860
861         /*
862          * We have to subtract periodic to get the last schedclock
863          * timeout time, otherwise we would get the upcoming timeout.
864          * Keep in mind that a process can migrate between cpus and
865          * while the scheduler clock should be very close, boundary
866          * conditions could lead to a small negative delta.
867          */
868         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
869
870         if (lp->lwp_slptime > 1) {
871                 /*
872                  * Too much time has passed, do a coarse correction.
873                  */
874                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
875                 dfly_resetpriority(lp);
876                 lp->lwp_cpbase = cpbase;
877                 lp->lwp_cpticks = 0;
878                 lp->lwp_estfast = 0;
879         } else if (lp->lwp_cpbase != cpbase) {
880                 /*
881                  * Adjust estcpu if we are in a different tick.  Don't waste
882                  * time if we are in the same tick.
883                  *
884                  * First calculate the number of ticks in the measurement
885                  * interval.  The ttlticks calculation can wind up 0 due to
886                  * a bug in the handling of lwp_slptime  (as yet not found),
887                  * so make sure we do not get a divide by 0 panic.
888                  */
889                 ttlticks = (cpbase - lp->lwp_cpbase) /
890                            gd->gd_schedclock.periodic;
891                 if (ttlticks < 0) {
892                         ttlticks = 0;
893                         lp->lwp_cpbase = cpbase;
894                 }
895                 if (ttlticks == 0)
896                         return;
897                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
898
899                 /*
900                  * Calculate the percentage of one cpu being used then
901                  * compensate for any system load in excess of ncpus.
902                  *
903                  * For example, if we have 8 cores and 16 running cpu-bound
904                  * processes then all things being equal each process will
905                  * get 50% of one cpu.  We need to pump this value back
906                  * up to 100% so the estcpu calculation properly adjusts
907                  * the process's dynamic priority.
908                  *
909                  * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
910                  */
911                 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
912                 ucount = dfly_ucount;
913                 if (ucount > ncpus) {
914                         estcpu += estcpu * (ucount - ncpus) / ncpus;
915                 }
916
917                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
918                         kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
919                                 lp->lwp_proc->p_pid, lp,
920                                 estcpu, lp->lwp_estcpu,
921                                 lp->lwp_cpticks, ttlticks);
922                 }
923
924                 /*
925                  * Adjust lp->lwp_esetcpu.  The decay factor determines how
926                  * quickly lwp_estcpu collapses to its realtime calculation.
927                  * A slower collapse gives us a more accurate number over
928                  * the long term but can create problems with bursty threads
929                  * or threads which become cpu hogs.
930                  *
931                  * To solve this problem, newly started lwps and lwps which
932                  * are restarting after having been asleep for a while are
933                  * given a much, much faster decay in order to quickly
934                  * detect whether they become cpu-bound.
935                  *
936                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
937                  *       and not here, but we must still ensure that a
938                  *       cpu-bound nice -20 process does not completely
939                  *       override a cpu-bound nice +20 process.
940                  *
941                  * NOTE: We must use ESTCPULIM() here to deal with any
942                  *       overshoot.
943                  */
944                 decay_factor = usched_dfly_decay;
945                 if (decay_factor < 1)
946                         decay_factor = 1;
947                 if (decay_factor > 1024)
948                         decay_factor = 1024;
949
950                 if (lp->lwp_estfast < usched_dfly_decay) {
951                         ++lp->lwp_estfast;
952                         lp->lwp_estcpu = ESTCPULIM(
953                                 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
954                                 (lp->lwp_estfast + 1));
955                 } else {
956                         lp->lwp_estcpu = ESTCPULIM(
957                                 (lp->lwp_estcpu * decay_factor + estcpu) /
958                                 (decay_factor + 1));
959                 }
960
961                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
962                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
963                 dfly_resetpriority(lp);
964                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
965                 lp->lwp_cpticks = 0;
966         }
967 }
968
969 /*
970  * Compute the priority of a process when running in user mode.
971  * Arrange to reschedule if the resulting priority is better
972  * than that of the current process.
973  *
974  * This routine may be called with any process.
975  *
976  * This routine is called by fork1() for initial setup with the process
977  * of the run queue, and also may be called normally with the process on or
978  * off the run queue.
979  */
980 static void
981 dfly_resetpriority(struct lwp *lp)
982 {
983         dfly_pcpu_t rdd;
984         int newpriority;
985         u_short newrqtype;
986         int rcpu;
987         int checkpri;
988         int estcpu;
989         int delta_uload;
990
991         crit_enter();
992
993         /*
994          * Lock the scheduler (lp) belongs to.  This can be on a different
995          * cpu.  Handle races.  This loop breaks out with the appropriate
996          * rdd locked.
997          */
998         for (;;) {
999                 rcpu = lp->lwp_qcpu;
1000                 cpu_ccfence();
1001                 rdd = &dfly_pcpu[rcpu];
1002                 spin_lock(&rdd->spin);
1003                 if (rcpu == lp->lwp_qcpu)
1004                         break;
1005                 spin_unlock(&rdd->spin);
1006         }
1007
1008         /*
1009          * Calculate the new priority and queue type
1010          */
1011         newrqtype = lp->lwp_rtprio.type;
1012
1013         switch(newrqtype) {
1014         case RTP_PRIO_REALTIME:
1015         case RTP_PRIO_FIFO:
1016                 newpriority = PRIBASE_REALTIME +
1017                              (lp->lwp_rtprio.prio & PRIMASK);
1018                 break;
1019         case RTP_PRIO_NORMAL:
1020                 /*
1021                  *
1022                  */
1023                 estcpu = lp->lwp_estcpu;
1024
1025                 /*
1026                  * p_nice piece         Adds (0-40) * 2         0-80
1027                  * estcpu               Adds 16384  * 4 / 512   0-128
1028                  */
1029                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1030                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1031                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1032                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1033                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1034                 break;
1035         case RTP_PRIO_IDLE:
1036                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1037                 break;
1038         case RTP_PRIO_THREAD:
1039                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1040                 break;
1041         default:
1042                 panic("Bad RTP_PRIO %d", newrqtype);
1043                 /* NOT REACHED */
1044         }
1045
1046         /*
1047          * The LWKT scheduler doesn't dive usched structures, give it a hint
1048          * on the relative priority of user threads running in the kernel.
1049          * The LWKT scheduler will always ensure that a user thread running
1050          * in the kernel will get cpu some time, regardless of its upri,
1051          * but can decide not to instantly switch from one kernel or user
1052          * mode user thread to a kernel-mode user thread when it has a less
1053          * desireable user priority.
1054          *
1055          * td_upri has normal sense (higher values are more desireable), so
1056          * negate it.
1057          */
1058         lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1059
1060         /*
1061          * The newpriority incorporates the queue type so do a simple masked
1062          * check to determine if the process has moved to another queue.  If
1063          * it has, and it is currently on a run queue, then move it.
1064          *
1065          * Since uload is ~PPQMASK masked, no modifications are necessary if
1066          * we end up in the same run queue.
1067          */
1068         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1069                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1070                         dfly_remrunqueue_locked(rdd, lp);
1071                         lp->lwp_priority = newpriority;
1072                         lp->lwp_rqtype = newrqtype;
1073                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1074                         dfly_setrunqueue_locked(rdd, lp);
1075                         checkpri = 1;
1076                 } else {
1077                         lp->lwp_priority = newpriority;
1078                         lp->lwp_rqtype = newrqtype;
1079                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1080                         checkpri = 0;
1081                 }
1082         } else {
1083                 /*
1084                  * In the same PPQ, uload cannot change.
1085                  */
1086                 lp->lwp_priority = newpriority;
1087                 checkpri = 1;
1088                 rcpu = -1;
1089         }
1090
1091         /*
1092          * Adjust effective load.
1093          *
1094          * Calculate load then scale up or down geometrically based on p_nice.
1095          * Processes niced up (positive) are less important, and processes
1096          * niced downard (negative) are more important.  The higher the uload,
1097          * the more important the thread.
1098          */
1099         /* 0-511, 0-100% cpu */
1100         delta_uload = lp->lwp_estcpu / NQS;
1101         delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1102
1103
1104         delta_uload -= lp->lwp_uload;
1105         lp->lwp_uload += delta_uload;
1106         if (lp->lwp_mpflags & LWP_MP_ULOAD)
1107                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1108
1109         /*
1110          * Determine if we need to reschedule the target cpu.  This only
1111          * occurs if the LWP is already on a scheduler queue, which means
1112          * that idle cpu notification has already occured.  At most we
1113          * need only issue a need_user_resched() on the appropriate cpu.
1114          *
1115          * The LWP may be owned by a CPU different from the current one,
1116          * in which case dd->uschedcp may be modified without an MP lock
1117          * or a spinlock held.  The worst that happens is that the code
1118          * below causes a spurious need_user_resched() on the target CPU
1119          * and dd->pri to be wrong for a short period of time, both of
1120          * which are harmless.
1121          *
1122          * If checkpri is 0 we are adjusting the priority of the current
1123          * process, possibly higher (less desireable), so ignore the upri
1124          * check which will fail in that case.
1125          */
1126         if (rcpu >= 0) {
1127                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1128                     (checkpri == 0 ||
1129                      (rdd->upri & ~PRIMASK) >
1130                      (lp->lwp_priority & ~PRIMASK))) {
1131 #ifdef SMP
1132                         if (rcpu == mycpu->gd_cpuid) {
1133                                 spin_unlock(&rdd->spin);
1134                                 need_user_resched();
1135                         } else {
1136                                 atomic_clear_cpumask(&dfly_rdyprocmask,
1137                                                      CPUMASK(rcpu));
1138                                 spin_unlock(&rdd->spin);
1139                                 lwkt_send_ipiq(globaldata_find(rcpu),
1140                                                dfly_need_user_resched_remote,
1141                                                NULL);
1142                         }
1143 #else
1144                         spin_unlock(&rdd->spin);
1145                         need_user_resched();
1146 #endif
1147                 } else {
1148                         spin_unlock(&rdd->spin);
1149                 }
1150         } else {
1151                 spin_unlock(&rdd->spin);
1152         }
1153         crit_exit();
1154 }
1155
1156 static
1157 void
1158 dfly_yield(struct lwp *lp)
1159 {
1160 #if 0
1161         /* FUTURE (or something similar) */
1162         switch(lp->lwp_rqtype) {
1163         case RTP_PRIO_NORMAL:
1164                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1165                 break;
1166         default:
1167                 break;
1168         }
1169 #endif
1170         need_user_resched();
1171 }
1172
1173 /*
1174  * Called from fork1() when a new child process is being created.
1175  *
1176  * Give the child process an initial estcpu that is more batch then
1177  * its parent and dock the parent for the fork (but do not
1178  * reschedule the parent).
1179  *
1180  * fast
1181  *
1182  * XXX lwp should be "spawning" instead of "forking"
1183  */
1184 static void
1185 dfly_forking(struct lwp *plp, struct lwp *lp)
1186 {
1187         /*
1188          * Put the child 4 queue slots (out of 32) higher than the parent
1189          * (less desireable than the parent).
1190          */
1191         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1192         lp->lwp_forked = 1;
1193         lp->lwp_estfast = 0;
1194
1195         /*
1196          * Dock the parent a cost for the fork, protecting us from fork
1197          * bombs.  If the parent is forking quickly make the child more
1198          * batchy.
1199          */
1200         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1201 }
1202
1203 /*
1204  * Called when a lwp is being removed from this scheduler, typically
1205  * during lwp_exit().  We have to clean out any ULOAD accounting before
1206  * we can let the lp go.  The dd->spin lock is not needed for uload
1207  * updates.
1208  *
1209  * Scheduler dequeueing has already occurred, no further action in that
1210  * regard is needed.
1211  */
1212 static void
1213 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1214 {
1215         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1216
1217         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1218                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1219                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1220                 atomic_add_int(&dd->ucount, -1);
1221                 atomic_add_int(&dfly_ucount, -1);
1222         }
1223 }
1224
1225 /*
1226  * This function cannot block in any way, but spinlocks are ok.
1227  *
1228  * Update the uload based on the state of the thread (whether it is going
1229  * to sleep or running again).  The uload is meant to be a longer-term
1230  * load and not an instantanious load.
1231  */
1232 static void
1233 dfly_uload_update(struct lwp *lp)
1234 {
1235         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1236
1237         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1238                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1239                         spin_lock(&dd->spin);
1240                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1241                                 atomic_set_int(&lp->lwp_mpflags,
1242                                                LWP_MP_ULOAD);
1243                                 atomic_add_int(&dd->uload, lp->lwp_uload);
1244                                 atomic_add_int(&dd->ucount, 1);
1245                                 atomic_add_int(&dfly_ucount, 1);
1246                         }
1247                         spin_unlock(&dd->spin);
1248                 }
1249         } else if (lp->lwp_slptime > 0) {
1250                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1251                         spin_lock(&dd->spin);
1252                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1253                                 atomic_clear_int(&lp->lwp_mpflags,
1254                                                  LWP_MP_ULOAD);
1255                                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1256                                 atomic_add_int(&dd->ucount, -1);
1257                                 atomic_add_int(&dfly_ucount, -1);
1258                         }
1259                         spin_unlock(&dd->spin);
1260                 }
1261         }
1262 }
1263
1264 /*
1265  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1266  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1267  * has a better or equal priority then the process that would otherwise be
1268  * chosen, NULL is returned.
1269  *
1270  * Until we fix the RUNQ code the chklp test has to be strict or we may
1271  * bounce between processes trying to acquire the current process designation.
1272  *
1273  * Must be called with rdd->spin locked.  The spinlock is left intact through
1274  * the entire routine.  dd->spin does not have to be locked.
1275  *
1276  * If worst is non-zero this function finds the worst thread instead of the
1277  * best thread (used by the schedulerclock-based rover).
1278  */
1279 static
1280 struct lwp *
1281 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1282                        struct lwp *chklp, int worst)
1283 {
1284         struct lwp *lp;
1285         struct rq *q;
1286         u_int32_t *which, *which2;
1287         u_int32_t pri;
1288         u_int32_t rtqbits;
1289         u_int32_t tsqbits;
1290         u_int32_t idqbits;
1291
1292         rtqbits = rdd->rtqueuebits;
1293         tsqbits = rdd->queuebits;
1294         idqbits = rdd->idqueuebits;
1295
1296         if (worst) {
1297                 if (idqbits) {
1298                         pri = bsrl(idqbits);
1299                         q = &rdd->idqueues[pri];
1300                         which = &rdd->idqueuebits;
1301                         which2 = &idqbits;
1302                 } else if (tsqbits) {
1303                         pri = bsrl(tsqbits);
1304                         q = &rdd->queues[pri];
1305                         which = &rdd->queuebits;
1306                         which2 = &tsqbits;
1307                 } else if (rtqbits) {
1308                         pri = bsrl(rtqbits);
1309                         q = &rdd->rtqueues[pri];
1310                         which = &rdd->rtqueuebits;
1311                         which2 = &rtqbits;
1312                 } else {
1313                         return (NULL);
1314                 }
1315                 lp = TAILQ_LAST(q, rq);
1316         } else {
1317                 if (rtqbits) {
1318                         pri = bsfl(rtqbits);
1319                         q = &rdd->rtqueues[pri];
1320                         which = &rdd->rtqueuebits;
1321                         which2 = &rtqbits;
1322                 } else if (tsqbits) {
1323                         pri = bsfl(tsqbits);
1324                         q = &rdd->queues[pri];
1325                         which = &rdd->queuebits;
1326                         which2 = &tsqbits;
1327                 } else if (idqbits) {
1328                         pri = bsfl(idqbits);
1329                         q = &rdd->idqueues[pri];
1330                         which = &rdd->idqueuebits;
1331                         which2 = &idqbits;
1332                 } else {
1333                         return (NULL);
1334                 }
1335                 lp = TAILQ_FIRST(q);
1336         }
1337         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1338
1339         /*
1340          * If the passed lwp <chklp> is reasonably close to the selected
1341          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1342          *
1343          * Note that we must error on the side of <chklp> to avoid bouncing
1344          * between threads in the acquire code.
1345          */
1346         if (chklp) {
1347                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1348                         return(NULL);
1349         }
1350
1351         KTR_COND_LOG(usched_chooseproc,
1352             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1353             lp->lwp_proc->p_pid,
1354             lp->lwp_thread->td_gd->gd_cpuid,
1355             mycpu->gd_cpuid);
1356
1357         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1358         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1359         TAILQ_REMOVE(q, lp, lwp_procq);
1360         --rdd->runqcount;
1361         if (TAILQ_EMPTY(q))
1362                 *which &= ~(1 << pri);
1363
1364         /*
1365          * If we are choosing a process from rdd with the intent to
1366          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1367          * is still held.
1368          */
1369         if (rdd != dd) {
1370                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1371                         atomic_add_int(&rdd->uload, -lp->lwp_uload);
1372                         atomic_add_int(&rdd->ucount, -1);
1373                         atomic_add_int(&dfly_ucount, -1);
1374                 }
1375                 lp->lwp_qcpu = dd->cpuid;
1376                 atomic_add_int(&dd->uload, lp->lwp_uload);
1377                 atomic_add_int(&dd->ucount, 1);
1378                 atomic_add_int(&dfly_ucount, 1);
1379                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1380         }
1381         return lp;
1382 }
1383
1384 #ifdef SMP
1385
1386 /*
1387  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1388  *
1389  * Choose a cpu node to schedule lp on, hopefully nearby its current
1390  * node.
1391  *
1392  * We give the current node a modest advantage for obvious reasons.
1393  *
1394  * We also give the node the thread was woken up FROM a slight advantage
1395  * in order to try to schedule paired threads which synchronize/block waiting
1396  * for each other fairly close to each other.  Similarly in a network setting
1397  * this feature will also attempt to place a user process near the kernel
1398  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1399  * algorithm as it heuristically groups synchronizing processes for locality
1400  * of reference in multi-socket systems.
1401  *
1402  * We check against running processes and give a big advantage if there
1403  * are none running.
1404  *
1405  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1406  *
1407  * When the topology is known choose a cpu whos group has, in aggregate,
1408  * has the lowest weighted load.
1409  */
1410 static
1411 dfly_pcpu_t
1412 dfly_choose_best_queue(struct lwp *lp)
1413 {
1414         cpumask_t wakemask;
1415         cpumask_t mask;
1416         cpu_node_t *cpup;
1417         cpu_node_t *cpun;
1418         cpu_node_t *cpub;
1419         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1420         dfly_pcpu_t rdd;
1421         int wakecpu;
1422         int cpuid;
1423         int n;
1424         int count;
1425         int load;
1426         int lowest_load;
1427
1428         /*
1429          * When the topology is unknown choose a random cpu that is hopefully
1430          * idle.
1431          */
1432         if (dd->cpunode == NULL)
1433                 return (dfly_choose_queue_simple(dd, lp));
1434
1435         /*
1436          * Pairing mask
1437          */
1438         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1439                 wakemask = dfly_pcpu[wakecpu].cpumask;
1440         else
1441                 wakemask = 0;
1442
1443         /*
1444          * When the topology is known choose a cpu whos group has, in
1445          * aggregate, has the lowest weighted load.
1446          */
1447         cpup = root_cpu_node;
1448         rdd = dd;
1449
1450         while (cpup) {
1451                 /*
1452                  * Degenerate case super-root
1453                  */
1454                 if (cpup->child_node && cpup->child_no == 1) {
1455                         cpup = cpup->child_node;
1456                         continue;
1457                 }
1458
1459                 /*
1460                  * Terminal cpunode
1461                  */
1462                 if (cpup->child_node == NULL) {
1463                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1464                         break;
1465                 }
1466
1467                 cpub = NULL;
1468                 lowest_load = 0x7FFFFFFF;
1469
1470                 for (n = 0; n < cpup->child_no; ++n) {
1471                         /*
1472                          * Accumulate load information for all cpus
1473                          * which are members of this node.
1474                          */
1475                         cpun = &cpup->child_node[n];
1476                         mask = cpun->members & usched_global_cpumask &
1477                                smp_active_mask & lp->lwp_cpumask;
1478                         if (mask == 0)
1479                                 continue;
1480
1481                         count = 0;
1482                         load = 0;
1483
1484                         while (mask) {
1485                                 cpuid = BSFCPUMASK(mask);
1486                                 rdd = &dfly_pcpu[cpuid];
1487                                 load += rdd->uload;
1488                                 load += rdd->ucount * usched_dfly_weight3;
1489
1490                                 if (rdd->uschedcp == NULL &&
1491                                     rdd->runqcount == 0) {
1492                                         load -= usched_dfly_weight4;
1493                                 } else if (rdd->upri > lp->lwp_priority + PPQ) {
1494                                         load -= usched_dfly_weight4 / 2;
1495                                 }
1496                                 mask &= ~CPUMASK(cpuid);
1497                                 ++count;
1498                         }
1499
1500                         /*
1501                          * Compensate if the lp is already accounted for in
1502                          * the aggregate uload for this mask set.  We want
1503                          * to calculate the loads as if lp were not present,
1504                          * otherwise the calculation is bogus.
1505                          */
1506                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1507                             (dd->cpumask & cpun->members)) {
1508                                 load -= lp->lwp_uload;
1509                                 load -= usched_dfly_weight3;
1510                         }
1511
1512                         load /= count;
1513
1514                         /*
1515                          * Advantage the cpu group (lp) is already on.
1516                          */
1517                         if (cpun->members & dd->cpumask)
1518                                 load -= usched_dfly_weight1;
1519
1520                         /*
1521                          * Advantage the cpu group we want to pair (lp) to,
1522                          * but don't let it go to the exact same cpu as
1523                          * the wakecpu target.
1524                          *
1525                          * We do this by checking whether cpun is a
1526                          * terminal node or not.  All cpun's at the same
1527                          * level will either all be terminal or all not
1528                          * terminal.
1529                          *
1530                          * If it is and we match we disadvantage the load.
1531                          * If it is and we don't match we advantage the load.
1532                          *
1533                          * Also note that we are effectively disadvantaging
1534                          * all-but-one by the same amount, so it won't effect
1535                          * the weight1 factor for the all-but-one nodes.
1536                          */
1537                         if (cpun->members & wakemask) {
1538                                 if (cpun->child_node != NULL) {
1539                                         /* advantage */
1540                                         load -= usched_dfly_weight2;
1541                                 } else {
1542                                         if (usched_dfly_features & 0x10)
1543                                                 load += usched_dfly_weight2;
1544                                         else
1545                                                 load -= usched_dfly_weight2;
1546                                 }
1547                         }
1548
1549                         /*
1550                          * Calculate the best load
1551                          */
1552                         if (cpub == NULL || lowest_load > load ||
1553                             (lowest_load == load &&
1554                              (cpun->members & dd->cpumask))
1555                         ) {
1556                                 lowest_load = load;
1557                                 cpub = cpun;
1558                         }
1559                 }
1560                 cpup = cpub;
1561         }
1562         if (usched_dfly_chooser)
1563                 kprintf("lp %02d->%02d %s\n",
1564                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1565         return (rdd);
1566 }
1567
1568 /*
1569  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1570  *
1571  * Choose the worst queue close to dd's cpu node with a non-empty runq
1572  * that is NOT dd.  Also require that the moving of the highest-load thread
1573  * from rdd to dd does not cause the uload's to cross each other.
1574  *
1575  * This is used by the thread chooser when the current cpu's queues are
1576  * empty to steal a thread from another cpu's queue.  We want to offload
1577  * the most heavily-loaded queue.
1578  */
1579 static
1580 dfly_pcpu_t
1581 dfly_choose_worst_queue(dfly_pcpu_t dd)
1582 {
1583         cpumask_t mask;
1584         cpu_node_t *cpup;
1585         cpu_node_t *cpun;
1586         cpu_node_t *cpub;
1587         dfly_pcpu_t rdd;
1588         int cpuid;
1589         int n;
1590         int count;
1591         int load;
1592 #if 0
1593         int pri;
1594         int hpri;
1595 #endif
1596         int highest_load;
1597
1598         /*
1599          * When the topology is unknown choose a random cpu that is hopefully
1600          * idle.
1601          */
1602         if (dd->cpunode == NULL) {
1603                 return (NULL);
1604         }
1605
1606         /*
1607          * When the topology is known choose a cpu whos group has, in
1608          * aggregate, has the lowest weighted load.
1609          */
1610         cpup = root_cpu_node;
1611         rdd = dd;
1612         while (cpup) {
1613                 /*
1614                  * Degenerate case super-root
1615                  */
1616                 if (cpup->child_node && cpup->child_no == 1) {
1617                         cpup = cpup->child_node;
1618                         continue;
1619                 }
1620
1621                 /*
1622                  * Terminal cpunode
1623                  */
1624                 if (cpup->child_node == NULL) {
1625                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1626                         break;
1627                 }
1628
1629                 cpub = NULL;
1630                 highest_load = 0;
1631
1632                 for (n = 0; n < cpup->child_no; ++n) {
1633                         /*
1634                          * Accumulate load information for all cpus
1635                          * which are members of this node.
1636                          */
1637                         cpun = &cpup->child_node[n];
1638                         mask = cpun->members & usched_global_cpumask &
1639                                smp_active_mask;
1640                         if (mask == 0)
1641                                 continue;
1642                         count = 0;
1643                         load = 0;
1644
1645                         while (mask) {
1646                                 cpuid = BSFCPUMASK(mask);
1647                                 rdd = &dfly_pcpu[cpuid];
1648                                 load += rdd->uload;
1649                                 load += rdd->ucount * usched_dfly_weight3;
1650                                 if (rdd->uschedcp == NULL &&
1651                                     rdd->runqcount == 0 &&
1652                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1653                                 ) {
1654                                         load -= usched_dfly_weight4;
1655                                 } else if (rdd->upri > dd->upri + PPQ) {
1656                                         load -= usched_dfly_weight4 / 2;
1657                                 }
1658                                 mask &= ~CPUMASK(cpuid);
1659                                 ++count;
1660                         }
1661                         load /= count;
1662
1663                         /*
1664                          * Prefer candidates which are somewhat closer to
1665                          * our cpu.
1666                          */
1667                         if (dd->cpumask & cpun->members)
1668                                 load += usched_dfly_weight1;
1669
1670                         /*
1671                          * The best candidate is the one with the worst
1672                          * (highest) load.
1673                          */
1674                         if (cpub == NULL || highest_load < load) {
1675                                 highest_load = load;
1676                                 cpub = cpun;
1677                         }
1678                 }
1679                 cpup = cpub;
1680         }
1681
1682         /*
1683          * We never return our own node (dd), and only return a remote
1684          * node if it's load is significantly worse than ours (i.e. where
1685          * stealing a thread would be considered reasonable).
1686          *
1687          * This also helps us avoid breaking paired threads apart which
1688          * can have disastrous effects on performance.
1689          */
1690         if (rdd == dd)
1691                 return(NULL);
1692
1693 #if 0
1694         hpri = 0;
1695         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1696                 hpri = pri;
1697         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1698                 hpri = pri;
1699         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1700                 hpri = pri;
1701         hpri *= PPQ;
1702         if (rdd->uload - hpri < dd->uload + hpri)
1703                 return(NULL);
1704 #endif
1705         return (rdd);
1706 }
1707
1708 static
1709 dfly_pcpu_t
1710 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1711 {
1712         dfly_pcpu_t rdd;
1713         cpumask_t tmpmask;
1714         cpumask_t mask;
1715         int cpuid;
1716
1717         /*
1718          * Fallback to the original heuristic, select random cpu,
1719          * first checking cpus not currently running a user thread.
1720          */
1721         ++dfly_scancpu;
1722         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1723         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1724                smp_active_mask & usched_global_cpumask;
1725
1726         while (mask) {
1727                 tmpmask = ~(CPUMASK(cpuid) - 1);
1728                 if (mask & tmpmask)
1729                         cpuid = BSFCPUMASK(mask & tmpmask);
1730                 else
1731                         cpuid = BSFCPUMASK(mask);
1732                 rdd = &dfly_pcpu[cpuid];
1733
1734                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1735                         goto found;
1736                 mask &= ~CPUMASK(cpuid);
1737         }
1738
1739         /*
1740          * Then cpus which might have a currently running lp
1741          */
1742         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1743         mask = dfly_curprocmask & dfly_rdyprocmask &
1744                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1745
1746         while (mask) {
1747                 tmpmask = ~(CPUMASK(cpuid) - 1);
1748                 if (mask & tmpmask)
1749                         cpuid = BSFCPUMASK(mask & tmpmask);
1750                 else
1751                         cpuid = BSFCPUMASK(mask);
1752                 rdd = &dfly_pcpu[cpuid];
1753
1754                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1755                         goto found;
1756                 mask &= ~CPUMASK(cpuid);
1757         }
1758
1759         /*
1760          * If we cannot find a suitable cpu we reload from dfly_scancpu
1761          * and round-robin.  Other cpus will pickup as they release their
1762          * current lwps or become ready.
1763          *
1764          * Avoid a degenerate system lockup case if usched_global_cpumask
1765          * is set to 0 or otherwise does not cover lwp_cpumask.
1766          *
1767          * We only kick the target helper thread in this case, we do not
1768          * set the user resched flag because
1769          */
1770         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1771         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1772                 cpuid = 0;
1773         rdd = &dfly_pcpu[cpuid];
1774 found:
1775         return (rdd);
1776 }
1777
1778 static
1779 void
1780 dfly_need_user_resched_remote(void *dummy)
1781 {
1782         globaldata_t gd = mycpu;
1783         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1784
1785         need_user_resched();
1786
1787         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1788         wakeup_mycpu(&dd->helper_thread);
1789 }
1790
1791 #endif
1792
1793 /*
1794  * dfly_remrunqueue_locked() removes a given process from the run queue
1795  * that it is on, clearing the queue busy bit if it becomes empty.
1796  *
1797  * Note that user process scheduler is different from the LWKT schedule.
1798  * The user process scheduler only manages user processes but it uses LWKT
1799  * underneath, and a user process operating in the kernel will often be
1800  * 'released' from our management.
1801  *
1802  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1803  * to sleep or the lwp is moved to a different runq.
1804  */
1805 static void
1806 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1807 {
1808         struct rq *q;
1809         u_int32_t *which;
1810         u_int8_t pri;
1811
1812         KKASSERT(rdd->runqcount >= 0);
1813
1814         pri = lp->lwp_rqindex;
1815
1816         switch(lp->lwp_rqtype) {
1817         case RTP_PRIO_NORMAL:
1818                 q = &rdd->queues[pri];
1819                 which = &rdd->queuebits;
1820                 break;
1821         case RTP_PRIO_REALTIME:
1822         case RTP_PRIO_FIFO:
1823                 q = &rdd->rtqueues[pri];
1824                 which = &rdd->rtqueuebits;
1825                 break;
1826         case RTP_PRIO_IDLE:
1827                 q = &rdd->idqueues[pri];
1828                 which = &rdd->idqueuebits;
1829                 break;
1830         default:
1831                 panic("remrunqueue: invalid rtprio type");
1832                 /* NOT REACHED */
1833         }
1834         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1835         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1836         TAILQ_REMOVE(q, lp, lwp_procq);
1837         --rdd->runqcount;
1838         if (TAILQ_EMPTY(q)) {
1839                 KASSERT((*which & (1 << pri)) != 0,
1840                         ("remrunqueue: remove from empty queue"));
1841                 *which &= ~(1 << pri);
1842         }
1843 }
1844
1845 /*
1846  * dfly_setrunqueue_locked()
1847  *
1848  * Add a process whos rqtype and rqindex had previously been calculated
1849  * onto the appropriate run queue.   Determine if the addition requires
1850  * a reschedule on a cpu and return the cpuid or -1.
1851  *
1852  * NOTE:          Lower priorities are better priorities.
1853  *
1854  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1855  *                sum of the rough lwp_priority for all running and runnable
1856  *                processes.  Lower priority processes (higher lwp_priority
1857  *                values) actually DO count as more load, not less, because
1858  *                these are the programs which require the most care with
1859  *                regards to cpu selection.
1860  */
1861 static void
1862 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1863 {
1864         struct rq *q;
1865         u_int32_t *which;
1866         int pri;
1867
1868         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1869
1870         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1871                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1872                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1873                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1874                 atomic_add_int(&dfly_ucount, 1);
1875         }
1876
1877         pri = lp->lwp_rqindex;
1878
1879         switch(lp->lwp_rqtype) {
1880         case RTP_PRIO_NORMAL:
1881                 q = &rdd->queues[pri];
1882                 which = &rdd->queuebits;
1883                 break;
1884         case RTP_PRIO_REALTIME:
1885         case RTP_PRIO_FIFO:
1886                 q = &rdd->rtqueues[pri];
1887                 which = &rdd->rtqueuebits;
1888                 break;
1889         case RTP_PRIO_IDLE:
1890                 q = &rdd->idqueues[pri];
1891                 which = &rdd->idqueuebits;
1892                 break;
1893         default:
1894                 panic("remrunqueue: invalid rtprio type");
1895                 /* NOT REACHED */
1896         }
1897
1898         /*
1899          * Add to the correct queue and set the appropriate bit.  If no
1900          * lower priority (i.e. better) processes are in the queue then
1901          * we want a reschedule, calculate the best cpu for the job.
1902          *
1903          * Always run reschedules on the LWPs original cpu.
1904          */
1905         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1906         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1907         ++rdd->runqcount;
1908         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1909         *which |= 1 << pri;
1910 }
1911
1912 #ifdef SMP
1913
1914 /*
1915  * For SMP systems a user scheduler helper thread is created for each
1916  * cpu and is used to allow one cpu to wakeup another for the purposes of
1917  * scheduling userland threads from setrunqueue().
1918  *
1919  * UP systems do not need the helper since there is only one cpu.
1920  *
1921  * We can't use the idle thread for this because we might block.
1922  * Additionally, doing things this way allows us to HLT idle cpus
1923  * on MP systems.
1924  */
1925 static void
1926 dfly_helper_thread(void *dummy)
1927 {
1928     globaldata_t gd;
1929     dfly_pcpu_t dd;
1930     dfly_pcpu_t rdd;
1931     struct lwp *nlp;
1932     cpumask_t mask;
1933     int cpuid;
1934
1935     gd = mycpu;
1936     cpuid = gd->gd_cpuid;       /* doesn't change */
1937     mask = gd->gd_cpumask;      /* doesn't change */
1938     dd = &dfly_pcpu[cpuid];
1939
1940     /*
1941      * Since we only want to be woken up only when no user processes
1942      * are scheduled on a cpu, run at an ultra low priority.
1943      */
1944     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1945
1946     tsleep(&dd->helper_thread, 0, "schslp", 0);
1947
1948     for (;;) {
1949         /*
1950          * We use the LWKT deschedule-interlock trick to avoid racing
1951          * dfly_rdyprocmask.  This means we cannot block through to the
1952          * manual lwkt_switch() call we make below.
1953          */
1954         crit_enter_gd(gd);
1955         tsleep_interlock(&dd->helper_thread, 0);
1956
1957         spin_lock(&dd->spin);
1958
1959         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1960         clear_user_resched();   /* This satisfied the reschedule request */
1961         dd->rrcount = 0;        /* Reset the round-robin counter */
1962
1963         if (dd->runqcount || dd->uschedcp != NULL) {
1964                 /*
1965                  * Threads are available.  A thread may or may not be
1966                  * currently scheduled.  Get the best thread already queued
1967                  * to this cpu.
1968                  */
1969                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
1970                 if (nlp) {
1971                         atomic_set_cpumask(&dfly_curprocmask, mask);
1972                         dd->upri = nlp->lwp_priority;
1973                         dd->uschedcp = nlp;
1974                         dd->rrcount = 0;        /* reset round robin */
1975                         spin_unlock(&dd->spin);
1976                         lwkt_acquire(nlp->lwp_thread);
1977                         lwkt_schedule(nlp->lwp_thread);
1978                 } else {
1979                         /*
1980                          * This situation should not occur because we had
1981                          * at least one thread available.
1982                          */
1983                         spin_unlock(&dd->spin);
1984                 }
1985         } else if (usched_dfly_features & 0x01) {
1986                 /*
1987                  * This cpu is devoid of runnable threads, steal a thread
1988                  * from another cpu.  Since we're stealing, might as well
1989                  * load balance at the same time.
1990                  *
1991                  * We choose the highest-loaded thread from the worst queue.
1992                  *
1993                  * NOTE! This function only returns a non-NULL rdd when
1994                  *       another cpu's queue is obviously overloaded.  We
1995                  *       do not want to perform the type of rebalancing
1996                  *       the schedclock does here because it would result
1997                  *       in insane process pulling when 'steady' state is
1998                  *       partially unbalanced (e.g. 6 runnables and only
1999                  *       4 cores).
2000                  */
2001                 rdd = dfly_choose_worst_queue(dd);
2002                 if (rdd && spin_trylock(&rdd->spin)) {
2003                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2004                         spin_unlock(&rdd->spin);
2005                 } else {
2006                         nlp = NULL;
2007                 }
2008                 if (nlp) {
2009                         atomic_set_cpumask(&dfly_curprocmask, mask);
2010                         dd->upri = nlp->lwp_priority;
2011                         dd->uschedcp = nlp;
2012                         dd->rrcount = 0;        /* reset round robin */
2013                         spin_unlock(&dd->spin);
2014                         lwkt_acquire(nlp->lwp_thread);
2015                         lwkt_schedule(nlp->lwp_thread);
2016                 } else {
2017                         /*
2018                          * Leave the thread on our run queue.  Another
2019                          * scheduler will try to pull it later.
2020                          */
2021                         spin_unlock(&dd->spin);
2022                 }
2023         } else {
2024                 /*
2025                  * devoid of runnable threads and not allowed to steal
2026                  * any.
2027                  */
2028                 spin_unlock(&dd->spin);
2029         }
2030
2031         /*
2032          * We're descheduled unless someone scheduled us.  Switch away.
2033          * Exiting the critical section will cause splz() to be called
2034          * for us if interrupts and such are pending.
2035          */
2036         crit_exit_gd(gd);
2037         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2038     }
2039 }
2040
2041 #if 0
2042 static int
2043 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2044 {
2045         int error, new_val;
2046
2047         new_val = usched_dfly_stick_to_level;
2048
2049         error = sysctl_handle_int(oidp, &new_val, 0, req);
2050         if (error != 0 || req->newptr == NULL)
2051                 return (error);
2052         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2053                 return (EINVAL);
2054         usched_dfly_stick_to_level = new_val;
2055         return (0);
2056 }
2057 #endif
2058
2059 /*
2060  * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
2061  * been cleared by rqinit() and we should not mess with it further.
2062  */
2063 static void
2064 dfly_helper_thread_cpu_init(void)
2065 {
2066         int i;
2067         int j;
2068         int cpuid;
2069         int smt_not_supported = 0;
2070         int cache_coherent_not_supported = 0;
2071
2072         if (bootverbose)
2073                 kprintf("Start scheduler helpers on cpus:\n");
2074
2075         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2076         usched_dfly_sysctl_tree =
2077                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2078                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2079                                 "usched_dfly", CTLFLAG_RD, 0, "");
2080
2081         for (i = 0; i < ncpus; ++i) {
2082                 dfly_pcpu_t dd = &dfly_pcpu[i];
2083                 cpumask_t mask = CPUMASK(i);
2084
2085                 if ((mask & smp_active_mask) == 0)
2086                     continue;
2087
2088                 spin_init(&dd->spin);
2089                 dd->cpunode = get_cpu_node_by_cpuid(i);
2090                 dd->cpuid = i;
2091                 dd->cpumask = CPUMASK(i);
2092                 for (j = 0; j < NQS; j++) {
2093                         TAILQ_INIT(&dd->queues[j]);
2094                         TAILQ_INIT(&dd->rtqueues[j]);
2095                         TAILQ_INIT(&dd->idqueues[j]);
2096                 }
2097                 atomic_clear_cpumask(&dfly_curprocmask, 1);
2098
2099                 if (dd->cpunode == NULL) {
2100                         smt_not_supported = 1;
2101                         cache_coherent_not_supported = 1;
2102                         if (bootverbose)
2103                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2104                                          "found for cpu\n", i);
2105                 } else {
2106                         switch (dd->cpunode->type) {
2107                         case THREAD_LEVEL:
2108                                 if (bootverbose)
2109                                         kprintf ("\tcpu%d - HyperThreading "
2110                                                  "available. Core siblings: ",
2111                                                  i);
2112                                 break;
2113                         case CORE_LEVEL:
2114                                 smt_not_supported = 1;
2115
2116                                 if (bootverbose)
2117                                         kprintf ("\tcpu%d - No HT available, "
2118                                                  "multi-core/physical "
2119                                                  "cpu. Physical siblings: ",
2120                                                  i);
2121                                 break;
2122                         case CHIP_LEVEL:
2123                                 smt_not_supported = 1;
2124
2125                                 if (bootverbose)
2126                                         kprintf ("\tcpu%d - No HT available, "
2127                                                  "single-core/physical cpu. "
2128                                                  "Package Siblings: ",
2129                                                  i);
2130                                 break;
2131                         default:
2132                                 /* Let's go for safe defaults here */
2133                                 smt_not_supported = 1;
2134                                 cache_coherent_not_supported = 1;
2135                                 if (bootverbose)
2136                                         kprintf ("\tcpu%d - Unknown cpunode->"
2137                                                  "type=%u. Siblings: ",
2138                                                  i,
2139                                                  (u_int)dd->cpunode->type);
2140                                 break;
2141                         }
2142
2143                         if (bootverbose) {
2144                                 if (dd->cpunode->parent_node != NULL) {
2145                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2146                                                 kprintf("cpu%d ", cpuid);
2147                                         kprintf("\n");
2148                                 } else {
2149                                         kprintf(" no siblings\n");
2150                                 }
2151                         }
2152                 }
2153
2154                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2155                             0, i, "usched %d", i);
2156
2157                 /*
2158                  * Allow user scheduling on the target cpu.  cpu #0 has already
2159                  * been enabled in rqinit().
2160                  */
2161                 if (i)
2162                     atomic_clear_cpumask(&dfly_curprocmask, mask);
2163                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2164                 dd->upri = PRIBASE_NULL;
2165
2166         }
2167
2168         /* usched_dfly sysctl configurable parameters */
2169
2170         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2171                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2172                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2173                        &usched_dfly_rrinterval, 0, "");
2174         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2175                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2176                        OID_AUTO, "decay", CTLFLAG_RW,
2177                        &usched_dfly_decay, 0, "Extra decay when not running");
2178
2179         /* Add enable/disable option for SMT scheduling if supported */
2180         if (smt_not_supported) {
2181                 usched_dfly_smt = 0;
2182                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2183                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2184                                   OID_AUTO, "smt", CTLFLAG_RD,
2185                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2186         } else {
2187                 usched_dfly_smt = 1;
2188                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2189                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2190                                OID_AUTO, "smt", CTLFLAG_RW,
2191                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2192         }
2193
2194         /*
2195          * Add enable/disable option for cache coherent scheduling
2196          * if supported
2197          */
2198         if (cache_coherent_not_supported) {
2199                 usched_dfly_cache_coherent = 0;
2200                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2201                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2202                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2203                                   "NOT SUPPORTED", 0,
2204                                   "Cache coherence NOT SUPPORTED");
2205         } else {
2206                 usched_dfly_cache_coherent = 1;
2207                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2208                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2209                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2210                                &usched_dfly_cache_coherent, 0,
2211                                "Enable/Disable cache coherent scheduling");
2212
2213                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2214                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2215                                OID_AUTO, "weight1", CTLFLAG_RW,
2216                                &usched_dfly_weight1, 10,
2217                                "Weight selection for current cpu");
2218
2219                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2220                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2221                                OID_AUTO, "weight2", CTLFLAG_RW,
2222                                &usched_dfly_weight2, 5,
2223                                "Weight selection for wakefrom cpu");
2224
2225                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2226                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2227                                OID_AUTO, "weight3", CTLFLAG_RW,
2228                                &usched_dfly_weight3, 50,
2229                                "Weight selection for num threads on queue");
2230
2231                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2232                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2233                                OID_AUTO, "weight4", CTLFLAG_RW,
2234                                &usched_dfly_weight4, 50,
2235                                "Availability of other idle cpus");
2236
2237                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2238                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2239                                OID_AUTO, "features", CTLFLAG_RW,
2240                                &usched_dfly_features, 15,
2241                                "Allow pulls into empty queues");
2242
2243                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2244                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2245                                OID_AUTO, "swmask", CTLFLAG_RW,
2246                                &usched_dfly_swmask, ~PPQMASK,
2247                                "Queue mask to force thread switch");
2248
2249
2250 #if 0
2251                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2252                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2253                                 OID_AUTO, "stick_to_level",
2254                                 CTLTYPE_INT | CTLFLAG_RW,
2255                                 NULL, sizeof usched_dfly_stick_to_level,
2256                                 sysctl_usched_dfly_stick_to_level, "I",
2257                                 "Stick a process to this level. See sysctl"
2258                                 "paremter hw.cpu_topology.level_description");
2259 #endif
2260         }
2261 }
2262 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2263         dfly_helper_thread_cpu_init, NULL)
2264
2265 #else /* No SMP options - just add the configurable parameters to sysctl */
2266
2267 static void
2268 sched_sysctl_tree_init(void)
2269 {
2270         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2271         usched_dfly_sysctl_tree =
2272                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2273                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2274                                 "usched_dfly", CTLFLAG_RD, 0, "");
2275
2276         /* usched_dfly sysctl configurable parameters */
2277         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2278                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2279                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2280                        &usched_dfly_rrinterval, 0, "");
2281         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2282                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2283                        OID_AUTO, "decay", CTLFLAG_RW,
2284                        &usched_dfly_decay, 0, "Extra decay when not running");
2285 }
2286 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2287         sched_sysctl_tree_init, NULL)
2288 #endif