sys/kern/usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
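/*
 * Worked example of the mapping above: MAXPRI = 128 and NQS = 32 give
 * PPQ = 4, so four adjacent priority levels share one run queue and a
 * normal-class priority of, say, 57 lands on queue index
 * (57 & PRIMASK) / PPQ = 14.  The PRIBASE_* constants simply offset each
 * scheduling class into its own 128-entry priority band.
 */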
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
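/*
 * With ESTCPUPPQ = 512 and NQS = 32, ESTCPUMAX works out to 16384, so
 * estcpu alone can contribute up to ESTCPUMAX * PPQ / ESTCPUPPQ = 128
 * priority units, the full width of the normal class.  ESTCPULIM()
 * simply clamps any estcpu calculation to that 16384 ceiling.
 */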
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_estfast     lwp_usdata.dfly.estfast
97 #define lwp_uload       lwp_usdata.dfly.uload
98 #define lwp_rqtype      lwp_usdata.dfly.rqtype
99 #define lwp_qcpu        lwp_usdata.dfly.qcpu
100 #define lwp_rrcount     lwp_usdata.dfly.rrcount
101
102 struct usched_dfly_pcpu {
103         struct spinlock spin;
104         struct thread   helper_thread;
105         short           unused01;
106         short           upri;
107         int             uload;
108         int             ucount;
109         struct lwp      *uschedcp;
110         struct rq       queues[NQS];
111         struct rq       rtqueues[NQS];
112         struct rq       idqueues[NQS];
113         u_int32_t       queuebits;
114         u_int32_t       rtqueuebits;
115         u_int32_t       idqueuebits;
116         int             runqcount;
117         int             cpuid;
118         cpumask_t       cpumask;
119         cpu_node_t      *cpunode;
120 };
121
122 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
123
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
130                                 sysclock_t cpstamp);
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
137 static void dfly_changeqcpu_locked(struct lwp *lp,
138                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
139 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
140 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
141 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
142 static void dfly_need_user_resched_remote(void *dummy);
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
144                                           struct lwp *chklp, int worst);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147 static void dfly_changedcpu(struct lwp *lp);
148
149 struct usched usched_dfly = {
150         { NULL },
151         "dfly", "Original DragonFly Scheduler",
152         NULL,                   /* default registration */
153         NULL,                   /* default deregistration */
154         dfly_acquire_curproc,
155         dfly_release_curproc,
156         dfly_setrunqueue,
157         dfly_schedulerclock,
158         dfly_recalculate_estcpu,
159         dfly_resetpriority,
160         dfly_forking,
161         dfly_exiting,
162         dfly_uload_update,
163         NULL,                   /* setcpumask not supported */
164         dfly_yield,
165         dfly_changedcpu
166 };
167
168 /*
169  * We have NQS (32) run queues per scheduling class.  For the normal
170  * class, there are 128 priorities scaled onto these 32 queues.  New
171  * processes are added to the last entry in each queue, and processes
172  * are selected for running by taking them from the head and maintaining
173  * a simple FIFO arrangement.  Realtime and Idle priority processes have
174  * an explicit 0-31 priority which maps directly onto their class queue
175  * index.  When a queue has something in it, the corresponding bit is
176  * set in the queuebits variable, allowing a single read to determine
177  * the state of all 32 queues and then a ffs() to find the first busy
178  * queue.
179  */
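/*
 * For example, if only queues 3 and 17 of the normal class are occupied,
 * queuebits is (1 << 3) | (1 << 17) and a single ffs()-style scan (bsfl()
 * in the code below) returns index 3, locating the best non-empty queue
 * without walking all 32 of them.
 */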
180                                         /* currently running a user process */
181 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
182 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
183 static volatile int dfly_scancpu;
184 static volatile int dfly_ucount;        /* total running on whole system */
185 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
186 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
187 static struct sysctl_oid *usched_dfly_sysctl_tree;
188
189 /* Debug info exposed through debug.* sysctl */
190
191 static int usched_dfly_debug = -1;
192 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
193            &usched_dfly_debug, 0,
194            "Print debug information for this pid");
195
196 static int usched_dfly_pid_debug = -1;
197 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
198            &usched_dfly_pid_debug, 0,
199            "Print KTR debug information for this pid");
200
201 static int usched_dfly_chooser = 0;
202 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
203            &usched_dfly_chooser, 0,
204            "Print debug information for the next N cpu selections");
205
206 /*
207  * Tuning usched_dfly - configurable through kern.usched_dfly.
208  *
209  * weight1 - Tries to keep threads on their current cpu.  If you
210  *           make this value too large the scheduler will not be
211  *           able to load-balance large loads.
212  *
213  * weight2 - If non-zero, detects thread pairs undergoing synchronous
214  *           communications and tries to move them closer together.
215  *           Behavior is adjusted by bit 4 of features (0x10).
216  *
217  *           WARNING!  Weight2 is a ridiculously sensitive parameter,
218  *           a small value is recommended.
219  *
220  * weight3 - Weighting based on the number of recently runnable threads
221  *           on the userland scheduling queue (ignoring their loads).
222  *           A nominal value here prevents high-priority (low-load)
223  *           threads from accumulating on one cpu core when other
224  *           cores are available.
225  *
226  *           This value should be left fairly small relative to weight1
227  *           and weight4.
228  *
229  * weight4 - Weighting based on other cpu queues being available
230  *           or running processes with higher lwp_priority's.
231  *
232  *           This allows a thread to migrate to another nearby cpu if it
233  *           is unable to run on the current cpu based on the other cpu
234  *           being idle or running a lower priority (higher lwp_priority)
235  * thread.  This value should be large enough to override weight1.
236  *
237  * features - These flags can be set or cleared to enable or disable various
238  *            features.
239  *
240  *            0x01      Enable idle-cpu pulling                 (default)
241  *            0x02      Enable proactive pushing                (default)
242  *            0x04      Enable rebalancing rover                (default)
243  *            0x08      Enable more proactive pushing           (default)
244  *            0x10      (flip weight2 limit on same cpu)
245  *            0x20      choose best cpu for forked process
246  *            0x40      choose current cpu for forked process
247  *            0x80      choose random cpu for forked process    (default)
248  */
249 static int usched_dfly_smt = 0;
250 static int usched_dfly_cache_coherent = 0;
251 static int usched_dfly_weight1 = 200;   /* keep thread on current cpu */
252 static int usched_dfly_weight2 = 180;   /* synchronous peer's current cpu */
253 static int usched_dfly_weight3 = 40;    /* number of threads on queue */
254 static int usched_dfly_weight4 = 160;   /* availability of idle cores */
255 static int usched_dfly_features = 0x8F; /* allow pulls */
256 static int usched_dfly_fast_resched = 0; /* delta priority / resched */
257 static int usched_dfly_swmask = ~PPQMASK; /* td_upri hint mask */
258 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
259 static int usched_dfly_decay = 8;
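/*
 * The default feature mask of 0x8F decodes to 0x80 | 0x08 | 0x04 | 0x02 |
 * 0x01: random cpu placement for forked processes, both pushing modes,
 * the rebalancing rover, and idle-cpu pulling.  Bit 0x10, the weight2
 * behavior flip, is left off by default.
 */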
260
261 /* KTR debug printings */
262
263 KTR_INFO_MASTER(usched);
264
265 #if !defined(KTR_USCHED_DFLY)
266 #define KTR_USCHED_DFLY KTR_ALL
267 #endif
268
269 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
270     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
271     pid_t pid, int old_cpuid, int curr);
272
273 /*
274  * This function is called when the kernel intends to return to userland.
275  * It is responsible for making the thread the current designated userland
276  * thread for this cpu, blocking if necessary.
277  *
278  * The kernel will not depress our LWKT priority until after we return,
279  * in case we have to shove over to another cpu.
280  *
281  * We must determine our thread's disposition before we switch away.  This
282  * is very sensitive code.
283  *
284  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
285  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
286  * occur, this function is called only under very controlled circumstances.
287  */
288 static void
289 dfly_acquire_curproc(struct lwp *lp)
290 {
291         globaldata_t gd;
292         dfly_pcpu_t dd;
293         dfly_pcpu_t rdd;
294         thread_t td;
295         int force_resched;
296
297         /*
298          * Make sure we aren't sitting on a tsleep queue.
299          */
300         td = lp->lwp_thread;
301         crit_enter_quick(td);
302         if (td->td_flags & TDF_TSLEEPQ)
303                 tsleep_remove(td);
304         dfly_recalculate_estcpu(lp);
305
306         gd = mycpu;
307         dd = &dfly_pcpu[gd->gd_cpuid];
308
309         /*
310          * Process any pending interrupts/ipi's, then handle reschedule
311          * requests.  dfly_release_curproc() will try to assign a new
312          * uschedcp that isn't us and otherwise NULL it out.
313          */
314         force_resched = 0;
315         if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
316             lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
317                 force_resched = 1;
318         }
319
320         if (user_resched_wanted()) {
321                 if (dd->uschedcp == lp)
322                         force_resched = 1;
323                 clear_user_resched();
324                 dfly_release_curproc(lp);
325         }
326
327         /*
328          * Loop until we are the current user thread.
329          *
330          * NOTE: dd spinlock not held at top of loop.
331          */
332         if (dd->uschedcp == lp)
333                 lwkt_yield_quick();
334
335         while (dd->uschedcp != lp) {
336                 lwkt_yield_quick();
337
338                 spin_lock(&dd->spin);
339
340                 if (force_resched &&
341                    (usched_dfly_features & 0x08) &&
342                    (rdd = dfly_choose_best_queue(lp)) != dd) {
343                         /*
344                          * We are not or are no longer the current lwp and a
345                          * forced reschedule was requested.  Figure out the
346                          * best cpu to run on (our current cpu will be given
347                          * significant weight).
348                          *
349                          * (if a reschedule was not requested we want to
350                          *  move this step after the uschedcp tests).
351                          */
352                         dfly_changeqcpu_locked(lp, dd, rdd);
353                         spin_unlock(&dd->spin);
354                         lwkt_deschedule(lp->lwp_thread);
355                         dfly_setrunqueue_dd(rdd, lp);
356                         lwkt_switch();
357                         gd = mycpu;
358                         dd = &dfly_pcpu[gd->gd_cpuid];
359                         continue;
360                 }
361
362                 /*
363                  * Either no reschedule was requested or the best queue was
364                  * dd, and no current process has been selected.  We can
365                  * trivially become the current lwp on the current cpu.
366                  */
367                 if (dd->uschedcp == NULL) {
368                         atomic_clear_int(&lp->lwp_thread->td_mpflags,
369                                          TDF_MP_DIDYIELD);
370                         ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, gd->gd_cpuid);
371                         dd->uschedcp = lp;
372                         dd->upri = lp->lwp_priority;
373                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
374                         spin_unlock(&dd->spin);
375                         break;
376                 }
377
378                 /*
379                  * Put us back on the same run queue unconditionally.
380                  *
381                  * Set rrinterval to force placement at end of queue.
382                  * Select the worst queue to ensure we round-robin,
383                  * but do not change estcpu.
384                  */
385                 if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
386                         u_int32_t tsqbits;
387
388                         switch(lp->lwp_rqtype) {
389                         case RTP_PRIO_NORMAL:
390                                 tsqbits = dd->queuebits;
391                                 spin_unlock(&dd->spin);
392
393                                 lp->lwp_rrcount = usched_dfly_rrinterval;
394                                 if (tsqbits)
395                                         lp->lwp_rqindex = bsrl(tsqbits);
396                                 break;
397                         default:
398                                 spin_unlock(&dd->spin);
399                                 break;
400                         }
401                         lwkt_deschedule(lp->lwp_thread);
402                         dfly_setrunqueue_dd(dd, lp);
403                         atomic_clear_int(&lp->lwp_thread->td_mpflags,
404                                          TDF_MP_DIDYIELD);
405                         lwkt_switch();
406                         gd = mycpu;
407                         dd = &dfly_pcpu[gd->gd_cpuid];
408                         continue;
409                 }
410
411                 /*
412                  * Can we steal the current designated user thread?
413                  *
414                  * If we do the other thread will stall when it tries to
415                  * return to userland, possibly rescheduling elsewhere.
416                  *
417                  * It is important to do a masked test to avoid the edge
418                  * case where two near-equal-priority threads are constantly
419                  * interrupting each other.
420                  *
421                  * In the exact match case another thread has already gained
422                  * uschedcp and lowered its priority, if we steal it the
423                  * other thread will stay stuck on the LWKT runq and not
424                  * push to another cpu.  So don't steal on equal-priority even
425                  * though it might appear to be more beneficial due to not
426                  * having to switch back to the other thread's context.
427                  *
428                  * usched_dfly_fast_resched requires that two threads be
429                  * significantly far apart in priority in order to interrupt.
430                  *
431                  * If better but not sufficiently far apart, the current
432                  * uschedcp will be interrupted at the next scheduler clock.
433                  */
434                 if (dd->uschedcp &&
435                    (dd->upri & ~PPQMASK) >
436                    (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
437                         dd->uschedcp = lp;
438                         dd->upri = lp->lwp_priority;
439                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
440                         spin_unlock(&dd->spin);
441                         break;
442                 }
443                 /*
444                  * We are not the current lwp, figure out the best cpu
445                  * to run on (our current cpu will be given significant
446                  * weight).  Loop on cpu change.
447                  */
448                 if ((usched_dfly_features & 0x02) &&
449                     force_resched == 0 &&
450                     (rdd = dfly_choose_best_queue(lp)) != dd) {
451                         dfly_changeqcpu_locked(lp, dd, rdd);
452                         spin_unlock(&dd->spin);
453                         lwkt_deschedule(lp->lwp_thread);
454                         dfly_setrunqueue_dd(rdd, lp);
455                         lwkt_switch();
456                         gd = mycpu;
457                         dd = &dfly_pcpu[gd->gd_cpuid];
458                         continue;
459                 }
460
461                 /*
462                  * We cannot become the current lwp, place the lp on the
463                  * run-queue of this or another cpu and deschedule ourselves.
464                  *
465                  * When we are reactivated we will have another chance.
466                  *
467                  * Reload after a switch or setrunqueue/switch possibly
468                  * moved us to another cpu.
469                  */
470                 spin_unlock(&dd->spin);
471                 lwkt_deschedule(lp->lwp_thread);
472                 dfly_setrunqueue_dd(dd, lp);
473                 lwkt_switch();
474                 gd = mycpu;
475                 dd = &dfly_pcpu[gd->gd_cpuid];
476         }
477
478         /*
479          * Make sure upri is synchronized, then yield to LWKT threads as
480          * needed before returning.  This could result in another reschedule.
481          * XXX
482          */
483         crit_exit_quick(td);
484
485         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
486 }
487
488 /*
489  * DFLY_RELEASE_CURPROC
490  *
491  * This routine detaches the current thread from the userland scheduler,
492  * usually because the thread needs to run or block in the kernel (at
493  * kernel priority) for a while.
494  *
495  * This routine is also responsible for selecting a new thread to
496  * make the current thread.
497  *
498  * NOTE: This implementation differs from the dummy example in that
499  * dfly_select_curproc() is able to select the current process, whereas
500  * dummy_select_curproc() is not able to select the current process.
501  * This means we have to NULL out uschedcp.
502  *
503  * Additionally, note that we may already be on a run queue if releasing
504  * via the lwkt_switch() in dfly_setrunqueue().
505  */
506 static void
507 dfly_release_curproc(struct lwp *lp)
508 {
509         globaldata_t gd = mycpu;
510         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
511
512         /*
513          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
514          * by wakeup().
515          */
516         if (dd->uschedcp == lp) {
517                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
518                 spin_lock(&dd->spin);
519                 if (dd->uschedcp == lp) {
520                         dd->uschedcp = NULL;    /* don't let lp be selected */
521                         dd->upri = PRIBASE_NULL;
522                         ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, gd->gd_cpuid);
523                         spin_unlock(&dd->spin);
524                         dfly_select_curproc(gd);
525                 } else {
526                         spin_unlock(&dd->spin);
527                 }
528         }
529 }
530
531 /*
532  * DFLY_SELECT_CURPROC
533  *
534  * Select a new current process for this cpu and clear any pending user
535  * reschedule request.  The cpu currently has no current process.
536  *
537  * This routine is also responsible for equal-priority round-robining,
538  * typically triggered from dfly_schedulerclock().  In our dummy example
539  * all the 'user' threads are LWKT scheduled all at once and we just
540  * call lwkt_switch().
541  *
542  * The calling process is not on the queue and cannot be selected.
543  */
544 static
545 void
546 dfly_select_curproc(globaldata_t gd)
547 {
548         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
549         struct lwp *nlp;
550         int cpuid = gd->gd_cpuid;
551
552         crit_enter_gd(gd);
553
554         spin_lock(&dd->spin);
555         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
556
557         if (nlp) {
558                 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
559                 dd->upri = nlp->lwp_priority;
560                 dd->uschedcp = nlp;
561 #if 0
562                 dd->rrcount = 0;                /* reset round robin */
563 #endif
564                 spin_unlock(&dd->spin);
565                 lwkt_acquire(nlp->lwp_thread);
566                 lwkt_schedule(nlp->lwp_thread);
567         } else {
568                 spin_unlock(&dd->spin);
569         }
570         crit_exit_gd(gd);
571 }
572
573 /*
574  * Place the specified lwp on the user scheduler's run queue.  This routine
575  * must be called with the thread descheduled.  The lwp must be runnable.
576  * It must not be possible for anyone else to explicitly schedule this thread.
577  *
578  * The thread may be the current thread as a special case.
579  */
580 static void
581 dfly_setrunqueue(struct lwp *lp)
582 {
583         dfly_pcpu_t dd;
584         dfly_pcpu_t rdd;
585
586         /*
587          * First validate the process LWKT state.
588          */
589         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
590         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
591             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
592              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
593         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
594
595         /*
596          * NOTE: dd/rdd do not necessarily represent the current cpu.
597          *       Instead they may represent the cpu the thread was last
598          *       scheduled on or inherited by its parent.
599          */
600         dd = &dfly_pcpu[lp->lwp_qcpu];
601         rdd = dd;
602
603         /*
604          * This process is not supposed to be scheduled anywhere or assigned
605          * as the current process anywhere.  Assert the condition.
606          */
607         KKASSERT(rdd->uschedcp != lp);
608
609         /*
610          * Ok, we have to setrunqueue some target cpu and request a reschedule
611          * if necessary.
612          *
613          * We have to choose the best target cpu.  It might not be the current
614          * target even if the current cpu has no running user thread (for
615          * example, because the current cpu might be a hyperthread and its
616          * sibling has a thread assigned).
617          *
618          * If we just forked it is most optimal to run the child on the same
619          * cpu just in case the parent decides to wait for it (thus getting
620          * off that cpu).  As long as there is nothing else runnable on the
621          * cpu, that is.  If we did this unconditionally a parent forking
622          * multiple children before waiting (e.g. make -j N) leaves other
623          * cpus idle that could be working.
624          */
625         if (lp->lwp_forked) {
626                 lp->lwp_forked = 0;
627                 if (usched_dfly_features & 0x20)
628                         rdd = dfly_choose_best_queue(lp);
629                 else if (usched_dfly_features & 0x40)
630                         rdd = &dfly_pcpu[lp->lwp_qcpu];
631                 else if (usched_dfly_features & 0x80)
632                         rdd = dfly_choose_queue_simple(rdd, lp);
633                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
634                         rdd = dfly_choose_best_queue(lp);
635                 else
636                         rdd = &dfly_pcpu[lp->lwp_qcpu];
637         } else {
638                 rdd = dfly_choose_best_queue(lp);
639                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
640         }
641         if (lp->lwp_qcpu != rdd->cpuid) {
642                 spin_lock(&dd->spin);
643                 dfly_changeqcpu_locked(lp, dd, rdd);
644                 spin_unlock(&dd->spin);
645         }
646         dfly_setrunqueue_dd(rdd, lp);
647 }
648
649 /*
650  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
651  * spin-locked on-call.  rdd does not have to be.
652  */
653 static void
654 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
655 {
656         if (lp->lwp_qcpu != rdd->cpuid) {
657                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
658                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
659                         atomic_add_int(&dd->uload, -lp->lwp_uload);
660                         atomic_add_int(&dd->ucount, -1);
661                         atomic_add_int(&dfly_ucount, -1);
662                 }
663                 lp->lwp_qcpu = rdd->cpuid;
664         }
665 }
666
667 /*
668  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
669  * also performs all necessary ancillary notification actions.
670  */
671 static void
672 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
673 {
674         globaldata_t rgd;
675
676         /*
677          * We might be moving the lp to another cpu's run queue, and once
678          * on the runqueue (even if it is our cpu's), another cpu can rip
679          * it away from us.
680          *
681          * TDF_MIGRATING might already be set if this is part of a
682          * remrunqueue+setrunqueue sequence.
683          */
684         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
685                 lwkt_giveaway(lp->lwp_thread);
686
687         rgd = globaldata_find(rdd->cpuid);
688
689         /*
690          * We lose control of the lp the moment we release the spinlock
691          * after having placed it on the queue.  i.e. another cpu could pick
692          * it up, or it could exit, or its priority could be further
693          * adjusted, or something like that.
694          *
695          * WARNING! rdd can point to a foreign cpu!
696          */
697         spin_lock(&rdd->spin);
698         dfly_setrunqueue_locked(rdd, lp);
699
700         /*
701          * Potentially interrupt the currently-running thread
702          */
703         if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
704                 /*
705                  * Currently running thread is better or same, do not
706                  * interrupt.
707                  */
708                 spin_unlock(&rdd->spin);
709         } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
710                    usched_dfly_fast_resched) {
711                 /*
712                  * Currently running thread is not better, but not so bad
713                  * that we need to interrupt it.  Let it run for one more
714                  * scheduler tick.
715                  */
716                 if (rdd->uschedcp &&
717                     rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
718                         rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
719                 }
720                 spin_unlock(&rdd->spin);
721         } else if (rgd == mycpu) {
722                 /*
723                  * We should interrupt the currently running thread, which
724                  * is on the current cpu.  However, if DIDYIELD is set we
725                  * round-robin unconditionally and do not interrupt it.
726                  */
727                 spin_unlock(&rdd->spin);
728                 if (rdd->uschedcp == NULL)
729                         wakeup_mycpu(&rdd->helper_thread); /* XXX */
730                 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
731                         need_user_resched();
732         } else {
733                 /*
734                  * We should interrupt the currently running thread, which
735                  * is on a different cpu.
736                  */
737                 spin_unlock(&rdd->spin);
738                 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
739         }
740 }
741
742 /*
743  * This routine is called from a systimer IPI.  It MUST be MP-safe and
744  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
745  * each cpu.
746  */
747 static
748 void
749 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
750 {
751         globaldata_t gd = mycpu;
752         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
753
754         /*
755          * Spinlocks also hold a critical section so there should not be
756          * any active.
757          */
758         KKASSERT(gd->gd_spinlocks == 0);
759
760         if (lp == NULL)
761                 return;
762
763         /*
764          * Do we need to round-robin?  We round-robin 10 times a second.
765          * This should only occur for cpu-bound batch processes.
766          */
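        /*
         * usched_dfly_rrinterval is (ESTCPUFREQ + 9) / 10 and this clock
         * fires ESTCPUFREQ times per second, so the counter below trips
         * after roughly a tenth of a second of cpu time, which is where
         * the "10 times a second" above comes from.
         */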
767         if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
768                 lp->lwp_thread->td_wakefromcpu = -1;
769                 need_user_resched();
770         }
771
772         /*
773          * Adjust estcpu upward using a real time equivalent calculation,
774          * and recalculate lp's priority.
775          */
776         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
777         dfly_resetpriority(lp);
778
779         /*
780          * Rebalance two cpus every 8 ticks, pulling the worst thread
781          * from the worst cpu's queue into a rotating cpu number.
782          *
783          * This mechanic is needed because the push algorithms can
784  * steady-state in a non-optimal configuration.  We need to mix it
785          * up a little, even if it means breaking up a paired thread, so
786          * the push algorithms can rebalance the degenerate conditions.
787          * This portion of the algorithm exists to ensure stability at the
788          * selected weightings.
789          *
790          * Because we might be breaking up optimal conditions we do not want
791  * to execute this too quickly, hence we only rebalance approximately
792  * 7-8 times per second.  The pushes, on the other hand, are capable of
793  * moving threads to other cpus at a much higher rate.
794          *
795          * We choose the most heavily loaded thread from the worst queue
796          * in order to ensure that multiple heavy-weight threads on the same
797          * queue get broken up, and also because these threads are the most
798          * likely to be able to remain in place.  Hopefully then any pairings,
799          * if applicable, migrate to where these threads are.
800          */
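        /*
         * The test below fires on every 8th sched_tick and the
         * (sched_ticks / 8) % ncpus term rotates which cpu does the pull:
         * with ncpus = 4, tick 8 is handled by cpu 1, tick 16 by cpu 2,
         * tick 24 by cpu 3, tick 32 by cpu 0, and so on.
         */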
801         if ((usched_dfly_features & 0x04) &&
802             ((u_int)sched_ticks & 7) == 0 &&
803             (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
804                 /*
805                  * Our cpu is up.
806                  */
807                 struct lwp *nlp;
808                 dfly_pcpu_t rdd;
809
810                 rdd = dfly_choose_worst_queue(dd);
811                 if (rdd) {
812                         spin_lock(&dd->spin);
813                         if (spin_trylock(&rdd->spin)) {
814                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
815                                 spin_unlock(&rdd->spin);
816                                 if (nlp == NULL)
817                                         spin_unlock(&dd->spin);
818                         } else {
819                                 spin_unlock(&dd->spin);
820                                 nlp = NULL;
821                         }
822                 } else {
823                         nlp = NULL;
824                 }
825                 /* dd->spin held if nlp != NULL */
826
827                 /*
828                  * Either schedule it or add it to our queue.
829                  */
830                 if (nlp &&
831                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
832                         ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, dd->cpumask);
833                         dd->upri = nlp->lwp_priority;
834                         dd->uschedcp = nlp;
835 #if 0
836                         dd->rrcount = 0;        /* reset round robin */
837 #endif
838                         spin_unlock(&dd->spin);
839                         lwkt_acquire(nlp->lwp_thread);
840                         lwkt_schedule(nlp->lwp_thread);
841                 } else if (nlp) {
842                         dfly_setrunqueue_locked(dd, nlp);
843                         spin_unlock(&dd->spin);
844                 }
845         }
846 }
847
848 /*
849  * Called from acquire and from kern_synch's one-second timer (one of the
850  * callout helper threads) with a critical section held.
851  *
852  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
853  * overall system load.
854  *
855  * Note that no recalculation occurs for a process which sleeps and wakes
856  * up in the same tick.  That is, a system doing thousands of context
857  * switches per second will still only do serious estcpu calculations
858  * ESTCPUFREQ times per second.
859  */
860 static
861 void
862 dfly_recalculate_estcpu(struct lwp *lp)
863 {
864         globaldata_t gd = mycpu;
865         sysclock_t cpbase;
866         sysclock_t ttlticks;
867         int estcpu;
868         int decay_factor;
869         int ucount;
870
871         /*
872          * We have to subtract periodic to get the last schedclock
873          * timeout time, otherwise we would get the upcoming timeout.
874          * Keep in mind that a process can migrate between cpus and
875          * while the scheduler clock should be very close, boundary
876          * conditions could lead to a small negative delta.
877          */
878         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
879
880         if (lp->lwp_slptime > 1) {
881                 /*
882                  * Too much time has passed, do a coarse correction.
883                  */
884                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
885                 dfly_resetpriority(lp);
886                 lp->lwp_cpbase = cpbase;
887                 lp->lwp_cpticks = 0;
888                 lp->lwp_estfast = 0;
889         } else if (lp->lwp_cpbase != cpbase) {
890                 /*
891                  * Adjust estcpu if we are in a different tick.  Don't waste
892                  * time if we are in the same tick.
893                  *
894                  * First calculate the number of ticks in the measurement
895                  * interval.  The ttlticks calculation can wind up 0 due to
896                  * a bug in the handling of lwp_slptime  (as yet not found),
897                  * so make sure we do not get a divide by 0 panic.
898                  */
899                 ttlticks = (cpbase - lp->lwp_cpbase) /
900                            gd->gd_schedclock.periodic;
901                 if ((ssysclock_t)ttlticks < 0) {
902                         ttlticks = 0;
903                         lp->lwp_cpbase = cpbase;
904                 }
905                 if (ttlticks == 0)
906                         return;
907                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
908
909                 /*
910                  * Calculate the percentage of one cpu being used then
911                  * compensate for any system load in excess of ncpus.
912                  *
913                  * For example, if we have 8 cores and 16 running cpu-bound
914                  * processes then all things being equal each process will
915                  * get 50% of one cpu.  We need to pump this value back
916                  * up to 100% so the estcpu calculation properly adjusts
917                  * the process's dynamic priority.
918                  *
919                  * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
920                  */
921                 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
922                 ucount = dfly_ucount;
923                 if (ucount > ncpus) {
924                         estcpu += estcpu * (ucount - ncpus) / ncpus;
925                 }
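                /*
                 * Continuing the example above: 16 cpu-bound processes on
                 * 8 cores each show roughly 50% pctcpu, so the raw sample
                 * is about ESTCPUMAX / 2 = 8192.  With ucount = 16 and
                 * ncpus = 8 the correction adds 8192 * 8 / 8 = 8192,
                 * bringing the sample back up to the full 16384.
                 */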
926
927                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
928                         kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
929                                 lp->lwp_proc->p_pid, lp,
930                                 estcpu, lp->lwp_estcpu,
931                                 lp->lwp_cpticks, ttlticks);
932                 }
933
934                 /*
935                  * Adjust lp->lwp_estcpu.  The decay factor determines how
936                  * quickly lwp_estcpu collapses to its realtime calculation.
937                  * A slower collapse gives us a more accurate number over
938                  * the long term but can create problems with bursty threads
939                  * or threads which become cpu hogs.
940                  *
941                  * To solve this problem, newly started lwps and lwps which
942                  * are restarting after having been asleep for a while are
943                  * given a much, much faster decay in order to quickly
944                  * detect whether they become cpu-bound.
945                  *
946                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
947                  *       and not here, but we must still ensure that a
948                  *       cpu-bound nice -20 process does not completely
949                  *       override a cpu-bound nice +20 process.
950                  *
951                  * NOTE: We must use ESTCPULIM() here to deal with any
952                  *       overshoot.
953                  */
954                 decay_factor = usched_dfly_decay;
955                 if (decay_factor < 1)
956                         decay_factor = 1;
957                 if (decay_factor > 1024)
958                         decay_factor = 1024;
959
960                 if (lp->lwp_estfast < usched_dfly_decay) {
961                         ++lp->lwp_estfast;
962                         lp->lwp_estcpu = ESTCPULIM(
963                                 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
964                                 (lp->lwp_estfast + 1));
965                 } else {
966                         lp->lwp_estcpu = ESTCPULIM(
967                                 (lp->lwp_estcpu * decay_factor + estcpu) /
968                                 (decay_factor + 1));
969                 }
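                /*
                 * With the default decay of 8 each pass above moves
                 * lwp_estcpu one ninth of the way toward the new sample,
                 * while a newly started or newly awakened lwp (small
                 * lwp_estfast) weights the sample much more heavily so
                 * that a thread turning into a cpu hog is noticed quickly.
                 */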
970
971                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
972                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
973                 dfly_resetpriority(lp);
974                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
975                 lp->lwp_cpticks = 0;
976         }
977 }
978
979 /*
980  * Compute the priority of a process when running in user mode.
981  * Arrange to reschedule if the resulting priority is better
982  * than that of the current process.
983  *
984  * This routine may be called with any process.
985  *
986  * This routine is called by fork1() for initial setup with the process of
987  * the run queue, and also may be called normally with the process on or
988  * off the run queue.
989  */
990 static void
991 dfly_resetpriority(struct lwp *lp)
992 {
993         dfly_pcpu_t rdd;
994         int newpriority;
995         u_short newrqtype;
996         int rcpu;
997         int checkpri;
998         int estcpu;
999         int delta_uload;
1000
1001         crit_enter();
1002
1003         /*
1004          * Lock the scheduler (lp) belongs to.  This can be on a different
1005          * cpu.  Handle races.  This loop breaks out with the appropriate
1006          * rdd locked.
1007          */
1008         for (;;) {
1009                 rcpu = lp->lwp_qcpu;
1010                 cpu_ccfence();
1011                 rdd = &dfly_pcpu[rcpu];
1012                 spin_lock(&rdd->spin);
1013                 if (rcpu == lp->lwp_qcpu)
1014                         break;
1015                 spin_unlock(&rdd->spin);
1016         }
1017
1018         /*
1019          * Calculate the new priority and queue type
1020          */
1021         newrqtype = lp->lwp_rtprio.type;
1022
1023         switch(newrqtype) {
1024         case RTP_PRIO_REALTIME:
1025         case RTP_PRIO_FIFO:
1026                 newpriority = PRIBASE_REALTIME +
1027                              (lp->lwp_rtprio.prio & PRIMASK);
1028                 break;
1029         case RTP_PRIO_NORMAL:
1030                 /*
1031                  *
1032                  */
1033                 estcpu = lp->lwp_estcpu;
1034
1035                 /*
1036                  * p_nice piece         Adds (0-40) * 2         0-80
1037                  * estcpu               Adds 16384  * 4 / 512   0-128
1038                  */
1039                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1040                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1041                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1042                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1043                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1044                 break;
1045         case RTP_PRIO_IDLE:
1046                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1047                 break;
1048         case RTP_PRIO_THREAD:
1049                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1050                 break;
1051         default:
1052                 panic("Bad RTP_PRIO %d", newrqtype);
1053                 /* NOT REACHED */
1054         }
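        /*
         * Example of the normal-class arithmetic: at nice 0 the nice piece
         * is (0 - PRIO_MIN) * PPQ / NICEPPQ = 40 and an estcpu of 8192
         * adds 8192 * PPQ / ESTCPUPPQ = 64.  The sum of 104 is scaled by
         * 128 / 210 to 63, so newpriority becomes PRIBASE_NORMAL + 63 =
         * 191 and the thread maps to run queue index 63 / PPQ = 15.
         */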
1055
1056         /*
1057          * The LWKT scheduler doesn't dive into usched structures, so give it a hint
1058          * on the relative priority of user threads running in the kernel.
1059          * The LWKT scheduler will always ensure that a user thread running
1060          * in the kernel will get cpu some time, regardless of its upri,
1061          * but can decide not to instantly switch from one kernel or user
1062          * mode user thread to a kernel-mode user thread when it has a less
1063          * desirable user priority.
1064          *
1065          * td_upri has normal sense (higher values are more desirable), so
1066          * negate it.
1067          */
1068         lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1069
1070         /*
1071          * The newpriority incorporates the queue type so do a simple masked
1072          * check to determine if the process has moved to another queue.  If
1073          * it has, and it is currently on a run queue, then move it.
1074          *
1075          * Since uload is ~PPQMASK masked, no modifications are necessary if
1076          * we end up in the same run queue.
1077          *
1078          * Reset rrcount if moving to a higher-priority queue, otherwise
1079          * retain rrcount.
1080          */
1081         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1082                 if (lp->lwp_priority < newpriority)
1083                         lp->lwp_rrcount = 0;
1084                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1085                         dfly_remrunqueue_locked(rdd, lp);
1086                         lp->lwp_priority = newpriority;
1087                         lp->lwp_rqtype = newrqtype;
1088                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1089                         dfly_setrunqueue_locked(rdd, lp);
1090                         checkpri = 1;
1091                 } else {
1092                         lp->lwp_priority = newpriority;
1093                         lp->lwp_rqtype = newrqtype;
1094                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1095                         checkpri = 0;
1096                 }
1097         } else {
1098                 /*
1099                  * In the same PPQ, uload cannot change.
1100                  */
1101                 lp->lwp_priority = newpriority;
1102                 checkpri = 1;
1103                 rcpu = -1;
1104         }
1105
1106         /*
1107          * Adjust effective load.
1108          *
1109          * Calculate load then scale up or down geometrically based on p_nice.
1110          * Processes niced up (positive) are less important, and processes
1111          * niced downward (negative) are more important.  The higher the uload,
1112          * the more important the thread.
1113          */
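        /*
         * For example, a fully cpu-bound thread (estcpu near ESTCPUMAX)
         * has a base uload of about 16384 / NQS = 512.  At nice +20 the
         * scaling below cuts that to about 25, while at nice -20 it grows
         * to nearly 1000, so nicer threads weigh far less in the
         * load-balancing decisions.
         */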
1114         /* 0-511, 0-100% cpu */
1115         delta_uload = lp->lwp_estcpu / NQS;
1116         delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1117
1118
1119         delta_uload -= lp->lwp_uload;
1120         lp->lwp_uload += delta_uload;
1121         if (lp->lwp_mpflags & LWP_MP_ULOAD)
1122                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1123
1124         /*
1125          * Determine if we need to reschedule the target cpu.  This only
1126          * occurs if the LWP is already on a scheduler queue, which means
1127          * that idle cpu notification has already occurred.  At most we
1128          * need only issue a need_user_resched() on the appropriate cpu.
1129          *
1130          * The LWP may be owned by a CPU different from the current one,
1131          * in which case dd->uschedcp may be modified without an MP lock
1132          * or a spinlock held.  The worst that happens is that the code
1133          * below causes a spurious need_user_resched() on the target CPU
1134          * and dd->upri to be wrong for a short period of time, both of
1135          * which are harmless.
1136          *
1137          * If checkpri is 0 we are adjusting the priority of the current
1138          * process, possibly higher (less desirable), so ignore the upri
1139          * check which will fail in that case.
1140          */
1141         if (rcpu >= 0) {
1142                 if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1143                     (checkpri == 0 ||
1144                      (rdd->upri & ~PRIMASK) >
1145                      (lp->lwp_priority & ~PRIMASK))) {
1146                         if (rcpu == mycpu->gd_cpuid) {
1147                                 spin_unlock(&rdd->spin);
1148                                 need_user_resched();
1149                         } else {
1150                                 spin_unlock(&rdd->spin);
1151                                 lwkt_send_ipiq(globaldata_find(rcpu),
1152                                                dfly_need_user_resched_remote,
1153                                                NULL);
1154                         }
1155                 } else {
1156                         spin_unlock(&rdd->spin);
1157                 }
1158         } else {
1159                 spin_unlock(&rdd->spin);
1160         }
1161         crit_exit();
1162 }
1163
1164 static
1165 void
1166 dfly_yield(struct lwp *lp)
1167 {
1168         if (lp->lwp_qcpu != mycpu->gd_cpuid)
1169                 return;
1170         KKASSERT(lp == curthread->td_lwp);
1171
1172         /*
1173          * Don't set need_user_resched() or mess with rrcount or anything.
1174          * The TDF flag will override everything as long as we release.
1175          */
1176         atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1177         dfly_release_curproc(lp);
1178 }
1179
1180 /*
1181  * Thread was forcefully migrated to another cpu.  Normally forced migrations
1182  * are used for iterations and the kernel returns to the original cpu before
1183  * returning, so this is not needed.  However, if the kernel migrates a
1184  * thread to another cpu and wants to leave it there, it has to call this
1185  * scheduler helper.
1186  *
1187  * Note that the lwkt_migratecpu() function also released the thread, so
1188  * we don't have to worry about that.
1189  */
1190 static
1191 void
1192 dfly_changedcpu(struct lwp *lp)
1193 {
1194         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1195         dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1196
1197         if (dd != rdd) {
1198                 spin_lock(&dd->spin);
1199                 dfly_changeqcpu_locked(lp, dd, rdd);
1200                 spin_unlock(&dd->spin);
1201         }
1202 }
1203
1204 /*
1205  * Called from fork1() when a new child process is being created.
1206  *
1207  * Give the child process an initial estcpu that is more batchy than
1208  * its parent and dock the parent for the fork (but do not
1209  * reschedule the parent).
1210  *
1211  * fast
1212  *
1213  * XXX lwp should be "spawning" instead of "forking"
1214  */
1215 static void
1216 dfly_forking(struct lwp *plp, struct lwp *lp)
1217 {
1218         /*
1219          * Put the child 4 queue slots (out of 32) higher than the parent
1220          * (less desirable than the parent).
1221          */
1222         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1223         lp->lwp_forked = 1;
1224         lp->lwp_estfast = 0;
1225
1226         /*
1227          * Dock the parent a cost for the fork, protecting us from fork
1228          * bombs.  If the parent is forking quickly make the child more
1229          * batchy.
1230          */
1231         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
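        /*
         * ESTCPUPPQ * 4 = 2048 estcpu units is exactly four queues' worth
         * for the child, while the parent's ESTCPUPPQ / 16 = 32 units only
         * add up to a full queue step after 16 back-to-back forks.
         */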
1232 }
1233
1234 /*
1235  * Called when a lwp is being removed from this scheduler, typically
1236  * during lwp_exit().  We have to clean out any ULOAD accounting before
1237  * we can let the lp go.  The dd->spin lock is not needed for uload
1238  * updates.
1239  *
1240  * Scheduler dequeueing has already occurred, no further action in that
1241  * regard is needed.
1242  */
1243 static void
1244 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1245 {
1246         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1247
1248         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1249                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1250                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1251                 atomic_add_int(&dd->ucount, -1);
1252                 atomic_add_int(&dfly_ucount, -1);
1253         }
1254 }
1255
1256 /*
1257  * This function cannot block in any way, but spinlocks are ok.
1258  *
1259  * Update the uload based on the state of the thread (whether it is going
1260  * to sleep or running again).  The uload is meant to be a longer-term
1261  * load and not an instantaneous load.
1262  */
1263 static void
1264 dfly_uload_update(struct lwp *lp)
1265 {
1266         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1267
1268         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1269                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1270                         spin_lock(&dd->spin);
1271                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1272                                 atomic_set_int(&lp->lwp_mpflags,
1273                                                LWP_MP_ULOAD);
1274                                 atomic_add_int(&dd->uload, lp->lwp_uload);
1275                                 atomic_add_int(&dd->ucount, 1);
1276                                 atomic_add_int(&dfly_ucount, 1);
1277                         }
1278                         spin_unlock(&dd->spin);
1279                 }
1280         } else if (lp->lwp_slptime > 0) {
1281                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1282                         spin_lock(&dd->spin);
1283                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1284                                 atomic_clear_int(&lp->lwp_mpflags,
1285                                                  LWP_MP_ULOAD);
1286                                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1287                                 atomic_add_int(&dd->ucount, -1);
1288                                 atomic_add_int(&dfly_ucount, -1);
1289                         }
1290                         spin_unlock(&dd->spin);
1291                 }
1292         }
1293 }
1294
1295 /*
1296  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1297  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1298  * has a better or equal priority than the process that would otherwise be
1299  * chosen, NULL is returned.
1300  *
1301  * Until we fix the RUNQ code the chklp test has to be strict or we may
1302  * bounce between processes trying to acquire the current process designation.
1303  *
1304  * Must be called with rdd->spin locked.  The spinlock is left intact through
1305  * the entire routine.  dd->spin does not have to be locked.
1306  *
1307  * If worst is non-zero this function finds the worst thread instead of the
1308  * best thread (used by the schedulerclock-based rover).
1309  */
1310 static
1311 struct lwp *
1312 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1313                        struct lwp *chklp, int worst)
1314 {
1315         struct lwp *lp;
1316         struct rq *q;
1317         u_int32_t *which;
1318         u_int32_t pri;
1319         u_int32_t rtqbits;
1320         u_int32_t tsqbits;
1321         u_int32_t idqbits;
1322
1323         rtqbits = rdd->rtqueuebits;
1324         tsqbits = rdd->queuebits;
1325         idqbits = rdd->idqueuebits;
1326
1327         if (worst) {
1328                 if (idqbits) {
1329                         pri = bsrl(idqbits);
1330                         q = &rdd->idqueues[pri];
1331                         which = &rdd->idqueuebits;
1332                 } else if (tsqbits) {
1333                         pri = bsrl(tsqbits);
1334                         q = &rdd->queues[pri];
1335                         which = &rdd->queuebits;
1336                 } else if (rtqbits) {
1337                         pri = bsrl(rtqbits);
1338                         q = &rdd->rtqueues[pri];
1339                         which = &rdd->rtqueuebits;
1340                 } else {
1341                         return (NULL);
1342                 }
1343                 lp = TAILQ_LAST(q, rq);
1344         } else {
1345                 if (rtqbits) {
1346                         pri = bsfl(rtqbits);
1347                         q = &rdd->rtqueues[pri];
1348                         which = &rdd->rtqueuebits;
1349                 } else if (tsqbits) {
1350                         pri = bsfl(tsqbits);
1351                         q = &rdd->queues[pri];
1352                         which = &rdd->queuebits;
1353                 } else if (idqbits) {
1354                         pri = bsfl(idqbits);
1355                         q = &rdd->idqueues[pri];
1356                         which = &rdd->idqueuebits;
1357                 } else {
1358                         return (NULL);
1359                 }
1360                 lp = TAILQ_FIRST(q);
1361         }
1362         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1363
1364         /*
1365          * If the passed lwp <chklp> is reasonably close to the selected
1366          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1367          *
1368          * Note that we must err on the side of <chklp> to avoid bouncing
1369          * between threads in the acquire code.
1370          */
1371         if (chklp) {
1372                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1373                         return(NULL);
1374         }
1375
1376         KTR_COND_LOG(usched_chooseproc,
1377             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1378             lp->lwp_proc->p_pid,
1379             lp->lwp_thread->td_gd->gd_cpuid,
1380             mycpu->gd_cpuid);
1381
1382         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1383         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1384         TAILQ_REMOVE(q, lp, lwp_procq);
1385         --rdd->runqcount;
1386         if (TAILQ_EMPTY(q))
1387                 *which &= ~(1 << pri);
1388
1389         /*
1390          * If we are choosing a process from rdd with the intent to
1391          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1392          * is still held.
1393          */
1394         if (rdd != dd) {
1395                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1396                         atomic_add_int(&rdd->uload, -lp->lwp_uload);
1397                         atomic_add_int(&rdd->ucount, -1);
1398                         atomic_add_int(&dfly_ucount, -1);
1399                 }
1400                 lp->lwp_qcpu = dd->cpuid;
1401                 atomic_add_int(&dd->uload, lp->lwp_uload);
1402                 atomic_add_int(&dd->ucount, 1);
1403                 atomic_add_int(&dfly_ucount, 1);
1404                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1405         }
1406         return lp;
1407 }
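
/*
 * Illustrative sketch (not compiled): how the 32-bit queue bitmaps drive
 * the selection above.  Each set bit marks a non-empty run queue; bsfl()
 * yields the lowest-numbered (best) non-empty queue and bsrl() the
 * highest-numbered (worst) one.  The helper below is hypothetical and
 * uses gcc builtins as stand-ins for bsfl()/bsrl().
 */
#if 0
static int
pick_queue_index(u_int32_t qbits, int worst)
{
        if (qbits == 0)
                return (-1);                            /* nothing queued */
        if (worst)
                return (31 - __builtin_clz(qbits));     /* ~ bsrl() */
        return (__builtin_ctz(qbits));                  /* ~ bsfl() */
}
#endif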
1408
1409 /*
1410  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1411  *
1412  * Choose a cpu node to schedule lp on, hopefully nearby its current
1413  * node.
1414  *
1415  * We give the current node a modest advantage for obvious reasons.
1416  *
1417  * We also give the node the thread was woken up FROM a slight advantage
1418  * in order to try to schedule paired threads which synchronize/block waiting
1419  * for each other fairly close to each other.  Similarly in a network setting
1420  * this feature will also attempt to place a user process near the kernel
1421  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1422  * algorithm as it heuristically groups synchronizing processes for locality
1423  * of reference in multi-socket systems.
1424  *
1425  * We check against running processes and give a big advantage if there
1426  * are none running.
1427  *
1428  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1429  *
1430  * When the topology is known choose a cpu whose group has, in aggregate,
1431  * the lowest weighted load.
1432  */
1433 static
1434 dfly_pcpu_t
1435 dfly_choose_best_queue(struct lwp *lp)
1436 {
1437         cpumask_t wakemask;
1438         cpumask_t mask;
1439         cpu_node_t *cpup;
1440         cpu_node_t *cpun;
1441         cpu_node_t *cpub;
1442         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1443         dfly_pcpu_t rdd;
1444         int wakecpu;
1445         int cpuid;
1446         int n;
1447         int count;
1448         int load;
1449         int lowest_load;
1450
1451         /*
1452          * When the topology is unknown choose a random cpu that is hopefully
1453          * idle.
1454          */
1455         if (dd->cpunode == NULL)
1456                 return (dfly_choose_queue_simple(dd, lp));
1457
1458         /*
1459          * Pairing mask
1460          */
1461         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1462                 wakemask = dfly_pcpu[wakecpu].cpumask;
1463         else
1464                 CPUMASK_ASSZERO(wakemask);
1465
1466         /*
1467          * When the topology is known choose a cpu whose group has, in
1468          * aggregate, the lowest weighted load.
1469          */
1470         cpup = root_cpu_node;
1471         rdd = dd;
1472
1473         while (cpup) {
1474                 /*
1475                  * Degenerate case super-root
1476                  */
1477                 if (cpup->child_no == 1) {
1478                         cpup = cpup->child_node[0];
1479                         continue;
1480                 }
1481
1482                 /*
1483                  * Terminal cpunode
1484                  */
1485                 if (cpup->child_no == 0) {
1486                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1487                         break;
1488                 }
1489
1490                 cpub = NULL;
1491                 lowest_load = 0x7FFFFFFF;
1492
1493                 for (n = 0; n < cpup->child_no; ++n) {
1494                         /*
1495                          * Accumulate load information for all cpus
1496                          * which are members of this node.
1497                          */
1498                         cpun = cpup->child_node[n];
1499                         mask = cpun->members;
1500                         CPUMASK_ANDMASK(mask, usched_global_cpumask);
1501                         CPUMASK_ANDMASK(mask, smp_active_mask);
1502                         CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1503                         if (CPUMASK_TESTZERO(mask))
1504                                 continue;
1505
1506                         count = 0;
1507                         load = 0;
1508
1509                         while (CPUMASK_TESTNZERO(mask)) {
1510                                 cpuid = BSFCPUMASK(mask);
1511                                 rdd = &dfly_pcpu[cpuid];
1512                                 load += rdd->uload;
1513                                 load += rdd->ucount * usched_dfly_weight3;
1514
1515                                 if (rdd->uschedcp == NULL &&
1516                                     rdd->runqcount == 0 &&
1517                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1518                                 ) {
1519                                         load -= usched_dfly_weight4;
1520                                 }
1521 #if 0
1522                                 else if (rdd->upri > lp->lwp_priority + PPQ) {
1523                                         load -= usched_dfly_weight4 / 2;
1524                                 }
1525 #endif
1526                                 CPUMASK_NANDBIT(mask, cpuid);
1527                                 ++count;
1528                         }
1529
1530                         /*
1531                          * Compensate if the lp is already accounted for in
1532                          * the aggregate uload for this mask set.  We want
1533                          * to calculate the loads as if lp were not present,
1534                          * otherwise the calculation is bogus.
1535                          */
1536                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1537                             CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1538                                 load -= lp->lwp_uload;
1539                                 load -= usched_dfly_weight3;
1540                         }
1541
1542                         load /= count;
1543
1544                         /*
1545                          * Advantage the cpu group (lp) is already on.
1546                          */
1547                         if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1548                                 load -= usched_dfly_weight1;
1549
1550                         /*
1551                          * Advantage the cpu group we want to pair (lp) to,
1552                          * but don't let it go to the exact same cpu as
1553                          * the wakecpu target.
1554                          *
1555                          * We do this by checking whether cpun is a
1556                          * terminal node or not.  All cpun's at the same
1557                          * level will either all be terminal or all not
1558                          * terminal.
1559                          *
1560                          * If it is and we match we disadvantage the load.
1561                          * If it is and we don't match we advantage the load.
1562                          *
1563                          * Also note that we are effectively disadvantaging
1564          * all-but-one by the same amount, so it won't affect
1565                          * the weight1 factor for the all-but-one nodes.
1566                          */
1567                         if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1568                                 if (cpun->child_no != 0) {
1569                                         /* advantage */
1570                                         load -= usched_dfly_weight2;
1571                                 } else {
1572                                         if (usched_dfly_features & 0x10)
1573                                                 load += usched_dfly_weight2;
1574                                         else
1575                                                 load -= usched_dfly_weight2;
1576                                 }
1577                         }
1578
1579                         /*
1580                          * Calculate the best load
1581                          */
1582                         if (cpub == NULL || lowest_load > load ||
1583                             (lowest_load == load &&
1584                              CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1585                         ) {
1586                                 lowest_load = load;
1587                                 cpub = cpun;
1588                         }
1589                 }
1590                 cpup = cpub;
1591         }
1592         if (usched_dfly_chooser)
1593                 kprintf("lp %02d->%02d %s\n",
1594                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1595         return (rdd);
1596 }
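
/*
 * Worked example (made-up numbers) of the per-node load computed above,
 * using the weight values registered with sysctl later in this file
 * (weight1 200, weight2 180, weight3 40, weight4 160).  Suppose a node
 * has two cpus; cpu A carries uload 600 with 2 user threads and cpu B is
 * completely idle:
 *
 *      load = (600 + 2 * 40) + (0 + 0 * 40 - 160) = 520
 *      load /= 2 cpus                             -> 260
 *      load -= 200 (weight1) if lp's current cpu is in this node
 *      load -= or += 180 (weight2) depending on the wakeup-pairing test
 *
 * If lp itself is already accounted for on this node, its own lwp_uload
 * and one weight3 unit are subtracted before the division.  The child
 * node with the smallest resulting value wins and the descent repeats
 * inside it until a terminal cpu node is reached.
 */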
1597
1598 /*
1599  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1600  *
1601  * Choose the worst queue close to dd's cpu node with a non-empty runq
1602  * that is NOT dd.  Also require that the moving of the highest-load thread
1603  * from rdd to dd does not cause the uload's to cross each other.
1604  *
1605  * This is used by the thread chooser when the current cpu's queues are
1606  * empty to steal a thread from another cpu's queue.  We want to offload
1607  * the most heavily-loaded queue.
1608  */
1609 static
1610 dfly_pcpu_t
1611 dfly_choose_worst_queue(dfly_pcpu_t dd)
1612 {
1613         cpumask_t mask;
1614         cpu_node_t *cpup;
1615         cpu_node_t *cpun;
1616         cpu_node_t *cpub;
1617         dfly_pcpu_t rdd;
1618         int cpuid;
1619         int n;
1620         int count;
1621         int load;
1622 #if 0
1623         int pri;
1624         int hpri;
1625 #endif
1626         int highest_load;
1627
1628         /*
1629          * When the topology is unknown we have no load information to
1630          * compare, so do not attempt to steal from another cpu.
1631          */
1632         if (dd->cpunode == NULL) {
1633                 return (NULL);
1634         }
1635
1636         /*
1637          * When the topology is known choose a cpu whose group has, in
1638          * aggregate, the highest weighted load.
1639          */
1640         cpup = root_cpu_node;
1641         rdd = dd;
1642         while (cpup) {
1643                 /*
1644                  * Degenerate case super-root
1645                  */
1646                 if (cpup->child_no == 1) {
1647                         cpup = cpup->child_node[0];
1648                         continue;
1649                 }
1650
1651                 /*
1652                  * Terminal cpunode
1653                  */
1654                 if (cpup->child_no == 0) {
1655                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1656                         break;
1657                 }
1658
1659                 cpub = NULL;
1660                 highest_load = 0;
1661
1662                 for (n = 0; n < cpup->child_no; ++n) {
1663                         /*
1664                          * Accumulate load information for all cpus
1665                          * which are members of this node.
1666                          */
1667                         cpun = cpup->child_node[n];
1668                         mask = cpun->members;
1669                         CPUMASK_ANDMASK(mask, usched_global_cpumask);
1670                         CPUMASK_ANDMASK(mask, smp_active_mask);
1671                         if (CPUMASK_TESTZERO(mask))
1672                                 continue;
1673                         count = 0;
1674                         load = 0;
1675
1676                         while (CPUMASK_TESTNZERO(mask)) {
1677                                 cpuid = BSFCPUMASK(mask);
1678                                 rdd = &dfly_pcpu[cpuid];
1679                                 load += rdd->uload;
1680                                 load += rdd->ucount * usched_dfly_weight3;
1681                                 if (rdd->uschedcp == NULL &&
1682                                     rdd->runqcount == 0 &&
1683                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1684                                 ) {
1685                                         load -= usched_dfly_weight4;
1686                                 }
1687 #if 0
1688                                 else if (rdd->upri > dd->upri + PPQ) {
1689                                         load -= usched_dfly_weight4 / 2;
1690                                 }
1691 #endif
1692                                 CPUMASK_NANDBIT(mask, cpuid);
1693                                 ++count;
1694                         }
1695                         load /= count;
1696
1697                         /*
1698                          * Prefer candidates which are somewhat closer to
1699                          * our cpu.
1700                          */
1701                         if (CPUMASK_TESTMASK(dd->cpumask, cpun->members))
1702                                 load += usched_dfly_weight1;
1703
1704                         /*
1705                          * The best candidate is the one with the worst
1706                          * (highest) load.
1707                          */
1708                         if (cpub == NULL || highest_load < load) {
1709                                 highest_load = load;
1710                                 cpub = cpun;
1711                         }
1712                 }
1713                 cpup = cpub;
1714         }
1715
1716         /*
1717          * We never return our own node (dd), and only return a remote
1718          * node if its load is significantly worse than ours (i.e. where
1719          * stealing a thread would be considered reasonable).
1720          *
1721          * This also helps us avoid breaking paired threads apart which
1722          * can have disastrous effects on performance.
1723          */
1724         if (rdd == dd)
1725                 return(NULL);
1726
1727 #if 0
1728         hpri = 0;
1729         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1730                 hpri = pri;
1731         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1732                 hpri = pri;
1733         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1734                 hpri = pri;
1735         hpri *= PPQ;
1736         if (rdd->uload - hpri < dd->uload + hpri)
1737                 return(NULL);
1738 #endif
1739         return (rdd);
1740 }
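
/*
 * Note on the descent above: it mirrors dfly_choose_best_queue() but
 * keeps the child node with the highest weighted load, and weight1 is
 * added (rather than subtracted) for nodes near dd.  With the weight1
 * value of 200 registered below, for example, a distant node must carry
 * more than 200 extra weighted load units before it is preferred as a
 * steal victim over a node sharing dd's cache domain.
 */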
1741
1742 static
1743 dfly_pcpu_t
1744 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1745 {
1746         dfly_pcpu_t rdd;
1747         cpumask_t tmpmask;
1748         cpumask_t mask;
1749         int cpuid;
1750
1751         /*
1752          * Fallback to the original heuristic, select random cpu,
1753          * first checking cpus not currently running a user thread.
1754          */
1755         ++dfly_scancpu;
1756         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1757         mask = dfly_rdyprocmask;
1758         CPUMASK_NANDMASK(mask, dfly_curprocmask);
1759         CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1760         CPUMASK_ANDMASK(mask, smp_active_mask);
1761         CPUMASK_ANDMASK(mask, usched_global_cpumask);
1762
1763         while (CPUMASK_TESTNZERO(mask)) {
1764                 CPUMASK_ASSNBMASK(tmpmask, cpuid);
1765                 if (CPUMASK_TESTMASK(tmpmask, mask)) {
1766                         CPUMASK_ANDMASK(tmpmask, mask);
1767                         cpuid = BSFCPUMASK(tmpmask);
1768                 } else {
1769                         cpuid = BSFCPUMASK(mask);
1770                 }
1771                 rdd = &dfly_pcpu[cpuid];
1772
1773                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1774                         goto found;
1775                 CPUMASK_NANDBIT(mask, cpuid);
1776         }
1777
1778         /*
1779          * Then cpus which might have a currently running lp
1780          */
1781         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1782         mask = dfly_rdyprocmask;
1783         CPUMASK_ANDMASK(mask, dfly_curprocmask);
1784         CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1785         CPUMASK_ANDMASK(mask, smp_active_mask);
1786         CPUMASK_ANDMASK(mask, usched_global_cpumask);
1787
1788         while (CPUMASK_TESTNZERO(mask)) {
1789                 CPUMASK_ASSNBMASK(tmpmask, cpuid);
1790                 if (CPUMASK_TESTMASK(tmpmask, mask)) {
1791                         CPUMASK_ANDMASK(tmpmask, mask);
1792                         cpuid = BSFCPUMASK(tmpmask);
1793                 } else {
1794                         cpuid = BSFCPUMASK(mask);
1795                 }
1796                 rdd = &dfly_pcpu[cpuid];
1797
1798                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1799                         goto found;
1800                 CPUMASK_NANDBIT(mask, cpuid);
1801         }
1802
1803         /*
1804          * If we cannot find a suitable cpu we reload from dfly_scancpu
1805          * and round-robin.  Other cpus will pick up as they release their
1806          * current lwps or become ready.
1807          *
1808          * Avoid a degenerate system lockup case if usched_global_cpumask
1809          * is set to 0 or otherwise does not cover lwp_cpumask.
1810          *
1811          * We only kick the target helper thread in this case, we do not
1812          * set the user resched flag because
1813          */
1814         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1815         if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
1816                 cpuid = 0;
1817         rdd = &dfly_pcpu[cpuid];
1818 found:
1819         return (rdd);
1820 }
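
/*
 * Illustrative sketch (not compiled) of the two-pass fallback above:
 * first prefer a ready cpu with no current user thread, then accept a
 * cpu whose current user thread has a worse priority than lp, and
 * finally fall back to a round-robin pick.  struct fake_cpu and
 * fake_scan_rover are hypothetical stand-ins for the cpumask scans.
 */
#if 0
struct fake_cpu { int ready; int has_user; int upri; };
static int fake_scan_rover;

static int
simple_pick(const struct fake_cpu *cp, int ncpu, int lp_pri)
{
        int i;

        for (i = 0; i < ncpu; ++i) {            /* pass 1: no user thread */
                if (cp[i].ready && cp[i].has_user == 0)
                        return (i);
        }
        for (i = 0; i < ncpu; ++i) {            /* pass 2: preemptible */
                if (cp[i].ready && cp[i].upri > lp_pri)
                        return (i);
        }
        return (++fake_scan_rover % ncpu);      /* pass 3: round robin */
}
#endif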
1821
1822 static
1823 void
1824 dfly_need_user_resched_remote(void *dummy)
1825 {
1826         globaldata_t gd = mycpu;
1827         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1828
1829         /*
1830          * Flag reschedule needed
1831          */
1832         need_user_resched();
1833
1834         /*
1835          * If no user thread is currently running we need to kick the helper
1836          * on our cpu to recover.  Otherwise the cpu will never schedule
1837          * anything again.
1838          *
1839          * We cannot schedule the process ourselves because this is an
1840          * IPI callback and we cannot acquire spinlocks in an IPI callback.
1841          *
1842          * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1843          */
1844         if (dd->uschedcp == NULL &&
1845             CPUMASK_TESTBIT(dfly_rdyprocmask, gd->gd_cpuid)) {
1846                 ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
1847                 wakeup_mycpu(&dd->helper_thread);
1848         }
1849 }
1850
1851 /*
1852  * dfly_remrunqueue_locked() removes a given process from the run queue
1853  * that it is on, clearing the queue busy bit if it becomes empty.
1854  *
1855  * Note that the user process scheduler is different from the LWKT scheduler.
1856  * The user process scheduler only manages user processes but it uses LWKT
1857  * underneath, and a user process operating in the kernel will often be
1858  * 'released' from our management.
1859  *
1860  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1861  * to sleep or the lwp is moved to a different runq.
1862  */
1863 static void
1864 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1865 {
1866         struct rq *q;
1867         u_int32_t *which;
1868         u_int8_t pri;
1869
1870         KKASSERT(rdd->runqcount >= 0);
1871
1872         pri = lp->lwp_rqindex;
1873
1874         switch(lp->lwp_rqtype) {
1875         case RTP_PRIO_NORMAL:
1876                 q = &rdd->queues[pri];
1877                 which = &rdd->queuebits;
1878                 break;
1879         case RTP_PRIO_REALTIME:
1880         case RTP_PRIO_FIFO:
1881                 q = &rdd->rtqueues[pri];
1882                 which = &rdd->rtqueuebits;
1883                 break;
1884         case RTP_PRIO_IDLE:
1885                 q = &rdd->idqueues[pri];
1886                 which = &rdd->idqueuebits;
1887                 break;
1888         default:
1889                 panic("remrunqueue: invalid rtprio type");
1890                 /* NOT REACHED */
1891         }
1892         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1893         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1894         TAILQ_REMOVE(q, lp, lwp_procq);
1895         --rdd->runqcount;
1896         if (TAILQ_EMPTY(q)) {
1897                 KASSERT((*which & (1 << pri)) != 0,
1898                         ("remrunqueue: remove from empty queue"));
1899                 *which &= ~(1 << pri);
1900         }
1901 }
1902
1903 /*
1904  * dfly_setrunqueue_locked()
1905  *
1906  * Add a process whose rqtype and rqindex have previously been calculated
1907  * onto the appropriate run queue.   Determine if the addition requires
1908  * a reschedule on a cpu and return the cpuid or -1.
1909  *
1910  * NOTE:          Lower priorities are better priorities.
1911  *
1912  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1913  *                sum of the rough lwp_priority for all running and runnable
1914  *                processes.  Lower priority processes (higher lwp_priority
1915  *                values) actually DO count as more load, not less, because
1916  *                these are the programs which require the most care with
1917  *                regards to cpu selection.
1918  */
1919 static void
1920 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1921 {
1922         u_int32_t *which;
1923         struct rq *q;
1924         int pri;
1925
1926         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1927
1928         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1929                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1930                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1931                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1932                 atomic_add_int(&dfly_ucount, 1);
1933         }
1934
1935         pri = lp->lwp_rqindex;
1936
1937         switch(lp->lwp_rqtype) {
1938         case RTP_PRIO_NORMAL:
1939                 q = &rdd->queues[pri];
1940                 which = &rdd->queuebits;
1941                 break;
1942         case RTP_PRIO_REALTIME:
1943         case RTP_PRIO_FIFO:
1944                 q = &rdd->rtqueues[pri];
1945                 which = &rdd->rtqueuebits;
1946                 break;
1947         case RTP_PRIO_IDLE:
1948                 q = &rdd->idqueues[pri];
1949                 which = &rdd->idqueuebits;
1950                 break;
1951         default:
1952                 panic("remrunqueue: invalid rtprio type");
1953                 /* NOT REACHED */
1954         }
1955
1956         /*
1957          * Place us on the selected queue.  Determine if we should be
1958          * placed at the head of the queue or at the end.
1959          *
1960          * We are placed at the tail if our round-robin count has expired,
1961          * or is about to expire and the system thinks it's a good place to
1962          * round-robin, or there is already a next thread on the queue
1963          * (it might be trying to pick up where it left off and we don't
1964          * want to interfere).
1965          */
1966         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1967         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1968         ++rdd->runqcount;
1969
1970         if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
1971             (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
1972              (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
1973         ) {
1974                 /*
1975                  * Place on tail
1976                  */
1977                 atomic_clear_int(&lp->lwp_thread->td_mpflags,
1978                                  TDF_MP_BATCH_DEMARC);
1979                 lp->lwp_rrcount = 0;
1980                 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1981         } else {
1982                 /*
1983                  * Retain rrcount and place on head.  Count is retained
1984                  * even if the queue is empty.
1985                  */
1986                 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
1987         }
1988         *which |= 1 << pri;
1989 }
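
/*
 * Illustrative sketch (not compiled) of the head/tail decision made in
 * dfly_setrunqueue_locked() above: a thread whose round-robin count has
 * expired, or is half expired at a batch demarcation point, goes to the
 * tail with its count reset; otherwise it keeps its count and re-enters
 * at the head so it can resume where it left off.
 */
#if 0
static int
place_at_tail(int rrcount, int rrinterval, int batch_demarc)
{
        if (rrcount >= rrinterval)
                return (1);
        if (rrcount >= rrinterval / 2 && batch_demarc)
                return (1);
        return (0);             /* head insertion, rrcount retained */
}
#endif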
1990
1991 /*
1992  * For SMP systems a user scheduler helper thread is created for each
1993  * cpu and is used to allow one cpu to wakeup another for the purposes of
1994  * scheduling userland threads from setrunqueue().
1995  *
1996  * UP systems do not need the helper since there is only one cpu.
1997  *
1998  * We can't use the idle thread for this because we might block.
1999  * Additionally, doing things this way allows us to HLT idle cpus
2000  * on MP systems.
2001  */
2002 static void
2003 dfly_helper_thread(void *dummy)
2004 {
2005     globaldata_t gd;
2006     dfly_pcpu_t dd;
2007     dfly_pcpu_t rdd;
2008     struct lwp *nlp;
2009     cpumask_t mask;
2010     int cpuid;
2011
2012     gd = mycpu;
2013     cpuid = gd->gd_cpuid;       /* doesn't change */
2014     mask = gd->gd_cpumask;      /* doesn't change */
2015     dd = &dfly_pcpu[cpuid];
2016
2017     /*
2018      * Since we only want to be woken up when no user processes
2019      * are scheduled on a cpu, run at an ultra low priority.
2020      */
2021     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2022
2023     tsleep(&dd->helper_thread, 0, "schslp", 0);
2024
2025     for (;;) {
2026         /*
2027          * We use the LWKT deschedule-interlock trick to avoid racing
2028          * dfly_rdyprocmask.  This means we cannot block through to the
2029          * manual lwkt_switch() call we make below.
2030          */
2031         crit_enter_gd(gd);
2032         tsleep_interlock(&dd->helper_thread, 0);
2033
2034         spin_lock(&dd->spin);
2035
2036         ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2037         clear_user_resched();   /* This satisfies the reschedule request */
2038 #if 0
2039         dd->rrcount = 0;        /* Reset the round-robin counter */
2040 #endif
2041
2042         if (dd->runqcount || dd->uschedcp != NULL) {
2043                 /*
2044                  * Threads are available.  A thread may or may not be
2045                  * currently scheduled.  Get the best thread already queued
2046                  * to this cpu.
2047                  */
2048                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2049                 if (nlp) {
2050                         ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2051                         dd->upri = nlp->lwp_priority;
2052                         dd->uschedcp = nlp;
2053 #if 0
2054                         dd->rrcount = 0;        /* reset round robin */
2055 #endif
2056                         spin_unlock(&dd->spin);
2057                         lwkt_acquire(nlp->lwp_thread);
2058                         lwkt_schedule(nlp->lwp_thread);
2059                 } else {
2060                         /*
2061                          * This situation should not occur because we had
2062                          * at least one thread available.
2063                          */
2064                         spin_unlock(&dd->spin);
2065                 }
2066         } else if (usched_dfly_features & 0x01) {
2067                 /*
2068                  * This cpu is devoid of runnable threads; steal a thread
2069                  * from another cpu.  Since we're stealing, might as well
2070                  * load balance at the same time.
2071                  *
2072                  * We choose the highest-loaded thread from the worst queue.
2073                  *
2074                  * NOTE! This function only returns a non-NULL rdd when
2075                  *       another cpu's queue is obviously overloaded.  We
2076                  *       do not want to perform the type of rebalancing
2077                  *       the schedclock does here because it would result
2078                  *       in insane process pulling when 'steady' state is
2079                  *       partially unbalanced (e.g. 6 runnables and only
2080                  *       4 cores).
2081                  */
2082                 rdd = dfly_choose_worst_queue(dd);
2083                 if (rdd && spin_trylock(&rdd->spin)) {
2084                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2085                         spin_unlock(&rdd->spin);
2086                 } else {
2087                         nlp = NULL;
2088                 }
2089                 if (nlp) {
2090                         ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2091                         dd->upri = nlp->lwp_priority;
2092                         dd->uschedcp = nlp;
2093 #if 0
2094                         dd->rrcount = 0;        /* reset round robin */
2095 #endif
2096                         spin_unlock(&dd->spin);
2097                         lwkt_acquire(nlp->lwp_thread);
2098                         lwkt_schedule(nlp->lwp_thread);
2099                 } else {
2100                         /*
2101                          * Leave the thread on our run queue.  Another
2102                          * scheduler will try to pull it later.
2103                          */
2104                         spin_unlock(&dd->spin);
2105                 }
2106         } else {
2107                 /*
2108                  * devoid of runnable threads and not allowed to steal
2109                  * any.
2110                  */
2111                 spin_unlock(&dd->spin);
2112         }
2113
2114         /*
2115          * We're descheduled unless someone scheduled us.  Switch away.
2116          * Exiting the critical section will cause splz() to be called
2117          * for us if interrupts and such are pending.
2118          */
2119         crit_exit_gd(gd);
2120         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2121     }
2122 }
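
/*
 * Illustrative sketch (not compiled) of the interlocked sleep pattern in
 * the helper loop above: the interlock is armed before the per-cpu state
 * is examined, so a wakeup delivered between the examination and the
 * final tsleep() is not lost; PINTERLOCKED tells tsleep() to honor the
 * earlier arm.  'wchan' stands in for &dd->helper_thread.
 */
#if 0
        for (;;) {
                crit_enter();
                tsleep_interlock(&wchan, 0);    /* arm before checking */
                /* ...examine run queues, possibly schedule a thread... */
                crit_exit();
                tsleep(&wchan, PINTERLOCKED, "schslp", 0);
        }
#endif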
2123
2124 #if 0
2125 static int
2126 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2127 {
2128         int error, new_val;
2129
2130         new_val = usched_dfly_stick_to_level;
2131
2132         error = sysctl_handle_int(oidp, &new_val, 0, req);
2133         if (error != 0 || req->newptr == NULL)
2134                 return (error);
2135         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2136                 return (EINVAL);
2137         usched_dfly_stick_to_level = new_val;
2138         return (0);
2139 }
2140 #endif
2141
2142 /*
2143  * Setup the queues and scheduler helpers (scheduler helpers are SMP only).
2144  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2145  * we should not mess with it further.
2146  */
2147 static void
2148 usched_dfly_cpu_init(void)
2149 {
2150         int i;
2151         int j;
2152         int smt_not_supported = 0;
2153         int cache_coherent_not_supported = 0;
2154
2155         if (bootverbose)
2156                 kprintf("Start usched_dfly helpers on cpus:\n");
2157
2158         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2159         usched_dfly_sysctl_tree =
2160                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2161                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2162                                 "usched_dfly", CTLFLAG_RD, 0, "");
2163
2164         for (i = 0; i < ncpus; ++i) {
2165                 dfly_pcpu_t dd = &dfly_pcpu[i];
2166                 cpumask_t mask;
2167
2168                 CPUMASK_ASSBIT(mask, i);
2169                 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2170                     continue;
2171
2172                 spin_init(&dd->spin, "uschedcpuinit");
2173                 dd->cpunode = get_cpu_node_by_cpuid(i);
2174                 dd->cpuid = i;
2175                 CPUMASK_ASSBIT(dd->cpumask, i);
2176                 for (j = 0; j < NQS; j++) {
2177                         TAILQ_INIT(&dd->queues[j]);
2178                         TAILQ_INIT(&dd->rtqueues[j]);
2179                         TAILQ_INIT(&dd->idqueues[j]);
2180                 }
2181                 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2182
2183                 if (dd->cpunode == NULL) {
2184                         smt_not_supported = 1;
2185                         cache_coherent_not_supported = 1;
2186                         if (bootverbose)
2187                                 kprintf ("    cpu%d - WARNING: No CPU NODE "
2188                                          "found for cpu\n", i);
2189                 } else {
2190                         switch (dd->cpunode->type) {
2191                         case THREAD_LEVEL:
2192                                 if (bootverbose)
2193                                         kprintf ("    cpu%d - HyperThreading "
2194                                                  "available. Core siblings: ",
2195                                                  i);
2196                                 break;
2197                         case CORE_LEVEL:
2198                                 smt_not_supported = 1;
2199
2200                                 if (bootverbose)
2201                                         kprintf ("    cpu%d - No HT available, "
2202                                                  "multi-core/physical "
2203                                                  "cpu. Physical siblings: ",
2204                                                  i);
2205                                 break;
2206                         case CHIP_LEVEL:
2207                                 smt_not_supported = 1;
2208
2209                                 if (bootverbose)
2210                                         kprintf ("    cpu%d - No HT available, "
2211                                                  "single-core/physical cpu. "
2212                                                  "Package siblings: ",
2213                                                  i);
2214                                 break;
2215                         default:
2216                                 /* Let's go for safe defaults here */
2217                                 smt_not_supported = 1;
2218                                 cache_coherent_not_supported = 1;
2219                                 if (bootverbose)
2220                                         kprintf ("    cpu%d - Unknown cpunode->"
2221                                                  "type=%u. siblings: ",
2222                                                  i,
2223                                                  (u_int)dd->cpunode->type);
2224                                 break;
2225                         }
2226
2227                         if (bootverbose) {
2228                                 if (dd->cpunode->parent_node != NULL) {
2229                                         kprint_cpuset(&dd->cpunode->
2230                                                         parent_node->members);
2231                                         kprintf("\n");
2232                                 } else {
2233                                         kprintf(" no siblings\n");
2234                                 }
2235                         }
2236                 }
2237
2238                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2239                             0, i, "usched %d", i);
2240
2241                 /*
2242                  * Allow user scheduling on the target cpu.  cpu #0 has already
2243                  * been enabled in rqinit().
2244                  */
2245                 if (i)
2246                         ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2247                 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2248                 dd->upri = PRIBASE_NULL;
2249
2250         }
2251
2252         /* usched_dfly sysctl configurable parameters */
2253
2254         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2255                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2256                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2257                        &usched_dfly_rrinterval, 0, "");
2258         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2259                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2260                        OID_AUTO, "decay", CTLFLAG_RW,
2261                        &usched_dfly_decay, 0, "Extra decay when not running");
2262
2263         /* Add enable/disable option for SMT scheduling if supported */
2264         if (smt_not_supported) {
2265                 usched_dfly_smt = 0;
2266                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2267                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2268                                   OID_AUTO, "smt", CTLFLAG_RD,
2269                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2270         } else {
2271                 usched_dfly_smt = 1;
2272                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2273                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2274                                OID_AUTO, "smt", CTLFLAG_RW,
2275                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2276         }
2277
2278         /*
2279          * Add enable/disable option for cache coherent scheduling
2280          * if supported
2281          */
2282         if (cache_coherent_not_supported) {
2283                 usched_dfly_cache_coherent = 0;
2284                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2285                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2286                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2287                                   "NOT SUPPORTED", 0,
2288                                   "Cache coherence NOT SUPPORTED");
2289         } else {
2290                 usched_dfly_cache_coherent = 1;
2291                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2292                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2293                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2294                                &usched_dfly_cache_coherent, 0,
2295                                "Enable/Disable cache coherent scheduling");
2296
2297                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2298                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2299                                OID_AUTO, "weight1", CTLFLAG_RW,
2300                                &usched_dfly_weight1, 200,
2301                                "Weight selection for current cpu");
2302
2303                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2304                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2305                                OID_AUTO, "weight2", CTLFLAG_RW,
2306                                &usched_dfly_weight2, 180,
2307                                "Weight selection for wakefrom cpu");
2308
2309                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2310                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2311                                OID_AUTO, "weight3", CTLFLAG_RW,
2312                                &usched_dfly_weight3, 40,
2313                                "Weight selection for num threads on queue");
2314
2315                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2316                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2317                                OID_AUTO, "weight4", CTLFLAG_RW,
2318                                &usched_dfly_weight4, 160,
2319                                "Availability of other idle cpus");
2320
2321                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2322                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2323                                OID_AUTO, "fast_resched", CTLFLAG_RW,
2324                                &usched_dfly_fast_resched, 0,
2325                                "Availability of other idle cpus");
2326
2327                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2328                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2329                                OID_AUTO, "features", CTLFLAG_RW,
2330                                &usched_dfly_features, 0x8F,
2331                                "Allow pulls into empty queues");
2332
2333                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2334                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2335                                OID_AUTO, "swmask", CTLFLAG_RW,
2336                                &usched_dfly_swmask, ~PPQMASK,
2337                                "Queue mask to force thread switch");
2338
2339 #if 0
2340                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2341                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2342                                 OID_AUTO, "stick_to_level",
2343                                 CTLTYPE_INT | CTLFLAG_RW,
2344                                 NULL, sizeof usched_dfly_stick_to_level,
2345                                 sysctl_usched_dfly_stick_to_level, "I",
2346                                 "Stick a process to this level. See sysctl"
2347                                 "paremter hw.cpu_topology.level_description");
2348 #endif
2349         }
2350 }
2351 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2352         usched_dfly_cpu_init, NULL);
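
/*
 * The knobs registered above appear under kern.usched_dfly and can be
 * inspected or adjusted at runtime, e.g. (shell, illustrative values):
 *
 *      sysctl kern.usched_dfly.weight1         # report the current value
 *      sysctl kern.usched_dfly.smt=1           # enable SMT scheduling
 *      sysctl kern.usched_dfly.rrinterval=8    # shorten the rr quantum
 */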