kernel - Fix sysclock_t comparison in usched code
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
89
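/*
 * Annotation (derived from the macros above, not part of the original
 * source): with MAXPRI = 128 and NQS = 32 there are PPQ = 4 priority
 * levels per queue.  ESTCPUMAX is 512 * 32 = 16384, and the units work
 * out to nominally one queue per ESTCPUPPQ = 512 estcpu and per
 * NICEPPQ = 2 nice levels, before the final scaling applied in
 * dfly_resetpriority().
 */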
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_estfast     lwp_usdata.dfly.estfast
97 #define lwp_uload       lwp_usdata.dfly.uload
98 #define lwp_rqtype      lwp_usdata.dfly.rqtype
99 #define lwp_qcpu        lwp_usdata.dfly.qcpu
100 #define lwp_rrcount     lwp_usdata.dfly.rrcount
101
102 struct usched_dfly_pcpu {
103         struct spinlock spin;
104         struct thread   helper_thread;
105         short           unused01;
106         short           upri;
107         int             uload;
108         int             ucount;
109         struct lwp      *uschedcp;
110         struct rq       queues[NQS];
111         struct rq       rtqueues[NQS];
112         struct rq       idqueues[NQS];
113         u_int32_t       queuebits;
114         u_int32_t       rtqueuebits;
115         u_int32_t       idqueuebits;
116         int             runqcount;
117         int             cpuid;
118         cpumask_t       cpumask;
119 #ifdef SMP
120         cpu_node_t      *cpunode;
121 #endif
122 };
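/*
 * Annotation: uload accumulates the lwp_uload weights of lwps accounted to
 * this cpu (those with LWP_MP_ULOAD set) and ucount counts them.  upri
 * mirrors the priority of uschedcp, the lwp currently designated to run
 * userland code on this cpu (or PRIBASE_NULL when there is none), and
 * queuebits/rtqueuebits/idqueuebits mark which of the 32 queues in each
 * class are non-empty.
 */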
123
124 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
125
126 static void dfly_acquire_curproc(struct lwp *lp);
127 static void dfly_release_curproc(struct lwp *lp);
128 static void dfly_select_curproc(globaldata_t gd);
129 static void dfly_setrunqueue(struct lwp *lp);
130 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
131 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
132                                 sysclock_t cpstamp);
133 static void dfly_recalculate_estcpu(struct lwp *lp);
134 static void dfly_resetpriority(struct lwp *lp);
135 static void dfly_forking(struct lwp *plp, struct lwp *lp);
136 static void dfly_exiting(struct lwp *lp, struct proc *);
137 static void dfly_uload_update(struct lwp *lp);
138 static void dfly_yield(struct lwp *lp);
139 #ifdef SMP
140 static void dfly_changeqcpu_locked(struct lwp *lp,
141                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
142 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
143 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
144 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
145 #endif
146
147 #ifdef SMP
148 static void dfly_need_user_resched_remote(void *dummy);
149 #endif
150 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
151                                           struct lwp *chklp, int worst);
152 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
153 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
154
155 struct usched usched_dfly = {
156         { NULL },
157         "dfly", "Original DragonFly Scheduler",
158         NULL,                   /* default registration */
159         NULL,                   /* default deregistration */
160         dfly_acquire_curproc,
161         dfly_release_curproc,
162         dfly_setrunqueue,
163         dfly_schedulerclock,
164         dfly_recalculate_estcpu,
165         dfly_resetpriority,
166         dfly_forking,
167         dfly_exiting,
168         dfly_uload_update,
169         NULL,                   /* setcpumask not supported */
170         dfly_yield
171 };
172
173 /*
174  * We have NQS (32) run queues per scheduling class.  For the normal
175  * class, there are 128 priorities scaled onto these 32 queues.  New
176  * processes are added to the last entry in each queue, and processes
177  * are selected for running by taking them from the head and maintaining
178  * a simple FIFO arrangement.  Realtime and Idle priority processes have
179  * an explicit 0-31 priority which maps directly onto their class queue
180  * index.  When a queue has something in it, the corresponding bit is
181  * set in the queuebits variable, allowing a single read to determine
182  * the state of all 32 queues and then a ffs() to find the first busy
183  * queue.
184  */
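/*
 * Sketch of the bitmask lookup described above (annotation, illustrative
 * only):
 *
 *      pri = bsfl(rdd->queuebits);     lowest-numbered non-empty queue
 *      q   = &rdd->queues[pri];        FIFO of lwps in that priority band
 *      lp  = TAILQ_FIRST(q);           candidate to run next
 *
 * dfly_chooseproc_locked() below performs this scan, consulting the
 * realtime, normal, and idle bitmasks in that order.
 */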
185 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
186 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
187 #ifdef SMP
188 static volatile int dfly_scancpu;
189 #endif
190 static volatile int dfly_ucount;        /* total running on whole system */
191 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
192 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
193 static struct sysctl_oid *usched_dfly_sysctl_tree;
194
195 /* Debug info exposed through debug.* sysctl */
196
197 static int usched_dfly_debug = -1;
198 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
199            &usched_dfly_debug, 0,
200            "Print debug information for this pid");
201
202 static int usched_dfly_pid_debug = -1;
203 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
204            &usched_dfly_pid_debug, 0,
205            "Print KTR debug information for this pid");
206
207 static int usched_dfly_chooser = 0;
208 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
209            &usched_dfly_chooser, 0,
210            "Print debug information for the next N scheduler decisions");
211
212 /*
213  * Tuning usched_dfly - configurable through kern.usched_dfly.
214  *
215  * weight1 - Tries to keep threads on their current cpu.  If you
216  *           make this value too large the scheduler will not be
217  *           able to load-balance large loads.
218  *
219  * weight2 - If non-zero, detects thread pairs undergoing synchronous
220  *           communications and tries to move them closer together.
221  *           Behavior is adjusted by bit 4 of features (0x10).
222  *
223  *           WARNING!  Weight2 is a ridiculously sensitive parameter;
224  *           a small value is recommended.
225  *
226  * weight3 - Weighting based on the number of recently runnable threads
227  *           on the userland scheduling queue (ignoring their loads).
228  *           A nominal value here prevents high-priority (low-load)
229  *           threads from accumulating on one cpu core when other
230  *           cores are available.
231  *
232  *           This value should be left fairly small relative to weight1
233  *           and weight4.
234  *
235  * weight4 - Weighting based on other cpu queues being available
236  *           or running processes with higher lwp_priority values.
237  *
238  *           This allows a thread to migrate to another nearby cpu if it
239  *           is unable to run on the current cpu based on the other cpu
240  *           being idle or running a lower priority (higher lwp_priority)
241  *           thread.  This value should be large enough to override weight1.
242  *
243  * features - These flags can be set or cleared to enable or disable various
244  *            features.
245  *
246  *            0x01      Enable idle-cpu pulling                 (default)
247  *            0x02      Enable proactive pushing                (default)
248  *            0x04      Enable rebalancing rover                (default)
249  *            0x08      Enable more proactive pushing           (default)
250  *            0x10      (flip weight2 limit on same cpu)        (default)
251  *            0x20      choose best cpu for forked process
252  *            0x40      choose current cpu for forked process
253  *            0x80      choose random cpu for forked process    (default)
254  */
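/*
 * Usage note (annotation): per the comment above these knobs live under
 * the kern.usched_dfly sysctl tree and can be changed at runtime, e.g.
 *
 *      sysctl kern.usched_dfly.weight2=180
 *      sysctl kern.usched_dfly.features=0x8f
 *
 * (the exact leaf names follow the tunables listed above).
 */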
255 #ifdef SMP
256 static int usched_dfly_smt = 0;
257 static int usched_dfly_cache_coherent = 0;
258 static int usched_dfly_weight1 = 200;   /* keep thread on current cpu */
259 static int usched_dfly_weight2 = 180;   /* synchronous peer's current cpu */
260 static int usched_dfly_weight3 = 40;    /* number of threads on queue */
261 static int usched_dfly_weight4 = 160;   /* availability of idle cores */
262 static int usched_dfly_features = 0x8F; /* allow pulls */
263 #endif
264 static int usched_dfly_fast_resched = 0; /* delta priority / resched */
265 static int usched_dfly_swmask = ~PPQMASK; /* mask for LWKT td_upri hint */
266 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
267 static int usched_dfly_decay = 8;
268
269 /* KTR debug printings */
270
271 KTR_INFO_MASTER(usched);
272
273 #if !defined(KTR_USCHED_DFLY)
274 #define KTR_USCHED_DFLY KTR_ALL
275 #endif
276
277 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
278     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
279     pid_t pid, int old_cpuid, int curr);
280
281 /*
282  * This function is called when the kernel intends to return to userland.
283  * It is responsible for making the thread the current designated userland
284  * thread for this cpu, blocking if necessary.
285  *
286  * The kernel will not depress our LWKT priority until after we return,
287  * in case we have to shove over to another cpu.
288  *
289  * We must determine our thread's disposition before we switch away.  This
290  * is very sensitive code.
291  *
292  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
293  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
294  * occur, this function is called only under very controlled circumstances.
295  */
296 static void
297 dfly_acquire_curproc(struct lwp *lp)
298 {
299         globaldata_t gd;
300         dfly_pcpu_t dd;
301 #ifdef SMP
302         dfly_pcpu_t rdd;
303 #endif
304         thread_t td;
305         int force_resched;
306
307         /*
308          * Make sure we aren't sitting on a tsleep queue.
309          */
310         td = lp->lwp_thread;
311         crit_enter_quick(td);
312         if (td->td_flags & TDF_TSLEEPQ)
313                 tsleep_remove(td);
314         dfly_recalculate_estcpu(lp);
315
316         gd = mycpu;
317         dd = &dfly_pcpu[gd->gd_cpuid];
318
319         /*
320          * Process any pending interrupts/ipi's, then handle reschedule
321          * requests.  dfly_release_curproc() will try to assign a new
322          * uschedcp that isn't us and otherwise NULL it out.
323          */
324         force_resched = 0;
325         if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
326             lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
327                 force_resched = 1;
328         }
329
330         if (user_resched_wanted()) {
331                 if (dd->uschedcp == lp)
332                         force_resched = 1;
333                 clear_user_resched();
334                 dfly_release_curproc(lp);
335         }
336
337         /*
338          * Loop until we are the current user thread.
339          *
340          * NOTE: dd spinlock not held at top of loop.
341          */
342         if (dd->uschedcp == lp)
343                 lwkt_yield_quick();
344
345         while (dd->uschedcp != lp) {
346                 lwkt_yield_quick();
347
348                 spin_lock(&dd->spin);
349
350                 /*
351                  * We are not or are no longer the current lwp and a forced
352                  * reschedule was requested.  Figure out the best cpu to
353                  * run on (our current cpu will be given significant weight).
354                  *
355                  * (if a reschedule was not requested we want to move this
356                  *  step after the uschedcp tests).
357                  */
358 #ifdef SMP
359                 if (force_resched &&
360                     (usched_dfly_features & 0x08) &&
361                     (rdd = dfly_choose_best_queue(lp)) != dd) {
362                         dfly_changeqcpu_locked(lp, dd, rdd);
363                         spin_unlock(&dd->spin);
364                         lwkt_deschedule(lp->lwp_thread);
365                         dfly_setrunqueue_dd(rdd, lp);
366                         lwkt_switch();
367                         gd = mycpu;
368                         dd = &dfly_pcpu[gd->gd_cpuid];
369                         continue;
370                 }
371 #endif
372
373                 /*
374                  * Either no reschedule was requested or the best queue was
375                  * dd, and no current process has been selected.  We can
376                  * trivially become the current lwp on the current cpu.
377                  */
378                 if (dd->uschedcp == NULL) {
379                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
380                         dd->uschedcp = lp;
381                         dd->upri = lp->lwp_priority;
382                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
383                         spin_unlock(&dd->spin);
384                         break;
385                 }
386
387                 /*
388                  * Can we steal the current designated user thread?
389                  *
390                  * If we do the other thread will stall when it tries to
391                  * return to userland, possibly rescheduling elsewhere.
392                  *
393                  * It is important to do a masked test to avoid the edge
394                  * case where two near-equal-priority threads are constantly
395                  * interrupting each other.
396                  *
397                  * In the exact match case another thread has already gained
398  * uschedcp and lowered its priority; if we steal it, the
399                  * other thread will stay stuck on the LWKT runq and not
400                  * push to another cpu.  So don't steal on equal-priority even
401                  * though it might appear to be more beneficial due to not
402                  * having to switch back to the other thread's context.
403                  *
404                  * usched_dfly_fast_resched requires that two threads be
405                  * significantly far apart in priority in order to interrupt.
406                  *
407                  * If better but not sufficiently far apart, the current
408                  * uschedcp will be interrupted at the next scheduler clock.
409                  */
410                 if (dd->uschedcp &&
411                    (dd->upri & ~PPQMASK) >
412                    (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
413                         dd->uschedcp = lp;
414                         dd->upri = lp->lwp_priority;
415                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
416                         spin_unlock(&dd->spin);
417                         break;
418                 }
419 #ifdef SMP
420                 /*
421                  * We are not the current lwp, figure out the best cpu
422                  * to run on (our current cpu will be given significant
423                  * weight).  Loop on cpu change.
424                  */
425                 if ((usched_dfly_features & 0x02) &&
426                     force_resched == 0 &&
427                     (rdd = dfly_choose_best_queue(lp)) != dd) {
428                         dfly_changeqcpu_locked(lp, dd, rdd);
429                         spin_unlock(&dd->spin);
430                         lwkt_deschedule(lp->lwp_thread);
431                         dfly_setrunqueue_dd(rdd, lp);
432                         lwkt_switch();
433                         gd = mycpu;
434                         dd = &dfly_pcpu[gd->gd_cpuid];
435                         continue;
436                 }
437 #endif
438
439                 /*
440                  * We cannot become the current lwp, so place the lp on the
441                  * run-queue of this or another cpu and deschedule ourselves.
442                  *
443                  * When we are reactivated we will have another chance.
444                  *
445                  * Reload after a switch or setrunqueue/switch possibly
446                  * moved us to another cpu.
447                  */
448                 spin_unlock(&dd->spin);
449                 lwkt_deschedule(lp->lwp_thread);
450                 dfly_setrunqueue_dd(dd, lp);
451                 lwkt_switch();
452                 gd = mycpu;
453                 dd = &dfly_pcpu[gd->gd_cpuid];
454         }
455
456         /*
457          * Make sure upri is synchronized, then yield to LWKT threads as
458          * needed before returning.  This could result in another reschedule.
459          * XXX
460          */
461         crit_exit_quick(td);
462
463         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
464 }
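/*
 * Annotation: each pass through the loop above ends in one of three ways:
 * the thread migrates to a better cpu chosen by dfly_choose_best_queue(),
 * it becomes dd->uschedcp directly because the slot is free or held by a
 * sufficiently lower-priority thread, or it is queued on a run queue and
 * deschedules itself until this or another cpu reschedules it.
 */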
465
466 /*
467  * DFLY_RELEASE_CURPROC
468  *
469  * This routine detaches the current thread from the userland scheduler,
470  * usually because the thread needs to run or block in the kernel (at
471  * kernel priority) for a while.
472  *
473  * This routine is also responsible for selecting a new thread to
474  * become the current thread.
475  *
476  * NOTE: This implementation differs from the dummy example in that
477  * dfly_select_curproc() is able to select the current process, whereas
478  * dummy_select_curproc() is not able to select the current process.
479  * This means we have to NULL out uschedcp.
480  *
481  * Additionally, note that we may already be on a run queue if releasing
482  * via the lwkt_switch() in dfly_setrunqueue().
483  */
484 static void
485 dfly_release_curproc(struct lwp *lp)
486 {
487         globaldata_t gd = mycpu;
488         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
489
490         /*
491          * If lp is this cpu's designated userland process, release the
492          * designation (NULL out uschedcp) and select a replacement.
493          */
494         if (dd->uschedcp == lp) {
495                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
496                 spin_lock(&dd->spin);
497                 if (dd->uschedcp == lp) {
498                         dd->uschedcp = NULL;    /* don't let lp be selected */
499                         dd->upri = PRIBASE_NULL;
500                         atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
501                         spin_unlock(&dd->spin);
502                         dfly_select_curproc(gd);
503                 } else {
504                         spin_unlock(&dd->spin);
505                 }
506         }
507 }
508
509 /*
510  * DFLY_SELECT_CURPROC
511  *
512  * Select a new current process for this cpu and clear any pending user
513  * reschedule request.  The cpu currently has no current process.
514  *
515  * This routine is also responsible for equal-priority round-robining,
516  * typically triggered from dfly_schedulerclock().  In our dummy example
517  * all the 'user' threads are LWKT scheduled all at once and we just
518  * call lwkt_switch().
519  *
520  * The calling process is not on the queue and cannot be selected.
521  */
522 static
523 void
524 dfly_select_curproc(globaldata_t gd)
525 {
526         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
527         struct lwp *nlp;
528         int cpuid = gd->gd_cpuid;
529
530         crit_enter_gd(gd);
531
532         spin_lock(&dd->spin);
533         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
534
535         if (nlp) {
536                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
537                 dd->upri = nlp->lwp_priority;
538                 dd->uschedcp = nlp;
539 #if 0
540                 dd->rrcount = 0;                /* reset round robin */
541 #endif
542                 spin_unlock(&dd->spin);
543 #ifdef SMP
544                 lwkt_acquire(nlp->lwp_thread);
545 #endif
546                 lwkt_schedule(nlp->lwp_thread);
547         } else {
548                 spin_unlock(&dd->spin);
549         }
550         crit_exit_gd(gd);
551 }
552
553 /*
554  * Place the specified lwp on the user scheduler's run queue.  This routine
555  * must be called with the thread descheduled.  The lwp must be runnable.
556  * It must not be possible for anyone else to explicitly schedule this thread.
557  *
558  * The thread may be the current thread as a special case.
559  */
560 static void
561 dfly_setrunqueue(struct lwp *lp)
562 {
563         dfly_pcpu_t dd;
564         dfly_pcpu_t rdd;
565
566         /*
567          * First validate the process LWKT state.
568          */
569         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
570         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
571             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
572              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
573         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
574
575         /*
576          * NOTE: dd/rdd do not necessarily represent the current cpu.
577          *       Instead they may represent the cpu the thread was last
578          *       scheduled on or inherited by its parent.
579          */
580         dd = &dfly_pcpu[lp->lwp_qcpu];
581         rdd = dd;
582
583         /*
584          * This process is not supposed to be scheduled anywhere or assigned
585          * as the current process anywhere.  Assert the condition.
586          */
587         KKASSERT(rdd->uschedcp != lp);
588
589 #ifndef SMP
590         /*
591          * If we are not SMP we do not have a scheduler helper to kick
592          * and must directly activate the process if none are scheduled.
593          *
594          * This is really only an issue when bootstrapping init since
595          * the caller in all other cases will be a user process, and
596          * even if released (rdd->uschedcp == NULL), that process will
597          * kickstart the scheduler when it returns to user mode from
598          * the kernel.
599          *
600          * NOTE: On SMP we can't just set some other cpu's uschedcp.
601          */
602         if (rdd->uschedcp == NULL) {
603                 spin_lock(&rdd->spin);
604                 if (rdd->uschedcp == NULL) {
605                         atomic_set_cpumask(&dfly_curprocmask, 1);
606                         rdd->uschedcp = lp;
607                         rdd->upri = lp->lwp_priority;
608                         spin_unlock(&rdd->spin);
609                         lwkt_schedule(lp->lwp_thread);
610                         return;
611                 }
612                 spin_unlock(&rdd->spin);
613         }
614 #endif
615
616 #ifdef SMP
617         /*
618          * Ok, we have to setrunqueue some target cpu and request a reschedule
619          * if necessary.
620          *
621          * We have to choose the best target cpu.  It might not be the current
622          * target even if the current cpu has no running user thread (for
623          * example, because the current cpu might be a hyperthread and its
624          * sibling has a thread assigned).
625          *
626          * If we just forked it is most optimal to run the child on the same
627          * cpu just in case the parent decides to wait for it (thus getting
628          * off that cpu).  As long as there is nothing else runnable on the
629          * cpu, that is.  If we did this unconditionally a parent forking
630          * multiple children before waiting (e.g. make -j N) leaves other
631          * cpus idle that could be working.
632          */
633         if (lp->lwp_forked) {
634                 lp->lwp_forked = 0;
635                 if (usched_dfly_features & 0x20)
636                         rdd = dfly_choose_best_queue(lp);
637                 else if (usched_dfly_features & 0x40)
638                         rdd = &dfly_pcpu[lp->lwp_qcpu];
639                 else if (usched_dfly_features & 0x80)
640                         rdd = dfly_choose_queue_simple(rdd, lp);
641                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
642                         rdd = dfly_choose_best_queue(lp);
643                 else
644                         rdd = &dfly_pcpu[lp->lwp_qcpu];
645         } else {
646                 rdd = dfly_choose_best_queue(lp);
647                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
648         }
649         if (lp->lwp_qcpu != rdd->cpuid) {
650                 spin_lock(&dd->spin);
651                 dfly_changeqcpu_locked(lp, dd, rdd);
652                 spin_unlock(&dd->spin);
653         }
654 #endif
655         dfly_setrunqueue_dd(rdd, lp);
656 }
657
658 #ifdef SMP
659
660 /*
661  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
662  * spin-locked on-call.  rdd does not have to be.
663  */
664 static void
665 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
666 {
667         if (lp->lwp_qcpu != rdd->cpuid) {
668                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
669                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
670                         atomic_add_int(&dd->uload, -lp->lwp_uload);
671                         atomic_add_int(&dd->ucount, -1);
672                         atomic_add_int(&dfly_ucount, -1);
673                 }
674                 lp->lwp_qcpu = rdd->cpuid;
675         }
676 }
677
678 #endif
679
680 /*
681  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
682  * also performs all necessary ancillary notification actions.
683  */
684 static void
685 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
686 {
687 #ifdef SMP
688         globaldata_t rgd;
689
690         /*
691          * We might be moving the lp to another cpu's run queue, and once
692          * on the runqueue (even if it is our cpu's), another cpu can rip
693          * it away from us.
694          *
695          * TDF_MIGRATING might already be set if this is part of a
696          * remrunqueue+setrunqueue sequence.
697          */
698         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
699                 lwkt_giveaway(lp->lwp_thread);
700
701         rgd = globaldata_find(rdd->cpuid);
702
703         /*
704          * We lose control of the lp the moment we release the spinlock
705          * after having placed it on the queue.  i.e. another cpu could pick
706          * it up, or it could exit, or its priority could be further
707          * adjusted, or something like that.
708          *
709          * WARNING! rdd can point to a foreign cpu!
710          */
711         spin_lock(&rdd->spin);
712         dfly_setrunqueue_locked(rdd, lp);
713
714         /*
715          * Potentially interrupt the currently-running thread
716          */
717         if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
718                 /*
719                  * Currently running thread is better or same, do not
720                  * interrupt.
721                  */
722                 spin_unlock(&rdd->spin);
723         } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
724                    usched_dfly_fast_resched) {
725                 /*
726                  * Currently running thread is not better, but not so bad
727                  * that we need to interrupt it.  Let it run for one more
728                  * scheduler tick.
729                  */
730                 if (rdd->uschedcp &&
731                     rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
732                         rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
733                 }
734                 spin_unlock(&rdd->spin);
735         } else if (rgd == mycpu) {
736                 /*
737                  * We should interrupt the currently running thread, which
738                  * is on the current cpu.
739                  */
740                 spin_unlock(&rdd->spin);
741                 if (rdd->uschedcp == NULL) {
742                         wakeup_mycpu(&rdd->helper_thread); /* XXX */
743                         need_user_resched();
744                 } else {
745                         need_user_resched();
746                 }
747         } else {
748                 /*
749                  * We should interrupt the currently running thread, which
750                  * is on a different cpu.
751                  */
752                 spin_unlock(&rdd->spin);
753                 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
754         }
755 #else
756         /*
757          * Request a reschedule if appropriate.
758          */
759         spin_lock(&rdd->spin);
760         dfly_setrunqueue_locked(rdd, lp);
761         spin_unlock(&rdd->spin);
762         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
763                 need_user_resched();
764         }
765 #endif
766 }
767
768 /*
769  * This routine is called from a systimer IPI.  It MUST be MP-safe and
770  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
771  * each cpu.
772  */
773 static
774 void
775 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
776 {
777         globaldata_t gd = mycpu;
778 #ifdef SMP
779         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
780 #endif
781
782         /*
783          * Spinlocks also hold a critical section so there should not be
784          * any active.
785          */
786         KKASSERT(gd->gd_spinlocks == 0);
787
788         if (lp == NULL)
789                 return;
790
791         /*
792          * Do we need to round-robin?  We round-robin 10 times a second.
793          * This should only occur for cpu-bound batch processes.
794          */
795         if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
796                 lp->lwp_thread->td_wakefromcpu = -1;
797                 need_user_resched();
798         }
799
800         /*
801          * Adjust estcpu upward using a real time equivalent calculation,
802          * and recalculate lp's priority.
803          */
804         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
805         dfly_resetpriority(lp);
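        /*
         * Annotation: each schedulerclock tick adds ESTCPUMAX/ESTCPUFREQ + 1
         * estcpu, so absent the decay applied in dfly_recalculate_estcpu()
         * a thread that runs continuously saturates ESTCPUMAX after roughly
         * ESTCPUFREQ ticks, i.e. about one second of cpu-bound execution,
         * pushing it toward the batch queues.
         */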
806
807         /*
808          * Rebalance two cpus every 8 ticks, pulling the worst thread
809          * from the worst cpu's queue into a rotating cpu number.
810          *
811          * This mechanic is needed because the push algorithms can
812  * steady-state in a non-optimal configuration.  We need to mix it
813          * up a little, even if it means breaking up a paired thread, so
814          * the push algorithms can rebalance the degenerate conditions.
815          * This portion of the algorithm exists to ensure stability at the
816          * selected weightings.
817          *
818          * Because we might be breaking up optimal conditions we do not want
819          * to execute this too quickly, hence we only rebalance approximately
820  * ~7-8 times per second.  The pushes, on the other hand, are capable of
821  * moving threads to other cpus at a much higher rate.
822          *
823          * We choose the most heavily loaded thread from the worst queue
824          * in order to ensure that multiple heavy-weight threads on the same
825          * queue get broken up, and also because these threads are the most
826          * likely to be able to remain in place.  Hopefully then any pairings,
827          * if applicable, migrate to where these threads are.
828          */
829 #ifdef SMP
830         if ((usched_dfly_features & 0x04) &&
831             ((u_int)sched_ticks & 7) == 0 &&
832             (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
833                 /*
834                  * Our cpu is up.
835                  */
836                 struct lwp *nlp;
837                 dfly_pcpu_t rdd;
838
839                 rdd = dfly_choose_worst_queue(dd);
840                 if (rdd) {
841                         spin_lock(&dd->spin);
842                         if (spin_trylock(&rdd->spin)) {
843                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
844                                 spin_unlock(&rdd->spin);
845                                 if (nlp == NULL)
846                                         spin_unlock(&dd->spin);
847                         } else {
848                                 spin_unlock(&dd->spin);
849                                 nlp = NULL;
850                         }
851                 } else {
852                         nlp = NULL;
853                 }
854                 /* dd->spin held if nlp != NULL */
855
856                 /*
857                  * Either schedule it or add it to our queue.
858                  */
859                 if (nlp &&
860                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
861                         atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
862                         dd->upri = nlp->lwp_priority;
863                         dd->uschedcp = nlp;
864 #if 0
865                         dd->rrcount = 0;        /* reset round robin */
866 #endif
867                         spin_unlock(&dd->spin);
868                         lwkt_acquire(nlp->lwp_thread);
869                         lwkt_schedule(nlp->lwp_thread);
870                 } else if (nlp) {
871                         dfly_setrunqueue_locked(dd, nlp);
872                         spin_unlock(&dd->spin);
873                 }
874         }
875 #endif
876 }
877
878 /*
879  * Called from acquire and from kern_synch's one-second timer (one of the
880  * callout helper threads) with a critical section held.
881  *
882  * Adjust p_estcpu based on our single-cpu load and p_nice, and compensate
883  * for overall system load.
884  *
885  * Note that no recalculation occurs for a process which sleeps and wakes
886  * up in the same tick.  That is, a system doing thousands of context
887  * switches per second will still only do serious estcpu calculations
888  * ESTCPUFREQ times per second.
889  */
890 static
891 void
892 dfly_recalculate_estcpu(struct lwp *lp)
893 {
894         globaldata_t gd = mycpu;
895         sysclock_t cpbase;
896         sysclock_t ttlticks;
897         int estcpu;
898         int decay_factor;
899         int ucount;
900
901         /*
902          * We have to subtract periodic to get the last schedclock
903          * timeout time, otherwise we would get the upcoming timeout.
904          * Keep in mind that a process can migrate between cpus and
905          * while the scheduler clock should be very close, boundary
906          * conditions could lead to a small negative delta.
907          */
908         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
909
910         if (lp->lwp_slptime > 1) {
911                 /*
912                  * Too much time has passed, do a coarse correction.
913                  */
914                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
915                 dfly_resetpriority(lp);
916                 lp->lwp_cpbase = cpbase;
917                 lp->lwp_cpticks = 0;
918                 lp->lwp_estfast = 0;
919         } else if (lp->lwp_cpbase != cpbase) {
920                 /*
921                  * Adjust estcpu if we are in a different tick.  Don't waste
922                  * time if we are in the same tick.
923                  *
924                  * First calculate the number of ticks in the measurement
925                  * interval.  The ttlticks calculation can wind up 0 due to
926                  * a bug in the handling of lwp_slptime (as yet not found),
927                  * so make sure we do not get a divide by 0 panic.
928                  */
929                 ttlticks = (cpbase - lp->lwp_cpbase) /
930                            gd->gd_schedclock.periodic;
931                 if ((ssysclock_t)ttlticks < 0) {
932                         ttlticks = 0;
933                         lp->lwp_cpbase = cpbase;
934                 }
935                 if (ttlticks == 0)
936                         return;
937                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
938
939                 /*
940                  * Calculate the percentage of one cpu being used then
941                  * compensate for any system load in excess of ncpus.
942                  *
943                  * For example, if we have 8 cores and 16 running cpu-bound
944                  * processes then all things being equal each process will
945                  * get 50% of one cpu.  We need to pump this value back
946                  * up to 100% so the estcpu calculation properly adjusts
947                  * the process's dynamic priority.
948                  *
949                  * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
950                  */
951                 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
952                 ucount = dfly_ucount;
953                 if (ucount > ncpus) {
954                         estcpu += estcpu * (ucount - ncpus) / ncpus;
955                 }
956
957                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
958                         kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
959                                 lp->lwp_proc->p_pid, lp,
960                                 estcpu, lp->lwp_estcpu,
961                                 lp->lwp_cpticks, ttlticks);
962                 }
963
964                 /*
965                  * Adjust lp->lwp_estcpu.  The decay factor determines how
966                  * quickly lwp_estcpu collapses to its realtime calculation.
967                  * A slower collapse gives us a more accurate number over
968                  * the long term but can create problems with bursty threads
969                  * or threads which become cpu hogs.
970                  *
971                  * To solve this problem, newly started lwps and lwps which
972                  * are restarting after having been asleep for a while are
973                  * given a much, much faster decay in order to quickly
974                  * detect whether they become cpu-bound.
975                  *
976                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
977                  *       and not here, but we must still ensure that a
978                  *       cpu-bound nice -20 process does not completely
979                  *       override a cpu-bound nice +20 process.
980                  *
981                  * NOTE: We must use ESTCPULIM() here to deal with any
982                  *       overshoot.
983                  */
984                 decay_factor = usched_dfly_decay;
985                 if (decay_factor < 1)
986                         decay_factor = 1;
987                 if (decay_factor > 1024)
988                         decay_factor = 1024;
989
990                 if (lp->lwp_estfast < usched_dfly_decay) {
991                         ++lp->lwp_estfast;
992                         lp->lwp_estcpu = ESTCPULIM(
993                                 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
994                                 (lp->lwp_estfast + 1));
995                 } else {
996                         lp->lwp_estcpu = ESTCPULIM(
997                                 (lp->lwp_estcpu * decay_factor + estcpu) /
998                                 (decay_factor + 1));
999                 }
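                /*
                 * Worked example (annotation): with the default decay of 8,
                 * a thread previously at estcpu 0 that measures 16384 this
                 * interval moves to (0 * 8 + 16384) / 9 ~= 1820, i.e. the
                 * estimate closes about 1/9th of the gap per interval.  The
                 * lwp_estfast path above instead uses a divisor that starts
                 * at 2, so freshly started or woken threads converge much
                 * faster.
                 */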
1000
1001                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1002                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
1003                 dfly_resetpriority(lp);
1004                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1005                 lp->lwp_cpticks = 0;
1006         }
1007 }
1008
1009 /*
1010  * Compute the priority of a process when running in user mode.
1011  * Arrange to reschedule if the resulting priority is better
1012  * than that of the current process.
1013  *
1014  * This routine may be called with any process.
1015  *
1016  * This routine is called by fork1() for initial setup with the process
1017  * off the run queue, and also may be called normally with the process on or
1018  * off the run queue.
1019  */
1020 static void
1021 dfly_resetpriority(struct lwp *lp)
1022 {
1023         dfly_pcpu_t rdd;
1024         int newpriority;
1025         u_short newrqtype;
1026         int rcpu;
1027         int checkpri;
1028         int estcpu;
1029         int delta_uload;
1030
1031         crit_enter();
1032
1033         /*
1034          * Lock the scheduler that (lp) belongs to.  This can be on a different
1035          * cpu.  Handle races.  This loop breaks out with the appropriate
1036          * rdd locked.
1037          */
1038         for (;;) {
1039                 rcpu = lp->lwp_qcpu;
1040                 cpu_ccfence();
1041                 rdd = &dfly_pcpu[rcpu];
1042                 spin_lock(&rdd->spin);
1043                 if (rcpu == lp->lwp_qcpu)
1044                         break;
1045                 spin_unlock(&rdd->spin);
1046         }
1047
1048         /*
1049          * Calculate the new priority and queue type
1050          */
1051         newrqtype = lp->lwp_rtprio.type;
1052
1053         switch(newrqtype) {
1054         case RTP_PRIO_REALTIME:
1055         case RTP_PRIO_FIFO:
1056                 newpriority = PRIBASE_REALTIME +
1057                              (lp->lwp_rtprio.prio & PRIMASK);
1058                 break;
1059         case RTP_PRIO_NORMAL:
1060                 /*
1061                  * Time-share class: derive the priority from p_nice and estcpu.
1062                  */
1063                 estcpu = lp->lwp_estcpu;
1064
1065                 /*
1066                  * p_nice piece         Adds (0-40) * 2         0-80
1067                  * estcpu               Adds 16384  * 4 / 512   0-128
1068                  */
1069                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1070                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1071                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1072                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1073                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
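                /*
                 * Worked example (annotation, PRIO_MIN = -20): a nice 0
                 * thread with estcpu 8192 yields 40 + 64 = 104 above, which
                 * the scaling reduces to 104 * 128 / 210 = 63, so its
                 * lwp_priority becomes PRIBASE_NORMAL + 63 = 191 and it
                 * lands in queue 63 / PPQ = 15.
                 */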
1074                 break;
1075         case RTP_PRIO_IDLE:
1076                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1077                 break;
1078         case RTP_PRIO_THREAD:
1079                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1080                 break;
1081         default:
1082                 panic("Bad RTP_PRIO %d", newrqtype);
1083                 /* NOT REACHED */
1084         }
1085
1086         /*
1087  * The LWKT scheduler does not inspect usched structures, so give it a hint
1088          * on the relative priority of user threads running in the kernel.
1089          * The LWKT scheduler will always ensure that a user thread running
1090          * in the kernel will get cpu some time, regardless of its upri,
1091          * but can decide not to instantly switch from one kernel or user
1092          * mode user thread to a kernel-mode user thread when it has a less
1093  * desirable user priority.
1094          *
1095  * td_upri has normal sense (higher values are more desirable), so
1096          * negate it.
1097          */
1098         lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1099
1100         /*
1101          * The newpriority incorporates the queue type so do a simple masked
1102          * check to determine if the process has moved to another queue.  If
1103          * it has, and it is currently on a run queue, then move it.
1104          *
1105          * Since uload is ~PPQMASK masked, no modifications are necessary if
1106          * we end up in the same run queue.
1107          */
1108         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1109                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1110                         dfly_remrunqueue_locked(rdd, lp);
1111                         lp->lwp_priority = newpriority;
1112                         lp->lwp_rqtype = newrqtype;
1113                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1114                         dfly_setrunqueue_locked(rdd, lp);
1115                         checkpri = 1;
1116                 } else {
1117                         lp->lwp_priority = newpriority;
1118                         lp->lwp_rqtype = newrqtype;
1119                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1120                         checkpri = 0;
1121                 }
1122         } else {
1123                 /*
1124                  * In the same PPQ, uload cannot change.
1125                  */
1126                 lp->lwp_priority = newpriority;
1127                 checkpri = 1;
1128                 rcpu = -1;
1129         }
1130
1131         /*
1132          * Adjust effective load.
1133          *
1134          * Calculate load then scale up or down geometrically based on p_nice.
1135          * Processes niced up (positive) are less important, and processes
1136  * niced downward (negative) are more important.  The higher the uload,
1137          * the more important the thread.
1138          */
1139         /* 0-511, 0-100% cpu */
1140         delta_uload = lp->lwp_estcpu / NQS;
1141         delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1142
1143
1144         delta_uload -= lp->lwp_uload;
1145         lp->lwp_uload += delta_uload;
1146         if (lp->lwp_mpflags & LWP_MP_ULOAD)
1147                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
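        /*
         * Worked example (annotation): a fully cpu-bound thread has estcpu
         * 16384, giving a base uload of 16384 / 32 = 512.  With PRIO_MAX
         * of 20 the nice scaling above leaves about 25 for a nice +20
         * thread and about 999 for a nice -20 thread, so negatively niced
         * threads weigh far more heavily when queue loads are compared.
         */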
1148
1149         /*
1150          * Determine if we need to reschedule the target cpu.  This only
1151          * occurs if the LWP is already on a scheduler queue, which means
1152  * that idle cpu notification has already occurred.  At most we
1153          * need only issue a need_user_resched() on the appropriate cpu.
1154          *
1155          * The LWP may be owned by a CPU different from the current one,
1156          * in which case dd->uschedcp may be modified without an MP lock
1157          * or a spinlock held.  The worst that happens is that the code
1158          * below causes a spurious need_user_resched() on the target CPU
1159  * and dd->upri to be wrong for a short period of time, both of
1160          * which are harmless.
1161          *
1162          * If checkpri is 0 we are adjusting the priority of the current
1163  * process, possibly higher (less desirable), so ignore the upri
1164          * check which will fail in that case.
1165          */
1166         if (rcpu >= 0) {
1167                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1168                     (checkpri == 0 ||
1169                      (rdd->upri & ~PRIMASK) >
1170                      (lp->lwp_priority & ~PRIMASK))) {
1171 #ifdef SMP
1172                         if (rcpu == mycpu->gd_cpuid) {
1173                                 spin_unlock(&rdd->spin);
1174                                 need_user_resched();
1175                         } else {
1176                                 spin_unlock(&rdd->spin);
1177                                 lwkt_send_ipiq(globaldata_find(rcpu),
1178                                                dfly_need_user_resched_remote,
1179                                                NULL);
1180                         }
1181 #else
1182                         spin_unlock(&rdd->spin);
1183                         need_user_resched();
1184 #endif
1185                 } else {
1186                         spin_unlock(&rdd->spin);
1187                 }
1188         } else {
1189                 spin_unlock(&rdd->spin);
1190         }
1191         crit_exit();
1192 }
1193
1194 static
1195 void
1196 dfly_yield(struct lwp *lp)
1197 {
1198 #if 0
1199         /* FUTURE (or something similar) */
1200         switch(lp->lwp_rqtype) {
1201         case RTP_PRIO_NORMAL:
1202                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1203                 break;
1204         default:
1205                 break;
1206         }
1207 #endif
1208         need_user_resched();
1209 }
1210
1211 /*
1212  * Called from fork1() when a new child process is being created.
1213  *
1214  * Give the child process an initial estcpu that is more batchy than
1215  * its parent and dock the parent for the fork (but do not
1216  * reschedule the parent).
1217  *
1218  * fast
1219  *
1220  * XXX lwp should be "spawning" instead of "forking"
1221  */
1222 static void
1223 dfly_forking(struct lwp *plp, struct lwp *lp)
1224 {
1225         /*
1226          * Put the child 4 queue slots (out of 32) higher than the parent
1227          * (less desireable than the parent).
1228          */
1229         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1230         lp->lwp_forked = 1;
1231         lp->lwp_estfast = 0;
1232
1233         /*
1234          * Dock the parent a cost for the fork, protecting us from fork
1235          * bombs.  If the parent is forking quickly make the child more
1236          * batchy.
1237          */
1238         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
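        /*
         * Annotation: ESTCPUPPQ * 4 = 2048 estcpu nominally moves the child
         * four queues (16 priority levels) toward batch, while each fork
         * costs the parent only ESTCPUPPQ / 16 = 32 units, so a parent must
         * fork roughly 16 times before it slips a queue itself.
         */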
1239 }
1240
1241 /*
1242  * Called when a lwp is being removed from this scheduler, typically
1243  * during lwp_exit().  We have to clean out any ULOAD accounting before
1244  * we can let the lp go.  The dd->spin lock is not needed for uload
1245  * updates.
1246  *
1247  * Scheduler dequeueing has already occurred, no further action in that
1248  * regard is needed.
1249  */
1250 static void
1251 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1252 {
1253         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1254
1255         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1256                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1257                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1258                 atomic_add_int(&dd->ucount, -1);
1259                 atomic_add_int(&dfly_ucount, -1);
1260         }
1261 }
1262
1263 /*
1264  * This function cannot block in any way, but spinlocks are ok.
1265  *
1266  * Update the uload based on the state of the thread (whether it is going
1267  * to sleep or running again).  The uload is meant to be a longer-term
1268  * load and not an instantaneous load.
1269  */
1270 static void
1271 dfly_uload_update(struct lwp *lp)
1272 {
1273         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1274
1275         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1276                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1277                         spin_lock(&dd->spin);
1278                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1279                                 atomic_set_int(&lp->lwp_mpflags,
1280                                                LWP_MP_ULOAD);
1281                                 atomic_add_int(&dd->uload, lp->lwp_uload);
1282                                 atomic_add_int(&dd->ucount, 1);
1283                                 atomic_add_int(&dfly_ucount, 1);
1284                         }
1285                         spin_unlock(&dd->spin);
1286                 }
1287         } else if (lp->lwp_slptime > 0) {
1288                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1289                         spin_lock(&dd->spin);
1290                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1291                                 atomic_clear_int(&lp->lwp_mpflags,
1292                                                  LWP_MP_ULOAD);
1293                                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1294                                 atomic_add_int(&dd->ucount, -1);
1295                                 atomic_add_int(&dfly_ucount, -1);
1296                         }
1297                         spin_unlock(&dd->spin);
1298                 }
1299         }
1300 }
1301
1302 /*
1303  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1304  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1305  * has a better or equal priority than the process that would otherwise be
1306  * chosen, NULL is returned.
1307  *
1308  * Until we fix the RUNQ code the chklp test has to be strict or we may
1309  * bounce between processes trying to acquire the current process designation.
1310  *
1311  * Must be called with rdd->spin locked.  The spinlock is left intact through
1312  * the entire routine.  dd->spin does not have to be locked.
1313  *
1314  * If worst is non-zero this function finds the worst thread instead of the
1315  * best thread (used by the schedulerclock-based rover).
1316  */
1317 static
1318 struct lwp *
1319 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1320                        struct lwp *chklp, int worst)
1321 {
1322         struct lwp *lp;
1323         struct rq *q;
1324         u_int32_t *which, *which2;
1325         u_int32_t pri;
1326         u_int32_t rtqbits;
1327         u_int32_t tsqbits;
1328         u_int32_t idqbits;
1329
1330         rtqbits = rdd->rtqueuebits;
1331         tsqbits = rdd->queuebits;
1332         idqbits = rdd->idqueuebits;
1333
1334         if (worst) {
1335                 if (idqbits) {
1336                         pri = bsrl(idqbits);
1337                         q = &rdd->idqueues[pri];
1338                         which = &rdd->idqueuebits;
1339                         which2 = &idqbits;
1340                 } else if (tsqbits) {
1341                         pri = bsrl(tsqbits);
1342                         q = &rdd->queues[pri];
1343                         which = &rdd->queuebits;
1344                         which2 = &tsqbits;
1345                 } else if (rtqbits) {
1346                         pri = bsrl(rtqbits);
1347                         q = &rdd->rtqueues[pri];
1348                         which = &rdd->rtqueuebits;
1349                         which2 = &rtqbits;
1350                 } else {
1351                         return (NULL);
1352                 }
1353                 lp = TAILQ_LAST(q, rq);
1354         } else {
1355                 if (rtqbits) {
1356                         pri = bsfl(rtqbits);
1357                         q = &rdd->rtqueues[pri];
1358                         which = &rdd->rtqueuebits;
1359                         which2 = &rtqbits;
1360                 } else if (tsqbits) {
1361                         pri = bsfl(tsqbits);
1362                         q = &rdd->queues[pri];
1363                         which = &rdd->queuebits;
1364                         which2 = &tsqbits;
1365                 } else if (idqbits) {
1366                         pri = bsfl(idqbits);
1367                         q = &rdd->idqueues[pri];
1368                         which = &rdd->idqueuebits;
1369                         which2 = &idqbits;
1370                 } else {
1371                         return (NULL);
1372                 }
1373                 lp = TAILQ_FIRST(q);
1374         }
1375         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1376
1377         /*
1378          * If the passed lwp <chklp> is reasonably close to the selected
1379          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1380          *
1381  * Note that we must err on the side of <chklp> to avoid bouncing
1382          * between threads in the acquire code.
1383          */
1384         if (chklp) {
1385                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1386                         return(NULL);
1387         }
1388
1389         KTR_COND_LOG(usched_chooseproc,
1390             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1391             lp->lwp_proc->p_pid,
1392             lp->lwp_thread->td_gd->gd_cpuid,
1393             mycpu->gd_cpuid);
1394
1395         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1396         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1397         TAILQ_REMOVE(q, lp, lwp_procq);
1398         --rdd->runqcount;
1399         if (TAILQ_EMPTY(q))
1400                 *which &= ~(1 << pri);
1401
1402         /*
1403          * If we are choosing a process from rdd with the intent to
1404          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1405          * is still held.
1406          */
1407         if (rdd != dd) {
1408                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1409                         atomic_add_int(&rdd->uload, -lp->lwp_uload);
1410                         atomic_add_int(&rdd->ucount, -1);
1411                         atomic_add_int(&dfly_ucount, -1);
1412                 }
1413                 lp->lwp_qcpu = dd->cpuid;
1414                 atomic_add_int(&dd->uload, lp->lwp_uload);
1415                 atomic_add_int(&dd->ucount, 1);
1416                 atomic_add_int(&dfly_ucount, 1);
1417                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1418         }
1419         return lp;
1420 }
1421
1422 #ifdef SMP
1423
1424 /*
1425  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1426  *
1427  * Choose a cpu node to schedule lp on, hopefully near its current
1428  * node.
1429  *
1430  * We give the current node a modest advantage for obvious reasons.
1431  *
1432  * We also give the node the thread was woken up FROM a slight advantage
1433  * in order to try to schedule paired threads which synchronize/block waiting
1434  * for each other fairly close to each other.  Similarly in a network setting
1435  * this feature will also attempt to place a user process near the kernel
1436  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1437  * algorithm as it heuristically groups synchronizing processes for locality
1438  * of reference in multi-socket systems.
1439  *
1440  * We check against running processes and give a big advantage if there
1441  * are none running.
1442  *
1443  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1444  *
1445  * When the topology is known, choose a cpu whose group has, in aggregate,
1446  * the lowest weighted load.
1447  */
1448 static
1449 dfly_pcpu_t
1450 dfly_choose_best_queue(struct lwp *lp)
1451 {
1452         cpumask_t wakemask;
1453         cpumask_t mask;
1454         cpu_node_t *cpup;
1455         cpu_node_t *cpun;
1456         cpu_node_t *cpub;
1457         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1458         dfly_pcpu_t rdd;
1459         int wakecpu;
1460         int cpuid;
1461         int n;
1462         int count;
1463         int load;
1464         int lowest_load;
1465
1466         /*
1467          * When the topology is unknown choose a random cpu that is hopefully
1468          * idle.
1469          */
1470         if (dd->cpunode == NULL)
1471                 return (dfly_choose_queue_simple(dd, lp));
1472
1473         /*
1474          * Pairing mask (the cpu lp was last woken up from)
1475          */
1476         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1477                 wakemask = dfly_pcpu[wakecpu].cpumask;
1478         else
1479                 wakemask = 0;
1480
1481         /*
1482          * When the topology is known, choose a cpu whose group has, in
1483          * aggregate, the lowest weighted load.
1484          */
1485         cpup = root_cpu_node;
1486         rdd = dd;
1487
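             /*
              * Descend the topology tree one level at a time.  Each child
              * group is scored roughly as:
              *
              *   load = (sum of uload + ucount * weight3, minus weight4 for
              *           each idle cpu, minus lp's own contribution when it
              *           is already counted here) / ncpus_in_group
              *          - weight1 if the group contains lp's current cpu
              *          -/+ weight2 for the group containing the wakeup source
              *
              * and the child with the lowest adjusted load is descended into.
              */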
1488         while (cpup) {
1489                 /*
1490                  * Degenerate case super-root
1491                  */
1492                 if (cpup->child_node && cpup->child_no == 1) {
1493                         cpup = cpup->child_node;
1494                         continue;
1495                 }
1496
1497                 /*
1498                  * Terminal cpunode
1499                  */
1500                 if (cpup->child_node == NULL) {
1501                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1502                         break;
1503                 }
1504
1505                 cpub = NULL;
1506                 lowest_load = 0x7FFFFFFF;
1507
1508                 for (n = 0; n < cpup->child_no; ++n) {
1509                         /*
1510                          * Accumulate load information for all cpus
1511                          * which are members of this node.
1512                          */
1513                         cpun = &cpup->child_node[n];
1514                         mask = cpun->members & usched_global_cpumask &
1515                                smp_active_mask & lp->lwp_cpumask;
1516                         if (mask == 0)
1517                                 continue;
1518
1519                         count = 0;
1520                         load = 0;
1521
1522                         while (mask) {
1523                                 cpuid = BSFCPUMASK(mask);
1524                                 rdd = &dfly_pcpu[cpuid];
1525                                 load += rdd->uload;
1526                                 load += rdd->ucount * usched_dfly_weight3;
1527
1528                                 if (rdd->uschedcp == NULL &&
1529                                     rdd->runqcount == 0 &&
1530                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1531                                 ) {
1532                                         load -= usched_dfly_weight4;
1533                                 }
1534 #if 0
1535                                 else if (rdd->upri > lp->lwp_priority + PPQ) {
1536                                         load -= usched_dfly_weight4 / 2;
1537                                 }
1538 #endif
1539                                 mask &= ~CPUMASK(cpuid);
1540                                 ++count;
1541                         }
1542
1543                         /*
1544                          * Compensate if the lp is already accounted for in
1545                          * the aggregate uload for this mask set.  We want
1546                          * to calculate the loads as if lp were not present,
1547                          * otherwise the calculation is bogus.
1548                          */
1549                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1550                             (dd->cpumask & cpun->members)) {
1551                                 load -= lp->lwp_uload;
1552                                 load -= usched_dfly_weight3;
1553                         }
1554
1555                         load /= count;
1556
1557                         /*
1558                          * Advantage the cpu group (lp) is already on.
1559                          */
1560                         if (cpun->members & dd->cpumask)
1561                                 load -= usched_dfly_weight1;
1562
1563                         /*
1564                          * Advantage the cpu group we want to pair (lp) to,
1565                          * but don't let it go to the exact same cpu as
1566                          * the wakecpu target.
1567                          *
1568                          * We do this by checking whether cpun is a
1569                          * terminal node or not.  All cpun's at the same
1570                          * level will either all be terminal or all not
1571                          * terminal.
1572                          *
1573                          * If it is and we match we disadvantage the load.
1574                          * If it is and we don't match we advantage the load.
1575                          *
1576                          * Also note that we are effectively disadvantaging
1577          * all-but-one by the same amount, so it won't affect
1578                          * the weight1 factor for the all-but-one nodes.
1579                          */
1580                         if (cpun->members & wakemask) {
1581                                 if (cpun->child_node != NULL) {
1582                                         /* advantage */
1583                                         load -= usched_dfly_weight2;
1584                                 } else {
1585                                         if (usched_dfly_features & 0x10)
1586                                                 load += usched_dfly_weight2;
1587                                         else
1588                                                 load -= usched_dfly_weight2;
1589                                 }
1590                         }
1591
1592                         /*
1593                          * Calculate the best load
1594                          */
1595                         if (cpub == NULL || lowest_load > load ||
1596                             (lowest_load == load &&
1597                              (cpun->members & dd->cpumask))
1598                         ) {
1599                                 lowest_load = load;
1600                                 cpub = cpun;
1601                         }
1602                 }
1603                 cpup = cpub;
1604         }
1605         if (usched_dfly_chooser)
1606                 kprintf("lp %02d->%02d %s\n",
1607                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1608         return (rdd);
1609 }
1610
1611 /*
1612  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1613  *
1614  * Choose the worst queue close to dd's cpu node with a non-empty runq
1615  * that is NOT dd.  Also require that the moving of the highest-load thread
1616  * from rdd to dd does not cause the uload's to cross each other.
1617  *
1618  * This is used by the thread chooser when the current cpu's queues are
1619  * empty to steal a thread from another cpu's queue.  We want to offload
1620  * the most heavily-loaded queue.
1621  */
1622 static
1623 dfly_pcpu_t
1624 dfly_choose_worst_queue(dfly_pcpu_t dd)
1625 {
1626         cpumask_t mask;
1627         cpu_node_t *cpup;
1628         cpu_node_t *cpun;
1629         cpu_node_t *cpub;
1630         dfly_pcpu_t rdd;
1631         int cpuid;
1632         int n;
1633         int count;
1634         int load;
1635 #if 0
1636         int pri;
1637         int hpri;
1638 #endif
1639         int highest_load;
1640
1641         /*
1642          * When the topology is unknown choose a random cpu that is hopefully
1643          * idle.
1644          */
1645         if (dd->cpunode == NULL) {
1646                 return (NULL);
1647         }
1648
1649         /*
1650          * When the topology is known, choose a cpu whose group has, in
1651          * aggregate, the highest weighted load.
1652          */
1653         cpup = root_cpu_node;
1654         rdd = dd;
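             /*
              * Same descent as dfly_choose_best_queue(), except that at each
              * level the child group with the HIGHEST adjusted load is
              * chosen, and groups containing our own cpu are biased upward
              * (weight1) so nearby cpus are preferred as steal targets.
              */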
1655         while (cpup) {
1656                 /*
1657                  * Degenerate case super-root
1658                  */
1659                 if (cpup->child_node && cpup->child_no == 1) {
1660                         cpup = cpup->child_node;
1661                         continue;
1662                 }
1663
1664                 /*
1665                  * Terminal cpunode
1666                  */
1667                 if (cpup->child_node == NULL) {
1668                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1669                         break;
1670                 }
1671
1672                 cpub = NULL;
1673                 highest_load = 0;
1674
1675                 for (n = 0; n < cpup->child_no; ++n) {
1676                         /*
1677                          * Accumulate load information for all cpus
1678                          * which are members of this node.
1679                          */
1680                         cpun = &cpup->child_node[n];
1681                         mask = cpun->members & usched_global_cpumask &
1682                                smp_active_mask;
1683                         if (mask == 0)
1684                                 continue;
1685                         count = 0;
1686                         load = 0;
1687
1688                         while (mask) {
1689                                 cpuid = BSFCPUMASK(mask);
1690                                 rdd = &dfly_pcpu[cpuid];
1691                                 load += rdd->uload;
1692                                 load += rdd->ucount * usched_dfly_weight3;
1693                                 if (rdd->uschedcp == NULL &&
1694                                     rdd->runqcount == 0 &&
1695                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1696                                 ) {
1697                                         load -= usched_dfly_weight4;
1698                                 }
1699 #if 0
1700                                 else if (rdd->upri > dd->upri + PPQ) {
1701                                         load -= usched_dfly_weight4 / 2;
1702                                 }
1703 #endif
1704                                 mask &= ~CPUMASK(cpuid);
1705                                 ++count;
1706                         }
1707                         load /= count;
1708
1709                         /*
1710                          * Prefer candidates which are somewhat closer to
1711                          * our cpu.
1712                          */
1713                         if (dd->cpumask & cpun->members)
1714                                 load += usched_dfly_weight1;
1715
1716                         /*
1717                          * The best candidate is the one with the worst
1718                          * (highest) load.
1719                          */
1720                         if (cpub == NULL || highest_load < load) {
1721                                 highest_load = load;
1722                                 cpub = cpun;
1723                         }
1724                 }
1725                 cpup = cpub;
1726         }
1727
1728         /*
1729          * We never return our own node (dd), and only return a remote
1730          * node if its load is significantly worse than ours (i.e. where
1731          * stealing a thread would be considered reasonable).
1732          *
1733          * This also helps us avoid breaking paired threads apart which
1734          * can have disastrous effects on performance.
1735          */
1736         if (rdd == dd)
1737                 return(NULL);
1738
1739 #if 0
1740         hpri = 0;
1741         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1742                 hpri = pri;
1743         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1744                 hpri = pri;
1745         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1746                 hpri = pri;
1747         hpri *= PPQ;
1748         if (rdd->uload - hpri < dd->uload + hpri)
1749                 return(NULL);
1750 #endif
1751         return (rdd);
1752 }
1753
1754 static
1755 dfly_pcpu_t
1756 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1757 {
1758         dfly_pcpu_t rdd;
1759         cpumask_t tmpmask;
1760         cpumask_t mask;
1761         int cpuid;
1762
1763         /*
1764          * Fall back to the original heuristic: select a semi-random cpu,
1765          * first checking cpus not currently running a user thread.
1766          */
1767         ++dfly_scancpu;
1768         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1769         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1770                smp_active_mask & usched_global_cpumask;
1771
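             /*
              * Each iteration picks the lowest-numbered ready cpu at or
              * above the current rotor position (tmpmask clears the lower
              * cpu ids), wrapping to the lowest ready cpu when none remain
              * above it.
              */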
1772         while (mask) {
1773                 tmpmask = ~(CPUMASK(cpuid) - 1);
1774                 if (mask & tmpmask)
1775                         cpuid = BSFCPUMASK(mask & tmpmask);
1776                 else
1777                         cpuid = BSFCPUMASK(mask);
1778                 rdd = &dfly_pcpu[cpuid];
1779
1780                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1781                         goto found;
1782                 mask &= ~CPUMASK(cpuid);
1783         }
1784
1785         /*
1786          * Then cpus which might have a currently running lp
1787          */
1788         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1789         mask = dfly_curprocmask & dfly_rdyprocmask &
1790                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1791
1792         while (mask) {
1793                 tmpmask = ~(CPUMASK(cpuid) - 1);
1794                 if (mask & tmpmask)
1795                         cpuid = BSFCPUMASK(mask & tmpmask);
1796                 else
1797                         cpuid = BSFCPUMASK(mask);
1798                 rdd = &dfly_pcpu[cpuid];
1799
1800                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1801                         goto found;
1802                 mask &= ~CPUMASK(cpuid);
1803         }
1804
1805         /*
1806          * If we cannot find a suitable cpu we reload from dfly_scancpu
1807          * and round-robin.  Other cpus will pickup as they release their
1808          * current lwps or become ready.
1809          *
1810          * Avoid a degenerate system lockup case if usched_global_cpumask
1811          * is set to 0 or otherwise does not cover lwp_cpumask.
1812          *
1813          * We only kick the target helper thread in this case; we do not
1814          * set the user resched flag because
1815          */
1816         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1817         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1818                 cpuid = 0;
1819         rdd = &dfly_pcpu[cpuid];
1820 found:
1821         return (rdd);
1822 }
1823
1824 static
1825 void
1826 dfly_need_user_resched_remote(void *dummy)
1827 {
1828         globaldata_t gd = mycpu;
1829         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1830
1831         /*
1832          * Flag reschedule needed
1833          */
1834         need_user_resched();
1835
1836         /*
1837          * If no user thread is currently running we need to kick the helper
1838          * on our cpu to recover.  Otherwise the cpu will never schedule
1839          * anything again.
1840          *
1841          * We cannot schedule the process ourselves because this is an
1842          * IPI callback and we cannot acquire spinlocks in an IPI callback.
1843          *
1844          * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1845          */
1846         if (dd->uschedcp == NULL && (dfly_rdyprocmask & gd->gd_cpumask)) {
1847                 atomic_clear_cpumask(&dfly_rdyprocmask, gd->gd_cpumask);
1848                 wakeup_mycpu(&dd->helper_thread);
1849         }
1850 }
1851
1852 #endif
1853
1854 /*
1855  * dfly_remrunqueue_locked() removes a given process from the run queue
1856  * that it is on, clearing the queue busy bit if it becomes empty.
1857  *
1858  * Note that the user process scheduler is different from the LWKT scheduler.
1859  * The user process scheduler only manages user processes but it uses LWKT
1860  * underneath, and a user process operating in the kernel will often be
1861  * 'released' from our management.
1862  *
1863  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1864  * to sleep or the lwp is moved to a different runq.
1865  */
1866 static void
1867 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1868 {
1869         struct rq *q;
1870         u_int32_t *which;
1871         u_int8_t pri;
1872
1873         KKASSERT(rdd->runqcount >= 0);
1874
1875         pri = lp->lwp_rqindex;
1876
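             /*
              * Select the queue array and status-bit word that match the
              * lwp's scheduling class (normal, realtime/fifo, or idle).
              */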
1877         switch(lp->lwp_rqtype) {
1878         case RTP_PRIO_NORMAL:
1879                 q = &rdd->queues[pri];
1880                 which = &rdd->queuebits;
1881                 break;
1882         case RTP_PRIO_REALTIME:
1883         case RTP_PRIO_FIFO:
1884                 q = &rdd->rtqueues[pri];
1885                 which = &rdd->rtqueuebits;
1886                 break;
1887         case RTP_PRIO_IDLE:
1888                 q = &rdd->idqueues[pri];
1889                 which = &rdd->idqueuebits;
1890                 break;
1891         default:
1892                 panic("remrunqueue: invalid rtprio type");
1893                 /* NOT REACHED */
1894         }
1895         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1896         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1897         TAILQ_REMOVE(q, lp, lwp_procq);
1898         --rdd->runqcount;
1899         if (TAILQ_EMPTY(q)) {
1900                 KASSERT((*which & (1 << pri)) != 0,
1901                         ("remrunqueue: remove from empty queue"));
1902                 *which &= ~(1 << pri);
1903         }
1904 }
1905
1906 /*
1907  * dfly_setrunqueue_locked()
1908  *
1909  * Add a process whose rqtype and rqindex have previously been calculated
1910  * onto the appropriate run queue.  This routine does not itself decide
1911  * whether another cpu needs a reschedule; the callers handle that.
1912  *
1913  * NOTE:          Lower priorities are better priorities.
1914  *
1915  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1916  *                sum of the rough lwp_priority for all running and runnable
1917  *                processes.  Lower priority processes (higher lwp_priority
1918  *                values) actually DO count as more load, not less, because
1919  *                these are the programs which require the most care with
1920  *                regard to cpu selection.
1921  */
1922 static void
1923 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1924 {
1925         struct rq *q;
1926         u_int32_t *which;
1927         int pri;
1928
1929         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1930
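             /*
              * Account the lwp in the per-cpu uload/ucount totals if it is
              * not already registered there.
              */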
1931         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1932                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1933                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1934                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1935                 atomic_add_int(&dfly_ucount, 1);
1936         }
1937
1938         pri = lp->lwp_rqindex;
1939
1940         switch(lp->lwp_rqtype) {
1941         case RTP_PRIO_NORMAL:
1942                 q = &rdd->queues[pri];
1943                 which = &rdd->queuebits;
1944                 break;
1945         case RTP_PRIO_REALTIME:
1946         case RTP_PRIO_FIFO:
1947                 q = &rdd->rtqueues[pri];
1948                 which = &rdd->rtqueuebits;
1949                 break;
1950         case RTP_PRIO_IDLE:
1951                 q = &rdd->idqueues[pri];
1952                 which = &rdd->idqueuebits;
1953                 break;
1954         default:
1955                 panic("setrunqueue: invalid rtprio type");
1956                 /* NOT REACHED */
1957         }
1958
1959         /*
1960          * Place us on the selected queue.  Determine if we should be
1961          * placed at the head of the queue or at the end.
1962          *
1963          * We are placed at the tail if our round-robin count has expired,
1964          * or is about to expire and the system thinks it's a good place to
1965          * round-robin, or there is already a next thread on the queue
1966          * (it might be trying to pick up where it left off and we don't
1967          * want to interfere).
1968          */
1969         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1970         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1971         ++rdd->runqcount;
1972
1973         if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
1974             (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
1975              (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC)) ||
1976             !TAILQ_EMPTY(q)
1977         ) {
1978                 atomic_clear_int(&lp->lwp_thread->td_mpflags,
1979                                  TDF_MP_BATCH_DEMARC);
1980                 lp->lwp_rrcount = 0;
1981                 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1982         } else {
1983                 if (TAILQ_EMPTY(q))
1984                         lp->lwp_rrcount = 0;
1985                 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
1986         }
1987         *which |= 1 << pri;
1988 }
1989
1990 #ifdef SMP
1991
1992 /*
1993  * For SMP systems a user scheduler helper thread is created for each
1994  * cpu and is used to allow one cpu to wake up another for the purposes of
1995  * scheduling userland threads from setrunqueue().
1996  *
1997  * UP systems do not need the helper since there is only one cpu.
1998  *
1999  * We can't use the idle thread for this because we might block.
2000  * Additionally, doing things this way allows us to HLT idle cpus
2001  * on MP systems.
2002  */
2003 static void
2004 dfly_helper_thread(void *dummy)
2005 {
2006     globaldata_t gd;
2007     dfly_pcpu_t dd;
2008     dfly_pcpu_t rdd;
2009     struct lwp *nlp;
2010     cpumask_t mask;
2011     int cpuid;
2012
2013     gd = mycpu;
2014     cpuid = gd->gd_cpuid;       /* doesn't change */
2015     mask = gd->gd_cpumask;      /* doesn't change */
2016     dd = &dfly_pcpu[cpuid];
2017
2018     /*
2019      * Since we only want to be woken up when no user processes
2020      * are scheduled on a cpu, run at an ultra low priority.
2021      */
2022     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2023
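         /*
          * Wait for the first wakeup on the helper_thread wait channel
          * before entering the main loop.
          */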
2024     tsleep(&dd->helper_thread, 0, "schslp", 0);
2025
2026     for (;;) {
2027         /*
2028          * We use the LWKT deschedule-interlock trick to avoid racing
2029          * dfly_rdyprocmask.  This means we cannot block between here and
2030          * the interlocked tsleep() at the bottom of the loop.
2031          */
2032         crit_enter_gd(gd);
2033         tsleep_interlock(&dd->helper_thread, 0);
2034
2035         spin_lock(&dd->spin);
2036
2037         atomic_set_cpumask(&dfly_rdyprocmask, mask);
2038         clear_user_resched();   /* This satisfies the reschedule request */
2039 #if 0
2040         dd->rrcount = 0;        /* Reset the round-robin counter */
2041 #endif
2042
2043         if (dd->runqcount || dd->uschedcp != NULL) {
2044                 /*
2045                  * Threads are available.  A thread may or may not be
2046                  * currently scheduled.  Get the best thread already queued
2047                  * to this cpu.
2048                  */
2049                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2050                 if (nlp) {
2051                         atomic_set_cpumask(&dfly_curprocmask, mask);
2052                         dd->upri = nlp->lwp_priority;
2053                         dd->uschedcp = nlp;
2054 #if 0
2055                         dd->rrcount = 0;        /* reset round robin */
2056 #endif
2057                         spin_unlock(&dd->spin);
2058                         lwkt_acquire(nlp->lwp_thread);
2059                         lwkt_schedule(nlp->lwp_thread);
2060                 } else {
2061                         /*
2062                          * This situation should not occur because we had
2063                          * at least one thread available.
2064                          */
2065                         spin_unlock(&dd->spin);
2066                 }
2067         } else if (usched_dfly_features & 0x01) {
2068                 /*
2069                  * This cpu is devoid of runnable threads, steal a thread
2070                  * from another cpu.  Since we're stealing, might as well
2071                  * load balance at the same time.
2072                  *
2073                  * We choose the highest-loaded thread from the worst queue.
2074                  *
2075                  * NOTE! This function only returns a non-NULL rdd when
2076                  *       another cpu's queue is obviously overloaded.  We
2077                  *       do not want to perform the type of rebalancing
2078                  *       the schedclock does here because it would result
2079                  *       in insane process pulling when 'steady' state is
2080                  *       partially unbalanced (e.g. 6 runnables and only
2081                  *       4 cores).
2082                  */
2083                 rdd = dfly_choose_worst_queue(dd);
2084                 if (rdd && spin_trylock(&rdd->spin)) {
2085                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2086                         spin_unlock(&rdd->spin);
2087                 } else {
2088                         nlp = NULL;
2089                 }
2090                 if (nlp) {
2091                         atomic_set_cpumask(&dfly_curprocmask, mask);
2092                         dd->upri = nlp->lwp_priority;
2093                         dd->uschedcp = nlp;
2094 #if 0
2095                         dd->rrcount = 0;        /* reset round robin */
2096 #endif
2097                         spin_unlock(&dd->spin);
2098                         lwkt_acquire(nlp->lwp_thread);
2099                         lwkt_schedule(nlp->lwp_thread);
2100                 } else {
2101                         /*
2102                          * Nothing was stolen; any candidate thread stays
2103                          * on the remote cpu's run queue for now.
2104                          */
2105                         spin_unlock(&dd->spin);
2106                 }
2107         } else {
2108                 /*
2109                  * This cpu is devoid of runnable threads and is not
2110                  * allowed to steal any.
2111                  */
2112                 spin_unlock(&dd->spin);
2113         }
2114
2115         /*
2116          * We're descheduled unless someone scheduled us.  Switch away.
2117          * Exiting the critical section will cause splz() to be called
2118          * for us if interrupts and such are pending.
2119          */
2120         crit_exit_gd(gd);
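             /*
              * PINTERLOCKED pairs with the tsleep_interlock() taken at the
              * top of the loop, so a wakeup posted in between is not lost.
              */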
2121         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2122     }
2123 }
2124
2125 #if 0
2126 static int
2127 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2128 {
2129         int error, new_val;
2130
2131         new_val = usched_dfly_stick_to_level;
2132
2133         error = sysctl_handle_int(oidp, &new_val, 0, req);
2134         if (error != 0 || req->newptr == NULL)
2135                 return (error);
2136         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2137                 return (EINVAL);
2138         usched_dfly_stick_to_level = new_val;
2139         return (0);
2140 }
2141 #endif
2142
2143 /*
2144  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
2145  * been cleared by rqinit() and we should not mess with it further.
2146  */
2147 static void
2148 dfly_helper_thread_cpu_init(void)
2149 {
2150         int i;
2151         int j;
2152         int cpuid;
2153         int smt_not_supported = 0;
2154         int cache_coherent_not_supported = 0;
2155
2156         if (bootverbose)
2157                 kprintf("Start scheduler helpers on cpus:\n");
2158
2159         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2160         usched_dfly_sysctl_tree =
2161                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2162                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2163                                 "usched_dfly", CTLFLAG_RD, 0, "");
2164
2165         for (i = 0; i < ncpus; ++i) {
2166                 dfly_pcpu_t dd = &dfly_pcpu[i];
2167                 cpumask_t mask = CPUMASK(i);
2168
2169                 if ((mask & smp_active_mask) == 0)
2170                     continue;
2171
2172                 spin_init(&dd->spin);
2173                 dd->cpunode = get_cpu_node_by_cpuid(i);
2174                 dd->cpuid = i;
2175                 dd->cpumask = CPUMASK(i);
2176                 for (j = 0; j < NQS; j++) {
2177                         TAILQ_INIT(&dd->queues[j]);
2178                         TAILQ_INIT(&dd->rtqueues[j]);
2179                         TAILQ_INIT(&dd->idqueues[j]);
2180                 }
2181                 atomic_clear_cpumask(&dfly_curprocmask, 1);
2182
2183                 if (dd->cpunode == NULL) {
2184                         smt_not_supported = 1;
2185                         cache_coherent_not_supported = 1;
2186                         if (bootverbose)
2187                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2188                                          "found for cpu\n", i);
2189                 } else {
2190                         switch (dd->cpunode->type) {
2191                         case THREAD_LEVEL:
2192                                 if (bootverbose)
2193                                         kprintf ("\tcpu%d - HyperThreading "
2194                                                  "available. Core siblings: ",
2195                                                  i);
2196                                 break;
2197                         case CORE_LEVEL:
2198                                 smt_not_supported = 1;
2199
2200                                 if (bootverbose)
2201                                         kprintf ("\tcpu%d - No HT available, "
2202                                                  "multi-core/physical "
2203                                                  "cpu. Physical siblings: ",
2204                                                  i);
2205                                 break;
2206                         case CHIP_LEVEL:
2207                                 smt_not_supported = 1;
2208
2209                                 if (bootverbose)
2210                                         kprintf ("\tcpu%d - No HT available, "
2211                                                  "single-core/physical cpu. "
2212                                                  "Package Siblings: ",
2213                                                  i);
2214                                 break;
2215                         default:
2216                                 /* Let's go for safe defaults here */
2217                                 smt_not_supported = 1;
2218                                 cache_coherent_not_supported = 1;
2219                                 if (bootverbose)
2220                                         kprintf ("\tcpu%d - Unknown cpunode->"
2221                                                  "type=%u. Siblings: ",
2222                                                  i,
2223                                                  (u_int)dd->cpunode->type);
2224                                 break;
2225                         }
2226
2227                         if (bootverbose) {
2228                                 if (dd->cpunode->parent_node != NULL) {
2229                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2230                                                 kprintf("cpu%d ", cpuid);
2231                                         kprintf("\n");
2232                                 } else {
2233                                         kprintf(" no siblings\n");
2234                                 }
2235                         }
2236                 }
2237
2238                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2239                             0, i, "usched %d", i);
2240
2241                 /*
2242                  * Allow user scheduling on the target cpu.  cpu #0 has already
2243                  * been enabled in rqinit().
2244                  */
2245                 if (i)
2246                     atomic_clear_cpumask(&dfly_curprocmask, mask);
2247                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2248                 dd->upri = PRIBASE_NULL;
2249
2250         }
2251
2252         /* usched_dfly sysctl configurable parameters */
2253
2254         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2255                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2256                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2257                        &usched_dfly_rrinterval, 0, "");
2258         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2259                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2260                        OID_AUTO, "decay", CTLFLAG_RW,
2261                        &usched_dfly_decay, 0, "Extra decay when not running");
2262
2263         /* Add enable/disable option for SMT scheduling if supported */
2264         if (smt_not_supported) {
2265                 usched_dfly_smt = 0;
2266                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2267                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2268                                   OID_AUTO, "smt", CTLFLAG_RD,
2269                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2270         } else {
2271                 usched_dfly_smt = 1;
2272                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2273                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2274                                OID_AUTO, "smt", CTLFLAG_RW,
2275                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2276         }
2277
2278         /*
2279          * Add enable/disable option for cache coherent scheduling
2280          * if supported
2281          */
2282         if (cache_coherent_not_supported) {
2283                 usched_dfly_cache_coherent = 0;
2284                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2285                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2286                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2287                                   "NOT SUPPORTED", 0,
2288                                   "Cache coherence NOT SUPPORTED");
2289         } else {
2290                 usched_dfly_cache_coherent = 1;
2291                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2292                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2293                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2294                                &usched_dfly_cache_coherent, 0,
2295                                "Enable/Disable cache coherent scheduling");
2296
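                     /*
                      * weight1..weight4 bias the topology-aware cpu selection
                      * in dfly_choose_best_queue()/dfly_choose_worst_queue():
                      * current-cpu affinity, wakeup-source affinity, per-thread
                      * (ucount) load, and attraction to idle cpus, respectively.
                      */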
2297                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2298                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2299                                OID_AUTO, "weight1", CTLFLAG_RW,
2300                                &usched_dfly_weight1, 200,
2301                                "Weight selection for current cpu");
2302
2303                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2304                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2305                                OID_AUTO, "weight2", CTLFLAG_RW,
2306                                &usched_dfly_weight2, 180,
2307                                "Weight selection for wakefrom cpu");
2308
2309                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2310                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2311                                OID_AUTO, "weight3", CTLFLAG_RW,
2312                                &usched_dfly_weight3, 40,
2313                                "Weight selection for num threads on queue");
2314
2315                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2316                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2317                                OID_AUTO, "weight4", CTLFLAG_RW,
2318                                &usched_dfly_weight4, 160,
2319                                "Availability of other idle cpus");
2320
2321                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2322                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2323                                OID_AUTO, "fast_resched", CTLFLAG_RW,
2324                                &usched_dfly_fast_resched, 0,
2325                                "Availability of other idle cpus");
2326
2327                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2328                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2329                                OID_AUTO, "features", CTLFLAG_RW,
2330                                &usched_dfly_features, 0x8F,
2331                                "Allow pulls into empty queues");
2332
2333                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2334                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2335                                OID_AUTO, "swmask", CTLFLAG_RW,
2336                                &usched_dfly_swmask, ~PPQMASK,
2337                                "Queue mask to force thread switch");
2338
2339
2340 #if 0
2341                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2342                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2343                                 OID_AUTO, "stick_to_level",
2344                                 CTLTYPE_INT | CTLFLAG_RW,
2345                                 NULL, sizeof usched_dfly_stick_to_level,
2346                                 sysctl_usched_dfly_stick_to_level, "I",
2347                                 "Stick a process to this level. See sysctl "
2348                                 "parameter hw.cpu_topology.level_description");
2349 #endif
2350         }
2351 }
2352 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2353         dfly_helper_thread_cpu_init, NULL)
2354
2355 #else /* No SMP options - just add the configurable parameters to sysctl */
2356
2357 static void
2358 sched_sysctl_tree_init(void)
2359 {
2360         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2361         usched_dfly_sysctl_tree =
2362                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2363                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2364                                 "usched_dfly", CTLFLAG_RD, 0, "");
2365
2366         /* usched_dfly sysctl configurable parameters */
2367         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2368                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2369                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2370                        &usched_dfly_rrinterval, 0, "");
2371         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2372                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2373                        OID_AUTO, "decay", CTLFLAG_RW,
2374                        &usched_dfly_decay, 0, "Extra decay when not running");
2375 }
2376 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2377         sched_sysctl_tree_init, NULL)
2378 #endif