kernel - usched_dfly revamp (7), bring back td_release, sysv_sem, weights
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
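/*
 * Concrete values implied by the macros above: PPQ = MAXPRI / NQS =
 * 128 / 32 = 4 priority levels per run queue, and ESTCPUMAX =
 * ESTCPUPPQ * NQS = 512 * 32 = 16384.  ESTCPULIM() clamps estcpu
 * calculations to that 0..16384 range.
 */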
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_estfast     lwp_usdata.dfly.estfast
97 #define lwp_uload       lwp_usdata.dfly.uload
98 #define lwp_rqtype      lwp_usdata.dfly.rqtype
99 #define lwp_qcpu        lwp_usdata.dfly.qcpu
100 #define lwp_rrcount     lwp_usdata.dfly.rrcount
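/*
 * The macros above alias this scheduler's per-lwp state onto the lwp's
 * scheduler-private lwp_usdata storage so the rest of the file can use
 * short field names.
 */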
101
102 struct usched_dfly_pcpu {
103         struct spinlock spin;
104         struct thread   helper_thread;
105         short           unusde01;
106         short           upri;
107         int             uload;
108         int             ucount;
109         struct lwp      *uschedcp;
110         struct rq       queues[NQS];
111         struct rq       rtqueues[NQS];
112         struct rq       idqueues[NQS];
113         u_int32_t       queuebits;
114         u_int32_t       rtqueuebits;
115         u_int32_t       idqueuebits;
116         int             runqcount;
117         int             cpuid;
118         cpumask_t       cpumask;
119 #ifdef SMP
120         cpu_node_t      *cpunode;
121 #endif
122 };
123
124 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
125
126 static void dfly_acquire_curproc(struct lwp *lp);
127 static void dfly_release_curproc(struct lwp *lp);
128 static void dfly_select_curproc(globaldata_t gd);
129 static void dfly_setrunqueue(struct lwp *lp);
130 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
131 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
132                                 sysclock_t cpstamp);
133 static void dfly_recalculate_estcpu(struct lwp *lp);
134 static void dfly_resetpriority(struct lwp *lp);
135 static void dfly_forking(struct lwp *plp, struct lwp *lp);
136 static void dfly_exiting(struct lwp *lp, struct proc *);
137 static void dfly_uload_update(struct lwp *lp);
138 static void dfly_yield(struct lwp *lp);
139 #ifdef SMP
140 static void dfly_changeqcpu_locked(struct lwp *lp,
141                                 dfly_pcpu_t dd, dfly_pcpu_t rdd);
142 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
143 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
144 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
145 #endif
146
147 #ifdef SMP
148 static void dfly_need_user_resched_remote(void *dummy);
149 #endif
150 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
151                                           struct lwp *chklp, int worst);
152 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
153 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
154
155 struct usched usched_dfly = {
156         { NULL },
157         "dfly", "Original DragonFly Scheduler",
158         NULL,                   /* default registration */
159         NULL,                   /* default deregistration */
160         dfly_acquire_curproc,
161         dfly_release_curproc,
162         dfly_setrunqueue,
163         dfly_schedulerclock,
164         dfly_recalculate_estcpu,
165         dfly_resetpriority,
166         dfly_forking,
167         dfly_exiting,
168         dfly_uload_update,
169         NULL,                   /* setcpumask not supported */
170         dfly_yield
171 };
172
173 /*
174  * We have NQS (32) run queues per scheduling class.  For the normal
175  * class, there are 128 priorities scaled onto these 32 queues.  New
176  * processes are added to the last entry in each queue, and processes
177  * are selected for running by taking them from the head and maintaining
178  * a simple FIFO arrangement.  Realtime and Idle priority processes have
179  * an explicit 0-31 priority which maps directly onto their class queue
180  * index.  When a queue has something in it, the corresponding bit is
181  * set in the queuebits variable, allowing a single read to determine
182  * the state of all 32 queues and then a ffs() to find the first busy
183  * queue.
184  */
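/*
 * For example, if a cpu's queuebits is 0x00000210 then only queues 4
 * and 9 are non-empty; bsfl(0x210) == 4 locates the best (lowest index)
 * non-empty queue and bsrl(0x210) == 9 the worst, as used by
 * dfly_chooseproc_locked() below.
 */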
185 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
186 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
187 #ifdef SMP
188 static volatile int dfly_scancpu;
189 #endif
190 static volatile int dfly_ucount;        /* total running on whole system */
191 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
192 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
193 static struct sysctl_oid *usched_dfly_sysctl_tree;
194
195 /* Debug info exposed through debug.* sysctl */
196
197 static int usched_dfly_debug = -1;
198 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
199            &usched_dfly_debug, 0,
200            "Print debug information for this pid");
201
202 static int usched_dfly_pid_debug = -1;
203 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
204            &usched_dfly_pid_debug, 0,
205            "Print KTR debug information for this pid");
206
207 static int usched_dfly_chooser = 0;
208 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
209            &usched_dfly_chooser, 0,
210            "Print KTR debug information for this pid");
211
212 /*
213  * Tuning usched_dfly - configurable through kern.usched_dfly.
214  *
215  * weight1 - Tries to keep threads on their current cpu.  If you
216  *           make this value too large the scheduler will not be
217  *           able to load-balance large loads.
218  *
219  * weight2 - If non-zero, detects thread pairs undergoing synchronous
220  *           communications and tries to move them closer together.
221  *           Behavior is adjusted by bit 4 of features (0x10).
222  *
223  *           WARNING!  Weight2 is a ridiculously sensitive parameter,
224  *           a small value is recommended.
225  *
226  * weight3 - Weighting based on the number of recently runnable threads
227  *           on the userland scheduling queue (ignoring their loads).
228  *           A nominal value here prevents high-priority (low-load)
229  *           threads from accumulating on one cpu core when other
230  *           cores are available.
231  *
232  *           This value should be left fairly small relative to weight1
233  *           and weight4.
234  *
235  * weight4 - Weighting based on other cpu queues being available
236  *           or running processes with higher lwp_priority's.
237  *
238  *           This allows a thread to migrate to another nearby cpu if it
239  *           is unable to run on the current cpu based on the other cpu
240  *           being idle or running a lower priority (higher lwp_priority)
241  * thread.  This value should be large enough to override weight1.
242  *
243  * features - These flags can be set or cleared to enable or disable various
244  *            features.
245  *
246  *            0x01      Enable idle-cpu pulling                 (default)
247  *            0x02      Enable proactive pushing                (default)
248  *            0x04      Enable rebalancing rover                (default)
249  *            0x08      Enable more proactive pushing           (default)
250  *            0x10      (flip weight2 limit on same cpu)        (default)
251  *            0x20      choose best cpu for forked process
252  *            0x40      choose current cpu for forked process
253  *            0x80      choose random cpu for forked process    (default)
254  */
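/*
 * Illustrative only: assuming the tunables below are exported under the
 * kern.usched_dfly sysctl tree mentioned above, they could be adjusted
 * at runtime with e.g.:
 *
 *      sysctl kern.usched_dfly.weight1=200
 *      sysctl kern.usched_dfly.features=0x8f
 */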
255 #ifdef SMP
256 static int usched_dfly_smt = 0;
257 static int usched_dfly_cache_coherent = 0;
258 static int usched_dfly_weight1 = 200;   /* keep thread on current cpu */
259 static int usched_dfly_weight2 = 180;   /* synchronous peer's current cpu */
260 static int usched_dfly_weight3 = 40;    /* number of threads on queue */
261 static int usched_dfly_weight4 = 160;   /* availability of idle cores */
262 static int usched_dfly_features = 0x8F; /* allow pulls */
263 static int usched_dfly_swmask = ~PPQMASK; /* td_upri priority hint mask */
264 #endif
265 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
266 static int usched_dfly_decay = 8;
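/*
 * dfly_schedulerclock() runs at ESTCPUFREQ on each cpu, so an rrinterval
 * of (ESTCPUFREQ + 9) / 10 ticks triggers the round-robin check roughly
 * 10 times per second.  usched_dfly_decay feeds the estcpu moving
 * average in dfly_recalculate_estcpu().
 */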
267
268 /* KTR debug printings */
269
270 KTR_INFO_MASTER(usched);
271
272 #if !defined(KTR_USCHED_DFLY)
273 #define KTR_USCHED_DFLY KTR_ALL
274 #endif
275
276 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
277     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
278     pid_t pid, int old_cpuid, int curr);
279
280 /*
281  * This function is called when the kernel intends to return to userland.
282  * It is responsible for making the thread the current designated userland
283  * thread for this cpu, blocking if necessary.
284  *
285  * The kernel will not depress our LWKT priority until after we return,
286  * in case we have to shove over to another cpu.
287  *
288  * We must determine our thread's disposition before we switch away.  This
289  * is very sensitive code.
290  *
291  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
292  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
293  * occur, this function is called only under very controlled circumstances.
294  */
295 static void
296 dfly_acquire_curproc(struct lwp *lp)
297 {
298         globaldata_t gd;
299         dfly_pcpu_t dd;
300 #ifdef SMP
301         dfly_pcpu_t rdd;
302 #endif
303         thread_t td;
304         int force_resched;
305
306         /*
307          * Make sure we aren't sitting on a tsleep queue.
308          */
309         td = lp->lwp_thread;
310         crit_enter_quick(td);
311         if (td->td_flags & TDF_TSLEEPQ)
312                 tsleep_remove(td);
313         dfly_recalculate_estcpu(lp);
314
315         gd = mycpu;
316         dd = &dfly_pcpu[gd->gd_cpuid];
317
318         /*
319          * Process any pending interrupts/ipi's, then handle reschedule
320          * requests.  dfly_release_curproc() will try to assign a new
321          * uschedcp that isn't us and otherwise NULL it out.
322          */
323         force_resched = 0;
324         if (user_resched_wanted()) {
325                 if (dd->uschedcp == lp)
326                         force_resched = 1;
327                 clear_user_resched();
328                 dfly_release_curproc(lp);
329         }
330
331         /*
332          * Loop until we are the current user thread.
333          *
334          * NOTE: dd spinlock not held at top of loop.
335          */
336         if (dd->uschedcp == lp)
337                 lwkt_yield_quick();
338
339         while (dd->uschedcp != lp) {
340                 lwkt_yield_quick();
341
342                 spin_lock(&dd->spin);
343
344                 /*
345                  * We are not or are no longer the current lwp and a forced
346                  * reschedule was requested.  Figure out the best cpu to
347                  * run on (our current cpu will be given significant weight).
348                  *
349                  * (if a reschedule was not requested we want to move this
350                  *  step after the uschedcp tests).
351                  */
352 #ifdef SMP
353                 if (force_resched &&
354                     (usched_dfly_features & 0x08) &&
355                     (rdd = dfly_choose_best_queue(lp)) != dd) {
356                         dfly_changeqcpu_locked(lp, dd, rdd);
357                         spin_unlock(&dd->spin);
358                         lwkt_deschedule(lp->lwp_thread);
359                         dfly_setrunqueue_dd(rdd, lp);
360                         lwkt_switch();
361                         gd = mycpu;
362                         dd = &dfly_pcpu[gd->gd_cpuid];
363                         continue;
364                 }
365 #endif
366
367                 /*
368                  * Either no reschedule was requested or the best queue was
369                  * dd, and no current process has been selected.  We can
370                  * trivially become the current lwp on the current cpu.
371                  */
372                 if (dd->uschedcp == NULL) {
373                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
374                         dd->uschedcp = lp;
375                         dd->upri = lp->lwp_priority;
376                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
377                         spin_unlock(&dd->spin);
378                         break;
379                 }
380
381                 /*
382                  * Can we steal the current designated user thread?
383                  *
384                  * If we do the other thread will stall when it tries to
385                  * return to userland, possibly rescheduling elsewhere.
386                  *
387                  * It is important to do a masked test to avoid the edge
388                  * case where two near-equal-priority threads are constantly
389                  * interrupting each other.
390                  *
391                  * In the exact match case another thread has already gained
392  * uschedcp and lowered its priority; if we steal it the
393                  * other thread will stay stuck on the LWKT runq and not
394                  * push to another cpu.  So don't steal on equal-priority even
395                  * though it might appear to be more beneficial due to not
396                  * having to switch back to the other thread's context.
397                  */
398                 if (dd->uschedcp &&
399                    (dd->upri & ~PPQMASK) >
400                    (lp->lwp_priority & ~PPQMASK)) {
401                         dd->uschedcp = lp;
402                         dd->upri = lp->lwp_priority;
403                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
404                         spin_unlock(&dd->spin);
405                         break;
406                 }
407
408 #ifdef SMP
409                 /*
410                  * We are not the current lwp, figure out the best cpu
411                  * to run on (our current cpu will be given significant
412                  * weight).  Loop on cpu change.
413                  */
414                 if ((usched_dfly_features & 0x02) &&
415                     force_resched == 0 &&
416                     (rdd = dfly_choose_best_queue(lp)) != dd) {
417                         dfly_changeqcpu_locked(lp, dd, rdd);
418                         spin_unlock(&dd->spin);
419                         lwkt_deschedule(lp->lwp_thread);
420                         dfly_setrunqueue_dd(rdd, lp);
421                         lwkt_switch();
422                         gd = mycpu;
423                         dd = &dfly_pcpu[gd->gd_cpuid];
424                         continue;
425                 }
426 #endif
427
428                 /*
429                  * We cannot become the current lwp, place the lp on the
430                  * run-queue of this or another cpu and deschedule ourselves.
431                  *
432                  * When we are reactivated we will have another chance.
433                  *
434                  * Reload after a switch or setrunqueue/switch possibly
435                  * moved us to another cpu.
436                  */
437                 spin_unlock(&dd->spin);
438                 lwkt_deschedule(lp->lwp_thread);
439                 dfly_setrunqueue_dd(dd, lp);
440                 lwkt_switch();
441                 gd = mycpu;
442                 dd = &dfly_pcpu[gd->gd_cpuid];
443         }
444
445         /*
446          * Make sure upri is synchronized, then yield to LWKT threads as
447          * needed before returning.  This could result in another reschedule.
448          * XXX
449          */
450         crit_exit_quick(td);
451
452         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
453 }
454
455 /*
456  * DFLY_RELEASE_CURPROC
457  *
458  * This routine detaches the current thread from the userland scheduler,
459  * usually because the thread needs to run or block in the kernel (at
460  * kernel priority) for a while.
461  *
462  * This routine is also responsible for selecting a new thread to
463  * make the current thread.
464  *
465  * NOTE: This implementation differs from the dummy example in that
466  * dfly_select_curproc() is able to select the current process, whereas
467  * dummy_select_curproc() cannot.  Because of that we must NULL out
468  * uschedcp before selecting a new current process.
469  *
470  * Additionally, note that we may already be on a run queue if releasing
471  * via the lwkt_switch() in dfly_setrunqueue().
472  */
473 static void
474 dfly_release_curproc(struct lwp *lp)
475 {
476         globaldata_t gd = mycpu;
477         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
478
479         /*
480          * If lp is the designated current userland process on this cpu,
481          * clear the designation and select a replacement.
482          */
483         if (dd->uschedcp == lp) {
484                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
485                 spin_lock(&dd->spin);
486                 if (dd->uschedcp == lp) {
487                         dd->uschedcp = NULL;    /* don't let lp be selected */
488                         dd->upri = PRIBASE_NULL;
489                         atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
490                         spin_unlock(&dd->spin);
491                         dfly_select_curproc(gd);
492                 } else {
493                         spin_unlock(&dd->spin);
494                 }
495         }
496 }
497
498 /*
499  * DFLY_SELECT_CURPROC
500  *
501  * Select a new current process for this cpu and clear any pending user
502  * reschedule request.  The cpu currently has no current process.
503  *
504  * This routine is also responsible for equal-priority round-robining,
505  * typically triggered from dfly_schedulerclock().  (In the dummy
506  * scheduler example all the 'user' threads are LWKT scheduled at once
507  * and it simply calls lwkt_switch().)
508  *
509  * The calling process is not on the queue and cannot be selected.
510  */
511 static
512 void
513 dfly_select_curproc(globaldata_t gd)
514 {
515         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
516         struct lwp *nlp;
517         int cpuid = gd->gd_cpuid;
518
519         crit_enter_gd(gd);
520
521         spin_lock(&dd->spin);
522         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
523
524         if (nlp) {
525                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
526                 dd->upri = nlp->lwp_priority;
527                 dd->uschedcp = nlp;
528 #if 0
529                 dd->rrcount = 0;                /* reset round robin */
530 #endif
531                 spin_unlock(&dd->spin);
532 #ifdef SMP
533                 lwkt_acquire(nlp->lwp_thread);
534 #endif
535                 lwkt_schedule(nlp->lwp_thread);
536         } else {
537                 spin_unlock(&dd->spin);
538         }
539         crit_exit_gd(gd);
540 }
541
542 /*
543  * Place the specified lwp on the user scheduler's run queue.  This routine
544  * must be called with the thread descheduled.  The lwp must be runnable.
545  * It must not be possible for anyone else to explicitly schedule this thread.
546  *
547  * The thread may be the current thread as a special case.
548  */
549 static void
550 dfly_setrunqueue(struct lwp *lp)
551 {
552         dfly_pcpu_t dd;
553         dfly_pcpu_t rdd;
554
555         /*
556          * First validate the process LWKT state.
557          */
558         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
559         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
560             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
561              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
562         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
563
564         /*
565          * NOTE: dd/rdd do not necessarily represent the current cpu.
566          *       Instead they may represent the cpu the thread was last
567          *       scheduled on or inherited by its parent.
568          */
569         dd = &dfly_pcpu[lp->lwp_qcpu];
570         rdd = dd;
571
572         /*
573          * This process is not supposed to be scheduled anywhere or assigned
574          * as the current process anywhere.  Assert the condition.
575          */
576         KKASSERT(rdd->uschedcp != lp);
577
578 #ifndef SMP
579         /*
580          * If we are not SMP we do not have a scheduler helper to kick
581          * and must directly activate the process if none are scheduled.
582          *
583          * This is really only an issue when bootstrapping init since
584          * the caller in all other cases will be a user process, and
585          * even if released (rdd->uschedcp == NULL), that process will
586          * kickstart the scheduler when it returns to user mode from
587          * the kernel.
588          *
589          * NOTE: On SMP we can't just set some other cpu's uschedcp.
590          */
591         if (rdd->uschedcp == NULL) {
592                 spin_lock(&rdd->spin);
593                 if (rdd->uschedcp == NULL) {
594                         atomic_set_cpumask(&dfly_curprocmask, 1);
595                         rdd->uschedcp = lp;
596                         rdd->upri = lp->lwp_priority;
597                         spin_unlock(&rdd->spin);
598                         lwkt_schedule(lp->lwp_thread);
599                         return;
600                 }
601                 spin_unlock(&rdd->spin);
602         }
603 #endif
604
605 #ifdef SMP
606         /*
607          * Ok, we have to setrunqueue some target cpu and request a reschedule
608          * if necessary.
609          *
610          * We have to choose the best target cpu.  It might not be the current
611          * target even if the current cpu has no running user thread (for
612          * example, because the current cpu might be a hyperthread and its
613          * sibling has a thread assigned).
614          *
615          * If we just forked it is most optimal to run the child on the same
616          * cpu just in case the parent decides to wait for it (thus getting
617          * off that cpu).  As long as there is nothing else runnable on the
618          * cpu, that is.  If we did this unconditionally a parent forking
619          * multiple children before waiting (e.g. make -j N) leaves other
620          * cpus idle that could be working.
621          */
622         if (lp->lwp_forked) {
623                 lp->lwp_forked = 0;
624                 if (usched_dfly_features & 0x20)
625                         rdd = dfly_choose_best_queue(lp);
626                 else if (usched_dfly_features & 0x40)
627                         rdd = &dfly_pcpu[lp->lwp_qcpu];
628                 else if (usched_dfly_features & 0x80)
629                         rdd = dfly_choose_queue_simple(rdd, lp);
630                 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
631                         rdd = dfly_choose_best_queue(lp);
632                 else
633                         rdd = &dfly_pcpu[lp->lwp_qcpu];
634         } else {
635                 rdd = dfly_choose_best_queue(lp);
636                 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
637         }
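        /*
         * Note: for a newly forked lwp the default features value (0x8f)
         * selects the 0x80 case above, placing the child via
         * dfly_choose_queue_simple().
         */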
638         if (lp->lwp_qcpu != rdd->cpuid) {
639                 spin_lock(&dd->spin);
640                 dfly_changeqcpu_locked(lp, dd, rdd);
641                 spin_unlock(&dd->spin);
642         }
643 #endif
644         dfly_setrunqueue_dd(rdd, lp);
645 }
646
647 #ifdef SMP
648
649 /*
650  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
651  * spin-locked on-call.  rdd does not have to be.
652  */
653 static void
654 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
655 {
656         if (lp->lwp_qcpu != rdd->cpuid) {
657                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
658                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
659                         atomic_add_int(&dd->uload, -lp->lwp_uload);
660                         atomic_add_int(&dd->ucount, -1);
661                         atomic_add_int(&dfly_ucount, -1);
662                 }
663                 lp->lwp_qcpu = rdd->cpuid;
664         }
665 }
666
667 #endif
668
669 /*
670  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
671  * also performs all necessary ancillary notification actions.
672  */
673 static void
674 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
675 {
676 #ifdef SMP
677         globaldata_t rgd;
678
679         /*
680          * We might be moving the lp to another cpu's run queue, and once
681          * on the runqueue (even if it is our cpu's), another cpu can rip
682          * it away from us.
683          *
684          * TDF_MIGRATING might already be set if this is part of a
685          * remrunqueue+setrunqueue sequence.
686          */
687         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
688                 lwkt_giveaway(lp->lwp_thread);
689
690         rgd = globaldata_find(rdd->cpuid);
691
692         /*
693          * We lose control of the lp the moment we release the spinlock
694          * after having placed it on the queue.  i.e. another cpu could pick
695          * it up, or it could exit, or its priority could be further
696          * adjusted, or something like that.
697          *
698          * WARNING! rdd can point to a foreign cpu!
699          */
700         spin_lock(&rdd->spin);
701         dfly_setrunqueue_locked(rdd, lp);
702
703         if (rgd == mycpu) {
704                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
705                         spin_unlock(&rdd->spin);
706                         if (rdd->uschedcp == NULL) {
707                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
708                                 need_user_resched();
709                         } else {
710                                 need_user_resched();
711                         }
712                 } else {
713                         spin_unlock(&rdd->spin);
714                 }
715         } else {
716                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
717                         spin_unlock(&rdd->spin);
718                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
719                                        NULL);
720                 } else if (dfly_rdyprocmask & rgd->gd_cpumask) {
721                         atomic_clear_cpumask(&dfly_rdyprocmask,
722                                              rgd->gd_cpumask);
723                         spin_unlock(&rdd->spin);
724                         wakeup(&rdd->helper_thread);
725                 } else {
726                         spin_unlock(&rdd->spin);
727                 }
728         }
729 #else
730         /*
731          * Request a reschedule if appropriate.
732          */
733         spin_lock(&rdd->spin);
734         dfly_setrunqueue_locked(rdd, lp);
735         spin_unlock(&rdd->spin);
736         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
737                 need_user_resched();
738         }
739 #endif
740 }
741
742 /*
743  * This routine is called from a systimer IPI.  It MUST be MP-safe and
744  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
745  * each cpu.
746  */
747 static
748 void
749 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
750 {
751         globaldata_t gd = mycpu;
752         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
753
754         /*
755          * Spinlocks also hold a critical section so there should not be
756          * any active.
757          */
758         KKASSERT(gd->gd_spinlocks == 0);
759
760         if (lp == NULL)
761                 return;
762
763         /*
764          * Do we need to round-robin?  We round-robin 10 times a second.
765          * This should only occur for cpu-bound batch processes.
766          */
767         if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
768                 lp->lwp_thread->td_wakefromcpu = -1;
769                 need_user_resched();
770         }
771
772         /*
773          * Adjust estcpu upward using a real time equivalent calculation,
774          * and recalculate lp's priority.
775          */
776         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
777         dfly_resetpriority(lp);
778
779         /*
780          * Rebalance two cpus every 8 ticks, pulling the worst thread
781          * from the worst cpu's queue into a rotating cpu number.
782          *
783          * This mechanic is needed because the push algorithms can
784          * steady-state in a non-optimal configuration.  We need to mix it
785          * up a little, even if it means breaking up a paired thread, so
786          * the push algorithms can rebalance the degenerate conditions.
787          * This portion of the algorithm exists to ensure stability at the
788          * selected weightings.
789          *
790          * Because we might be breaking up optimal conditions we do not want
791          * to execute this too quickly, hence we only rebalance roughly
792          * 7-8 times per second.  The pushes, on the other hand, are capable
793          * of moving threads to other cpus at a much higher rate.
794          *
795          * We choose the most heavily loaded thread from the worst queue
796          * in order to ensure that multiple heavy-weight threads on the same
797          * queue get broken up, and also because these threads are the most
798          * likely to be able to remain in place.  Hopefully then any pairings,
799          * if applicable, migrate to where these threads are.
800          */
801 #ifdef SMP
802         if ((usched_dfly_features & 0x04) &&
803             ((u_int)sched_ticks & 7) == 0 &&
804             (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
805                 /*
806                  * Our cpu's turn in the rebalance rotation has come up.
807                  */
808                 struct lwp *nlp;
809                 dfly_pcpu_t rdd;
810
811                 rdd = dfly_choose_worst_queue(dd);
812                 if (rdd) {
813                         spin_lock(&dd->spin);
814                         if (spin_trylock(&rdd->spin)) {
815                                 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
816                                 spin_unlock(&rdd->spin);
817                                 if (nlp == NULL)
818                                         spin_unlock(&dd->spin);
819                         } else {
820                                 spin_unlock(&dd->spin);
821                                 nlp = NULL;
822                         }
823                 } else {
824                         nlp = NULL;
825                 }
826                 /* dd->spin held if nlp != NULL */
827
828                 /*
829                  * Either schedule it or add it to our queue.
830                  */
831                 if (nlp &&
832                     (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
833                         atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
834                         dd->upri = nlp->lwp_priority;
835                         dd->uschedcp = nlp;
836 #if 0
837                         dd->rrcount = 0;        /* reset round robin */
838 #endif
839                         spin_unlock(&dd->spin);
840                         lwkt_acquire(nlp->lwp_thread);
841                         lwkt_schedule(nlp->lwp_thread);
842                 } else if (nlp) {
843                         dfly_setrunqueue_locked(dd, nlp);
844                         spin_unlock(&dd->spin);
845                 }
846         }
847 #endif
848 }
849
850 /*
851  * Called from acquire and from kern_synch's one-second timer (one of the
852  * callout helper threads) with a critical section held.
853  *
854  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
855  * overall system load.
856  *
857  * Note that no recalculation occurs for a process which sleeps and wakes
858  * up in the same tick.  That is, a system doing thousands of context
859  * switches per second will still only do serious estcpu calculations
860  * ESTCPUFREQ times per second.
861  */
862 static
863 void
864 dfly_recalculate_estcpu(struct lwp *lp)
865 {
866         globaldata_t gd = mycpu;
867         sysclock_t cpbase;
868         sysclock_t ttlticks;
869         int estcpu;
870         int decay_factor;
871         int ucount;
872
873         /*
874          * We have to subtract periodic to get the last schedclock
875          * timeout time, otherwise we would get the upcoming timeout.
876          * Keep in mind that a process can migrate between cpus and
877          * while the scheduler clock should be very close, boundary
878          * conditions could lead to a small negative delta.
879          */
880         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
881
882         if (lp->lwp_slptime > 1) {
883                 /*
884                  * Too much time has passed, do a coarse correction.
885                  */
886                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
887                 dfly_resetpriority(lp);
888                 lp->lwp_cpbase = cpbase;
889                 lp->lwp_cpticks = 0;
890                 lp->lwp_estfast = 0;
891         } else if (lp->lwp_cpbase != cpbase) {
892                 /*
893                  * Adjust estcpu if we are in a different tick.  Don't waste
894                  * time if we are in the same tick.
895                  *
896                  * First calculate the number of ticks in the measurement
897                  * interval.  The ttlticks calculation can wind up 0 due to
898                  * a bug in the handling of lwp_slptime  (as yet not found),
899                  * so make sure we do not get a divide by 0 panic.
900                  */
901                 ttlticks = (cpbase - lp->lwp_cpbase) /
902                            gd->gd_schedclock.periodic;
903                 if (ttlticks < 0) {
904                         ttlticks = 0;
905                         lp->lwp_cpbase = cpbase;
906                 }
907                 if (ttlticks == 0)
908                         return;
909                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
910
911                 /*
912                  * Calculate the percentage of one cpu being used then
913                  * compensate for any system load in excess of ncpus.
914                  *
915                  * For example, if we have 8 cores and 16 running cpu-bound
916                  * processes then all things being equal each process will
917                  * get 50% of one cpu.  We need to pump this value back
918                  * up to 100% so the estcpu calculation properly adjusts
919                  * the process's dynamic priority.
920                  *
921                  * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
922                  */
923                 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
924                 ucount = dfly_ucount;
925                 if (ucount > ncpus) {
926                         estcpu += estcpu * (ucount - ncpus) / ncpus;
927                 }
928
929                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
930                         kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
931                                 lp->lwp_proc->p_pid, lp,
932                                 estcpu, lp->lwp_estcpu,
933                                 lp->lwp_cpticks, ttlticks);
934                 }
935
936                 /*
937                  * Adjust lp->lwp_estcpu.  The decay factor determines how
938                  * quickly lwp_estcpu collapses to its realtime calculation.
939                  * A slower collapse gives us a more accurate number over
940                  * the long term but can create problems with bursty threads
941                  * or threads which become cpu hogs.
942                  *
943                  * To solve this problem, newly started lwps and lwps which
944                  * are restarting after having been asleep for a while are
945                  * given a much, much faster decay in order to quickly
946                  * detect whether they become cpu-bound.
947                  *
948                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
949                  *       and not here, but we must still ensure that a
950                  *       cpu-bound nice -20 process does not completely
951                  *       override a cpu-bound nice +20 process.
952                  *
953                  * NOTE: We must use ESTCPULIM() here to deal with any
954                  *       overshoot.
955                  */
956                 decay_factor = usched_dfly_decay;
957                 if (decay_factor < 1)
958                         decay_factor = 1;
959                 if (decay_factor > 1024)
960                         decay_factor = 1024;
961
962                 if (lp->lwp_estfast < usched_dfly_decay) {
963                         ++lp->lwp_estfast;
964                         lp->lwp_estcpu = ESTCPULIM(
965                                 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
966                                 (lp->lwp_estfast + 1));
967                 } else {
968                         lp->lwp_estcpu = ESTCPULIM(
969                                 (lp->lwp_estcpu * decay_factor + estcpu) /
970                                 (decay_factor + 1));
971                 }
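                /*
                 * With the default usched_dfly_decay of 8 the steady-state
                 * case above is a weighted moving average:
                 * new estcpu = (old estcpu * 8 + measured estcpu) / 9.
                 */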
972
973                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
974                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
975                 dfly_resetpriority(lp);
976                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
977                 lp->lwp_cpticks = 0;
978         }
979 }
980
981 /*
982  * Compute the priority of a process when running in user mode.
983  * Arrange to reschedule if the resulting priority is better
984  * than that of the current process.
985  *
986  * This routine may be called with any process.
987  *
988  * This routine is called by fork1() for initial setup with the process
989  * of the run queue, and also may be called normally with the process on or
990  * off the run queue.
991  */
992 static void
993 dfly_resetpriority(struct lwp *lp)
994 {
995         dfly_pcpu_t rdd;
996         int newpriority;
997         u_short newrqtype;
998         int rcpu;
999         int checkpri;
1000         int estcpu;
1001         int delta_uload;
1002
1003         crit_enter();
1004
1005         /*
1006          * Lock the per-cpu scheduler lp belongs to.  This can be on a different
1007          * cpu.  Handle races.  This loop breaks out with the appropriate
1008          * rdd locked.
1009          */
1010         for (;;) {
1011                 rcpu = lp->lwp_qcpu;
1012                 cpu_ccfence();
1013                 rdd = &dfly_pcpu[rcpu];
1014                 spin_lock(&rdd->spin);
1015                 if (rcpu == lp->lwp_qcpu)
1016                         break;
1017                 spin_unlock(&rdd->spin);
1018         }
1019
1020         /*
1021          * Calculate the new priority and queue type
1022          */
1023         newrqtype = lp->lwp_rtprio.type;
1024
1025         switch(newrqtype) {
1026         case RTP_PRIO_REALTIME:
1027         case RTP_PRIO_FIFO:
1028                 newpriority = PRIBASE_REALTIME +
1029                              (lp->lwp_rtprio.prio & PRIMASK);
1030                 break;
1031         case RTP_PRIO_NORMAL:
1032                 /*
1033                  * Dynamic priority, derived from p_nice and estcpu.
1034                  */
1035                 estcpu = lp->lwp_estcpu;
1036
1037                 /*
1038                  * p_nice piece         Adds (0-40) * 2         0-80
1039                  * estcpu               Adds 16384  * 4 / 512   0-128
1040                  */
1041                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1042                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1043                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1044                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1045                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
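                /*
                 * Worked example, assuming the usual PRIO_MIN/PRIO_MAX of
                 * -20/+20: nice 0 and estcpu 8192 gives (0 + 20) * 4 / 2 = 40
                 * plus 8192 * 4 / 512 = 64, i.e. 104, scaled by 128 / 210 to
                 * 63, so lwp_priority becomes PRIBASE_NORMAL + 63 = 191 and
                 * the thread lands in run queue index 63 / 4 = 15.
                 */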
1046                 break;
1047         case RTP_PRIO_IDLE:
1048                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1049                 break;
1050         case RTP_PRIO_THREAD:
1051                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1052                 break;
1053         default:
1054                 panic("Bad RTP_PRIO %d", newrqtype);
1055                 /* NOT REACHED */
1056         }
1057
1058         /*
1059          * The LWKT scheduler doesn't dive into usched structures, so give it a hint
1060          * on the relative priority of user threads running in the kernel.
1061          * The LWKT scheduler will always ensure that a user thread running
1062          * in the kernel will get cpu some time, regardless of its upri,
1063          * but can decide not to instantly switch from one kernel or user
1064          * mode user thread to a kernel-mode user thread when it has a less
1065          * desireable user priority.
1066          *
1067          * td_upri has normal sense (higher values are more desireable), so
1068          * negate it.
1069          */
1070         lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
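        /*
         * With the default swmask of ~PPQMASK the hint only changes in
         * PPQ-sized steps, so near-equal user priorities do not perturb
         * the LWKT hint.
         */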
1071
1072         /*
1073          * The newpriority incorporates the queue type so do a simple masked
1074          * check to determine if the process has moved to another queue.  If
1075          * it has, and it is currently on a run queue, then move it.
1076          *
1077          * Since uload is ~PPQMASK masked, no modifications are necessary if
1078          * we end up in the same run queue.
1079          */
1080         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1081                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1082                         dfly_remrunqueue_locked(rdd, lp);
1083                         lp->lwp_priority = newpriority;
1084                         lp->lwp_rqtype = newrqtype;
1085                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1086                         dfly_setrunqueue_locked(rdd, lp);
1087                         checkpri = 1;
1088                 } else {
1089                         lp->lwp_priority = newpriority;
1090                         lp->lwp_rqtype = newrqtype;
1091                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1092                         checkpri = 0;
1093                 }
1094         } else {
1095                 /*
1096                  * In the same PPQ, uload cannot change.
1097                  */
1098                 lp->lwp_priority = newpriority;
1099                 checkpri = 1;
1100                 rcpu = -1;
1101         }
1102
1103         /*
1104          * Adjust effective load.
1105          *
1106          * Calculate load then scale up or down geometrically based on p_nice.
1107          * Processes niced up (positive) are less important, and processes
1108          * niced downward (negative) are more important.  The higher the uload,
1109          * the more important the thread.
1110          */
1111         /* 0-511, 0-100% cpu */
1112         delta_uload = lp->lwp_estcpu / NQS;
1113         delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
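        /*
         * e.g. a fully cpu-bound thread (estcpu near ESTCPUMAX) starts at
         * 16384 / 32 = 512; nice +20 scales that down to roughly 25 while
         * nice -20 scales it up to roughly 999.
         */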
1114
1115
1116         delta_uload -= lp->lwp_uload;
1117         lp->lwp_uload += delta_uload;
1118         if (lp->lwp_mpflags & LWP_MP_ULOAD)
1119                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1120
1121         /*
1122          * Determine if we need to reschedule the target cpu.  This only
1123          * occurs if the LWP is already on a scheduler queue, which means
1124          * that idle cpu notification has already occurred.  At most we
1125          * need only issue a need_user_resched() on the appropriate cpu.
1126          *
1127          * The LWP may be owned by a CPU different from the current one,
1128          * in which case dd->uschedcp may be modified without an MP lock
1129          * or a spinlock held.  The worst that happens is that the code
1130          * below causes a spurious need_user_resched() on the target CPU
1131          * and dd->upri to be wrong for a short period of time, both of
1132          * which are harmless.
1133          *
1134          * If checkpri is 0 we are adjusting the priority of the current
1135          * process, possibly higher (less desirable), so ignore the upri
1136          * check which will fail in that case.
1137          */
1138         if (rcpu >= 0) {
1139                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1140                     (checkpri == 0 ||
1141                      (rdd->upri & ~PRIMASK) >
1142                      (lp->lwp_priority & ~PRIMASK))) {
1143 #ifdef SMP
1144                         if (rcpu == mycpu->gd_cpuid) {
1145                                 spin_unlock(&rdd->spin);
1146                                 need_user_resched();
1147                         } else {
1148                                 atomic_clear_cpumask(&dfly_rdyprocmask,
1149                                                      CPUMASK(rcpu));
1150                                 spin_unlock(&rdd->spin);
1151                                 lwkt_send_ipiq(globaldata_find(rcpu),
1152                                                dfly_need_user_resched_remote,
1153                                                NULL);
1154                         }
1155 #else
1156                         spin_unlock(&rdd->spin);
1157                         need_user_resched();
1158 #endif
1159                 } else {
1160                         spin_unlock(&rdd->spin);
1161                 }
1162         } else {
1163                 spin_unlock(&rdd->spin);
1164         }
1165         crit_exit();
1166 }
1167
1168 static
1169 void
1170 dfly_yield(struct lwp *lp)
1171 {
1172 #if 0
1173         /* FUTURE (or something similar) */
1174         switch(lp->lwp_rqtype) {
1175         case RTP_PRIO_NORMAL:
1176                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1177                 break;
1178         default:
1179                 break;
1180         }
1181 #endif
1182         need_user_resched();
1183 }
1184
1185 /*
1186  * Called from fork1() when a new child process is being created.
1187  *
1188  * Give the child process an initial estcpu that is more batchy than
1189  * its parent and dock the parent for the fork (but do not
1190  * reschedule the parent).
1191  *
1194  * XXX lwp should be "spawning" instead of "forking"
1195  */
1196 static void
1197 dfly_forking(struct lwp *plp, struct lwp *lp)
1198 {
1199         /*
1200          * Put the child 4 queue slots (out of 32) higher than the parent
1201          * (less desireable than the parent).
1202          */
1203         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1204         lp->lwp_forked = 1;
1205         lp->lwp_estfast = 0;
1206
1207         /*
1208          * Dock the parent a cost for the fork, protecting us from fork
1209          * bombs.  If the parent is forking quickly make the child more
1210          * batchy.
1211          */
1212         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1213 }
1214
1215 /*
1216  * Called when a lwp is being removed from this scheduler, typically
1217  * during lwp_exit().  We have to clean out any ULOAD accounting before
1218  * we can let the lp go.  The dd->spin lock is not needed for uload
1219  * updates.
1220  *
1221  * Scheduler dequeueing has already occurred, no further action in that
1222  * regard is needed.
1223  */
1224 static void
1225 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1226 {
1227         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1228
1229         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1230                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1231                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1232                 atomic_add_int(&dd->ucount, -1);
1233                 atomic_add_int(&dfly_ucount, -1);
1234         }
1235 }
1236
1237 /*
1238  * This function cannot block in any way, but spinlocks are ok.
1239  *
1240  * Update the uload based on the state of the thread (whether it is going
1241  * to sleep or running again).  The uload is meant to be a longer-term
1242  * load and not an instantanious load.
1243  */
1244 static void
1245 dfly_uload_update(struct lwp *lp)
1246 {
1247         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1248
1249         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1250                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1251                         spin_lock(&dd->spin);
1252                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1253                                 atomic_set_int(&lp->lwp_mpflags,
1254                                                LWP_MP_ULOAD);
1255                                 atomic_add_int(&dd->uload, lp->lwp_uload);
1256                                 atomic_add_int(&dd->ucount, 1);
1257                                 atomic_add_int(&dfly_ucount, 1);
1258                         }
1259                         spin_unlock(&dd->spin);
1260                 }
1261         } else if (lp->lwp_slptime > 0) {
1262                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1263                         spin_lock(&dd->spin);
1264                         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1265                                 atomic_clear_int(&lp->lwp_mpflags,
1266                                                  LWP_MP_ULOAD);
1267                                 atomic_add_int(&dd->uload, -lp->lwp_uload);
1268                                 atomic_add_int(&dd->ucount, -1);
1269                                 atomic_add_int(&dfly_ucount, -1);
1270                         }
1271                         spin_unlock(&dd->spin);
1272                 }
1273         }
1274 }
1275
1276 /*
1277  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1278  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1279  * has a better or equal priority than the process that would otherwise be
1280  * chosen, NULL is returned.
1281  *
1282  * Until we fix the RUNQ code the chklp test has to be strict or we may
1283  * bounce between processes trying to acquire the current process designation.
1284  *
1285  * Must be called with rdd->spin locked.  The spinlock is left intact through
1286  * the entire routine.  dd->spin does not have to be locked.
1287  *
1288  * If worst is non-zero this function finds the worst thread instead of the
1289  * best thread (used by the schedulerclock-based rover).
1290  */
1291 static
1292 struct lwp *
1293 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1294                        struct lwp *chklp, int worst)
1295 {
1296         struct lwp *lp;
1297         struct rq *q;
1298         u_int32_t *which, *which2;
1299         u_int32_t pri;
1300         u_int32_t rtqbits;
1301         u_int32_t tsqbits;
1302         u_int32_t idqbits;
1303
1304         rtqbits = rdd->rtqueuebits;
1305         tsqbits = rdd->queuebits;
1306         idqbits = rdd->idqueuebits;
1307
1308         if (worst) {
1309                 if (idqbits) {
1310                         pri = bsrl(idqbits);
1311                         q = &rdd->idqueues[pri];
1312                         which = &rdd->idqueuebits;
1313                         which2 = &idqbits;
1314                 } else if (tsqbits) {
1315                         pri = bsrl(tsqbits);
1316                         q = &rdd->queues[pri];
1317                         which = &rdd->queuebits;
1318                         which2 = &tsqbits;
1319                 } else if (rtqbits) {
1320                         pri = bsrl(rtqbits);
1321                         q = &rdd->rtqueues[pri];
1322                         which = &rdd->rtqueuebits;
1323                         which2 = &rtqbits;
1324                 } else {
1325                         return (NULL);
1326                 }
1327                 lp = TAILQ_LAST(q, rq);
1328         } else {
1329                 if (rtqbits) {
1330                         pri = bsfl(rtqbits);
1331                         q = &rdd->rtqueues[pri];
1332                         which = &rdd->rtqueuebits;
1333                         which2 = &rtqbits;
1334                 } else if (tsqbits) {
1335                         pri = bsfl(tsqbits);
1336                         q = &rdd->queues[pri];
1337                         which = &rdd->queuebits;
1338                         which2 = &tsqbits;
1339                 } else if (idqbits) {
1340                         pri = bsfl(idqbits);
1341                         q = &rdd->idqueues[pri];
1342                         which = &rdd->idqueuebits;
1343                         which2 = &idqbits;
1344                 } else {
1345                         return (NULL);
1346                 }
1347                 lp = TAILQ_FIRST(q);
1348         }
1349         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1350
1351         /*
1352          * If the passed lwp <chklp> is reasonably close to the selected
1353          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1354          *
1355          * Note that we must err on the side of <chklp> to avoid bouncing
1356          * between threads in the acquire code.
1357          */
1358         if (chklp) {
1359                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1360                         return(NULL);
1361         }
1362
1363         KTR_COND_LOG(usched_chooseproc,
1364             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1365             lp->lwp_proc->p_pid,
1366             lp->lwp_thread->td_gd->gd_cpuid,
1367             mycpu->gd_cpuid);
1368
1369         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1370         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1371         TAILQ_REMOVE(q, lp, lwp_procq);
1372         --rdd->runqcount;
1373         if (TAILQ_EMPTY(q))
1374                 *which &= ~(1 << pri);
1375
1376         /*
1377          * If we are choosing a process from rdd with the intent to
1378          * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1379          * is still held.
1380          */
1381         if (rdd != dd) {
1382                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1383                         atomic_add_int(&rdd->uload, -lp->lwp_uload);
1384                         atomic_add_int(&rdd->ucount, -1);
1385                         atomic_add_int(&dfly_ucount, -1);
1386                 }
1387                 lp->lwp_qcpu = dd->cpuid;
1388                 atomic_add_int(&dd->uload, lp->lwp_uload);
1389                 atomic_add_int(&dd->ucount, 1);
1390                 atomic_add_int(&dfly_ucount, 1);
1391                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1392         }
1393         return lp;
1394 }
1395
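#if 0
/*
 * Illustrative user-space sketch of the bitmap-driven queue selection
 * performed by dfly_chooseproc_locked() above.  Each bit in a 32-bit word
 * marks a non-empty run queue; the best candidate comes from the lowest
 * set bit of the most important class with work (rt, then normal, then
 * idle), the worst candidate from the highest set bit of the least
 * important class.  The demo_* names are illustrative only and ffs(3)
 * stands in for the bsfl()/bsrl() intrinsics.
 */
#include <strings.h>            /* ffs() */

static int
demo_bsfl(unsigned int bits)    /* lowest set bit, like bsfl() */
{
        return (ffs(bits) - 1);
}

static int
demo_bsrl(unsigned int bits)    /* highest set bit, like bsrl() */
{
        int pri = -1;

        while (bits) {
                ++pri;
                bits >>= 1;
        }
        return (pri);
}

/*
 * Returns (class << 8) | queue index, or -1 when all classes are empty.
 * Class 0 = realtime, 1 = normal, 2 = idle.
 */
static int
demo_pick_best(unsigned int rtbits, unsigned int tsbits, unsigned int idbits)
{
        if (rtbits)
                return ((0 << 8) | demo_bsfl(rtbits));
        if (tsbits)
                return ((1 << 8) | demo_bsfl(tsbits));
        if (idbits)
                return ((2 << 8) | demo_bsfl(idbits));
        return (-1);
}

static int
demo_pick_worst(unsigned int rtbits, unsigned int tsbits, unsigned int idbits)
{
        if (idbits)
                return ((2 << 8) | demo_bsrl(idbits));
        if (tsbits)
                return ((1 << 8) | demo_bsrl(tsbits));
        if (rtbits)
                return ((0 << 8) | demo_bsrl(rtbits));
        return (-1);
}
#endif
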
1396 #ifdef SMP
1397
1398 /*
1399  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1400  *
1401  * Choose a cpu node to schedule lp on, hopefully nearby its current
1402  * node.
1403  *
1404  * We give the current node a modest advantage for obvious reasons.
1405  *
1406  * We also give the node the thread was woken up FROM a slight advantage
1407  * in order to try to schedule paired threads which synchronize/block waiting
1408  * for each other fairly close to each other.  Similarly in a network setting
1409  * this feature will also attempt to place a user process near the kernel
1410  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1411  * algorithm as it heuristically groups synchronizing processes for locality
1412  * of reference in multi-socket systems.
1413  *
1414  * We check against running processes and give a big advantage if there
1415  * are none running.
1416  *
1417  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1418  *
1419  * When the topology is known choose a cpu whose group has, in aggregate,
1420  * the lowest weighted load.
1421  */
1422 static
1423 dfly_pcpu_t
1424 dfly_choose_best_queue(struct lwp *lp)
1425 {
1426         cpumask_t wakemask;
1427         cpumask_t mask;
1428         cpu_node_t *cpup;
1429         cpu_node_t *cpun;
1430         cpu_node_t *cpub;
1431         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1432         dfly_pcpu_t rdd;
1433         int wakecpu;
1434         int cpuid;
1435         int n;
1436         int count;
1437         int load;
1438         int lowest_load;
1439
1440         /*
1441          * When the topology is unknown choose a random cpu that is hopefully
1442          * idle.
1443          */
1444         if (dd->cpunode == NULL)
1445                 return (dfly_choose_queue_simple(dd, lp));
1446
1447         /*
1448          * Pairing mask
1449          */
1450         if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1451                 wakemask = dfly_pcpu[wakecpu].cpumask;
1452         else
1453                 wakemask = 0;
1454
1455         /*
1456          * When the topology is known choose a cpu whose group has, in
1457          * aggregate, the lowest weighted load.
1458          */
1459         cpup = root_cpu_node;
1460         rdd = dd;
1461
1462         while (cpup) {
1463                 /*
1464                  * Degenerate case super-root
1465                  */
1466                 if (cpup->child_node && cpup->child_no == 1) {
1467                         cpup = cpup->child_node;
1468                         continue;
1469                 }
1470
1471                 /*
1472                  * Terminal cpunode
1473                  */
1474                 if (cpup->child_node == NULL) {
1475                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1476                         break;
1477                 }
1478
1479                 cpub = NULL;
1480                 lowest_load = 0x7FFFFFFF;
1481
1482                 for (n = 0; n < cpup->child_no; ++n) {
1483                         /*
1484                          * Accumulate load information for all cpus
1485                          * which are members of this node.
1486                          */
1487                         cpun = &cpup->child_node[n];
1488                         mask = cpun->members & usched_global_cpumask &
1489                                smp_active_mask & lp->lwp_cpumask;
1490                         if (mask == 0)
1491                                 continue;
1492
1493                         count = 0;
1494                         load = 0;
1495
1496                         while (mask) {
1497                                 cpuid = BSFCPUMASK(mask);
1498                                 rdd = &dfly_pcpu[cpuid];
1499                                 load += rdd->uload;
1500                                 load += rdd->ucount * usched_dfly_weight3;
1501
1502                                 if (rdd->uschedcp == NULL &&
1503                                     rdd->runqcount == 0 &&
1504                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1505                                 ) {
1506                                         load -= usched_dfly_weight4;
1507                                 }
1508 #if 0
1509                                 else if (rdd->upri > lp->lwp_priority + PPQ) {
1510                                         load -= usched_dfly_weight4 / 2;
1511                                 }
1512 #endif
1513                                 mask &= ~CPUMASK(cpuid);
1514                                 ++count;
1515                         }
1516
1517                         /*
1518                          * Compensate if the lp is already accounted for in
1519                          * the aggregate uload for this mask set.  We want
1520                          * to calculate the loads as if lp were not present,
1521                          * otherwise the calculation is bogus.
1522                          */
1523                         if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1524                             (dd->cpumask & cpun->members)) {
1525                                 load -= lp->lwp_uload;
1526                                 load -= usched_dfly_weight3;
1527                         }
1528
1529                         load /= count;
1530
1531                         /*
1532                          * Advantage the cpu group (lp) is already on.
1533                          */
1534                         if (cpun->members & dd->cpumask)
1535                                 load -= usched_dfly_weight1;
1536
1537                         /*
1538                          * Advantage the cpu group we want to pair (lp) to,
1539                          * but don't let it go to the exact same cpu as
1540                          * the wakecpu target.
1541                          *
1542                          * We do this by checking whether cpun is a
1543                          * terminal node or not.  All cpun's at the same
1544                          * level will either all be terminal or all not
1545                          * terminal.
1546                          *
1547                          * If it is and we match we disadvantage the load.
1548                          * If it is and we don't match we advantage the load.
1549                          *
1550                          * Also note that we are effectively disadvantaging
1551          * all-but-one by the same amount, so it won't affect
1552                          * the weight1 factor for the all-but-one nodes.
1553                          */
1554                         if (cpun->members & wakemask) {
1555                                 if (cpun->child_node != NULL) {
1556                                         /* advantage */
1557                                         load -= usched_dfly_weight2;
1558                                 } else {
1559                                         if (usched_dfly_features & 0x10)
1560                                                 load += usched_dfly_weight2;
1561                                         else
1562                                                 load -= usched_dfly_weight2;
1563                                 }
1564                         }
1565
1566                         /*
1567                          * Calculate the best load
1568                          */
1569                         if (cpub == NULL || lowest_load > load ||
1570                             (lowest_load == load &&
1571                              (cpun->members & dd->cpumask))
1572                         ) {
1573                                 lowest_load = load;
1574                                 cpub = cpun;
1575                         }
1576                 }
1577                 cpup = cpub;
1578         }
1579         if (usched_dfly_chooser)
1580                 kprintf("lp %02d->%02d %s\n",
1581                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1582         return (rdd);
1583 }
1584
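#if 0
/*
 * Illustrative sketch of the per-group weighting used by
 * dfly_choose_best_queue() above, stripped of the topology walk and the
 * cpumask handling.  struct demo_cpu and demo_group_load() are illustrative
 * stand-ins for dfly_pcpu and the inner loop; weight3/weight4 correspond to
 * the usched_dfly_weight3/weight4 tunables.  The caller is assumed to skip
 * empty groups, so ncpu > 0.
 */
struct demo_cpu {
        int     uload;          /* aggregate weighted load (see ULOAD note) */
        int     ucount;         /* count of runnable + running user threads */
        int     idle;           /* no uschedcp and empty run queues */
};

static int
demo_group_load(const struct demo_cpu *cpus, int ncpu,
                int weight3, int weight4)
{
        int load = 0;
        int n;

        for (n = 0; n < ncpu; ++n) {
                load += cpus[n].uload;
                load += cpus[n].ucount * weight3;  /* per-thread overhead */
                if (cpus[n].idle)
                        load -= weight4;        /* big advantage when idle */
        }
        return (load / ncpu);   /* average so groups of any size compare */
}
#endif
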
1585 /*
1586  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1587  *
1588  * Choose the worst queue close to dd's cpu node with a non-empty runq
1589  * that is NOT dd.  Also require that the moving of the highest-load thread
1590  * from rdd to dd does not cause the uloads to cross each other.
1591  *
1592  * This is used by the thread chooser when the current cpu's queues are
1593  * empty to steal a thread from another cpu's queue.  We want to offload
1594  * the most heavily-loaded queue.
1595  */
1596 static
1597 dfly_pcpu_t
1598 dfly_choose_worst_queue(dfly_pcpu_t dd)
1599 {
1600         cpumask_t mask;
1601         cpu_node_t *cpup;
1602         cpu_node_t *cpun;
1603         cpu_node_t *cpub;
1604         dfly_pcpu_t rdd;
1605         int cpuid;
1606         int n;
1607         int count;
1608         int load;
1609 #if 0
1610         int pri;
1611         int hpri;
1612 #endif
1613         int highest_load;
1614
1615         /*
1616          * When the topology is unknown we cannot pick a worst queue to
1617          * steal from, so just return NULL.
1618          */
1619         if (dd->cpunode == NULL) {
1620                 return (NULL);
1621         }
1622
1623         /*
1624          * When the topology is known choose a cpu whose group has, in
1625          * aggregate, the highest weighted load.
1626          */
1627         cpup = root_cpu_node;
1628         rdd = dd;
1629         while (cpup) {
1630                 /*
1631                  * Degenerate case super-root
1632                  */
1633                 if (cpup->child_node && cpup->child_no == 1) {
1634                         cpup = cpup->child_node;
1635                         continue;
1636                 }
1637
1638                 /*
1639                  * Terminal cpunode
1640                  */
1641                 if (cpup->child_node == NULL) {
1642                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1643                         break;
1644                 }
1645
1646                 cpub = NULL;
1647                 highest_load = 0;
1648
1649                 for (n = 0; n < cpup->child_no; ++n) {
1650                         /*
1651                          * Accumulate load information for all cpus
1652                          * which are members of this node.
1653                          */
1654                         cpun = &cpup->child_node[n];
1655                         mask = cpun->members & usched_global_cpumask &
1656                                smp_active_mask;
1657                         if (mask == 0)
1658                                 continue;
1659                         count = 0;
1660                         load = 0;
1661
1662                         while (mask) {
1663                                 cpuid = BSFCPUMASK(mask);
1664                                 rdd = &dfly_pcpu[cpuid];
1665                                 load += rdd->uload;
1666                                 load += rdd->ucount * usched_dfly_weight3;
1667                                 if (rdd->uschedcp == NULL &&
1668                                     rdd->runqcount == 0 &&
1669                                     globaldata_find(cpuid)->gd_tdrunqcount == 0
1670                                 ) {
1671                                         load -= usched_dfly_weight4;
1672                                 }
1673 #if 0
1674                                 else if (rdd->upri > dd->upri + PPQ) {
1675                                         load -= usched_dfly_weight4 / 2;
1676                                 }
1677 #endif
1678                                 mask &= ~CPUMASK(cpuid);
1679                                 ++count;
1680                         }
1681                         load /= count;
1682
1683                         /*
1684                          * Prefer candidates which are somewhat closer to
1685                          * our cpu.
1686                          */
1687                         if (dd->cpumask & cpun->members)
1688                                 load += usched_dfly_weight1;
1689
1690                         /*
1691                          * The best candidate is the one with the worst
1692                          * (highest) load.
1693                          */
1694                         if (cpub == NULL || highest_load < load) {
1695                                 highest_load = load;
1696                                 cpub = cpun;
1697                         }
1698                 }
1699                 cpup = cpub;
1700         }
1701
1702         /*
1703          * We never return our own node (dd), and only return a remote
1704          * node if its load is significantly worse than ours (i.e. where
1705          * stealing a thread would be considered reasonable).
1706          *
1707          * This also helps us avoid breaking paired threads apart which
1708          * can have disastrous effects on performance.
1709          */
1710         if (rdd == dd)
1711                 return(NULL);
1712
1713 #if 0
1714         hpri = 0;
1715         if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1716                 hpri = pri;
1717         if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1718                 hpri = pri;
1719         if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1720                 hpri = pri;
1721         hpri *= PPQ;
1722         if (rdd->uload - hpri < dd->uload + hpri)
1723                 return(NULL);
1724 #endif
1725         return (rdd);
1726 }
1727
1728 static
1729 dfly_pcpu_t
1730 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1731 {
1732         dfly_pcpu_t rdd;
1733         cpumask_t tmpmask;
1734         cpumask_t mask;
1735         int cpuid;
1736
1737         /*
1738          * Fallback to the original heuristic, select random cpu,
1739          * first checking cpus not currently running a user thread.
1740          */
1741         ++dfly_scancpu;
1742         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1743         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1744                smp_active_mask & usched_global_cpumask;
1745
1746         while (mask) {
1747                 tmpmask = ~(CPUMASK(cpuid) - 1);
1748                 if (mask & tmpmask)
1749                         cpuid = BSFCPUMASK(mask & tmpmask);
1750                 else
1751                         cpuid = BSFCPUMASK(mask);
1752                 rdd = &dfly_pcpu[cpuid];
1753
1754                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1755                         goto found;
1756                 mask &= ~CPUMASK(cpuid);
1757         }
1758
1759         /*
1760          * Then cpus which might have a currently running lp
1761          */
1762         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1763         mask = dfly_curprocmask & dfly_rdyprocmask &
1764                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1765
1766         while (mask) {
1767                 tmpmask = ~(CPUMASK(cpuid) - 1);
1768                 if (mask & tmpmask)
1769                         cpuid = BSFCPUMASK(mask & tmpmask);
1770                 else
1771                         cpuid = BSFCPUMASK(mask);
1772                 rdd = &dfly_pcpu[cpuid];
1773
1774                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1775                         goto found;
1776                 mask &= ~CPUMASK(cpuid);
1777         }
1778
1779         /*
1780          * If we cannot find a suitable cpu we reload from dfly_scancpu
1781          * and round-robin.  Other cpus will pickup as they release their
1782          * current lwps or become ready.
1783          *
1784          * Avoid a degenerate system lockup case if usched_global_cpumask
1785          * is set to 0 or otherwise does not cover lwp_cpumask.
1786          *
1787          * We only kick the target helper thread in this case; we do not
1788          * set the user resched flag.
1789          */
1790         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1791         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1792                 cpuid = 0;
1793         rdd = &dfly_pcpu[cpuid];
1794 found:
1795         return (rdd);
1796 }
1797
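#if 0
/*
 * Illustrative sketch of the wrap-around scan used by
 * dfly_choose_queue_simple() above: prefer the lowest candidate cpu at or
 * above the rotor position and wrap to the lowest candidate otherwise.
 * demo_scan_from() is an illustrative name and a plain unsigned long stands
 * in for cpumask_t.
 */
#include <strings.h>            /* ffsl() */

static int
demo_scan_from(unsigned long mask, int startcpu)
{
        unsigned long above = mask & ~((1UL << startcpu) - 1);

        if (above)
                return (ffsl(above) - 1);       /* first candidate >= start */
        if (mask)
                return (ffsl(mask) - 1);        /* wrapped around to start */
        return (-1);                            /* no candidates at all */
}
#endif
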
1798 static
1799 void
1800 dfly_need_user_resched_remote(void *dummy)
1801 {
1802         globaldata_t gd = mycpu;
1803         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1804
1805         need_user_resched();
1806
1807         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1808         wakeup_mycpu(&dd->helper_thread);
1809 }
1810
1811 #endif
1812
1813 /*
1814  * dfly_remrunqueue_locked() removes a given process from the run queue
1815  * that it is on, clearing the queue busy bit if it becomes empty.
1816  *
1817  * Note that the user process scheduler is different from the LWKT scheduler.
1818  * The user process scheduler only manages user processes but it uses LWKT
1819  * underneath, and a user process operating in the kernel will often be
1820  * 'released' from our management.
1821  *
1822  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1823  * to sleep or the lwp is moved to a different runq.
1824  */
1825 static void
1826 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1827 {
1828         struct rq *q;
1829         u_int32_t *which;
1830         u_int8_t pri;
1831
1832         KKASSERT(rdd->runqcount >= 0);
1833
1834         pri = lp->lwp_rqindex;
1835
1836         switch(lp->lwp_rqtype) {
1837         case RTP_PRIO_NORMAL:
1838                 q = &rdd->queues[pri];
1839                 which = &rdd->queuebits;
1840                 break;
1841         case RTP_PRIO_REALTIME:
1842         case RTP_PRIO_FIFO:
1843                 q = &rdd->rtqueues[pri];
1844                 which = &rdd->rtqueuebits;
1845                 break;
1846         case RTP_PRIO_IDLE:
1847                 q = &rdd->idqueues[pri];
1848                 which = &rdd->idqueuebits;
1849                 break;
1850         default:
1851                 panic("remrunqueue: invalid rtprio type");
1852                 /* NOT REACHED */
1853         }
1854         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1855         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1856         TAILQ_REMOVE(q, lp, lwp_procq);
1857         --rdd->runqcount;
1858         if (TAILQ_EMPTY(q)) {
1859                 KASSERT((*which & (1 << pri)) != 0,
1860                         ("remrunqueue: remove from empty queue"));
1861                 *which &= ~(1 << pri);
1862         }
1863 }
1864
1865 /*
1866  * dfly_setrunqueue_locked()
1867  *
1868  * Add a process whose rqtype and rqindex have previously been calculated
1869  * onto the appropriate run queue.   Determine if the addition requires
1870  * a reschedule on a cpu and return the cpuid or -1.
1871  *
1872  * NOTE:          Lower priorities are better priorities.
1873  *
1874  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1875  *                sum of the rough lwp_priority for all running and runnable
1876  *                processes.  Lower priority processes (higher lwp_priority
1877  *                values) actually DO count as more load, not less, because
1878  *                these are the programs which require the most care with
1879  *                regards to cpu selection.
1880  */
1881 static void
1882 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1883 {
1884         struct rq *q;
1885         u_int32_t *which;
1886         int pri;
1887
1888         KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1889
1890         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1891                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1892                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1893                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1894                 atomic_add_int(&dfly_ucount, 1);
1895         }
1896
1897         pri = lp->lwp_rqindex;
1898
1899         switch(lp->lwp_rqtype) {
1900         case RTP_PRIO_NORMAL:
1901                 q = &rdd->queues[pri];
1902                 which = &rdd->queuebits;
1903                 break;
1904         case RTP_PRIO_REALTIME:
1905         case RTP_PRIO_FIFO:
1906                 q = &rdd->rtqueues[pri];
1907                 which = &rdd->rtqueuebits;
1908                 break;
1909         case RTP_PRIO_IDLE:
1910                 q = &rdd->idqueues[pri];
1911                 which = &rdd->idqueuebits;
1912                 break;
1913         default:
1914                 panic("remrunqueue: invalid rtprio type");
1915                 /* NOT REACHED */
1916         }
1917
1918         /*
1919          * Add to the correct queue and set the appropriate bit.  If no
1920          * lower priority (i.e. better) processes are in the queue then
1921          * we want a reschedule, calculate the best cpu for the job.
1922          *
1923          * Always run reschedules on the LWP's original cpu.
1924          *
1925          * If the lp's rrcount has not been exhausted we want to resume with
1926          * it when this queue is reached the next time, instead of resuming
1927          * with a different lp.  This improves cache effects and also avoids
1928          * leaving interrupted MP servers out in the cold holding internal
1929          * locks while trying to run a different thread.
1930          */
1931         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1932         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1933         ++rdd->runqcount;
1934         if (lp->lwp_rrcount >= usched_dfly_rrinterval) {
1935                 lp->lwp_rrcount = 0;
1936                 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1937         } else {
1938                 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
1939                 if (TAILQ_NEXT(lp, lwp_procq) == NULL)
1940                         lp->lwp_rrcount = 0;
1941         }
1942         *which |= 1 << pri;
1943 }
1944
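#if 0
/*
 * Illustrative sketch of the head-vs-tail policy implemented above: a
 * thread with round-robin quantum remaining is reinserted at the head of
 * its queue so it resumes first, while a thread that exhausted its quantum
 * goes to the tail.  struct demo_lwp and demo_requeue() are illustrative
 * stand-ins for struct lwp and the insertion logic.
 */
#include <sys/queue.h>

struct demo_lwp {
        TAILQ_ENTRY(demo_lwp) entry;
        int     rrcount;
};
TAILQ_HEAD(demo_rq, demo_lwp);

static void
demo_requeue(struct demo_rq *q, struct demo_lwp *lp, int rrinterval)
{
        if (lp->rrcount >= rrinterval) {
                lp->rrcount = 0;                        /* quantum used up */
                TAILQ_INSERT_TAIL(q, lp, entry);
        } else {
                TAILQ_INSERT_HEAD(q, lp, entry);
                if (TAILQ_NEXT(lp, entry) == NULL)      /* only entry */
                        lp->rrcount = 0;
        }
}
#endif
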
1945 #ifdef SMP
1946
1947 /*
1948  * For SMP systems a user scheduler helper thread is created for each
1949  * cpu and is used to allow one cpu to wakeup another for the purposes of
1950  * scheduling userland threads from setrunqueue().
1951  *
1952  * UP systems do not need the helper since there is only one cpu.
1953  *
1954  * We can't use the idle thread for this because we might block.
1955  * Additionally, doing things this way allows us to HLT idle cpus
1956  * on MP systems.
1957  */
1958 static void
1959 dfly_helper_thread(void *dummy)
1960 {
1961     globaldata_t gd;
1962     dfly_pcpu_t dd;
1963     dfly_pcpu_t rdd;
1964     struct lwp *nlp;
1965     cpumask_t mask;
1966     int cpuid;
1967
1968     gd = mycpu;
1969     cpuid = gd->gd_cpuid;       /* doesn't change */
1970     mask = gd->gd_cpumask;      /* doesn't change */
1971     dd = &dfly_pcpu[cpuid];
1972
1973     /*
1974      * Since we only want to be woken up when no user processes
1975      * are scheduled on a cpu, run at an ultra low priority.
1976      */
1977     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1978
1979     tsleep(&dd->helper_thread, 0, "schslp", 0);
1980
1981     for (;;) {
1982         /*
1983          * We use the LWKT deschedule-interlock trick to avoid racing
1984          * dfly_rdyprocmask.  This means we cannot block through to the
1985          * manual lwkt_switch() call we make below.
1986          */
1987         crit_enter_gd(gd);
1988         tsleep_interlock(&dd->helper_thread, 0);
1989
1990         spin_lock(&dd->spin);
1991
1992         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1993         clear_user_resched();   /* This satisfied the reschedule request */
1994 #if 0
1995         dd->rrcount = 0;        /* Reset the round-robin counter */
1996 #endif
1997
1998         if (dd->runqcount || dd->uschedcp != NULL) {
1999                 /*
2000                  * Threads are available.  A thread may or may not be
2001                  * currently scheduled.  Get the best thread already queued
2002                  * to this cpu.
2003                  */
2004                 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2005                 if (nlp) {
2006                         atomic_set_cpumask(&dfly_curprocmask, mask);
2007                         dd->upri = nlp->lwp_priority;
2008                         dd->uschedcp = nlp;
2009 #if 0
2010                         dd->rrcount = 0;        /* reset round robin */
2011 #endif
2012                         spin_unlock(&dd->spin);
2013                         lwkt_acquire(nlp->lwp_thread);
2014                         lwkt_schedule(nlp->lwp_thread);
2015                 } else {
2016                         /*
2017                          * This situation should not occur because we had
2018                          * at least one thread available.
2019                          */
2020                         spin_unlock(&dd->spin);
2021                 }
2022         } else if (usched_dfly_features & 0x01) {
2023                 /*
2024                  * This cpu is devoid of runnable threads, steal a thread
2025                  * from another cpu.  Since we're stealing, might as well
2026                  * load balance at the same time.
2027                  *
2028                  * We choose the highest-loaded thread from the worst queue.
2029                  *
2030                  * NOTE! This function only returns a non-NULL rdd when
2031                  *       another cpu's queue is obviously overloaded.  We
2032                  *       do not want to perform the type of rebalancing
2033                  *       the schedclock does here because it would result
2034                  *       in insane process pulling when 'steady' state is
2035                  *       partially unbalanced (e.g. 6 runnables and only
2036                  *       4 cores).
2037                  */
2038                 rdd = dfly_choose_worst_queue(dd);
2039                 if (rdd && spin_trylock(&rdd->spin)) {
2040                         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2041                         spin_unlock(&rdd->spin);
2042                 } else {
2043                         nlp = NULL;
2044                 }
2045                 if (nlp) {
2046                         atomic_set_cpumask(&dfly_curprocmask, mask);
2047                         dd->upri = nlp->lwp_priority;
2048                         dd->uschedcp = nlp;
2049 #if 0
2050                         dd->rrcount = 0;        /* reset round robin */
2051 #endif
2052                         spin_unlock(&dd->spin);
2053                         lwkt_acquire(nlp->lwp_thread);
2054                         lwkt_schedule(nlp->lwp_thread);
2055                 } else {
2056                         /*
2057                          * Leave the thread on our run queue.  Another
2058                          * scheduler will try to pull it later.
2059                          */
2060                         spin_unlock(&dd->spin);
2061                 }
2062         } else {
2063                 /*
2064                  * devoid of runnable threads and not allowed to steal
2065                  * any.
2066                  */
2067                 spin_unlock(&dd->spin);
2068         }
2069
2070         /*
2071          * We're descheduled unless someone scheduled us.  Switch away.
2072          * Exiting the critical section will cause splz() to be called
2073          * for us if interrupts and such are pending.
2074          */
2075         crit_exit_gd(gd);
2076         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2077     }
2078 }
2079
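#if 0
/*
 * Schematic sketch of the tsleep_interlock()/PINTERLOCKED pattern used by
 * dfly_helper_thread() above to avoid lost wakeups: the sleep is armed
 * before the condition is re-checked, so a wakeup() issued by another cpu
 * between the check and the final tsleep() turns that tsleep() into a
 * no-op instead of being lost.  The demo_* names are illustrative only.
 */
static volatile int demo_pending;       /* set by the waker before wakeup() */

static void
demo_wait_for_work(void *ident)
{
        for (;;) {
                tsleep_interlock(ident, 0);     /* arm the sleep first */
                if (demo_pending) {             /* re-check after arming */
                        demo_pending = 0;       /* consume the work */
                        continue;
                }
                /* only sleeps if no wakeup raced in after the interlock */
                tsleep(ident, PINTERLOCKED, "demoslp", 0);
        }
}

static void
demo_post_work(void *ident)
{
        demo_pending = 1;
        wakeup(ident);                  /* wakes the interlocked sleeper */
}
#endif
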
2080 #if 0
2081 static int
2082 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2083 {
2084         int error, new_val;
2085
2086         new_val = usched_dfly_stick_to_level;
2087
2088         error = sysctl_handle_int(oidp, &new_val, 0, req);
2089         if (error != 0 || req->newptr == NULL)
2090                 return (error);
2091         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2092                 return (EINVAL);
2093         usched_dfly_stick_to_level = new_val;
2094         return (0);
2095 }
2096 #endif
2097
2098 /*
2099  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
2100  * been cleared by rqinit() and we should not mess with it further.
2101  */
2102 static void
2103 dfly_helper_thread_cpu_init(void)
2104 {
2105         int i;
2106         int j;
2107         int cpuid;
2108         int smt_not_supported = 0;
2109         int cache_coherent_not_supported = 0;
2110
2111         if (bootverbose)
2112                 kprintf("Start scheduler helpers on cpus:\n");
2113
2114         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2115         usched_dfly_sysctl_tree =
2116                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2117                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2118                                 "usched_dfly", CTLFLAG_RD, 0, "");
2119
2120         for (i = 0; i < ncpus; ++i) {
2121                 dfly_pcpu_t dd = &dfly_pcpu[i];
2122                 cpumask_t mask = CPUMASK(i);
2123
2124                 if ((mask & smp_active_mask) == 0)
2125                     continue;
2126
2127                 spin_init(&dd->spin);
2128                 dd->cpunode = get_cpu_node_by_cpuid(i);
2129                 dd->cpuid = i;
2130                 dd->cpumask = CPUMASK(i);
2131                 for (j = 0; j < NQS; j++) {
2132                         TAILQ_INIT(&dd->queues[j]);
2133                         TAILQ_INIT(&dd->rtqueues[j]);
2134                         TAILQ_INIT(&dd->idqueues[j]);
2135                 }
2136                 atomic_clear_cpumask(&dfly_curprocmask, 1);
2137
2138                 if (dd->cpunode == NULL) {
2139                         smt_not_supported = 1;
2140                         cache_coherent_not_supported = 1;
2141                         if (bootverbose)
2142                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2143                                          "found for cpu\n", i);
2144                 } else {
2145                         switch (dd->cpunode->type) {
2146                         case THREAD_LEVEL:
2147                                 if (bootverbose)
2148                                         kprintf ("\tcpu%d - HyperThreading "
2149                                                  "available. Core siblings: ",
2150                                                  i);
2151                                 break;
2152                         case CORE_LEVEL:
2153                                 smt_not_supported = 1;
2154
2155                                 if (bootverbose)
2156                                         kprintf ("\tcpu%d - No HT available, "
2157                                                  "multi-core/physical "
2158                                                  "cpu. Physical siblings: ",
2159                                                  i);
2160                                 break;
2161                         case CHIP_LEVEL:
2162                                 smt_not_supported = 1;
2163
2164                                 if (bootverbose)
2165                                         kprintf ("\tcpu%d - No HT available, "
2166                                                  "single-core/physical cpu. "
2167                                                  "Package Siblings: ",
2168                                                  i);
2169                                 break;
2170                         default:
2171                                 /* Let's go for safe defaults here */
2172                                 smt_not_supported = 1;
2173                                 cache_coherent_not_supported = 1;
2174                                 if (bootverbose)
2175                                         kprintf ("\tcpu%d - Unknown cpunode->"
2176                                                  "type=%u. Siblings: ",
2177                                                  i,
2178                                                  (u_int)dd->cpunode->type);
2179                                 break;
2180                         }
2181
2182                         if (bootverbose) {
2183                                 if (dd->cpunode->parent_node != NULL) {
2184                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2185                                                 kprintf("cpu%d ", cpuid);
2186                                         kprintf("\n");
2187                                 } else {
2188                                         kprintf(" no siblings\n");
2189                                 }
2190                         }
2191                 }
2192
2193                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2194                             0, i, "usched %d", i);
2195
2196                 /*
2197                  * Allow user scheduling on the target cpu.  cpu #0 has already
2198                  * been enabled in rqinit().
2199                  */
2200                 if (i)
2201                     atomic_clear_cpumask(&dfly_curprocmask, mask);
2202                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2203                 dd->upri = PRIBASE_NULL;
2204
2205         }
2206
2207         /* usched_dfly sysctl configurable parameters */
2208
2209         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2210                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2211                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2212                        &usched_dfly_rrinterval, 0, "");
2213         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2214                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2215                        OID_AUTO, "decay", CTLFLAG_RW,
2216                        &usched_dfly_decay, 0, "Extra decay when not running");
2217
2218         /* Add enable/disable option for SMT scheduling if supported */
2219         if (smt_not_supported) {
2220                 usched_dfly_smt = 0;
2221                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2222                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2223                                   OID_AUTO, "smt", CTLFLAG_RD,
2224                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2225         } else {
2226                 usched_dfly_smt = 1;
2227                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2228                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2229                                OID_AUTO, "smt", CTLFLAG_RW,
2230                                &usched_dfly_smt, 0, "Enable SMT scheduling");
2231         }
2232
2233         /*
2234          * Add enable/disable option for cache coherent scheduling
2235          * if supported
2236          */
2237         if (cache_coherent_not_supported) {
2238                 usched_dfly_cache_coherent = 0;
2239                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2240                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2241                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2242                                   "NOT SUPPORTED", 0,
2243                                   "Cache coherence NOT SUPPORTED");
2244         } else {
2245                 usched_dfly_cache_coherent = 1;
2246                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2247                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2248                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2249                                &usched_dfly_cache_coherent, 0,
2250                                "Enable/Disable cache coherent scheduling");
2251
2252                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2253                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2254                                OID_AUTO, "weight1", CTLFLAG_RW,
2255                                &usched_dfly_weight1, 10,
2256                                "Weight selection for current cpu");
2257
2258                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2259                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2260                                OID_AUTO, "weight2", CTLFLAG_RW,
2261                                &usched_dfly_weight2, 5,
2262                                "Weight selection for wakefrom cpu");
2263
2264                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2265                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2266                                OID_AUTO, "weight3", CTLFLAG_RW,
2267                                &usched_dfly_weight3, 50,
2268                                "Weight selection for num threads on queue");
2269
2270                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2271                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2272                                OID_AUTO, "weight4", CTLFLAG_RW,
2273                                &usched_dfly_weight4, 50,
2274                                "Availability of other idle cpus");
2275
2276                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2277                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2278                                OID_AUTO, "features", CTLFLAG_RW,
2279                                &usched_dfly_features, 15,
2280                                "Allow pulls into empty queues");
2281
2282                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2283                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2284                                OID_AUTO, "swmask", CTLFLAG_RW,
2285                                &usched_dfly_swmask, ~PPQMASK,
2286                                "Queue mask to force thread switch");
2287
2288
2289 #if 0
2290                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2291                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2292                                 OID_AUTO, "stick_to_level",
2293                                 CTLTYPE_INT | CTLFLAG_RW,
2294                                 NULL, sizeof usched_dfly_stick_to_level,
2295                                 sysctl_usched_dfly_stick_to_level, "I",
2296                                 "Stick a process to this level. See sysctl"
2297                                 "paremter hw.cpu_topology.level_description");
2298 #endif
2299         }
2300 }
2301 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2302         dfly_helper_thread_cpu_init, NULL)
2303
2304 #else /* No SMP options - just add the configurable parameters to sysctl */
2305
2306 static void
2307 sched_sysctl_tree_init(void)
2308 {
2309         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2310         usched_dfly_sysctl_tree =
2311                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2312                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2313                                 "usched_dfly", CTLFLAG_RD, 0, "");
2314
2315         /* usched_dfly sysctl configurable parameters */
2316         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2317                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2318                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2319                        &usched_dfly_rrinterval, 0, "");
2320         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2321                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2322                        OID_AUTO, "decay", CTLFLAG_RW,
2323                        &usched_dfly_decay, 0, "Extra decay when not running");
2324 }
2325 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2326         sched_sysctl_tree_init, NULL)
2327 #endif
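
#if 0
/*
 * User-space example (illustrative, separate from this file): the knobs
 * registered above live under kern.usched_dfly and can be read or tuned
 * with sysctlbyname(3).  This reads the old value of weight1 (the "stay on
 * the current cpu" advantage) while writing a new one, equivalent to
 * `sysctl kern.usched_dfly.weight1=10`; tuning requires root.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int oldval;
        size_t oldlen = sizeof(oldval);
        int newval = 10;

        if (sysctlbyname("kern.usched_dfly.weight1", &oldval, &oldlen,
                         &newval, sizeof(newval)) < 0) {
                perror("sysctlbyname");
                return (1);
        }
        printf("kern.usched_dfly.weight1 was %d, now %d\n", oldval, newval);
        return (0);
}
#endif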