1 /*
2  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/kernel.h>
30 #include <sys/lock.h>
31 #include <sys/queue.h>
32 #include <sys/proc.h>
33 #include <sys/rtprio.h>
34 #include <sys/uio.h>
35 #include <sys/sysctl.h>
36 #include <sys/resourcevar.h>
37 #include <sys/spinlock.h>
38 #include <sys/cpu_topology.h>
39 #include <sys/thread2.h>
40 #include <sys/spinlock2.h>
41 #include <sys/mplock2.h>
42
43 #include <sys/ktr.h>
44
45 #include <machine/cpu.h>
46 #include <machine/smp.h>
47
48 /*
49  * Priorities.  Note that with 32 run queues per scheduler each queue
50  * represents four priority levels.
51  */
52
53 #define MAXPRI                  128
54 #define PRIMASK                 (MAXPRI - 1)
55 #define PRIBASE_REALTIME        0
56 #define PRIBASE_NORMAL          MAXPRI
57 #define PRIBASE_IDLE            (MAXPRI * 2)
58 #define PRIBASE_THREAD          (MAXPRI * 3)
59 #define PRIBASE_NULL            (MAXPRI * 4)
60
61 #define NQS     32                      /* 32 run queues. */
62 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
63 #define PPQMASK (PPQ - 1)
64
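/*
 * An illustrative sketch, not part of the scheduler (the example_rqindex()
 * helper is hypothetical): with MAXPRI = 128 and NQS = 32, PPQ works out
 * to 4, so each run queue covers four priority levels and a queue index
 * can be derived from a priority with a masked divide, exactly as
 * bsd4_resetpriority() does below.
 */
#if 0
static __inline int
example_rqindex(int priority)
{
	/* priority 0-3 -> queue 0, 4-7 -> queue 1, ..., 124-127 -> queue 31 */
	return ((priority & PRIMASK) / PPQ);
}
#endif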
65 /*
66  * NICEPPQ      - number of nice units per priority queue
67  *
68  * ESTCPUPPQ    - number of estcpu units per priority queue
69  * ESTCPUMAX    - number of estcpu units
70  */
71 #define NICEPPQ         2
72 #define ESTCPUPPQ       512
73 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
74 #define BATCHMAX        (ESTCPUFREQ * 30)
75 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
76
77 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
78
79 TAILQ_HEAD(rq, lwp);
80
81 #define lwp_priority    lwp_usdata.bsd4.priority
82 #define lwp_rqindex     lwp_usdata.bsd4.rqindex
83 #define lwp_estcpu      lwp_usdata.bsd4.estcpu
84 #define lwp_batch       lwp_usdata.bsd4.batch
85 #define lwp_rqtype      lwp_usdata.bsd4.rqtype
86
87 static void bsd4_acquire_curproc(struct lwp *lp);
88 static void bsd4_release_curproc(struct lwp *lp);
89 static void bsd4_select_curproc(globaldata_t gd);
90 static void bsd4_setrunqueue(struct lwp *lp);
91 static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
92                                 sysclock_t cpstamp);
93 static void bsd4_recalculate_estcpu(struct lwp *lp);
94 static void bsd4_resetpriority(struct lwp *lp);
95 static void bsd4_forking(struct lwp *plp, struct lwp *lp);
96 static void bsd4_exiting(struct lwp *lp, struct proc *);
97 static void bsd4_yield(struct lwp *lp);
98
99 #ifdef SMP
100 static void need_user_resched_remote(void *dummy);
101 static int batchy_looser_pri_test(struct lwp* lp);
102 static struct lwp *chooseproc_locked_cache_coherent(struct lwp *chklp);
103 #endif
104 static struct lwp *chooseproc_locked(struct lwp *chklp);
105 static void bsd4_remrunqueue_locked(struct lwp *lp);
106 static void bsd4_setrunqueue_locked(struct lwp *lp);
107 static void kick_helper(struct lwp *lp);
108
109 struct usched usched_bsd4 = {
110         { NULL },
111         "bsd4", "Original DragonFly Scheduler",
112         NULL,                   /* default registration */
113         NULL,                   /* default deregistration */
114         bsd4_acquire_curproc,
115         bsd4_release_curproc,
116         bsd4_setrunqueue,
117         bsd4_schedulerclock,
118         bsd4_recalculate_estcpu,
119         bsd4_resetpriority,
120         bsd4_forking,
121         bsd4_exiting,
122         NULL,                   /* setcpumask not supported */
123         bsd4_yield
124 };
125
126 struct usched_bsd4_pcpu {
127         struct thread   helper_thread;
128         short           rrcount;
129         short           upri;
130         struct lwp      *uschedcp;
131         struct lwp      *old_uschedcp;
132 #ifdef SMP
133         cpu_node_t      *cpunode;
134 #endif
135 };
136
137 typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;
138
139 /*
140  * We have NQS (32) run queues per scheduling class.  For the normal
141  * class, there are 128 priorities scaled onto these 32 queues.  New
142  * processes are added to the last entry in each queue, and processes
143  * are selected for running by taking them from the head and maintaining
144  * a simple FIFO arrangement.  Realtime and Idle priority processes have
145  * an explicit 0-31 priority which maps directly onto their class queue
146  * index.  When a queue has something in it, the corresponding bit is
147  * set in the queuebits variable, allowing a single read to determine
148  * the state of all 32 queues and then a ffs() to find the first busy
149  * queue.
150  */
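/*
 * An illustrative sketch, not part of the scheduler (the
 * example_first_busy_queue() helper is hypothetical): bit N of a queuebits
 * word is set while queue N is non-empty, so a single word read plus a
 * find-first-set yields the highest-priority busy queue.
 * chooseproc_locked() below does the real work with bsfl().
 */
#if 0
static __inline int
example_first_busy_queue(u_int32_t queuebits)
{
	if (queuebits == 0)
		return (-1);		/* all 32 queues are empty */
	return (bsfl(queuebits));	/* index of the lowest set bit */
}
#endif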
151 static struct rq bsd4_queues[NQS];
152 static struct rq bsd4_rtqueues[NQS];
153 static struct rq bsd4_idqueues[NQS];
154 static u_int32_t bsd4_queuebits;
155 static u_int32_t bsd4_rtqueuebits;
156 static u_int32_t bsd4_idqueuebits;
157 static cpumask_t bsd4_curprocmask = -1; /* currently running a user process */
158 static cpumask_t bsd4_rdyprocmask;      /* ready to accept a user process */
159 static int       bsd4_runqcount;
160 #ifdef SMP
161 static volatile int bsd4_scancpu;
162 #endif
163 static struct spinlock bsd4_spin;
164 static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
165 static struct sysctl_ctx_list usched_bsd4_sysctl_ctx;
166 static struct sysctl_oid *usched_bsd4_sysctl_tree;
167
168 /* Debug info exposed through debug.* sysctl */
169
170 SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0,
171     "Number of run queues");
172 #ifdef INVARIANTS
173 static int usched_nonoptimal;
174 SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
175         &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
176 static int usched_optimal;
177 SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
178         &usched_optimal, 0, "acquire_curproc() was optimal");
179 #endif
180
181 static int usched_bsd4_debug = -1;
182 SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_bsd4_debug, 0,
183     "Print debug information for this pid");
184 static int usched_bsd4_pid_debug = -1;
185 SYSCTL_INT(_debug, OID_AUTO, pid_debug, CTLFLAG_RW, &usched_bsd4_pid_debug, 0,
186     "Print KTR debug information for this pid");
187
188 #ifdef SMP
189 static int remote_resched_nonaffinity;
190 static int remote_resched_affinity;
191 static int choose_affinity;
192 SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
193         &remote_resched_nonaffinity, 0, "Number of remote rescheds (non-affinity)");
194 SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
195         &remote_resched_affinity, 0, "Number of remote rescheds (affinity)");
196 SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
197         &choose_affinity, 0, "chooseproc() was smart");
198 #endif
199
200
201 /* Tuning usched_bsd4 - configurable through kern.usched_bsd4.* */
202 #ifdef SMP
203 static int usched_bsd4_smt = 0;
204 static int usched_bsd4_cache_coherent = 0;
205 static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */
206 static int usched_bsd4_queue_checks = 5;
207 static int usched_bsd4_stick_to_level = 0;
208 #endif
209 static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
210 static int usched_bsd4_decay = 8;
211 static int usched_bsd4_batch_time = 10;
212 static long usched_bsd4_kicks;
213
214 /* KTR debug printouts */
215
216 KTR_INFO_MASTER(usched);
217
218 #if !defined(KTR_USCHED_BSD4)
219 #define KTR_USCHED_BSD4 KTR_ALL
220 #endif
221
222 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_urw, 0,
223     "USCHED_BSD4(bsd4_acquire_curproc in user_reseched_wanted "
224     "after release: pid %d, cpuid %d, curr_cpuid %d)",
225     pid_t pid, int cpuid, int curr);
226 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_before_loop, 0,
227     "USCHED_BSD4(bsd4_acquire_curproc before loop: pid %d, cpuid %d, "
228     "curr_cpuid %d)",
229     pid_t pid, int cpuid, int curr);
230 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_not, 0,
231     "USCHED_BSD4(bsd4_acquire_curproc couldn't acquire after "
232     "bsd4_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
233     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
234 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_switch, 0,
235     "USCHED_BSD4(bsd4_acquire_curproc after lwkt_switch: pid %d, "
236     "cpuid %d, curr_cpuid %d)",
237     pid_t pid, int cpuid, int curr);
238
239 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_release_curproc, 0,
240     "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
241     "cpuid %d, curr_cpuid %d)",
242     pid_t pid, int cpuid, int curr);
243
244 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_select_curproc, 0,
245     "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
246     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
247     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
248
249 #ifdef SMP
250 KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_false, 0,
251     "USCHED_BSD4(batchy_looser_pri_test false: pid %d, "
252     "cpuid %d, verify_mask %lu)",
253     pid_t pid, int cpuid, cpumask_t mask);
254 KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_true, 0,
255     "USCHED_BSD4(batchy_looser_pri_test true: pid %d, "
256     "cpuid %d, verify_mask %lu)",
257     pid_t pid, int cpuid, cpumask_t mask);
258
259 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_smt, 0,
260     "USCHED_BSD4(bsd4_setrunqueue free cpus smt: pid %d, cpuid %d, "
261     "mask %lu, curr_cpuid %d)",
262     pid_t pid, int cpuid, cpumask_t mask, int curr);
263 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_non_smt, 0,
264     "USCHED_BSD4(bsd4_setrunqueue free cpus check non_smt: pid %d, "
265     "cpuid %d, mask %lu, curr_cpuid %d)",
266     pid_t pid, int cpuid, cpumask_t mask, int curr);
267 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_rc, 0,
268     "USCHED_BSD4(bsd4_setrunqueue running cpus check: pid %d, "
269     "cpuid %d, mask %lu, curr_cpuid %d)",
270     pid_t pid, int cpuid, cpumask_t mask, int curr);
271 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found, 0,
272     "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
273     "mask %lu, found_cpuid %d, curr_cpuid %d)",
274     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
275 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_not_found, 0,
276     "USCHED_BSD4(bsd4_setrunqueue not found cpu: pid %d, cpuid %d, "
277     "try_cpuid %d, curr_cpuid %d)",
278     pid_t pid, int cpuid, int try_cpuid, int curr);
279 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found_best_cpuid, 0,
280     "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
281     "mask %lu, found_cpuid %d, curr_cpuid %d)",
282     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
283 #endif
284
285 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc, 0,
286     "USCHED_BSD4(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
287     pid_t pid, int old_cpuid, int curr);
288 #ifdef SMP
289 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc, 0,
290     "USCHED_BSD4(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
291     pid_t pid, int old_cpuid, int curr);
292 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_not_good, 0,
293     "USCHED_BSD4(chooseproc_cc not good: pid %d, old_cpumask %lu, "
294     "sibling_mask %lu, curr_cpumask %lu)",
295     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
296 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_elected, 0,
297     "USCHED_BSD4(chooseproc_cc elected: pid %d, old_cpumask %lu, "
298     "sibling_mask %lu, curr_cpumask: %lu)",
299     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
300
301 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process, 0,
302     "USCHED_BSD4(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
303     int id, pid_t pid, int cpuid);
304 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_process, 0,
305     "USCHED_BSD4(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
306     int id, pid_t pid, int cpuid);
307 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0,
308     "USCHED_BSD4(sched_thread %d no process found; tmpmask %lu)",
309     int id, cpumask_t tmpmask);
310 #endif
311
312 /*
313  * Initialize the run queues at boot time.
314  */
315 static void
316 rqinit(void *dummy)
317 {
318         int i;
319
320         spin_init(&bsd4_spin);
321         for (i = 0; i < NQS; i++) {
322                 TAILQ_INIT(&bsd4_queues[i]);
323                 TAILQ_INIT(&bsd4_rtqueues[i]);
324                 TAILQ_INIT(&bsd4_idqueues[i]);
325         }
326         atomic_clear_cpumask(&bsd4_curprocmask, 1);
327 }
328 SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)
329
330 /*
331  * BSD4_ACQUIRE_CURPROC
332  *
333  * This function is called when the kernel intends to return to userland.
334  * It is responsible for making the thread the current designated userland
335  * thread for this cpu, blocking if necessary.
336  *
337  * The kernel has already depressed our LWKT priority so we must not switch
338  * until we have either assigned or disposed of the thread.
339  *
340  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
341  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
342  * occur, this function is called only under very controlled circumstances.
343  *
344  * MPSAFE
345  */
346 static void
347 bsd4_acquire_curproc(struct lwp *lp)
348 {
349         globaldata_t gd;
350         bsd4_pcpu_t dd;
351         thread_t td;
352 #if 0
353         struct lwp *olp;
354 #endif
355
356         /*
357          * Make sure we aren't sitting on a tsleep queue.
358          */
359         td = lp->lwp_thread;
360         crit_enter_quick(td);
361         if (td->td_flags & TDF_TSLEEPQ)
362                 tsleep_remove(td);
363         bsd4_recalculate_estcpu(lp);
364
365         /*
366          * If a reschedule was requested give another thread the
367          * driver's seat.
368          */
369         if (user_resched_wanted()) {
370                 clear_user_resched();
371                 bsd4_release_curproc(lp);
372
373                 KTR_COND_LOG(usched_bsd4_acquire_curproc_urw,
374                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
375                     lp->lwp_proc->p_pid,
376                     lp->lwp_thread->td_gd->gd_cpuid,
377                     mycpu->gd_cpuid);
378         }
379
380         /*
381          * Loop until we are the current user thread
382          */
383         gd = mycpu;
384         dd = &bsd4_pcpu[gd->gd_cpuid];
385
386         KTR_COND_LOG(usched_bsd4_acquire_curproc_before_loop,
387             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
388             lp->lwp_proc->p_pid,
389             lp->lwp_thread->td_gd->gd_cpuid,
390             gd->gd_cpuid);
391
392         do {
393                 /*
394                  * Process any pending events and higher priority threads.
395                  */
396                 lwkt_yield();
397
398                 /*
399                  * Become the currently scheduled user thread for this cpu
400                  * if we can do so trivially.
401                  *
402                  * We can steal another thread's current thread designation
403                  * on this cpu since, if we are running, that other thread
404                  * must not be, so we can safely deschedule it.
405                  */
406                 if (dd->uschedcp == lp) {
407                         /*
408                          * We are already the current lwp (hot path).
409                          */
410                         dd->upri = lp->lwp_priority;
411                 } else if (dd->uschedcp == NULL) {
412                         /*
413                          * We can trivially become the current lwp.
414                          */
415                         atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
416                         dd->uschedcp = lp;
417                         dd->upri = lp->lwp_priority;
418                 } else if (dd->upri > lp->lwp_priority) {
419                         /*
420                          * We can steal the current cpu's lwp designation
421                          * away simply by replacing it.  The other thread
422                          * will stall when it tries to return to userland.
423                          */
424                         dd->uschedcp = lp;
425                         dd->upri = lp->lwp_priority;
426                         /*
427                         lwkt_deschedule(olp->lwp_thread);
428                         bsd4_setrunqueue(olp);
429                         */
430                 } else {
431                         /*
432                          * We cannot become the current lwp, place the lp
433                          * on the bsd4 run-queue and deschedule ourselves.
434                          *
435                          * When we are reactivated we will have another
436                          * chance.
437                          */
438                         lwkt_deschedule(lp->lwp_thread);
439
440                         bsd4_setrunqueue(lp);
441
442                         KTR_COND_LOG(usched_bsd4_acquire_curproc_not,
443                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
444                             lp->lwp_proc->p_pid,
445                             lp->lwp_thread->td_gd->gd_cpuid,
446                             dd->uschedcp->lwp_proc->p_pid,
447                             gd->gd_cpuid);
448
449
450                         lwkt_switch();
451
452                         /*
453                          * Reload after a switch or setrunqueue/switch possibly
454                          * moved us to another cpu.
455                          */
456                         gd = mycpu;
457                         dd = &bsd4_pcpu[gd->gd_cpuid];
458
459                         KTR_COND_LOG(usched_bsd4_acquire_curproc_switch,
460                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
461                             lp->lwp_proc->p_pid,
462                             lp->lwp_thread->td_gd->gd_cpuid,
463                             gd->gd_cpuid);
464                 }
465         } while (dd->uschedcp != lp);
466
467         crit_exit_quick(td);
468         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
469 }
470
471 /*
472  * BSD4_RELEASE_CURPROC
473  *
474  * This routine detaches the current thread from the userland scheduler,
475  * usually because the thread needs to run or block in the kernel (at
476  * kernel priority) for a while.
477  *
478  * This routine is also responsible for selecting a new thread to
479  * make the current thread.
480  *
481  * NOTE: This implementation differs from the dummy example in that
482  * bsd4_select_curproc() is able to select the current process, whereas
483  * dummy_select_curproc() is not able to select the current process.
484  * This means we have to NULL out uschedcp.
485  *
486  * Additionally, note that we may already be on a run queue if releasing
487  * via the lwkt_switch() in bsd4_setrunqueue().
488  *
489  * MPSAFE
490  */
491
492 static void
493 bsd4_release_curproc(struct lwp *lp)
494 {
495         globaldata_t gd = mycpu;
496         bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
497
498         if (dd->uschedcp == lp) {
499                 crit_enter();
500                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
501
502                 KTR_COND_LOG(usched_bsd4_release_curproc,
503                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
504                     lp->lwp_proc->p_pid,
505                     lp->lwp_thread->td_gd->gd_cpuid,
506                     gd->gd_cpuid);
507
508                 dd->uschedcp = NULL;    /* don't let lp be selected */
509                 dd->upri = PRIBASE_NULL;
510                 atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
511                 dd->old_uschedcp = lp;  /* used only for KTR debug prints */
512                 bsd4_select_curproc(gd);
513                 crit_exit();
514         }
515 }
516
517 /*
518  * BSD4_SELECT_CURPROC
519  *
520  * Select a new current process for this cpu and clear any pending user
521  * reschedule request.  The cpu currently has no current process.
522  *
523  * This routine is also responsible for equal-priority round-robining,
524  * typically triggered from bsd4_schedulerclock().  In our dummy example
525  * all the 'user' threads are LWKT scheduled all at once and we just
526  * call lwkt_switch().
527  *
528  * The calling process is not on the queue and cannot be selected.
529  *
530  * MPSAFE
531  */
532 static
533 void
534 bsd4_select_curproc(globaldata_t gd)
535 {
536         bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
537         struct lwp *nlp;
538         int cpuid = gd->gd_cpuid;
539
540         crit_enter_gd(gd);
541
542         spin_lock(&bsd4_spin);
543 #ifdef SMP
544         if(usched_bsd4_cache_coherent)
545                 nlp = chooseproc_locked_cache_coherent(dd->uschedcp);
546         else
547 #endif
548                 nlp = chooseproc_locked(dd->uschedcp);
549
550         if (nlp) {
551
552                 KTR_COND_LOG(usched_bsd4_select_curproc,
553                     nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
554                     nlp->lwp_proc->p_pid,
555                     nlp->lwp_thread->td_gd->gd_cpuid,
556                     dd->old_uschedcp->lwp_proc->p_pid,
557                     dd->old_uschedcp->lwp_thread->td_gd->gd_cpuid,
558                     gd->gd_cpuid);
559
560                 atomic_set_cpumask(&bsd4_curprocmask, CPUMASK(cpuid));
561                 dd->upri = nlp->lwp_priority;
562                 dd->uschedcp = nlp;
563                 dd->rrcount = 0;                /* reset round robin */
564                 spin_unlock(&bsd4_spin);
565 #ifdef SMP
566                 lwkt_acquire(nlp->lwp_thread);
567 #endif
568                 lwkt_schedule(nlp->lwp_thread);
569         } else {
570                 spin_unlock(&bsd4_spin);
571         }
572
573 #if 0
574         } else if (bsd4_runqcount && (bsd4_rdyprocmask & CPUMASK(cpuid))) {
575                 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
576                 spin_unlock(&bsd4_spin);
577                 lwkt_schedule(&dd->helper_thread);
578         } else {
579                 spin_unlock(&bsd4_spin);
580         }
581 #endif
582         crit_exit_gd(gd);
583 }
584 #ifdef SMP
585
586 /*
587  * batchy_looser_pri_test() - determine if a process is batchy or not
588  * relative to the other processes running in the system
589  */
590 static int
591 batchy_looser_pri_test(struct lwp* lp)
592 {
593         cpumask_t mask;
594         bsd4_pcpu_t other_dd;
595         int cpu;
596
597         /* Current running processes */
598         mask = bsd4_curprocmask & smp_active_mask
599             & usched_global_cpumask;
600
601         while(mask) {
602                 cpu = BSFCPUMASK(mask);
603                 other_dd = &bsd4_pcpu[cpu];
604                 if (other_dd->upri - lp->lwp_priority > usched_bsd4_upri_affinity * PPQ) {
605
606                         KTR_COND_LOG(usched_batchy_test_false,
607                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
608                             lp->lwp_proc->p_pid,
609                             lp->lwp_thread->td_gd->gd_cpuid,
610                             (unsigned long)mask);
611
612                         return 0;
613                 }
614                 mask &= ~CPUMASK(cpu);
615         }
616
617         KTR_COND_LOG(usched_batchy_test_true,
618             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
619             lp->lwp_proc->p_pid,
620             lp->lwp_thread->td_gd->gd_cpuid,
621             (unsigned long)mask);
622
623         return 1;
624 }
625
626 #endif
627 /*
628  *
629  * BSD4_SETRUNQUEUE
630  *
631  * Place the specified lwp on the user scheduler's run queue.  This routine
632  * must be called with the thread descheduled.  The lwp must be runnable.
633  *
634  * The thread may be the current thread as a special case.
635  *
636  * MPSAFE
637  */
638 static void
639 bsd4_setrunqueue(struct lwp *lp)
640 {
641         globaldata_t gd;
642         bsd4_pcpu_t dd;
643 #ifdef SMP
644         int cpuid;
645         cpumask_t mask;
646         cpumask_t tmpmask;
647 #endif
648
649         /*
650          * First validate the process state relative to the current cpu.
651          * We don't need the spinlock for this, just a critical section.
652          * We are in control of the process.
653          */
654         crit_enter();
655         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
656         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
657             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
658              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
659         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
660
661         /*
662          * Note: gd and dd are relative to the target thread's last cpu,
663          * NOT our current cpu.
664          */
665         gd = lp->lwp_thread->td_gd;
666         dd = &bsd4_pcpu[gd->gd_cpuid];
667
668         /*
669          * This process is not supposed to be scheduled anywhere or assigned
670          * as the current process anywhere.  Assert the condition.
671          */
672         KKASSERT(dd->uschedcp != lp);
673
674 #ifndef SMP
675         /*
676          * If we are not SMP we do not have a scheduler helper to kick
677          * and must directly activate the process if none are scheduled.
678          *
679          * This is really only an issue when bootstrapping init since
680          * the caller in all other cases will be a user process, and
681          * even if released (dd->uschedcp == NULL), that process will
682          * kickstart the scheduler when it returns to user mode from
683          * the kernel.
684          */
685         if (dd->uschedcp == NULL) {
686                 atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
687                 dd->uschedcp = lp;
688                 dd->upri = lp->lwp_priority;
689                 lwkt_schedule(lp->lwp_thread);
690                 crit_exit();
691                 return;
692         }
693 #endif
694
695 #ifdef SMP
696         /*
697          * XXX fixme.  Could be part of a remrunqueue/setrunqueue
698          * operation when the priority is recalculated, so TDF_MIGRATING
699          * may already be set.
700          */
701         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
702                 lwkt_giveaway(lp->lwp_thread);
703 #endif
704
705         /*
706          * We lose control of lp the moment we release the spinlock after
707          * having placed lp on the queue.  i.e. another cpu could pick it
708          * up and it could exit, or its priority could be further adjusted,
709          * or something like that.
710          */
711         spin_lock(&bsd4_spin);
712         bsd4_setrunqueue_locked(lp);
713         lp->lwp_setrunqueue_ticks = sched_ticks;
714
715 #ifdef SMP
716         /*
717          * Kick the scheduler helper on one of the other cpu's
718          * and request a reschedule if appropriate.
719          *
720          * NOTE: We check all cpus whose rdyprocmask is set.  First we
721          *       look for cpus without designated lps, then we look for
722          *       cpus with designated lps with a worse priority than our
723          *       process.
724          */
725         ++bsd4_scancpu;
726
727         if (usched_bsd4_smt) {
728
729                 /*
730                  * SMT heuristic - Try to schedule on a free physical core.
731                  * If no physical core found than choose the one that has
732                  * an interactive thread.
733                  */
734
735                 int best_cpuid = -1;
736                 int min_prio = MAXPRI * MAXPRI;
737                 int sibling;
738
739                 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
740                 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
741                     smp_active_mask & usched_global_cpumask;
742
743                 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_smt,
744                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
745                     lp->lwp_proc->p_pid,
746                     lp->lwp_thread->td_gd->gd_cpuid,
747                     (unsigned long)mask,
748                     mycpu->gd_cpuid);
749
750                 while (mask) {
751                         tmpmask = ~(CPUMASK(cpuid) - 1);
752                         if (mask & tmpmask)
753                                 cpuid = BSFCPUMASK(mask & tmpmask);
754                         else
755                                 cpuid = BSFCPUMASK(mask);
756                         gd = globaldata_find(cpuid);
757                         dd = &bsd4_pcpu[cpuid];
758
759                         if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
760                                 if (dd->cpunode->parent_node->members & ~dd->cpunode->members & mask) {
761
762                                         KTR_COND_LOG(usched_bsd4_setrunqueue_found,
763                                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
764                                             lp->lwp_proc->p_pid,
765                                             lp->lwp_thread->td_gd->gd_cpuid,
766                                             (unsigned long)mask,
767                                             cpuid,
768                                             mycpu->gd_cpuid);
769
770                                         goto found;
771                                 } else {
772                                         sibling = BSFCPUMASK(dd->cpunode->parent_node->members &
773                                             ~dd->cpunode->members);
774                                         if (min_prio > bsd4_pcpu[sibling].upri) {
775                                                 min_prio = bsd4_pcpu[sibling].upri;
776                                                 best_cpuid = cpuid;
777                                         }
778                                 }
779                         }
780                         mask &= ~CPUMASK(cpuid);
781                 }
782
783                 if (best_cpuid != -1) {
784                         cpuid = best_cpuid;
785                         gd = globaldata_find(cpuid);
786                         dd = &bsd4_pcpu[cpuid];
787
788                         KTR_COND_LOG(usched_bsd4_setrunqueue_found_best_cpuid,
789                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
790                             lp->lwp_proc->p_pid,
791                             lp->lwp_thread->td_gd->gd_cpuid,
792                             (unsigned long)mask,
793                             cpuid,
794                             mycpu->gd_cpuid);
795
796                         goto found;
797                 }
798         } else {
799                 /* Fallback to the original heuristic */
800                 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
801                 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
802                        smp_active_mask & usched_global_cpumask;
803
804                 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_non_smt,
805                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
806                     lp->lwp_proc->p_pid,
807                     lp->lwp_thread->td_gd->gd_cpuid,
808                     (unsigned long)mask,
809                     mycpu->gd_cpuid);
810
811                 while (mask) {
812                         tmpmask = ~(CPUMASK(cpuid) - 1);
813                         if (mask & tmpmask)
814                                 cpuid = BSFCPUMASK(mask & tmpmask);
815                         else
816                                 cpuid = BSFCPUMASK(mask);
817                         gd = globaldata_find(cpuid);
818                         dd = &bsd4_pcpu[cpuid];
819
820                         if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
821
822                                 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
823                                     lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
824                                     lp->lwp_proc->p_pid,
825                                     lp->lwp_thread->td_gd->gd_cpuid,
826                                     (unsigned long)mask,
827                                     cpuid,
828                                     mycpu->gd_cpuid);
829
830                                 goto found;
831                         }
832                         mask &= ~CPUMASK(cpuid);
833                 }
834         }
835
836         /*
837          * Then cpus which might have a currently running lp
838          */
839         mask = bsd4_curprocmask & bsd4_rdyprocmask &
840                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
841
842         KTR_COND_LOG(usched_bsd4_setrunqueue_rc,
843             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
844             lp->lwp_proc->p_pid,
845             lp->lwp_thread->td_gd->gd_cpuid,
846             (unsigned long)mask,
847             mycpu->gd_cpuid);
848
849         while (mask) {
850                 tmpmask = ~(CPUMASK(cpuid) - 1);
851                 if (mask & tmpmask)
852                         cpuid = BSFCPUMASK(mask & tmpmask);
853                 else
854                         cpuid = BSFCPUMASK(mask);
855                 gd = globaldata_find(cpuid);
856                 dd = &bsd4_pcpu[cpuid];
857
858                 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
859
860                         KTR_COND_LOG(usched_bsd4_setrunqueue_found,
861                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
862                             lp->lwp_proc->p_pid,
863                             lp->lwp_thread->td_gd->gd_cpuid,
864                             (unsigned long)mask,
865                             cpuid,
866                             mycpu->gd_cpuid);
867
868                         goto found;
869                 }
870                 mask &= ~CPUMASK(cpuid);
871         }
872
873         /*
874          * If we cannot find a suitable cpu we reload from bsd4_scancpu
875          * and round-robin.  Other cpus will pick up as they release their
876          * current lwps or become ready.
877          *
878          * Avoid a degenerate system lockup case if usched_global_cpumask
879          * is set to 0 or otherwise does not cover lwp_cpumask.
880          *
881          * We only kick the target helper thread in this case; we do not
882          * set the user resched flag.
883          */
884         cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
885         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0) {
886                 cpuid = 0;
887         }
888         gd = globaldata_find(cpuid);
889         dd = &bsd4_pcpu[cpuid];
890
891         KTR_COND_LOG(usched_bsd4_setrunqueue_not_found,
892             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
893             lp->lwp_proc->p_pid,
894             lp->lwp_thread->td_gd->gd_cpuid,
895             cpuid,
896             mycpu->gd_cpuid);
897
898 found:
899         if (gd == mycpu) {
900                 spin_unlock(&bsd4_spin);
901                 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
902                         if (dd->uschedcp == NULL) {
903                                 wakeup_mycpu(&dd->helper_thread);
904                         } else {
905                                 need_user_resched();
906                         }
907                 }
908         } else {
909                 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
910                 spin_unlock(&bsd4_spin);
911                 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
912                         lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
913                 else
914                         wakeup(&dd->helper_thread);
915         }
916 #else
917         /*
918          * Request a reschedule if appropriate.
919          */
920         spin_unlock(&bsd4_spin);
921         if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
922                 need_user_resched();
923         }
924 #endif
925         crit_exit();
926 }
927
928 /*
929  * This routine is called from a systimer IPI.  It MUST be MP-safe and
930  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
931  * each cpu.
932  *
933  * MPSAFE
934  */
935 static
936 void
937 bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
938 {
939         globaldata_t gd = mycpu;
940         bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
941
942         /*
943          * Do we need to round-robin?  We round-robin 10 times a second.
944          * This should only occur for cpu-bound batch processes.
945          */
946         if (++dd->rrcount >= usched_bsd4_rrinterval) {
947                 dd->rrcount = 0;
948                 need_user_resched();
949         }
950
951         /*
952          * Adjust estcpu upward using a real time equivalent calculation.
953          */
954         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
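        /*
         * Since this clock fires ESTCPUFREQ times per second, a thread that
         * stays on the cpu accumulates roughly ESTCPUMAX worth of estcpu per
         * second here (ESTCPUMAX / ESTCPUFREQ per tick), clamped by
         * ESTCPULIM().  bsd4_recalculate_estcpu() later decays it back down.
         */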
955
956         /*
957          * Spinlocks also hold a critical section so there should not be
958          * any active.
959          */
960         KKASSERT(gd->gd_spinlocks_wr == 0);
961
962         bsd4_resetpriority(lp);
963 #if 0
964         /*
965          * If we can't call bsd4_resetpriority() for some reason we must
966          * call need_user_resched().
967          */
968         need_user_resched();
969 #endif
970 }
971
972 /*
973  * Called from acquire and from kern_synch's one-second timer (one of the
974  * callout helper threads) with a critical section held.
975  *
976  * Decay p_estcpu based on the number of ticks we haven't been running
977  * and our p_nice.  As the load increases each process observes a larger
978  * number of idle ticks (because other processes are running in them).
979  * This observation leads to a larger correction which tends to make the
980  * system more 'batchy'.
981  *
982  * Note that no recalculation occurs for a process which sleeps and wakes
983  * up in the same tick.  That is, a system doing thousands of context
984  * switches per second will still only do serious estcpu calculations
985  * ESTCPUFREQ times per second.
986  *
987  * MPSAFE
988  */
989 static
990 void
991 bsd4_recalculate_estcpu(struct lwp *lp)
992 {
993         globaldata_t gd = mycpu;
994         sysclock_t cpbase;
995         sysclock_t ttlticks;
996         int estcpu;
997         int decay_factor;
998
999         /*
1000          * We have to subtract periodic to get the last schedclock
1001          * timeout time, otherwise we would get the upcoming timeout.
1002          * Keep in mind that a process can migrate between cpus and
1003          * while the scheduler clock should be very close, boundary
1004          * conditions could lead to a small negative delta.
1005          */
1006         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
1007
1008         if (lp->lwp_slptime > 1) {
1009                 /*
1010                  * Too much time has passed, do a coarse correction.
1011                  */
1012                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
1013                 bsd4_resetpriority(lp);
1014                 lp->lwp_cpbase = cpbase;
1015                 lp->lwp_cpticks = 0;
1016                 lp->lwp_batch -= ESTCPUFREQ;
1017                 if (lp->lwp_batch < 0)
1018                         lp->lwp_batch = 0;
1019         } else if (lp->lwp_cpbase != cpbase) {
1020                 /*
1021                  * Adjust estcpu if we are in a different tick.  Don't waste
1022                  * time if we are in the same tick.
1023                  *
1024                  * First calculate the number of ticks in the measurement
1025                  * interval.  The ttlticks calculation can wind up 0 due to
1026                  * a bug in the handling of lwp_slptime  (as yet not found),
1027                  * so make sure we do not get a divide by 0 panic.
1028                  */
1029                 ttlticks = (cpbase - lp->lwp_cpbase) /
1030                            gd->gd_schedclock.periodic;
1031                 if (ttlticks < 0) {
1032                         ttlticks = 0;
1033                         lp->lwp_cpbase = cpbase;
1034                 }
1035                 if (ttlticks == 0)
1036                         return;
1037                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
1038
1039                 /*
1040                  * Calculate the percentage of one cpu used factoring in ncpus
1041                  * and the load and adjust estcpu.  Handle degenerate cases
1042                  * by adding 1 to bsd4_runqcount.
1043                  *
1044                  * estcpu is scaled by ESTCPUMAX.
1045                  *
1046                  * bsd4_runqcount is the excess number of user processes
1047                  * that cannot be immediately scheduled to cpus.  We want
1048                  * to count these as running to avoid range compression
1049                  * in the base calculation (which is the actual percentage
1050                  * of one cpu used).
1051                  */
1052                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
1053                          (bsd4_runqcount + ncpus) / (ncpus * ttlticks);
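                /*
                 * A worked example: on a single-cpu system with an empty run
                 * queue (bsd4_runqcount == 0), a thread that ran for half of
                 * the measured ticks (lwp_cpticks == ttlticks / 2) gets
                 * estcpu = (ttlticks/2 * ESTCPUMAX) * 1 / (1 * ttlticks)
                 *        = ESTCPUMAX / 2, i.e. 50% of one cpu.
                 */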
1054
1055                 /*
1056                  * If estcpu is > 50% we become more batch-like
1057                  * If estcpu is <= 50% we become less batch-like
1058                  *
1059                  * It takes 30 cpu seconds to traverse the entire range.
1060                  */
1061                 if (estcpu > ESTCPUMAX / 2) {
1062                         lp->lwp_batch += ttlticks;
1063                         if (lp->lwp_batch > BATCHMAX)
1064                                 lp->lwp_batch = BATCHMAX;
1065                 } else {
1066                         lp->lwp_batch -= ttlticks;
1067                         if (lp->lwp_batch < 0)
1068                                 lp->lwp_batch = 0;
1069                 }
1070
1071                 if (usched_bsd4_debug == lp->lwp_proc->p_pid) {
1072                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
1073                                 lp->lwp_proc->p_pid, lp,
1074                                 estcpu, lp->lwp_estcpu,
1075                                 lp->lwp_batch,
1076                                 lp->lwp_cpticks, ttlticks);
1077                 }
1078
1079                 /*
1080          * Adjust lp->lwp_estcpu.  The decay factor determines how
1081                  * quickly lwp_estcpu collapses to its realtime calculation.
1082                  * A slower collapse gives us a more accurate number but
1083                  * can cause a cpu hog to eat too much cpu before the
1084                  * scheduler decides to downgrade it.
1085                  *
1086                  * NOTE: p_nice is accounted for in bsd4_resetpriority(),
1087                  *       and not here, but we must still ensure that a
1088                  *       cpu-bound nice -20 process does not completely
1089                  *       override a cpu-bound nice +20 process.
1090                  *
1091                  * NOTE: We must use ESTCPULIM() here to deal with any
1092                  *       overshoot.
1093                  */
1094                 decay_factor = usched_bsd4_decay;
1095                 if (decay_factor < 1)
1096                         decay_factor = 1;
1097                 if (decay_factor > 1024)
1098                         decay_factor = 1024;
1099
1100                 lp->lwp_estcpu = ESTCPULIM(
1101                         (lp->lwp_estcpu * decay_factor + estcpu) /
1102                         (decay_factor + 1));
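                /*
                 * With the default usched_bsd4_decay of 8 this is a weighted
                 * average that moves lwp_estcpu 1/9th of the way toward the
                 * instantaneous estcpu computed above on each pass, so a
                 * sustained change in behavior takes several intervals to be
                 * fully reflected.
                 */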
1103
1104                 if (usched_bsd4_debug == lp->lwp_proc->p_pid)
1105                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
1106                 bsd4_resetpriority(lp);
1107                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1108                 lp->lwp_cpticks = 0;
1109         }
1110 }
1111
1112 /*
1113  * Compute the priority of a process when running in user mode.
1114  * Arrange to reschedule if the resulting priority is better
1115  * than that of the current process.
1116  *
1117  * This routine may be called with any process.
1118  *
1119  * This routine is called by fork1() for initial setup with the process
1120  * of the run queue, and also may be called normally with the process on or
1121  * off the run queue.
1122  *
1123  * MPSAFE
1124  */
1125 static void
1126 bsd4_resetpriority(struct lwp *lp)
1127 {
1128         bsd4_pcpu_t dd;
1129         int newpriority;
1130         u_short newrqtype;
1131         int reschedcpu;
1132         int checkpri;
1133         int estcpu;
1134
1135         /*
1136          * Calculate the new priority and queue type
1137          */
1138         crit_enter();
1139         spin_lock(&bsd4_spin);
1140
1141         newrqtype = lp->lwp_rtprio.type;
1142
1143         switch(newrqtype) {
1144         case RTP_PRIO_REALTIME:
1145         case RTP_PRIO_FIFO:
1146                 newpriority = PRIBASE_REALTIME +
1147                              (lp->lwp_rtprio.prio & PRIMASK);
1148                 break;
1149         case RTP_PRIO_NORMAL:
1150                 /*
1151                  * Detune estcpu based on batchiness.  lwp_batch ranges
1152                  * from 0 to  BATCHMAX.  Limit estcpu for the sake of
1153                  * the priority calculation to between 50% and 100%.
1154                  */
1155                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
1156                          (BATCHMAX * 2);
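                /*
                 * For example, a fully interactive thread (lwp_batch == 0)
                 * has its estcpu halved here (the 50% limit), while a fully
                 * batchy one (lwp_batch == BATCHMAX) keeps it unchanged
                 * (100%), so interactive threads get a better priority for
                 * the same measured cpu usage.
                 */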
1157
1158                 /*
1159                  * p_nice piece         Adds (0-40) * 2         0-80
1160                  * estcpu               Adds 16384  * 4 / 512   0-128
1161                  */
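                /*
                 * A worked example: a nice 0 thread whose detuned estcpu is
                 * 8192 (50%) gets a nice piece of 20 * 4 / 2 = 40 and an
                 * estcpu piece of 8192 * 4 / 512 = 64; the sum 104 scaled by
                 * 128 / (82 + 128) gives 63, so the final user priority is
                 * PRIBASE_NORMAL + 63.
                 */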
1162                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1163                 newpriority += estcpu * PPQ / ESTCPUPPQ;
1164                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1165                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1166                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1167                 break;
1168         case RTP_PRIO_IDLE:
1169                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1170                 break;
1171         case RTP_PRIO_THREAD:
1172                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1173                 break;
1174         default:
1175                 panic("Bad RTP_PRIO %d", newrqtype);
1176                 /* NOT REACHED */
1177         }
1178
1179         /*
1180          * The newpriority incorporates the queue type so do a simple masked
1181          * check to determine if the process has moved to another queue.  If
1182          * it has, and it is currently on a run queue, then move it.
1183          */
1184         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1185                 lp->lwp_priority = newpriority;
1186                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1187                         bsd4_remrunqueue_locked(lp);
1188                         lp->lwp_rqtype = newrqtype;
1189                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1190                         bsd4_setrunqueue_locked(lp);
1191                         checkpri = 1;
1192                 } else {
1193                         lp->lwp_rqtype = newrqtype;
1194                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1195                         checkpri = 0;
1196                 }
1197                 reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
1198         } else {
1199                 lp->lwp_priority = newpriority;
1200                 reschedcpu = -1;
1201                 checkpri = 1;
1202         }
1203
1204         /*
1205          * Determine if we need to reschedule the target cpu.  This only
1206          * occurs if the LWP is already on a scheduler queue, which means
1207          * that idle cpu notification has already occurred.  At most we
1208          * need only issue a need_user_resched() on the appropriate cpu.
1209          *
1210          * The LWP may be owned by a CPU different from the current one,
1211          * in which case dd->uschedcp may be modified without an MP lock
1212          * or a spinlock held.  The worst that happens is that the code
1213          * below causes a spurious need_user_resched() on the target CPU
1214          * and dd->upri to be wrong for a short period of time, both of
1215          * which are harmless.
1216          *
1217          * If checkpri is 0 we are adjusting the priority of the current
1218          * process, possibly higher (less desirable), so ignore the upri
1219          * check which will fail in that case.
1220          */
1221         if (reschedcpu >= 0) {
1222                 dd = &bsd4_pcpu[reschedcpu];
1223                 if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) &&
1224                     (checkpri == 0 ||
1225                      (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1226 #ifdef SMP
1227                         if (reschedcpu == mycpu->gd_cpuid) {
1228                                 spin_unlock(&bsd4_spin);
1229                                 need_user_resched();
1230                         } else {
1231                                 spin_unlock(&bsd4_spin);
1232                                 atomic_clear_cpumask(&bsd4_rdyprocmask,
1233                                                      CPUMASK(reschedcpu));
1234                                 lwkt_send_ipiq(lp->lwp_thread->td_gd,
1235                                                need_user_resched_remote, NULL);
1236                         }
1237 #else
1238                         spin_unlock(&bsd4_spin);
1239                         need_user_resched();
1240 #endif
1241                 } else {
1242                         spin_unlock(&bsd4_spin);
1243                 }
1244         } else {
1245                 spin_unlock(&bsd4_spin);
1246         }
1247         crit_exit();
1248 }
1249
1250 /*
1251  * MPSAFE
1252  */
1253 static
1254 void
1255 bsd4_yield(struct lwp *lp)
1256 {
1257 #if 0
1258         /* FUTURE (or something similar) */
1259         switch(lp->lwp_rqtype) {
1260         case RTP_PRIO_NORMAL:
1261                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1262                 break;
1263         default:
1264                 break;
1265         }
1266 #endif
1267         need_user_resched();
1268 }
1269
1270 /*
1271  * Called from fork1() when a new child process is being created.
1272  *
1273  * Give the child process an initial estcpu that is more batchy than
1274  * its parent and dock the parent for the fork (but do not
1275  * reschedule the parent).   This comprises the main part of our batch
1276  * detection heuristic for both parallel forking and sequential execs.
1277  *
1278  * XXX lwp should be "spawning" instead of "forking"
1279  *
1280  * MPSAFE
1281  */
1282 static void
1283 bsd4_forking(struct lwp *plp, struct lwp *lp)
1284 {
1285         /*
1286          * Put the child 4 queue slots (out of 32) higher than the parent
1287          * (less desireable than the parent).
1288          */
1289         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1290
1291         /*
1292          * The batch status of children always starts out centerline
1293          * and will inch-up or inch-down as appropriate.  It takes roughly
1294          * ~15 seconds of >50% cpu to hit the limit.
1295          */
1296         lp->lwp_batch = BATCHMAX / 2;
1297
1298         /*
1299          * Dock the parent a cost for the fork, protecting us from fork
1300          * bombs.  If the parent is forking quickly make the child more
1301          * batchy.
1302          */
1303         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1304 }
1305
1306 /*
1307  * Called when a parent waits for a child.
1308  *
1309  * MPSAFE
1310  */
1311 static void
1312 bsd4_exiting(struct lwp *lp, struct proc *child_proc)
1313 {
1314 }
1315
1316 /*
1317  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1318  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1319  * has a better or equal priority than the process that would otherwise be
1320  * chosen, NULL is returned.
1321  *
1322  * Until we fix the RUNQ code the chklp test has to be strict or we may
1323  * bounce between processes trying to acquire the current process designation.
1324  *
1325  * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
1326  *          left intact through the entire routine.
1327  */
1328 static
1329 struct lwp *
1330 chooseproc_locked(struct lwp *chklp)
1331 {
1332         struct lwp *lp;
1333         struct rq *q;
1334         u_int32_t *which, *which2;
1335         u_int32_t pri;
1336         u_int32_t rtqbits;
1337         u_int32_t tsqbits;
1338         u_int32_t idqbits;
1339         cpumask_t cpumask;
1340
1341         rtqbits = bsd4_rtqueuebits;
1342         tsqbits = bsd4_queuebits;
1343         idqbits = bsd4_idqueuebits;
1344         cpumask = mycpu->gd_cpumask;
1345
1346
1347 #ifdef SMP
1348 again:
1349 #endif
1350         if (rtqbits) {
1351                 pri = bsfl(rtqbits);
1352                 q = &bsd4_rtqueues[pri];
1353                 which = &bsd4_rtqueuebits;
1354                 which2 = &rtqbits;
1355         } else if (tsqbits) {
1356                 pri = bsfl(tsqbits);
1357                 q = &bsd4_queues[pri];
1358                 which = &bsd4_queuebits;
1359                 which2 = &tsqbits;
1360         } else if (idqbits) {
1361                 pri = bsfl(idqbits);
1362                 q = &bsd4_idqueues[pri];
1363                 which = &bsd4_idqueuebits;
1364                 which2 = &idqbits;
1365         } else {
1366                 return NULL;
1367         }
1368         lp = TAILQ_FIRST(q);
1369         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1370
1371 #ifdef SMP
1372         while ((lp->lwp_cpumask & cpumask) == 0) {
1373                 lp = TAILQ_NEXT(lp, lwp_procq);
1374                 if (lp == NULL) {
1375                         *which2 &= ~(1 << pri);
1376                         goto again;
1377                 }
1378         }
1379 #endif
1380
1381         /*
1382          * If the passed lwp <chklp> is reasonably close to the selected
1383          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1384          *
1385          * Note that we must err on the side of <chklp> to avoid bouncing
1386          * between threads in the acquire code.
1387          */
1388         if (chklp) {
1389                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1390                         return(NULL);
1391         }
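        /*
         * Worked example for the test above: PPQ is MAXPRI / NQS = 4, so
         * <chklp> is kept (NULL returned) unless the selected <lp> is better,
         * i.e. numerically lower, by at least one full queue's worth of
         * priority levels.
         */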
1392
1393 #ifdef SMP
1394         /*
1395          * If the chosen lwp does not reside on this cpu, spend a few
1396          * cycles looking for a better candidate at the same priority level.
1397          * This is only a fallback check; setrunqueue() tries to wake up the
1398          * correct cpu and is our front-line affinity mechanism.
1399          */
1400         if (lp->lwp_thread->td_gd != mycpu &&
1401             (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
1402         ) {
1403                 if (chklp->lwp_thread->td_gd == mycpu) {
1404                         ++choose_affinity;
1405                         lp = chklp;
1406                 }
1407         }
1408 #endif
1409
1410         KTR_COND_LOG(usched_chooseproc,
1411             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1412             lp->lwp_proc->p_pid,
1413             lp->lwp_thread->td_gd->gd_cpuid,
1414             mycpu->gd_cpuid);
1415
1416         TAILQ_REMOVE(q, lp, lwp_procq);
1417         --bsd4_runqcount;
1418         if (TAILQ_EMPTY(q))
1419                 *which &= ~(1 << pri);
1420         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1421         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1422
1423         return lp;
1424 }
1425
1426 #ifdef SMP
1427 /*
1428  * chooseproc() - with a cache coherence heuristic.  Try to pull a process
1429  * that has its home on the current CPU.  If the process doesn't have its home
1430  * here and is a batchy one (see batchy_looser_pri_test), we can wait for a
1431  * sched_tick; maybe its home cpu will become free and will pull it in.  In
1432  * any case we can't wait more than one tick; once that tick has expired we
1433  * pull the process in, no matter what.
1434  */
1435 static
1436 struct lwp *
1437 chooseproc_locked_cache_coherent(struct lwp *chklp)
1438 {
1439         struct lwp *lp;
1440         struct rq *q;
1441         u_int32_t *which, *which2;
1442         u_int32_t pri;
1443         u_int32_t checks;
1444         u_int32_t rtqbits;
1445         u_int32_t tsqbits;
1446         u_int32_t idqbits;
1447         cpumask_t cpumask;
1448
1449         struct lwp * min_level_lwp = NULL;
1450         struct rq *min_q = NULL;
1451         cpumask_t siblings;
1452         cpu_node_t* cpunode = NULL;
1453         u_int32_t min_level = MAXCPU;   /* number of levels < MAXCPU */
1454         u_int32_t *min_which = NULL;
1455         u_int32_t min_pri = 0;
1456         u_int32_t level = 0;
1457
1458         rtqbits = bsd4_rtqueuebits;
1459         tsqbits = bsd4_queuebits;
1460         idqbits = bsd4_idqueuebits;
1461         cpumask = mycpu->gd_cpumask;
1462
1463         /* Get the mask corresponding to the sysctl-configured level */
1464         cpunode = bsd4_pcpu[mycpu->gd_cpuid].cpunode;
1465         level = usched_bsd4_stick_to_level;
1466         while (level) {
1467                 cpunode = cpunode->parent_node;
1468                 level--;
1469         }
1470         /* The cpus which can elect a process */
1471         siblings = cpunode->members;
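        /*
         * Example (the actual meaning depends on the detected topology, see
         * hw.cpu_topology.level_description): with stick_to_level == 1 the
         * walk above goes one step up the topology tree, so <siblings>
         * typically covers the logical cpus sharing this cpu's core, while
         * larger levels widen the mask towards chip and package scope.
         */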
1472         checks = 0;
1473
1474 again:
1475         if (rtqbits) {
1476                 pri = bsfl(rtqbits);
1477                 q = &bsd4_rtqueues[pri];
1478                 which = &bsd4_rtqueuebits;
1479                 which2 = &rtqbits;
1480         } else if (tsqbits) {
1481                 pri = bsfl(tsqbits);
1482                 q = &bsd4_queues[pri];
1483                 which = &bsd4_queuebits;
1484                 which2 = &tsqbits;
1485         } else if (idqbits) {
1486                 pri = bsfl(idqbits);
1487                 q = &bsd4_idqueues[pri];
1488                 which = &bsd4_idqueuebits;
1489                 which2 = &idqbits;
1490         } else {
1491                 /*
1492                  * No queues left; kick the best deferred candidate and bail.
1493                  */
1494                 kick_helper(min_level_lwp);
1495                 return NULL;
1496         }
1497         lp = TAILQ_FIRST(q);
1498         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1499
1500         /*
1501          * Limit the number of checks per queue to a configurable value
1502          * to minimize contention (we are in a locked region).
1503          */
1504         while (checks < usched_bsd4_queue_checks) {
1505                 if ((lp->lwp_cpumask & cpumask) == 0 ||
1506                     ((siblings & lp->lwp_thread->td_gd->gd_cpumask) == 0 &&
1507                       (lp->lwp_setrunqueue_ticks == sched_ticks ||
1508                        lp->lwp_setrunqueue_ticks == (int)(sched_ticks - 1)) &&
1509                       batchy_looser_pri_test(lp))) {
1510
1511                         KTR_COND_LOG(usched_chooseproc_cc_not_good,
1512                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1513                             lp->lwp_proc->p_pid,
1514                             (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1515                             (unsigned long)siblings,
1516                             (unsigned long)cpumask);
1517
1518                         cpunode = bsd4_pcpu[lp->lwp_thread->td_gd->gd_cpuid].cpunode;
1519                         level = 0;
1520                         while (cpunode) {
1521                                 if (cpunode->members & cpumask)
1522                                         break;
1523                                 cpunode = cpunode->parent_node;
1524                                 level++;
1525                         }
1526                         if (level < min_level ||
1527                             (level == min_level && min_level_lwp &&
1528                              lp->lwp_priority < min_level_lwp->lwp_priority)) {
1529                                 kick_helper(min_level_lwp);
1530                                 min_level_lwp = lp;
1531                                 min_level = level;
1532                                 min_q = q;
1533                                 min_which = which;
1534                                 min_pri = pri;
1535                         } else {
1536                                 kick_helper(lp);
1537                         }
1538                         lp = TAILQ_NEXT(lp, lwp_procq);
1539                         if (lp == NULL) {
1540                                 *which2 &= ~(1 << pri);
1541                                 goto again;
1542                         }
1543                 } else {
1544                         KTR_COND_LOG(usched_chooseproc_cc_elected,
1545                             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1546                             lp->lwp_proc->p_pid,
1547                             (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1548                             (unsigned long)siblings,
1549                             (unsigned long)cpumask);
1550
1551                         goto found;
1552                 }
1553                 ++checks;
1554         }
1555
1556         /*
1557          * Checks exhausted, we tried to defer too many threads, so schedule
1558          * the best of the worst.
1559          */
1560         lp = min_level_lwp;
1561         q = min_q;
1562         which = min_which;
1563         pri = min_pri;
1564         KASSERT(lp, ("chooseproc: at least the first lp was good"));
1565
1566 found:
1567
1568         /*
1569          * If the passed lwp <chklp> is reasonably close to the selected
1570          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1571          *
1572          * Note that we must err on the side of <chklp> to avoid bouncing
1573          * between threads in the acquire code.
1574          */
1575         if (chklp) {
1576                 if (chklp->lwp_priority < lp->lwp_priority + PPQ) {
1577                         kick_helper(lp);
1578                         return(NULL);
1579                 }
1580         }
1581
1582         KTR_COND_LOG(usched_chooseproc_cc,
1583             lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1584             lp->lwp_proc->p_pid,
1585             lp->lwp_thread->td_gd->gd_cpuid,
1586             mycpu->gd_cpuid);
1587
1588         TAILQ_REMOVE(q, lp, lwp_procq);
1589         --bsd4_runqcount;
1590         if (TAILQ_EMPTY(q))
1591                 *which &= ~(1 << pri);
1592         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1593         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1594
1595         return lp;
1596 }
1597
1598 /*
1599  * If we aren't willing to schedule a ready process on our cpu, give its
1600  * target cpu a kick rather than wait for the next tick.
1601  *
1602  * Called with bsd4_spin held.
1603  */
1604 static
1605 void
1606 kick_helper(struct lwp *lp)
1607 {
1608         globaldata_t gd;
1609         bsd4_pcpu_t dd;
1610
1611         if (lp == NULL)
1612                 return;
1613         gd = lp->lwp_thread->td_gd;
1614         dd = &bsd4_pcpu[gd->gd_cpuid];
1615         if ((smp_active_mask & usched_global_cpumask &
1616             bsd4_rdyprocmask & gd->gd_cpumask) == 0) {
1617                 return;
1618         }
1619         ++usched_bsd4_kicks;
1620         atomic_clear_cpumask(&bsd4_rdyprocmask, gd->gd_cpumask);
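        /*
         * If the target cpu is currently running something with a worse
         * (numerically larger) user priority than lp, poke it with an IPI
         * so it reschedules right away; otherwise just wake its helper
         * thread and let that pull lp off the run queue.
         */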
1621         if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
1622                 lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
1623         } else {
1624                 wakeup(&dd->helper_thread);
1625         }
1626 }
1627
1628 static
1629 void
1630 need_user_resched_remote(void *dummy)
1631 {
1632         globaldata_t gd = mycpu;
1633         bsd4_pcpu_t  dd = &bsd4_pcpu[gd->gd_cpuid];
1634
1635         need_user_resched();
1636
1637         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1638         wakeup_mycpu(&dd->helper_thread);
1639 }
1640
1641 #endif
1642
1643 /*
1644  * bsd4_remrunqueue_locked() removes a given process from the run queue
1645  * that it is on, clearing the queue busy bit if it becomes empty.
1646  *
1647  * Note that the user process scheduler is different from the LWKT scheduler.
1648  * The user process scheduler only manages user processes but it uses LWKT
1649  * underneath, and a user process operating in the kernel will often be
1650  * 'released' from our management.
1651  *
1652  * MPSAFE - bsd4_spin must be held exclusively on call
1653  */
1654 static void
1655 bsd4_remrunqueue_locked(struct lwp *lp)
1656 {
1657         struct rq *q;
1658         u_int32_t *which;
1659         u_int8_t pri;
1660
1661         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1662         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1663         --bsd4_runqcount;
1664         KKASSERT(bsd4_runqcount >= 0);
1665
1666         pri = lp->lwp_rqindex;
1667         switch(lp->lwp_rqtype) {
1668         case RTP_PRIO_NORMAL:
1669                 q = &bsd4_queues[pri];
1670                 which = &bsd4_queuebits;
1671                 break;
1672         case RTP_PRIO_REALTIME:
1673         case RTP_PRIO_FIFO:
1674                 q = &bsd4_rtqueues[pri];
1675                 which = &bsd4_rtqueuebits;
1676                 break;
1677         case RTP_PRIO_IDLE:
1678                 q = &bsd4_idqueues[pri];
1679                 which = &bsd4_idqueuebits;
1680                 break;
1681         default:
1682                 panic("remrunqueue: invalid rtprio type");
1683                 /* NOT REACHED */
1684         }
1685         TAILQ_REMOVE(q, lp, lwp_procq);
1686         if (TAILQ_EMPTY(q)) {
1687                 KASSERT((*which & (1 << pri)) != 0,
1688                         ("remrunqueue: remove from empty queue"));
1689                 *which &= ~(1 << pri);
1690         }
1691 }
1692
1693 /*
1694  * bsd4_setrunqueue_locked()
1695  *
1696  * Add a process, whose rqtype and rqindex have previously been calculated,
1697  * onto the appropriate run queue.  This routine only performs the insertion;
1698  * deciding whether a reschedule is needed is left to the caller.
1699  *
1700  * NOTE: Lower priorities are better priorities.
1701  *
1702  * MPSAFE - bsd4_spin must be held exclusively on call
1703  */
1704 static void
1705 bsd4_setrunqueue_locked(struct lwp *lp)
1706 {
1707         struct rq *q;
1708         u_int32_t *which;
1709         int pri;
1710
1711         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1712         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1713         ++bsd4_runqcount;
1714
1715         pri = lp->lwp_rqindex;
1716
1717         switch(lp->lwp_rqtype) {
1718         case RTP_PRIO_NORMAL:
1719                 q = &bsd4_queues[pri];
1720                 which = &bsd4_queuebits;
1721                 break;
1722         case RTP_PRIO_REALTIME:
1723         case RTP_PRIO_FIFO:
1724                 q = &bsd4_rtqueues[pri];
1725                 which = &bsd4_rtqueuebits;
1726                 break;
1727         case RTP_PRIO_IDLE:
1728                 q = &bsd4_idqueues[pri];
1729                 which = &bsd4_idqueuebits;
1730                 break;
1731         default:
1732                 panic("setrunqueue: invalid rtprio type");
1733                 /* NOT REACHED */
1734         }
1735
1736         /*
1737          * Add to the correct queue and set the appropriate bit.  If no
1738          * lower priority (i.e. better) processes are in the queue then
1739          * we want a reschedule; calculate the best cpu for the job.
1740          *
1741          * Always run reschedules on the LWP's original cpu.
1742          */
1743         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1744         *which |= 1 << pri;
1745 }
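/*
 * bsd4_setrunqueue_locked() and bsd4_remrunqueue_locked() together maintain
 * the invariant that bit <pri> of the per-class queuebits word is set exactly
 * when run queue <pri> of that class is non-empty, which is what lets the
 * chooseproc*() routines locate the best non-empty queue with a single
 * bsfl() bit scan instead of walking all 32 queues.
 */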
1746
1747 #ifdef SMP
1748
1749 /*
1750  * For SMP systems a user scheduler helper thread is created for each
1751  * cpu and is used to allow one cpu to wakeup another for the purposes of
1752  * scheduling userland threads from setrunqueue().
1753  *
1754  * UP systems do not need the helper since there is only one cpu.
1755  *
1756  * We can't use the idle thread for this because we might block.
1757  * Additionally, doing things this way allows us to HLT idle cpus
1758  * on MP systems.
1759  *
1760  * MPSAFE
1761  */
1762 static void
1763 sched_thread(void *dummy)
1764 {
1765     globaldata_t gd;
1766     bsd4_pcpu_t  dd;
1767     bsd4_pcpu_t  tmpdd;
1768     struct lwp *nlp;
1769     cpumask_t mask;
1770     int cpuid;
1771 #ifdef SMP
1772     cpumask_t tmpmask;
1773     int tmpid;
1774 #endif
1775
1776     gd = mycpu;
1777     cpuid = gd->gd_cpuid;       /* doesn't change */
1778     mask = gd->gd_cpumask;      /* doesn't change */
1779     dd = &bsd4_pcpu[cpuid];
1780
1781     /*
1782      * Since we are woken up only when no user processes are scheduled
1783      * on a cpu, we can run at an ultra low priority.
1784      */
1785     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1786
1787     tsleep(&dd->helper_thread, 0, "sched_thread_sleep", 0);
1788
1789     for (;;) {
1790         /*
1791          * We use the LWKT deschedule-interlock trick to avoid racing
1792          * bsd4_rdyprocmask.  This means we cannot block through to the
1793          * manual lwkt_switch() call we make below.
1794          */
1795         crit_enter_gd(gd);
1796         tsleep_interlock(&dd->helper_thread, 0);
1797         spin_lock(&bsd4_spin);
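        /*
         * Re-advertise this cpu's helper as ready.  kick_helper() and the
         * chaining code below clear this bit before waking us, which keeps
         * them from issuing redundant wakeups while we are already running.
         */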
1798         atomic_set_cpumask(&bsd4_rdyprocmask, mask);
1799
1800         clear_user_resched();   /* This satisfies the reschedule request */
1801         dd->rrcount = 0;        /* Reset the round-robin counter */
1802
1803         if ((bsd4_curprocmask & mask) == 0) {
1804                 /*
1805                  * No thread is currently scheduled.
1806                  */
1807                 KKASSERT(dd->uschedcp == NULL);
1808                 if ((nlp = chooseproc_locked(NULL)) != NULL) {
1809                         KTR_COND_LOG(usched_sched_thread_no_process,
1810                             nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1811                             gd->gd_cpuid,
1812                             nlp->lwp_proc->p_pid,
1813                             nlp->lwp_thread->td_gd->gd_cpuid);
1814
1815                         atomic_set_cpumask(&bsd4_curprocmask, mask);
1816                         dd->upri = nlp->lwp_priority;
1817                         dd->uschedcp = nlp;
1818                         dd->rrcount = 0;        /* reset round robin */
1819                         spin_unlock(&bsd4_spin);
1820 #ifdef SMP
1821                         lwkt_acquire(nlp->lwp_thread);
1822 #endif
1823                         lwkt_schedule(nlp->lwp_thread);
1824                 } else {
1825                         spin_unlock(&bsd4_spin);
1826                 }
1827         } else if (bsd4_runqcount) {
1828                 if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
1829                         KTR_COND_LOG(usched_sched_thread_process,
1830                             nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1831                             gd->gd_cpuid,
1832                             nlp->lwp_proc->p_pid,
1833                             nlp->lwp_thread->td_gd->gd_cpuid);
1834
1835                         dd->upri = nlp->lwp_priority;
1836                         dd->uschedcp = nlp;
1837                         dd->rrcount = 0;        /* reset round robin */
1838                         spin_unlock(&bsd4_spin);
1839 #ifdef SMP
1840                         lwkt_acquire(nlp->lwp_thread);
1841 #endif
1842                         lwkt_schedule(nlp->lwp_thread);
1843                 } else {
1844                         /*
1845                          * CHAINING CONDITION TRAIN
1846                          *
1847                          * We could not deal with the scheduler wakeup
1848                          * request on this cpu; locate a ready scheduler
1849                          * with no current lp assignment and chain to it.
1850                          *
1851                          * This ensures that a wakeup race which fails due
1852                          * to the priority test does not leave other unscheduled
1853                          * cpus idle when the runqueue is not empty.
1854                          */
1855                         tmpmask = ~bsd4_curprocmask &
1856                                   bsd4_rdyprocmask & smp_active_mask;
1857                         if (tmpmask) {
1858                                 tmpid = BSFCPUMASK(tmpmask);
1859                                 tmpdd = &bsd4_pcpu[tmpid];
1860                                 atomic_clear_cpumask(&bsd4_rdyprocmask,
1861                                                      CPUMASK(tmpid));
1862                                 spin_unlock(&bsd4_spin);
1863                                 wakeup(&tmpdd->helper_thread);
1864                         } else {
1865                                 spin_unlock(&bsd4_spin);
1866                         }
1867
1868                         KTR_LOG(usched_sched_thread_no_process_found,
1869                                 gd->gd_cpuid, (unsigned long)tmpmask);
1870                 }
1871         } else {
1872                 /*
1873                  * The runq is empty.
1874                  */
1875                 spin_unlock(&bsd4_spin);
1876         }
1877
1878         /*
1879          * We're descheduled unless someone scheduled us.  Switch away.
1880          * Exiting the critical section will cause splz() to be called
1881          * for us if interrupts and such are pending.
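         *
         * The PINTERLOCKED tsleep below pairs with the tsleep_interlock()
         * issued at the top of the loop, so a wakeup delivered between the
         * two calls is not lost.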
1882          */
1883         crit_exit_gd(gd);
1884         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1885     }
1886 }
1887
1888 /* sysctl stick_to_level parameter */
1889 static int
1890 sysctl_usched_bsd4_stick_to_level(SYSCTL_HANDLER_ARGS)
1891 {
1892         int error, new_val;
1893
1894         new_val = usched_bsd4_stick_to_level;
1895
1896         error = sysctl_handle_int(oidp, &new_val, 0, req);
1897         if (error != 0 || req->newptr == NULL)
1898                 return (error);
1899         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1900                 return (EINVAL);
1901         usched_bsd4_stick_to_level = new_val;
1902         return (0);
1903 }
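/*
 * Usage sketch (from userland, not part of this file): the level can be read
 * or changed at runtime with, e.g.,
 *
 *	sysctl kern.usched_bsd4.stick_to_level
 *	sysctl kern.usched_bsd4.stick_to_level=1
 *
 * and the handler above rejects values outside the range
 * [0, cpu_topology_levels_number - 1] with EINVAL.
 */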
1904
1905 /*
1906  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
1907  * been cleared by rqinit() and we should not mess with it further.
1908  */
1909 static void
1910 sched_thread_cpu_init(void)
1911 {
1912         int i;
1913         int cpuid;
1914         int smt_not_supported = 0;
1915         int cache_coherent_not_supported = 0;
1916
1917         if (bootverbose)
1918                 kprintf("Start scheduler helpers on cpus:\n");
1919
1920         sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
1921         usched_bsd4_sysctl_tree =
1922                 SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
1923                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1924                                 "usched_bsd4", CTLFLAG_RD, 0, "");
1925
1926         for (i = 0; i < ncpus; ++i) {
1927                 bsd4_pcpu_t dd = &bsd4_pcpu[i];
1928                 cpumask_t mask = CPUMASK(i);
1929
1930                 if ((mask & smp_active_mask) == 0)
1931                     continue;
1932
1933                 dd->cpunode = get_cpu_node_by_cpuid(i);
1934
1935                 if (dd->cpunode == NULL) {
1936                         smt_not_supported = 1;
1937                         cache_coherent_not_supported = 1;
1938                         if (bootverbose)
1939                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1940                                          "found for cpu\n", i);
1941                 } else {
1942                         switch (dd->cpunode->type) {
1943                         case THREAD_LEVEL:
1944                                 if (bootverbose)
1945                                         kprintf ("\tcpu%d - HyperThreading "
1946                                                  "available. Core siblings: ",
1947                                                  i);
1948                                 break;
1949                         case CORE_LEVEL:
1950                                 smt_not_supported = 1;
1951
1952                                 if (bootverbose)
1953                                         kprintf ("\tcpu%d - No HT available, "
1954                                                  "multi-core/physical "
1955                                                  "cpu. Physical siblings: ",
1956                                                  i);
1957                                 break;
1958                         case CHIP_LEVEL:
1959                                 smt_not_supported = 1;
1960
1961                                 if (bootverbose)
1962                                         kprintf ("\tcpu%d - No HT available, "
1963                                                  "single-core/physical cpu. "
1964                                                  "Package Siblings: ",
1965                                                  i);
1966                                 break;
1967                         default:
1968                                 /* Let's go for safe defaults here */
1969                                 smt_not_supported = 1;
1970                                 cache_coherent_not_supported = 1;
1971                                 if (bootverbose)
1972                                         kprintf ("\tcpu%d - Unknown cpunode->"
1973                                                  "type=%u. Siblings: ",
1974                                                  i,
1975                                                  (u_int)dd->cpunode->type);
1976                                 break;
1977                         }
1978
1979                         if (bootverbose) {
1980                                 if (dd->cpunode->parent_node != NULL) {
1981                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1982                                                 kprintf("cpu%d ", cpuid);
1983                                         kprintf("\n");
1984                                 } else {
1985                                         kprintf(" no siblings\n");
1986                                 }
1987                         }
1988                 }
1989
1990                 lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
1991                             0, i, "usched %d", i);
1992
1993                 /*
1994                  * Allow user scheduling on the target cpu.  cpu #0 has already
1995                  * been enabled in rqinit().
1996                  */
1997                 if (i)
1998                     atomic_clear_cpumask(&bsd4_curprocmask, mask);
1999                 atomic_set_cpumask(&bsd4_rdyprocmask, mask);
2000                 dd->upri = PRIBASE_NULL;
2001
2002         }
2003
2004         /* usched_bsd4 sysctl configurable parameters */
2005
2006         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2007                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2008                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2009                        &usched_bsd4_rrinterval, 0, "");
2010         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2011                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2012                        OID_AUTO, "decay", CTLFLAG_RW,
2013                        &usched_bsd4_decay, 0, "Extra decay when not running");
2014         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2015                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2016                        OID_AUTO, "batch_time", CTLFLAG_RW,
2017                        &usched_bsd4_batch_time, 0, "Min batch counter value");
2018         SYSCTL_ADD_LONG(&usched_bsd4_sysctl_ctx,
2019                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2020                        OID_AUTO, "kicks", CTLFLAG_RW,
2021                        &usched_bsd4_kicks, "Number of kickstarts");
2022
2023         /* Add enable/disable option for SMT scheduling if supported */
2024         if (smt_not_supported) {
2025                 usched_bsd4_smt = 0;
2026                 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
2027                                   SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2028                                   OID_AUTO, "smt", CTLFLAG_RD,
2029                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2030         } else {
2031                 usched_bsd4_smt = 1;
2032                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2033                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2034                                OID_AUTO, "smt", CTLFLAG_RW,
2035                                &usched_bsd4_smt, 0, "Enable SMT scheduling");
2036         }
2037
2038         /*
2039          * Add enable/disable option for cache coherent scheduling
2040          * if supported
2041          */
2042         if (cache_coherent_not_supported) {
2043 #ifdef SMP
2044                 usched_bsd4_cache_coherent = 0;
2045                 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
2046                                   SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2047                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
2048                                   "NOT SUPPORTED", 0,
2049                                   "Cache coherence NOT SUPPORTED");
2050 #endif
2051         } else {
2052 #ifdef SMP
2053                 usched_bsd4_cache_coherent = 1;
2054                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2055                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2056                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
2057                                &usched_bsd4_cache_coherent, 0,
2058                                "Enable/Disable cache coherent scheduling");
2059 #endif
2060
2061                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2062                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2063                                OID_AUTO, "upri_affinity", CTLFLAG_RW,
2064                                &usched_bsd4_upri_affinity, 1,
2065                                "Number of PPQs in user priority check");
2066
2067                 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2068                                SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2069                                OID_AUTO, "queue_checks", CTLFLAG_RW,
2070                                &usched_bsd4_queue_checks, 5,
2071                                "LWPs to check from a queue before giving up");
2072
2073                 SYSCTL_ADD_PROC(&usched_bsd4_sysctl_ctx,
2074                                 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2075                                 OID_AUTO, "stick_to_level",
2076                                 CTLTYPE_INT | CTLFLAG_RW,
2077                                 NULL, sizeof usched_bsd4_stick_to_level,
2078                                 sysctl_usched_bsd4_stick_to_level, "I",
2079                                 "Stick a process to this level. See sysctl "
2080                                 "parameter hw.cpu_topology.level_description");
2081         }
2082 }
2083 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2084         sched_thread_cpu_init, NULL)
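/*
 * The SYSINIT above runs sched_thread_cpu_init() once during boot at the
 * SI_BOOT2_USCHED stage, so every cpu in smp_active_mask ends up with its own
 * helper kthread and the kern.usched_bsd4 sysctl tree is populated early in
 * the boot sequence.
 */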
2085
2086 #else /* No SMP options - just add the configurable parameters to sysctl */
2087
2088 static void
2089 sched_sysctl_tree_init(void)
2090 {
2091         sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
2092         usched_bsd4_sysctl_tree =
2093                 SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
2094                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2095                                 "usched_bsd4", CTLFLAG_RD, 0, "");
2096
2097         /* usched_bsd4 sysctl configurable parameters */
2098         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2099                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2100                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2101                        &usched_bsd4_rrinterval, 0, "");
2102         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2103                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2104                        OID_AUTO, "decay", CTLFLAG_RW,
2105                        &usched_bsd4_decay, 0, "Extra decay when not running");
2106         SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2107                        SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2108                        OID_AUTO, "batch_time", CTLFLAG_RW,
2109                        &usched_bsd4_batch_time, 0, "Min batch counter value");
2110 }
2111 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2112         sched_sysctl_tree_init, NULL)
2113 #endif
2114