 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *
 * From: @(#)kern_clock.c      8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_timeout.c,v 1.59.2.1 2001/11/13 18:24:52 archie Exp $
 * DRAGONFLY BGL STATUS
 *
 * All the API functions should be MP safe.
 *
 * The callback functions will be flagged as being MP safe if the
 * timeout structure is initialized with callout_init_mp() instead of
 * callout_init().
 *
 * The helper threads cannot be made preempt-capable until after we
 * clean up all the uses of splsoftclock() and related interlocks (which
 * require the related functions to be MP safe as well).
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 *
 * The per-cpu augmentation was done by Matthew Dillon.
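 *
 * Illustrative sketch (not from the original sources): because the wheel
 * size is a power of 2, hashing a callout's expiry tick to a wheel bucket
 * is a single mask operation rather than a modulo:
 *
 *        idx    = c->c_time & callwheelmask;
 *        bucket = &sc->callwheel[idx];
 *
 * Callouts whose expiry times differ by a multiple of callwheelsize hash
 * to the same bucket, which is why softclock_handler() below still checks
 * c->c_time against sc->softticks before dispatching an entry.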
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100         /* Maximum allowed value of steps. */
struct softclock_pcpu {
        struct callout_tailq *callwheel;
        struct callout * volatile next;
        struct callout *running;        /* currently running callout */
        int softticks;                  /* softticks index */
        int curticks;                   /* per-cpu ticks counter */
        int isrunning;                  /* helper thread is scheduled/running */
        struct thread thread;

typedef struct softclock_pcpu *softclock_pcpu_t;
 * allocate more timeout table slots when table overflows.
static MALLOC_DEFINE(M_CALLOUT, "callout", "callout structures");
static int callwheelsize;
static int callwheelmask;
static struct softclock_pcpu softclock_pcpu_ary[MAXCPU];

static void softclock_handler(void *arg);
swi_softclock_setup(void *arg)
        /*
         * Figure out how large a callwheel we need. It must be a power of 2.
         *
         * ncallout is primarily based on available memory; don't explode
         * the allocations if the system has a lot of cpus.
         */
        target = ncallout / ncpus + 16;
        callwheelsize = 1;
        while (callwheelsize < target)
                callwheelsize <<= 1;
        callwheelmask = callwheelsize - 1;
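        /*
         * Worked example (illustrative numbers only): with ncallout = 16448
         * and ncpus = 16, target is 16448/16 + 16 = 1044, so callwheelsize
         * is rounded up to 2048 and callwheelmask becomes 0x7ff.
         */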
        /*
         * Initialize per-cpu data structures.
         */
        for (cpu = 0; cpu < ncpus; ++cpu) {
                sc = &softclock_pcpu_ary[cpu];
                sc->callwheel = kmalloc(sizeof(*sc->callwheel) * callwheelsize,
                                        M_CALLOUT, M_WAITOK|M_ZERO);
                for (i = 0; i < callwheelsize; ++i)
                        TAILQ_INIT(&sc->callwheel[i]);
                /*
                 * Mark the softclock handler as being an interrupt thread
                 * even though it really isn't, but do not allow it to
                 * preempt other threads (do not assign td_preemptable).
                 *
                 * Kernel code now assumes that callouts do not preempt
                 * the cpu they were scheduled on.
                 */
                lwkt_create(softclock_handler, sc, NULL,
                            &sc->thread, TDF_NOSTART | TDF_INTTHREAD,
                            cpu, "softclock %d", cpu);

 * Must occur after ncpus has been initialized.
SYSINIT(softclock_setup, SI_BOOT2_SOFTCLOCK, SI_ORDER_SECOND,
        swi_softclock_setup, NULL);
 * This routine is called from the hardclock() (basically a FASTint/IPI) on
 * each cpu in the system. sc->curticks is this cpu's notion of the timebase.
 * It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'! sc->softticks is where
 * the callwheel is currently indexed.
 *
 * WARNING! The MP lock is not necessarily held on call, nor can it be
 * safely obtained.
 *
 * sc->softticks is adjusted by either this routine or our helper thread
 * depending on whether the helper thread is running or not.
hardclock_softtick(globaldata_t gd)
        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        if (sc->softticks == sc->curticks) {
                /*
                 * in sync, only wakeup the thread if there is something to
                 * do.
                 */
                if (TAILQ_FIRST(&sc->callwheel[sc->softticks & callwheelmask]))
                        lwkt_schedule(&sc->thread);
                /*
                 * out of sync, wakeup the thread unconditionally so it can
                 * catch up.
                 */
                lwkt_schedule(&sc->thread);
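/*
 * Illustrative example (not from the original sources): if sc->curticks
 * has advanced to 1005 while sc->softticks is still at 1002, this cpu is
 * "out of sync" and the helper thread is woken unconditionally; in
 * softclock_handler() below it will then walk buckets 1002 through 1005
 * until sc->softticks reaches sc->curticks + 1.
 */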
 * This procedure is the main loop of our per-cpu helper thread. The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section. The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
softclock_handler(void *arg)
        struct callout_tailq *bucket;
        void (*c_func)(void *);

        /*
         * Run the callout thread at the same priority as other kernel
         * threads so it can be round-robined.
         */
        /*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

        while (sc->softticks != (int)(sc->curticks + 1)) {
                bucket = &sc->callwheel[sc->softticks & callwheelmask];
                for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
                        if (c->c_time != sc->softticks) {
                                sc->next = TAILQ_NEXT(c, c_links.tqe);
                                continue;
                        }
                        if (c->c_flags & CALLOUT_MPSAFE) {
                                /*
                                 * The request might be removed while we
                                 * are waiting to get the MP lock. If it
                                 * was removed sc->next will point to the
                                 * next valid request or NULL, loop up.
                                 */
                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                        TAILQ_REMOVE(bucket, c, c_links.tqe);
                        KKASSERT(c->c_flags & CALLOUT_DID_INIT);
                        c->c_flags &= ~CALLOUT_PENDING;
                        /* NOTE: list may have changed */
        lwkt_deschedule_self(&sc->thread);      /* == curthread */
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *        safely be passed to callout_reset() and callout_stop()
 * callout_init_mp() - same but any installed functions must be MP safe.
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
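 *
 * Illustrative usage (hypothetical driver code; foo_softc and foo_timeout
 * are made-up names, not part of this file):
 *
 *        struct foo_softc {
 *                struct callout  foo_timer;
 *        };
 *
 *        static void
 *        foo_timeout(void *arg)
 *        {
 *                struct foo_softc *sc = arg;
 *
 *                callout_deactivate(&sc->foo_timer);
 *                ... do the periodic work, then rearm if desired ...
 *                callout_reset(&sc->foo_timer, hz, foo_timeout, sc);
 *        }
 *
 * Setup and teardown, e.g. in a driver's attach/detach paths:
 *
 *        callout_init(&sc->foo_timer);
 *        callout_reset(&sc->foo_timer, hz, foo_timeout, sc);
 *        ...
 *        callout_stop(&sc->foo_timer);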
 * Start or restart a timeout. Install the callout structure in the
 * callwheel. Callers may legally pass any value, even if 0 or negative,
 * but since the sc->curticks index may have already been processed a
 * minimum timeout of 1 tick will be enforced.
 *
 * The callout is installed on and will be processed on the current cpu's
 * callout wheel.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *),
              void *arg)
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                kprintf(
                    "callout_reset(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        if (c->c_flags & CALLOUT_ACTIVE)
                callout_stop(c);
        c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
        c->c_time = sc->curticks + to_ticks;
        TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & callwheelmask],
                          c, c_links.tqe);
struct callout_remote_arg {

callout_reset_ipi(void *arg)
        struct callout_remote_arg *rmt = arg;

        callout_reset(rmt->c, rmt->to_ticks, rmt->ftn, rmt->arg);
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
                    void *arg, int cpuid)
        KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d", cpuid));

        if (cpuid == mycpuid) {
                callout_reset(c, to_ticks, ftn, arg);
        } else {
                struct globaldata *target_gd;
                struct callout_remote_arg rmt;

                rmt.to_ticks = to_ticks;
                target_gd = globaldata_find(cpuid);
                seq = lwkt_send_ipiq(target_gd, callout_reset_ipi, &rmt);
                lwkt_wait_ipiq(target_gd, seq);
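/*
 * Illustrative usage (hypothetical names, not part of this file): arm a
 * callout on a specific cpu regardless of which cpu issues the call, e.g.
 * to keep a timer pinned to cpu 0:
 *
 *        callout_reset_bycpu(&sc->foo_timer, hz / 10, foo_timeout, sc, 0);
 *
 * The callout is then queued on, and its function dispatched from, cpu 0's
 * softclock thread.
 */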
 * Stop a running timer. WARNING! If called on a cpu other than the one
 * the callout was started on this function will liveloop on its IPI to
 * the target cpu to process the request. It is possible for the callout
 * to execute in that case.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
 *
 * WARNING! This routine may be called from an IPI
 *
 * WARNING! This function can return while its c_func is still running
 * in the callout thread; a secondary check may be needed.
 * Use callout_stop_sync() to wait for any callout function to
 * complete before returning, making sure that no deadlock is
 * possible if you do.
callout_stop(struct callout *c)
        globaldata_t gd = mycpu;

        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                kprintf(
                    "callout_stop(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);

        /*
         * Don't attempt to delete a callout that's not on the queue. The
         * callout may not have a cpu assigned to it. Callers do not have
         * to be on the issuing cpu but must still serialize access to the
         * callout structure.
         *
         * We are not cpu-localized here and cannot safely modify the
         * flags field in the callout structure. Note that most of the
         * time CALLOUT_ACTIVE will be 0 if CALLOUT_PENDING is also 0.
         *
         * If we race another cpu's dispatch of this callout it is possible
         * for CALLOUT_ACTIVE to be set with CALLOUT_PENDING unset. This
         * will cause us to fall through and synchronize with the other
         * cpu.
         */
        if ((c->c_flags & CALLOUT_PENDING) == 0) {
                if ((c->c_flags & CALLOUT_ACTIVE) == 0) {
                if (c->c_gd == NULL || c->c_gd == gd) {
                        c->c_flags &= ~CALLOUT_ACTIVE;
        if ((tgd = c->c_gd) != gd) {
                /*
                 * If the callout is owned by a different CPU we have to
                 * execute the function synchronously on the target cpu.
                 */
                cpu_ccfence();  /* don't let tgd alias c_gd */
                seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c);
                lwkt_wait_ipiq(tgd, seq);
        } else {
                /*
                 * If the callout is owned by the same CPU we can
                 * process it directly, but if we are racing our helper
                 * thread (sc->next), we have to adjust sc->next. The
                 * race is interlocked by a critical section.
                 */
                sc = &softclock_pcpu_ary[gd->gd_cpuid];
                c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
                if (sc->next == c)
                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                TAILQ_REMOVE(&sc->callwheel[c->c_time & callwheelmask],
                             c, c_links.tqe);
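/*
 * Illustrative follow-up (hypothetical callout name, not part of this
 * file): because callout_stop() may return while c_func is still running
 * on the owning cpu, callers that must not race the callback can use the
 * synchronous variant described below instead, provided they hold no locks
 * the callback might also need:
 *
 *        callout_stop_sync(&sc->foo_timer);
 */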
 * Issue a callout_stop() and ensure that any callout race completes
 * before returning. Does NOT de-initialize the callout.
callout_stop_sync(struct callout *c)
        while (c->c_flags & CALLOUT_DID_INIT) {
                sc = &softclock_pcpu_ary[c->c_gd->gd_cpuid];
                if (sc->running == c) {
                        while (sc->running == c)
                                tsleep(&sc->running, 0, "crace", 1);
                if ((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 0)
                        break;
                kprintf("Warning: %s: callout race\n", curthread->td_comm);
 * Terminate a callout
 *
 * This function will stop any pending callout and also block while the
 * callout's function is running. It should only be used when the caller
 * is ready to destroy the callout structure, and only in cases where no
 * deadlock is possible (a deadlock could occur if the callout function
 * acquires locks that the caller of callout_terminate() already holds).
 *
 * This function clears the CALLOUT_DID_INIT flag.
 *
 * lwkt_token locks are ok.
callout_terminate(struct callout *c)
        if (c->c_flags & CALLOUT_DID_INIT) {
                sc = &softclock_pcpu_ary[c->c_gd->gd_cpuid];
                if (sc->running == c) {
                        while (sc->running == c)
                                tsleep(&sc->running, 0, "crace", 1);
                KKASSERT((c->c_flags & (CALLOUT_PENDING|CALLOUT_ACTIVE)) == 0);
                c->c_flags &= ~CALLOUT_DID_INIT;
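/*
 * Illustrative teardown ordering (hypothetical names, not part of this
 * file): callout_terminate() is the call to use right before the structure
 * embedding the callout is destroyed, since it waits for the callback to
 * finish and clears CALLOUT_DID_INIT:
 *
 *        callout_terminate(&sc->foo_timer);
 *        kfree(sc, M_FOODEV);
 */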
 * Prepare a callout structure for use by callout_reset() and/or
 * callout_stop(). The MP version of this routine requires that the callback
 * function installed by callout_reset() be MP safe.
 *
 * The init functions can be called from any cpu and do not have to be
 * called from the cpu that the timer will eventually run on.
callout_init(struct callout *c)
        c->c_flags = CALLOUT_DID_INIT;

callout_init_mp(struct callout *c)
        callout_init(c);
        c->c_flags |= CALLOUT_MPSAFE;
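/*
 * Illustrative contrast (hypothetical callout name, not part of this file):
 *
 *        callout_init(&sc->foo_timer);           callback runs with the MP lock
 *        callout_init_mp(&sc->foo_timer);        callback must be MP safe and
 *                                                runs without the MP lock
 *
 * The choice only affects how softclock_handler() dispatches the callback;
 * the callout_reset()/callout_stop() API is the same either way.
 */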