/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_timeout.c,v 1.59.2.1 2001/11/13 18:24:52 archie Exp $
 * $DragonFly: src/sys/kern/kern_timeout.c,v 1.12 2004/09/17 00:18:09 dillon Exp $
 */
/*
 * DRAGONFLY BGL STATUS
 *
 *	All the API functions should be MP safe.
 *
 *	The callback functions will be flagged as being MP safe if the
 *	timeout structure is initialized with callout_init_mp() instead of
 *	callout_init().
 *
 *	The helper threads cannot be made preempt-capable until after we
 *	clean up all the uses of splsoftclock() and related interlocks (which
 *	require the related functions to be MP safe as well).
 */
/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 *
 * The per-cpu augmentation was done by Matthew Dillon.
 */
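
/*
 * Illustrative sketch (editorial addition, not part of the original text):
 * the callwheel is an ordinary hashed timing wheel.  A callout scheduled
 * for absolute tick c_time hashes into bucket (c_time & callwheelmask),
 * and because callwheelsize is a power of 2 the mask is simply
 * callwheelsize - 1.  For example, with callwheelsize = 256:
 *
 *	c_time = sc->curticks + to_ticks;
 *	bucket = c_time & callwheelmask;	(equivalent to c_time % 256)
 *
 * Entries that hash to the same bucket but belong to a later revolution of
 * the wheel are skipped by the c_time != softticks check in
 * softclock_handler() below.
 */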

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <machine/ipl.h>

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100		/* Maximum allowed value of steps. */
#endif

struct softclock_pcpu {
	struct callout_list callfree;
	struct callout_tailq *callwheel;
	struct callout * volatile next;
	int softticks;		/* softticks index */
	int curticks;		/* per-cpu ticks counter */
	int isrunning;		/* helper thread is active */
	struct thread thread;
};

typedef struct softclock_pcpu *softclock_pcpu_t;

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
static MALLOC_DEFINE(M_CALLOUT, "callout", "callout structures");
static int callwheelsize;
static int callwheelbits;
static int callwheelmask;
static struct softclock_pcpu softclock_pcpu_ary[MAXCPU];

static void softclock_handler(void *arg);

static void
swi_softclock_setup(void *arg)
{
	int cpu;
	int i;

	/*
	 * Figure out how large a callwheel we need.  It must be a power of 2.
	 */
	callwheelsize = 1;
	callwheelbits = 0;
	while (callwheelsize < ncallout) {
		callwheelsize <<= 1;
		++callwheelbits;
	}
	callwheelmask = callwheelsize - 1;

	/*
	 * Initialize per-cpu data structures.
	 */
	for (cpu = 0; cpu < ncpus; ++cpu) {
		softclock_pcpu_t sc;
		struct callout *callout;

		sc = &softclock_pcpu_ary[cpu];

		sc->callwheel = malloc(sizeof(*sc->callwheel) * callwheelsize,
				       M_CALLOUT, M_WAITOK|M_ZERO);
		for (i = 0; i < callwheelsize; ++i)
			TAILQ_INIT(&sc->callwheel[i]);

		SLIST_INIT(&sc->callfree);
		callout = malloc(sizeof(struct callout) * ncallout,
				 M_CALLOUT, M_WAITOK|M_ZERO);
		for (i = 0; i < ncallout; ++i) {
			callout_init(&callout[i]);
			callout[i].c_flags |= CALLOUT_LOCAL_ALLOC;
			SLIST_INSERT_HEAD(&sc->callfree, &callout[i],
					  c_links.sle);
		}

		/*
		 * Create a preemption-capable thread for each cpu to handle
		 * softclock timeouts on that cpu.  The preemption can only
		 * be blocked by a critical section.  The thread can itself
		 * be preempted by normal interrupts.
		 */
		lwkt_create(softclock_handler, sc, NULL,
			    &sc->thread, TDF_STOPREQ|TDF_INTTHREAD, -1,
			    "softclock %d", cpu);
		lwkt_setpri(&sc->thread, TDPRI_SOFT_NORM);

		/*
		 * Do not make the thread preemptable until we clean up all
		 * the splsoftclock() calls in the system.  Since the threads
		 * are no longer operated as a software interrupt, the
		 * splsoftclock() calls will not have any effect on them.
		 */
		sc->thread.td_preemptable = lwkt_preempt;
	}
}

SYSINIT(softclock_setup, SI_SUB_CPU, SI_ORDER_ANY, swi_softclock_setup, NULL);

/*
 * This routine is called from hardclock() (basically a FASTint/IPI) on
 * each cpu in the system.  sc->curticks is this cpu's notion of the
 * timebase.  It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'!
 * sc->softticks is where the callwheel is currently indexed.
 *
 * WARNING!  The MP lock is not necessarily held on call, nor can it be
 * safely obtained.
 *
 * sc->softticks is adjusted by either this routine or our helper thread
 * depending on whether the helper thread is running or not.
 */
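/*
 * Worked example (editorial addition): if sc->curticks is 1000 and
 * sc->softticks is 998, buckets 998 through 1000 still need to be scanned
 * and the helper thread is (or will be) scheduled to catch up.  The wheel
 * is fully serviced for the current tick once sc->softticks reaches
 * sc->curticks + 1.
 */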
void
hardclock_softtick(globaldata_t gd)
{
	softclock_pcpu_t sc;

	sc = &softclock_pcpu_ary[gd->gd_cpuid];
	++sc->curticks;
	if (sc->isrunning)
		return;
	if (sc->softticks == sc->curticks) {
		/*
		 * in sync, only wakeup the thread if there is something to
		 * do.
		 */
		if (TAILQ_FIRST(&sc->callwheel[sc->softticks & callwheelmask]))
		{
			sc->isrunning = 1;
			lwkt_schedule(&sc->thread);
		} else {
			++sc->softticks;
		}
	} else {
		/*
		 * out of sync, wakeup the thread unconditionally so it can
		 * catch up.
		 */
		sc->isrunning = 1;
		lwkt_schedule(&sc->thread);
	}
}

/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks.
 *
 * The thread starts with the MP lock held and not in a critical section.
 * The loop itself is MP safe while individual callbacks may or may not
 * be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	void (*c_func)(void *);
	void *c_arg;
	int c_flags;

	sc = arg;
	rel_mplock();		/* the loop itself is MP safe */
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & callwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			if (c->c_time != sc->softticks)
				continue;
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			c_func = c->c_func;
			c_arg = c->c_arg;
			c_flags = c->c_flags;
			KKASSERT(c->c_flags & CALLOUT_DID_INIT);
			if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
				c->c_flags = CALLOUT_LOCAL_ALLOC |
					     CALLOUT_DID_INIT;
				SLIST_INSERT_HEAD(&sc->callfree,
						  c, c_links.sle);
			} else {
				c->c_flags &= ~CALLOUT_PENDING;
			}

			/*
			 * Call MP safe callbacks without the MP lock,
			 * wrap everything else in the MP lock.
			 */
			crit_exit();
			if (c_flags & CALLOUT_MPSAFE) {
				c_func(c_arg);
			} else {
				get_mplock();
				c_func(c_arg);
				rel_mplock();
			}
			crit_enter();
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
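/*
 * Usage sketch (editorial addition; mydev_tick and sc are hypothetical
 * names, not part of this file).  The legacy interface hands back a
 * callout_handle which, together with the original function and argument,
 * identifies the entry for untimeout():
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(mydev_tick, sc, hz);	arm for roughly one second
 *	untimeout(mydev_tick, sc, h);		cancel if still pending
 */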
struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
	softclock_pcpu_t sc;
	struct callout *new;
	struct callout_handle handle;

	sc = &softclock_pcpu_ary[mycpu->gd_cpuid];
	crit_enter();

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&sc->callfree);
	if (new == NULL) {
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	}
	SLIST_REMOVE_HEAD(&sc->callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	crit_exit();
	return (handle);
}

void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	crit_enter();
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	crit_exit();
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 * callout_init_mp() - same but any installed functions must be MP safe.
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
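/*
 * Usage sketch (editorial addition; struct mydev_softc, mydev_attach,
 * mydev_detach and mydev_watchdog are hypothetical).  A client embeds a
 * struct callout in its own data structure and drives it with the
 * functions above:
 *
 *	struct mydev_softc {
 *		struct callout watchdog;
 *	};
 *
 *	static void
 *	mydev_watchdog(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		callout_reset(&sc->watchdog, 5 * hz, mydev_watchdog, sc);
 *	}
 *
 *	static void
 *	mydev_attach(struct mydev_softc *sc)
 *	{
 *		callout_init(&sc->watchdog);
 *		callout_reset(&sc->watchdog, 5 * hz, mydev_watchdog, sc);
 *	}
 *
 *	static void
 *	mydev_detach(struct mydev_softc *sc)
 *	{
 *		callout_stop(&sc->watchdog);
 *	}
 */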

/*
 * Start or restart a timeout.  Install the callout structure in the
 * callwheel.  Callers may legally pass any value, even if 0 or negative,
 * but since the sc->curticks index may have already been processed a
 * minimum timeout of 1 tick will be enforced.
 *
 * The callout is installed on and will be processed on the current cpu's
 * callwheel.
 */
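/*
 * For illustration (editorial addition): with hz = 100, both
 * callout_reset(c, 0, ftn, arg) and callout_reset(c, -5, ftn, arg) behave
 * as if to_ticks were 1, so ftn runs on the next softclock scan of this
 * cpu's callwheel, at most roughly 10ms out.
 */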
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *),
		void *arg)
{
	softclock_pcpu_t sc;
	globaldata_t gd;

	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		printf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		db_print_backtrace();
	}
	gd = mycpu;
	sc = &softclock_pcpu_ary[gd->gd_cpuid];
	crit_enter_gd(gd);

	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = sc->curticks + to_ticks;
	c->c_gd = gd;

	TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	crit_exit_gd(gd);
}

/*
 * Stop a running timer.  WARNING!  If called on a cpu other than the one
 * the callout was started on this function will liveloop on its IPI to
 * the target cpu to process the request.  It is possible for the callout
 * to execute in that case.
 *
 * WARNING!  This routine may be called from an IPI.
 */
void
callout_stop(struct callout *c)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;

	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		printf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		db_print_backtrace();
	}
	crit_enter_gd(gd);

	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if ((c->c_flags & CALLOUT_PENDING) == 0) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		crit_exit_gd(gd);
		return;
	}
	if ((tgd = c->c_gd) != gd) {
		/*
		 * If the callout is owned by a different CPU we have to
		 * execute the function synchronously on the target cpu.
		 */
		int seq;

		cpu_mb1();	/* don't let tgd alias c_gd */
		seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c);
		lwkt_wait_ipiq(tgd, seq);
	} else {
		/*
		 * If the callout is owned by the same CPU we can
		 * process it directly, but if we are racing our helper
		 * thread (sc->next), we have to adjust sc->next.  The
		 * race is interlocked by a critical section.
		 */
		sc = &softclock_pcpu_ary[gd->gd_cpuid];

		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
		if (sc->next == c)
			sc->next = TAILQ_NEXT(c, c_links.tqe);

		TAILQ_REMOVE(&sc->callwheel[c->c_time & callwheelmask],
			     c, c_links.tqe);

		if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
			SLIST_INSERT_HEAD(&sc->callfree, c, c_links.sle);
		}
	}
	crit_exit_gd(gd);
}

/*
 * Prepare a callout structure for use by callout_reset() and/or
 * callout_stop().  The MP version of this routine requires that the
 * callback function installed by callout_reset() be MP safe.
 */
void
callout_init(struct callout *c)
{
	bzero(c, sizeof *c);
	c->c_flags = CALLOUT_DID_INIT;
}

void
callout_init_mp(struct callout *c)
{
	callout_init(c);
	c->c_flags |= CALLOUT_MPSAFE;
}
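
/*
 * For illustration (editorial addition): a client whose callback does not
 * depend on the Big Giant Lock would initialize with callout_init_mp()
 * instead, e.g.
 *
 *	callout_init_mp(&sc->watchdog);	(sc->watchdog as in the hypothetical
 *					 example above)
 *
 * so that softclock_handler() invokes the callback without acquiring the
 * MP lock.
 */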

/* What, are you joking?  This is nuts! -Matt */

#ifdef APM_FIXUP_CALLTODO

/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
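/*
 * Worked example (editorial addition): with hz = 100 (tick = 10000us), a
 * 2 hour suspend gives time_change->tv_sec = 7200, which falls into the
 * second branch below (7200 > LONG_MAX / 1000000 on 32-bit longs), so
 *
 *	delta_ticks = 7200 * 100 + (0 + 9999) / 10000 + 1 = 720001
 *
 * and every calltodo entry is aged by that many ticks.
 */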
void
adjust_timeout_calltodo(struct timeval *time_change)
{
	struct callout *p;
	unsigned long delta_ticks;
	int s;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	s = splhigh();
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	splx(s);

	return;
}

#endif /* APM_FIXUP_CALLTODO */