From: Matthew Dillon
Date: Fri, 17 Sep 2004 00:18:16 +0000 (+0000)
Subject: Move all the softclock/callout initialization out of MD and into MI. Get rid
X-Git-Tag: v2.0.1~10208
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/92b561b7dcf46484c308ba342eff570648027647

Move all the softclock/callout initialization out of MD and into MI. Get rid of the extremely low-level virtual address space allocation for the old timeout() API's data structures and simply malloc() them in the new MI code (they will soon be removed entirely when the old API is removed).

Replace the softclock swi with a non-preempting, high-priority helper thread (one per cpu). The lack of preemption allows us to retain compatibility with splsoftclock() use elsewhere in the kernel until those uses can be cleaned up; after that we *might* make the softclock threads preempt-capable.

Rewrite the callout_*() implementation to make all data structures per-cpu. Add an MP flag (similar to what FreeBSD did), but create a separate API function, callout_init_mp(), instead of adding an additional badly defined argument to callout_init(). callout_reset() and callout_stop() will now complain if the supplied structure is uninitialized (zero'd, i.e. callout_init() was not called on it).

We intend to obsolete the old timeout()/untimeout() APIs; work in that regard is ongoing, and Joerg will soon import the new OpenBSD timeout_{set,add,del}() API. The callout*() API will be retained. In DragonFly these APIs will guarantee that the callback occurs on the same cpu the timeout was added on, a feature the TCP protocol stacks will soon be able to take advantage of.

Idea and Brainstorming by: Jeffrey Hsu, Joerg Sonnenberger, and Matthew Dillon
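For illustration, a minimal sketch of how a driver uses the reworked API. The foo_* names are hypothetical; only the callout structure, the callout_*() calls, and the hz global are real. A callout armed this way is guaranteed to fire on the cpu that armed it. Two further sketches, covering the callwheel arithmetic and the callback-side macros, follow the diff at the end of this page.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/callout.h>

    static struct callout foo_timer;        /* hypothetical example timer */

    static void
    foo_tick(void *arg)
    {
            /* periodic work goes here; always runs on the cpu that armed us */
    }

    static void
    foo_attach(void)
    {
            /*
             * One-time setup.  Use callout_init_mp() instead if foo_tick()
             * is MP safe and may be dispatched without the Big Giant Lock.
             */
            callout_init(&foo_timer);
    }

    static void
    foo_start(void)
    {
            /* (re)arm: fires in hz/10 ticks on this cpu */
            callout_reset(&foo_timer, hz / 10, foo_tick, NULL);
    }

    static void
    foo_detach(void)
    {
            /* returns 1 if a pending timeout was removed, 0 otherwise */
            callout_stop(&foo_timer);
    }

The rndtest.c hunk below shows the same conversion in-tree: that driver's callback is MP safe, so it switches directly to callout_init_mp().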
---
diff --git a/sys/dev/crypto/rndtest/rndtest.c b/sys/dev/crypto/rndtest/rndtest.c index 7876d9e35c..4317788cb8 100644 --- a/sys/dev/crypto/rndtest/rndtest.c +++ b/sys/dev/crypto/rndtest/rndtest.c @@ -1,5 +1,5 @@ /* $FreeBSD: src/sys/dev/rndtest/rndtest.c,v 1.1.4.1 2003/06/04 17:10:30 sam Exp $ */ -/* $DragonFly: src/sys/dev/crypto/rndtest/rndtest.c,v 1.6 2004/05/13 19:44:31 dillon Exp $ */ +/* $DragonFly: src/sys/dev/crypto/rndtest/rndtest.c,v 1.7 2004/09/17 00:18:16 dillon Exp $ */ /* $OpenBSD$ */ /* @@ -93,12 +93,8 @@ rndtest_attach(device_t dev) rsp->rs_discard = 1; rsp->rs_collect = 1; rsp->rs_parent = dev; -#if defined(__DragonFly__) || __FreeBSD_version < 500000 - callout_init(&rsp->rs_to); -#else /* NB: 1 means the callout runs w/o Giant locked */ - callout_init(&rsp->rs_to, 1); -#endif + callout_init_mp(&rsp->rs_to); return (rsp); }
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c index 7f1deb6f4a..1a953f852a 100644 --- a/sys/i386/i386/machdep.c +++ b/sys/i386/i386/machdep.c @@ -36,7 +36,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.65 2004/08/12 19:59:30 eirikn Exp $ + * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.66 2004/09/17 00:18:07 dillon Exp $ */ #include "use_apm.h" @@ -66,7 +66,6 @@ #include #include #include -#include #include #include #include @@ -253,7 +252,6 @@ static void cpu_startup(dummy) void *dummy; { - unsigned i; caddr_t v; vm_offset_t minaddr; vm_offset_t maxaddr; @@ -290,15 +288,6 @@ cpu_startup(dummy) } } - /* - * Calculate callout wheel size - */ - for (callwheelsize = 1, callwheelbits = 0; - callwheelsize < ncallout; - callwheelsize <<= 1, ++callwheelbits) - ; - callwheelmask = callwheelsize - 1; - /* * Allocate space for system data structures. * The first available kernel virtual address is in "v". * @@ -323,9 +312,6 @@ again: #define valloclim(name, type, num, lim) \ (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num))) - valloc(callout, struct callout, ncallout); - valloc(callwheel, struct callout_tailq, callwheelsize); - /* * The nominal buffer size (and minimum KVA allocation) is BKVASIZE. * For the first 64MB of ram nominally allocate sufficient buffers to * @@ -401,20 +387,6 @@ again: exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, (16*(ARG_MAX+(PAGE_SIZE*3)))); - /* - * Initialize callouts - */ - SLIST_INIT(&callfree); - for (i = 0; i < ncallout; i++) { - callout_init(&callout[i]); - callout[i].c_flags = CALLOUT_LOCAL_ALLOC; - SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); - } - - for (i = 0; i < callwheelsize; i++) { - TAILQ_INIT(&callwheel[i]); - } - #if defined(USERCONFIG) userconfig(); cninit(); /* the preferred console may have changed */
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index 35cd2e27ae..d64e348f8f 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -70,7 +70,7 @@ * * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $ - * $DragonFly: src/sys/kern/kern_clock.c,v 1.23 2004/08/02 23:20:30 dillon Exp $ + * $DragonFly: src/sys/kern/kern_clock.c,v 1.24 2004/09/17 00:18:09 dillon Exp $ */ #include "opt_ntp.h" @@ -288,10 +288,9 @@ hardclock(systimer_t info, struct intrframe *frame) } /* - * The system-wide ticks and softticks are only updated by cpu #0. - * Callwheel actions are also (at the moment) only handled by cpu #0. - * Finally, we also do NTP related timedelta/tickdelta adjustments - * by adjusting basetime. + * The system-wide ticks counter and NTP related timedelta/tickdelta + * adjustments only occur on cpu #0. NTP adjustments are accomplished + * by updating basetime. */ if (gd->gd_cpuid == 0) { struct timespec nts; @@ -303,12 +302,6 @@ hardclock(systimer_t info, struct intrframe *frame) hardclock_device_poll(); /* mpsafe, short and quick */ #endif /* DEVICE_POLLING */ - if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { - setsoftclock(); - } else if (softticks + 1 == ticks) { - ++softticks; - } - #if 0 if (tco->tc_poll_pps) tco->tc_poll_pps(tco); @@ -370,6 +363,11 @@ hardclock(systimer_t info, struct intrframe *frame) } } + /* + * softticks are handled for all cpus + */ + hardclock_softtick(gd); + /* * ITimer handling is per-tick, per-cpu. I don't think psignal() * is mpsafe on curproc, so XXX get the mplock.
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c index 126aea28c1..a8911df73b 100644 --- a/sys/kern/kern_timeout.c +++ b/sys/kern/kern_timeout.c @@ -1,4 +1,37 @@ -/*- +/* + * Copyright (c) 2004 The DragonFly Project. All rights reserved. + * + * This code is derived from software contributed to The DragonFly Project + * by Matthew Dillon + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3.
Neither the name of The DragonFly Project nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific, prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. @@ -37,7 +70,33 @@ * * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 * $FreeBSD: src/sys/kern/kern_timeout.c,v 1.59.2.1 2001/11/13 18:24:52 archie Exp $ - * $DragonFly: src/sys/kern/kern_timeout.c,v 1.11 2004/09/13 23:18:20 dillon Exp $ + * $DragonFly: src/sys/kern/kern_timeout.c,v 1.12 2004/09/17 00:18:09 dillon Exp $ + */ +/* + * DRAGONFLY BGL STATUS + * + * All the API functions should be MP safe. + * + * The callback functions will be flagged as being MP safe if the + * timeout structure is initialized with callout_init_mp() instead of + * callout_init(). + * + * The helper threads cannot be made preempt-capable until after we + * clean up all the uses of splsoftclock() and related interlocks (which + * require the related functions to be MP safe as well). + */ +/* + * The callout mechanism is based on the work of Adam M. Costello and + * George Varghese, published in a technical report entitled "Redesigning + * the BSD Callout and Timer Facilities" and modified slightly for inclusion + * in FreeBSD by Justin T. Gibbs. The original work on the data structures + * used in this implementation was published by G. Varghese and T. Lauck in + * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for + * the Efficient Implementation of a Timer Facility" in the Proceedings of + * the 11th ACM Annual Symposium on Operating Systems Principles, + * Austin, Texas Nov 1987. + * + * The per-cpu augmentation was done by Matthew Dillon. */ #include @@ -48,99 +107,217 @@ #include #include #include +#include "opt_ddb.h" + +#ifndef MAX_SOFTCLOCK_STEPS +#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */ +#endif + + +struct softclock_pcpu { + struct callout_list callfree; + struct callout_tailq *callwheel; + struct callout * volatile next; + int softticks; /* softticks index */ + int curticks; /* per-cpu ticks counter */ + int isrunning; + struct thread thread; + +}; + +typedef struct softclock_pcpu *softclock_pcpu_t; /* * TODO: * allocate more timeout table slots when table overflows. 
*/ +static MALLOC_DEFINE(M_CALLOUT, "callout", "callout structures"); +static int callwheelsize; +static int callwheelbits; +static int callwheelmask; +static struct softclock_pcpu softclock_pcpu_ary[MAXCPU]; + +static void softclock_handler(void *arg); + +static void +swi_softclock_setup(void *arg) +{ + int cpu; + int i; -/* Exported to machdep.c and/or kern_clock.c. */ -struct callout *callout; -struct callout_list callfree; -int callwheelsize, callwheelbits, callwheelmask; -struct callout_tailq *callwheel; -int softticks; /* Like ticks, but for softclock(). */ + /* + * Figure out how large a callwheel we need. It must be a power of 2. + */ + callwheelsize = 1; + callwheelbits = 0; + while (callwheelsize < ncallout) { + callwheelsize <<= 1; + ++callwheelbits; + } + callwheelmask = callwheelsize - 1; -static struct callout * volatile nextsoftcheck; /* Next callout to checked. */ + /* + * Initialize per-cpu data structures. + */ + for (cpu = 0; cpu < ncpus; ++cpu) { + softclock_pcpu_t sc; + struct callout *callout; + + sc = &softclock_pcpu_ary[cpu]; + + sc->callwheel = malloc(sizeof(*sc->callwheel) * callwheelsize, + M_CALLOUT, M_WAITOK|M_ZERO); + for (i = 0; i < callwheelsize; ++i) + TAILQ_INIT(&sc->callwheel[i]); + + SLIST_INIT(&sc->callfree); + callout = malloc(sizeof(struct callout) * ncallout, + M_CALLOUT, M_WAITOK|M_ZERO); + for (i = 0; i < ncallout; ++i) { + callout_init(&callout[i]); + callout[i].c_flags |= CALLOUT_LOCAL_ALLOC; + SLIST_INSERT_HEAD(&sc->callfree, &callout[i], + c_links.sle); + } + + /* + * Create a preemption-capable thread for each cpu to handle + * softclock timeouts on that cpu. The preemption can only + * be blocked by a critical section. The thread can itself + * be preempted by normal interrupts. + */ + lwkt_create(softclock_handler, sc, NULL, + &sc->thread, TDF_STOPREQ|TDF_INTTHREAD, -1, + "softclock %d", cpu); + lwkt_setpri(&sc->thread, TDPRI_SOFT_NORM); +#if 0 + /* + * Do not make the thread preemptable until we clean up all + * the splsoftclock() calls in the system. Since the threads + * are no longer operated as a software interrupt, the + * splsoftclock() calls will not have any effect on them. + */ + sc->thread.td_preemptable = lwkt_preempt; +#endif + } +} + +SYSINIT(softclock_setup, SI_SUB_CPU, SI_ORDER_ANY, swi_softclock_setup, NULL); /* - * The callout mechanism is based on the work of Adam M. Costello and - * George Varghese, published in a technical report entitled "Redesigning - * the BSD Callout and Timer Facilities" and modified slightly for inclusion - * in FreeBSD by Justin T. Gibbs. The original work on the data structures - * used in this implementation was published by G. Varghese and T. Lauck in - * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for - * the Efficient Implementation of a Timer Facility" in the Proceedings of - * the 11th ACM Annual Symposium on Operating Systems Principles, - * Austin, Texas Nov 1987. + * This routine is called from the hardclock() (basically a FASTint/IPI) on + * each cpu in the system. sc->curticks is this cpu's notion of the timebase. + * It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'! sc->softticks is where + * the callwheel is currently indexed. + * + * WARNING! The MP lock is not necessarily held on call, nor can it be + * safely obtained. + * + * sc->softticks is adjusted by either this routine or our helper thread + * depending on whether the helper thread is running or not. 
*/ +void +hardclock_softtick(globaldata_t gd) +{ + softclock_pcpu_t sc; + + sc = &softclock_pcpu_ary[gd->gd_cpuid]; + ++sc->curticks; + if (sc->isrunning) + return; + if (sc->softticks == sc->curticks) { + /* + * in sync, only wakeup the thread if there is something to + * do. + */ + if (TAILQ_FIRST(&sc->callwheel[sc->softticks & callwheelmask])) + { + sc->isrunning = 1; + lwkt_schedule(&sc->thread); + } else { + ++sc->softticks; + } + } else { + /* + * out of sync, wakeup the thread unconditionally so it can + * catch up. + */ + sc->isrunning = 1; + lwkt_schedule(&sc->thread); + } +} /* - * Software (low priority) clock interrupt. - * Run periodic events from timeout queue. + * This procedure is the main loop of our per-cpu helper thread. The + * sc->isrunning flag prevents us from racing hardclock_softtick() and + * a critical section is sufficient to interlock sc->curticks. + * + * The thread starts with the MP lock held and not in a critical section. + * The loop itself is MP safe while individual callbacks may or may not + * be, so we obtain or release the MP lock as appropriate. */ static void -swi_softclock(void *dummy) +softclock_handler(void *arg) { + softclock_pcpu_t sc; struct callout *c; struct callout_tailq *bucket; - int curticks; - int steps; /* #steps since we last allowed interrupts */ - -#ifndef MAX_SOFTCLOCK_STEPS -#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */ -#endif /* MAX_SOFTCLOCK_STEPS */ - - steps = 0; + void (*c_func)(void *); + void *c_arg; + int c_flags; +#ifdef SMP + int mpsafe = 0; +#endif + + sc = arg; crit_enter(); - while (softticks != ticks) { - softticks++; - /* - * softticks may be modified by hard clock, so cache - * it while we work on a given bucket. - */ - curticks = softticks; - bucket = &callwheel[curticks & callwheelmask]; - c = TAILQ_FIRST(bucket); - while (c) { - if (c->c_time != curticks) { - c = TAILQ_NEXT(c, c_links.tqe); - ++steps; - if (steps >= MAX_SOFTCLOCK_STEPS) { - nextsoftcheck = c; - /* Give interrupts a chance. 
*/ - crit_exit(); - crit_enter(); - c = nextsoftcheck; - steps = 0; +loop: + while (sc->softticks != (int)(sc->curticks + 1)) { + bucket = &sc->callwheel[sc->softticks & callwheelmask]; + + for (c = TAILQ_FIRST(bucket); c; c = sc->next) { + sc->next = TAILQ_NEXT(c, c_links.tqe); + if (c->c_time != sc->softticks) + continue; + TAILQ_REMOVE(bucket, c, c_links.tqe); + c_func = c->c_func; + c_arg = c->c_arg; + c_flags = c->c_flags; + c->c_func = NULL; + KKASSERT(c->c_flags & CALLOUT_DID_INIT); + if (c->c_flags & CALLOUT_LOCAL_ALLOC) { + c->c_flags = CALLOUT_LOCAL_ALLOC | + CALLOUT_DID_INIT; + SLIST_INSERT_HEAD(&sc->callfree, + c, c_links.sle); + } else { + c->c_flags &= ~CALLOUT_PENDING; + } + crit_exit(); +#ifdef SMP + if (c_flags & CALLOUT_MPSAFE) { + if (mpsafe == 0) { + mpsafe = 1; + rel_mplock(); } } else { - void (*c_func)(void *); - void *c_arg; - - nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); - TAILQ_REMOVE(bucket, c, c_links.tqe); - c_func = c->c_func; - c_arg = c->c_arg; - c->c_func = NULL; - if (c->c_flags & CALLOUT_LOCAL_ALLOC) { - c->c_flags = CALLOUT_LOCAL_ALLOC; - SLIST_INSERT_HEAD(&callfree, c, - c_links.sle); - } else { - c->c_flags = - (c->c_flags & ~CALLOUT_PENDING); + if (mpsafe) { + mpsafe = 0; + get_mplock(); } - crit_exit(); - c_func(c_arg); - crit_enter(); - steps = 0; - c = nextsoftcheck; } +#endif + c_func(c_arg); + crit_enter(); + /* NOTE: list may have changed */ } + ++sc->softticks; } - nextsoftcheck = NULL; - crit_exit(); + sc->isrunning = 0; + lwkt_deschedule_self(&sc->thread); /* == curthread */ + lwkt_switch(); + goto loop; + /* NOT REACHED */ } /* @@ -162,31 +339,31 @@ swi_softclock(void *dummy) struct callout_handle timeout(timeout_t *ftn, void *arg, int to_ticks) { - int s; + softclock_pcpu_t sc; struct callout *new; struct callout_handle handle; - s = splhigh(); + sc = &softclock_pcpu_ary[mycpu->gd_cpuid]; + crit_enter(); /* Fill in the next free callout structure. */ - new = SLIST_FIRST(&callfree); - if (new == NULL) + new = SLIST_FIRST(&sc->callfree); + if (new == NULL) { /* XXX Attempt to malloc first */ panic("timeout table full"); - SLIST_REMOVE_HEAD(&callfree, c_links.sle); + } + SLIST_REMOVE_HEAD(&sc->callfree, c_links.sle); callout_reset(new, to_ticks, ftn, arg); handle.callout = new; - splx(s); + crit_exit(); return (handle); } void untimeout(timeout_t *ftn, void *arg, struct callout_handle handle) { - int s; - /* * Check for a handle that was initialized * by callout_handle_init, but never used @@ -195,10 +372,10 @@ untimeout(timeout_t *ftn, void *arg, struct callout_handle handle) if (handle.callout == NULL) return; - s = splhigh(); + crit_enter(); if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) callout_stop(handle.callout); - splx(s); + crit_exit(); } void @@ -213,7 +390,8 @@ callout_handle_init(struct callout_handle *handle) * callout_reset() - establish or change a timeout * callout_stop() - disestablish a timeout * callout_init() - initialize a callout structure so that it can - * safely be passed to callout_reset() and callout_stop() + * safely be passed to callout_reset() and callout_stop() + * callout_init_mp() - same but any installed functions must be MP safe. * * defines three convenience macros: * @@ -221,78 +399,154 @@ callout_handle_init(struct callout_handle *handle) * callout_pending() - returns truth if callout is still waiting for timeout * callout_deactivate() - marks the callout as having been serviced */ + +/* + * Start or restart a timeout. Install the callout structure in the + * callwheel. 
Callers may legally pass any value, even zero or negative, but since the sc->curticks index may have already been processed, a minimum timeout of 1 tick will be enforced. + * + * The callout is installed on and will be processed on the current cpu's + * callout wheel. + */ void -callout_reset(struct callout *c, int to_ticks, - void (*ftn)(void *), void *arg) +callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), + void *arg) { - int s; + softclock_pcpu_t sc; + globaldata_t gd; + +#ifdef INVARIANTS + if ((c->c_flags & CALLOUT_DID_INIT) == 0) { + callout_init(c); + printf( + "callout_reset(%p) from %p: callout was not initialized\n", + c, ((int **)&c)[-1]); +#ifdef DDB + db_print_backtrace(); +#endif + } +#endif + gd = mycpu; + sc = &softclock_pcpu_ary[gd->gd_cpuid]; + crit_enter_gd(gd); - s = splhigh(); if (c->c_flags & CALLOUT_PENDING) callout_stop(c); - /* - * We could spl down here and back up at the TAILQ_INSERT_TAIL, - * but there's no point since doing this setup doesn't take much - * time. - */ if (to_ticks <= 0) to_ticks = 1; c->c_arg = arg; c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); c->c_func = ftn; - c->c_time = ticks + to_ticks; - TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask], + c->c_time = sc->curticks + to_ticks; +#ifdef SMP + c->c_gd = gd; +#endif + + TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & callwheelmask], c, c_links.tqe); - splx(s); - + crit_exit_gd(gd); } +/* + * Stop a running timer. WARNING! If called on a cpu other than the one + * the callout was started on, this function will liveloop on its IPI to + * the target cpu to process the request. It is possible for the callout + * to execute in that case. + * + * WARNING! This routine may be called from an IPI + */ int callout_stop(struct callout *c) { - int s; + globaldata_t gd = mycpu; +#ifdef SMP + globaldata_t tgd; +#endif + softclock_pcpu_t sc; + +#ifdef INVARIANTS + if ((c->c_flags & CALLOUT_DID_INIT) == 0) { + callout_init(c); + printf( + "callout_stop(%p) from %p: callout was not initialized\n", + c, ((int **)&c)[-1]); +#ifdef DDB + db_print_backtrace(); +#endif + } +#endif + crit_enter_gd(gd); - s = splhigh(); /* * Don't attempt to delete a callout that's not on the queue. */ - if (!(c->c_flags & CALLOUT_PENDING)) { + if ((c->c_flags & CALLOUT_PENDING) == 0) { c->c_flags &= ~CALLOUT_ACTIVE; - splx(s); + crit_exit_gd(gd); return (0); } - c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); +#ifdef SMP + if ((tgd = c->c_gd) != gd) { + /* + * If the callout is owned by a different CPU we have to + * execute the function synchronously on the target cpu. + */ + int seq; + + cpu_mb1(); /* don't let tgd alias c_gd */ + seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c); + lwkt_wait_ipiq(tgd, seq); + } else +#endif + { + /* + * If the callout is owned by the same CPU we can + * process it directly, but if we are racing our helper + * thread (sc->next), we have to adjust sc->next. The + * race is interlocked by a critical section.
*/ + sc = &softclock_pcpu_ary[gd->gd_cpuid]; - if (nextsoftcheck == c) { - nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); - } - TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe); - c->c_func = NULL; + c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); + if (sc->next == c) + sc->next = TAILQ_NEXT(c, c_links.tqe); + + TAILQ_REMOVE(&sc->callwheel[c->c_time & callwheelmask], + c, c_links.tqe); + c->c_func = NULL; - if (c->c_flags & CALLOUT_LOCAL_ALLOC) { - SLIST_INSERT_HEAD(&callfree, c, c_links.sle); + if (c->c_flags & CALLOUT_LOCAL_ALLOC) { + SLIST_INSERT_HEAD(&sc->callfree, c, c_links.sle); + } } - splx(s); + crit_exit_gd(gd); return (1); } +/* + * Prepare a callout structure for use by callout_reset() and/or + * callout_stop(). The MP version of this routine requires that the callback + * function installed by callout_reset() be MP safe. + */ void callout_init(struct callout *c) { bzero(c, sizeof *c); + c->c_flags = CALLOUT_DID_INIT; } -static void -swi_softclock_setup(void *arg) +void +callout_init_mp(struct callout *c) { - register_swi(SWI_CLOCK, swi_softclock, NULL, "swi_sftclk"); - swi_setpriority(SWI_CLOCK, TDPRI_SOFT_TIMER); + callout_init(c); + c->c_flags |= CALLOUT_MPSAFE; } -SYSINIT(vm_setup, SI_SUB_CPU, SI_ORDER_ANY, swi_softclock_setup, NULL); - +/* What, are you joking? This is nuts! -Matt */ +#if 0 #ifdef APM_FIXUP_CALLTODO /* * Adjust the kernel calltodo timeout list. This routine is used after * @@ -358,3 +612,5 @@ adjust_timeout_calltodo(struct timeval *time_change) return; } #endif /* APM_FIXUP_CALLTODO */ +#endif +
diff --git a/sys/platform/pc32/i386/machdep.c b/sys/platform/pc32/i386/machdep.c index fc07bd533b..a2785e88b1 100644 --- a/sys/platform/pc32/i386/machdep.c +++ b/sys/platform/pc32/i386/machdep.c @@ -36,7 +36,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.65 2004/08/12 19:59:30 eirikn Exp $ + * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.66 2004/09/17 00:18:07 dillon Exp $ */ #include "use_apm.h" @@ -66,7 +66,6 @@ #include #include #include -#include #include #include #include @@ -253,7 +252,6 @@ static void cpu_startup(dummy) void *dummy; { - unsigned i; caddr_t v; vm_offset_t minaddr; vm_offset_t maxaddr; @@ -290,15 +288,6 @@ cpu_startup(dummy) } } - /* - * Calculate callout wheel size - */ - for (callwheelsize = 1, callwheelbits = 0; - callwheelsize < ncallout; - callwheelsize <<= 1, ++callwheelbits) - ; - callwheelmask = callwheelsize - 1; - /* * Allocate space for system data structures. * The first available kernel virtual address is in "v". * @@ -323,9 +312,6 @@ again: #define valloclim(name, type, num, lim) \ (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num))) - valloc(callout, struct callout, ncallout); - valloc(callwheel, struct callout_tailq, callwheelsize); - /* * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to * @@ -401,20 +387,6 @@ again: exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, (16*(ARG_MAX+(PAGE_SIZE*3)))); - /* - * Initialize callouts - */ - SLIST_INIT(&callfree); - for (i = 0; i < ncallout; i++) { - callout_init(&callout[i]); - callout[i].c_flags = CALLOUT_LOCAL_ALLOC; - SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); - } - - for (i = 0; i < callwheelsize; i++) { - TAILQ_INIT(&callwheel[i]); - } - #if defined(USERCONFIG) userconfig(); cninit(); /* the preferred console may have changed */
diff --git a/sys/sys/callout.h b/sys/sys/callout.h index 74bbb7f3a1..cd4903c9de 100644 --- a/sys/sys/callout.h +++ b/sys/sys/callout.h @@ -37,7 +37,7 @@ * * @(#)callout.h 8.2 (Berkeley) 1/21/94 * $FreeBSD: src/sys/sys/callout.h,v 1.15.2.1 2001/11/13 18:24:52 archie Exp $ - * $DragonFly: src/sys/sys/callout.h,v 1.3 2003/08/20 07:31:21 rob Exp $ + * $DragonFly: src/sys/sys/callout.h,v 1.4 2004/09/17 00:18:10 dillon Exp $ */ #ifndef _SYS_CALLOUT_H_ @@ -53,31 +53,44 @@ struct callout { SLIST_ENTRY(callout) sle; TAILQ_ENTRY(callout) tqe; } c_links; - int c_time; /* ticks to the event */ - void *c_arg; /* function argument */ + int c_time; /* ticks to the event */ + void *c_arg; /* function argument */ void (*c_func) (void *); /* function to call */ - int c_flags; /* state of this entry */ + int c_flags; /* state of this entry */ +#ifdef SMP + struct globaldata *c_gd; +#else + void *c_gd_reserved; +#endif }; -#define CALLOUT_LOCAL_ALLOC 0x0001 /* was allocated from callfree */ -#define CALLOUT_ACTIVE 0x0002 /* callout is currently active */ -#define CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */ +#define CALLOUT_LOCAL_ALLOC 0x0001 /* was allocated from callfree */ +#define CALLOUT_ACTIVE 0x0002 /* callout is currently active */ +#define CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */ +#define CALLOUT_MPSAFE 0x0008 /* callout does not need the BGL */ +#define CALLOUT_DID_INIT 0x0010 /* safety check */ struct callout_handle { struct callout *callout; }; +/* + * WARNING! These macros may only be used when the state of the callout + * structure is stable, meaning from within the callback function or after + * the callback function has been called but the timer has not yet been reset. + */ +#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE) +#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE) +#define callout_pending(c) ((c)->c_flags & CALLOUT_PENDING) + #ifdef _KERNEL -extern struct callout_list callfree; -extern struct callout *callout; extern int ncallout; -extern struct callout_tailq *callwheel; -extern int callwheelsize, callwheelbits, callwheelmask, softticks; -#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE) -#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE) +struct globaldata; + +void hardclock_softtick(struct globaldata *); void callout_init (struct callout *); -#define callout_pending(c) ((c)->c_flags & CALLOUT_PENDING) +void callout_init_mp (struct callout *); void callout_reset (struct callout *, int, void (*)(void *), void *); int callout_stop (struct callout *);
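As referenced in the commit message above, a standalone sketch of the callwheel sizing and indexing arithmetic that swi_softclock_setup() and softclock_handler() rely on. This is userland C for illustration only; NCALLOUT is a made-up stand-in for the kernel's ncallout tunable.

    #include <stdio.h>

    #define NCALLOUT 150    /* stand-in for the kernel's ncallout tunable */

    int
    main(void)
    {
            int callwheelsize = 1;
            int callwheelbits = 0;
            int callwheelmask;
            int tick;

            /* round the wheel up to a power of 2, as swi_softclock_setup() does */
            while (callwheelsize < NCALLOUT) {
                    callwheelsize <<= 1;
                    ++callwheelbits;
            }
            callwheelmask = callwheelsize - 1;

            /*
             * An event due at tick T hangs off bucket (T & callwheelmask).
             * Events more than callwheelsize ticks in the future share a
             * bucket with nearer events; softclock_handler() skips them via
             * its (c->c_time != sc->softticks) test until their turn comes.
             */
            printf("wheel size %d (%d bits), mask 0x%x\n",
                callwheelsize, callwheelbits, callwheelmask);
            for (tick = 250; tick <= 1274; tick += 256)
                    printf("tick %4d -> bucket %d\n", tick, tick & callwheelmask);
            return (0);
    }

With NCALLOUT = 150 the wheel rounds up to 256 entries, and ticks 250, 506, 762, ... all land in bucket 250, which is exactly the bucket-sharing case the skip test in softclock_handler() exists to handle.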
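Finally, a sketch of the callback-side use of the callout_active()/callout_deactivate()/callout_pending() macros under the stable-state rule documented in the callout.h hunk. The bar_* names are hypothetical (same includes as the first sketch), the callout is assumed to have been initialized with callout_init_mp() with an MP safe callback, and the rearm pattern is the classic BSD idiom rather than anything this commit mandates.

    static struct callout bar_timer;        /* hypothetical self-rearming timer */

    static void
    bar_tick(void *arg)
    {
            /*
             * softclock_handler() cleared CALLOUT_PENDING before calling us.
             * If it is set again, someone re-armed the timer while we were
             * being dispatched and the newer timeout takes precedence.
             */
            if (callout_pending(&bar_timer))
                    return;
            if (callout_active(&bar_timer)) {
                    callout_deactivate(&bar_timer);
                    /* ...periodic work would go here... */
                    callout_reset(&bar_timer, hz, bar_tick, arg);
            }
    }

Because callouts are now per-cpu, every rearm from within bar_tick() keeps the timer on the same cpu, which is the property the commit message calls out for the TCP stacks.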