kernel - Fix very annoying lockup (SMP)
author Matthew Dillon <dillon@apollo.backplane.com>
Tue, 11 Jan 2011 22:36:24 +0000 (14:36 -0800)
committer Matthew Dillon <dillon@apollo.backplane.com>
Tue, 11 Jan 2011 22:40:30 +0000 (14:40 -0800)
* Fix an extremely annoying lockup that took a week+ to find.  The cpusync
  code is rather fragile and any for (;;) or while (1) style loops in the
  kernel can trip it up and cause a deadlock.  These loops are careful to
  call lwkt_process_ipiq() to ensure that cpusync processing occurs.

  However, there is a race in the LWKT thread migration code where a thread
  deschedules itself on one cpu and schedules itself on another via a remote
  ipi.  The target cpu expects the thread's TDF_RUNNING state to clear and
  will loop until that happens.

  An IPI could sneak itself into the deschedule/lwkt_switch() path and
  deadlock against a cpusync, preventing the thread from leaving the
  TDF_RUNNING state.

  The solution is to ensure that lwkt_process_ipiq() is *NOT* run in
  the lwkt_switch() path if the calling thread has descheduled itself.
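
  The deadlock shape, annotated (a sketch; the guard is quoted from the
  lwkt_switch() change below, the scenario comments are explanatory):

	/*
	 * cpu A: td deschedules itself and calls lwkt_switch().
	 * cpu A: an IPI sneaks in and lwkt_process_ipiq() runs a
	 *	  cpusync that spins waiting on the other cpus, so td
	 *	  never switches out and TDF_RUNNING never clears.
	 * cpu B: lwkt_setcpu_remote() spins on TDF_RUNNING and thus
	 *	  never acks the cpusync --> hard deadlock.
	 *
	 * Hence the new guard in the switch loop: if the current
	 * thread is no longer on the run queue, go idle and run
	 * no IPIs at all.
	 */
	if (spinning <= 0 || (td->td_flags & TDF_RUNQ) == 0) {
	    atomic_clear_int(&gd->gd_reqflags, RQF_WAKEUP);
	    goto haveidle;
	}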

* The original bug could be reproduced by running blogbench in one window
  and a while (1) ps axl shell script in another.

* Add DEBUG_PUSH_INFO(msg)/DEBUG_POP_INFO() macros which record (msg)
  in the globaldata structure as a debugging aid.
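
  Usage pattern, as instrumented throughout this commit ("ktrsync1" is
  one of the real markers; the macros compile to nothing unless the
  #if 0 in globaldata.h is flipped on):

	DEBUG_PUSH_INFO("ktrsync1");
	while (ktr_sync_count != count)
		lwkt_process_ipiq();
	DEBUG_POP_INFO();

  When the (also #if 0'd) ipiq saturation check trips, it dumps the
  recorded string for every cpu, showing exactly which wait loop each
  cpu is stuck in.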

* Remove unused platform/pc64/x86_64/systimer.c file.  The entire contents
  of this file were #ifdef'd out and its functionality is handled elsewhere
  by the lapic timer code.

* #if 0 out numerous debugging bits but don't remove the code because it
  is extremely useful for finding lockup conditions.

12 files changed:
sys/kern/kern_ktr.c
sys/kern/lwkt_ipiq.c
sys/kern/lwkt_thread.c
sys/platform/pc32/apic/mpapic.c
sys/platform/pc32/i386/pmap.c
sys/platform/pc32/i386/pmap_inval.c
sys/platform/pc64/apic/mpapic.c
sys/platform/pc64/conf/files
sys/platform/pc64/x86_64/pmap.c
sys/platform/pc64/x86_64/pmap_inval.c
sys/platform/pc64/x86_64/systimer.c [deleted file]
sys/sys/globaldata.h

diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c
index 238762e..efcb4a2 100644
@@ -311,8 +311,10 @@ ktr_resync_callback(void *dummy __unused)
        ktr_sync_tsc = rdtsc();
        count = lwkt_send_ipiq_mask(mycpu->gd_other_cpus & smp_active_mask,
                                    (ipifunc1_t)ktr_resync_remote, NULL);
+       DEBUG_PUSH_INFO("ktrsync1");
        while (ktr_sync_count != count)
                lwkt_process_ipiq();
+       DEBUG_POP_INFO();
 
        /*
         * Continuously update the TSC for cpu 0 while waiting for all other
@@ -351,9 +353,11 @@ ktr_resync_remote(void *dummy __unused)
         */
        KKASSERT(ktr_sync_state == 1);
        atomic_add_int(&ktr_sync_count, 1);
+       DEBUG_PUSH_INFO("ktrsync2");
        while (ktr_sync_state == 1) {
                lwkt_process_ipiq();
        }
+       DEBUG_POP_INFO();
 
        /*
         * Now the master is in a hard loop, synchronize the TSC and
diff --git a/sys/kern/lwkt_ipiq.c b/sys/kern/lwkt_ipiq.c
index b5b6bee..d657fc6 100644
@@ -193,10 +193,12 @@ lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
        }
        cpu_enable_intr();
        ++ipiq_fifofull;
+       DEBUG_PUSH_INFO("send_ipiq3");
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
        }
+       DEBUG_POP_INFO();
 #if defined(__i386__)
        write_eflags(eflags);
 #elif defined(__x86_64__)
@@ -284,10 +286,12 @@ lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
        }
        cpu_enable_intr();
        ++ipiq_fifofull;
+       DEBUG_PUSH_INFO("send_ipiq3_passive");
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
        }
+       DEBUG_POP_INFO();
 #if defined(__i386__)
        write_eflags(eflags);
 #elif defined(__x86_64__)
@@ -425,6 +429,7 @@ lwkt_wait_ipiq(globaldata_t target, int seq)
            unsigned long rflags = read_rflags();
 #endif
            cpu_enable_intr();
+           DEBUG_PUSH_INFO("wait_ipiq");
            while ((int)(ip->ip_xindex - seq) < 0) {
                crit_enter();
                lwkt_process_ipiq();
@@ -440,6 +445,7 @@ lwkt_wait_ipiq(globaldata_t target, int seq)
                 */
                cpu_lfence();
            }
+           DEBUG_POP_INFO();
 #if defined(__i386__)
            write_eflags(eflags);
 #elif defined(__x86_64__)
@@ -527,6 +533,14 @@ again:
     }
 }
 
+#if 0
+static int iqticks[SMP_MAXCPU];
+static int iqcount[SMP_MAXCPU];
+#endif
+#if 0
+static int iqterm[SMP_MAXCPU];
+#endif
+
 static int
 lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip, 
                       struct intrframe *frame)
@@ -538,6 +552,30 @@ lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
     void *copy_arg1;
     int copy_arg2;
 
+#if 0
+    if (iqticks[mygd->gd_cpuid] != ticks) {
+           iqticks[mygd->gd_cpuid] = ticks;
+           iqcount[mygd->gd_cpuid] = 0;
+    }
+    if (++iqcount[mygd->gd_cpuid] > 3000000) {
+       kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
+               mygd->gd_cpuid,
+               mygd->gd_curthread->td_cscount,
+               mygd->gd_spinlocks_wr);
+       iqcount[mygd->gd_cpuid] = 0;
+#if 0
+       if (++iqterm[mygd->gd_cpuid] > 10)
+               panic("cpu %d ipiq maxed", mygd->gd_cpuid);
+#endif
+       int i;
+       for (i = 0; i < ncpus; ++i) {
+               if (globaldata_find(i)->gd_infomsg)
+                       kprintf(" %s", globaldata_find(i)->gd_infomsg);
+       }
+       kprintf("\n");
+    }
+#endif
+
     /*
      * Obtain the current write index, which is modified by a remote cpu.
      * Issue a load fence to prevent speculative reads of e.g. data written
@@ -682,6 +720,7 @@ lwkt_cpusync_interlock(lwkt_cpusync_t cs)
     cs->cs_mack = 0;
     crit_enter_id("cpusync");
     if (mask) {
+       DEBUG_PUSH_INFO("cpusync_interlock");
        ++ipiq_cscount;
        ++gd->gd_curthread->td_cscount;
        lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
@@ -690,6 +729,7 @@ lwkt_cpusync_interlock(lwkt_cpusync_t cs)
            lwkt_process_ipiq();
            cpu_pause();
        }
+       DEBUG_POP_INFO();
     }
 #else
     cs->cs_mack = 0;
@@ -724,10 +764,17 @@ lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
     if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
            cs->cs_func(cs->cs_data);
     if (mask) {
+       DEBUG_PUSH_INFO("cpusync_deinterlock");
        while (cs->cs_mack != mask) {
            lwkt_process_ipiq();
            cpu_pause();
        }
+       DEBUG_POP_INFO();
+       /*
+        * cpusyncq ipis may be left queued without the RQF flag set due to
+        * a non-zero td_cscount, so be sure to process any laggards after
+        * decrementing td_cscount.
+        */
        --gd->gd_curthread->td_cscount;
        lwkt_process_ipiq();
        logipiq2(sync_end, mask);
@@ -783,6 +830,11 @@ lwkt_cpusync_remote2(lwkt_cpusync_t cs)
        ip->ip_arg2[wi] = 0;
        cpu_sfence();
        ++ip->ip_windex;
+       if ((ip->ip_windex & 0xFFFFFF) == 0)
+               kprintf("cpu %d cm=%016jx %016jx f=%p\n",
+                       gd->gd_cpuid,
+                       (intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
+                       cs->cs_func);
     }
 }
 
diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c
index 2310f2d..6cfae46 100644
@@ -253,6 +253,7 @@ lwkt_init(void)
 void
 lwkt_schedule_self(thread_t td)
 {
+    KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
     crit_enter_quick(td);
     KASSERT(td != &td->td_gd->gd_idlethread,
            ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
@@ -484,15 +485,18 @@ lwkt_free_thread(thread_t td)
  * different beast and LWKT priorities should not be confused with
  * user process priorities.
  *
- * Note that the td_switch() function cannot do anything that requires
- * the MP lock since the MP lock will have already been setup for
- * the target thread (not the current thread).  It's nice to have a scheduler
- * that does not need the MP lock to work because it allows us to do some
- * really cool high-performance MP lock optimizations.
- *
  * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
  * is not called by the current thread in the preemption case, only when
  * the preempting thread blocks (in order to return to the original thread).
+ *
+ * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
+ * migration and tsleep deschedule the current lwkt thread and call
+ * lwkt_switch().  In particular, the target cpu of the migration fully
+ * expects the thread to become non-runnable and can deadlock against
+ * cpusync operations if we run any IPIs prior to switching the thread out.
+ *
+ * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
+ * THE CURRENT THREAD HAS BEEN DESCHEDULED!
  */
 void
 lwkt_switch(void)
@@ -609,14 +613,11 @@ lwkt_switch(void)
      * Implement round-robin fairq with priority insertion.  The priority
      * insertion is handled by _lwkt_enqueue()
      *
-     * We have to adjust the MP lock for the target thread.  If we
-     * need the MP lock and cannot obtain it we try to locate a
-     * thread that does not need the MP lock.  If we cannot, we spin
-     * instead of HLT.
-     *
-     * A similar issue exists for the tokens held by the target thread.
      * If we cannot obtain ownership of the tokens we cannot immediately
-     * schedule the thread.
+     * schedule the target thread.
+     *
+     * Reminder: Again, we cannot afford to run any IPIs in this path if
+     * the current thread has been descheduled.
      */
     for (;;) {
        /*
@@ -660,7 +661,7 @@ lwkt_switch(void)
            if (ntd->td_fairq_accum >= 0)
                    break;
 
-           splz_check();
+           /*splz_check(); cannot do this here, see above */
            lwkt_fairq_accumulate(gd, ntd);
            TAILQ_REMOVE(&gd->gd_tdrunq, ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq, ntd, td_threadq);
@@ -820,8 +821,12 @@ skip:
         * idle thread will check for pending reschedules already set
         * (RQF_AST_LWKT_RESCHED) before actually halting so we don't have
         * to here.
+        *
+        * Also, if TDF_RUNQ is not set the current thread is trying to
+        * deschedule, possibly in an atomic fashion.  We cannot afford to
+        * stay here.
         */
-       if (spinning <= 0) {
+       if (spinning <= 0 || (td->td_flags & TDF_RUNQ) == 0) {
            atomic_clear_int(&gd->gd_reqflags, RQF_WAKEUP);
            goto haveidle;
        }
@@ -882,7 +887,7 @@ skip:
            cseq = 1000;
        DELAY(cseq);
        atomic_add_int(&lwkt_cseq_rindex, 1);
-       splz_check();
+       splz_check();   /* ok, we already checked that td is still scheduled */
        /* highest level for(;;) loop */
     }
 
@@ -1242,6 +1247,7 @@ _lwkt_schedule(thread_t td, int reschedok)
 
     KASSERT(td != &td->td_gd->gd_idlethread,
            ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
+    KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
     crit_enter_gd(mygd);
     KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
     if (td == mygd->gd_curthread) {
@@ -1343,12 +1349,14 @@ lwkt_acquire(thread_t td)
        cpu_lfence();
        KKASSERT((td->td_flags & TDF_RUNQ) == 0);
        crit_enter_gd(mygd);
+       DEBUG_PUSH_INFO("lwkt_acquire");
        while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
 #ifdef SMP
            lwkt_process_ipiq();
 #endif
            cpu_lfence();
        }
+       DEBUG_POP_INFO();
        cpu_mfence();
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
@@ -1550,14 +1558,22 @@ lwkt_setcpu_remote(void *arg)
 {
     thread_t td = arg;
     globaldata_t gd = mycpu;
+    int retry = 10000000;
 
+    DEBUG_PUSH_INFO("lwkt_setcpu_remote");
     while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
 #ifdef SMP
        lwkt_process_ipiq();
 #endif
        cpu_lfence();
        cpu_pause();
+       if (--retry == 0) {
+               kprintf("lwkt_setcpu_remote: td->td_flags %08x\n",
+                       td->td_flags);
+               retry = 10000000;
+       }
     }
+    DEBUG_POP_INFO();
     td->td_gd = gd;
     cpu_mfence();
     td->td_flags &= ~TDF_MIGRATING;
diff --git a/sys/platform/pc32/apic/mpapic.c b/sys/platform/pc32/apic/mpapic.c
index c34c491..a54b05c 100644
@@ -842,9 +842,11 @@ apic_ipi(int dest_type, int vector, int delivery_mode)
        if ((lapic.icr_lo & APIC_DELSTAT_MASK) != 0) {
            unsigned int eflags = read_eflags();
            cpu_enable_intr();
+           DEBUG_PUSH_INFO("apic_ipi");
            while ((lapic.icr_lo & APIC_DELSTAT_MASK) != 0) {
                lwkt_process_ipiq();
            }
+           DEBUG_POP_INFO();
            write_eflags(eflags);
        }
 
@@ -865,9 +867,11 @@ single_apic_ipi(int cpu, int vector, int delivery_mode)
        if ((lapic.icr_lo & APIC_DELSTAT_MASK) != 0) {
            unsigned int eflags = read_eflags();
            cpu_enable_intr();
+           DEBUG_PUSH_INFO("single_apic_ipi");
            while ((lapic.icr_lo & APIC_DELSTAT_MASK) != 0) {
                lwkt_process_ipiq();
            }
+           DEBUG_POP_INFO();
            write_eflags(eflags);
        }
        icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c
index 39add33..e93cca2 100644
@@ -3481,11 +3481,13 @@ pmap_interlock_wait(struct vmspace *vm)
        struct pmap *pmap = &vm->vm_pmap;
 
        if (pmap->pm_active & CPUMASK_LOCK) {
+               DEBUG_PUSH_INFO("pmap_interlock_wait");
                while (pmap->pm_active & CPUMASK_LOCK) {
                        cpu_pause();
                        cpu_ccfence();
                        lwkt_process_ipiq();
                }
+               DEBUG_POP_INFO();
        }
 }
 
diff --git a/sys/platform/pc32/i386/pmap_inval.c b/sys/platform/pc32/i386/pmap_inval.c
index 664c485..05e65e5 100644
@@ -87,6 +87,7 @@ pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
 #ifdef SMP
     cpumask_t nactive;
 
+    DEBUG_PUSH_INFO("pmap_inval_interlock");
     for (;;) {
        oactive = pmap->pm_active & ~CPUMASK_LOCK;
        nactive = oactive | CPUMASK_LOCK;
@@ -95,6 +96,7 @@ pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
        lwkt_process_ipiq();
        cpu_pause();
     }
+    DEBUG_POP_INFO();
 #else
     oactive = pmap->pm_active & ~CPUMASK_LOCK;
 #endif
diff --git a/sys/platform/pc64/apic/mpapic.c b/sys/platform/pc64/apic/mpapic.c
index b98c556..72a99e9 100644
@@ -291,29 +291,57 @@ lapic_timer_process_frame(struct intrframe *frame)
  * This manual debugging code is called unconditionally from Xtimer
  * (the lapic timer interrupt, whether the current thread is in a
  * critical section or not) and can be useful in tracking down lockups.
+ *
+ * NOTE: MANUAL DEBUG CODE
  */
+#if 0
+static int saveticks[SMP_MAXCPU];
+static int savecounts[SMP_MAXCPU];
+#endif
+
 void
 lapic_timer_always(struct intrframe *frame)
 {
 #if 0
        globaldata_t gd = mycpu;
        int cpu = gd->gd_cpuid;
-       int i;
        char buf[64];
        short *gptr;
+       int i;
 
-       if (cpu > 20)
-               return;
-
-       gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
-       *gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
-       ++gptr;
+       if (cpu <= 20) {
+               gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
+               *gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
+               ++gptr;
 
-       ksnprintf(buf, sizeof(buf), " %p %16.16s",
-               (void *)frame->if_rip, gd->gd_curthread->td_comm);
-       for (i = 0; buf[i]; ++i) {
-               gptr[i] = 0x0700 | (unsigned char)buf[i];
+               ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ",
+                   (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks,
+                   gd->gd_infomsg);
+               for (i = 0; buf[i]; ++i) {
+                       gptr[i] = 0x0700 | (unsigned char)buf[i];
+               }
+       }
+#if 0
+       if (saveticks[gd->gd_cpuid] != ticks) {
+               saveticks[gd->gd_cpuid] = ticks;
+               savecounts[gd->gd_cpuid] = 0;
+       }
+       ++savecounts[gd->gd_cpuid];
+       if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) {
+               panic("cpud %d panicing on ticks failure",
+                       gd->gd_cpuid);
        }
+       for (i = 0; i < ncpus; ++i) {
+               int delta;
+               if (saveticks[i] && panicstr == NULL) {
+                       delta = saveticks[i] - ticks;
+                       if (delta < -10 || delta > 10) {
+                               panic("cpu %d panicing on cpu %d watchdog",
+                                     gd->gd_cpuid, i);
+                       }
+               }
+       }
+#endif
 #endif
 }
 
@@ -879,9 +907,11 @@ apic_ipi(int dest_type, int vector, int delivery_mode)
        if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
            unsigned long rflags = read_rflags();
            cpu_enable_intr();
+           DEBUG_PUSH_INFO("apic_ipi");
            while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
                lwkt_process_ipiq();
            }
+           DEBUG_POP_INFO();
            write_rflags(rflags);
        }
 
@@ -902,9 +932,11 @@ single_apic_ipi(int cpu, int vector, int delivery_mode)
        if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
            unsigned long rflags = read_rflags();
            cpu_enable_intr();
+           DEBUG_PUSH_INFO("single_apic_ipi");
            while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
                lwkt_process_ipiq();
            }
+           DEBUG_POP_INFO();
            write_rflags(rflags);
        }
        icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
diff --git a/sys/platform/pc64/conf/files b/sys/platform/pc64/conf/files
index ae8585c..91a5bca 100644
@@ -194,7 +194,6 @@ platform/pc64/x86_64/pmap.c         standard
 platform/pc64/x86_64/pmap_inval.c              standard
 platform/pc64/x86_64/busdma_machdep.c standard
 platform/pc64/x86_64/sysarch.c standard
-platform/pc64/x86_64/systimer.c        standard
 platform/pc64/x86_64/console.c standard
 platform/pc64/x86_64/ipl_funcs.c       standard
 kern/syscalls.c                        standard
diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c
index 89e52ff..6d7a9c7 100644
@@ -3895,11 +3895,13 @@ pmap_interlock_wait(struct vmspace *vm)
        struct pmap *pmap = &vm->vm_pmap;
 
        if (pmap->pm_active & CPUMASK_LOCK) {
+               DEBUG_PUSH_INFO("pmap_interlock_wait");
                while (pmap->pm_active & CPUMASK_LOCK) {
                        cpu_pause();
                        cpu_ccfence();
                        lwkt_process_ipiq();
                }
+               DEBUG_POP_INFO();
        }
 }
 
diff --git a/sys/platform/pc64/x86_64/pmap_inval.c b/sys/platform/pc64/x86_64/pmap_inval.c
index b559adf..5efbbd6 100644
@@ -93,6 +93,7 @@ pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
 #ifdef SMP
     cpumask_t nactive;
 
+    DEBUG_PUSH_INFO("pmap_inval_interlock");
     for (;;) {
        oactive = pmap->pm_active & ~CPUMASK_LOCK;
        nactive = oactive | CPUMASK_LOCK;
@@ -101,6 +102,7 @@ pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
        lwkt_process_ipiq();
        cpu_pause();
     }
+    DEBUG_POP_INFO();
 #else
     oactive = pmap->pm_active & ~CPUMASK_LOCK;
 #endif
diff --git a/sys/platform/pc64/x86_64/systimer.c b/sys/platform/pc64/x86_64/systimer.c
deleted file mode 100644
index 38a949b..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2006,2008 The DragonFly Project.  All rights reserved.
- * 
- * This code is derived from software contributed to The DragonFly Project
- * by Matthew Dillon <dillon@backplane.com>
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. Neither the name of The DragonFly Project nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific, prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
- * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/types.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/systimer.h>
-#include <sys/sysctl.h>
-#include <sys/signal.h>
-#include <sys/interrupt.h>
-#include <sys/bus.h>
-#include <sys/time.h>
-#include <machine/globaldata.h>
-#include <machine/md_var.h>
-
-#if JG
-int adjkerntz;
-int wall_cmos_clock = 0;
-#endif
-
-/*
- * SYSTIMER IMPLEMENTATION
- */
-#if JG
-/*
- * Initialize the systimer subsystem, called from MI code in early boot.
- */
-void
-cpu_initclocks(void *arg __unused)
-{
-}
-
-/*
- * Configure the interrupt for our core systimer.  Use the kqueue timer
- * support functions.
- */
-void
-cputimer_intr_config(struct cputimer *timer)
-{
-}
-
-/*
- * Reload the interrupt for our core systimer.  Because the caller's
- * reload calculation can be negatively indexed, we need a minimal
- * check to ensure that a reasonable reload value is selected. 
- */
-void
-cputimer_intr_reload(sysclock_t reload)
-{
-}
-
-/*
- * Initialize the time of day register, based on the time base which is, e.g.
- * from a filesystem.
- */
-void
-inittodr(time_t base)
-{
-}
-
-/*
- * Write system time back to the RTC
- */
-void
-resettodr(void)
-{
-}
-
-void
-DELAY(int usec)
-{
-}
-
-void
-DRIVERSLEEP(int usec)
-{
-}
-#endif
diff --git a/sys/sys/globaldata.h b/sys/sys/globaldata.h
index e1fe49a..8845f29 100644
@@ -165,7 +165,8 @@ struct globaldata {
        int             gd_timer_running;
        u_int           gd_idle_repeat;         /* repeated switches to idle */
        int             gd_ireserved[7];
-       void            *gd_preserved[11];      /* future fields */
+       const char      *gd_infomsg;            /* debugging */
+       void            *gd_preserved[10];      /* future fields */
        /* extended by <machine/globaldata.h> */
 };
 
@@ -206,6 +207,29 @@ typedef struct globaldata *globaldata_t;
 
 #endif
 
+/*
+ * MANUAL DEBUG CODE FOR DEBUGGING LOCKUPS
+ */
+#ifdef _KERNEL
+
+#if 0
+
+#define DEBUG_PUSH_INFO(msg)                           \
+       const char *save_infomsg;                       \
+       save_infomsg = mycpu->gd_infomsg;               \
+       mycpu->gd_infomsg = msg                         \
+
+#define DEBUG_POP_INFO()       mycpu->gd_infomsg = save_infomsg
+
+#else
+
+#define DEBUG_PUSH_INFO(msg)
+#define DEBUG_POP_INFO()
+
+#endif
+
+#endif
+
 #ifdef _KERNEL
 struct globaldata *globaldata_find(int cpu);
 int is_globaldata_space(vm_offset_t saddr, vm_offset_t eaddr);