clock/tsc: Detect invariant TSC CPU synchronization
author: Sepherosa Ziehau <sephe@dragonflybsd.org>
Thu, 20 Jun 2013 03:10:03 +0000 (11:10 +0800)
committer: Sepherosa Ziehau <sephe@dragonflybsd.org>
Thu, 20 Jun 2013 03:25:39 +0000 (11:25 +0800)
The detected result can be used to determine whether the TSC can be
used as the cputimer or not, and can also be used by other consumers,
e.g. CoDel AQM packet time stamping.

- Only invariant TSC will be tested
- If there is only one CPU, then invariant TSC is always synchronized
- Only CPUs from Intel are tested (*)

The test is conducted using lwkt_cpusync interfaces:
The BSP reads the TSC, then asks the APs to read the TSC.  If the TSC
read from any AP is less than the BSP's TSC, the invariant TSC is not
synchronized across CPUs.

Currently the test runs ~100ms.

(*)
AMD family 15h models 00h-0fh may also have a synchronized TSC across
CPUs, as pointed out by vsrinivas@; however, according to AMD:

  <Revision Guide for AMD Family 15h Models 00h-0Fh Processors
   Rev. 3.18 October 2012>
  759 One Core May Observe a Time Stamp Counter Skew

AMD family 15h models 00h-0fh are _not_ ready yet.

sys/platform/pc32/include/clock.h
sys/platform/pc32/isa/clock.c
sys/platform/pc64/include/clock.h
sys/platform/pc64/isa/clock.c
sys/platform/vkernel/include/clock.h
sys/platform/vkernel/platform/init.c
sys/platform/vkernel/platform/systimer.c
sys/platform/vkernel64/include/clock.h
sys/platform/vkernel64/platform/init.c
sys/platform/vkernel64/platform/systimer.c

index 7144a38..fda63d7 100644 (file)
@@ -35,6 +35,7 @@ extern u_int  timer_freq;
 extern int     timer0_max_count;
 extern int     tsc_present;
 extern int     tsc_invariant;
+extern int     tsc_mpsync;
 extern int64_t tsc_frequency;
 extern int     tsc_is_broken;
 extern int     wall_cmos_clock;
index 1aab043..c8346ae 100644 (file)
@@ -106,6 +106,7 @@ int adjkerntz;              /* local offset from GMT in seconds */
 int    disable_rtc_set;        /* disable resettodr() if != 0 */
 int    tsc_present;
 int    tsc_invariant;
+int    tsc_mpsync;
 int64_t        tsc_frequency;
 int    tsc_is_broken;
 int    wall_cmos_clock;        /* wall CMOS clock assumed if != 0 */
@@ -1166,6 +1167,89 @@ hw_i8254_timestamp(SYSCTL_HANDLER_ARGS)
     return(SYSCTL_OUT(req, buf, strlen(buf) + 1));
 }
 
+static uint64_t                tsc_mpsync_target;
+
+static void
+tsc_mpsync_test_remote(void *arg __unused)
+{
+       uint64_t tsc;
+
+       tsc = rdtsc();
+       if (tsc < tsc_mpsync_target)
+               tsc_mpsync = 0;
+}
+
+static void
+tsc_mpsync_test(void)
+{
+       struct globaldata *gd = mycpu;
+       uint64_t test_end, test_begin;
+       u_int i;
+
+       if (!tsc_invariant) {
+               /* Not even invariant TSC */
+               return;
+       }
+
+       if (ncpus == 1) {
+               /* Only one CPU */
+               tsc_mpsync = 1;
+               return;
+       }
+
+       if (cpu_vendor_id != CPU_VENDOR_INTEL) {
+               /* XXX only Intel works */
+               return;
+       }
+
+       kprintf("TSC testing MP synchronization ...\n");
+       tsc_mpsync = 1;
+
+       /* Run test for 100ms */
+       test_begin = rdtsc();
+       test_end = test_begin + (tsc_frequency / 10);
+
+#define TSC_TEST_TRYMAX                1000000 /* Make sure we could stop */
+
+       for (i = 0; i < TSC_TEST_TRYMAX; ++i) {
+               struct lwkt_cpusync cs;
+
+               crit_enter();
+               lwkt_cpusync_init(&cs, gd->gd_other_cpus,
+                   tsc_mpsync_test_remote, NULL);
+               lwkt_cpusync_interlock(&cs);
+               tsc_mpsync_target = rdtsc();
+               cpu_mfence();
+               lwkt_cpusync_deinterlock(&cs);
+               crit_exit();
+
+               if (!tsc_mpsync) {
+                       kprintf("TSC is not MP synchronized @%u\n", i);
+                       break;
+               }
+               if (tsc_mpsync_target > test_end)
+                       break;
+       }
+
+#undef TSC_TEST_TRYMAX
+
+       if (tsc_mpsync) {
+               if (tsc_mpsync_target == test_begin) {
+                       kprintf("TSC does not tick?!");
+                       /* XXX disable TSC? */
+                       tsc_invariant = 0;
+                       tsc_mpsync = 0;
+                       return;
+               }
+
+               kprintf("TSC is MP synchronized");
+               if (bootverbose)
+                       kprintf(", after %u tries", i);
+               kprintf("\n");
+       }
+}
+SYSINIT(tsc_mpsync, SI_BOOT2_FINISH_SMP, SI_ORDER_ANY, tsc_mpsync_test, NULL);
+
 SYSCTL_NODE(_hw, OID_AUTO, i8254, CTLFLAG_RW, 0, "I8254");
 SYSCTL_UINT(_hw_i8254, OID_AUTO, freq, CTLFLAG_RD, &i8254_cputimer.freq, 0,
            "frequency");
@@ -1176,5 +1260,7 @@ SYSCTL_INT(_hw, OID_AUTO, tsc_present, CTLFLAG_RD,
            &tsc_present, 0, "TSC Available");
 SYSCTL_INT(_hw, OID_AUTO, tsc_invariant, CTLFLAG_RD,
            &tsc_invariant, 0, "Invariant TSC");
+SYSCTL_INT(_hw, OID_AUTO, tsc_mpsync, CTLFLAG_RD,
+           &tsc_mpsync, 0, "TSC is synchronized across CPUs");
 SYSCTL_QUAD(_hw, OID_AUTO, tsc_frequency, CTLFLAG_RD,
            &tsc_frequency, 0, "TSC Frequency");
index 7144a38..fda63d7 100644 (file)
@@ -35,6 +35,7 @@ extern u_int  timer_freq;
 extern int     timer0_max_count;
 extern int     tsc_present;
 extern int     tsc_invariant;
+extern int     tsc_mpsync;
 extern int64_t tsc_frequency;
 extern int     tsc_is_broken;
 extern int     wall_cmos_clock;
index 933962d..836591a 100644 (file)
@@ -108,6 +108,7 @@ int adjkerntz;              /* local offset from GMT in seconds */
 int    disable_rtc_set;        /* disable resettodr() if != 0 */
 int    tsc_present;
 int    tsc_invariant;
+int    tsc_mpsync;
 int64_t        tsc_frequency;
 int    tsc_is_broken;
 int    wall_cmos_clock;        /* wall CMOS clock assumed if != 0 */
@@ -1173,6 +1174,89 @@ hw_i8254_timestamp(SYSCTL_HANDLER_ARGS)
     return(SYSCTL_OUT(req, buf, strlen(buf) + 1));
 }
 
+static uint64_t                tsc_mpsync_target;
+
+static void
+tsc_mpsync_test_remote(void *arg __unused)
+{
+       uint64_t tsc;
+
+       tsc = rdtsc();
+       if (tsc < tsc_mpsync_target)
+               tsc_mpsync = 0;
+}
+
+static void
+tsc_mpsync_test(void)
+{
+       struct globaldata *gd = mycpu;
+       uint64_t test_end, test_begin;
+       u_int i;
+
+       if (!tsc_invariant) {
+               /* Not even invariant TSC */
+               return;
+       }
+
+       if (ncpus == 1) {
+               /* Only one CPU */
+               tsc_mpsync = 1;
+               return;
+       }
+
+       if (cpu_vendor_id != CPU_VENDOR_INTEL) {
+               /* XXX only Intel works */
+               return;
+       }
+
+       kprintf("TSC testing MP synchronization ...\n");
+       tsc_mpsync = 1;
+
+       /* Run test for 100ms */
+       test_begin = rdtsc();
+       test_end = test_begin + (tsc_frequency / 10);
+
+#define TSC_TEST_TRYMAX                1000000 /* Make sure we could stop */
+
+       for (i = 0; i < TSC_TEST_TRYMAX; ++i) {
+               struct lwkt_cpusync cs;
+
+               crit_enter();
+               lwkt_cpusync_init(&cs, gd->gd_other_cpus,
+                   tsc_mpsync_test_remote, NULL);
+               lwkt_cpusync_interlock(&cs);
+               tsc_mpsync_target = rdtsc();
+               cpu_mfence();
+               lwkt_cpusync_deinterlock(&cs);
+               crit_exit();
+
+               if (!tsc_mpsync) {
+                       kprintf("TSC is not MP synchronized @%u\n", i);
+                       break;
+               }
+               if (tsc_mpsync_target > test_end)
+                       break;
+       }
+
+#undef TSC_TEST_TRYMAX
+
+       if (tsc_mpsync) {
+               if (tsc_mpsync_target == test_begin) {
+                       kprintf("TSC does not tick?!");
+                       /* XXX disable TSC? */
+                       tsc_invariant = 0;
+                       tsc_mpsync = 0;
+                       return;
+               }
+
+               kprintf("TSC is MP synchronized");
+               if (bootverbose)
+                       kprintf(", after %u tries", i);
+               kprintf("\n");
+       }
+}
+SYSINIT(tsc_mpsync, SI_BOOT2_FINISH_SMP, SI_ORDER_ANY, tsc_mpsync_test, NULL);
+
 SYSCTL_NODE(_hw, OID_AUTO, i8254, CTLFLAG_RW, 0, "I8254");
 SYSCTL_UINT(_hw_i8254, OID_AUTO, freq, CTLFLAG_RD, &i8254_cputimer.freq, 0,
            "frequency");
@@ -1183,5 +1267,7 @@ SYSCTL_INT(_hw, OID_AUTO, tsc_present, CTLFLAG_RD,
            &tsc_present, 0, "TSC Available");
 SYSCTL_INT(_hw, OID_AUTO, tsc_invariant, CTLFLAG_RD,
            &tsc_invariant, 0, "Invariant TSC");
+SYSCTL_INT(_hw, OID_AUTO, tsc_mpsync, CTLFLAG_RD,
+           &tsc_mpsync, 0, "TSC is synchronized across CPUs");
 SYSCTL_QUAD(_hw, OID_AUTO, tsc_frequency, CTLFLAG_RD,
            &tsc_frequency, 0, "TSC Frequency");
index 56f85c9..5c84a59 100644 (file)
@@ -25,6 +25,7 @@ extern u_int  timer_freq;
 extern int     timer0_max_count;
 extern int     tsc_present;
 extern int     tsc_invariant;
+extern int     tsc_mpsync;
 extern int64_t tsc_frequency;
 extern int     tsc_is_broken;
 extern int     wall_cmos_clock;
index 4a59a3d..764a639 100644 (file)
@@ -107,6 +107,7 @@ vpte_t      *KernelPTA;     /* Warning: Offset for direct VA translation */
 u_int cpu_feature;     /* XXX */
 int tsc_present;
 int tsc_invariant;
+int tsc_mpsync;
 int64_t tsc_frequency;
 int optcpus;           /* number of cpus - see mp_start() */
 int lwp_cpu_lock;      /* if/how to lock virtual CPUs to real CPUs */
@@ -371,6 +372,8 @@ main(int ac, char **av)
        sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
        vsize = sizeof(tsc_invariant);
        sysctlbyname("hw.tsc_invariant", &tsc_invariant, &vsize, NULL, 0);
+       vsize = sizeof(tsc_mpsync);
+       sysctlbyname("hw.tsc_mpsync", &tsc_mpsync, &vsize, NULL, 0);
        vsize = sizeof(tsc_frequency);
        sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
        if (tsc_present)
index 3e81fb0..ec5a2a2 100644 (file)
@@ -62,6 +62,8 @@ SYSCTL_INT(_hw, OID_AUTO, tsc_present, CTLFLAG_RD,
             &tsc_present, 0, "TSC Available");
 SYSCTL_INT(_hw, OID_AUTO, tsc_invariant, CTLFLAG_RD,
             &tsc_invariant, 0, "Invariant TSC");
+SYSCTL_INT(_hw, OID_AUTO, tsc_mpsync, CTLFLAG_RD,
+            &tsc_mpsync, 0, "TSC is synchronized across CPUs");
 SYSCTL_QUAD(_hw, OID_AUTO, tsc_frequency, CTLFLAG_RD,
            &tsc_frequency, 0, "TSC Frequency");
 
index 56f85c9..5c84a59 100644 (file)
@@ -25,6 +25,7 @@ extern u_int  timer_freq;
 extern int     timer0_max_count;
 extern int     tsc_present;
 extern int     tsc_invariant;
+extern int     tsc_mpsync;
 extern int64_t tsc_frequency;
 extern int     tsc_is_broken;
 extern int     wall_cmos_clock;
index 95ecab9..a1152a9 100644 (file)
@@ -109,6 +109,7 @@ void *dmap_min_address;
 u_int cpu_feature;     /* XXX */
 int tsc_present;
 int tsc_invariant;
+int tsc_mpsync;
 int64_t tsc_frequency;
 int optcpus;           /* number of cpus - see mp_start() */
 int lwp_cpu_lock;      /* if/how to lock virtual CPUs to real CPUs */
@@ -370,6 +371,8 @@ main(int ac, char **av)
        sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
        vsize = sizeof(tsc_invariant);
        sysctlbyname("hw.tsc_invariant", &tsc_invariant, &vsize, NULL, 0);
+       vsize = sizeof(tsc_mpsync);
+       sysctlbyname("hw.tsc_mpsync", &tsc_mpsync, &vsize, NULL, 0);
        vsize = sizeof(tsc_frequency);
        sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
        if (tsc_present)
index 49b57b5..8713dda 100644 (file)
@@ -63,6 +63,8 @@ SYSCTL_INT(_hw, OID_AUTO, tsc_present, CTLFLAG_RD,
             &tsc_present, 0, "TSC Available");
 SYSCTL_INT(_hw, OID_AUTO, tsc_invariant, CTLFLAG_RD,
             &tsc_invariant, 0, "Invariant TSC");
+SYSCTL_INT(_hw, OID_AUTO, tsc_mpsync, CTLFLAG_RD,
+            &tsc_mpsync, 0, "TSC is synchronized across CPUs");
 SYSCTL_QUAD(_hw, OID_AUTO, tsc_frequency, CTLFLAG_RD,
            &tsc_frequency, 0, "TSC Frequency");