@@ -43,6 +43,11 @@ static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
 
+static u32 art_to_tsc_numerator;
+static u32 art_to_tsc_denominator;
+static u64 art_to_tsc_offset;
+struct clocksource *art_related_clocksource;
+
 /*
  * Use a ring-buffer like data structure, where a writer advances the head by
  * writing a new data entry and a reader advances the tail when it observes a
@@ -964,6 +969,37 @@ core_initcall(cpufreq_tsc);
 
 #endif /* CONFIG_CPU_FREQ */
 
+#define ART_CPUID_LEAF (0x15)
+#define ART_MIN_DENOMINATOR (1)
+
+
+/*
+ * If ART is present detect the numerator:denominator to convert to TSC
+ */
+static void detect_art(void)
+{
+	unsigned int unused[2];
+
+	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
+		return;
+
+	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
+	      &art_to_tsc_numerator, unused, unused+1);
+
+	/* Don't enable ART in a VM, non-stop TSC required */
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
+	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
+	    art_to_tsc_denominator < ART_MIN_DENOMINATOR)
+		return;
+
+	if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
+		return;
+
+	/* Make this sticky over multiple CPU init calls */
+	setup_force_cpu_cap(X86_FEATURE_ART);
+}
+
+
 /* clocksource code */
 
 static struct clocksource clocksource_tsc;
@@ -1071,6 +1107,25 @@ int unsynchronized_tsc(void)
 	return 0;
 }
 
+/*
+ * Convert ART to TSC given numerator/denominator found in detect_art()
+ */
+struct system_counterval_t convert_art_to_tsc(cycle_t art)
+{
+	u64 tmp, res, rem;
+
+	rem = do_div(art, art_to_tsc_denominator);
+
+	res = art * art_to_tsc_numerator;
+	tmp = rem * art_to_tsc_numerator;
+
+	do_div(tmp, art_to_tsc_denominator);
+	res += tmp + art_to_tsc_offset;
+
+	return (struct system_counterval_t) {.cs = art_related_clocksource,
+			.cycles = res};
+}
+EXPORT_SYMBOL(convert_art_to_tsc);
 
 static void tsc_refine_calibration_work(struct work_struct *work);
 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
@@ -1142,6 +1197,8 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 		(unsigned long)tsc_khz % 1000);
 
 out:
+	if (boot_cpu_has(X86_FEATURE_ART))
+		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
 
@@ -1235,6 +1292,8 @@ void __init tsc_init(void)
 		mark_tsc_unstable("TSCs unsynchronized");
 
 	check_system_tsc_reliable();
+
+	detect_art();
 }
 
 #ifdef CONFIG_SMP