@@ -239,7 +239,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
         return ns;
 }
 
-static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+static void set_cyc2ns_scale(unsigned long khz, int cpu)
 {
         unsigned long long tsc_now, ns_now;
         struct cyc2ns_data *data;
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
         local_irq_save(flags);
         sched_clock_idle_sleep_event();
 
-        if (!cpu_khz)
+        if (!khz)
                 goto done;
 
         data = cyc2ns_write_begin(cpu);
@@ -261,7 +261,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
          * time function is continuous; see the comment near struct
          * cyc2ns_data.
          */
-        clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
+        clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
                                NSEC_PER_MSEC, 0);
 
         /*
@@ -665,15 +665,72 @@ success:
 }
 
 /**
- * native_calibrate_tsc - calibrate the tsc on boot
+ * native_calibrate_tsc
+ * Determine TSC frequency via CPUID, else return 0.
  */
 unsigned long native_calibrate_tsc(void)
+{
+        unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
+        unsigned int crystal_khz;
+
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+                return 0;
+
+        if (boot_cpu_data.cpuid_level < 0x15)
+                return 0;
+
+        eax_denominator = ebx_numerator = ecx_hz = edx = 0;
+
+        /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
+        cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+
+        if (ebx_numerator == 0 || eax_denominator == 0)
+                return 0;
+
+        crystal_khz = ecx_hz / 1000;
+
+        if (crystal_khz == 0) {
+                switch (boot_cpu_data.x86_model) {
+                case 0x4E:      /* SKL */
+                case 0x5E:      /* SKL */
+                        crystal_khz = 24000;    /* 24 MHz */
+                }
+        }
+
+        return crystal_khz * ebx_numerator / eax_denominator;
+}
+
+static unsigned long cpu_khz_from_cpuid(void)
+{
+        unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
+
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+                return 0;
+
+        if (boot_cpu_data.cpuid_level < 0x16)
+                return 0;
+
+        eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
+
+        cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
+
+        return eax_base_mhz * 1000;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu on boot
+ */
+unsigned long native_calibrate_cpu(void)
 {
         u64 tsc1, tsc2, delta, ref1, ref2;
         unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
         unsigned long flags, latch, ms, fast_calibrate;
         int hpet = is_hpet_enabled(), i, loopmin;
 
+        fast_calibrate = cpu_khz_from_cpuid();
+        if (fast_calibrate)
+                return fast_calibrate;
+
         fast_calibrate = cpu_khz_from_msr();
         if (fast_calibrate)
                 return fast_calibrate;
@@ -834,8 +891,10 @@ int recalibrate_cpu_khz(void)
         if (!boot_cpu_has(X86_FEATURE_TSC))
                 return -ENODEV;
 
+        cpu_khz = x86_platform.calibrate_cpu();
         tsc_khz = x86_platform.calibrate_tsc();
-        cpu_khz = tsc_khz;
+        if (tsc_khz == 0)
+                tsc_khz = cpu_khz;
         cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                                     cpu_khz_old, cpu_khz);
 
@@ -1241,8 +1300,10 @@ void __init tsc_init(void)
                 return;
         }
 
+        cpu_khz = x86_platform.calibrate_cpu();
         tsc_khz = x86_platform.calibrate_tsc();
-        cpu_khz = tsc_khz;
+        if (tsc_khz == 0)
+                tsc_khz = cpu_khz;
 
         if (!tsc_khz) {
                 mark_tsc_unstable("could not calculate TSC khz");
@@ -1262,7 +1323,7 @@ void __init tsc_init(void)
          */
         for_each_possible_cpu(cpu) {
                 cyc2ns_init(cpu);
-                set_cyc2ns_scale(cpu_khz, cpu);
+                set_cyc2ns_scale(tsc_khz, cpu);
         }
 
         if (tsc_disabled > 0)