@@ -38,11 +38,6 @@ EXPORT_SYMBOL(tsc_khz);
  */
 static int __read_mostly tsc_unstable;
 
-/* native_sched_clock() is called before tsc_init(), so
-   we must start with the TSC soft disabled to prevent
-   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
-static int __read_mostly tsc_disabled = -1;
-
 static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
@@ -248,8 +243,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
 #ifdef CONFIG_X86_TSC
 int __init notsc_setup(char *str)
 {
-	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
-	tsc_disabled = 1;
+	mark_tsc_unstable("boot parameter notsc");
 	return 1;
 }
 #else
@@ -1307,7 +1301,7 @@ unreg:
 
 static int __init init_tsc_clocksource(void)
 {
-	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
+	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
 		return 0;
 
 	if (tsc_unstable)
@@ -1414,12 +1408,6 @@ void __init tsc_init(void)
 		set_cyc2ns_scale(tsc_khz, cpu, cyc);
 	}
 
-	if (tsc_disabled > 0)
-		return;
-
-	/* now allow native_sched_clock() to use rdtsc */
-
-	tsc_disabled = 0;
 	static_branch_enable(&__use_tsc);
 
 	if (!no_sched_irq_time)
@@ -1455,7 +1443,7 @@ unsigned long calibrate_delay_is_known(void)
 	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
 	const struct cpumask *mask = topology_core_cpumask(cpu);
 
-	if (tsc_disabled || !constant_tsc || !mask)
+	if (!constant_tsc || !mask)
 		return 0;
 
 	sibling = cpumask_any_but(mask, cpu);