@@ -11,6 +11,7 @@
 #include <linux/clocksource.h>
 #include <linux/percpu.h>
 #include <linux/timex.h>
+#include <linux/static_key.h>
 
 #include <asm/hpet.h>
 #include <asm/timer.h>
@@ -37,6 +38,8 @@ static int __read_mostly tsc_unstable;
 	  erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
+static struct static_key __use_tsc = STATIC_KEY_INIT;
+
 int tsc_clocksource_reliable;
 
 /*
@@ -282,7 +285,7 @@ u64 native_sched_clock(void)
 	 * very important for it to be as fast as the platform
 	 * can achieve it. )
 	 */
-	if (unlikely(tsc_disabled)) {
+	if (!static_key_false(&__use_tsc)) {
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 	}
@@ -1193,7 +1196,9 @@ void __init tsc_init(void)
 		return;
 
 	/* now allow native_sched_clock() to use rdtsc */
+
 	tsc_disabled = 0;
+	static_key_slow_inc(&__use_tsc);
 
 	if (!no_sched_irq_time)
 		enable_sched_clock_irqtime();