@@ -38,7 +38,7 @@ static int __read_mostly tsc_unstable;
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
-static struct static_key __use_tsc = STATIC_KEY_INIT;
+static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
 
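The first hunk replaces the old bare struct static_key declared with STATIC_KEY_INIT by the type-safe DEFINE_STATIC_KEY_FALSE() wrapper, which defines a key whose branches default to the disabled (false) state until it is flipped at runtime. A minimal sketch of the pattern, with a hypothetical key name and helper (debug_enabled and do_debug_work() are illustrations, not part of this patch):

    static DEFINE_STATIC_KEY_FALSE(debug_enabled);

    void hot_path(void)
    {
            /* compiled as straight-line code with a nop until enabled */
            if (static_branch_unlikely(&debug_enabled))
                    do_debug_work();
    }

    /* later, e.g. from setup code or a sysfs handler: */
    static_branch_enable(&debug_enabled);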
@@ -274,7 +274,12 @@ done:
  */
 u64 native_sched_clock(void)
 {
-	u64 tsc_now;
+	if (static_branch_likely(&__use_tsc)) {
+		u64 tsc_now = rdtsc();
+
+		/* return the value in ns */
+		return cycles_2_ns(tsc_now);
+	}
 
 	/*
 	 * Fall back to jiffies if there's no TSC available:
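This hunk moves the TSC read into a static_branch_likely() block, making the common case (a usable TSC) the straight-line path through the scheduler clock, while the jiffies fallback is only reached while the key is still false. The likely/unlikely variants select which side of the branch is laid out as the fall-through; a sketch of the contrast (key, fast_path() and rare_path() are placeholder names):

    /* static_branch_likely(): the true side falls straight through */
    if (static_branch_likely(&key))
            fast_path();

    /* static_branch_unlikely(): the true side is placed out of line */
    if (static_branch_unlikely(&key))
            rare_path();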
@@ -284,16 +289,9 @@ u64 native_sched_clock(void)
 	 *   very important for it to be as fast as the platform
 	 *   can achieve it. )
 	 */
-	if (!static_key_false(&__use_tsc)) {
-		/* No locking but a rare wrong value is not a big deal: */
-		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
-	}
-
-	/* read the Time Stamp Counter: */
-	tsc_now = rdtsc();
 
-	/* return the value in ns */
-	return cycles_2_ns(tsc_now);
+	/* No locking but a rare wrong value is not a big deal: */
+	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 }
 
 /* We need to define a real function for sched_clock, to override the
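Pieced together, the two native_sched_clock() hunks leave the function reading as below (the body of the fallback comment is abbreviated here):

    u64 native_sched_clock(void)
    {
            if (static_branch_likely(&__use_tsc)) {
                    u64 tsc_now = rdtsc();

                    /* return the value in ns */
                    return cycles_2_ns(tsc_now);
            }

            /* Fall back to jiffies if there's no TSC available: ... */

            /* No locking but a rare wrong value is not a big deal: */
            return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
    }

The fallback only has tick resolution: jiffies_64 - INITIAL_JIFFIES is the number of timer ticks since boot, and each tick accounts for 1000000000 / HZ nanoseconds, e.g. 4,000,000 ns (4 ms) per tick on an HZ=250 build.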
@@ -1204,7 +1202,7 @@ void __init tsc_init(void)
 	/* now allow native_sched_clock() to use rdtsc */
 
 	tsc_disabled = 0;
-	static_key_slow_inc(&__use_tsc);
+	static_branch_enable(&__use_tsc);
 
 	if (!no_sched_irq_time)
 		enable_sched_clock_irqtime();
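Note the semantic shift in the enable path: static_key_slow_inc() incremented a reference count, while static_branch_enable() simply sets the key true. That fits here, since __use_tsc is flipped once during boot and never disabled again. Where counted enable/disable pairs are still wanted, the new interface also provides counted helpers (sketched with a placeholder key, not used by this patch):

    static_branch_inc(&some_key);   /* nestable enable */
    static_branch_dec(&some_key);   /* paired disable */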