@@ -103,23 +103,6 @@ void cyc2ns_read_end(void)
  *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
 
-static void cyc2ns_data_init(struct cyc2ns_data *data)
-{
-	data->cyc2ns_mul = 0;
-	data->cyc2ns_shift = 0;
-	data->cyc2ns_offset = 0;
-}
-
-static void __init cyc2ns_init(int cpu)
-{
-	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
-	cyc2ns_data_init(&c2n->data[0]);
-	cyc2ns_data_init(&c2n->data[1]);
-
-	seqcount_init(&c2n->seq);
-}
-
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
 	struct cyc2ns_data data;
@@ -135,18 +118,11 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 	return ns;
 }
 
-static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
 {
 	unsigned long long ns_now;
 	struct cyc2ns_data data;
 	struct cyc2ns *c2n;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	sched_clock_idle_sleep_event();
-
-	if (!khz)
-		goto done;
 
 	ns_now = cycles_2_ns(tsc_now);
 
@@ -178,12 +154,55 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
 	c2n->data[0] = data;
 	raw_write_seqcount_latch(&c2n->seq);
 	c2n->data[1] = data;
+}
+
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	sched_clock_idle_sleep_event();
+
+	if (khz)
+		__set_cyc2ns_scale(khz, cpu, tsc_now);
 
-done:
 	sched_clock_idle_wakeup_event();
 	local_irq_restore(flags);
 }
 
+/*
+ * Initialize cyc2ns for boot cpu
+ */
+static void __init cyc2ns_init_boot_cpu(void)
+{
+	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+
+	seqcount_init(&c2n->seq);
+	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
+}
+
+/*
+ * Secondary CPUs do not run through tsc_init(), so set up
+ * all the scale factors for all CPUs, assuming the same
+ * speed as the bootup CPU. (cpufreq notifiers will fix this
+ * up if their speed diverges)
+ */
+static void __init cyc2ns_init_secondary_cpus(void)
+{
+	unsigned int cpu, this_cpu = smp_processor_id();
+	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+	struct cyc2ns_data *data = c2n->data;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu != this_cpu) {
+			seqcount_init(&c2n->seq);
+			c2n = per_cpu_ptr(&cyc2ns, cpu);
+			c2n->data[0] = data[0];
+			c2n->data[1] = data[1];
+		}
+	}
+}
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -1385,6 +1404,10 @@ void __init tsc_early_init(void)
 	if (!determine_cpu_tsc_frequencies())
 		return;
 	loops_per_jiffy = get_loops_per_jiffy();
+
+	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
+	tsc_store_and_check_tsc_adjust(true);
+	cyc2ns_init_boot_cpu();
 }
 
 void __init tsc_init(void)
@@ -1401,23 +1424,12 @@ void __init tsc_init(void)
 			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 			return;
 		}
+		/* Sanitize TSC ADJUST before cyc2ns gets initialized */
+		tsc_store_and_check_tsc_adjust(true);
+		cyc2ns_init_boot_cpu();
 	}
 
-	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
-	tsc_store_and_check_tsc_adjust(true);
-
-	/*
-	 * Secondary CPUs do not run through tsc_init(), so set up
-	 * all the scale factors for all CPUs, assuming the same
-	 * speed as the bootup CPU. (cpufreq notifiers will fix this
-	 * up if their speed diverges)
-	 */
-	cyc = rdtsc();
-	for_each_possible_cpu(cpu) {
-		cyc2ns_init(cpu);
-		set_cyc2ns_scale(tsc_khz, cpu, cyc);
-	}
-
+	cyc2ns_init_secondary_cpus();
 	static_branch_enable(&__use_tsc);
 
 	if (!no_sched_irq_time)