@@ -31,6 +31,7 @@
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
+#include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/desc.h>
@@ -1328,8 +1329,6 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_STARTING:
-		if (x86_pmu.attr_rdpmc)
-			cr4_set_bits(X86_CR4_PCE);
 		if (x86_pmu.cpu_starting)
 			x86_pmu.cpu_starting(cpu);
 		break;
@@ -1805,14 +1804,44 @@ static int x86_pmu_event_init(struct perf_event *event)
 			event->destroy(event);
 	}
 
+	if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+		event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
+
 	return err;
 }
 
+static void refresh_pce(void *ignored)
+{
+	if (current->mm)
+		load_mm_cr4(current->mm);
+}
+
+static void x86_pmu_event_mapped(struct perf_event *event)
+{
+	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+		return;
+
+	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
+		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+}
+
+static void x86_pmu_event_unmapped(struct perf_event *event)
+{
+	if (!current->mm)
+		return;
+
+	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+		return;
+
+	if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
+		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+}
+
 static int x86_pmu_event_idx(struct perf_event *event)
 {
 	int idx = event->hw.idx;
 
-	if (!x86_pmu.attr_rdpmc)
+	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return 0;
 
 	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
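For reference, the new refresh_pce() callback leans on load_mm_cr4(), which this hunk only calls (hence the new asm/mmu_context.h include above). A minimal sketch of what that helper is assumed to do, keyed off the new per-mm perf_rdpmc_allowed count:

	static inline void load_mm_cr4(struct mm_struct *mm)
	{
		if (atomic_read(&mm->context.perf_rdpmc_allowed))
			cr4_set_bits(X86_CR4_PCE);	/* let user mode execute RDPMC */
		else
			cr4_clear_bits(X86_CR4_PCE);	/* RDPMC #GPs in user mode */
	}

With that shape, context switches and the IPIs sent from x86_pmu_event_mapped()/x86_pmu_event_unmapped() converge on one rule: CR4.PCE is set on a CPU only while it runs an mm with at least one rdpmc-enabled mapping.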
@@ -1830,16 +1859,6 @@ static ssize_t get_attr_rdpmc(struct device *cdev,
 	return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
 }
 
-static void change_rdpmc(void *info)
-{
-	bool enable = !!(unsigned long)info;
-
-	if (enable)
-		cr4_set_bits(X86_CR4_PCE);
-	else
-		cr4_clear_bits(X86_CR4_PCE);
-}
-
 static ssize_t set_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
@@ -1854,11 +1873,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (x86_pmu.attr_rdpmc_broken)
 		return -ENOTSUPP;
 
-	if (!!val != !!x86_pmu.attr_rdpmc) {
-		x86_pmu.attr_rdpmc = !!val;
-		on_each_cpu(change_rdpmc, (void *)val, 1);
-	}
-
+	x86_pmu.attr_rdpmc = !!val;
 	return count;
 }
 
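Note the changed semantics of the sysfs knob: a write now only records the value, so it affects events created afterwards (via the PERF_X86_EVENT_RDPMC_ALLOWED flag set in x86_pmu_event_init()) instead of flipping CR4.PCE on every CPU. A userspace sketch of toggling it, assuming the attribute is exposed at /sys/devices/cpu/rdpmc as is conventional for the x86 PMU:

	#include <fcntl.h>
	#include <unistd.h>

	/* Toggle the rdpmc attribute; only newly initialized events notice. */
	static int set_rdpmc_attr(int enable)
	{
		int fd = open("/sys/devices/cpu/rdpmc", O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, enable ? "1" : "0", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}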
@@ -1901,6 +1916,9 @@ static struct pmu pmu = {
 
	.event_init		= x86_pmu_event_init,
 
+	.event_mapped		= x86_pmu_event_mapped,
+	.event_unmapped		= x86_pmu_event_unmapped,
+
	.add			= x86_pmu_add,
	.del			= x86_pmu_del,
	.start			= x86_pmu_start,
@@ -1922,7 +1940,8 @@ void arch_perf_update_userpage(struct perf_event *event,
 
	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
-	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
+	userpg->cap_user_rdpmc =
+		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
	userpg->pmc_width = x86_pmu.cntval_bits;
 
	if (!sched_clock_stable())
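Taken together, a task can now execute RDPMC only while it holds a perf_event self-monitoring mapping, and cap_user_rdpmc tells it so per event. A minimal userspace sketch (the pc->lock seqcount retry loop and pmc_width sign extension are omitted for brevity; the index/offset handling follows the perf_event_mmap_page documentation):

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static uint64_t rdpmc(uint32_t counter)
	{
		uint32_t lo, hi;

		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
		return lo | ((uint64_t)hi << 32);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		struct perf_event_mmap_page *pc;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.exclude_kernel = 1;

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		/* This mmap() is what now sets CR4.PCE for the task's mm. */
		pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
		if (pc == MAP_FAILED)
			return 1;

		/* index == 0 means no counter is assigned; else pass index - 1. */
		if (pc->cap_user_rdpmc && pc->index)
			printf("instructions: %llu\n", (unsigned long long)
			       (pc->offset + rdpmc(pc->index - 1)));

		return 0;
	}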