@@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
 	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
+	if (unlikely(event->attr.precise_ip))
+		intel_pmu_pebs_disable(event);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_disable_fixed(hwc);
 		return;
 	}
 
 	x86_pmu_disable_event(event);
-
-	if (unlikely(event->attr.precise_ip))
-		intel_pmu_pebs_disable(event);
 }
 
 static void intel_pmu_del_event(struct perf_event *event)
@@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event)
 		x86_perf_event_update(event);
 }
 
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
-	u64 ctrl_val, bits, mask;
+	u64 ctrl_val, mask, bits = 0;
 
 	/*
-	 * Enable IRQ generation (0x8),
+	 * Enable IRQ generation (0x8), if not PEBS,
 	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
 	 * if requested:
 	 */
-	bits = 0x8ULL;
+	if (!event->attr.precise_ip)
+		bits |= 0x8;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
 		bits |= 0x2;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
@@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	if (unlikely(event_is_checkpointed(event)))
 		cpuc->intel_cp_status |= (1ull << hwc->idx);
 
+	if (unlikely(event->attr.precise_ip))
+		intel_pmu_pebs_enable(event);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_enable_fixed(hwc);
+		intel_pmu_enable_fixed(event);
 		return;
 	}
 
-	if (unlikely(event->attr.precise_ip))
-		intel_pmu_pebs_enable(event);
-
 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
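Two things follow directly from the hunks above: moving the intel_pmu_pebs_disable()/intel_pmu_pebs_enable() calls ahead of the MSR_ARCH_PERFMON_FIXED_CTR_CTRL early return means PEBS setup and teardown now also run for events scheduled on fixed counters, and passing the full perf_event into intel_pmu_enable_fixed() lets it leave the PMI-on-overflow bit (0x8) clear for precise events, where the PEBS machinery delivers records instead of a counter interrupt.

For reference, a minimal user-space sketch of an event that would take this path; the event choice, sample period, and error handling are illustrative assumptions, only the perf_event_open(2) interface itself is standard:

#include <linux/perf_event.h>
#include <string.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;	/* INST_RETIRED, eligible for fixed counter 0 */
	attr.sample_period = 100003;			/* arbitrary period, for illustration only */
	attr.precise_ip = 2;				/* request PEBS sampling */
	attr.exclude_kernel = 1;			/* ring 3 only: sets 0x2, not 0x1, in FIXED_CTR_CTRL */
	attr.disabled = 1;

	/* measure the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	close(fd);
	return 0;
}

On hardware where PEBS may use fixed counters, an instructions event with precise_ip set like this can land on fixed counter 0 and end up in the new !event->attr.precise_ip branch of intel_pmu_enable_fixed().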