@@ -789,6 +789,11 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	struct debug_store *ds = cpuc->ds;
+	bool large_pebs = ds->pebs_interrupt_threshold >
+		ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+
+	if (large_pebs)
+		intel_pmu_drain_pebs_buffer();
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
@@ -797,12 +802,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
 		cpuc->pebs_enabled &= ~(1ULL << 63);
 
-	if (ds->pebs_interrupt_threshold >
-	    ds->pebs_buffer_base + x86_pmu.pebs_record_size) {
-		intel_pmu_drain_pebs_buffer();
-		if (!pebs_is_enabled(cpuc))
-			perf_sched_cb_dec(event->ctx->pmu);
-	}
+	if (large_pebs && !pebs_is_enabled(cpuc))
+		perf_sched_cb_dec(event->ctx->pmu);
 
 	if (cpuc->enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);