@@ -1907,13 +1907,6 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
 	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
-	/*
-	 * must disable before any actual event
-	 * because any event may be combined with LBR
-	 */
-	if (needs_branch_stack(event))
-		intel_pmu_lbr_disable(event);
-
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_disable_fixed(hwc);
 		return;
@@ -1925,6 +1918,14 @@ static void intel_pmu_disable_event(struct perf_event *event)
 		intel_pmu_pebs_disable(event);
 }
 
+static void intel_pmu_del_event(struct perf_event *event)
+{
+	if (needs_branch_stack(event))
+		intel_pmu_lbr_del(event);
+	if (event->attr.precise_ip)
+		intel_pmu_pebs_del(event);
+}
+
 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
 	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
@@ -1968,12 +1969,6 @@ static void intel_pmu_enable_event(struct perf_event *event)
 		intel_pmu_enable_bts(hwc->config);
 		return;
 	}
-	/*
-	 * must enabled before any actual event
-	 * because any event may be combined with LBR
-	 */
-	if (needs_branch_stack(event))
-		intel_pmu_lbr_enable(event);
 
 	if (event->attr.exclude_host)
 		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
@@ -1994,6 +1989,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
+static void intel_pmu_add_event(struct perf_event *event)
+{
+	if (event->attr.precise_ip)
+		intel_pmu_pebs_add(event);
+	if (needs_branch_stack(event))
+		intel_pmu_lbr_add(event);
+}
+
 /*
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
@@ -3290,6 +3293,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.enable_all		= intel_pmu_enable_all,
 	.enable			= intel_pmu_enable_event,
 	.disable		= intel_pmu_disable_event,
+	.add			= intel_pmu_add_event,
+	.del			= intel_pmu_del_event,
 	.hw_config		= intel_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
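
For context, the effect of the hunks above is to move the LBR and PEBS bookkeeping out of the enable/disable path and into new, optional .add/.del members of the x86_pmu ops table. The stand-alone sketch below is not part of the patch; every name in it is illustrative. It only mirrors the pattern the patch relies on: an ops table with optional callbacks that the caller guards with a NULL check, so PMUs that do not need add/del hooks simply leave them unset.

```c
/* Stand-alone illustration of the optional-callback pattern implied above.
 * All names are illustrative; only the dispatch shape mirrors the patch. */
#include <stdio.h>
#include <stdbool.h>

struct event {
	bool precise_ip;	/* stands in for event->attr.precise_ip */
	bool branch_stack;	/* stands in for needs_branch_stack()   */
};

struct pmu_ops {
	void (*enable)(struct event *e);
	void (*disable)(struct event *e);
	void (*add)(struct event *e);	/* optional: set up PEBS/LBR state    */
	void (*del)(struct event *e);	/* optional: tear down PEBS/LBR state */
};

static void demo_add(struct event *e)
{
	if (e->precise_ip)
		printf("add: set up PEBS state\n");
	if (e->branch_stack)
		printf("add: set up LBR state\n");
}

static void demo_del(struct event *e)
{
	if (e->branch_stack)
		printf("del: release LBR state\n");
	if (e->precise_ip)
		printf("del: release PEBS state\n");
}

static void demo_enable(struct event *e)
{
	(void)e;
	printf("enable: program the counter\n");
}

static void demo_disable(struct event *e)
{
	(void)e;
	printf("disable: stop the counter\n");
}

static const struct pmu_ops ops = {
	.enable		= demo_enable,
	.disable	= demo_disable,
	.add		= demo_add,
	.del		= demo_del,
};

int main(void)
{
	struct event e = { .precise_ip = true, .branch_stack = true };

	/* Callers guard the optional hooks, so .add/.del may stay NULL. */
	if (ops.add)
		ops.add(&e);
	ops.enable(&e);
	ops.disable(&e);
	if (ops.del)
		ops.del(&e);
	return 0;
}
```

Note the ordering in the sketch matches the patch: the add side sets up PEBS before LBR, while the del side tears them down in the reverse order.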