@@ -3102,10 +3102,49 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
 	return flags;
 }
 
+static int intel_pmu_bts_config(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+	    !attr->freq && hwc->sample_period == 1) {
+		/* BTS is not supported by this architecture. */
+		if (!x86_pmu.bts_active)
+			return -EOPNOTSUPP;
+
+		/* BTS is currently only allowed for user-mode. */
+		if (!attr->exclude_kernel)
+			return -EOPNOTSUPP;
+
+		/* disallow bts if conflicting events are present */
+		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+			return -EBUSY;
+
+		event->destroy = hw_perf_lbr_event_destroy;
+	}
+
+	return 0;
+}
+
+static int core_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	return intel_pmu_bts_config(event);
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
 	int ret = x86_pmu_hw_config(event);
 
+	if (ret)
+		return ret;
+
+	ret = intel_pmu_bts_config(event);
 	if (ret)
 		return ret;
 
@@ -3600,7 +3639,7 @@ static __initconst const struct x86_pmu core_pmu = {
 	.enable_all = core_pmu_enable_all,
 	.enable = core_pmu_enable_event,
 	.disable = x86_pmu_disable_event,
-	.hw_config = x86_pmu_hw_config,
+	.hw_config = core_pmu_hw_config,
 	.schedule_events = x86_schedule_events,
 	.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
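
For reference, the BTS path factored out above is only entered for one specific event shape, and with core_pmu's hw_config now routed through core_pmu_hw_config() the core PMU applies the same checks. The userspace sketch below is illustrative only and not part of the patch; it opens a branch-instructions event with a sample period of 1, no frequency mode, and kernel sampling excluded, which is exactly the combination intel_pmu_bts_config() inspects.

/*
 * Illustrative sketch, not part of the patch: open an event in the
 * shape that reaches intel_pmu_bts_config() -- branch instructions,
 * period of 1, !freq, user-mode only. Whether the open succeeds or
 * returns EOPNOTSUPP/EBUSY depends on the running kernel and CPU.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.sample_period = 1;		/* hwc->sample_period == 1 */
	attr.freq = 0;			/* !attr->freq */
	attr.exclude_kernel = 1;	/* BTS is user-mode only */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
		     -1 /* any CPU */, -1 /* no group */, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}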