|
@@ -2096,6 +2096,32 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
|
|
|
return c;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Broadwell:
|
|
|
+ *
|
|
|
+ * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
|
|
|
+ * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
|
|
|
+ * the two to enforce a minimum period of 128 (the smallest value that has bits
|
|
|
+ * 0-5 cleared and >= 100).
|
|
|
+ *
|
|
|
+ * Because of how the code in x86_perf_event_set_period() works, the truncation
|
|
|
+ * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
|
|
|
+ * to make up for the 'lost' events due to carrying the 'error' in period_left.
|
|
|
+ *
|
|
|
+ * Therefore the effective (average) period matches the requested period,
|
|
|
+ * despite coarser hardware granularity.
|
|
|
+ */
|
|
|
+static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
|
|
|
+{
|
|
|
+	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
|
|
|
+			X86_CONFIG(.event=0xc0, .umask=0x01)) { /* INST_RETIRED.ALL */
|
|
|
+		if (left < 128) /* BDM11: never program a period < 100 */
|
|
|
+			left = 128;
|
|
|
+		left &= ~0x3fu; /* BDM55: bits 0-5 of the period must be clear */
|
|
|
+	}
|
|
|
+	return left;
|
|
|
+}
|
|
|
+
|
|
|
PMU_FORMAT_ATTR(event, "config:0-7" );
|
|
|
PMU_FORMAT_ATTR(umask, "config:8-15" );
|
|
|
PMU_FORMAT_ATTR(edge, "config:18" );
|
|
@@ -2774,6 +2800,7 @@ __init int intel_pmu_init(void)
|
|
|
x86_pmu.hw_config = hsw_hw_config;
|
|
|
x86_pmu.get_event_constraints = hsw_get_event_constraints;
|
|
|
x86_pmu.cpu_events = hsw_events_attrs;
|
|
|
+ x86_pmu.limit_period = bdw_limit_period;
|
|
|
pr_cont("Broadwell events, ");
|
|
|
break;
|
|
|
|