@@ -1029,20 +1029,6 @@ static __initconst const u64 slm_hw_cache_event_ids
  },
 };
 
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
-{
-	/* user explicitly requested branch sampling */
-	if (has_branch_stack(event))
-		return true;
-
-	/* implicit branch sampling to correct PEBS skid */
-	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
-	    x86_pmu.intel_cap.pebs_format < 2)
-		return true;
-
-	return false;
-}
-
 static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1207,7 +1193,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	 * must disable before any actual event
 	 * because any event may be combined with LBR
 	 */
-	if (intel_pmu_needs_lbr_smpl(event))
+	if (needs_branch_stack(event))
 		intel_pmu_lbr_disable(event);
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -1268,7 +1254,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	 * must enabled before any actual event
 	 * because any event may be combined with LBR
 	 */
-	if (intel_pmu_needs_lbr_smpl(event))
+	if (needs_branch_stack(event))
 		intel_pmu_lbr_enable(event);
 
 	if (event->attr.exclude_host)
@@ -1747,7 +1733,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
 		x86_pmu.pebs_aliases(event);
 
-	if (intel_pmu_needs_lbr_smpl(event)) {
+	if (needs_branch_stack(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
 		if (ret)
 			return ret;
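
Note for reviewers: needs_branch_stack(), which replaces the local helper at all three call sites, is not defined in this hunk; it is expected to come from the generic perf code. A minimal sketch of such a helper, assuming it only tests whether the event requested any branch sampling via attr.branch_sample_type (the actual definition in include/linux/perf_event.h may differ):

/*
 * Hypothetical sketch, not part of this patch: reports whether the event
 * needs the branch stack (LBR) facility, i.e. whether user space requested
 * any branch sample type for this event.
 */
static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}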