|
@@ -1678,12 +1678,33 @@ static bool is_orphaned_event(struct perf_event *event)
|
|
|
return event->state == PERF_EVENT_STATE_DEAD;
|
|
|
}
|
|
|
|
|
|
-static inline int pmu_filter_match(struct perf_event *event)
|
|
|
+static inline int __pmu_filter_match(struct perf_event *event)
|
|
|
{
|
|
|
struct pmu *pmu = event->pmu;
|
|
|
return pmu->filter_match ? pmu->filter_match(event) : 1;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Check whether we should attempt to schedule an event group based on
|
|
|
+ * PMU-specific filtering. An event group can consist of HW and SW events,
|
|
|
+ * potentially with a SW leader, so we must check all the filters to
|
|
|
+ * determine whether a group is schedulable.
|
|
|
+ */
|
|
|
+static inline int pmu_filter_match(struct perf_event *event)
|
|
|
+{
|
|
|
+ struct perf_event *child;
|
|
|
+
|
|
|
+ if (!__pmu_filter_match(event))
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ list_for_each_entry(child, &event->sibling_list, group_entry) {
|
|
|
+ if (!__pmu_filter_match(child))
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
static inline int
|
|
|
event_filter_match(struct perf_event *event)
|
|
|
{
|