@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
 	return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+	/*
+	 * Due to interrupt latency (AKA "skid"), we may enter the
+	 * kernel before taking an overflow, even if the PMU is only
+	 * counting user events.
+	 * To avoid leaking information to userspace, we must always
+	 * reject kernel samples when exclude_kernel is set.
+	 */
+	if (event->attr.exclude_kernel && !user_mode(regs))
+		return false;
+
+	return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7336,6 +7351,12 @@ static int __perf_event_overflow(struct perf_event *event,
 
 	ret = __perf_event_account_interrupt(event, throttle);
 
+	/*
+	 * For security, drop the skid kernel samples if necessary.
+	 */
+	if (!sample_is_allowed(event, regs))
+		return ret;
+
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events