@@ -7992,9 +7992,77 @@ static struct pmu perf_tracepoint = {
 	.read		= perf_swevent_read,
 };
 
+#ifdef CONFIG_KPROBE_EVENTS
+/*
+ * Flags in config, used by dynamic PMU kprobe and uprobe
+ * The flags should match the following PMU_FORMAT_ATTR().
+ *
+ * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
+ *                               if not set, create kprobe/uprobe
+ */
+enum perf_probe_config {
+	PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0,  /* [k,u]retprobe */
+};
+
+PMU_FORMAT_ATTR(retprobe, "config:0");
+
+static struct attribute *probe_attrs[] = {
+	&format_attr_retprobe.attr,
+	NULL,
+};
+
+static struct attribute_group probe_format_group = {
+	.name = "format",
+	.attrs = probe_attrs,
+};
+
+static const struct attribute_group *probe_attr_groups[] = {
+	&probe_format_group,
+	NULL,
+};
+
+static int perf_kprobe_event_init(struct perf_event *event);
+static struct pmu perf_kprobe = {
+	.task_ctx_nr	= perf_sw_context,
+	.event_init	= perf_kprobe_event_init,
+	.add		= perf_trace_add,
+	.del		= perf_trace_del,
+	.start		= perf_swevent_start,
+	.stop		= perf_swevent_stop,
+	.read		= perf_swevent_read,
+	.attr_groups	= probe_attr_groups,
+};
+
+static int perf_kprobe_event_init(struct perf_event *event)
+{
+	int err;
+	bool is_retprobe;
+
+	if (event->attr.type != perf_kprobe.type)
+		return -ENOENT;
+	/*
+	 * no branch sampling for probe events
+	 */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
+	err = perf_kprobe_init(event, is_retprobe);
+	if (err)
+		return err;
+
+	event->destroy = perf_kprobe_destroy;
+
+	return 0;
+}
+#endif /* CONFIG_KPROBE_EVENTS */
+
 static inline void perf_tp_register(void)
 {
 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
+#ifdef CONFIG_KPROBE_EVENTS
+	perf_pmu_register(&perf_kprobe, "kprobe", -1);
+#endif
 }
 
 static void perf_event_free_filter(struct perf_event *event)
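A minimal userspace sketch of how the new PMU can be driven, not part of the patch itself. Because perf_pmu_register() above is called with type -1, the PMU type id is allocated dynamically and has to be read from sysfs; setting bit 0 of attr.config selects a kretprobe per the "retprobe" format attribute. The kprobe_func/probe_offset attr fields are assumed from the uapi side of this series, and open_kretprobe() is a hypothetical helper:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Hypothetical helper, assumes the uapi changes from this series. */
static int open_kretprobe(const char *func)
{
	struct perf_event_attr attr;
	FILE *f;
	int type;

	/* Dynamically allocated PMU type id, exported via sysfs. */
	f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;		/* perf_kprobe.type after registration */
	attr.config = 1;		/* PERF_PROBE_CONFIG_IS_RETPROBE ("config:0") */
	attr.kprobe_func = (unsigned long)func;	/* attr field from this series */
	attr.probe_offset = 0;

	/* All processes, CPU 0; needs suitable perf_event_paranoid. */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}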
@@ -8071,13 +8139,28 @@ static void perf_event_free_bpf_handler(struct perf_event *event)
 }
 #endif
 
+/*
+ * returns true if the event is a tracepoint, or a kprobe/uprobe created
+ * with perf_event_open()
+ */
+static inline bool perf_event_is_tracing(struct perf_event *event)
+{
+	if (event->pmu == &perf_tracepoint)
+		return true;
+#ifdef CONFIG_KPROBE_EVENTS
+	if (event->pmu == &perf_kprobe)
+		return true;
+#endif
+	return false;
+}
+
 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 {
 	bool is_kprobe, is_tracepoint, is_syscall_tp;
 	struct bpf_prog *prog;
 	int ret;
 
-	if (event->attr.type != PERF_TYPE_TRACEPOINT)
+	if (!perf_event_is_tracing(event))
 		return perf_event_set_bpf_handler(event, prog_fd);
 
 	is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
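Note why the helper compares pmu pointers rather than attr.type: perf_kprobe has no fixed PERF_TYPE_* constant, so the old attr.type check cannot cover it. With perf_event_set_bpf_prog() now keyed on perf_event_is_tracing(), the existing ioctl path should also attach BPF programs to perf_kprobe fds. A hedged sketch, assuming event_fd came from perf_event_open() on the "kprobe" PMU and bpf_fd is a loaded BPF_PROG_TYPE_KPROBE program; attach_and_enable() is a hypothetical helper:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Hypothetical helper, not part of this patch. */
static int attach_and_enable(int event_fd, int bpf_fd)
{
	/* Routed to perf_event_set_bpf_prog() in the kernel. */
	if (ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, bpf_fd) < 0)
		return -1;
	return ioctl(event_fd, PERF_EVENT_IOC_ENABLE, 0);
}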
@@ -8116,7 +8199,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 
 static void perf_event_free_bpf_prog(struct perf_event *event)
 {
-	if (event->attr.type != PERF_TYPE_TRACEPOINT) {
+	if (!perf_event_is_tracing(event)) {
 		perf_event_free_bpf_handler(event);
 		return;
 	}
@@ -8535,47 +8618,36 @@ fail_clear_files:
 	return ret;
 }
 
-static int
-perf_tracepoint_set_filter(struct perf_event *event, char *filter_str)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret;
-
-	/*
-	 * Beware, here be dragons!!
-	 *
-	 * the tracepoint muck will deadlock against ctx->mutex, but the tracepoint
-	 * stuff does not actually need it. So temporarily drop ctx->mutex. As per
-	 * perf_event_ctx_lock() we already have a reference on ctx.
-	 *
-	 * This can result in event getting moved to a different ctx, but that
-	 * does not affect the tracepoint state.
-	 */
-	mutex_unlock(&ctx->mutex);
-	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
-	mutex_lock(&ctx->mutex);
-
-	return ret;
-}
-
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
 {
-	char *filter_str;
 	int ret = -EINVAL;
-
-	if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
-	     !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
-	    !has_addr_filter(event))
-		return -EINVAL;
+	char *filter_str;
 
 	filter_str = strndup_user(arg, PAGE_SIZE);
 	if (IS_ERR(filter_str))
 		return PTR_ERR(filter_str);
 
-	if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
-	    event->attr.type == PERF_TYPE_TRACEPOINT)
-		ret = perf_tracepoint_set_filter(event, filter_str);
-	else if (has_addr_filter(event))
+#ifdef CONFIG_EVENT_TRACING
+	if (perf_event_is_tracing(event)) {
+		struct perf_event_context *ctx = event->ctx;
+
+		/*
+		 * Beware, here be dragons!!
+		 *
+		 * the tracepoint muck will deadlock against ctx->mutex, but
+		 * the tracepoint stuff does not actually need it. So
+		 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
+		 * already have a reference on ctx.
+		 *
+		 * This can result in event getting moved to a different ctx,
+		 * but that does not affect the tracepoint state.
+		 */
+		mutex_unlock(&ctx->mutex);
+		ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+		mutex_lock(&ctx->mutex);
+	} else
+#endif
+	if (has_addr_filter(event))
 		ret = perf_event_set_addr_filter(event, filter_str);
 
 	kfree(filter_str);
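Since perf_event_set_filter() now routes any tracing event, including perf_kprobe/perf_uprobe ones, through ftrace_profile_set_filter(), the PERF_EVENT_IOC_SET_FILTER ioctl that works on tracepoint fds should work on probe fds as well. A hedged sketch using the usual trace-event filter syntax; set_pid_filter() is a hypothetical helper:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Hypothetical helper, not part of this patch. */
static int set_pid_filter(int event_fd, int pid)
{
	char buf[64];

	/* Filter string uses the common trace-event fields. */
	snprintf(buf, sizeof(buf), "common_pid == %d", pid);
	return ioctl(event_fd, PERF_EVENT_IOC_SET_FILTER, buf);
}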