@@ -395,6 +395,36 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
+	   const void *, unsafe_ptr)
+{
+	int ret;
+
+	/*
+	 * The strncpy_from_unsafe() call will likely not fill the entire
+	 * buffer, but that's okay in this circumstance as we're probing
+	 * arbitrary memory anyway similar to bpf_probe_read() and might
+	 * as well probe the stack. Thus, memory is explicitly cleared
+	 * only in error case, so that improper users ignoring return
+	 * code altogether don't copy garbage; otherwise length of string
+	 * is returned that can be used for bpf_perf_event_output() et al.
+	 */
+	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
+	if (unlikely(ret < 0))
+		memset(dst, 0, size);
+
+	return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_str_proto = {
+	.func		= bpf_probe_read_str,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
+	.arg3_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -432,6 +462,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
 		return &bpf_current_task_under_cgroup_proto;
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
+	case BPF_FUNC_probe_read_str:
+		return &bpf_probe_read_str_proto;
 	default:
 		return NULL;
 	}
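
For reference, a minimal sketch of how a tracing program could consume the
new helper, written in the samples/bpf style (the kprobe target, the
"events" perf map, the trace_execve name, and the 64-byte buffer are
illustrative assumptions, not part of this patch):

	#include <uapi/linux/bpf.h>
	#include <uapi/linux/ptrace.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") events = {
		.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size	= sizeof(int),
		.value_size	= sizeof(u32),
		.max_entries	= 64,
	};

	SEC("kprobe/sys_execve")
	int trace_execve(struct pt_regs *ctx)
	{
		char buf[64];
		int len;

		/* On success, len is the length of the copied string and
		 * can be passed straight to bpf_perf_event_output(); on
		 * error the helper has already zeroed buf, so no stale
		 * stack data leaks out even if the return code is ignored.
		 */
		len = bpf_probe_read_str(buf, sizeof(buf),
					 (void *)PT_REGS_PARM1(ctx));
		if (len > 0)
			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
					      buf, len);
		return 0;
	}

	/* GPL license required since the proto is marked gpl_only. */
	char _license[] SEC("license") = "GPL";

Passing the helper's return value as the output size means only the actual
string (rather than the whole fixed-size buffer) is copied into the perf
ring, which is the use case the in-code comment above alludes to.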