@@ -376,6 +376,34 @@ static const struct bpf_func_proto bpf_get_current_task_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
+static u64 bpf_current_task_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct bpf_map *map = (struct bpf_map *)(long)r1;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct cgroup *cgrp;
+	u32 idx = (u32)r2;
+
+	if (unlikely(in_interrupt()))
+		return -EINVAL;
+
+	if (unlikely(idx >= array->map.max_entries))
+		return -E2BIG;
+
+	cgrp = READ_ONCE(array->ptrs[idx]);
+	if (unlikely(!cgrp))
+		return -EAGAIN;
+
+	return task_under_cgroup_hierarchy(current, cgrp);
+}
+
+static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
+	.func		= bpf_current_task_under_cgroup,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -407,6 +435,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
 		return &bpf_perf_event_read_proto;
 	case BPF_FUNC_probe_write_user:
 		return bpf_get_probe_write_proto();
+	case BPF_FUNC_current_task_under_cgroup:
+		return &bpf_current_task_under_cgroup_proto;
 	default:
 		return NULL;
 	}
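
For context, a minimal sketch of how a tracing program might consume the new helper, written in the style of the samples/bpf programs; the map name cgroup_map, the attach point, and the bpf_helpers.h wrapper are illustrative assumptions, not part of this patch:

/* Illustrative sketch only: map name, index, and kprobe target are
 * assumptions, not defined by this patch.
 */
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* BPF_MAP_TYPE_CGROUP_ARRAY slot that user space fills with a cgroup fd. */
struct bpf_map_def SEC("maps") cgroup_map = {
	.type		= BPF_MAP_TYPE_CGROUP_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u32),
	.max_entries	= 1,
};

SEC("kprobe/sys_sync")
int on_sys_sync(struct pt_regs *ctx)
{
	/* Returns 1 if current runs under the cgroup at index 0, 0 if
	 * not, or a negative error per the helper above (-EINVAL in
	 * interrupt context, -E2BIG for a bad index, -EAGAIN for an
	 * empty slot).
	 */
	if (bpf_current_task_under_cgroup(&cgroup_map, 0) != 1)
		return 0;

	/* ... work filtered to tasks inside the target cgroup ... */
	return 0;
}

char _license[] SEC("license") = "GPL";

User space would store a cgroup fd at index 0 of cgroup_map via bpf_map_update_elem() before the probe fires, which is what makes the READ_ONCE(array->ptrs[idx]) lookup in the helper succeed.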