
Merge branch 'bpf_cgroup_classid'

Daniel Borkmann says:

====================
BPF update

This small helper allows for accessing the net_cls cgroup classid. Please
see individual patches for more details.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller · 10 years ago · commit 03b6dc7d17
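
For a sense of how the new helper is meant to be consumed, here is a minimal
sketch of a tc eBPF classifier calling it. This is not part of the series: the
program and section names are hypothetical, and the by-enum helper dispatch
follows the samples/bpf convention of this era.

	#include <linux/bpf.h>

	/* samples/bpf-style dispatch: resolve the helper by its enum id. */
	static u32 (*bpf_get_cgroup_classid)(struct __sk_buff *skb) =
		(void *) BPF_FUNC_get_cgroup_classid;

	/* Hypothetical tc classifier: hand the sender's net_cls classid
	 * back as the class verdict, or -1 ("no match") when none is set.
	 */
	__attribute__((section("classifier"), used))
	int cgroup_classify(struct __sk_buff *skb)
	{
		u32 classid = bpf_get_cgroup_classid(skb);

		return classid ? classid : -1;
	}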

+ 29 - 0
include/net/cls_cgroup.h

@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
 	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	u32 classid = task_cls_state(current)->classid;
+
+	/* Due to the nature of the classifier it is required to ignore all
+	 * packets originating from softirq context as accessing `current'
+	 * would lead to false results.
+	 *
+	 * This test assumes that all callers of dev_queue_xmit() explicitly
+	 * disable bh. Knowing this, it is possible to detect softirq based
+	 * calls by looking at the number of nested bh disable calls because
+	 * softirqs always disable bh.
+	 */
+	if (in_serving_softirq()) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return 0;
+
+		classid = skb->sk->sk_classid;
+	}
+
+	return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif  /* _NET_CLS_CGROUP_H */
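
For readers tracing the softirq test above: in_serving_softirq() inspects the
softirq bits of preempt_count. local_bh_disable() raises the count by
SOFTIRQ_DISABLE_OFFSET (two SOFTIRQ_OFFSET units), while executing a softirq
adds a single SOFTIRQ_OFFSET, so the low softirq bit is set only while a
softirq handler is actually running. Approximately, per
include/linux/preempt_mask.h of this era:

	#define softirq_count()       (preempt_count() & SOFTIRQ_MASK)
	/* Set only while a softirq handler runs, not when bh is
	 * merely disabled by a process-context caller.
	 */
	#define in_serving_softirq()  (softirq_count() & SOFTIRQ_OFFSET)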

+ 7 - 0
include/uapi/linux/bpf.h

@@ -249,6 +249,13 @@ enum bpf_func_id {
 	 * Return: 0 on success
 	 */
 	BPF_FUNC_get_current_comm,
+
+	/**
+	 * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+	 * @skb: pointer to skb
+	 * Return: classid if != 0
+	 */
+	BPF_FUNC_get_cgroup_classid,
 	__BPF_FUNC_MAX_ID,
 };
 

+ 15 - 0
net/core/filter.c

@@ -47,6 +47,7 @@
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
 #include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
 
 /**
  *	sk_filter - run a packet through a socket filter
@@ -1424,6 +1425,18 @@ const struct bpf_func_proto bpf_clone_redirect_proto = {
 	.arg3_type      = ARG_ANYTHING,
 };
 
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+	.func           = bpf_get_cgroup_classid,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1461,6 +1474,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_l4_csum_replace_proto;
 	case BPF_FUNC_clone_redirect:
 		return &bpf_clone_redirect_proto;
+	case BPF_FUNC_get_cgroup_classid:
+		return &bpf_get_cgroup_classid_proto;
 	default:
 		return sk_filter_func_proto(func_id);
 	}
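
All eBPF helpers share the five-u64 calling convention seen in
bpf_get_cgroup_classid() above; the proto is what makes the bare cast safe,
since .arg1_type = ARG_PTR_TO_CTX has the verifier prove that r1 is the
program's skb before the call is ever allowed. A hypothetical call-site
expansion, for illustration only:

	/* What the interpreter effectively does at a call site: r1..r5
	 * are passed as five u64s; only r1 (the skb context) is
	 * meaningful for this helper, so r2..r5 are simply ignored.
	 */
	static u64 invoke_classid_helper(struct sk_buff *skb)
	{
		return bpf_get_cgroup_classid((unsigned long) skb,
					      0, 0, 0, 0);
	}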

+ 2 - 21
net/sched/cls_cgroup.c

@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			       struct tcf_result *res)
 {
 	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-	u32 classid;
-
-	classid = task_cls_state(current)->classid;
-
-	/*
-	 * Due to the nature of the classifier it is required to ignore all
-	 * packets originating from softirq context as accessing `current'
-	 * would lead to false results.
-	 *
-	 * This test assumes that all callers of dev_queue_xmit() explicitely
-	 * disable bh. Knowing this, it is possible to detect softirq based
-	 * calls by looking at the number of nested bh disable calls because
-	 * softirqs always disables bh.
-	 */
-	if (in_serving_softirq()) {
-		/* If there is an sk_classid we'll use that. */
-		if (!skb->sk)
-			return -1;
-		classid = skb->sk->sk_classid;
-	}
+	u32 classid = task_get_classid(skb);
 
 	if (!classid)
 		return -1;
-
 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
 		return -1;
 
 	res->classid = classid;
 	res->class = 0;
+
 	return tcf_exts_exec(skb, &head->exts, res);
 }
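
One subtlety that keeps this refactor behavior-preserving: the old inline code
returned -1 directly when skb->sk was NULL in softirq context, whereas
task_get_classid() now returns 0 there; the retained !classid check turns that
0 into the same -1 verdict.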