
net: sched: cls_flow: no need to call tcf_exts_change for newly allocated struct

Since the fnew struct has just been allocated, there is no need to use
tcf_exts_change to perform an atomic change; we can simply fill in the
still-unused exts struct directly with tcf_exts_validate.
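
To illustrate the pattern, here is a minimal userspace sketch (not part of
the patch; the exts_* helpers and struct names are simplified stand-ins for
tcf_exts_init/tcf_exts_validate/tcf_exts_change, not the kernel API):

/*
 * Toy sketch: old_style() validates into a temporary extension block and
 * then copies it into the filter -- that swap step is only needed when the
 * filter is already visible to concurrent readers.  new_style() fills the
 * freshly allocated filter's block directly, which is safe because nobody
 * else can see it yet.
 */
#include <stdio.h>
#include <stdlib.h>

struct exts { int action; };
struct filter { struct exts exts; };

/* stand-in for tcf_exts_validate(): fill e from the request, or fail */
static int exts_validate(struct exts *e, int action)
{
	if (action < 0)
		return -1;
	e->action = action;
	return 0;
}

/* old pattern: stage into a temporary, then swap it in */
static int old_style(struct filter *f, int action)
{
	struct exts tmp = { 0 };

	if (exts_validate(&tmp, action) < 0)
		return -1;
	f->exts = tmp;	/* the tcf_exts_change()-like step */
	return 0;
}

/* new pattern: f was just allocated, so validate straight into it */
static int new_style(struct filter *f, int action)
{
	return exts_validate(&f->exts, action);
}

int main(void)
{
	struct filter *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	if (old_style(f, 1) == 0 && new_style(f, 2) == 0)
		printf("action = %d\n", f->exts.action);
	free(f);
	return 0;
}

In flow_change() this means tcf_exts_init() and tcf_exts_validate() can
operate on fnew->exts directly, and the temporary struct tcf_exts e together
with the tcf_exts_change() call goes away.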

Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Jiri Pirko
commit c09fc2e11e

1 changed file with 16 additions and 25 deletions:
    net/sched/cls_flow.c

--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c

@@ -388,7 +388,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	struct flow_filter *fold, *fnew;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_FLOW_MAX + 1];
-	struct tcf_exts e;
 	unsigned int nkeys = 0;
 	unsigned int perturb_period = 0;
 	u32 baseclass = 0;
@@ -424,31 +423,27 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 			return -EOPNOTSUPP;
 	}
 
-	err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-	if (err < 0)
-		goto err1;
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
-	if (err < 0)
-		goto err1;
-
-	err = -ENOBUFS;
 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
 	if (!fnew)
-		goto err1;
+		return -ENOBUFS;
 
 	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
 	if (err < 0)
-		goto err2;
+		goto err1;
 
 	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
 	if (err < 0)
-		goto err3;
+		goto err2;
+
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
+	if (err < 0)
+		goto err2;
 
 	fold = (struct flow_filter *)*arg;
 	if (fold) {
 		err = -EINVAL;
 		if (fold->handle != handle && handle)
-			goto err3;
+			goto err2;
 
 		/* Copy fold into fnew */
 		fnew->tp = fold->tp;
@@ -468,31 +463,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (mode == FLOW_MODE_HASH)
 			perturb_period = fold->perturb_period;
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 	} else {
 		err = -EINVAL;
 		if (!handle)
-			goto err3;
+			goto err2;
 		if (!tb[TCA_FLOW_KEYS])
-			goto err3;
+			goto err2;
 
 		mode = FLOW_MODE_MAP;
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 
@@ -510,8 +505,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation,
 			       (unsigned long)fnew);
 
-	tcf_exts_change(tp, &fnew->exts, &e);
-
 	netif_keep_dst(qdisc_dev(tp->q));
 
 	if (tb[TCA_FLOW_KEYS]) {
@@ -550,13 +543,11 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		call_rcu(&fold->rcu, flow_destroy_filter);
 	return 0;
 
-err3:
+err2:
 	tcf_exts_destroy(&fnew->exts);
 	tcf_em_tree_destroy(&fnew->ematches);
-err2:
-	kfree(fnew);
 err1:
-	tcf_exts_destroy(&e);
+	kfree(fnew);
 	return err;
 }
 