@@ -1676,6 +1676,22 @@ void net_dec_ingress_queue(void)
 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
 #endif
 
+#ifdef CONFIG_NET_EGRESS
+static struct static_key egress_needed __read_mostly;
+
+void net_inc_egress_queue(void)
+{
+	static_key_slow_inc(&egress_needed);
+}
+EXPORT_SYMBOL_GPL(net_inc_egress_queue);
+
+void net_dec_egress_queue(void)
+{
+	static_key_slow_dec(&egress_needed);
+}
+EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+#endif
+
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
 /* We are not allowed to call static_key_slow_dec() from irq context
@@ -3007,7 +3023,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	bool contended;
 	int rc;
 
-	qdisc_pkt_len_init(skb);
 	qdisc_calculate_pkt_len(skb, q);
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
@@ -3100,6 +3115,49 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_loopback_xmit);
 
+#ifdef CONFIG_NET_EGRESS
+static struct sk_buff *
+sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
+{
+	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
+	struct tcf_result cl_res;
+
+	if (!cl)
+		return skb;
+
+	/* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
+	 * earlier by the caller.
+	 */
+	qdisc_bstats_cpu_update(cl->q, skb);
+
+	switch (tc_classify(skb, cl, &cl_res, false)) {
+	case TC_ACT_OK:
+	case TC_ACT_RECLASSIFY:
+		skb->tc_index = TC_H_MIN(cl_res.classid);
+		break;
+	case TC_ACT_SHOT:
+		qdisc_qstats_cpu_drop(cl->q);
+		*ret = NET_XMIT_DROP;
+		goto drop;
+	case TC_ACT_STOLEN:
+	case TC_ACT_QUEUED:
+		*ret = NET_XMIT_SUCCESS;
+drop:
+		kfree_skb(skb);
+		return NULL;
+	case TC_ACT_REDIRECT:
+		/* No need to push/pop skb's mac_header here on egress! */
+		skb_do_redirect(skb);
+		*ret = NET_XMIT_SUCCESS;
+		return NULL;
+	default:
+		break;
+	}
+
+	return skb;
+}
+#endif /* CONFIG_NET_EGRESS */
+
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
@@ -3226,6 +3284,17 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
 	skb_update_prio(skb);
 
+	qdisc_pkt_len_init(skb);
+#ifdef CONFIG_NET_CLS_ACT
+	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
+# ifdef CONFIG_NET_EGRESS
+	if (static_key_false(&egress_needed)) {
+		skb = sch_handle_egress(skb, &rc, dev);
+		if (!skb)
+			goto out;
+	}
+# endif
+#endif
 	/* If device/qdisc don't need skb->dst, release it right now while
 	 * its hot in this cpu cache.
 	 */
@@ -3247,9 +3316,6 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
-#ifdef CONFIG_NET_CLS_ACT
-	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
-#endif
 	trace_net_dev_queue(skb);
 	if (q->enqueue) {
 		rc = __dev_xmit_skb(skb, q, dev, txq);
@@ -3806,9 +3872,9 @@ int (*br_fdb_test_addr_hook)(struct net_device *dev,
 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
 #endif
 
-static inline struct sk_buff *handle_ing(struct sk_buff *skb,
-					 struct packet_type **pt_prev,
-					 int *ret, struct net_device *orig_dev)
+static inline struct sk_buff *
+sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
+		   struct net_device *orig_dev)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
@@ -4002,7 +4068,7 @@ another_round:
 skip_taps:
 #ifdef CONFIG_NET_INGRESS
 	if (static_key_false(&ingress_needed)) {
-		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
 			goto out;
 
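
For context, here is a minimal sketch (not part of the diff above) of how a clsact-style qdisc might arm the new egress hook: bumping the static key on init so __dev_queue_xmit() starts calling sch_handle_egress(), and dropping it again on destroy. Only net_inc_egress_queue() and net_dec_egress_queue() come from this patch; the callback names and the surrounding qdisc are illustrative assumptions.

/* Illustrative sketch of a qdisc module consuming the exported helpers.
 * example_clsact_init/destroy are hypothetical names, not from this diff.
 */
static int example_clsact_init(struct Qdisc *sch, struct nlattr *opt)
{
	/* Enable the egress_needed static key so the fast path in
	 * __dev_queue_xmit() starts branching into sch_handle_egress().
	 */
	net_inc_egress_queue();
	sch->flags |= TCQ_F_CPUSTATS;
	return 0;
}

static void example_clsact_destroy(struct Qdisc *sch)
{
	/* Filters attached to dev->egress_cl_list would be torn down here
	 * as well; then drop the static key so the hook costs nothing again.
	 */
	net_dec_egress_queue();
}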