@@ -300,9 +300,9 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 			     mtu);
 }
 
-int hns_nic_net_xmit_hw(struct net_device *ndev,
-			struct sk_buff *skb,
-			struct hns_nic_ring_data *ring_data)
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+				struct sk_buff *skb,
+				struct hns_nic_ring_data *ring_data)
 {
 	struct hns_nic_priv *priv = netdev_priv(ndev);
 	struct hnae_ring *ring = ring_data->ring;
@@ -361,6 +361,10 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
 	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
 	netdev_tx_sent_queue(dev_queue, skb->len);
 
+	netif_trans_update(ndev);
+	ndev->stats.tx_bytes += skb->len;
+	ndev->stats.tx_packets++;
+
 	wmb(); /* commit all data before submit */
 	assert(skb->queue_mapping < priv->ae_handle->q_num);
 	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
@@ -1469,17 +1473,11 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
 				    struct net_device *ndev)
 {
 	struct hns_nic_priv *priv = netdev_priv(ndev);
-	int ret;
 
 	assert(skb->queue_mapping < ndev->ae_handle->q_num);
-	ret = hns_nic_net_xmit_hw(ndev, skb,
-				  &tx_ring_data(priv, skb->queue_mapping));
-	if (ret == NETDEV_TX_OK) {
-		netif_trans_update(ndev);
-		ndev->stats.tx_bytes += skb->len;
-		ndev->stats.tx_packets++;
-	}
-	return (netdev_tx_t)ret;
+
+	return hns_nic_net_xmit_hw(ndev, skb,
+				   &tx_ring_data(priv, skb->queue_mapping));
 }
 
 static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
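
The hunks above read as a use-after-free fix: once hnae_queue_xmit() hands the
descriptor to hardware, the TX completion path may free the skb at any moment,
so the old caller-side reads of skb->len after hns_nic_net_xmit_hw() returned
could touch freed memory. Moving netif_trans_update() and the stats updates
before the submit keeps every skb access inside the window where the driver
still owns the buffer. A minimal userspace sketch of that ordering, under this
interpretation (fake_queue_xmit, struct buf and struct tx_stats are illustrative
stand-ins, not the HNS driver's API):

	/*
	 * Sketch of the ordering the patch enforces. fake_queue_xmit()
	 * models hnae_queue_xmit(): after it returns, the "completion
	 * path" owns the buffer and may free it (here: immediately).
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct buf {
		size_t len;
		char data[64];
	};

	struct tx_stats {
		unsigned long tx_packets;
		unsigned long tx_bytes;
	};

	static void fake_queue_xmit(struct buf *b)
	{
		free(b); /* completion may run at any time after submit */
	}

	static void xmit(struct buf *b, struct tx_stats *stats)
	{
		/* Account for the packet while we still own the buffer... */
		stats->tx_bytes += b->len;
		stats->tx_packets++;

		/* ...then hand it off. Reading b->len after this call is
		 * the use-after-free the old code path allowed. */
		fake_queue_xmit(b);
	}

	int main(void)
	{
		struct tx_stats stats = { 0 };
		struct buf *b = calloc(1, sizeof(*b));

		if (!b)
			return 1;
		b->len = strlen(strcpy(b->data, "hello"));
		xmit(b, &stats);
		printf("packets=%lu bytes=%lu\n",
		       stats.tx_packets, stats.tx_bytes);
		return 0;
	}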