@@ -25,6 +25,9 @@
 #include "hnae3.h"
 #include "hns3_enet.h"
 
+static void hns3_clear_all_ring(struct hnae3_handle *h);
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+
 static const char hns3_driver_name[] = "hns3";
 const char hns3_driver_version[] = VERMAGIC_STRING;
 static const char hns3_driver_string[] =
@@ -273,6 +276,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	int i, j;
 	int ret;
 
+	ret = hns3_nic_reset_all_ring(h);
+	if (ret)
+		return ret;
+
 	/* get irq resource for all vectors */
 	ret = hns3_nic_init_irq(priv);
 	if (ret) {
@@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
 	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
 		return;
 
+	/* disable vectors */
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_vector_disable(&priv->tqp_vector[i]);
+
 	/* stop ae_dev */
 	ops = priv->ae_handle->ae_algo->ops;
 	if (ops->stop)
 		ops->stop(priv->ae_handle);
 
-	/* disable vectors */
-	for (i = 0; i < priv->vector_num; i++)
-		hns3_vector_disable(&priv->tqp_vector[i]);
-
 	/* free irq resources */
 	hns3_nic_uninit_irq(priv);
+
+	hns3_clear_all_ring(priv->ae_handle);
 }
 
 static int hns3_nic_net_stop(struct net_device *netdev)
@@ -1819,6 +1828,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc_cb[i] = *res_cb;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+	ring->desc[i].rx.bd_base_info = 0;
 }
 
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
@@ -1826,6 +1836,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 	ring->desc_cb[i].reuse_flag = 0;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
 		+ ring->desc_cb[i].page_offset);
+	ring->desc[i].rx.bd_base_info = 0;
 }
 
 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
@@ -2066,6 +2077,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
 	napi_gro_receive(&ring->tqp_vector->napi, skb);
 }
 
+static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+			       struct hns3_desc *desc, u32 l234info)
+{
+	struct pci_dev *pdev = ring->tqp->handle->pdev;
+	u16 vlan_tag;
+
+	if (pdev->revision == 0x20) {
+		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+		if (!(vlan_tag & VLAN_VID_MASK))
+			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+		return vlan_tag;
+	}
+
+#define HNS3_STRP_OUTER_VLAN	0x1
+#define HNS3_STRP_INNER_VLAN	0x2
+
+	switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+			       HNS3_RXD_STRP_TAGP_S)) {
+	case HNS3_STRP_OUTER_VLAN:
+		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+		break;
+	case HNS3_STRP_INNER_VLAN:
+		vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+		break;
+	default:
+		vlan_tag = 0;
+		break;
+	}
+
+	return vlan_tag;
+}
+
 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 			     struct sk_buff **out_skb, int *out_bnum)
 {
@@ -2085,9 +2129,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	prefetch(desc);
 
-	length = le16_to_cpu(desc->rx.pkt_len);
+	length = le16_to_cpu(desc->rx.size);
 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-	l234info = le32_to_cpu(desc->rx.l234_info);
 
 	/* Check valid BD */
 	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
@@ -2121,22 +2164,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	prefetchw(skb->data);
 
-	/* Based on hw strategy, the tag offloaded will be stored at
-	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
-	 * in one layer tag case.
-	 */
-	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
-		u16 vlan_tag;
-
-		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
-		if (!(vlan_tag & VLAN_VID_MASK))
-			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
-		if (vlan_tag & VLAN_VID_MASK)
-			__vlan_hwaccel_put_tag(skb,
-					       htons(ETH_P_8021Q),
-					       vlan_tag);
-	}
-
 	bnum = 1;
 	if (length <= HNS3_RX_HEAD_SIZE) {
 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2173,6 +2200,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	*out_bnum = bnum;
 
+	l234info = le32_to_cpu(desc->rx.l234_info);
+
+	/* Based on hw strategy, the tag offloaded will be stored at
+	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+	 * in one layer tag case.
+	 */
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+		u16 vlan_tag;
+
+		vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
+		if (vlan_tag & VLAN_VID_MASK)
+			__vlan_hwaccel_put_tag(skb,
+					       htons(ETH_P_8021Q),
+					       vlan_tag);
+	}
+
 	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
 		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
 			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
@@ -2905,8 +2948,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
 			goto out_when_alloc_ring_memory;
 		}
 
-		hns3_init_ring_hw(priv->ring_data[i].ring);
-
 		u64_stats_init(&priv->ring_data[i].ring->syncp);
 	}
 
@@ -3068,6 +3109,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 	if (netdev->reg_state != NETREG_UNINITIALIZED)
 		unregister_netdev(netdev);
 
+	hns3_force_clear_all_rx_ring(handle);
+
 	ret = hns3_nic_uninit_vector_data(priv);
 	if (ret)
 		netdev_err(netdev, "uninit vector error\n");
@@ -3183,20 +3226,48 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
 
 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
 {
-	if (!HNAE3_IS_TX_RING(ring))
-		return;
-
 	while (ring->next_to_clean != ring->next_to_use) {
+		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
 		hns3_free_buffer_detach(ring, ring->next_to_clean);
 		ring_ptr_move_fw(ring, next_to_clean);
 	}
 }
 
-static void hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
 {
-	if (HNAE3_IS_TX_RING(ring))
-		return;
+	struct hns3_desc_cb res_cbs;
+	int ret;
+
+	while (ring->next_to_use != ring->next_to_clean) {
+		/* When a buffer is not reused, it's memory has been
+		 * freed in hns3_handle_rx_bd or will be freed by
+		 * stack, so we need to replace the buffer here.
+		 */
+		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+			ret = hns3_reserve_buffer_map(ring, &res_cbs);
+			if (ret) {
+				u64_stats_update_begin(&ring->syncp);
+				ring->stats.sw_err_cnt++;
+				u64_stats_update_end(&ring->syncp);
+				/* if alloc new buffer fail, exit directly
+				 * and reclear in up flow.
+				 */
+				netdev_warn(ring->tqp->handle->kinfo.netdev,
+					    "reserve buffer map failed, ret = %d\n",
+					    ret);
+				return ret;
+			}
+			hns3_replace_buffer(ring, ring->next_to_use,
+					    &res_cbs);
+		}
+		ring_ptr_move_fw(ring, next_to_use);
+	}
 
+	return 0;
+}
+
+static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
+{
 	while (ring->next_to_use != ring->next_to_clean) {
 		/* When a buffer is not reused, it's memory has been
 		 * freed in hns3_handle_rx_bd or will be freed by
@@ -3212,6 +3283,19 @@ static void hns3_clear_rx_ring(struct hns3_enet_ring *ring)
 	}
 }
 
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
+{
+	struct net_device *ndev = h->kinfo.netdev;
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hns3_enet_ring *ring;
+	u32 i;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+		hns3_force_clear_rx_ring(ring);
+	}
+}
+
 static void hns3_clear_all_ring(struct hnae3_handle *h)
 {
 	struct net_device *ndev = h->kinfo.netdev;
@@ -3229,10 +3313,51 @@ static void hns3_clear_all_ring(struct hnae3_handle *h)
 		netdev_tx_reset_queue(dev_queue);
 
 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+		/* Continue to clear other rings even if clearing some
+		 * rings failed.
+		 */
 		hns3_clear_rx_ring(ring);
 	}
 }
 
+int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+{
+	struct net_device *ndev = h->kinfo.netdev;
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hns3_enet_ring *rx_ring;
+	int i, j;
+	int ret;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		h->ae_algo->ops->reset_queue(h, i);
+		hns3_init_ring_hw(priv->ring_data[i].ring);
+
+		/* We need to clear tx ring here because self test will
+		 * use the ring and will not run down before up
+		 */
+		hns3_clear_tx_ring(priv->ring_data[i].ring);
+		priv->ring_data[i].ring->next_to_clean = 0;
+		priv->ring_data[i].ring->next_to_use = 0;
+
+		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+		hns3_init_ring_hw(rx_ring);
+		ret = hns3_clear_rx_ring(rx_ring);
+		if (ret)
+			return ret;
+
+		/* We can not know the hardware head and tail when this
+		 * function is called in reset flow, so we reuse all desc.
+		 */
+		for (j = 0; j < rx_ring->desc_num; j++)
+			hns3_reuse_buffer(rx_ring, j);
+
+		rx_ring->next_to_clean = 0;
+		rx_ring->next_to_use = 0;
+	}
+
+	return 0;
+}
+
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -3302,7 +3427,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;
 
-	hns3_clear_all_ring(handle);
+	hns3_force_clear_all_rx_ring(handle);
 
 	ret = hns3_nic_uninit_vector_data(priv);
 	if (ret) {
@@ -3438,8 +3563,6 @@ int hns3_set_channels(struct net_device *netdev,
 	if (if_running)
 		hns3_nic_net_stop(netdev);
 
-	hns3_clear_all_ring(h);
-
 	ret = hns3_nic_uninit_vector_data(priv);
 	if (ret) {
 		dev_err(&netdev->dev,