
Merge branch 'aquantia-next'

Pavel Belous says:

====================
net: ethernet: aquantia: improvements and fixes

The following patchset contains improvements and fixes for the aQuantia
AQtion Ethernet driver in the net-next tree.

Most fixes are based on comments from Lino Sanfilippo.

Sanity testing was performed on real HW. No regressions were found.

v1->v2: 1) Removed buffer copying.
	2) Fixed DMA error handling.

v2->v3: 1) Fixes for aq_ndev_change_mtu:
	-Use core MTU checking for min_mtu (see the sketch below).
	-Removed extra new_mtu assignment.
	2) Reverse-xmas-tree variable ordering in aq_ring_rx_fill.
v3->v4: 1) Use ndev->reg_state instead of the "is_ndev_registered" flag.
====================
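
For context on the MTU item: since the 4.10 core MTU rework, dev_set_mtu()
validates the requested value against ndev->min_mtu/max_mtu before
->ndo_change_mtu() is ever invoked, so drivers no longer need open-coded
range checks. A minimal sketch of the pattern, assuming a hypothetical
xx_hw_set_mtu() helper (not this driver's exact code):

	/* Probe/alloc path: the core rejects out-of-range MTUs for us. */
	ndev->min_mtu = ETH_MIN_MTU;	/* 68, from <uapi/linux/if_ether.h> */

	/* The callback then only applies an already-validated value. */
	static int xx_ndev_change_mtu(struct net_device *ndev, int new_mtu)
	{
		int err = xx_hw_set_mtu(netdev_priv(ndev), new_mtu + ETH_HLEN);

		if (err == 0)
			ndev->mtu = new_mtu;
		return err;
	}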

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 8 years ago
commit 8676ea8f3f

+ 3 - 37
drivers/net/ethernet/aquantia/atlantic/aq_main.c

@@ -87,33 +87,17 @@ err_exit:
 static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
-	int err = 0;
-
-	err = aq_nic_xmit(aq_nic, skb);
-	if (err < 0)
-		goto err_exit;
 
-err_exit:
-	return err;
+	return aq_nic_xmit(aq_nic, skb);
 }
 
 static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
-	int err = 0;
+	int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
 
-	if (new_mtu == ndev->mtu) {
-		err = 0;
-		goto err_exit;
-	}
-	if (new_mtu < 68) {
-		err = -EINVAL;
-		goto err_exit;
-	}
-	err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
 	if (err < 0)
 		goto err_exit;
-	ndev->mtu = new_mtu;
 
 	if (netif_running(ndev)) {
 		aq_ndev_close(ndev);
@@ -252,22 +236,4 @@ static struct pci_driver aq_pci_ops = {
 	.resume = aq_pci_resume,
 };
 
-static int __init aq_module_init(void)
-{
-	int err = 0;
-
-	err = pci_register_driver(&aq_pci_ops);
-	if (err < 0)
-		goto err_exit;
-
-err_exit:
-	return err;
-}
-
-static void __exit aq_module_exit(void)
-{
-	pci_unregister_driver(&aq_pci_ops);
-}
-
-module_init(aq_module_init);
-module_exit(aq_module_exit);
+module_pci_driver(aq_pci_ops);
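
module_pci_driver() (from <linux/pci.h>) generates exactly the init/exit
boilerplate deleted above. Roughly, the one-liner expands to the following
(per the module_driver() macro, which derives the function names from the
driver struct):

	static int __init aq_pci_ops_init(void)
	{
		return pci_register_driver(&aq_pci_ops);
	}
	module_init(aq_pci_ops_init);

	static void __exit aq_pci_ops_exit(void)
	{
		pci_unregister_driver(&aq_pci_ops);
	}
	module_exit(aq_pci_ops_exit);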

+ 111 - 95
drivers/net/ethernet/aquantia/atlantic/aq_nic.c

@@ -122,14 +122,11 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	struct aq_nic_s *self = (struct aq_nic_s *)param;
 	struct net_device *ndev = aq_nic_get_ndev(self);
 	int err = 0;
-	bool is_busy = false;
 	unsigned int i = 0U;
 	struct aq_hw_link_status_s link_status;
 	struct aq_ring_stats_rx_s stats_rx;
 	struct aq_ring_stats_tx_s stats_tx;
 
-	atomic_inc(&self->header.busy_count);
-	is_busy = true;
 	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
 		goto err_exit;
 
@@ -170,8 +167,6 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	ndev->stats.tx_errors = stats_tx.errors;
 
 err_exit:
-	if (is_busy)
-		atomic_dec(&self->header.busy_count);
 	mod_timer(&self->service_timer,
 		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
 }
@@ -207,18 +202,20 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 	int err = 0;
 
 	ndev = aq_nic_ndev_alloc();
-	self = netdev_priv(ndev);
-	if (!self) {
-		err = -EINVAL;
+	if (!ndev) {
+		err = -ENOMEM;
 		goto err_exit;
 	}
 
+	self = netdev_priv(ndev);
+
 	ndev->netdev_ops = ndev_ops;
 	ndev->ethtool_ops = et_ops;
 
 	SET_NETDEV_DEV(ndev, dev);
 
 	ndev->if_port = port;
+	ndev->min_mtu = ETH_MIN_MTU;
 	self->ndev = ndev;
 
 	self->aq_pci_func = aq_pci_func;
@@ -264,16 +261,16 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
 	}
 #endif
-	err = register_netdev(self->ndev);
-	if (err < 0)
-		goto err_exit;
 
-	self->is_ndev_registered = true;
 	netif_carrier_off(self->ndev);
 
 	for (i = AQ_CFG_VECS_MAX; i--;)
 		aq_nic_ndev_queue_stop(self, i);
 
+	err = register_netdev(self->ndev);
+	if (err < 0)
+		goto err_exit;
+
 err_exit:
 	return err;
 }
@@ -296,7 +293,7 @@ void aq_nic_ndev_free(struct aq_nic_s *self)
 	if (!self->ndev)
 		goto err_exit;
 
-	if (self->is_ndev_registered)
+	if (self->ndev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(self->ndev);
 
 	if (self->aq_hw)
@@ -471,95 +468,116 @@ err_exit:
 	return err;
 }
 
-static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self,
-					struct sk_buff *skb,
-					struct aq_ring_buff_s *dx)
+static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
+				   struct sk_buff *skb,
+				   struct aq_ring_s *ring)
 {
 	unsigned int ret = 0U;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int frag_count = 0U;
+	unsigned int dx = ring->sw_tail;
+	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
 
-	dx->flags = 0U;
-	dx->len = skb_headlen(skb);
-	dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len,
-				DMA_TO_DEVICE);
-	dx->len_pkt = skb->len;
-	dx->is_sop = 1U;
-	dx->is_mapped = 1U;
+	if (unlikely(skb_is_gso(skb))) {
+		dx_buff->flags = 0U;
+		dx_buff->len_pkt = skb->len;
+		dx_buff->len_l2 = ETH_HLEN;
+		dx_buff->len_l3 = ip_hdrlen(skb);
+		dx_buff->len_l4 = tcp_hdrlen(skb);
+		dx_buff->mss = skb_shinfo(skb)->gso_size;
+		dx_buff->is_txc = 1U;
+
+		dx = aq_ring_next_dx(ring, dx);
+		dx_buff = &ring->buff_ring[dx];
+		++ret;
+	}
 
+	dx_buff->flags = 0U;
+	dx_buff->len = skb_headlen(skb);
+	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
+				     skb->data,
+				     dx_buff->len,
+				     DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
+		goto exit;
+
+	dx_buff->len_pkt = skb->len;
+	dx_buff->is_sop = 1U;
+	dx_buff->is_mapped = 1U;
 	++ret;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U;
-		dx->is_tcp_cso =
+		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
+			1U : 0U;
+		dx_buff->is_tcp_cso =
 			(ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
-		dx->is_udp_cso =
+		dx_buff->is_udp_cso =
 			(ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
 	}
 
 	for (; nr_frags--; ++frag_count) {
-		unsigned int frag_len;
+		unsigned int frag_len = 0U;
 		dma_addr_t frag_pa;
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
 
 		frag_len = skb_frag_size(frag);
-
 		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
 					   frag_len, DMA_TO_DEVICE);
 
+		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
+			goto mapping_error;
+
 		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
-			++dx;
-			++ret;
-			dx->flags = 0U;
-			dx->len = AQ_CFG_TX_FRAME_MAX;
-			dx->pa = frag_pa;
-			dx->is_mapped = 1U;
+			dx = aq_ring_next_dx(ring, dx);
+			dx_buff = &ring->buff_ring[dx];
+
+			dx_buff->flags = 0U;
+			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+			dx_buff->pa = frag_pa;
+			dx_buff->is_mapped = 1U;
 
 			frag_len -= AQ_CFG_TX_FRAME_MAX;
 			frag_pa += AQ_CFG_TX_FRAME_MAX;
+			++ret;
 		}
 
-		++dx;
-		++ret;
+		dx = aq_ring_next_dx(ring, dx);
+		dx_buff = &ring->buff_ring[dx];
 
-		dx->flags = 0U;
-		dx->len = frag_len;
-		dx->pa = frag_pa;
-		dx->is_mapped = 1U;
+		dx_buff->flags = 0U;
+		dx_buff->len = frag_len;
+		dx_buff->pa = frag_pa;
+		dx_buff->is_mapped = 1U;
+		++ret;
 	}
 
-	dx->is_eop = 1U;
-	dx->skb = skb;
-
-	return ret;
-}
-
-static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self,
-				       struct sk_buff *skb,
-				       struct aq_ring_buff_s *dx)
-{
-	dx->flags = 0U;
-	dx->len_pkt = skb->len;
-	dx->len_l2 = ETH_HLEN;
-	dx->len_l3 = ip_hdrlen(skb);
-	dx->len_l4 = tcp_hdrlen(skb);
-	dx->mss = skb_shinfo(skb)->gso_size;
-	dx->is_txc = 1U;
-	return 1U;
-}
-
-static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
-				   struct aq_ring_buff_s *dx)
-{
-	unsigned int ret = 0U;
-
-	if (unlikely(skb_is_gso(skb))) {
-		ret = aq_nic_map_skb_lso(self, skb, dx);
-		++dx;
+	dx_buff->is_eop = 1U;
+	dx_buff->skb = skb;
+	goto exit;
+
+mapping_error:
+	for (dx = ring->sw_tail;
+	     ret > 0;
+	     --ret, dx = aq_ring_next_dx(ring, dx)) {
+		dx_buff = &ring->buff_ring[dx];
+
+		if (!dx_buff->is_txc && dx_buff->pa) {
+			if (unlikely(dx_buff->is_sop)) {
+				dma_unmap_single(aq_nic_get_dev(self),
+						 dx_buff->pa,
+						 dx_buff->len,
+						 DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(aq_nic_get_dev(self),
+					       dx_buff->pa,
+					       dx_buff->len,
+					       DMA_TO_DEVICE);
+			}
+		}
 	}
 
-	ret += aq_nic_map_skb_frag(self, skb, dx);
-
+exit:
 	return ret;
 }
 
@@ -572,18 +590,13 @@ __acquires(&ring->lock)
 	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
 	unsigned int tc = 0U;
 	unsigned int trys = AQ_CFG_LOCK_TRYS;
-	int err = 0;
+	int err = NETDEV_TX_OK;
 	bool is_nic_in_bad_state;
-	bool is_busy = false;
-	struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX];
 
 	frags = skb_shinfo(skb)->nr_frags + 1;
 
 	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
 
-	atomic_inc(&self->header.busy_count);
-	is_busy = true;
-
 	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
 		dev_kfree_skb_any(skb);
 		goto err_exit;
@@ -602,23 +615,27 @@ __acquires(&ring->lock)
 
 	do {
 		if (spin_trylock(&ring->header.lock)) {
-			frags = aq_nic_map_skb(self, skb, &buffers[0]);
-
-			aq_ring_tx_append_buffs(ring, &buffers[0], frags);
-
-			err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
-							      ring, frags);
-			if (err >= 0) {
-				if (aq_ring_avail_dx(ring) <
-				    AQ_CFG_SKB_FRAGS_MAX + 1)
-					aq_nic_ndev_queue_stop(self, ring->idx);
+			frags = aq_nic_map_skb(self, skb, ring);
+
+			if (likely(frags)) {
+				err = self->aq_hw_ops.hw_ring_tx_xmit(
+								self->aq_hw,
+								ring, frags);
+				if (err >= 0) {
+					if (aq_ring_avail_dx(ring) <
+					    AQ_CFG_SKB_FRAGS_MAX + 1)
+						aq_nic_ndev_queue_stop(
+								self,
+								ring->idx);
+
+					++ring->stats.tx.packets;
+					ring->stats.tx.bytes += skb->len;
+				}
+			} else {
+				err = NETDEV_TX_BUSY;
 			}
-			spin_unlock(&ring->header.lock);
 
-			if (err >= 0) {
-				++ring->stats.tx.packets;
-				ring->stats.tx.bytes += skb->len;
-			}
+			spin_unlock(&ring->header.lock);
 			break;
 		}
 	} while (--trys);
@@ -629,8 +646,6 @@ __acquires(&ring->lock)
 	}
 
 err_exit:
-	if (is_busy)
-		atomic_dec(&self->header.busy_count);
 	return err;
 }
 
@@ -942,7 +957,7 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
 
 	if (!netif_running(self->ndev)) {
 		err = 0;
-		goto err_exit;
+		goto out;
 	}
 	rtnl_lock();
 	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
@@ -967,8 +982,9 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
 		netif_device_attach(self->ndev);
 		netif_tx_start_all_queues(self->ndev);
 	}
-	rtnl_unlock();
 
 err_exit:
+	rtnl_unlock();
+out:
 	return err;
 }
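
The aq_nic_map_skb() rework above is an instance of the standard
map-then-unwind pattern: every dma_map_single()/skb_frag_dma_map() result
must be checked with dma_mapping_error(), and on failure everything mapped
so far is unmapped again, using dma_unmap_page() for frags (which were
mapped as pages) and dma_unmap_single() for the linear head. A condensed,
driver-agnostic sketch (helper shape is illustrative; returning 0 mapped
descriptors is what lets the caller report NETDEV_TX_BUSY, as the
aq_nic_xmit() hunk above does):

	static unsigned int xx_map_skb(struct device *dev, struct sk_buff *skb,
				       dma_addr_t *pa)
	{
		unsigned int i, nr_frags = skb_shinfo(skb)->nr_frags;

		pa[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, pa[0]))
			return 0;

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			pa[i + 1] = skb_frag_dma_map(dev, frag, 0,
						     skb_frag_size(frag),
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pa[i + 1]))
				goto unwind;
		}
		return nr_frags + 1;

	unwind:
		while (i--)	/* frags were mapped with dma_map_page() */
			dma_unmap_page(dev, pa[i + 1],
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       DMA_TO_DEVICE);
		dma_unmap_single(dev, pa[0], skb_headlen(skb), DMA_TO_DEVICE);
		return 0;
	}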

+ 0 - 1
drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h

@@ -22,7 +22,6 @@ struct aq_nic_s {
 	unsigned int aq_vecs;
 	unsigned int packet_filter;
 	unsigned int power_state;
-	bool is_ndev_registered;
 	u8 port;
 	struct aq_hw_ops aq_hw_ops;
 	struct aq_hw_caps_s aq_hw_caps;
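
Dropping the private flag works because the core already records this state
in ndev->reg_state (values in <linux/netdevice.h>): register_netdev() moves
it from NETREG_UNINITIALIZED to NETREG_REGISTERED, and unregister_netdev()
takes it on through NETREG_UNREGISTERING/NETREG_UNREGISTERED. Teardown can
rely on the core's bookkeeping instead of a duplicate flag that can drift:

	/* Only unregister what the core actually registered. */
	if (self->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(self->ndev);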

+ 3 - 21
drivers/net/ethernet/aquantia/atlantic/aq_ring.c

@@ -104,25 +104,6 @@ int aq_ring_init(struct aq_ring_s *self)
 	return 0;
 }
 
-void aq_ring_tx_append_buffs(struct aq_ring_s *self,
-			     struct aq_ring_buff_s *buffer,
-			     unsigned int buffers)
-{
-	if (likely(self->sw_tail + buffers < self->size)) {
-		memcpy(&self->buff_ring[self->sw_tail], buffer,
-		       sizeof(buffer[0]) * buffers);
-	} else {
-		unsigned int first_part = self->size - self->sw_tail;
-		unsigned int second_part = buffers - first_part;
-
-		memcpy(&self->buff_ring[self->sw_tail], buffer,
-		       sizeof(buffer[0]) * first_part);
-
-		memcpy(&self->buff_ring[0], &buffer[first_part],
-		       sizeof(buffer[0]) * second_part);
-	}
-}
-
 void aq_ring_tx_clean(struct aq_ring_s *self)
 {
 	struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -209,7 +190,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
 				goto err_exit;
 			}
 
-			skb->dev = ndev;
 			skb_put(skb, buff->len);
 		} else {
 			skb = netdev_alloc_skb(ndev, ETH_HLEN);
@@ -271,6 +251,8 @@ err_exit:
 
 int aq_ring_rx_fill(struct aq_ring_s *self)
 {
+	unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+		(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
 	struct aq_ring_buff_s *buff = NULL;
 	int err = 0;
 	int i = 0;
@@ -283,7 +265,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
 		buff->len = AQ_CFG_RX_FRAME_MAX;
 
 		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
-					 __GFP_COMP, 0);
+					 __GFP_COMP, pages_order);
 		if (!buff->page) {
 			err = -ENOMEM;
 			goto err_exit;
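
The new pages_order above turns the RX frame size into an alloc_pages()
order: the number of pages rounded up, then fls() - 1. With the driver's
default AQ_CFG_RX_FRAME_MAX of 2 KiB (an assumption, from aq_cfg.h) and
4 KiB pages this stays at order 0, i.e. the same single page as before. A
worked check for a larger frame:

	/* AQ_CFG_RX_FRAME_MAX = 16384, PAGE_SIZE = 4096:
	 *   pages = 16384 / 4096 + (16384 % 4096 ? 1 : 0) = 4
	 *   order = fls(4) - 1 = 3 - 1 = 2  ->  alloc_pages(..., 2) = 4 pages
	 * For the power-of-two frame sizes used here this matches
	 * get_order(AQ_CFG_RX_FRAME_MAX); for a non-power-of-two page count
	 * (e.g. 3 pages) fls() - 1 would round down while get_order()
	 * rounds up. */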

+ 0 - 3
drivers/net/ethernet/aquantia/atlantic/aq_ring.h

@@ -146,9 +146,6 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 int aq_ring_init(struct aq_ring_s *self);
 void aq_ring_rx_deinit(struct aq_ring_s *self);
 void aq_ring_free(struct aq_ring_s *self);
-void aq_ring_tx_append_buffs(struct aq_ring_s *ring,
-			     struct aq_ring_buff_s *buffer,
-			     unsigned int buffers);
 void aq_ring_tx_clean(struct aq_ring_s *self);
 int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget);
 int aq_ring_rx_fill(struct aq_ring_s *self);

+ 0 - 1
drivers/net/ethernet/aquantia/atlantic/aq_utils.h

@@ -19,7 +19,6 @@
 struct aq_obj_s {
 	spinlock_t lock; /* spinlock for nic/rings processing */
 	atomic_t flags;
-	atomic_t busy_count;
 };
 
 static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)

+ 2 - 2
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c

@@ -659,8 +659,8 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 			}
 
 			if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
-				buff->len = (rxd_wb->pkt_len &
-						(AQ_CFG_RX_FRAME_MAX - 1U));
+				buff->len = rxd_wb->pkt_len %
+					AQ_CFG_RX_FRAME_MAX;
 				buff->len = buff->len ?
 					buff->len : AQ_CFG_RX_FRAME_MAX;
 				buff->next = 0U;

+ 2 - 2
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c

@@ -673,8 +673,8 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 			}
 
 			if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
-				buff->len = (rxd_wb->pkt_len &
-						(AQ_CFG_RX_FRAME_MAX - 1U));
+				buff->len = rxd_wb->pkt_len %
+					AQ_CFG_RX_FRAME_MAX;
 				buff->len = buff->len ?
 					buff->len : AQ_CFG_RX_FRAME_MAX;
 				buff->next = 0U;
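
The final two hunks swap pkt_len & (AQ_CFG_RX_FRAME_MAX - 1U) for
pkt_len % AQ_CFG_RX_FRAME_MAX. The two are only equivalent when
AQ_CFG_RX_FRAME_MAX is a power of two; the modulo form stays correct if the
config value ever changes, and the compiler reduces it back to a single AND
when the divisor is a power-of-two constant, so nothing is lost:

	/* MAX = 2048 (power of two):       5000 % 2048 == 5000 & 2047 == 904
	 * MAX = 3000 (not a power of two): 5000 % 3000 == 2000,
	 *                                  5000 & 2999 == 896 (wrong) */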