
Merge branch 'remove-__napi_complete_done'

Eric Dumazet says:

====================
net: get rid of __napi_complete()

This patch series removes __napi_complete() calls, in an effort
to make the NAPI API simpler and to generalize the use of GRO and
napi_complete_done()
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 8 years ago
commit bd092ad146
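
Every driver conversion below follows the same shape, so here is a minimal
sketch of the pattern. It is not taken from any of these drivers: "foo" is a
hypothetical device, and foo_rx()/foo_enable_rx_irq() are illustrative
stand-ins.

	/* Hypothetical poll routine after the conversion. */
	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
		int work_done;

		/* foo_rx() hands received skbs to napi_gro_receive() */
		work_done = foo_rx(fp, budget);

		/* napi_complete_done() flushes GRO itself (so drivers no
		 * longer call napi_gro_flush()) and returns false when busy
		 * polling owns this NAPI instance, in which case the device
		 * interrupt must stay masked.
		 */
		if (work_done < budget && napi_complete_done(napi, work_done))
			foo_enable_rx_irq(fp);

		return work_done;
	}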

+ 1 - 1
drivers/net/ethernet/aeroflex/greth.c

@@ -1008,7 +1008,7 @@ restart_txrx_poll:
 			spin_unlock_irqrestore(&greth->devlock, flags);
 			goto restart_txrx_poll;
 		} else {
-			__napi_complete(napi);
+			napi_complete_done(napi, work_done);
 			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}

+ 72 - 92
drivers/net/ethernet/amd/amd8111e.c

@@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 	void __iomem *mmio = lp->mmio;
 	struct sk_buff *skb,*new_skb;
 	int min_pkt_len, status;
-	unsigned int intr0;
 	int num_rx_pkt = 0;
 	short pkt_len;
 #if AMD8111E_VLAN_TAG_USED
 	short vtag;
 #endif
-	int rx_pkt_limit = budget;
-	unsigned long flags;
 
-	if (rx_pkt_limit <= 0)
-		goto rx_not_empty;
+	while (num_rx_pkt < budget) {
+		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+		if (status & OWN_BIT)
+			break;
 
-	do{
-		/* process receive packets until we use the quota.
-		 * If we own the next entry, it's a new packet. Send it up.
+		/* There is a tricky error noted by John Murphy,
+		 * <murf@perftech.com> to Russ Nelson: Even with
+		 * full-sized * buffers it's possible for a
+		 * jabber packet to use two buffers, with only
+		 * the last correctly noting the error.
 		 */
-		while(1) {
-			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
-			if (status & OWN_BIT)
-				break;
-
-			/* There is a tricky error noted by John Murphy,
-			 * <murf@perftech.com> to Russ Nelson: Even with
-			 * full-sized * buffers it's possible for a
-			 * jabber packet to use two buffers, with only
-			 * the last correctly noting the error.
-			 */
-			if(status & ERR_BIT) {
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			/* check for STP and ENP */
-			if(!((status & STP_BIT) && (status & ENP_BIT))){
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+		if (status & ERR_BIT) {
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		/* check for STP and ENP */
+		if (!((status & STP_BIT) && (status & ENP_BIT))){
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
 
 #if AMD8111E_VLAN_TAG_USED
-			vtag = status & TT_MASK;
-			/*MAC will strip vlan tag*/
-			if (vtag != 0)
-				min_pkt_len =MIN_PKT_LEN - 4;
+		vtag = status & TT_MASK;
+		/* MAC will strip vlan tag */
+		if (vtag != 0)
+			min_pkt_len = MIN_PKT_LEN - 4;
 			else
 #endif
-				min_pkt_len =MIN_PKT_LEN;
+			min_pkt_len = MIN_PKT_LEN;
 
-			if (pkt_len < min_pkt_len) {
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
-			if(--rx_pkt_limit < 0)
-				goto rx_not_empty;
-			new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
-			if (!new_skb) {
-				/* if allocation fail,
-				 * ignore that pkt and go to next one
-				 */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
+		if (pkt_len < min_pkt_len) {
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
+		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+		if (!new_skb) {
+			/* if allocation fail,
+			 * ignore that pkt and go to next one
+			 */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
 
-			skb_reserve(new_skb, 2);
-			skb = lp->rx_skbuff[rx_index];
-			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
-					 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
-			skb_put(skb, pkt_len);
-			lp->rx_skbuff[rx_index] = new_skb;
-			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
-								   new_skb->data,
-								   lp->rx_buff_len-2,
-								   PCI_DMA_FROMDEVICE);
+		skb_reserve(new_skb, 2);
+		skb = lp->rx_skbuff[rx_index];
+		pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+				 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+		skb_put(skb, pkt_len);
+		lp->rx_skbuff[rx_index] = new_skb;
+		lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+							   new_skb->data,
+							   lp->rx_buff_len-2,
+							   PCI_DMA_FROMDEVICE);
 
-			skb->protocol = eth_type_trans(skb, dev);
+		skb->protocol = eth_type_trans(skb, dev);
 
 #if AMD8111E_VLAN_TAG_USED
-			if (vtag == TT_VLAN_TAGGED){
-				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
-				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-			}
-#endif
-			netif_receive_skb(skb);
-			/*COAL update rx coalescing parameters*/
-			lp->coal_conf.rx_packets++;
-			lp->coal_conf.rx_bytes += pkt_len;
-			num_rx_pkt++;
-
-		err_next_pkt:
-			lp->rx_ring[rx_index].buff_phy_addr
-				= cpu_to_le32(lp->rx_dma_addr[rx_index]);
-			lp->rx_ring[rx_index].buff_count =
-				cpu_to_le16(lp->rx_buff_len-2);
-			wmb();
-			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
-			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+		if (vtag == TT_VLAN_TAGGED){
+			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 		}
-		/* Check the interrupt status register for more packets in the
-		 * mean time. Process them since we have not used up our quota.
-		 */
-		intr0 = readl(mmio + INT0);
-		/*Ack receive packets */
-		writel(intr0 & RINT0,mmio + INT0);
+#endif
+		napi_gro_receive(napi, skb);
+		/* COAL update rx coalescing parameters */
+		lp->coal_conf.rx_packets++;
+		lp->coal_conf.rx_bytes += pkt_len;
+		num_rx_pkt++;
+
+err_next_pkt:
+		lp->rx_ring[rx_index].buff_phy_addr
+			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
+		lp->rx_ring[rx_index].buff_count =
+			cpu_to_le16(lp->rx_buff_len-2);
+		wmb();
+		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+	}
 
-	} while(intr0 & RINT0);
+	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
+		unsigned long flags;
 
-	if (rx_pkt_limit > 0) {
 		/* Receive descriptor is empty now */
 		spin_lock_irqsave(&lp->lock, flags);
-		__napi_complete(napi);
 		writel(VAL0|RINTEN0, mmio + INTEN0);
 		writel(VAL2 | RDMD0, mmio + CMD0);
 		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 
-rx_not_empty:
 	return num_rx_pkt;
 }
 

+ 3 - 8
drivers/net/ethernet/amd/pcnet32.c

@@ -1350,13 +1350,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
 		pcnet32_restart(dev, CSR0_START);
 		netif_wake_queue(dev);
 	}
-	spin_unlock_irqrestore(&lp->lock, flags);
-
-	if (work_done < budget) {
-		spin_lock_irqsave(&lp->lock, flags);
-
-		__napi_complete(napi);
 
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		/* clear interrupt masks */
 		val = lp->a->read_csr(ioaddr, CSR3);
 		val &= 0x00ff;
@@ -1364,9 +1359,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
 
 		/* Set interrupt enable. */
 		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
-
-		spin_unlock_irqrestore(&lp->lock, flags);
 	}
+
+	spin_unlock_irqrestore(&lp->lock, flags);
 	return work_done;
 }
 

+ 6 - 23
drivers/net/ethernet/cirrus/ep93xx_eth.c

@@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
 		pr_info("mdio write timed out\n");
 }
 
-static int ep93xx_rx(struct net_device *dev, int processed, int budget)
+static int ep93xx_rx(struct net_device *dev, int budget)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
+	int processed = 0;
 
 	while (processed < budget) {
 		int entry;
@@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 			skb_put(skb, length);
 			skb->protocol = eth_type_trans(skb, dev);
 
-			netif_receive_skb(skb);
+			napi_gro_receive(&ep->napi, skb);
 
 			dev->stats.rx_packets++;
 			dev->stats.rx_bytes += length;
@@ -310,35 +311,17 @@ err:
 	return processed;
 }
 
-static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
-{
-	struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
-	return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
-}
-
 static int ep93xx_poll(struct napi_struct *napi, int budget)
 {
 	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
 	struct net_device *dev = ep->dev;
-	int rx = 0;
-
-poll_some_more:
-	rx = ep93xx_rx(dev, rx, budget);
-	if (rx < budget) {
-		int more = 0;
+	int rx;
 
+	rx = ep93xx_rx(dev, budget);
+	if (rx < budget && napi_complete_done(napi, rx)) {
 		spin_lock_irq(&ep->rx_lock);
-		__napi_complete(napi);
 		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
-		if (ep93xx_have_more_rx(ep)) {
-			wrl(ep, REG_INTEN, REG_INTEN_TX);
-			wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
-			more = 1;
-		}
 		spin_unlock_irq(&ep->rx_lock);
-
-		if (more && napi_reschedule(napi))
-			goto poll_some_more;
 	}
 
 	if (rx) {

+ 9 - 9
drivers/net/ethernet/ibm/emac/mal.c

@@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget)
 		int n;
 		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
 			continue;
-		n = mc->ops->poll_rx(mc->dev, budget);
+		n = mc->ops->poll_rx(mc->dev, budget - received);
 		if (n) {
 			received += n;
-			budget -= n;
-			if (budget <= 0)
-				goto more_work; // XXX What if this is the last one ?
+			if (received >= budget)
+				return budget;
 		}
 	}
 
-	/* We need to disable IRQs to protect from RXDE IRQ here */
-	spin_lock_irqsave(&mal->lock, flags);
-	__napi_complete(napi);
-	mal_enable_eob_irq(mal);
-	spin_unlock_irqrestore(&mal->lock, flags);
+	if (napi_complete_done(napi, received)) {
+		/* We need to disable IRQs to protect from RXDE IRQ here */
+		spin_lock_irqsave(&mal->lock, flags);
+		mal_enable_eob_irq(mal);
+		spin_unlock_irqrestore(&mal->lock, flags);
+	}
 
 	/* Check for "rotting" packet(s) */
 	list_for_each(l, &mal->poll_list) {
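
The mal.c change above also tightens budget accounting: each channel is now
offered only the remaining budget, and when the budget is exhausted the poll
routine returns the full budget without completing NAPI, so the core keeps it
scheduled. A sketch of that contract, using a hypothetical channel_poll()
helper rather than mal.c code:

	static int multi_poll(struct napi_struct *napi, int budget)
	{
		struct foo_chan *ch;	/* hypothetical channel type */
		int received = 0;

		list_for_each_entry(ch, &channels, list) {
			received += channel_poll(ch, budget - received);
			if (received >= budget)
				return budget;	/* work left: stay scheduled */
		}

		if (napi_complete_done(napi, received))
			enable_eob_irq();	/* hypothetical IRQ re-enable */

		return received;
	}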

+ 3 - 5
drivers/net/ethernet/marvell/skge.c

@@ -3201,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev)
 	}
 }
 
-static int skge_poll(struct napi_struct *napi, int to_do)
+static int skge_poll(struct napi_struct *napi, int budget)
 {
 	struct skge_port *skge = container_of(napi, struct skge_port, napi);
 	struct net_device *dev = skge->netdev;
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
 
 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+	for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
 		u32 control;
@@ -3236,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do)
 	wmb();
 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
 
-	if (work_done < to_do) {
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		unsigned long flags;
 
-		napi_gro_flush(napi, false);
 		spin_lock_irqsave(&hw->hw_lock, flags);
-		__napi_complete(napi);
 		hw->intr_mask |= napimask[skge->port];
 		skge_write32(hw, B0_IMSK, hw->intr_mask);
 		skge_read32(hw, B0_IMSK);

+ 5 - 6
drivers/net/ethernet/micrel/ks8695net.c

@@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
 			/* Relinquish the SKB to the network layer */
 			skb_put(skb, pktlen);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_receive_skb(skb);
+			napi_gro_receive(&ksp->napi, skb);
 
 			/* Record stats */
 			ndev->stats.rx_packets++;
@@ -561,18 +561,17 @@ rx_finished:
 static int ks8695_poll(struct napi_struct *napi, int budget)
 {
 	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
-	unsigned long  work_done;
-
 	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
 	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+	int work_done;
 
 	work_done = ks8695_rx(ksp, budget);
 
-	if (work_done < budget) {
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		unsigned long flags;
+
 		spin_lock_irqsave(&ksp->rx_lock, flags);
-		__napi_complete(napi);
-		/*enable rx interrupt*/
+		/* enable rx interrupt */
 		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
 		spin_unlock_irqrestore(&ksp->rx_lock, flags);
 	}

+ 14 - 17
drivers/net/ethernet/qlogic/qla3xxx.c

@@ -2025,7 +2025,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 	skb_checksum_none_assert(skb);
 	skb->protocol = eth_type_trans(skb, qdev->ndev);
 
-	netif_receive_skb(skb);
+	napi_gro_receive(&qdev->napi, skb);
 	lrg_buf_cb2->skb = NULL;
 
 	if (qdev->device_id == QL3022_DEVICE_ID)
@@ -2095,7 +2095,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	}
 	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
-	netif_receive_skb(skb2);
+	napi_gro_receive(&qdev->napi, skb2);
 	ndev->stats.rx_packets++;
 	ndev->stats.rx_bytes += length;
 	lrg_buf_cb2->skb = NULL;
@@ -2105,8 +2105,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
-static int ql_tx_rx_clean(struct ql3_adapter *qdev,
-			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
+static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
 {
 	struct net_rsp_iocb *net_rsp;
 	struct net_device *ndev = qdev->ndev;
@@ -2114,7 +2113,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 
 	/* While there are entries in the completion queue. */
 	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
-		qdev->rsp_consumer_index) && (work_done < work_to_do)) {
+		qdev->rsp_consumer_index) && (work_done < budget)) {
 
 		net_rsp = qdev->rsp_current;
 		rmb();
@@ -2130,21 +2129,20 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 		case OPCODE_OB_MAC_IOCB_FN2:
 			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
 					       net_rsp);
-			(*tx_cleaned)++;
 			break;
 
 		case OPCODE_IB_MAC_IOCB:
 		case OPCODE_IB_3032_MAC_IOCB:
 			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
 					       net_rsp);
-			(*rx_cleaned)++;
+			work_done++;
 			break;
 
 		case OPCODE_IB_IP_IOCB:
 		case OPCODE_IB_3032_IP_IOCB:
 			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
 						 net_rsp);
-			(*rx_cleaned)++;
+			work_done++;
 			break;
 		default: {
 			u32 *tmp = (u32 *)net_rsp;
@@ -2169,7 +2167,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 			qdev->rsp_current++;
 		}
 
-		work_done = *tx_cleaned + *rx_cleaned;
 	}
 
 	return work_done;
@@ -2178,25 +2175,25 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 static int ql_poll(struct napi_struct *napi, int budget)
 {
 	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
-	int rx_cleaned = 0, tx_cleaned = 0;
-	unsigned long hw_flags;
 	struct ql3xxx_port_registers __iomem *port_regs =
 		qdev->mem_map_registers;
+	int work_done;
 
-	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
+	work_done = ql_tx_rx_clean(qdev, budget);
 
-	if (tx_cleaned + rx_cleaned != budget) {
-		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-		__napi_complete(napi);
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&qdev->hw_lock, flags);
 		ql_update_small_bufq_prod_index(qdev);
 		ql_update_lrg_bufq_prod_index(qdev);
 		writel(qdev->rsp_consumer_index,
 			    &port_regs->CommonRegs.rspQConsumerIndex);
-		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		spin_unlock_irqrestore(&qdev->hw_lock, flags);
 
 		ql_enable_interrupts(qdev);
 	}
-	return tx_cleaned + rx_cleaned;
+	return work_done;
 }
 
 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)

+ 2 - 9
drivers/net/ethernet/realtek/8139cp.c

@@ -465,10 +465,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
 	struct cp_private *cp = container_of(napi, struct cp_private, napi);
 	struct net_device *dev = cp->dev;
 	unsigned int rx_tail = cp->rx_tail;
-	int rx;
+	int rx = 0;
 
-	rx = 0;
-rx_status_loop:
 	cpw16(IntrStatus, cp_rx_intr_mask);
 
 	while (rx < budget) {
@@ -556,15 +554,10 @@ rx_next:
 	/* if we did not reach work limit, then we're done with
 	 * this round of polling
 	 */
-	if (rx < budget) {
+	if (rx < budget && napi_complete_done(napi, rx)) {
 		unsigned long flags;
 
-		if (cpr16(IntrStatus) & cp_rx_intr_mask)
-			goto rx_status_loop;
-
-		napi_gro_flush(napi, false);
 		spin_lock_irqsave(&cp->lock, flags);
-		__napi_complete(napi);
 		cpw16_f(IntrMask, cp_intr_mask);
 		spin_unlock_irqrestore(&cp->lock, flags);
 	}

+ 2 - 6
drivers/net/ethernet/realtek/8139too.c

@@ -2135,14 +2135,10 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
 	if (likely(RTL_R16(IntrStatus) & RxAckBits))
 		work_done += rtl8139_rx(dev, tp, budget);
 
-	if (work_done < budget) {
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		unsigned long flags;
-		/*
-		 * Order is important since data can get interrupted
-		 * again when we think we are done.
-		 */
+
 		spin_lock_irqsave(&tp->lock, flags);
-		__napi_complete(napi);
 		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}

+ 7 - 24
drivers/net/ethernet/smsc/epic100.c

@@ -264,7 +264,6 @@ struct epic_private {
 	spinlock_t lock;				/* Group with Tx control cache line. */
 	spinlock_t napi_lock;
 	struct napi_struct napi;
-	unsigned int reschedule_in_poll;
 	unsigned int cur_tx, dirty_tx;
 
 	unsigned int cur_rx, dirty_rx;
@@ -400,7 +399,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	spin_lock_init(&ep->lock);
 	spin_lock_init(&ep->napi_lock);
-	ep->reschedule_in_poll = 0;
 
 	/* Bring the chip out of low-power mode. */
 	ew32(GENCTL, 0x4200);
@@ -1086,13 +1084,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
 
 	handled = 1;
 
-	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+	if (status & EpicNapiEvent) {
 		spin_lock(&ep->napi_lock);
 		if (napi_schedule_prep(&ep->napi)) {
 			epic_napi_irq_off(dev, ep);
 			__napi_schedule(&ep->napi);
-		} else
-			ep->reschedule_in_poll++;
+		}
 		spin_unlock(&ep->napi_lock);
 	}
 	status &= ~EpicNapiEvent;
@@ -1248,37 +1245,23 @@ static int epic_poll(struct napi_struct *napi, int budget)
 {
 	struct epic_private *ep = container_of(napi, struct epic_private, napi);
 	struct net_device *dev = ep->mii.dev;
-	int work_done = 0;
 	void __iomem *ioaddr = ep->ioaddr;
-
-rx_action:
+	int work_done;
 
 	epic_tx(dev, ep);
 
-	work_done += epic_rx(dev, budget);
+	work_done = epic_rx(dev, budget);
 
 	epic_rx_err(dev, ep);
 
-	if (work_done < budget) {
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		unsigned long flags;
-		int more;
-
-		/* A bit baroque but it avoids a (space hungry) spin_unlock */
 
 		spin_lock_irqsave(&ep->napi_lock, flags);
 
-		more = ep->reschedule_in_poll;
-		if (!more) {
-			__napi_complete(napi);
-			ew32(INTSTAT, EpicNapiEvent);
-			epic_napi_irq_on(dev, ep);
-		} else
-			ep->reschedule_in_poll--;
-
+		ew32(INTSTAT, EpicNapiEvent);
+		epic_napi_irq_on(dev, ep);
 		spin_unlock_irqrestore(&ep->napi_lock, flags);
-
-		if (more)
-			goto rx_action;
 	}
 
 	return work_done;

+ 0 - 1
include/linux/netdevice.h

@@ -463,7 +463,6 @@ static inline bool napi_reschedule(struct napi_struct *napi)
 	return false;
 }
 
-bool __napi_complete(struct napi_struct *n);
 bool napi_complete_done(struct napi_struct *n, int work_done);
 /**
  *	napi_complete - NAPI processing complete

+ 3 - 21
net/core/dev.c

@@ -4883,23 +4883,6 @@ void __napi_schedule_irqoff(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
 
-bool __napi_complete(struct napi_struct *n)
-{
-	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-
-	/* Some drivers call us directly, instead of calling
-	 * napi_complete_done().
-	 */
-	if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
-		return false;
-
-	list_del_init(&n->poll_list);
-	smp_mb__before_atomic();
-	clear_bit(NAPI_STATE_SCHED, &n->state);
-	return true;
-}
-EXPORT_SYMBOL(__napi_complete);
-
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
 	unsigned long flags;
@@ -4926,14 +4909,13 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 		else
 			napi_gro_flush(n, false);
 	}
-	if (likely(list_empty(&n->poll_list))) {
-		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
-	} else {
+	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
 		local_irq_save(flags);
-		__napi_complete(n);
+		list_del_init(&n->poll_list);
 		local_irq_restore(flags);
 	}
+	WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
 	return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
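
With __napi_complete() gone, completion lives entirely in
napi_complete_done(): interrupts are masked only in the rare case where the
instance must be removed from a poll list, and NAPI_STATE_SCHED is cleared
unconditionally at the end. A simplified sketch of the resulting shape; the
in-tree function also performs the GRO flush and the busy-poll early return
elided above:

	bool napi_complete_done(struct napi_struct *n, int work_done)
	{
		/* ... busy-poll early return, GRO flush or deferral ... */

		if (unlikely(!list_empty(&n->poll_list))) {
			unsigned long flags;

			/* mask IRQs only while touching the shared list */
			local_irq_save(flags);
			list_del_init(&n->poll_list);
			local_irq_restore(flags);
		}
		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
		return true;
	}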