
sungem, sunhme, sunvnet: Update drivers to use dma_wmb/rmb

This patch replaces wmb/rmb with dma_wmb/dma_rmb in the cases where the
barrier is only ordering reads or writes to memory and no programmed I/O
is involved.

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alexander Duyck, 10 years ago
parent commit b4468cc6f2
3 changed files with 18 additions and 18 deletions
  1. drivers/net/ethernet/sun/sungem.c  (+7, -7)
  2. drivers/net/ethernet/sun/sunhme.c  (+8, -8)
  3. drivers/net/ethernet/sun/sunvnet.c (+3, -3)
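
For reference, the producer-side idiom these hunks convert, as a minimal
sketch. The ex_desc layout and ex_post_desc() helper are hypothetical,
not the drivers' real structures; dma_wmb() only orders CPU accesses to
memory (including coherent DMA memory), while wmb() additionally orders
against MMIO and is costlier on several architectures.

struct ex_desc {
	__le64 buffer;		/* DMA address of the packet buffer */
	__le64 control_word;	/* ownership/length bits polled by the device */
};

static void ex_post_desc(struct ex_desc *d, dma_addr_t mapping, u64 ctrl)
{
	d->buffer = cpu_to_le64(mapping);
	dma_wmb();	/* buffer must be visible before control_word */
	d->control_word = cpu_to_le64(ctrl);
}

Since both stores hit coherent memory and no register write sits between
them, the lighter dma_wmb() gives the device the same guarantee the old
wmb() did.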

drivers/net/ethernet/sun/sungem.c  (+7, -7)

@@ -718,7 +718,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 	cluster_start = curr = (gp->rx_new & ~(4 - 1));
 	count = 0;
 	kick = -1;
-	wmb();
+	dma_wmb();
 	while (curr != limit) {
 		curr = NEXT_RX(curr);
 		if (++count == 4) {
@@ -1038,7 +1038,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 		if (gem_intme(entry))
 			ctrl |= TXDCTRL_INTME;
 		txd->buffer = cpu_to_le64(mapping);
-		wmb();
+		dma_wmb();
 		txd->control_word = cpu_to_le64(ctrl);
 		entry = NEXT_TX(entry);
 	} else {
@@ -1076,7 +1076,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 
 			txd = &gp->init_block->txd[entry];
 			txd->buffer = cpu_to_le64(mapping);
-			wmb();
+			dma_wmb();
 			txd->control_word = cpu_to_le64(this_ctrl | len);
 
 			if (gem_intme(entry))
@@ -1086,7 +1086,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 		}
 		txd = &gp->init_block->txd[first_entry];
 		txd->buffer = cpu_to_le64(first_mapping);
-		wmb();
+		dma_wmb();
 		txd->control_word =
 			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
 	}
@@ -1585,7 +1585,7 @@ static void gem_clean_rings(struct gem *gp)
 			gp->rx_skbs[i] = NULL;
 		}
 		rxd->status_word = 0;
-		wmb();
+		dma_wmb();
 		rxd->buffer = 0;
 	}
 
@@ -1647,7 +1647,7 @@ static void gem_init_rings(struct gem *gp)
 					RX_BUF_ALLOC_SIZE(gp),
 					PCI_DMA_FROMDEVICE);
 		rxd->buffer = cpu_to_le64(dma_addr);
-		wmb();
+		dma_wmb();
 		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
 		skb_reserve(skb, RX_OFFSET);
 	}
@@ -1656,7 +1656,7 @@ static void gem_init_rings(struct gem *gp)
 		struct gem_txd *txd = &gb->txd[i];
 
 		txd->control_word = 0;
-		wmb();
+		dma_wmb();
 		txd->buffer = 0;
 	}
 	wmb();

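Note that the trailing wmb() in the last gem_init_rings() hunk is left
untouched: a full barrier remains the safe choice when the stores must
also be ordered against a subsequent MMIO access, which dma_wmb() does
not guarantee. A sketch of that split, reusing the hypothetical ex_desc
above plus an invented kick_reg doorbell:

static void ex_init_and_kick(struct ex_desc *ring, int n,
			     void __iomem *kick_reg)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i].control_word = 0;
		dma_wmb();	/* clear control_word before buffer */
		ring[i].buffer = 0;
	}
	wmb();			/* ring contents vs. the MMIO kick below */
	writel(1, kick_reg);	/* hypothetical doorbell register */
}
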
drivers/net/ethernet/sun/sunhme.c  (+8, -8)

@@ -196,14 +196,14 @@ static u32 sbus_hme_read32(void __iomem *reg)
 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
 	rxd->rx_addr = (__force hme32)addr;
-	wmb();
+	dma_wmb();
 	rxd->rx_flags = (__force hme32)flags;
 }
 
 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
 	txd->tx_addr = (__force hme32)addr;
-	wmb();
+	dma_wmb();
 	txd->tx_flags = (__force hme32)flags;
 }
 
@@ -225,14 +225,14 @@ static u32 pci_hme_read32(void __iomem *reg)
 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
 	rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
-	wmb();
+	dma_wmb();
 	rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
 }
 
 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
 	txd->tx_addr = (__force hme32)cpu_to_le32(addr);
-	wmb();
+	dma_wmb();
 	txd->tx_flags = (__force hme32)cpu_to_le32(flags);
 }
 
@@ -268,12 +268,12 @@ static u32 pci_hme_read_desc32(hme32 *p)
 	sbus_readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {	(__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {	(__txd)->tx_addr = (__force hme32)(u32)(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__txd)->tx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_read_desc32(__hp, __p)	((__force u32)(hme32)*(__p))
@@ -293,12 +293,12 @@ do {	(__txd)->tx_addr = (__force hme32)(u32)(__addr); \
 	readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {	(__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {	(__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)

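The sunhme helpers above are the producer half: store the address, then
dma_wmb(), then the flags word that hands the descriptor to the chip.
The consumer half would pair an ownership check with dma_rmb(); here is
a hypothetical sketch (EX_OWN, struct ex_rxd, and the length field are
illustrative, not sunhme's real layout):

#define EX_OWN	0x80000000u	/* hypothetical device-ownership bit */

struct ex_rxd {
	__le32 rx_flags;	/* ownership + status, written by the device */
	__le32 rx_len;		/* received length, written by the device */
};

static bool ex_rx_done(const struct ex_rxd *rxd, u32 *len)
{
	if (le32_to_cpu(rxd->rx_flags) & EX_OWN)
		return false;	/* device still owns the descriptor */
	dma_rmb();		/* flags load before the dependent length load */
	*len = le32_to_cpu(rxd->rx_len);
	return true;
}
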
drivers/net/ethernet/sun/sunvnet.c (+3, -3)

@@ -519,7 +519,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
 	if (desc->hdr.state != VIO_DESC_READY)
 		return 1;
 
-	rmb();
+	dma_rmb();
 
 	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
 	       desc->hdr.state, desc->hdr.ack,
@@ -1380,7 +1380,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* This has to be a non-SMP write barrier because we are writing
 	 * to memory which is shared with the peer LDOM.
 	 */
-	wmb();
+	dma_wmb();
 
 	d->hdr.state = VIO_DESC_READY;
 
@@ -1395,7 +1395,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * is marked READY, but start_cons was false.
 	 * If so, vnet_ack() should send out the missed "start" trigger.
 	 *
-	 * Note that the wmb() above makes sure the cookies et al. are
+	 * Note that the dma_wmb() above makes sure the cookies et al. are
 	 * not globally visible before the VIO_DESC_READY, and that the
 	 * stores are ordered correctly by the compiler. The consumer will
 	 * not proceed until the VIO_DESC_READY is visible assuring that
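
Condensing the two sunvnet hunks above into one producer/consumer pair
(a sketch; d and desc follow the diff, and the payload fields named in
the comments are assumptions about the descriptor layout):

	/* Producer side (vnet_start_xmit): publish the descriptor. */
	/* ... fill in the descriptor payload and cookies ... */
	dma_wmb();			/* payload stores before READY */
	d->hdr.state = VIO_DESC_READY;

	/* Consumer side (vnet_walk_rx_one): accept the descriptor. */
	if (desc->hdr.state != VIO_DESC_READY)
		return 1;
	dma_rmb();			/* READY load before payload loads */
	/* ... now safe to read the descriptor payload ... */

Because both LDOMs access the same shared memory and no MMIO is
involved, dma_wmb()/dma_rmb() preserve exactly the ordering the comment
describes at lower cost than the full barriers they replace.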