@@ -165,7 +165,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 		lstatus |= BD_LFLAG(RXBD_WRAP);
 
-	eieio();
+	gfar_wmb();
 
 	bdp->lstatus = lstatus;
 }
@@ -2371,18 +2371,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	spin_lock_irqsave(&tx_queue->txlock, flags);
 
-	/* The powerpc-specific eieio() is used, as wmb() has too strong
-	 * semantics (it requires synchronization between cacheable and
-	 * uncacheable mappings, which eieio doesn't provide and which we
-	 * don't need), thus requiring a more expensive sync instruction. At
-	 * some point, the set of architecture-independent barrier functions
-	 * should be expanded to include weaker barriers.
-	 */
-	eieio();
+	gfar_wmb();
 
 	txbdp_start->lstatus = lstatus;
 
-	eieio(); /* force lstatus write before tx_skbuff */
+	gfar_wmb(); /* force lstatus write before tx_skbuff */
 
 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 
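For reference, gfar_wmb() is presumably a small driver-local helper (e.g. in gianfar.h, not shown by this patch) that keeps the lighter eieio() barrier on PowerPC and falls back to the generic wmb() elsewhere, per the rationale in the comment the patch removes. A minimal sketch under that assumption:

/* Hypothetical sketch of the helper; the exact definition is assumed
 * from the rationale above, not shown by this patch.
 */
static inline void gfar_wmb(void)
{
#if defined(CONFIG_PPC)
	/* eieio() orders the buffer-descriptor stores without the
	 * costlier sync that wmb() implies on powerpc (see the
	 * comment removed above).
	 */
	eieio();
#else
	wmb();	/* generic write barrier on other architectures */
#endif
}

Centralizing the choice in one helper keeps the per-architecture barrier reasoning out of the fast path, and gives correct (if slightly stronger) store ordering on architectures where eieio() is unavailable.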