@@ -416,7 +416,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 		 * make sure we read the CQE after we read the
 		 * ownership bit
 		 */
-		rmb();
+		dma_rmb();
 
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 			     MLX4_CQE_OPCODE_ERROR)) {
@@ -667,7 +667,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 			       skb_frag_size(&shinfo->frags[0]));
 		}
 
-		wmb();
+		dma_wmb();
 		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
 	}
 }
@@ -804,7 +804,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			data->addr = cpu_to_be64(dma);
 			data->lkey = ring->mr_key;
-			wmb();
+			dma_wmb();
 			data->byte_count = cpu_to_be32(byte_count);
 			--data;
 		}
@@ -821,7 +821,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			data->addr = cpu_to_be64(dma);
 			data->lkey = ring->mr_key;
-			wmb();
+			dma_wmb();
 			data->byte_count = cpu_to_be32(byte_count);
 		}
 		/* tx completion can avoid cache line miss for common cases */
@@ -938,7 +938,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Ensure new descriptor hits memory
 		 * before setting ownership of this descriptor to HW
 		 */
-		wmb();
+		dma_wmb();
 		tx_desc->ctrl.owner_opcode = op_own;
 
 		wmb();
@@ -958,7 +958,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Ensure new descriptor hits memory
 		 * before setting ownership of this descriptor to HW
 		 */
-		wmb();
+		dma_wmb();
 		tx_desc->ctrl.owner_opcode = op_own;
 		if (send_doorbell) {
 			wmb();
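
Not part of the patch itself, but for reference, a minimal sketch of the ownership pattern the conversion relies on: the descriptors live in coherent DMA memory, so the lighter dma_wmb()/dma_rmb() are enough to order the CPU's accesses to them against the device, while the full wmb() left untouched before the doorbell paths in the hunks above still covers the MMIO write. Every struct, field, and function name below is hypothetical and only illustrative, not taken from the mlx4 driver.

#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_desc {		/* hypothetical descriptor in coherent DMA memory */
	u32 byte_count;
	u32 owner;		/* written/checked last; marks who owns the slot */
};

/* Producer side: fill the descriptor body, then hand the slot to the device. */
static void demo_post(struct demo_desc *desc, u32 len, u32 hw_owned)
{
	desc->byte_count = len;

	/* Make the descriptor body visible before flipping ownership. */
	dma_wmb();
	WRITE_ONCE(desc->owner, hw_owned);
}

/* Consumer side: check ownership first, only then read the payload. */
static bool demo_poll(const struct demo_desc *desc, u32 hw_owned, u32 *len)
{
	if (READ_ONCE(desc->owner) == hw_owned)
		return false;	/* still owned by hardware */

	/* Don't read the payload until the ownership check has completed. */
	dma_rmb();
	*len = desc->byte_count;
	return true;
}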