@@ -1416,22 +1416,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch. (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	ixgbe_write_tail(rx_ring, val);
-}
-
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 				    struct ixgbe_rx_buffer *bi)
 {
@@ -1517,8 +1501,20 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 
 	i += rx_ring->count;
 
-	if (rx_ring->next_to_use != i)
-		ixgbe_release_rx_desc(rx_ring, i);
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
 }
 
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@@ -6954,8 +6950,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-		/* notify HW of packet */
-		ixgbe_write_tail(tx_ring, i);
+		writel(i, tx_ring->tail);
+
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
 	}
 
 	return;
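
For readers following the ordering logic this patch inlines, below is a minimal kernel-context sketch of the producer-side tail-bump pattern. The `my_ring` structure and `my_ring_release_desc()` function are hypothetical stand-ins, not part of the ixgbe driver; the point is only that wmb() keeps the normal-memory descriptor writes ordered before the MMIO doorbell. (The mmiowb() used on the TX path above was a separate guarantee for IA64/Altix-style platforms; mainline later made it implicit in spin_unlock() and removed the explicit API.)

	/* Illustrative only -- hypothetical names, not the driver's API. */
	#include <linux/types.h>	/* u16, u32 */
	#include <linux/io.h>		/* writel(), __iomem, barriers */

	struct my_ring {
		u16 next_to_use;	/* next descriptor the driver will fill */
		u16 next_to_alloc;	/* next buffer slot to (re)allocate */
		u8 __iomem *tail;	/* MMIO address of the ring's tail register */
	};

	/* Publish descriptors up to (but not including) index val to the NIC. */
	static void my_ring_release_desc(struct my_ring *ring, u32 val)
	{
		ring->next_to_use = val;
		ring->next_to_alloc = val;

		/*
		 * The index/descriptor writes above target normal (cacheable)
		 * memory; the tail write below is MMIO. wmb() stops a weakly
		 * ordered CPU from letting the doorbell overtake the data the
		 * hardware will fetch once it sees the new tail value.
		 */
		wmb();
		writel(val, ring->tail);
	}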