@@ -3720,6 +3720,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 			   struct igb_ring *ring)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	union e1000_adv_rx_desc *rx_desc;
 	u64 rdba = ring->dma;
 	int reg_idx = ring->reg_idx;
 	u32 srrctl = 0, rxdctl = 0;
@@ -3758,6 +3759,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	rxdctl |= IGB_RX_HTHRESH << 8;
 	rxdctl |= IGB_RX_WTHRESH << 16;
 
+	/* initialize Rx descriptor 0 */
+	rx_desc = IGB_RX_DESC(ring, 0);
+	rx_desc->wb.upper.length = 0;
+
 	/* enable receive descriptor fetching */
 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
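The one-time clear above is what lets the length field double as a completion flag: the driver keeps wb.upper.length at zero for every descriptor it still owns, and hardware writes a non-zero length on write-back. Descriptor 0 needs the explicit clear because the ring memory is no longer zeroed wholesale (see the igb_clean_rx_ring hunk below) and, as the last hunk shows, the refill path only ever clears the slot after the one it fills, so nothing else would zero slot 0 before the queue is enabled. A rough standalone sketch of the layout this relies on, with toy names standing in for the real union e1000_adv_rx_desc fields:

/* Toy 16-byte Rx descriptor mimicking the read/write-back overlap of
 * union e1000_adv_rx_desc; names and exact layout are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union toy_rx_desc {
	struct {
		uint64_t pkt_addr;	/* buffer address given to hardware */
		uint64_t hdr_addr;	/* header-split address, unused here */
	} read;
	struct {
		uint32_t lower[2];	/* RSS hash, packet type, ... */
		uint32_t status_error;	/* DD and error bits */
		uint16_t length;	/* 0 until hardware writes the packet back */
		uint16_t vlan;
	} wb;
};

int main(void)
{
	union toy_rx_desc ring[256];

	/* freshly allocated descriptor memory need not be zero */
	memset(ring, 0xab, sizeof(ring));
	printf("stale length: %u\n", (unsigned)ring[0].wb.length);

	/* the one-time clear the hunk above adds before enabling the queue */
	ring[0].wb.length = 0;
	printf("after init: %u (not done)\n", (unsigned)ring[0].wb.length);
	return 0;
}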
@@ -3973,9 +3978,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
 	memset(rx_ring->rx_buffer_info, 0, size);
 
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
-
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
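Dropping the wholesale memset is safe because the done flag is now maintained per descriptor: slot 0 is cleared once in igb_configure_rx_ring above, and igb_alloc_rx_buffers (last hunk below) zeroes the length of each slot as it becomes next_to_use, so every descriptor the clean loop can reach already reads as not-done without touching the rest of the ring.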
@@ -7172,7 +7174,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-		if (!rx_desc->wb.upper.status_error)
+		if (!rx_desc->wb.upper.length)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
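With this change a single 16-bit read of the length answers "is this descriptor done?" and, in this driver's single-buffer layout, also yields the packet size, instead of a separate test of the DD status bit. The barrier named by the truncated comment in the context (dma_rmb() at this point in igb_clean_rx_irq) must still separate that read from reads of the other write-back fields. A consumer-side sketch under those assumptions, reusing the toy union from the first sketch and a C11 fence in place of the kernel barrier:

/* assumes the toy_rx_desc union above plus #include <stdatomic.h> */
static int toy_desc_done(const volatile union toy_rx_desc *desc)
{
	if (!desc->wb.length)
		return 0;	/* not written back yet; stop cleaning */

	/* order the length read before reads of status_error, vlan, etc.,
	 * the job dma_rmb() does after this check in igb_clean_rx_irq() */
	atomic_thread_fence(memory_order_acquire);
	return 1;
}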
@@ -7312,8 +7314,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}
 
-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.upper.status_error = 0;
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
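The refill loop is the producer half of the handshake: after a buffer address is written into slot i, the length of the slot that becomes next_to_use is zeroed so the clean loop parks there until hardware writes it back. Because only the following slot is ever cleared here, the first slot of a freshly configured ring is never touched by this path, which is exactly the hole the descriptor 0 initialization in igb_configure_rx_ring plugs. A producer-side sketch with the same toy types; the wrap test only loosely mirrors the driver's negative-index idiom seen in the context above:

/* producer-side refill, modeled on igb_alloc_rx_buffers() */
static void toy_refill_one(union toy_rx_desc *ring, unsigned int count,
			   unsigned int *next_to_use, uint64_t dma)
{
	unsigned int i = *next_to_use;

	ring[i].read.pkt_addr = dma;	/* give the buffer to hardware */

	if (++i == count)		/* wrap at end of ring */
		i = 0;

	/* the descriptor the clean loop will examine next must read as
	 * not-done until hardware writes a real length into it */
	ring[i].wb.length = 0;
	*next_to_use = i;
}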