@@ -98,10 +98,11 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
-					   struct ixgbevf_ring *rx_ring,
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
 					   u32 val)
 {
+	rx_ring->next_to_use = val;
+
 	/*
 	 * Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
@@ -109,7 +110,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 	 * such as IA-64).
 	 */
 	wmb();
-	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+	writel(val, rx_ring->tail);
 }
 
 /**
@@ -406,10 +407,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 	}
 
 no_buffers:
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
-	}
+	if (rx_ring->next_to_use != i)
+		ixgbevf_release_rx_desc(rx_ring, i);
 }
 
 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -1110,8 +1109,9 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
-		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
-		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+		ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
+		ring->next_to_clean = 0;
+		ring->next_to_use = 0;
 		/* Disable Tx Head Writeback RO bit, since this hoses
 		 * bookkeeping if things aren't delivered in order.
 		 */
@@ -1208,20 +1208,22 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 	/* set_rx_buffer_len must be called before ring initialization */
 	ixgbevf_set_rx_buffer_len(adapter);
 
-	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rdba = adapter->rx_ring[i].dma;
-		j = adapter->rx_ring[i].reg_idx;
+		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+		rdba = ring->dma;
+		j = ring->reg_idx;
+		rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
 				(rdba & DMA_BIT_MASK(32)));
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
-		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
-		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+		ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
+		ring->next_to_clean = 0;
+		ring->next_to_use = 0;
 
 		ixgbevf_configure_srrctl(adapter, j);
 	}
@@ -1402,7 +1404,7 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
 		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
 		       rxr);
 
-	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+	ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
 				(adapter->rx_ring[rxr].count - 1));
 }
 
@@ -1680,14 +1682,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
-
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
-
-	if (rx_ring->head)
-		writel(0, adapter->hw.hw_addr + rx_ring->head);
-	if (rx_ring->tail)
-		writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
@@ -1715,14 +1709,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
 	memset(tx_ring->tx_buffer_info, 0, size);
 
 	memset(tx_ring->desc, 0, tx_ring->size);
-
-	tx_ring->next_to_use = 0;
-	tx_ring->next_to_clean = 0;
-
-	if (tx_ring->head)
-		writel(0, adapter->hw.hw_addr + tx_ring->head);
-	if (tx_ring->tail)
-		writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -2473,8 +2459,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 	if (!tx_ring->desc)
 		goto err;
 
-	tx_ring->next_to_use = 0;
-	tx_ring->next_to_clean = 0;
 	return 0;
 
 err:
@@ -2542,9 +2526,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 		goto alloc_failed;
 	}
 
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
-
 	return 0;
 alloc_failed:
 	return -ENOMEM;
@@ -3181,7 +3162,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 			 ixgbevf_tx_map(tx_ring, skb, tx_flags),
 			 first, skb->len, hdr_len);
 
-	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+	writel(tx_ring->next_to_use, tx_ring->tail);
 
 	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
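For reference, the pattern this patch converges on is: resolve the ring's tail register to a full MMIO address once at configuration time, then ring the doorbell on the hot path with a bare writel(). A minimal sketch of that pattern follows; the struct and function names (demo_ring, demo_ring_init, demo_ring_release) are illustrative stand-ins, not the driver's actual definitions:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical stand-in for the ring structure: tail now caches the
 * mapped MMIO address of the tail register rather than a register offset.
 */
struct demo_ring {
	u8 __iomem *tail;
	u16 next_to_use;
	u16 next_to_clean;
};

/* Resolve the tail address once, when the ring is configured. */
static void demo_ring_init(struct demo_ring *ring, u8 __iomem *hw_addr,
			   u32 tail_reg_offset)
{
	ring->tail = hw_addr + tail_reg_offset;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

/* Hot path: record the new producer index, then notify hardware.
 * The wmb() orders the descriptor memory writes before the doorbell
 * write, mirroring ixgbevf_release_rx_desc() above.
 */
static void demo_ring_release(struct demo_ring *ring, u32 val)
{
	ring->next_to_use = val;
	wmb();
	writel(val, ring->tail);
}

This also explains why the clean-ring and setup-resource hunks can drop their next_to_* resets and writel(0, ...) calls: ixgbevf_configure_tx() and ixgbevf_configure_rx() now reinitialize both the software indices and the hardware head/tail registers every time the rings are brought up.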