@@ -667,8 +667,10 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
  * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
  **/
-void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
 	u16 i = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
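Not shown in this excerpt: the matching prototypes in the driver's header (i40e_txrx.h in the i40evf layout; that hunk is assumed here, not quoted) pick up the same bool return:

bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count);
bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count);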
@@ -676,7 +678,7 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 
 	/* do nothing if no valid netdev defined */
 	if (!rx_ring->netdev || !cleaned_count)
-		return;
+		return false;
 
 	while (cleaned_count--) {
 		rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -723,17 +725,29 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 			i = 0;
 	}
 
+	if (rx_ring->next_to_use != i)
+		i40e_release_rx_desc(rx_ring, i);
+
+	return false;
+
 no_buffers:
 	if (rx_ring->next_to_use != i)
 		i40e_release_rx_desc(rx_ring, i);
+
+	/* make sure to come back via polling to try again after
+	 * allocation failure
+	 */
+	return true;
 }
 
 /**
  * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
  **/
-void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
 	u16 i = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
@@ -742,7 +756,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 
 	/* do nothing if no valid netdev defined */
 	if (!rx_ring->netdev || !cleaned_count)
-		return;
+		return false;
 
 	while (cleaned_count--) {
 		rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -769,6 +783,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 		if (dma_mapping_error(rx_ring->dev, bi->dma)) {
 			rx_ring->rx_stats.alloc_buff_failed++;
 			bi->dma = 0;
+			dev_kfree_skb(bi->skb);
+			bi->skb = NULL;
 			goto no_buffers;
 		}
 	}
@@ -780,9 +796,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 			i = 0;
 	}
 
+	if (rx_ring->next_to_use != i)
+		i40e_release_rx_desc(rx_ring, i);
+
+	return false;
+
 no_buffers:
 	if (rx_ring->next_to_use != i)
 		i40e_release_rx_desc(rx_ring, i);
+
+	/* make sure to come back via polling to try again after
+	 * allocation failure
+	 */
+	return true;
 }
 
 /**
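Both allocators now share the same two-exit shape: the success path bumps the tail and returns false, while the no_buffers path still bumps the tail, so the hardware is handed whatever buffers were successfully filled before true is returned to request a retry. A condensed sketch of that shape for reference; refill_one() is a hypothetical stand-in for the per-descriptor allocate-and-map step, not a function in this driver:

bool alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;

	while (cleaned_count--) {
		if (!refill_one(rx_ring, i))	/* allocate + DMA-map one buffer */
			goto no_buffers;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);	/* bump the tail register */

	return false;

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);	/* hand HW the partial refill */

	/* caller must come back via polling and try again */
	return true;
}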
@@ -965,7 +991,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -975,6 +1001,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 	u16 i = rx_ring->next_to_clean;
 	union i40e_rx_desc *rx_desc;
 	u32 rx_error, rx_status;
+	bool failure = false;
 	u8 rx_ptype;
 	u64 qword;
 
@@ -984,7 +1011,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		u16 vlan_tag;
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+			failure = failure ||
+				  i40evf_alloc_rx_buffers_ps(rx_ring,
+							     cleaned_count);
 			cleaned_count = 0;
 		}
 
@@ -1009,6 +1038,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 						rx_ring->rx_hdr_len);
 			if (!skb) {
 				rx_ring->rx_stats.alloc_buff_failed++;
+				failure = true;
 				break;
 			}
 
@@ -1131,7 +1161,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-	return total_rx_packets;
+	return failure ? budget : total_rx_packets;
 }
 
 /**
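The changed return value is the heart of the retry scheme: when an allocation failed, reporting the full budget as consumed keeps the NAPI core from completing the poll, so the poll routine is invoked again and the allocators get another chance. A minimal sketch of how a NAPI poll callback consumes this (the generic kernel NAPI pattern, not this driver's actual poll routine; get_rx_ring() is a hypothetical lookup and interrupt re-enabling is omitted):

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_ring *rx_ring = get_rx_ring(napi);
	int work_done = i40e_clean_rx_irq_ps(rx_ring, budget);

	if (work_done < budget) {
		/* ring fully cleaned and refilled: stop polling and
		 * re-enable the queue interrupt
		 */
		napi_complete(napi);
		return work_done;
	}

	/* work_done == budget, including the forced case after an
	 * allocation failure: NAPI keeps this queue on the poll list
	 * and calls back in, retrying the refill.
	 */
	return budget;
}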
@@ -1149,6 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 	union i40e_rx_desc *rx_desc;
 	u32 rx_error, rx_status;
 	u16 rx_packet_len;
+	bool failure = false;
 	u8 rx_ptype;
 	u64 qword;
 	u16 i;
@@ -1159,7 +1190,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		u16 vlan_tag;
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+			failure = failure ||
+				  i40evf_alloc_rx_buffers_1buf(rx_ring,
+							       cleaned_count);
 			cleaned_count = 0;
 		}
 
@@ -1240,7 +1273,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-	return total_rx_packets;
+	return failure ? budget : total_rx_packets;
 }
 
 static u32 i40e_buildreg_itr(const int type, const u16 itr)
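One subtlety in the refill sites above: failure = failure || i40evf_alloc_rx_buffers_*() relies on C's short-circuit evaluation, so after the first failed refill within a poll the allocator is not called again until the forced repoll. A tiny standalone illustration of that latch behavior (userspace C, all names invented for the demo):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the allocator: pretend the second attempt fails */
static bool refill(int attempt)
{
	printf("refill attempt %d\n", attempt);
	return attempt == 1;
}

int main(void)
{
	bool failure = false;
	int attempt;

	for (attempt = 0; attempt < 3; attempt++)
		failure = failure || refill(attempt);

	/* prints attempts 0 and 1 only: once refill() returns true,
	 * || short-circuits and attempt 2 is never made
	 */
	return failure ? 1 : 0;
}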