@@ -2879,23 +2879,18 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 **/
 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 {
-	struct i40e_vsi *vsi = ring->vsi;
 	int cpu;
 
 	if (!ring->q_vector || !ring->netdev)
 		return;
 
-	if ((vsi->tc_config.numtc <= 1) &&
-	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) {
-		cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
-		netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
-				    ring->queue_index);
-	}
-
-	/* schedule our worker thread which will take care of
-	 * applying the new filter changes
-	 */
-	i40e_service_event_schedule(vsi->back);
+	/* We only initialize XPS once, so as not to overwrite user settings */
+	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+		return;
+
+	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
+			    ring->queue_index);
 }
 
 /**
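Note on the hunk above: the rewritten guard is the usual one-shot initialization pattern, an atomic test-and-set on a state bit followed by an early return when the bit was already set, so an XPS mapping configured later by the user is never overwritten. A minimal userspace sketch of the same pattern, using C11 atomic_flag in place of the kernel's test_and_set_bit() (illustrative only, not driver code):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the __I40E_TX_XPS_INIT_DONE bit in ring->state. */
static atomic_flag xps_init_done = ATOMIC_FLAG_INIT;

static void config_xps(int queue)
{
	/* atomic_flag_test_and_set() returns the previous value, so only the
	 * first caller sees false and performs the setup; later calls bail
	 * out and leave any user-applied configuration untouched.
	 */
	if (atomic_flag_test_and_set(&xps_init_done))
		return;

	printf("initializing XPS mapping for queue %d (runs once)\n", queue);
}

int main(void)
{
	config_xps(0);	/* performs the one-time setup */
	config_xps(0);	/* early return, nothing overwritten */
	return 0;
}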
@@ -3030,7 +3025,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	if (hw->revision_id == 0)
 		rx_ctx.lrxqthresh = 0;
 	else
-		rx_ctx.lrxqthresh = 2;
+		rx_ctx.lrxqthresh = 1;
 	rx_ctx.crcstrip = 1;
 	rx_ctx.l2tsel = 1;
 	/* this controls whether VLAN is stripped from inner headers */
@@ -3403,15 +3398,14 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
 /**
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
- * @clearpba: true when all pending interrupt events should be cleared
  **/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
 
 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
-	      (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
+	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
 
 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
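Note on the hunk above: with the clearpba parameter removed, the enable path composes the same PFINT_DYN_CTL0 value on every call, always setting CLEARPBA; the call sites in the hunks below simply drop their boolean argument. A tiny standalone sketch of that composition, with placeholder bit definitions standing in for the real i40e register macros:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout only; the real I40E_PFINT_DYN_CTL0_* masks and
 * I40E_ITR_NONE come from the i40e register headers.
 */
#define DYN_CTL0_INTENA_MASK	0x1u
#define DYN_CTL0_CLEARPBA_MASK	0x2u
#define DYN_CTL0_ITR_INDX_SHIFT	3
#define ITR_NONE		3u

int main(void)
{
	/* CLEARPBA is now OR'd in unconditionally rather than gated on a flag. */
	uint32_t val = DYN_CTL0_INTENA_MASK |
		       DYN_CTL0_CLEARPBA_MASK |
		       (ITR_NONE << DYN_CTL0_ITR_INDX_SHIFT);

	printf("PFINT_DYN_CTL0 = 0x%08x\n", val);
	return 0;
}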
@@ -3597,7 +3591,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
 		for (i = 0; i < vsi->num_q_vectors; i++)
 			i40e_irq_dynamic_enable(vsi, i);
 	} else {
-		i40e_irq_dynamic_enable_icr0(pf, true);
+		i40e_irq_dynamic_enable_icr0(pf);
 	}
 
 	i40e_flush(&pf->hw);
@@ -3746,7 +3740,7 @@ enable_intr:
 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
 	if (!test_bit(__I40E_DOWN, pf->state)) {
 		i40e_service_event_schedule(pf);
-		i40e_irq_dynamic_enable_icr0(pf, false);
+		i40e_irq_dynamic_enable_icr0(pf);
 	}
 
 	return ret;
@@ -7694,7 +7688,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
 
 /**
  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
- * @type: VSI pointer
+ * @vsi: VSI pointer
  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
  *
  * On error: returns error code (negative)
@@ -8455,7 +8449,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 
 	i40e_flush(hw);
 
-	i40e_irq_dynamic_enable_icr0(pf, true);
+	i40e_irq_dynamic_enable_icr0(pf);
 
 	return err;
 }
@@ -8983,8 +8977,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		    I40E_FLAG_MSIX_ENABLED;
 
 	/* Set default ITR */
-	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
-	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+	pf->rx_itr_default = I40E_ITR_RX_DEF;
+	pf->tx_itr_default = I40E_ITR_TX_DEF;
 
 	/* Depending on PF configurations, it is possible that the RSS
 	 * maximum might end up larger than the available queues