@@ -50,13 +50,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporatio

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
-static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

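The forward declarations above introduce the lock_acquired convention used throughout this patch: helpers that may need rtnl_lock() take it only when the caller does not already hold it. The standalone C sketch below models that pattern with a pthread mutex standing in for the rtnl lock; demo_lock, do_quiesce() and prep_for_reset() are illustrative names, not driver code.

/* Minimal userspace model of the lock_acquired pattern (assumed names). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_quiesce(void)
{
	printf("quiescing VSIs under the lock\n");
}

static void prep_for_reset(bool lock_acquired)
{
	/* only take the lock if the caller did not already acquire it */
	if (!lock_acquired)
		pthread_mutex_lock(&demo_lock);
	do_quiesce();
	if (!lock_acquired)
		pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	/* caller that does not hold the lock */
	prep_for_reset(false);

	/* caller that already holds the lock (e.g. a *_safe wrapper) */
	pthread_mutex_lock(&demo_lock);
	prep_for_reset(true);
	pthread_mutex_unlock(&demo_lock);
	return 0;
}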
@@ -734,7 +737,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
- u64 tx_lost_interrupt;
struct i40e_ring *p;
u32 rx_page, rx_buf;
u64 bytes, packets;
@@ -760,7 +762,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
- tx_lost_interrupt = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -779,7 +780,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -798,7 +798,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
- vsi->tx_lost_interrupt = tx_lost_interrupt;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;

@@ -3039,6 +3038,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
return -ENOMEM;
}

+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ clear_ring_build_skb_enabled(ring);
+ else
+ set_ring_build_skb_enabled(ring);
+
/* cache tail for quicker writes, and clear the reg before use */
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -3080,13 +3085,15 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
vsi->max_frame = I40E_MAX_RXBUFFER;
vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
- } else if (vsi->netdev->mtu <= ETH_DATA_LEN) {
+ } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
} else {
vsi->max_frame = I40E_MAX_RXBUFFER;
- vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+ vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+ I40E_RXBUFFER_2048;
}

/* set up individual rings */
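The buffer-length selection above picks between 1536-byte, 2K and 3K Rx buffers depending on page size, MTU and whether build_skb padding makes a 2K buffer too small. The sketch below models just that decision as a standalone function; the helper name pick_rx_buf_len() and its boolean inputs (legacy_rx for the branch whose condition sits above this hunk, padding_too_big_for_2k for I40E_2K_TOO_SMALL_WITH_PADDING) are assumptions for illustration.

/* Standalone sketch of the Rx buffer sizing decision (assumed helper name). */
#include <stdbool.h>
#include <stdio.h>

#define RXBUF_1536 1536
#define RXBUF_2048 2048
#define RXBUF_3072 3072
#define DEMO_ETH_DATA_LEN 1500
#define DEMO_NET_IP_ALIGN 2	/* arch-dependent; 0 on x86, 2 on most others */

static int pick_rx_buf_len(bool legacy_rx, bool padding_too_big_for_2k,
			   unsigned int mtu, unsigned long page_size)
{
	if (legacy_rx)
		return RXBUF_2048;
	if (page_size < 8192 && !padding_too_big_for_2k &&
	    mtu <= DEMO_ETH_DATA_LEN)
		return RXBUF_1536 - DEMO_NET_IP_ALIGN;
	/* larger frames on small-page systems get 3K buffers */
	return (page_size < 8192) ? RXBUF_3072 : RXBUF_2048;
}

int main(void)
{
	printf("mtu 1500 on 4K pages -> %d bytes\n",
	       pick_rx_buf_len(false, false, 1500, 4096));
	printf("mtu 9000 on 4K pages -> %d bytes\n",
	       pick_rx_buf_len(false, false, 9000, 4096));
	return 0;
}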
@@ -4441,7 +4448,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
* i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
* @vsi: the VSI being configured
*
- * This function waits for the given VSI's queues to be disabled.
+ * Wait until all queues on a given VSI have been disabled.
**/
static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
@@ -4450,7 +4457,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)

pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+ /* Check and wait for the Tx queue */
ret = i40e_pf_txq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4458,11 +4465,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
vsi->seid, pf_q);
return ret;
}
- }
-
- pf_q = vsi->base_queue;
- for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+ /* Check and wait for the Rx queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4505,16 +4508,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
* @vsi: Pointer to VSI struct
*
* This function checks specified queue for given VSI. Detects hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung TX queues by checking if interrupts are disabled
+ * but there are pending descriptors. If it appears hung, attempt to recover
+ * by triggering a SW interrupt.
**/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
struct i40e_ring *tx_ring = NULL;
struct i40e_pf *pf;
- u32 head, val, tx_pending_hw;
+ u32 val, tx_pending;
int i;

pf = vsi->back;
@@ -4540,47 +4542,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

- head = i40e_get_head(tx_ring);
-
- tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
-
- /* HW is done executing descriptors, updated HEAD write back,
- * but SW hasn't processed those descriptors. If interrupt is
- * not generated from this point ON, it could result into
- * dev_watchdog detecting timeout on those netdev_queue,
- * hence proactively trigger SW interrupt.
- */
- if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- /* NAPI Poll didn't run and clear since it was set */
- if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected)) {
- netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
- vsi->seid, q_idx, tx_pending_hw,
- tx_ring->next_to_clean, head,
- tx_ring->next_to_use,
- readl(tx_ring->tail));
- netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
- vsi->seid, q_idx, val);
- i40e_force_wb(vsi, tx_ring->q_vector);
- } else {
- /* First Chance - detected possible hung */
- set_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected);
- }
- }
+ tx_pending = i40e_get_tx_pending(tx_ring);

- /* This is the case where we have interrupts missing,
- * so the tx_pending in HW will most likely be 0, but we
- * will have tx_pending in SW since the WB happened but the
- * interrupt got lost.
+ /* If interrupts are disabled and TX pending is non-zero, trigger
+ * the SW interrupt right away (don't wait). Worst case there
+ * will be one extra interrupt, which may result in no queues
+ * being cleaned because they were already clean.
*/
- if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
- (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- local_bh_disable();
- if (napi_reschedule(&tx_ring->q_vector->napi))
- tx_ring->tx_stats.tx_lost_interrupt++;
- local_bh_enable();
- }
+ if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+ i40e_force_wb(vsi, tx_ring->q_vector);
}

/**
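The rewritten hung-queue check above reduces to one predicate: descriptors are still pending while the queue's interrupt is disabled. A minimal userspace model of that check, with hypothetical queue_stats and force_writeback() stand-ins rather than the real ring and register accessors:

/* Sketch of the simplified hung-queue detection (illustrative types only). */
#include <stdbool.h>
#include <stdio.h>

struct queue_stats {
	unsigned int pending;	/* descriptors not yet cleaned */
	bool irq_enabled;	/* interrupt enable bit from the DYN_CTL register */
};

static void force_writeback(int qid)
{
	printf("queue %d: forcing descriptor write-back / SW interrupt\n", qid);
}

static void detect_recover_hung_queue(int qid, const struct queue_stats *qs)
{
	/* worst case this fires one extra interrupt on a queue that was
	 * already clean, which is harmless
	 */
	if (qs->pending && !qs->irq_enabled)
		force_writeback(qid);
}

int main(void)
{
	struct queue_stats stuck = { .pending = 8, .irq_enabled = false };
	struct queue_stats fine  = { .pending = 0, .irq_enabled = true };

	detect_recover_hung_queue(0, &stuck);
	detect_recover_hung_queue(1, &fine);
	return 0;
}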
@@ -5537,6 +5507,8 @@ int i40e_open(struct net_device *netdev)
* Finish initialization of the VSI.
*
* Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
@@ -5600,7 +5572,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);

return err;
}
@@ -5686,12 +5658,14 @@ int i40e_close(struct net_device *netdev)
* i40e_do_reset - Start a PF or Core Reset sequence
* @pf: board private structure
* @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* The essential difference in resets is that the PF Reset
* doesn't clear the packet buffers, doesn't reset the PE
* firmware, and doesn't bother the other PFs on the chip.
**/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;

@@ -5737,7 +5711,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the Core Reset.
*/
dev_dbg(&pf->pdev->dev, "PFR requested\n");
- i40e_handle_reset_warning(pf);
+ i40e_handle_reset_warning(pf, lock_acquired);

} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -5946,7 +5920,7 @@ exit:
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
rtnl_lock();
- i40e_do_reset(pf, reset_flags);
+ i40e_do_reset(pf, reset_flags, true);
rtnl_unlock();
}

@@ -6348,7 +6322,6 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;

- rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= BIT(__I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -6374,18 +6347,19 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
* precedence before starting a new reset sequence.
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
- i40e_handle_reset_warning(pf);
- goto unlock;
+ i40e_prep_for_reset(pf, false);
+ i40e_reset(pf);
+ i40e_rebuild(pf, false, false);
}

/* If we're already down or resetting, just bail */
if (reset_flags &&
!test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_CONFIG_BUSY, &pf->state))
- i40e_do_reset(pf, reset_flags);
-
-unlock:
- rtnl_unlock();
+ !test_bit(__I40E_CONFIG_BUSY, &pf->state)) {
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags, true);
+ rtnl_unlock();
+ }
}

/**
@@ -6873,10 +6847,12 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* Close up the VFs and other things in prep for PF Reset.
**/
-static void i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret = 0;
@@ -6891,7 +6867,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

/* quiesce the VSIs and their queues that are not already DOWN */
+ /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
+ if (!lock_acquired)
+ rtnl_lock();
i40e_pf_quiesce_all_vsi(pf);
+ if (!lock_acquired)
+ rtnl_unlock();

for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
@@ -6926,29 +6907,39 @@ static void i40e_send_version(struct i40e_pf *pf)
}

/**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
* @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
**/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
- u32 val;
- u32 v;

- /* Now we wait for GRST to settle out.
- * We don't have to delete the VEBs or VSIs from the hw switch
- * because the reset will make them disappear.
- */
ret = i40e_pf_reset(hw);
if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
set_bit(__I40E_RESET_FAILED, &pf->state);
- goto clear_recovery;
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ } else {
+ pf->pfr_count++;
}
- pf->pfr_count++;
+ return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
+ i40e_status ret;
+ u32 val;
+ int v;

if (test_bit(__I40E_DOWN, &pf->state))
goto clear_recovery;
@@ -6993,9 +6984,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
#endif /* CONFIG_I40E_DCB */
/* do basic switch setup */
+ if (!lock_acquired)
+ rtnl_lock();
ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
- goto end_core_reset;
+ goto end_unlock;

/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -7066,7 +7059,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of Main VSI failed: %d\n", ret);
- goto end_core_reset;
+ goto end_unlock;
}
}

@@ -7117,23 +7110,48 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* tell the firmware that we're starting */
i40e_send_version(pf);

+end_unlock:
+ if (!lock_acquired)
+ rtnl_unlock();
end_core_reset:
clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}

+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired)
+{
+ int ret;
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_reset(pf);
+ if (!ret)
+ i40e_rebuild(pf, reinit, lock_acquired);
+}
+
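With the split above, i40e_reset_and_rebuild() becomes a thin wrapper that only rebuilds when the reset itself succeeded, while the service task can run the two halves separately. A compilable sketch of that composition, using demo_* stand-ins for the driver functions:

/* Sketch of the reset/rebuild composition (assumed demo_* names). */
#include <stdbool.h>
#include <stdio.h>

static int demo_reset(void)
{
	printf("hardware reset\n");
	return 0; /* 0 = success, mirroring the i40e_status convention */
}

static void demo_rebuild(bool reinit, bool lock_acquired)
{
	printf("rebuild: reinit=%d lock_acquired=%d\n", reinit, lock_acquired);
}

static void demo_reset_and_rebuild(bool reinit, bool lock_acquired)
{
	/* only rebuild when the reset did not fail */
	if (!demo_reset())
		demo_rebuild(reinit, lock_acquired);
}

int main(void)
{
	demo_reset_and_rebuild(false, true);
	return 0;
}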
/**
* i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
* @pf: board private structure
*
* Close up the VFs and other things in prep for a Core Reset,
* then get ready to rebuild the world.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
**/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
- i40e_prep_for_reset(pf);
- i40e_reset_and_rebuild(pf, false);
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, false, lock_acquired);
}

/**
@@ -8430,6 +8448,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
*
* returns 0 if rss is not enabled, if enabled returns the final rss queue
* count which may be different from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
@@ -8445,11 +8464,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
u16 qcount;

vsi->req_queue_pairs = queue_count;
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);

pf->alloc_rss_size = new_rss_size;

- i40e_reset_and_rebuild(pf, true);
+ i40e_reset_and_rebuild(pf, true, true);

/* Discard the user configured hash keys and lut, if less
* queues are enabled.
@@ -8825,6 +8844,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -8848,7 +8868,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features);

if (need_reset)
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);

return 0;
}
@@ -9043,6 +9063,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* is to change the mode then that requires a PF reset to
* allow rebuild of the components with required hardware
* bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh,
@@ -9098,7 +9120,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
else
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
+ true);
break;
}
}
@@ -9307,10 +9330,15 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- /* The following steps are necessary to properly keep track of
- * MAC-VLAN filters loaded into firmware - first we remove
- * filter that is automatically generated by firmware and then
- * add new filter both to the driver hash table and firmware.
+ /* The following steps are necessary for two reasons. First,
+ * some older NVM configurations load a default MAC-VLAN
+ * filter that will accept any tagged packet, and we want to
+ * replace this with a normal filter. Additionally, it is
+ * possible our MAC address was provided by the platform using
+ * Open Firmware or similar.
+ *
+ * Thus, we need to remove the default filter and install one
+ * specific to the MAC address.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -10815,20 +10843,18 @@ static void i40e_print_features(struct i40e_pf *pf)

/**
* i40e_get_platform_mac_addr - get platform-specific MAC address
- *
* @pdev: PCI device information struct
* @pf: board private structure
*
- * Look up the MAC address in Open Firmware on systems that support it,
- * and use IDPROM on SPARC if no OF address is found. On return, the
- * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value
- * has been selected.
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which will check Open Firmware, or arch
+ * specific fallback. Otherwise, we'll default to the stored value in
+ * firmware.
**/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
- pf->flags &= ~I40E_FLAG_PF_MAC;
- if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
- pf->flags |= I40E_FLAG_PF_MAC;
+ if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+ i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}

/**
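The rewritten helper above tries the platform-provided address first and falls back to the adapter's firmware/NVM address only when none is found. A small sketch of that fallback order, with illustrative stand-ins for eth_platform_get_mac_address() and i40e_get_mac_addr():

/* Standalone model of the MAC lookup fallback (assumed helper names). */
#include <stdio.h>
#include <string.h>

static int platform_get_mac(unsigned char mac[6])
{
	(void)mac;
	return -1; /* non-zero means "not found", as with the real helper */
}

static void nvm_get_mac(unsigned char mac[6])
{
	static const unsigned char stored[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	memcpy(mac, stored, 6);
}

static void get_platform_mac_addr(unsigned char mac[6])
{
	if (platform_get_mac(mac))
		nvm_get_mac(mac); /* fall back to the firmware/NVM address */
}

int main(void)
{
	unsigned char mac[6];

	get_platform_mac_addr(mac);
	printf("using MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}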
@@ -11042,9 +11068,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_stop_lldp(hw, true, NULL);
}

- i40e_get_mac_addr(hw, hw->mac.addr);
/* allow a platform config to override the HW addr */
i40e_get_platform_mac_addr(pdev, pf);
+
if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
@@ -11073,7 +11099,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

INIT_WORK(&pf->service_task, i40e_service_task);
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
- pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;

/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
@@ -11245,10 +11270,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));

/* add this PF to client device list and launch a client service task */
- err = i40e_lan_add_device(pf);
- if (err)
- dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
- err);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+ }

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
@@ -11426,10 +11453,11 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_vsi_release(pf->vsi[pf->lan_vsi]);

/* remove attached clients */
- ret_code = i40e_lan_del_device(pf);
- if (ret_code) {
- dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
- ret_code);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
}

/* shutdown and destroy the HMC */
@@ -11498,7 +11526,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
}

@@ -11567,7 +11595,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
return;

rtnl_lock();
- i40e_handle_reset_warning(pf);
+ i40e_handle_reset_warning(pf, true);
rtnl_unlock();
}

@@ -11630,7 +11658,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();

wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11649,7 +11677,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
i40e_enable_mc_magic_wake(pf);

rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();

wr32(hw, I40E_PFPM_APM,
@@ -11683,7 +11711,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
i40e_enable_mc_magic_wake(pf);

rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();

wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11731,7 +11759,7 @@ static int i40e_resume(struct pci_dev *pdev)
if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
clear_bit(__I40E_DOWN, &pf->state);
rtnl_lock();
- i40e_reset_and_rebuild(pf, false);
+ i40e_reset_and_rebuild(pf, false, true);
rtnl_unlock();
}