@@ -1776,11 +1776,6 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
 	}
-
-	/* schedule our worker thread which will take care of
-	 * applying the new filter changes
-	 */
-	i40e_service_event_schedule(vsi->back);
 }
 
 /**
@@ -2885,14 +2880,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 {
 	struct i40e_vsi *vsi = ring->vsi;
+	int cpu;
 
 	if (!ring->q_vector || !ring->netdev)
 		return;
 
 	if ((vsi->tc_config.numtc <= 1) &&
-	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
-		netif_set_xps_queue(ring->netdev,
-				    get_cpu_mask(ring->q_vector->v_idx),
+	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) {
+		cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+		netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
 				    ring->queue_index);
 	}
 
@@ -3009,7 +3005,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	struct i40e_hmc_obj_rxq rx_ctx;
 	i40e_status err = 0;
 
-	ring->state = 0;
+	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 
 	/* clear the context structure first */
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
@@ -3482,6 +3478,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 	int tx_int_idx = 0;
 	int vector, err;
 	int irq_num;
+	int cpu;
 
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
@@ -3517,10 +3514,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-		/* get_cpu_mask returns a static constant mask with
-		 * a permanent lifetime so it's ok to use here.
+		/* Spread affinity hints out across online CPUs.
+		 *
+		 * get_cpu_mask returns a static constant mask with
+		 * a permanent lifetime so it's ok to pass to
+		 * irq_set_affinity_hint without making a copy.
 		 */
-		irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
 	}
 
 	vsi->irqs_ready = true;
@@ -6231,6 +6232,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 			hlist_del(&filter->fdir_node);
 			kfree(filter);
 			pf->fdir_pf_active_filters--;
+			pf->fd_inv = 0;
 		}
 	}
 }
@@ -6557,12 +6559,26 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
 	 */
 	i40e_link_event(pf);
 
-	/* check for unqualified module, if link is down */
-	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
-	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
-	    (!(status->link_info & I40E_AQ_LINK_UP)))
+	/* Check if module meets thermal requirements */
+	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
+		dev_err(&pf->pdev->dev,
+			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
 		dev_err(&pf->pdev->dev,
-			"The driver failed to link because an unqualified module was detected.\n");
+			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+	} else {
+		/* check for unqualified module, if link is down, suppress
+		 * the message if link was forced to be down.
+		 */
+		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
+		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+			dev_err(&pf->pdev->dev,
+				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
+			dev_err(&pf->pdev->dev,
+				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+		}
+	}
 }
 
 /**
@@ -9068,6 +9084,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	      (pf->hw.aq.fw_maj_ver >= 5)))
 		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
 
+	/* Enable PTP L4 if FW > v6.0 */
+	if (pf->hw.mac.type == I40E_MAC_XL710 &&
+	    pf->hw.aq.fw_maj_ver >= 6)
+		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+
 	if (pf->hw.func_caps.vmdq) {
 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
@@ -9903,6 +9924,31 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 
 		enabled_tc = i40e_pf_get_tc_map(pf);
 
+		/* Source pruning is enabled by default, so the flag is
+		 * negative logic - if it's set, we need to fiddle with
+		 * the VSI to disable source pruning.
+		 */
+		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+			memset(&ctxt, 0, sizeof(ctxt));
+			ctxt.seid = pf->main_vsi_seid;
+			ctxt.pf_num = pf->hw.pf_id;
+			ctxt.vf_num = 0;
+			ctxt.info.valid_sections |=
+				     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+			ctxt.info.switch_id =
+				   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "update vsi failed, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
+				ret = -ENOENT;
+				goto err;
+			}
+		}
+
 		/* MFP mode setup queue map and update VSI */
 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
@@ -11999,6 +12045,28 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
 	return result;
 }
 
+/**
+ * i40e_pci_error_reset_prepare - prepare device driver for pci reset
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	i40e_prep_for_reset(pf, false);
+}
+
+/**
+ * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	i40e_reset_and_rebuild(pf, false, false);
+}
+
 /**
  * i40e_pci_error_resume - restart operations after PCI error recovery
  * @pdev: PCI device information struct
@@ -12189,6 +12257,8 @@ static int i40e_resume(struct device *dev)
 static const struct pci_error_handlers i40e_err_handler = {
 	.error_detected = i40e_pci_error_detected,
 	.slot_reset = i40e_pci_error_slot_reset,
+	.reset_prepare = i40e_pci_error_reset_prepare,
+	.reset_done = i40e_pci_error_reset_done,
 	.resume = i40e_pci_error_resume,
 };
 