@@ -95,6 +95,7 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
@@ -1368,11 +1369,51 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 	}
 }
 
+static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	unsigned int num_rx_queues = 1;
+	int err;
+
+	spin_lock_bh(&adapter->mbx_lock);
+
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	if (err)
+		return err;
+
+	if (num_tcs > 1) {
+		/* update default Tx ring register index */
+		adapter->tx_ring[0].reg_idx = def_q;
+
+		/* we need as many queues as traffic classes */
+		num_rx_queues = num_tcs;
+	}
+
+	/* if we have a bad config, abort and request a queue reset */
+	if (adapter->num_rx_queues != num_rx_queues) {
+		/* force mailbox timeout to prevent further messages */
+		hw->mbx.timeout = 0;
+
+		/* wait for watchdog to come around and bail us out */
+		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+	}
+
+	return 0;
+}
+
 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	int i;
 
+	ixgbevf_configure_dcb(adapter);
+
 	ixgbevf_set_rx_mode(netdev);
 
 	ixgbevf_restore_vlan(adapter);
@@ -1551,85 +1592,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
-static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbevf_ring *rx_ring;
-	unsigned int def_q = 0;
-	unsigned int num_tcs = 0;
-	unsigned int num_rx_queues = 1;
-	int err, i;
-
-	spin_lock_bh(&adapter->mbx_lock);
-
-	/* fetch queue configuration from the PF */
-	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
-	spin_unlock_bh(&adapter->mbx_lock);
-
-	if (err)
-		return err;
-
-	if (num_tcs > 1) {
-		/* update default Tx ring register index */
-		adapter->tx_ring[0].reg_idx = def_q;
-
-		/* we need as many queues as traffic classes */
-		num_rx_queues = num_tcs;
-	}
-
-	/* nothing to do if we have the correct number of queues */
-	if (adapter->num_rx_queues == num_rx_queues)
-		return 0;
-
-	/* allocate new rings */
-	rx_ring = kcalloc(num_rx_queues,
-			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!rx_ring)
-		return -ENOMEM;
-
-	/* setup ring fields */
-	for (i = 0; i < num_rx_queues; i++) {
-		rx_ring[i].count = adapter->rx_ring_count;
-		rx_ring[i].queue_index = i;
-		rx_ring[i].reg_idx = i;
-		rx_ring[i].dev = &adapter->pdev->dev;
-		rx_ring[i].netdev = adapter->netdev;
-
-		/* allocate resources on the ring */
-		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
-		if (err) {
-			while (i) {
-				i--;
-				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
-			}
-			kfree(rx_ring);
-			return err;
-		}
-	}
-
-	/* free the existing rings and queues */
-	ixgbevf_free_all_rx_resources(adapter);
-	adapter->num_rx_queues = 0;
-	kfree(adapter->rx_ring);
-
-	/* move new rings into position on the adapter struct */
-	adapter->rx_ring = rx_ring;
-	adapter->num_rx_queues = num_rx_queues;
-
-	/* reset ring to vector mapping */
-	ixgbevf_reset_q_vectors(adapter);
-	ixgbevf_map_rings_to_vectors(adapter);
-
-	return 0;
-}
-
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	ixgbevf_reset_queues(adapter);
-
 	ixgbevf_configure(adapter);
 
 	ixgbevf_up_complete(adapter);
@@ -1875,9 +1841,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
  **/
 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	int err;
+
 	/* Start with base case */
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
+
+	spin_lock_bh(&adapter->mbx_lock);
+
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	if (err)
+		return;
+
+	/* we need as many queues as traffic classes */
+	if (num_tcs > 1)
+		adapter->num_rx_queues = num_tcs;
 }
 
 /**
@@ -2326,6 +2311,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 	bool link_up = adapter->link_up;
 	s32 need_reset;
 
+	ixgbevf_queue_reset_subtask(adapter);
+
 	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
 	/*
@@ -2595,63 +2582,6 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
 						  &adapter->rx_ring[i]);
 }
 
-static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbevf_ring *rx_ring;
-	unsigned int def_q = 0;
-	unsigned int num_tcs = 0;
-	unsigned int num_rx_queues = 1;
-	int err, i;
-
-	spin_lock_bh(&adapter->mbx_lock);
-
-	/* fetch queue configuration from the PF */
-	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
-	spin_unlock_bh(&adapter->mbx_lock);
-
-	if (err)
-		return err;
-
-	if (num_tcs > 1) {
-		/* update default Tx ring register index */
-		adapter->tx_ring[0].reg_idx = def_q;
-
-		/* we need as many queues as traffic classes */
-		num_rx_queues = num_tcs;
-	}
-
-	/* nothing to do if we have the correct number of queues */
-	if (adapter->num_rx_queues == num_rx_queues)
-		return 0;
-
-	/* allocate new rings */
-	rx_ring = kcalloc(num_rx_queues,
-			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!rx_ring)
-		return -ENOMEM;
-
-	/* setup ring fields */
-	for (i = 0; i < num_rx_queues; i++) {
-		rx_ring[i].count = adapter->rx_ring_count;
-		rx_ring[i].queue_index = i;
-		rx_ring[i].reg_idx = i;
-		rx_ring[i].dev = &adapter->pdev->dev;
-		rx_ring[i].netdev = adapter->netdev;
-	}
-
-	/* free the existing ring and queues */
-	adapter->num_rx_queues = 0;
-	kfree(adapter->rx_ring);
-
-	/* move new rings into position on the adapter struct */
-	adapter->rx_ring = rx_ring;
-	adapter->num_rx_queues = num_rx_queues;
-
-	return 0;
-}
-
 /**
  * ixgbevf_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -2695,11 +2625,6 @@ static int ixgbevf_open(struct net_device *netdev)
 		}
 	}
 
-	/* setup queue reg_idx and Rx queue count */
-	err = ixgbevf_setup_queues(adapter);
-	if (err)
-		goto err_setup_queues;
-
 	/* allocate transmit descriptors */
 	err = ixgbevf_setup_all_tx_resources(adapter);
 	if (err)
@@ -2737,7 +2662,6 @@ err_setup_rx:
 	ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
 	ixgbevf_free_all_tx_resources(adapter);
-err_setup_queues:
 	ixgbevf_reset(adapter);
 
 err_setup_reset:
@@ -2769,6 +2693,34 @@ static int ixgbevf_close(struct net_device *netdev)
 	return 0;
 }
 
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+
+	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+		return;
+
+	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+
+	/* if interface is down do nothing */
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
+		return;
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	if (netif_running(dev))
+		ixgbevf_close(dev);
+
+	ixgbevf_clear_interrupt_scheme(adapter);
+	ixgbevf_init_interrupt_scheme(adapter);
+
+	if (netif_running(dev))
+		ixgbevf_open(dev);
+}
+
 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
 				u32 vlan_macip_lens, u32 type_tucmd,
 				u32 mss_l4len_idx)