@@ -1768,15 +1768,14 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
 		ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
 
-	skb->protocol = eth_type_trans(skb, dev);
-
 	/* record Rx queue, or update MACVLAN statistics */
 	if (netif_is_ixgbe(dev))
 		skb_record_rx_queue(skb, rx_ring->queue_index);
 	else
 		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
-				 (skb->pkt_type == PACKET_BROADCAST) ||
-				 (skb->pkt_type == PACKET_MULTICAST));
+				 false);
+
+	skb->protocol = eth_type_trans(skb, dev);
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -4219,7 +4218,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 reg_offset, vf_shift;
+	u16 pool = adapter->num_rx_pools;
+	u32 reg_offset, vf_shift, vmolr;
 	u32 gcr_ext, vmdctl;
 	int i;
 
@@ -4233,6 +4233,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 	vmdctl |= IXGBE_VT_CTL_REPLEN;
 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
 
+	/* accept untagged packets until a vlan tag is
+	 * specifically set for the VMDQ queue/pool
+	 */
+	vmolr = IXGBE_VMOLR_AUPE;
+	while (pool--)
+		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
+
 	vf_shift = VMDQ_P(0) % 32;
 	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
@@ -4900,36 +4907,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
 	return -ENOMEM;
 }
 
-/**
- * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- * @vfn: pool to associate with unicast addresses
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- *          0 on no addresses written
- *          X on writing X addresses to the RAR table
- **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int count = 0;
-
-	/* return ENOMEM indicating insufficient memory for addresses */
-	if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
-		return -ENOMEM;
-
-	if (!netdev_uc_empty(netdev)) {
-		struct netdev_hw_addr *ha;
-		netdev_for_each_uc_addr(ha, netdev) {
-			ixgbe_del_mac_filter(adapter, ha->addr, vfn);
-			ixgbe_add_mac_filter(adapter, ha->addr, vfn);
-			count++;
-		}
-	}
-	return count;
-}
-
 static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -5309,29 +5286,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 	spin_unlock(&adapter->fdir_perfect_lock);
 }
 
-static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
-				       struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 vmolr;
-
-	/* No unicast promiscuous support for VMDQ devices. */
-	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-	vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
-
-	/* clear the affected bit */
-	vmolr &= ~IXGBE_VMOLR_MPE;
-
-	if (dev->flags & IFF_ALLMULTI) {
-		vmolr |= IXGBE_VMOLR_MPE;
-	} else {
-		vmolr |= IXGBE_VMOLR_ROMPE;
-		hw->mac.ops.update_mc_addr_list(hw, dev);
-	}
-	ixgbe_write_uc_addr_list(adapter->netdev, pool);
-	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-}
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -5384,21 +5338,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 	rx_ring->next_to_use = 0;
 }
 
-static int ixgbe_fwd_ring_up(struct net_device *vdev,
+static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 			     struct ixgbe_fwd_adapter *accel)
 {
-	struct ixgbe_adapter *adapter = accel->real_adapter;
+	struct net_device *vdev = accel->netdev;
 	int i, baseq, err;
 
-	if (!test_bit(accel->pool, adapter->fwd_bitmask))
-		return 0;
-
 	baseq = accel->pool * adapter->num_rx_queues_per_pool;
 	netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
 		   accel->pool, adapter->num_rx_pools,
 		   baseq, baseq + adapter->num_rx_queues_per_pool);
 
-	accel->netdev = vdev;
 	accel->rx_base_queue = baseq;
 	accel->tx_base_queue = baseq;
 
@@ -5415,26 +5365,36 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
 	 */
 	err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
 				   VMDQ_P(accel->pool));
-	if (err >= 0) {
-		ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+	if (err >= 0)
 		return 0;
-	}
+
+	/* if we cannot add the MAC rule then disable the offload */
+	macvlan_release_l2fw_offload(vdev);
 
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
 		adapter->rx_ring[baseq + i]->netdev = NULL;
 
+	netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+
+	clear_bit(accel->pool, adapter->fwd_bitmask);
+	kfree(accel);
+
 	return err;
 }
 
-static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
+static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
 {
-	if (netif_is_macvlan(upper)) {
-		struct macvlan_dev *dfwd = netdev_priv(upper);
-		struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_fwd_adapter *accel;
 
-		if (dfwd->fwd_priv)
-			ixgbe_fwd_ring_up(upper, vadapter);
-	}
+	if (!netif_is_macvlan(vdev))
+		return 0;
+
+	accel = macvlan_accel_priv(vdev);
+	if (!accel)
+		return 0;
+
+	ixgbe_fwd_ring_up(adapter, accel);
 
 	return 0;
 }
@@ -5442,7 +5402,7 @@ static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
 {
 	netdev_walk_all_upper_dev_rcu(adapter->netdev,
-				      ixgbe_upper_dev_walk, NULL);
+				      ixgbe_macvlan_up, adapter);
 }
 
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -8843,6 +8803,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
 }
 
 #endif /* CONFIG_IXGBE_DCB */
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+{
+	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_fwd_adapter *accel;
+	int pool;
+
+	/* we only care about macvlans... */
+	if (!netif_is_macvlan(vdev))
+		return 0;
+
+	/* that have hardware offload enabled... */
+	accel = macvlan_accel_priv(vdev);
+	if (!accel)
+		return 0;
+
+	/* If we can relocate to a different bit do so */
+	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+	if (pool < adapter->num_rx_pools) {
+		set_bit(pool, adapter->fwd_bitmask);
+		accel->pool = pool;
+		return 0;
+	}
+
+	/* if we cannot find a free pool then disable the offload */
+	netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
+	macvlan_release_l2fw_offload(vdev);
+	kfree(accel);
+
+	return 0;
+}
+
+static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	/* flush any stale bits out of the fwd bitmask */
+	bitmap_clear(adapter->fwd_bitmask, 1, 63);
+
+	/* walk through upper devices reassigning pools */
+	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
+				      adapter);
+}
+
 /**
  * ixgbe_setup_tc - configure net_device for multiple traffic classes
  *
@@ -8910,6 +8913,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 #endif /* CONFIG_IXGBE_DCB */
 	ixgbe_init_interrupt_scheme(adapter);
 
+	ixgbe_defrag_macvlan_pools(dev);
+
 	if (netif_running(dev))
 		return ixgbe_open(dev);
 
@@ -9014,13 +9019,12 @@ struct upper_walk_data {
 static int get_macvlan_queue(struct net_device *upper, void *_data)
 {
 	if (netif_is_macvlan(upper)) {
-		struct macvlan_dev *dfwd = netdev_priv(upper);
-		struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+		struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
 		struct upper_walk_data *data = _data;
 		struct ixgbe_adapter *adapter = data->adapter;
 		int ifindex = data->ifindex;
 
-		if (vadapter && vadapter->netdev->ifindex == ifindex) {
+		if (vadapter && upper->ifindex == ifindex) {
 			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
 			data->action = data->queue;
 			return 1;
@@ -9460,6 +9464,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 	return features;
 }
 
+static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
+{
+	int rss = min_t(int, ixgbe_max_rss_indices(adapter),
+			num_online_cpus());
+
+	/* go back to full RSS if we're not running SR-IOV */
+	if (!adapter->ring_feature[RING_F_VMDQ].offset)
+		adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
+				    IXGBE_FLAG_SRIOV_ENABLED);
+
+	adapter->ring_feature[RING_F_RSS].limit = rss;
+	adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+	ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
+}
+
 static int ixgbe_set_features(struct net_device *netdev,
 			      netdev_features_t features)
 {
@@ -9540,7 +9560,9 @@ static int ixgbe_set_features(struct net_device *netdev,
 		}
 	}
 
-	if (need_reset)
+	if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
+		ixgbe_reset_l2fw_offload(adapter);
+	else if (need_reset)
 		ixgbe_do_reset(netdev);
 	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
 			    NETIF_F_HW_VLAN_CTAG_FILTER))
@@ -9803,71 +9825,98 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 
 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 {
-	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
-	int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+	struct ixgbe_fwd_adapter *accel;
 	int tcs = adapter->hw_tcs ? : 1;
-	unsigned int limit;
 	int pool, err;
 
-	/* Hardware has a limited number of available pools. Each VF, and the
-	 * PF require a pool. Check to ensure we don't attempt to use more
-	 * then the available number of pools.
+	/* The hardware supported by ixgbe only filters on the destination MAC
+	 * address. In order to avoid issues we only support offloading modes
+	 * where the hardware can actually provide the functionality.
 	 */
-	if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
-		return ERR_PTR(-EINVAL);
+	if (!macvlan_supports_dest_filter(vdev))
+		return ERR_PTR(-EMEDIUMTYPE);
 
-	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-	      adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
-	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
-		return ERR_PTR(-EBUSY);
+	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+	if (pool == adapter->num_rx_pools) {
+		u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
+		u16 reserved_pools;
+
+		if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+		     adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
+		    adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
+			return ERR_PTR(-EBUSY);
+
+		/* Hardware has a limited number of available pools. Each VF,
+		 * and the PF require a pool. Check to ensure we don't
+		 * attempt to use more than the available number of pools.
+		 */
+		if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
+			return ERR_PTR(-EBUSY);
 
-	fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
-	if (!fwd_adapter)
-		return ERR_PTR(-ENOMEM);
+		/* Enable VMDq flag so device will be set in VM mode */
+		adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
+				  IXGBE_FLAG_SRIOV_ENABLED;
 
-	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
-	set_bit(pool, adapter->fwd_bitmask);
-	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
+		/* Try to reserve as many queues per pool as possible,
+		 * we start with the configurations that support 4 queues
+		 * per pool, followed by 2, and then by just 1 per pool.
+		 */
+		if (used_pools < 32 && adapter->num_rx_pools < 16)
+			reserved_pools = min_t(u16,
+					       32 - used_pools,
+					       16 - adapter->num_rx_pools);
+		else if (adapter->num_rx_pools < 32)
+			reserved_pools = min_t(u16,
+					       64 - used_pools,
+					       32 - adapter->num_rx_pools);
+		else
+			reserved_pools = 64 - used_pools;
 
-	/* Enable VMDq flag so device will be set in VM mode */
-	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
-	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
 
-	fwd_adapter->pool = pool;
-	fwd_adapter->real_adapter = adapter;
+		if (!reserved_pools)
+			return ERR_PTR(-EBUSY);
 
-	/* Force reinit of ring allocation with VMDQ enabled */
-	err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+		adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
 
-	if (!err && netif_running(pdev))
-		err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+		/* Force reinit of ring allocation with VMDQ enabled */
+		err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+		if (err)
+			return ERR_PTR(err);
 
-	if (!err)
-		return fwd_adapter;
+		if (pool >= adapter->num_rx_pools)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	accel = kzalloc(sizeof(*accel), GFP_KERNEL);
+	if (!accel)
+		return ERR_PTR(-ENOMEM);
 
-	/* unwind counter and free adapter struct */
-	netdev_info(pdev,
-		    "%s: dfwd hardware acceleration failed\n", vdev->name);
-	clear_bit(pool, adapter->fwd_bitmask);
-	kfree(fwd_adapter);
-	return ERR_PTR(err);
+	set_bit(pool, adapter->fwd_bitmask);
+	accel->pool = pool;
+	accel->netdev = vdev;
+
+	if (!netif_running(pdev))
+		return accel;
+
+	err = ixgbe_fwd_ring_up(adapter, accel);
+	if (err)
+		return ERR_PTR(err);
+
+	return accel;
 }
 
 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 {
 	struct ixgbe_fwd_adapter *accel = priv;
-	struct ixgbe_adapter *adapter = accel->real_adapter;
+	struct ixgbe_adapter *adapter = netdev_priv(pdev);
 	unsigned int rxbase = accel->rx_base_queue;
-	unsigned int limit, i;
+	unsigned int i;
 
 	/* delete unicast filter associated with offloaded interface */
 	ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
 			     VMDQ_P(accel->pool));
 
-	/* disable ability to receive packets for this pool */
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
-
 	/* Allow remaining Rx packets to get flushed out of the
 	 * Rx FIFO before we drop the netdev for the ring.
 	 */
@@ -9886,25 +9935,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	}
 
 	clear_bit(accel->pool, adapter->fwd_bitmask);
-	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
-	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-
-	/* go back to full RSS if we're done with our VMQs */
-	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
-		int rss = min_t(int, ixgbe_max_rss_indices(adapter),
-				num_online_cpus());
-
-		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-		adapter->ring_feature[RING_F_RSS].limit = rss;
-	}
-
-	ixgbe_setup_tc(pdev, adapter->hw_tcs);
-	netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
-		   accel->pool, adapter->num_rx_pools,
-		   accel->rx_base_queue,
-		   accel->rx_base_queue +
-		   adapter->num_rx_queues_per_pool);
 	kfree(accel);
 }
 