@@ -140,7 +140,7 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
-static void igb_set_uta(struct igb_adapter *adapter);
+static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
@@ -1534,12 +1534,13 @@ static void igb_irq_enable(struct igb_adapter *adapter)
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ u16 pf_id = adapter->vfs_allocated_count;
u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;

if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
/* add VID to filter table */
- igb_vfta_set(hw, vid, true);
+ igb_vfta_set(hw, vid, pf_id, true, true);
adapter->mng_vlan_id = vid;
} else {
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
@@ -1549,7 +1550,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
(vid != old_vid) &&
!test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */
- igb_vfta_set(hw, old_vid, false);
+ igb_vfta_set(hw, vid, pf_id, false, true);
}
}

@@ -1818,6 +1819,10 @@ void igb_down(struct igb_adapter *adapter)

if (!pci_channel_offline(adapter->pdev))
igb_reset(adapter);
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
+
igb_clean_all_tx_rings(adapter);
igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
@@ -1862,7 +1867,7 @@ void igb_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
+ u32 pba, hwm;

/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
@@ -1886,9 +1891,10 @@ void igb_reset(struct igb_adapter *adapter)
break;
}

- if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
- (mac->type < e1000_82576)) {
- /* adjust PBA for jumbo frames */
+ if (mac->type == e1000_82575) {
+ u32 min_rx_space, min_tx_space, needed_tx_space;
+
+ /* write Rx PBA so that hardware can report correct Tx PBA */
wr32(E1000_PBA, pba);

/* To maintain wire speed transmits, the Tx FIFO should be
@@ -1898,31 +1904,26 @@ void igb_reset(struct igb_adapter *adapter)
* one full receive packet and is similarly rounded up and
* expressed in KB.
*/
- pba = rd32(E1000_PBA);
- /* upper 16 bits has Tx packet buffer allocation size in KB */
- tx_space = pba >> 16;
- /* lower 16 bits has Rx packet buffer allocation size in KB */
- pba &= 0xffff;
- /* the Tx fifo also stores 16 bytes of information about the Tx
- * but don't include ethernet FCS because hardware appends it
+ min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
+
+ /* The Tx FIFO also stores 16 bytes of information about the Tx
+ * but don't include Ethernet FCS because hardware appends it.
+ * We only need to round down to the nearest 512 byte block
+ * count since the value we care about is 2 frames, not 1.
*/
- min_tx_space = (adapter->max_frame_size +
- sizeof(union e1000_adv_tx_desc) -
- ETH_FCS_LEN) * 2;
- min_tx_space = ALIGN(min_tx_space, 1024);
- min_tx_space >>= 10;
- /* software strips receive CRC, so leave room for it */
- min_rx_space = adapter->max_frame_size;
- min_rx_space = ALIGN(min_rx_space, 1024);
- min_rx_space >>= 10;
+ min_tx_space = adapter->max_frame_size;
+ min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
+ min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
+
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);

/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation
+ * allocation, take space away from current Rx allocation.
*/
- if (tx_space < min_tx_space &&
- ((min_tx_space - tx_space) < pba)) {
- pba = pba - (min_tx_space - tx_space);
+ if (needed_tx_space < pba) {
+ pba -= needed_tx_space;

/* if short on Rx space, Rx wins and must trump Tx
* adjustment
@@ -1930,18 +1931,20 @@ void igb_reset(struct igb_adapter *adapter)
if (pba < min_rx_space)
pba = min_rx_space;
}
+
+ /* adjust PBA for jumbo frames */
wr32(E1000_PBA, pba);
}

- /* flow control settings */
- /* The high water mark must be low enough to fit one full frame
- * (or the size used for early receive) above it in the Rx FIFO.
- * Set it to the lower of:
- * - 90% of the Rx FIFO size, or
- * - the full Rx FIFO size minus one full frame
+ /* flow control settings
+ * The high water mark must be low enough to fit one full frame
+ * after transmitting the pause frame. As such we must have enough
+ * space to allow for us to complete our current transmit and then
+ * receive the frame that is in progress from the link partner.
+ * Set it to:
+ * - the full Rx FIFO size minus one full Tx plus one full Rx frame
*/
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - 2 * adapter->max_frame_size));
+ hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
@@ -2051,7 +2054,7 @@ static int igb_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
igb_vlan_mode(netdev, features);

- if (!(changed & NETIF_F_RXALL))
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;

netdev->features = features;
@@ -2064,6 +2067,25 @@ static int igb_set_features(struct net_device *netdev,
return 0;
}

+static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 flags)
+{
+ /* guarantee we can provide a unique filter for the unicast address */
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ int vfn = adapter->vfs_allocated_count;
+ int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+
+ if (netdev_uc_count(dev) >= rar_entries)
+ return -ENOMEM;
+ }
+
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
+}
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2087,6 +2109,7 @@ static const struct net_device_ops igb_netdev_ops = {
#endif
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
+ .ndo_fdb_add = igb_ndo_fdb_add,
.ndo_features_check = passthru_features_check,
};

@@ -2921,14 +2944,6 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
/* Device supports enough interrupts without queue pairing. */
break;
case e1000_82576:
- /* If VFs are going to be allocated with RSS queues then we
- * should pair the queues in order to conserve interrupts due
- * to limited supply.
- */
- if ((adapter->rss_queues > 1) &&
- (adapter->vfs_allocated_count > 6))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
- /* fall through */
case e1000_82580:
case e1000_i350:
case e1000_i354:
@@ -2939,6 +2954,8 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
*/
if (adapter->rss_queues > (max_rss_queues / 2))
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
break;
}
}
@@ -3498,7 +3515,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
/* disable store bad packets and clear size bits. */
rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

- /* enable LPE to prevent packets larger than max_frame_size */
+ /* enable LPE to allow for reception of jumbo frames */
rctl |= E1000_RCTL_LPE;

/* disable queue 0 to prevent tail write w/o re-config */
@@ -3522,8 +3539,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

- rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
- E1000_RCTL_DPF | /* Allow filtered pause */
+ rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
* and that breaks VLANs.
@@ -3539,12 +3555,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;

- /* if it isn't the PF check to see if VFs are enabled and
- * increase the size to support vlan tags
- */
- if (vfn < adapter->vfs_allocated_count &&
- adapter->vf_data[vfn].vlans_enabled)
- size += VLAN_TAG_SIZE;
+ if (size > MAX_JUMBO_FRAME_SIZE)
+ size = MAX_JUMBO_FRAME_SIZE;

vmolr = rd32(E1000_VMOLR(vfn));
vmolr &= ~E1000_VMOLR_RLPML_MASK;
@@ -3554,32 +3566,6 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
return 0;
}

-/**
- * igb_rlpml_set - set maximum receive packet size
- * @adapter: board private structure
- *
- * Configure maximum receivable packet size.
- **/
-static void igb_rlpml_set(struct igb_adapter *adapter)
-{
- u32 max_frame_size = adapter->max_frame_size;
- struct e1000_hw *hw = &adapter->hw;
- u16 pf_id = adapter->vfs_allocated_count;
-
- if (pf_id) {
- igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
- /* If we're in VMDQ or SR-IOV mode, then set global RLPML
- * to our max jumbo frame size, in case we need to enable
- * jumbo frames on one of the rings later.
- * This will not pass over-length frames into the default
- * queue because it's gated by the VMOLR.RLPML.
- */
- max_frame_size = MAX_JUMBO_FRAME_SIZE;
- }
-
- wr32(E1000_RLPML, max_frame_size);
-}
-
static inline void igb_set_vmolr(struct igb_adapter *adapter,
int vfn, bool aupe)
{
@@ -3684,9 +3670,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
{
int i;

- /* set UTA to appropriate mode */
- igb_set_uta(adapter);
-
/* set the correct pool for the PF default MAC address in entry 0 */
igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
adapter->vfs_allocated_count);
@@ -4004,6 +3987,130 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
return count;
}

+static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 i, pf_id;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ case e1000_i350:
+ /* VLAN filtering needed for VLAN prio filter */
+ if (adapter->netdev->features & NETIF_F_NTUPLE)
+ break;
+ /* fall through */
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i354:
+ /* VLAN filtering needed for pool filtering */
+ if (adapter->vfs_allocated_count)
+ break;
+ /* fall through */
+ default:
+ return 1;
+ }
+
+ /* We are already in VLAN promisc, nothing to do */
+ if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
+ return 0;
+
+ if (!adapter->vfs_allocated_count)
+ goto set_vfta;
+
+ /* Add PF to all active pools */
+ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
+
+ for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
+ u32 vlvf = rd32(E1000_VLVF(i));
+
+ vlvf |= 1 << pf_id;
+ wr32(E1000_VLVF(i), vlvf);
+ }
+
+set_vfta:
+ /* Set all bits in the VLAN filter table array */
+ for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
+ hw->mac.ops.write_vfta(hw, i, ~0U);
+
+ /* Set flag so we don't redo unnecessary work */
+ adapter->flags |= IGB_FLAG_VLAN_PROMISC;
+
+ return 0;
+}
+
+#define VFTA_BLOCK_SIZE 8
+static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
+ u32 vid_start = vfta_offset * 32;
+ u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
+ u32 i, vid, word, bits, pf_id;
+
+ /* guarantee that we don't scrub out management VLAN */
+ vid = adapter->mng_vlan_id;
+ if (vid >= vid_start && vid < vid_end)
+ vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+
+ if (!adapter->vfs_allocated_count)
+ goto set_vfta;
+
+ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
+
+ for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
+ u32 vlvf = rd32(E1000_VLVF(i));
+
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & VLAN_VID_MASK;
+
+ /* only concern ourselves with a certain range */
+ if (vid < vid_start || vid >= vid_end)
+ continue;
+
+ if (vlvf & E1000_VLVF_VLANID_ENABLE) {
+ /* record VLAN ID in VFTA */
+ vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+
+ /* if PF is part of this then continue */
+ if (test_bit(vid, adapter->active_vlans))
+ continue;
+ }
+
+ /* remove PF from the pool */
+ bits = ~(1 << pf_id);
+ bits &= rd32(E1000_VLVF(i));
+ wr32(E1000_VLVF(i), bits);
+ }
+
+set_vfta:
+ /* extract values from active_vlans and write back to VFTA */
+ for (i = VFTA_BLOCK_SIZE; i--;) {
+ vid = (vfta_offset + i) * 32;
+ word = vid / BITS_PER_LONG;
+ bits = vid % BITS_PER_LONG;
+
+ vfta[i] |= adapter->active_vlans[word] >> bits;
+
+ hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
+ }
+}
+
+static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
+{
+ u32 i;
+
+ /* We are not in VLAN promisc, nothing to do */
+ if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ return;
+
+ /* Set flag so we don't redo unnecessary work */
+ adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
+
+ for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
+ igb_scrub_vfta(adapter, i);
+}
+
/**
* igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -4018,21 +4125,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
unsigned int vfn = adapter->vfs_allocated_count;
- u32 rctl, vmolr = 0;
+ u32 rctl = 0, vmolr = 0;
int count;

/* Check for Promiscuous and All Multicast modes */
- rctl = rd32(E1000_RCTL);
-
- /* clear the effected bits */
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
-
if (netdev->flags & IFF_PROMISC) {
- /* retain VLAN HW filtering if in VT mode */
- if (adapter->vfs_allocated_count)
- rctl |= E1000_RCTL_VFE;
- rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+
+ /* enable use of UTA filter to force packets to default pool */
+ if (hw->mac.type == e1000_82576)
+ vmolr |= E1000_VMOLR_ROPE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
@@ -4050,17 +4153,34 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= E1000_VMOLR_ROMPE;
}
}
- /* Write addresses to available RAR registers, if there is not
- * sufficient space to store all the addresses then enable
- * unicast promiscuous mode
- */
- count = igb_write_uc_addr_list(netdev);
- if (count < 0) {
- rctl |= E1000_RCTL_UPE;
- vmolr |= E1000_VMOLR_ROPE;
- }
- rctl |= E1000_RCTL_VFE;
}
+
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = igb_write_uc_addr_list(netdev);
+ if (count < 0) {
+ rctl |= E1000_RCTL_UPE;
+ vmolr |= E1000_VMOLR_ROPE;
+ }
+
+ /* enable VLAN filtering by default */
+ rctl |= E1000_RCTL_VFE;
+
+ /* disable VLAN filtering for modes that require it */
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev->features & NETIF_F_RXALL)) {
+ /* if we fail to set all rules then just clear VFE */
+ if (igb_vlan_promisc_enable(adapter))
+ rctl &= ~E1000_RCTL_VFE;
+ } else {
+ igb_vlan_promisc_disable(adapter);
+ }
+
+ /* update state of unicast, multicast, and VLAN filtering modes */
+ rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
+ E1000_RCTL_VFE);
wr32(E1000_RCTL, rctl);

/* In order to support SR-IOV and eventually VMDq it is necessary to set
@@ -4071,9 +4191,19 @@ static void igb_set_rx_mode(struct net_device *netdev)
if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
return;

+ /* set UTA to appropriate mode */
+ igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
+
vmolr |= rd32(E1000_VMOLR(vfn)) &
~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+
+ /* enable Rx jumbo frames, no need for restriction */
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
+
wr32(E1000_VMOLR(vfn), vmolr);
+ wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
+
igb_restore_vf_multicasts(adapter);
}

@@ -5088,16 +5218,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
{
struct igb_adapter *adapter = netdev_priv(netdev);

- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
@@ -5792,125 +5912,132 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
- u32 pool_mask, reg, vid;
- int i;
+ u32 pool_mask, vlvf_mask, i;

- pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+ /* create mask for VF and other pools */
+ pool_mask = E1000_VLVF_POOLSEL_MASK;
+ vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* drop PF from pool bits */
+ pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count));

/* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
+ for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
+ u32 vlvf = rd32(E1000_VLVF(i));
+ u32 vfta_mask, vid, vfta;

/* remove the vf from the pool */
- reg &= ~pool_mask;
-
- /* if pool is empty then remove entry from vfta */
- if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
- (reg & E1000_VLVF_VLANID_ENABLE)) {
- reg = 0;
- vid = reg & E1000_VLVF_VLANID_MASK;
- igb_vfta_set(hw, vid, false);
- }
+ if (!(vlvf & vlvf_mask))
+ continue;
+
+ /* clear out bit from VLVF */
+ vlvf ^= vlvf_mask;
+
+ /* if other pools are present, just remove ourselves */
+ if (vlvf & pool_mask)
+ goto update_vlvfb;
+
+ /* if PF is present, leave VFTA */
+ if (vlvf & E1000_VLVF_POOLSEL_MASK)
+ goto update_vlvf;
+
+ vid = vlvf & E1000_VLVF_VLANID_MASK;
+ vfta_mask = 1 << (vid % 32);

- wr32(E1000_VLVF(i), reg);
+ /* clear bit from VFTA */
+ vfta = adapter->shadow_vfta[vid / 32];
+ if (vfta & vfta_mask)
+ hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
+update_vlvf:
+ /* clear pool selection enable */
+ if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
+ vlvf &= E1000_VLVF_POOLSEL_MASK;
+ else
+ vlvf = 0;
+update_vlvfb:
+ /* clear pool bits */
+ wr32(E1000_VLVF(i), vlvf);
}
+}
+
+static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
+{
+ u32 vlvf;
+ int idx;

- adapter->vf_data[vf].vlans_enabled = 0;
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the VLAN id in the VLVF entries */
+ for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
+ vlvf = rd32(E1000_VLVF(idx));
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ return idx;
}

-static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
struct e1000_hw *hw = &adapter->hw;
- u32 reg, i;
-
- /* The vlvf table only exists on 82576 hardware and newer */
- if (hw->mac.type < e1000_82576)
- return -1;
+ u32 bits, pf_id;
+ int idx;

- /* we only need to do this if VMDq is enabled */
- if (!adapter->vfs_allocated_count)
- return -1;
+ idx = igb_find_vlvf_entry(hw, vid);
+ if (!idx)
+ return;

- /* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
- if ((reg & E1000_VLVF_VLANID_ENABLE) &&
- vid == (reg & E1000_VLVF_VLANID_MASK))
- break;
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
+ bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+ bits &= rd32(E1000_VLVF(idx));
+
+ /* Disable the filter so this falls into the default pool. */
+ if (!bits) {
+ if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
+ wr32(E1000_VLVF(idx), 1 << pf_id);
+ else
+ wr32(E1000_VLVF(idx), 0);
}
+}

- if (add) {
- if (i == E1000_VLVF_ARRAY_SIZE) {
- /* Did not find a matching VLAN ID entry that was
- * enabled. Search for a free filter entry, i.e.
- * one without the enable bit set
- */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
- if (!(reg & E1000_VLVF_VLANID_ENABLE))
- break;
- }
- }
- if (i < E1000_VLVF_ARRAY_SIZE) {
- /* Found an enabled/available entry */
- reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
-
- /* if !enabled we need to set this up in vfta */
- if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
- /* add VID to filter table */
- igb_vfta_set(hw, vid, true);
- reg |= E1000_VLVF_VLANID_ENABLE;
- }
- reg &= ~E1000_VLVF_VLANID_MASK;
- reg |= vid;
- wr32(E1000_VLVF(i), reg);
-
- /* do not modify RLPML for PF devices */
- if (vf >= adapter->vfs_allocated_count)
- return 0;
-
- if (!adapter->vf_data[vf].vlans_enabled) {
- u32 size;
-
- reg = rd32(E1000_VMOLR(vf));
- size = reg & E1000_VMOLR_RLPML_MASK;
- size += 4;
- reg &= ~E1000_VMOLR_RLPML_MASK;
- reg |= size;
- wr32(E1000_VMOLR(vf), reg);
- }
+static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
+ bool add, u32 vf)
+{
+ int pf_id = adapter->vfs_allocated_count;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;

- adapter->vf_data[vf].vlans_enabled++;
- }
- } else {
- if (i < E1000_VLVF_ARRAY_SIZE) {
- /* remove vf from the pool */
- reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
- /* if pool is empty then remove entry from vfta */
- if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
- reg = 0;
- igb_vfta_set(hw, vid, false);
- }
- wr32(E1000_VLVF(i), reg);
-
- /* do not modify RLPML for PF devices */
- if (vf >= adapter->vfs_allocated_count)
- return 0;
-
- adapter->vf_data[vf].vlans_enabled--;
- if (!adapter->vf_data[vf].vlans_enabled) {
- u32 size;
-
- reg = rd32(E1000_VMOLR(vf));
- size = reg & E1000_VMOLR_RLPML_MASK;
- size -= 4;
- reg &= ~E1000_VMOLR_RLPML_MASK;
- reg |= size;
- wr32(E1000_VMOLR(vf), reg);
- }
- }
+ /* If VLAN overlaps with one the PF is currently monitoring make
+ * sure that we are able to allocate a VLVF entry. This may be
+ * redundant but it guarantees PF will maintain visibility to
+ * the VLAN.
+ */
+ if (add && test_bit(vid, adapter->active_vlans)) {
+ err = igb_vfta_set(hw, vid, pf_id, true, false);
+ if (err)
+ return err;
}
- return 0;
+
+ err = igb_vfta_set(hw, vid, vf, add, false);
+
+ if (add && !err)
+ return err;
+
+ /* If we failed to add the VF VLAN or we are removing the VF VLAN
+ * we may need to drop the PF pool bit in order to allow us to free
+ * up the VLVF resources.
+ */
+ if (test_bit(vid, adapter->active_vlans) ||
+ (adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ igb_update_pf_vlvf(adapter, vid);
+
+ return err;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
@@ -5923,130 +6050,97 @@ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
wr32(E1000_VMVIR(vf), 0);
}

-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
+ u16 vlan, u8 qos)
{
- int err = 0;
- struct igb_adapter *adapter = netdev_priv(netdev);
+ int err;

- if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
- return -EINVAL;
- if (vlan || qos) {
- err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
- if (err)
- goto out;
- igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
- igb_set_vmolr(adapter, vf, !vlan);
- adapter->vf_data[vf].pf_vlan = vlan;
- adapter->vf_data[vf].pf_qos = qos;
- dev_info(&adapter->pdev->dev,
- "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF VLAN has been set, but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before attempting to use the VF device.\n");
- }
- } else {
- igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
- false, vf);
- igb_set_vmvir(adapter, vlan, vf);
- igb_set_vmolr(adapter, vf, true);
- adapter->vf_data[vf].pf_vlan = 0;
- adapter->vf_data[vf].pf_qos = 0;
+ err = igb_set_vf_vlan(adapter, vlan, true, vf);
+ if (err)
+ return err;
+
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+
+ /* revoke access to previous VLAN */
+ if (vlan != adapter->vf_data[vf].pf_vlan)
+ igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
}
-out:
+
return err;
}

-static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
+static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{
- struct e1000_hw *hw = &adapter->hw;
- int i;
- u32 reg;
+ /* Restore tagless access via VLAN 0 */
+ igb_set_vf_vlan(adapter, 0, true, vf);

- /* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
- if ((reg & E1000_VLVF_VLANID_ENABLE) &&
- vid == (reg & E1000_VLVF_VLANID_MASK))
- break;
- }
+ igb_set_vmvir(adapter, 0, vf);
+ igb_set_vmolr(adapter, vf, true);

- if (i >= E1000_VLVF_ARRAY_SIZE)
- i = -1;
+ /* Remove any PF assigned VLAN */
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;

- return i;
+ return 0;
}

-static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
{
- struct e1000_hw *hw = &adapter->hw;
- int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
- int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
- int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);

- /* If in promiscuous mode we need to make sure the PF also has
- * the VLAN filter set.
- */
- if (add && (adapter->netdev->flags & IFF_PROMISC))
- err = igb_vlvf_set(adapter, vid, add,
- adapter->vfs_allocated_count);
- if (err)
- goto out;
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;

- err = igb_vlvf_set(adapter, vid, add, vf);
+ return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
+ igb_disable_port_vlan(adapter, vf);
+}

- if (err)
- goto out;
+static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

- /* Go through all the checks to see if the VLAN filter should
- * be wiped completely.
- */
- if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
- u32 vlvf, bits;
- int regndx = igb_find_vlvf_entry(adapter, vid);
-
- if (regndx < 0)
- goto out;
- /* See if any other pools are set for this VLAN filter
- * entry other than the PF.
- */
- vlvf = bits = rd32(E1000_VLVF(regndx));
- bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
- adapter->vfs_allocated_count);
- /* If the filter was removed then ensure PF pool bit
- * is cleared if the PF only added itself to the pool
- * because the PF is in promiscuous mode.
- */
- if ((vlvf & VLAN_VID_MASK) == vid &&
- !test_bit(vid, adapter->active_vlans) &&
- !bits)
- igb_vlvf_set(adapter, vid, add,
- adapter->vfs_allocated_count);
- }
+ if (adapter->vf_data[vf].pf_vlan)
+ return -1;

-out:
- return err;
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ return igb_set_vf_vlan(adapter, vid, !!add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear flags - except flag that indicates PF has set the MAC */
- adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
- adapter->vf_data[vf].last_nack = jiffies;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];

- /* reset offloads to defaults */
- igb_set_vmolr(adapter, vf, true);
+ /* clear flags - except flag that indicates PF has set the MAC */
+ vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
+ vf_data->last_nack = jiffies;

/* reset vlans for device */
igb_clear_vf_vfta(adapter, vf);
- if (adapter->vf_data[vf].pf_vlan)
- igb_ndo_set_vf_vlan(adapter->netdev, vf,
- adapter->vf_data[vf].pf_vlan,
- adapter->vf_data[vf].pf_qos);
- else
- igb_clear_vf_vfta(adapter, vf);
+ igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
+ igb_set_vmvir(adapter, vf_data->pf_vlan |
+ (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);

/* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@ -6191,7 +6285,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
"VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
vf);
else
- retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
default:
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@ -6233,6 +6327,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
/**
* igb_set_uta - Set unicast filter table address
* @adapter: board private structure
+ * @set: boolean indicating if we are setting or clearing bits
*
* The unicast table address is a register array of 32-bit registers.
* The table is meant to be used in a way similar to how the MTA is used
@@ -6240,21 +6335,18 @@ static void igb_msg_task(struct igb_adapter *adapter)
* set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
* enable bit to allow vlan tag stripping when promiscuous mode is enabled
**/
-static void igb_set_uta(struct igb_adapter *adapter)
+static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
struct e1000_hw *hw = &adapter->hw;
+ u32 uta = set ? ~0 : 0;
int i;

- /* The UTA table only exists on 82576 hardware and newer */
- if (hw->mac.type < e1000_82576)
- return;
-
/* we only need to do this if VMDq is enabled */
if (!adapter->vfs_allocated_count)
return;

- for (i = 0; i < hw->mac.uta_reg_count; i++)
- array_wr32(E1000_UTA, i, ~0);
+ for (i = hw->mac.uta_reg_count; i--;)
+ array_wr32(E1000_UTA, i, uta);
}

/**
@@ -7201,8 +7293,6 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
ctrl &= ~E1000_CTRL_VME;
wr32(E1000_CTRL, ctrl);
}
-
- igb_rlpml_set(adapter);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev,
@@ -7212,11 +7302,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count;

- /* attempt to add filter to vlvf array */
- igb_vlvf_set(adapter, vid, true, pf_id);
-
/* add the filter since PF can receive vlans w/o entry in vlvf */
- igb_vfta_set(hw, vid, true);
+ if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ igb_vfta_set(hw, vid, pf_id, true, !!vid);

set_bit(vid, adapter->active_vlans);

@@ -7227,16 +7315,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count;
- s32 err;
-
- /* remove vlan from VLVF table array */
- err = igb_vlvf_set(adapter, vid, false, pf_id);
+ struct e1000_hw *hw = &adapter->hw;

- /* if vid was not present in VLVF just remove it from table */
- if (err)
- igb_vfta_set(hw, vid, false);
+ /* remove VID from filter table */
+ if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ igb_vfta_set(hw, vid, pf_id, false, true);

clear_bit(vid, adapter->active_vlans);

@@ -7245,11 +7329,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev,

static void igb_restore_vlan(struct igb_adapter *adapter)
{
- u16 vid;
+ u16 vid = 1;

igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+ igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

@@ -7704,15 +7789,14 @@ static void igb_io_resume(struct pci_dev *pdev)
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
u8 qsel)
{
- u32 rar_low, rar_high;
struct e1000_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;

/* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
+ * from network order (big endian) to CPU endian
*/
- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+ rar_low = le32_to_cpup((__be32 *)(addr));
+ rar_high = le16_to_cpup((__be16 *)(addr + 4));

/* Indicate to hardware the Address is Valid. */
rar_high |= E1000_RAH_AV;
@@ -7959,9 +8043,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
* than the Rx threshold. Set hwm to PBA - max frame
* size in 16B units, capping it at PBA - 6KB.
*/
- hwm = 64 * pba - adapter->max_frame_size / 16;
- if (hwm < 64 * (pba - 6))
- hwm = 64 * (pba - 6);
+ hwm = 64 * (pba - 6);
reg = rd32(E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
@@ -7971,9 +8053,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
* frame size, capping it at PBA - 10KB.
*/
- dmac_thr = pba - adapter->max_frame_size / 512;
- if (dmac_thr < pba - 10)
- dmac_thr = pba - 10;
+ dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)