|
@@ -59,6 +59,7 @@ static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
|
|
|
static const struct ixgbe_info *ixgbe_info_tbl[] = {
|
|
|
[board_82598] = &ixgbe_82598_info,
|
|
|
[board_82599] = &ixgbe_82599_info,
|
|
|
+ [board_X540] = &ixgbe_X540_info,
|
|
|
};
|
|
|
|
|
|
/* ixgbe_pci_tbl - PCI Device ID Table
|
|
@@ -112,6 +113,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
|
|
|
board_82599 },
|
|
|
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
|
|
|
board_82599 },
|
|
|
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
|
|
|
+ board_82599 },
|
|
|
|
|
|
/* required last entry */
|
|
|
{0, }
|
|
@@ -560,6 +563,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
|
|
|
break;
|
|
|
case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
if (direction == -1) {
|
|
|
/* other causes */
|
|
|
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
|
|
@@ -589,29 +593,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
|
|
|
{
|
|
|
u32 mask;
|
|
|
|
|
|
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
|
|
|
+ switch (adapter->hw.mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
|
|
|
- } else {
|
|
|
+ break;
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
mask = (qmask & 0xFFFFFFFF);
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
|
|
|
mask = (qmask >> 32);
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
|
|
|
- struct ixgbe_tx_buffer
|
|
|
- *tx_buffer_info)
|
|
|
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
|
|
|
+ struct ixgbe_tx_buffer *tx_buffer_info)
|
|
|
{
|
|
|
if (tx_buffer_info->dma) {
|
|
|
if (tx_buffer_info->mapped_as_page)
|
|
|
- dma_unmap_page(&adapter->pdev->dev,
|
|
|
+ dma_unmap_page(tx_ring->dev,
|
|
|
tx_buffer_info->dma,
|
|
|
tx_buffer_info->length,
|
|
|
DMA_TO_DEVICE);
|
|
|
else
|
|
|
- dma_unmap_single(&adapter->pdev->dev,
|
|
|
+ dma_unmap_single(tx_ring->dev,
|
|
|
tx_buffer_info->dma,
|
|
|
tx_buffer_info->length,
|
|
|
DMA_TO_DEVICE);
|
|
@@ -626,92 +635,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * ixgbe_tx_xon_state - check the tx ring xon state
|
|
|
- * @adapter: the ixgbe adapter
|
|
|
- * @tx_ring: the corresponding tx_ring
|
|
|
+ * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
|
|
|
+ * @adapter: driver private struct
|
|
|
+ * @reg_idx: reg idx of queue to query (0-127)
|
|
|
*
|
|
|
- * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
|
|
|
- * corresponding TC of this tx_ring when checking TFCS.
|
|
|
+ * Helper function to determine the traffic index for a particular
|
|
|
+ * register index.
|
|
|
*
|
|
|
- * Returns : true if in xon state (currently not paused)
|
|
|
+ * Returns : a tc index in the range 0-7, or 0-3 when only four TCs are configured
|
|
|
*/
|
|
|
-static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
|
|
|
- struct ixgbe_ring *tx_ring)
|
|
|
+u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
|
|
|
{
|
|
|
- u32 txoff = IXGBE_TFCS_TXOFF;
|
|
|
+ int tc = -1;
|
|
|
+ int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
|
|
|
|
|
|
-#ifdef CONFIG_IXGBE_DCB
|
|
|
- if (adapter->dcb_cfg.pfc_mode_enable) {
|
|
|
- int tc;
|
|
|
- int reg_idx = tx_ring->reg_idx;
|
|
|
- int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
|
|
|
+ /* if DCB is not enabled the queues have no TC */
|
|
|
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
|
|
|
+ return tc;
|
|
|
+
|
|
|
+ /* check valid range */
|
|
|
+ if (reg_idx >= adapter->hw.mac.max_tx_queues)
|
|
|
+ return tc;
|
|
|
|
|
|
- switch (adapter->hw.mac.type) {
|
|
|
+ switch (adapter->hw.mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
+ tc = reg_idx >> 2;
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ if (dcb_i != 4 && dcb_i != 8)
|
|
|
+ break;
|
|
|
+
|
|
|
+ /* if VMDq is enabled the lowest order bits determine TC */
|
|
|
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
|
|
|
+ IXGBE_FLAG_VMDQ_ENABLED)) {
|
|
|
+ tc = reg_idx & (dcb_i - 1);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Convert the reg_idx into the correct TC. This bitmask
|
|
|
+ * targets the last full 32 ring traffic class and assigns
|
|
|
+ * it a value of 1. From there the rest of the rings are
|
|
|
+ * based on shifting the mask further up to include the
|
|
|
+ * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
|
|
|
+ * will only ever be 8 or 4 and that reg_idx will never
|
|
|
+ * be greater than 128. The code without the power of 2
|
|
|
+ * optimizations would be:
|
|
|
+ * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
|
|
|
+ */
|
|
|
+ tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
|
|
|
+ tc >>= 9 - (reg_idx >> 5);
|
|
|
+ }
|
|
|
+
|
|
|
+ return tc;
|
|
|
+}
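
As a quick sanity check of the power-of-two mapping above, here is a small self-contained userspace sketch (illustrative only, not part of the patch; txq_to_tc() is a made-up stand-in for the new helper, not a driver symbol):

#include <stdio.h>

/* Same arithmetic as the non-82598 branch of the helper above. */
static int txq_to_tc(int reg_idx, int dcb_i)	/* dcb_i is 4 or 8 */
{
	int tc = ((reg_idx & 0x1f) + 0x20) * dcb_i;
	return tc >> (9 - (reg_idx >> 5));
}

int main(void)
{
	/* dcb_i = 8: rings 0-31 -> TC0, 32-63 -> TC1, 64-79 -> TC2,
	 * 80-95 -> TC3, then eight rings per TC for TC4-TC7. */
	printf("%d %d %d %d\n", txq_to_tc(0, 8), txq_to_tc(64, 8),
	       txq_to_tc(96, 8), txq_to_tc(120, 8));	/* prints 0 2 4 7 */
	return 0;
}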
|
|
|
+
|
|
|
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
|
|
|
+{
|
|
|
+ struct ixgbe_hw *hw = &adapter->hw;
|
|
|
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
|
|
|
+ u32 data = 0;
|
|
|
+ u32 xoff[8] = {0};
|
|
|
+ int i;
|
|
|
+
|
|
|
+ if ((hw->fc.current_mode == ixgbe_fc_full) ||
|
|
|
+ (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
|
|
|
+ switch (hw->mac.type) {
|
|
|
case ixgbe_mac_82598EB:
|
|
|
- tc = reg_idx >> 2;
|
|
|
- txoff = IXGBE_TFCS_TXOFF0;
|
|
|
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
|
|
|
break;
|
|
|
- case ixgbe_mac_82599EB:
|
|
|
- tc = 0;
|
|
|
- txoff = IXGBE_TFCS_TXOFF;
|
|
|
- if (dcb_i == 8) {
|
|
|
- /* TC0, TC1 */
|
|
|
- tc = reg_idx >> 5;
|
|
|
- if (tc == 2) /* TC2, TC3 */
|
|
|
- tc += (reg_idx - 64) >> 4;
|
|
|
- else if (tc == 3) /* TC4, TC5, TC6, TC7 */
|
|
|
- tc += 1 + ((reg_idx - 96) >> 3);
|
|
|
- } else if (dcb_i == 4) {
|
|
|
- /* TC0, TC1 */
|
|
|
- tc = reg_idx >> 6;
|
|
|
- if (tc == 1) {
|
|
|
- tc += (reg_idx - 64) >> 5;
|
|
|
- if (tc == 2) /* TC2, TC3 */
|
|
|
- tc += (reg_idx - 96) >> 4;
|
|
|
- }
|
|
|
- }
|
|
|
+ default:
|
|
|
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
|
|
|
+ }
|
|
|
+ hwstats->lxoffrxc += data;
|
|
|
+
|
|
|
+ /* refill credits (no tx hang) if we received xoff */
|
|
|
+ if (!data)
|
|
|
+ return;
|
|
|
+
|
|
|
+ for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
|
|
|
+ &adapter->tx_ring[i]->state);
|
|
|
+ return;
|
|
|
+ } else if (!(adapter->dcb_cfg.pfc_mode_enable))
|
|
|
+ return;
|
|
|
+
|
|
|
+ /* update stats for each tc, only valid with PFC enabled */
|
|
|
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
|
|
|
+ switch (hw->mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
|
|
|
break;
|
|
|
default:
|
|
|
- tc = 0;
|
|
|
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
|
|
|
}
|
|
|
- txoff <<= tc;
|
|
|
+ hwstats->pxoffrxc[i] += xoff[i];
|
|
|
}
|
|
|
-#endif
|
|
|
- return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
|
|
|
+
|
|
|
+ /* disarm tx queues that have received xoff frames */
|
|
|
+ for (i = 0; i < adapter->num_tx_queues; i++) {
|
|
|
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
|
|
|
+ u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
|
|
|
+
|
|
|
+ if (xoff[tc])
|
|
|
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
|
|
|
+{
|
|
|
+ return ring->tx_stats.completed;
|
|
|
}
|
|
|
|
|
|
-static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
|
|
|
- struct ixgbe_ring *tx_ring,
|
|
|
- unsigned int eop)
|
|
|
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
|
|
|
{
|
|
|
+ struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
|
|
- /* Detect a transmit hang in hardware, this serializes the
|
|
|
- * check with the clearing of time_stamp and movement of eop */
|
|
|
- adapter->detect_tx_hung = false;
|
|
|
- if (tx_ring->tx_buffer_info[eop].time_stamp &&
|
|
|
- time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
|
|
|
- ixgbe_tx_xon_state(adapter, tx_ring)) {
|
|
|
- /* detected Tx unit hang */
|
|
|
- union ixgbe_adv_tx_desc *tx_desc;
|
|
|
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
|
|
|
- e_err(drv, "Detected Tx Unit Hang\n"
|
|
|
- " Tx Queue <%d>\n"
|
|
|
- " TDH, TDT <%x>, <%x>\n"
|
|
|
- " next_to_use <%x>\n"
|
|
|
- " next_to_clean <%x>\n"
|
|
|
- "tx_buffer_info[next_to_clean]\n"
|
|
|
- " time_stamp <%lx>\n"
|
|
|
- " jiffies <%lx>\n",
|
|
|
- tx_ring->queue_index,
|
|
|
- IXGBE_READ_REG(hw, tx_ring->head),
|
|
|
- IXGBE_READ_REG(hw, tx_ring->tail),
|
|
|
- tx_ring->next_to_use, eop,
|
|
|
- tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
|
|
|
- return true;
|
|
|
+ u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
|
|
|
+ u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
|
|
|
+
|
|
|
+ if (head != tail)
|
|
|
+ return (head < tail) ?
|
|
|
+ tail - head : (tail + ring->count - head);
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
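
(The head/tail expression above handles descriptor-ring wrap-around: for example, with ring->count = 512, head = 500 and tail = 10 the pending count is 10 + 512 - 500 = 22, while head = 100 and tail = 130 gives simply 130 - 100 = 30.)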
|
|
|
+
|
|
|
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
|
|
|
+{
|
|
|
+ u32 tx_done = ixgbe_get_tx_completed(tx_ring);
|
|
|
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
|
|
|
+ u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
|
|
|
+ bool ret = false;
|
|
|
+
|
|
|
+ clear_check_for_tx_hang(tx_ring);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Check for a hung queue, but be thorough. This verifies
|
|
|
+ * that a transmit has been completed since the previous
|
|
|
+ * check AND there is at least one packet pending. The
|
|
|
+ * ARMED bit is set to indicate a potential hang. The
|
|
|
+ * bit is cleared if a pause frame is received to remove
|
|
|
+ * false hang detection due to PFC or 802.3x frames. By
|
|
|
+ * requiring this to fail twice we avoid races with
|
|
|
+ * pfc clearing the ARMED bit and conditions where we
|
|
|
+ * run the check_tx_hang logic with a transmit completion
|
|
|
+ * pending but without time to complete it yet.
|
|
|
+ */
|
|
|
+ if ((tx_done_old == tx_done) && tx_pending) {
|
|
|
+ /* make sure it is true for two checks in a row */
|
|
|
+ ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
|
|
|
+ &tx_ring->state);
|
|
|
+ } else {
|
|
|
+ /* update completed stats and continue */
|
|
|
+ tx_ring->tx_stats.tx_done_old = tx_done;
|
|
|
+ /* reset the countdown */
|
|
|
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
|
|
|
}
|
|
|
|
|
|
- return false;
|
|
|
+ return ret;
|
|
|
}
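
For readers skimming the hang logic, a minimal userspace model of the two-strike pattern may help (illustrative only; hang_state, done_old and armed are made-up stand-ins for the ring state and the __IXGBE_HANG_CHECK_ARMED bit, not driver names):

struct hang_state {
	unsigned long done_old;	/* completions counted at the previous check */
	int armed;		/* set by the first suspicious check */
};

/* Reports a hang only when two consecutive checks see pending work with
 * no new completions, mirroring the test_and_set_bit() usage above. */
static int check_hang(struct hang_state *s, unsigned long done,
		      unsigned long pending)
{
	if (done == s->done_old && pending) {
		int was_armed = s->armed;	/* second strike -> hang */
		s->armed = 1;
		return was_armed;
	}
	s->done_old = done;			/* forward progress was made */
	s->armed = 0;
	return 0;
}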
|
|
|
|
|
|
#define IXGBE_MAX_TXD_PWR 14
|
|
@@ -734,11 +817,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
struct ixgbe_ring *tx_ring)
|
|
|
{
|
|
|
struct ixgbe_adapter *adapter = q_vector->adapter;
|
|
|
- struct net_device *netdev = adapter->netdev;
|
|
|
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
|
|
|
struct ixgbe_tx_buffer *tx_buffer_info;
|
|
|
- unsigned int i, eop, count = 0;
|
|
|
unsigned int total_bytes = 0, total_packets = 0;
|
|
|
+ u16 i, eop, count = 0;
|
|
|
|
|
|
i = tx_ring->next_to_clean;
|
|
|
eop = tx_ring->tx_buffer_info[i].next_to_watch;
|
|
@@ -749,148 +831,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
bool cleaned = false;
|
|
|
rmb(); /* read buffer_info after eop_desc */
|
|
|
for ( ; !cleaned; count++) {
|
|
|
- struct sk_buff *skb;
|
|
|
tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
|
|
|
tx_buffer_info = &tx_ring->tx_buffer_info[i];
|
|
|
- cleaned = (i == eop);
|
|
|
- skb = tx_buffer_info->skb;
|
|
|
-
|
|
|
- if (cleaned && skb) {
|
|
|
- unsigned int segs, bytecount;
|
|
|
- unsigned int hlen = skb_headlen(skb);
|
|
|
-
|
|
|
- /* gso_segs is currently only valid for tcp */
|
|
|
- segs = skb_shinfo(skb)->gso_segs ?: 1;
|
|
|
-#ifdef IXGBE_FCOE
|
|
|
- /* adjust for FCoE Sequence Offload */
|
|
|
- if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
|
|
|
- && skb_is_gso(skb)
|
|
|
- && vlan_get_protocol(skb) ==
|
|
|
- htons(ETH_P_FCOE)) {
|
|
|
- hlen = skb_transport_offset(skb) +
|
|
|
- sizeof(struct fc_frame_header) +
|
|
|
- sizeof(struct fcoe_crc_eof);
|
|
|
- segs = DIV_ROUND_UP(skb->len - hlen,
|
|
|
- skb_shinfo(skb)->gso_size);
|
|
|
- }
|
|
|
-#endif /* IXGBE_FCOE */
|
|
|
- /* multiply data chunks by size of headers */
|
|
|
- bytecount = ((segs - 1) * hlen) + skb->len;
|
|
|
- total_packets += segs;
|
|
|
- total_bytes += bytecount;
|
|
|
- }
|
|
|
-
|
|
|
- ixgbe_unmap_and_free_tx_resource(adapter,
|
|
|
- tx_buffer_info);
|
|
|
|
|
|
tx_desc->wb.status = 0;
|
|
|
+ cleaned = (i == eop);
|
|
|
|
|
|
i++;
|
|
|
if (i == tx_ring->count)
|
|
|
i = 0;
|
|
|
+
|
|
|
+ if (cleaned && tx_buffer_info->skb) {
|
|
|
+ total_bytes += tx_buffer_info->bytecount;
|
|
|
+ total_packets += tx_buffer_info->gso_segs;
|
|
|
+ }
|
|
|
+
|
|
|
+ ixgbe_unmap_and_free_tx_resource(tx_ring,
|
|
|
+ tx_buffer_info);
|
|
|
}
|
|
|
|
|
|
+ tx_ring->tx_stats.completed++;
|
|
|
eop = tx_ring->tx_buffer_info[i].next_to_watch;
|
|
|
eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
|
|
|
}
|
|
|
|
|
|
tx_ring->next_to_clean = i;
|
|
|
+ tx_ring->total_bytes += total_bytes;
|
|
|
+ tx_ring->total_packets += total_packets;
|
|
|
+ u64_stats_update_begin(&tx_ring->syncp);
|
|
|
+ tx_ring->stats.packets += total_packets;
|
|
|
+ tx_ring->stats.bytes += total_bytes;
|
|
|
+ u64_stats_update_end(&tx_ring->syncp);
|
|
|
+
|
|
|
+ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
|
|
|
+ /* schedule immediate reset if we believe we hung */
|
|
|
+ struct ixgbe_hw *hw = &adapter->hw;
|
|
|
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
|
|
|
+ e_err(drv, "Detected Tx Unit Hang\n"
|
|
|
+ " Tx Queue <%d>\n"
|
|
|
+ " TDH, TDT <%x>, <%x>\n"
|
|
|
+ " next_to_use <%x>\n"
|
|
|
+ " next_to_clean <%x>\n"
|
|
|
+ "tx_buffer_info[next_to_clean]\n"
|
|
|
+ " time_stamp <%lx>\n"
|
|
|
+ " jiffies <%lx>\n",
|
|
|
+ tx_ring->queue_index,
|
|
|
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
|
|
|
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
|
|
|
+ tx_ring->next_to_use, eop,
|
|
|
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
|
|
|
+
|
|
|
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
|
|
|
+
|
|
|
+ e_info(probe,
|
|
|
+ "tx hang %d detected on queue %d, resetting adapter\n",
|
|
|
+ adapter->tx_timeout_count + 1, tx_ring->queue_index);
|
|
|
+
|
|
|
+ /* schedule immediate reset if we believe we hung */
|
|
|
+ ixgbe_tx_timeout(adapter->netdev);
|
|
|
+
|
|
|
+ /* the adapter is about to reset, no point in enabling stuff */
|
|
|
+ return true;
|
|
|
+ }
|
|
|
|
|
|
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
|
|
|
- if (unlikely(count && netif_carrier_ok(netdev) &&
|
|
|
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
|
|
|
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
|
|
|
/* Make sure that anybody stopping the queue after this
|
|
|
* sees the new next_to_clean.
|
|
|
*/
|
|
|
smp_mb();
|
|
|
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
|
|
|
+ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
|
|
|
!test_bit(__IXGBE_DOWN, &adapter->state)) {
|
|
|
- netif_wake_subqueue(netdev, tx_ring->queue_index);
|
|
|
- ++tx_ring->restart_queue;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- if (adapter->detect_tx_hung) {
|
|
|
- if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
|
|
|
- /* schedule immediate reset if we believe we hung */
|
|
|
- e_info(probe, "tx hang %d detected, resetting "
|
|
|
- "adapter\n", adapter->tx_timeout_count + 1);
|
|
|
- ixgbe_tx_timeout(adapter->netdev);
|
|
|
+ netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
|
|
|
+ ++tx_ring->tx_stats.restart_queue;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /* re-arm the interrupt */
|
|
|
- if (count >= tx_ring->work_limit)
|
|
|
- ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
|
|
|
-
|
|
|
- tx_ring->total_bytes += total_bytes;
|
|
|
- tx_ring->total_packets += total_packets;
|
|
|
- u64_stats_update_begin(&tx_ring->syncp);
|
|
|
- tx_ring->stats.packets += total_packets;
|
|
|
- tx_ring->stats.bytes += total_bytes;
|
|
|
- u64_stats_update_end(&tx_ring->syncp);
|
|
|
return count < tx_ring->work_limit;
|
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_IXGBE_DCA
|
|
|
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
|
|
|
- struct ixgbe_ring *rx_ring)
|
|
|
+ struct ixgbe_ring *rx_ring,
|
|
|
+ int cpu)
|
|
|
{
|
|
|
+ struct ixgbe_hw *hw = &adapter->hw;
|
|
|
u32 rxctrl;
|
|
|
- int cpu = get_cpu();
|
|
|
- int q = rx_ring->reg_idx;
|
|
|
-
|
|
|
- if (rx_ring->cpu != cpu) {
|
|
|
- rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
|
|
|
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
|
|
|
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
|
|
|
- rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
|
|
|
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
|
|
|
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
|
|
|
- rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
|
|
|
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
|
|
|
- }
|
|
|
- rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
|
|
|
- rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
|
|
|
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
|
|
|
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
|
|
|
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
|
|
|
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
|
|
|
- rx_ring->cpu = cpu;
|
|
|
+ u8 reg_idx = rx_ring->reg_idx;
|
|
|
+
|
|
|
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
|
|
|
+ switch (hw->mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
|
|
|
+ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
|
|
|
+ break;
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
|
|
|
+ rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
|
|
|
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
}
|
|
|
- put_cpu();
|
|
|
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
|
|
|
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
|
|
|
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
|
|
|
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
|
|
|
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
|
|
|
}
|
|
|
|
|
|
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
|
|
|
- struct ixgbe_ring *tx_ring)
|
|
|
+ struct ixgbe_ring *tx_ring,
|
|
|
+ int cpu)
|
|
|
{
|
|
|
+ struct ixgbe_hw *hw = &adapter->hw;
|
|
|
u32 txctrl;
|
|
|
+ u8 reg_idx = tx_ring->reg_idx;
|
|
|
+
|
|
|
+ switch (hw->mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
|
|
|
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
|
|
|
+ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
|
|
|
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
|
|
|
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
|
|
|
+ break;
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
|
|
|
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
|
|
|
+ txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
|
|
|
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
|
|
|
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
|
|
|
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
|
|
|
+{
|
|
|
+ struct ixgbe_adapter *adapter = q_vector->adapter;
|
|
|
int cpu = get_cpu();
|
|
|
- int q = tx_ring->reg_idx;
|
|
|
- struct ixgbe_hw *hw = &adapter->hw;
|
|
|
+ long r_idx;
|
|
|
+ int i;
|
|
|
|
|
|
- if (tx_ring->cpu != cpu) {
|
|
|
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
|
|
|
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
|
|
|
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
|
|
|
- txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
|
|
|
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
|
|
|
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
|
|
|
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
|
|
|
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
|
|
|
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
|
|
|
- txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
|
|
|
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
|
|
|
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
|
|
|
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
|
|
|
- }
|
|
|
- tx_ring->cpu = cpu;
|
|
|
+ if (q_vector->cpu == cpu)
|
|
|
+ goto out_no_update;
|
|
|
+
|
|
|
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
|
|
|
+ for (i = 0; i < q_vector->txr_count; i++) {
|
|
|
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
|
|
|
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
|
|
|
+ r_idx + 1);
|
|
|
+ }
|
|
|
+
|
|
|
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
|
|
|
+ for (i = 0; i < q_vector->rxr_count; i++) {
|
|
|
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
|
|
|
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
|
|
|
+ r_idx + 1);
|
|
|
}
|
|
|
+
|
|
|
+ q_vector->cpu = cpu;
|
|
|
+out_no_update:
|
|
|
put_cpu();
|
|
|
}
|
|
|
|
|
|
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
|
|
|
{
|
|
|
+ int num_q_vectors;
|
|
|
int i;
|
|
|
|
|
|
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
|
|
@@ -899,22 +1015,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
|
|
|
/* always use CB2 mode, difference is masked in the CB driver */
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
|
|
|
|
|
|
- for (i = 0; i < adapter->num_tx_queues; i++) {
|
|
|
- adapter->tx_ring[i]->cpu = -1;
|
|
|
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
|
|
|
- }
|
|
|
- for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
- adapter->rx_ring[i]->cpu = -1;
|
|
|
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
|
|
|
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
|
|
|
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
|
|
|
+ else
|
|
|
+ num_q_vectors = 1;
|
|
|
+
|
|
|
+ for (i = 0; i < num_q_vectors; i++) {
|
|
|
+ adapter->q_vector[i]->cpu = -1;
|
|
|
+ ixgbe_update_dca(adapter->q_vector[i]);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
static int __ixgbe_notify_dca(struct device *dev, void *data)
|
|
|
{
|
|
|
- struct net_device *netdev = dev_get_drvdata(dev);
|
|
|
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
|
|
+ struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
|
|
|
unsigned long event = *(unsigned long *)data;
|
|
|
|
|
|
+ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
|
|
|
+ return 0;
|
|
|
+
|
|
|
switch (event) {
|
|
|
case DCA_PROVIDER_ADD:
|
|
|
/* if we're already enabled, don't do it again */
|
|
@@ -1013,8 +1132,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
}
|
|
|
|
|
|
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
|
|
|
- struct ixgbe_ring *rx_ring, u32 val)
|
|
|
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
|
|
|
{
|
|
|
/*
|
|
|
* Force memory writes to complete before letting h/w
|
|
@@ -1023,72 +1141,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
|
|
|
* such as IA-64).
|
|
|
*/
|
|
|
wmb();
|
|
|
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
|
|
|
+ writel(val, rx_ring->tail);
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
* ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
|
|
|
- * @adapter: address of board private structure
|
|
|
+ * @rx_ring: ring to place buffers on
|
|
|
+ * @cleaned_count: number of buffers to replace
|
|
|
**/
|
|
|
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
|
|
|
- struct ixgbe_ring *rx_ring,
|
|
|
- int cleaned_count)
|
|
|
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
|
|
|
{
|
|
|
- struct net_device *netdev = adapter->netdev;
|
|
|
- struct pci_dev *pdev = adapter->pdev;
|
|
|
union ixgbe_adv_rx_desc *rx_desc;
|
|
|
struct ixgbe_rx_buffer *bi;
|
|
|
- unsigned int i;
|
|
|
- unsigned int bufsz = rx_ring->rx_buf_len;
|
|
|
+ struct sk_buff *skb;
|
|
|
+ u16 i = rx_ring->next_to_use;
|
|
|
|
|
|
- i = rx_ring->next_to_use;
|
|
|
- bi = &rx_ring->rx_buffer_info[i];
|
|
|
+ /* do nothing if no valid netdev defined */
|
|
|
+ if (!rx_ring->netdev)
|
|
|
+ return;
|
|
|
|
|
|
while (cleaned_count--) {
|
|
|
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
|
|
|
+ bi = &rx_ring->rx_buffer_info[i];
|
|
|
+ skb = bi->skb;
|
|
|
|
|
|
- if (!bi->page_dma &&
|
|
|
- (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
|
|
|
- if (!bi->page) {
|
|
|
- bi->page = netdev_alloc_page(netdev);
|
|
|
- if (!bi->page) {
|
|
|
- adapter->alloc_rx_page_failed++;
|
|
|
- goto no_buffers;
|
|
|
- }
|
|
|
- bi->page_offset = 0;
|
|
|
- } else {
|
|
|
- /* use a half page if we're re-using */
|
|
|
- bi->page_offset ^= (PAGE_SIZE / 2);
|
|
|
- }
|
|
|
-
|
|
|
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
|
|
|
- bi->page_offset,
|
|
|
- (PAGE_SIZE / 2),
|
|
|
- DMA_FROM_DEVICE);
|
|
|
- }
|
|
|
-
|
|
|
- if (!bi->skb) {
|
|
|
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
|
|
|
- bufsz);
|
|
|
- bi->skb = skb;
|
|
|
-
|
|
|
+ if (!skb) {
|
|
|
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
|
|
|
+ rx_ring->rx_buf_len);
|
|
|
if (!skb) {
|
|
|
- adapter->alloc_rx_buff_failed++;
|
|
|
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
|
|
|
goto no_buffers;
|
|
|
}
|
|
|
/* initialize queue mapping */
|
|
|
skb_record_rx_queue(skb, rx_ring->queue_index);
|
|
|
+ bi->skb = skb;
|
|
|
}
|
|
|
|
|
|
if (!bi->dma) {
|
|
|
- bi->dma = dma_map_single(&pdev->dev,
|
|
|
- bi->skb->data,
|
|
|
+ bi->dma = dma_map_single(rx_ring->dev,
|
|
|
+ skb->data,
|
|
|
rx_ring->rx_buf_len,
|
|
|
DMA_FROM_DEVICE);
|
|
|
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
|
|
|
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
|
|
|
+ bi->dma = 0;
|
|
|
+ goto no_buffers;
|
|
|
+ }
|
|
|
}
|
|
|
- /* Refresh the desc even if buffer_addrs didn't change because
|
|
|
- * each write-back erases this info. */
|
|
|
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
|
|
|
+
|
|
|
+ if (ring_is_ps_enabled(rx_ring)) {
|
|
|
+ if (!bi->page) {
|
|
|
+ bi->page = netdev_alloc_page(rx_ring->netdev);
|
|
|
+ if (!bi->page) {
|
|
|
+ rx_ring->rx_stats.alloc_rx_page_failed++;
|
|
|
+ goto no_buffers;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (!bi->page_dma) {
|
|
|
+ /* use a half page if we're re-using */
|
|
|
+ bi->page_offset ^= PAGE_SIZE / 2;
|
|
|
+ bi->page_dma = dma_map_page(rx_ring->dev,
|
|
|
+ bi->page,
|
|
|
+ bi->page_offset,
|
|
|
+ PAGE_SIZE / 2,
|
|
|
+ DMA_FROM_DEVICE);
|
|
|
+ if (dma_mapping_error(rx_ring->dev,
|
|
|
+ bi->page_dma)) {
|
|
|
+ rx_ring->rx_stats.alloc_rx_page_failed++;
|
|
|
+ bi->page_dma = 0;
|
|
|
+ goto no_buffers;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Refresh the desc even if buffer_addrs didn't change
|
|
|
+ * because each write-back erases this info. */
|
|
|
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
|
|
|
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
|
|
|
} else {
|
|
@@ -1099,56 +1226,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
|
|
|
i++;
|
|
|
if (i == rx_ring->count)
|
|
|
i = 0;
|
|
|
- bi = &rx_ring->rx_buffer_info[i];
|
|
|
}
|
|
|
|
|
|
no_buffers:
|
|
|
if (rx_ring->next_to_use != i) {
|
|
|
rx_ring->next_to_use = i;
|
|
|
- if (i-- == 0)
|
|
|
- i = (rx_ring->count - 1);
|
|
|
-
|
|
|
- ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
|
|
|
+ ixgbe_release_rx_desc(rx_ring, i);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
|
|
|
-{
|
|
|
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
|
|
|
-}
|
|
|
-
|
|
|
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
|
|
|
-{
|
|
|
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
|
|
|
-}
|
|
|
-
|
|
|
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
|
|
|
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
|
|
|
{
|
|
|
- return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
|
|
|
- IXGBE_RXDADV_RSCCNT_MASK) >>
|
|
|
- IXGBE_RXDADV_RSCCNT_SHIFT;
|
|
|
+ /* HW will not DMA in data larger than the given buffer, even if it
|
|
|
+ * parses the (NFS, of course) header to be larger. In that case, it
|
|
|
+ * fills the header buffer and spills the rest into the page.
|
|
|
+ */
|
|
|
+ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
|
|
|
+ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
|
|
|
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
|
|
|
+ if (hlen > IXGBE_RX_HDR_SIZE)
|
|
|
+ hlen = IXGBE_RX_HDR_SIZE;
|
|
|
+ return hlen;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
* ixgbe_transform_rsc_queue - change rsc queue into a full packet
|
|
|
* @skb: pointer to the last skb in the rsc queue
|
|
|
- * @count: pointer to number of packets coalesced in this context
|
|
|
*
|
|
|
* This function changes a queue full of hw rsc buffers into a completed
|
|
|
* packet. It uses the ->prev pointers to find the first packet and then
|
|
|
* turns it into the frag list owner.
|
|
|
**/
|
|
|
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
|
|
|
- u64 *count)
|
|
|
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
|
|
|
{
|
|
|
unsigned int frag_list_size = 0;
|
|
|
+ unsigned int skb_cnt = 1;
|
|
|
|
|
|
while (skb->prev) {
|
|
|
struct sk_buff *prev = skb->prev;
|
|
|
frag_list_size += skb->len;
|
|
|
skb->prev = NULL;
|
|
|
skb = prev;
|
|
|
- *count += 1;
|
|
|
+ skb_cnt++;
|
|
|
}
|
|
|
|
|
|
skb_shinfo(skb)->frag_list = skb->next;
|
|
@@ -1156,68 +1275,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
|
|
|
skb->len += frag_list_size;
|
|
|
skb->data_len += frag_list_size;
|
|
|
skb->truesize += frag_list_size;
|
|
|
+ IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
|
|
|
+
|
|
|
return skb;
|
|
|
}
|
|
|
|
|
|
-struct ixgbe_rsc_cb {
|
|
|
- dma_addr_t dma;
|
|
|
- bool delay_unmap;
|
|
|
-};
|
|
|
-
|
|
|
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
|
|
|
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
|
|
|
+{
|
|
|
+ return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
|
|
|
+ IXGBE_RXDADV_RSCCNT_MASK);
|
|
|
+}
|
|
|
|
|
|
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
struct ixgbe_ring *rx_ring,
|
|
|
int *work_done, int work_to_do)
|
|
|
{
|
|
|
struct ixgbe_adapter *adapter = q_vector->adapter;
|
|
|
- struct pci_dev *pdev = adapter->pdev;
|
|
|
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
|
|
|
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
|
|
|
struct sk_buff *skb;
|
|
|
- unsigned int i, rsc_count = 0;
|
|
|
- u32 len, staterr;
|
|
|
- u16 hdr_info;
|
|
|
- bool cleaned = false;
|
|
|
- int cleaned_count = 0;
|
|
|
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
|
|
|
+ const int current_node = numa_node_id();
|
|
|
#ifdef IXGBE_FCOE
|
|
|
int ddp_bytes = 0;
|
|
|
#endif /* IXGBE_FCOE */
|
|
|
+ u32 staterr;
|
|
|
+ u16 i;
|
|
|
+ u16 cleaned_count = 0;
|
|
|
+ bool pkt_is_rsc = false;
|
|
|
|
|
|
i = rx_ring->next_to_clean;
|
|
|
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
|
|
|
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
|
|
|
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
|
|
|
|
|
|
while (staterr & IXGBE_RXD_STAT_DD) {
|
|
|
u32 upper_len = 0;
|
|
|
- if (*work_done >= work_to_do)
|
|
|
- break;
|
|
|
- (*work_done)++;
|
|
|
|
|
|
rmb(); /* read descriptor and rx_buffer_info after status DD */
|
|
|
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
|
|
|
- hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
|
|
|
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
|
|
|
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
|
|
|
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
|
|
|
- if ((len > IXGBE_RX_HDR_SIZE) ||
|
|
|
- (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
|
|
|
- len = IXGBE_RX_HDR_SIZE;
|
|
|
- } else {
|
|
|
- len = le16_to_cpu(rx_desc->wb.upper.length);
|
|
|
- }
|
|
|
|
|
|
- cleaned = true;
|
|
|
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
|
|
|
+
|
|
|
skb = rx_buffer_info->skb;
|
|
|
- prefetch(skb->data);
|
|
|
rx_buffer_info->skb = NULL;
|
|
|
+ prefetch(skb->data);
|
|
|
|
|
|
+ if (ring_is_rsc_enabled(rx_ring))
|
|
|
+ pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
|
|
|
+
|
|
|
+ /* if this is a skb from previous receive DMA will be 0 */
|
|
|
if (rx_buffer_info->dma) {
|
|
|
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
|
|
|
- (!(staterr & IXGBE_RXD_STAT_EOP)) &&
|
|
|
- (!(skb->prev))) {
|
|
|
+ u16 hlen;
|
|
|
+ if (pkt_is_rsc &&
|
|
|
+ !(staterr & IXGBE_RXD_STAT_EOP) &&
|
|
|
+ !skb->prev) {
|
|
|
/*
|
|
|
* When HWRSC is enabled, delay unmapping
|
|
|
* of the first packet. It carries the
|
|
@@ -1228,29 +1338,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
IXGBE_RSC_CB(skb)->delay_unmap = true;
|
|
|
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
|
|
|
} else {
|
|
|
- dma_unmap_single(&pdev->dev,
|
|
|
+ dma_unmap_single(rx_ring->dev,
|
|
|
rx_buffer_info->dma,
|
|
|
rx_ring->rx_buf_len,
|
|
|
DMA_FROM_DEVICE);
|
|
|
}
|
|
|
rx_buffer_info->dma = 0;
|
|
|
- skb_put(skb, len);
|
|
|
+
|
|
|
+ if (ring_is_ps_enabled(rx_ring)) {
|
|
|
+ hlen = ixgbe_get_hlen(rx_desc);
|
|
|
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
|
|
|
+ } else {
|
|
|
+ hlen = le16_to_cpu(rx_desc->wb.upper.length);
|
|
|
+ }
|
|
|
+
|
|
|
+ skb_put(skb, hlen);
|
|
|
+ } else {
|
|
|
+ /* assume packet split since header is unmapped */
|
|
|
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
|
|
|
}
|
|
|
|
|
|
if (upper_len) {
|
|
|
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
|
|
|
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
|
|
|
+ dma_unmap_page(rx_ring->dev,
|
|
|
+ rx_buffer_info->page_dma,
|
|
|
+ PAGE_SIZE / 2,
|
|
|
+ DMA_FROM_DEVICE);
|
|
|
rx_buffer_info->page_dma = 0;
|
|
|
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
|
|
|
rx_buffer_info->page,
|
|
|
rx_buffer_info->page_offset,
|
|
|
upper_len);
|
|
|
|
|
|
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
|
|
|
- (page_count(rx_buffer_info->page) != 1))
|
|
|
- rx_buffer_info->page = NULL;
|
|
|
- else
|
|
|
+ if ((page_count(rx_buffer_info->page) == 1) &&
|
|
|
+ (page_to_nid(rx_buffer_info->page) == current_node))
|
|
|
get_page(rx_buffer_info->page);
|
|
|
+ else
|
|
|
+ rx_buffer_info->page = NULL;
|
|
|
|
|
|
skb->len += upper_len;
|
|
|
skb->data_len += upper_len;
|
|
@@ -1265,10 +1388,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
prefetch(next_rxd);
|
|
|
cleaned_count++;
|
|
|
|
|
|
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
|
|
|
- rsc_count = ixgbe_get_rsc_count(rx_desc);
|
|
|
-
|
|
|
- if (rsc_count) {
|
|
|
+ if (pkt_is_rsc) {
|
|
|
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
|
|
|
IXGBE_RXDADV_NEXTP_SHIFT;
|
|
|
next_buffer = &rx_ring->rx_buffer_info[nextp];
|
|
@@ -1276,32 +1396,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
next_buffer = &rx_ring->rx_buffer_info[i];
|
|
|
}
|
|
|
|
|
|
- if (staterr & IXGBE_RXD_STAT_EOP) {
|
|
|
- if (skb->prev)
|
|
|
- skb = ixgbe_transform_rsc_queue(skb,
|
|
|
- &(rx_ring->rsc_count));
|
|
|
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
|
|
|
- if (IXGBE_RSC_CB(skb)->delay_unmap) {
|
|
|
- dma_unmap_single(&pdev->dev,
|
|
|
- IXGBE_RSC_CB(skb)->dma,
|
|
|
- rx_ring->rx_buf_len,
|
|
|
- DMA_FROM_DEVICE);
|
|
|
- IXGBE_RSC_CB(skb)->dma = 0;
|
|
|
- IXGBE_RSC_CB(skb)->delay_unmap = false;
|
|
|
- }
|
|
|
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
|
|
|
- rx_ring->rsc_count +=
|
|
|
- skb_shinfo(skb)->nr_frags;
|
|
|
- else
|
|
|
- rx_ring->rsc_count++;
|
|
|
- rx_ring->rsc_flush++;
|
|
|
- }
|
|
|
- u64_stats_update_begin(&rx_ring->syncp);
|
|
|
- rx_ring->stats.packets++;
|
|
|
- rx_ring->stats.bytes += skb->len;
|
|
|
- u64_stats_update_end(&rx_ring->syncp);
|
|
|
- } else {
|
|
|
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
|
|
|
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
|
|
|
+ if (ring_is_ps_enabled(rx_ring)) {
|
|
|
rx_buffer_info->skb = next_buffer->skb;
|
|
|
rx_buffer_info->dma = next_buffer->dma;
|
|
|
next_buffer->skb = skb;
|
|
@@ -1310,12 +1406,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
skb->next = next_buffer->skb;
|
|
|
skb->next->prev = skb;
|
|
|
}
|
|
|
- rx_ring->non_eop_descs++;
|
|
|
+ rx_ring->rx_stats.non_eop_descs++;
|
|
|
goto next_desc;
|
|
|
}
|
|
|
|
|
|
+ if (skb->prev) {
|
|
|
+ skb = ixgbe_transform_rsc_queue(skb);
|
|
|
+ /* if we got here without RSC the packet is invalid */
|
|
|
+ if (!pkt_is_rsc) {
|
|
|
+ __pskb_trim(skb, 0);
|
|
|
+ rx_buffer_info->skb = skb;
|
|
|
+ goto next_desc;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (ring_is_rsc_enabled(rx_ring)) {
|
|
|
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
|
|
|
+ dma_unmap_single(rx_ring->dev,
|
|
|
+ IXGBE_RSC_CB(skb)->dma,
|
|
|
+ rx_ring->rx_buf_len,
|
|
|
+ DMA_FROM_DEVICE);
|
|
|
+ IXGBE_RSC_CB(skb)->dma = 0;
|
|
|
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (pkt_is_rsc) {
|
|
|
+ if (ring_is_ps_enabled(rx_ring))
|
|
|
+ rx_ring->rx_stats.rsc_count +=
|
|
|
+ skb_shinfo(skb)->nr_frags;
|
|
|
+ else
|
|
|
+ rx_ring->rx_stats.rsc_count +=
|
|
|
+ IXGBE_RSC_CB(skb)->skb_cnt;
|
|
|
+ rx_ring->rx_stats.rsc_flush++;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* ERR_MASK will only have valid bits if EOP set */
|
|
|
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
|
|
|
- dev_kfree_skb_irq(skb);
|
|
|
+ /* trim packet back to size 0 and recycle it */
|
|
|
+ __pskb_trim(skb, 0);
|
|
|
+ rx_buffer_info->skb = skb;
|
|
|
goto next_desc;
|
|
|
}
|
|
|
|
|
@@ -1325,7 +1454,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
total_rx_bytes += skb->len;
|
|
|
total_rx_packets++;
|
|
|
|
|
|
- skb->protocol = eth_type_trans(skb, adapter->netdev);
|
|
|
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
|
|
|
#ifdef IXGBE_FCOE
|
|
|
/* if ddp, not passing to ULD unless for FCP_RSP or error */
|
|
|
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
|
|
@@ -1339,16 +1468,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
|
|
|
next_desc:
|
|
|
rx_desc->wb.upper.status_error = 0;
|
|
|
|
|
|
+ (*work_done)++;
|
|
|
+ if (*work_done >= work_to_do)
|
|
|
+ break;
|
|
|
+
|
|
|
/* return some buffers to hardware, one at a time is too slow */
|
|
|
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
|
|
|
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
|
|
|
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
|
|
|
cleaned_count = 0;
|
|
|
}
|
|
|
|
|
|
/* use prefetched values */
|
|
|
rx_desc = next_rxd;
|
|
|
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
|
|
|
-
|
|
|
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
|
|
|
}
|
|
|
|
|
@@ -1356,14 +1487,14 @@ next_desc:
|
|
|
cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
|
|
|
|
|
|
if (cleaned_count)
|
|
|
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
|
|
|
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
|
|
|
|
|
|
#ifdef IXGBE_FCOE
|
|
|
/* include DDPed FCoE data */
|
|
|
if (ddp_bytes > 0) {
|
|
|
unsigned int mss;
|
|
|
|
|
|
- mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
|
|
|
+ mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
|
|
|
sizeof(struct fc_frame_header) -
|
|
|
sizeof(struct fcoe_crc_eof);
|
|
|
if (mss > 512)
|
|
@@ -1375,8 +1506,10 @@ next_desc:
|
|
|
|
|
|
rx_ring->total_packets += total_rx_packets;
|
|
|
rx_ring->total_bytes += total_rx_bytes;
|
|
|
-
|
|
|
- return cleaned;
|
|
|
+ u64_stats_update_begin(&rx_ring->syncp);
|
|
|
+ rx_ring->stats.packets += total_rx_packets;
|
|
|
+ rx_ring->stats.bytes += total_rx_bytes;
|
|
|
+ u64_stats_update_end(&rx_ring->syncp);
|
|
|
}
|
|
|
|
|
|
static int ixgbe_clean_rxonly(struct napi_struct *, int);
|
|
@@ -1390,7 +1523,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
|
|
|
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
|
|
|
{
|
|
|
struct ixgbe_q_vector *q_vector;
|
|
|
- int i, j, q_vectors, v_idx, r_idx;
|
|
|
+ int i, q_vectors, v_idx, r_idx;
|
|
|
u32 mask;
|
|
|
|
|
|
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
|
|
@@ -1406,8 +1539,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
|
|
|
adapter->num_rx_queues);
|
|
|
|
|
|
for (i = 0; i < q_vector->rxr_count; i++) {
|
|
|
- j = adapter->rx_ring[r_idx]->reg_idx;
|
|
|
- ixgbe_set_ivar(adapter, 0, j, v_idx);
|
|
|
+ u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
|
|
|
+ ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
|
|
|
r_idx = find_next_bit(q_vector->rxr_idx,
|
|
|
adapter->num_rx_queues,
|
|
|
r_idx + 1);
|
|
@@ -1416,8 +1549,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
|
|
|
adapter->num_tx_queues);
|
|
|
|
|
|
for (i = 0; i < q_vector->txr_count; i++) {
|
|
|
- j = adapter->tx_ring[r_idx]->reg_idx;
|
|
|
- ixgbe_set_ivar(adapter, 1, j, v_idx);
|
|
|
+ u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
|
|
|
+ ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
|
|
|
r_idx = find_next_bit(q_vector->txr_idx,
|
|
|
adapter->num_tx_queues,
|
|
|
r_idx + 1);
|
|
@@ -1448,11 +1581,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
|
|
|
+ switch (adapter->hw.mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
|
|
|
v_idx);
|
|
|
- else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
|
|
|
+ break;
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
ixgbe_set_ivar(adapter, -1, 1, v_idx);
|
|
|
+ break;
|
|
|
+
|
|
|
+ default:
|
|
|
+ break;
|
|
|
+ }
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
|
|
|
|
|
|
/* set up to autoclear timer, and the vectors */
|
|
@@ -1548,12 +1689,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
|
|
|
int v_idx = q_vector->v_idx;
|
|
|
u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
|
|
|
|
|
|
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
|
|
|
+ switch (adapter->hw.mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
/* must write high and low 16 bits to reset counter */
|
|
|
itr_reg |= (itr_reg << 16);
|
|
|
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
|
|
|
+ break;
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
/*
|
|
|
- * 82599 can support a value of zero, so allow it for
|
|
|
+ * 82599 and X540 can support a value of zero, so allow it for
|
|
|
* max interrupt rate, but there is an errata where it can
|
|
|
* not be zero with RSC
|
|
|
*/
|
|
@@ -1566,6 +1710,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
|
|
|
* immediate assertion of the interrupt
|
|
|
*/
|
|
|
itr_reg |= IXGBE_EITR_CNT_WDIS;
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
}
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
|
|
|
}
|
|
@@ -1573,14 +1720,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
|
|
|
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
|
|
|
{
|
|
|
struct ixgbe_adapter *adapter = q_vector->adapter;
|
|
|
+ int i, r_idx;
|
|
|
u32 new_itr;
|
|
|
u8 current_itr, ret_itr;
|
|
|
- int i, r_idx;
|
|
|
- struct ixgbe_ring *rx_ring, *tx_ring;
|
|
|
|
|
|
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
|
|
|
for (i = 0; i < q_vector->txr_count; i++) {
|
|
|
- tx_ring = adapter->tx_ring[r_idx];
|
|
|
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
|
|
|
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
|
|
|
q_vector->tx_itr,
|
|
|
tx_ring->total_packets,
|
|
@@ -1595,7 +1741,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
|
|
|
|
|
|
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
|
|
|
for (i = 0; i < q_vector->rxr_count; i++) {
|
|
|
- rx_ring = adapter->rx_ring[r_idx];
|
|
|
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
|
|
|
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
|
|
|
q_vector->rx_itr,
|
|
|
rx_ring->total_packets,
|
|
@@ -1626,7 +1772,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
|
|
|
|
|
|
if (new_itr != q_vector->eitr) {
|
|
|
/* do an exponential smoothing */
|
|
|
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
|
|
|
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
|
|
|
|
|
|
/* save the algorithm value here, not the smoothed one */
|
|
|
q_vector->eitr = new_itr;
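
A side note on the smoothing change in this hunk: the new expression keeps the same 90/10 weighting but performs a single integer division instead of two, so it loses less to truncation. For example, with q_vector->eitr = 99 and a new sample of 9, the old form gives (99 * 90)/100 + (9 * 10)/100 = 89 + 0 = 89, while the new form gives (99 * 9 + 9)/10 = 90.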
|
|
@@ -1694,17 +1840,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
|
|
|
{
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
|
|
+ if (eicr & IXGBE_EICR_GPI_SDP2) {
|
|
|
+ /* Clear the interrupt */
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
|
|
|
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
|
|
|
+ schedule_work(&adapter->sfp_config_module_task);
|
|
|
+ }
|
|
|
+
|
|
|
if (eicr & IXGBE_EICR_GPI_SDP1) {
|
|
|
/* Clear the interrupt */
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
|
|
|
- schedule_work(&adapter->multispeed_fiber_task);
|
|
|
- } else if (eicr & IXGBE_EICR_GPI_SDP2) {
|
|
|
- /* Clear the interrupt */
|
|
|
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
|
|
|
- schedule_work(&adapter->sfp_config_module_task);
|
|
|
- } else {
|
|
|
- /* Interrupt isn't for us... */
|
|
|
- return;
|
|
|
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
|
|
|
+ schedule_work(&adapter->multispeed_fiber_task);
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -1744,16 +1891,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
|
|
|
if (eicr & IXGBE_EICR_MAILBOX)
|
|
|
ixgbe_msg_task(adapter);
|
|
|
|
|
|
- if (hw->mac.type == ixgbe_mac_82598EB)
|
|
|
- ixgbe_check_fan_failure(adapter, eicr);
|
|
|
-
|
|
|
- if (hw->mac.type == ixgbe_mac_82599EB) {
|
|
|
- ixgbe_check_sfp_event(adapter, eicr);
|
|
|
- adapter->interrupt_event = eicr;
|
|
|
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
|
|
|
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
|
|
|
- schedule_work(&adapter->check_overtemp_task);
|
|
|
-
|
|
|
+ switch (hw->mac.type) {
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
/* Handle Flow Director Full threshold interrupt */
|
|
|
if (eicr & IXGBE_EICR_FLOW_DIR) {
|
|
|
int i;
|
|
@@ -1763,12 +1903,24 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++) {
|
|
|
struct ixgbe_ring *tx_ring =
|
|
|
adapter->tx_ring[i];
|
|
|
- if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
|
|
|
- &tx_ring->reinit_state))
|
|
|
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
|
|
|
+ &tx_ring->state))
|
|
|
schedule_work(&adapter->fdir_reinit_task);
|
|
|
}
|
|
|
}
|
|
|
+ ixgbe_check_sfp_event(adapter, eicr);
|
|
|
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
|
|
|
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
|
|
|
+ adapter->interrupt_event = eicr;
|
|
|
+ schedule_work(&adapter->check_overtemp_task);
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
}
|
|
|
+
|
|
|
+ ixgbe_check_fan_failure(adapter, eicr);
|
|
|
+
|
|
|
if (!test_bit(__IXGBE_DOWN, &adapter->state))
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
|
|
|
|
|
@@ -1779,15 +1931,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
|
|
|
u64 qmask)
|
|
|
{
|
|
|
u32 mask;
|
|
|
+ struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
|
|
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
|
|
|
+ switch (hw->mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
|
|
|
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
|
|
|
- } else {
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
|
|
|
+ break;
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
mask = (qmask & 0xFFFFFFFF);
|
|
|
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
|
|
|
+ if (mask)
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
|
|
|
mask = (qmask >> 32);
|
|
|
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
|
|
|
+ if (mask)
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
}
|
|
|
/* skip the flush */
|
|
|
}
|
|
@@ -1796,15 +1957,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
|
|
|
u64 qmask)
|
|
|
{
|
|
|
u32 mask;
|
|
|
+ struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
|
|
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
|
|
|
+ switch (hw->mac.type) {
|
|
|
+ case ixgbe_mac_82598EB:
|
|
|
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
|
|
|
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
|
|
|
- } else {
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
|
|
|
+ break;
|
|
|
+ case ixgbe_mac_82599EB:
|
|
|
+ case ixgbe_mac_X540:
|
|
|
mask = (qmask & 0xFFFFFFFF);
|
|
|
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
|
|
|
+ if (mask)
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
|
|
|
mask = (qmask >> 32);
|
|
|
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
|
|
|
+ if (mask)
|
|
|
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
}
|
|
|
/* skip the flush */
|
|
|
}
|
|
@@ -1847,8 +2017,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
|
|
|
int r_idx;
|
|
|
int i;
|
|
|
|
|
|
+#ifdef CONFIG_IXGBE_DCA
|
|
|
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
|
|
|
+ ixgbe_update_dca(q_vector);
|
|
|
+#endif
|
|
|
+
|
|
|
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
|
|
|
- for (i = 0; i < q_vector->rxr_count; i++) {
|
|
|
+ for (i = 0; i < q_vector->rxr_count; i++) {
|
|
|
rx_ring = adapter->rx_ring[r_idx];
|
|
|
rx_ring->total_bytes = 0;
|
|
|
rx_ring->total_packets = 0;
|
|
@@ -1859,7 +2034,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
|
|
|
if (!q_vector->rxr_count)
|
|
|
return IRQ_HANDLED;
|
|
|
|
|
|
- /* disable interrupts on this vector only */
|
|
|
/* EIAM disabled interrupts (on this vector) for us */
|
|
|
napi_schedule(&q_vector->napi);
|
|
|
|
|
@@ -1918,13 +2092,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
|
|
|
int work_done = 0;
|
|
|
long r_idx;
|
|
|
|
|
|
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
|
|
|
- rx_ring = adapter->rx_ring[r_idx];
|
|
|
#ifdef CONFIG_IXGBE_DCA
|
|
|
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
|
|
|
- ixgbe_update_rx_dca(adapter, rx_ring);
|
|
|
+ ixgbe_update_dca(q_vector);
|
|
|
#endif
|
|
|
|
|
|
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
|
|
|
+ rx_ring = adapter->rx_ring[r_idx];
|
|
|
+
|
|
|
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
|
|
|
|
|
|
/* If all Rx work done, exit the polling mode */
|
|
@@ -1958,13 +2133,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
|
|
|
long r_idx;
|
|
|
bool tx_clean_complete = true;
|
|
|
|
|
|
+#ifdef CONFIG_IXGBE_DCA
|
|
|
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
|
|
|
+ ixgbe_update_dca(q_vector);
|
|
|
+#endif
|
|
|
+
|
|
|
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
|
|
|
for (i = 0; i < q_vector->txr_count; i++) {
|
|
|
ring = adapter->tx_ring[r_idx];
|
|
|
-#ifdef CONFIG_IXGBE_DCA
|
|
|
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
|
|
|
- ixgbe_update_tx_dca(adapter, ring);
|
|
|
-#endif
|
|
|
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
|
|
|
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
|
|
|
r_idx + 1);
|
|
@@ -1977,10 +2153,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
|
|
|
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
|
|
|
for (i = 0; i < q_vector->rxr_count; i++) {
|
|
|
ring = adapter->rx_ring[r_idx];
|
|
|
-#ifdef CONFIG_IXGBE_DCA
|
|
|
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
|
|
|
- ixgbe_update_rx_dca(adapter, ring);
|
|
|
-#endif
|
|
|
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
|
|
|
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
|
|
|
r_idx + 1);
|
|
@@ -2019,13 +2191,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
|
|
|
int work_done = 0;
|
|
|
long r_idx;
|
|
|
|
|
|
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
|
|
|
- tx_ring = adapter->tx_ring[r_idx];
|
|
|
#ifdef CONFIG_IXGBE_DCA
|
|
|
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
|
|
|
- ixgbe_update_tx_dca(adapter, tx_ring);
|
|
|
+ ixgbe_update_dca(q_vector);
|
|
|
#endif
|
|
|
|
|
|
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
|
|
|
+ tx_ring = adapter->tx_ring[r_idx];
|
|
|
+
|
|
|
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
|
|
|
work_done = budget;
|
|
|
|
|
@@ -2046,24 +2219,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
|
|
|
int r_idx)
|
|
|
{
|
|
|
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
|
|
|
+ struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
|
|
|
|
|
|
set_bit(r_idx, q_vector->rxr_idx);
|
|
|
q_vector->rxr_count++;
|
|
|
+ rx_ring->q_vector = q_vector;
|
|
|
}
|
|
|
|
|
|
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
|
|
|
int t_idx)
|
|
|
{
|
|
|
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
|
|
|
+ struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
|
|
|
|
|
|
set_bit(t_idx, q_vector->txr_idx);
|
|
|
q_vector->txr_count++;
|
|
|
+ tx_ring->q_vector = q_vector;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
* ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
|
|
|
* @adapter: board private structure to initialize
|
|
|
- * @vectors: allotted vector count for descriptor rings
|
|
|
*
|
|
|
* This function maps descriptor rings to the queue-specific vectors
|
|
|
* we were allotted through the MSI-X enabling code. Ideally, we'd have
|
|
@@ -2071,9 +2247,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
|
|
|
* group the rings as "efficiently" as possible. You would add new
|
|
|
* mapping configurations in here.
|
|
|
**/
|
|
|
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
|
|
|
- int vectors)
|
|
|
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
|
|
|
{
|
|
|
+ int q_vectors;
|
|
|
int v_start = 0;
|
|
|
int rxr_idx = 0, txr_idx = 0;
|
|
|
int rxr_remaining = adapter->num_rx_queues;
|
|
@@ -2086,11 +2262,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
|
|
|
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
|
|
|
goto out;
|
|
|
|
|
|
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
|
|
|
+
|
|
|
/*
|
|
|
* The ideal configuration...
|
|
|
* We have enough vectors to map one per queue.
|
|
|
*/
|
|
|
- if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
|
|
|
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
|
|
|
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
|
|
|
map_vector_to_rxq(adapter, v_start, rxr_idx);
|
|
|
|
|
@@ -2106,23 +2284,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
|
|
|
* multiple queues per vector.
|
|
|
*/
|
|
|
/* Re-adjusting *qpv takes care of the remainder. */
|
|
|
- for (i = v_start; i < vectors; i++) {
|
|
|
- rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
|
|
|
+ for (i = v_start; i < q_vectors; i++) {
|
|
|
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
|
|
|
for (j = 0; j < rqpv; j++) {
|
|
|
map_vector_to_rxq(adapter, i, rxr_idx);
|
|
|
rxr_idx++;
|
|
|
rxr_remaining--;
|
|
|
}
|
|
|
- }
|
|
|
- for (i = v_start; i < vectors; i++) {
|
|
|
- tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
|
|
|
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
|
|
|
for (j = 0; j < tqpv; j++) {
|
|
|
map_vector_to_txq(adapter, i, txr_idx);
|
|
|
txr_idx++;
|
|
|
txr_remaining--;
|
|
|
}
|
|
|
}
|
|
|
-
|
|
|
out:
|
|
|
return err;
|
|
|
}
|
|
@@ -2144,30 +2319,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

- /* Map the Tx/Rx rings to the vectors we were allotted. */
- err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+ err = ixgbe_map_rings_to_vectors(adapter);
if (err)
- goto out;
+ return err;

-#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
- &ixgbe_msix_clean_many)
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbe_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbe_msix_clean_tx : \
+ NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ handler = SET_HANDLER(q_vector);

if (handler == &ixgbe_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
+ sprintf(q_vector->name, "%s-%s-%d",
netdev->name, "rx", ri++);
} else if (handler == &ixgbe_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
+ sprintf(q_vector->name, "%s-%s-%d",
netdev->name, "tx", ti++);
- } else
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
-
+ } else if (handler == &ixgbe_msix_clean_many) {
+ sprintf(q_vector->name, "%s-%s-%d",
+ netdev->name, "TxRx", ri++);
+ ti++;
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ handler, 0, q_vector->name,
+ q_vector);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
@@ -2175,9 +2356,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
}
}

- sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+ sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2193,17 +2374,16 @@ free_queue_irqs:
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-out:
return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- u8 current_itr;
- u32 new_itr = q_vector->eitr;
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ u32 new_itr = q_vector->eitr;
+ u8 current_itr;

q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -2233,9 +2413,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)

if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;

- /* save the algorithm value here, not the smoothed one */
+ /* save the algorithm value here */
q_vector->eitr = new_itr;

ixgbe_write_eitr(q_vector);
@@ -2256,12 +2436,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
mask |= IXGBE_EIMS_GPI_SDP0;
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
if (adapter->num_vfs)
mask |= IXGBE_EIMS_MAILBOX;
+ break;
+ default:
+ break;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2317,13 +2502,21 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);

- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ adapter->interrupt_event = eicr;
+ schedule_work(&adapter->check_overtemp_task);
+ }
+ break;
+ default:
+ break;
+ }

ixgbe_check_fan_failure(adapter, eicr);
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
- schedule_work(&adapter->check_overtemp_task);

if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0]->total_packets = 0;
@@ -2416,14 +2609,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- } else {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
if (adapter->num_vfs > 32)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+ break;
+ default:
+ break;
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2469,7 +2668,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u64 tdba = ring->dma;
int wait_loop = 10;
u32 txdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;

/* disable queue to avoid issues while updating state */
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2484,8 +2683,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->head = IXGBE_TDH(reg_idx);
- ring->tail = IXGBE_TDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);

/* configure fetching thresholds */
if (adapter->rx_itr_setting == 0) {
@@ -2501,7 +2699,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
}

/* reinitialize flowdirector state */
- set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ adapter->atr_sample_rate) {
+ ring->atr_sample_rate = adapter->atr_sample_rate;
+ ring->atr_count = 0;
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

/* enable queue */
txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2592,16 +2799,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
u32 srrctl;
- int index;
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ u8 reg_idx = rx_ring->reg_idx;

- index = rx_ring->reg_idx;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- unsigned long mask;
- mask = (unsigned long) feature[RING_F_RSS].mask;
- index = index & mask;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB: {
+ struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ const int mask = feature[RING_F_RSS].mask;
+ reg_idx = reg_idx & mask;
+ }
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ break;
}
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+
+ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));

srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2611,7 +2824,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;

- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(rx_ring)) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
@@ -2624,7 +2837,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
}

- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2693,20 +2906,37 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}

+/**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rscctrl;
+ u8 reg_idx = ring->reg_idx;
+
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+ rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure
* @index: index of ring to set
**/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rscctrl;
int rx_buf_len;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;

- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ if (!ring_is_rsc_enabled(ring))
return;

rx_buf_len = ring->rx_buf_len;
@@ -2717,7 +2947,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2770,9 +3000,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
- int reg_idx = ring->reg_idx;
int wait_loop = IXGBE_MAX_RX_DESC_POLL;
u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;

/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2796,7 +3026,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
u64 rdba = ring->dma;
u32 rxdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;

/* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2810,8 +3040,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->head = IXGBE_RDH(reg_idx);
- ring->tail = IXGBE_RDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);

ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
@@ -2833,7 +3062,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+ ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
}

static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2956,24 +3185,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
rx_ring->rx_buf_len = rx_buf_len;

if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
- rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+ set_ring_ps_enabled(rx_ring);
+ else
+ clear_ring_ps_enabled(rx_ring);
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_ring_rsc_enabled(rx_ring);
else
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_rsc_enabled(rx_ring);

#ifdef IXGBE_FCOE
if (netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((i >= f->mask) && (i < f->mask + f->indices)) {
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_ps_enabled(rx_ring);
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ } else if (!ring_is_rsc_enabled(rx_ring) &&
+ !ring_is_ps_enabled(rx_ring)) {
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
#endif /* IXGBE_FCOE */
}
-
}

static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2996,6 +3233,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3123,6 +3361,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3152,6 +3391,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3349,8 +3589,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- u32 txdctl;
- int i, j;

if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3366,25 +3604,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_RX_CONFIG);

- /* reconfigure the hardware */
- ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* PThresh workaround for Tx hang with DFP enabled. */
- txdctl |= 32;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
- }
/* Enable VLAN tag insert/strip */
adapter->netdev->features |= NETIF_F_HW_VLAN_RX;

hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+ /* reconfigure the hardware */
+ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
}

#endif
@@ -3516,8 +3747,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
break;
- default:
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
break;
@@ -3562,12 +3794,20 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
ixgbe_configure_msi_and_legacy(adapter);

/* enable the optics */
- if (hw->phy.multispeed_fiber)
+ if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);

clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);

+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_EICR);
ixgbe_irq_enable(adapter, true, true);
@@ -3588,28 +3828,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
* devices wouldn't have their type identified yet. We need to
* kick off the SFP+ module setup first, then try to bring up link.
* If we're not hot-pluggable SFP+, we just need to configure link
- * and bring it up.
- */
- if (hw->phy.type == ixgbe_phy_unknown) {
- err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- /*
- * Take the device down and schedule the sfp tasklet
- * which will unregister_netdev and log it.
- */
- ixgbe_down(adapter);
- schedule_work(&adapter->sfp_config_module_task);
- return err;
- }
- }
-
- if (ixgbe_is_sfp(hw)) {
- ixgbe_sfp_link_config(adapter);
- } else {
- err = ixgbe_non_sfp_link_config(hw);
- if (err)
- e_err(probe, "link_config FAILED %d\n", err);
- }
+ * and bring it up.
+ */
+ if (hw->phy.type == ixgbe_phy_unknown)
+ schedule_work(&adapter->sfp_config_module_task);

/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -3687,15 +3909,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)

/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
unsigned long size;
- unsigned int i;
+ u16 i;

/* ring already cleared, nothing to do */
if (!rx_ring->rx_buffer_info)
@@ -3707,7 +3927,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,

rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -3718,7 +3938,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(dev,
IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
@@ -3732,7 +3952,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ dma_unmap_page(dev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
@@ -3749,24 +3969,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,

rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
* ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned long size;
- unsigned int i;
+ u16 i;

/* ring already cleared, nothing to do */
if (!tx_ring->tx_buffer_info)
@@ -3775,7 +3988,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}

size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3786,11 +3999,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,

tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
@@ -3802,7 +4010,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;

for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}

/**
@@ -3814,7 +4022,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;

for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}

void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3823,7 +4031,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
u32 txdctl;
- int i, j;
+ int i;
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

/* signal that we are down to the interrupt handler */
@@ -3881,19 +4089,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter)

/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
}
/* Disable the Tx DMA engine on 82599 */
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
+ break;
+ default:
+ break;
+ }

/* power down the optics */
- if (hw->phy.multispeed_fiber)
+ if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);

/* clear n-tuple filters that are cached */
@@ -3925,10 +4139,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
int tx_clean_complete, work_done = 0;

#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
- }
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
#endif

tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3956,6 +4168,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);

+ adapter->tx_timeout_count++;
+
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
}
@@ -3970,8 +4184,6 @@ static void ixgbe_reset_task(struct work_struct *work)
test_bit(__IXGBE_RESETTING, &adapter->state))
return;

- adapter->tx_timeout_count++;
-
ixgbe_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
@@ -4221,19 +4433,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
- bool ret = false;

- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
- } else {
- ret = false;
- }
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return false;

- return ret;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
}

#ifdef CONFIG_IXGBE_DCB
@@ -4250,71 +4459,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
bool ret = false;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- /* the number of queues is assumed to be symmetric */
- for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i]->reg_idx = i << 3;
- adapter->tx_ring[i]->reg_idx = i << 2;
- }
- ret = true;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- if (dcb_i == 8) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 32
- * Tx TC2 starts at: descriptor queue 64
- * Tx TC3 starts at: descriptor queue 80
- * Tx TC4 starts at: descriptor queue 96
- * Tx TC5 starts at: descriptor queue 104
- * Tx TC6 starts at: descriptor queue 112
- * Tx TC7 starts at: descriptor queue 120
- *
- * Rx TC0-TC7 are offset by 16 queues each
- */
- for (i = 0; i < 3; i++) {
- adapter->tx_ring[i]->reg_idx = i << 5;
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < 5; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 2) << 4);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 8) << 3);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return false;

- ret = true;
- } else if (dcb_i == 4) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 64
- * Tx TC2 starts at: descriptor queue 96
- * Tx TC3 starts at: descriptor queue 112
- *
- * Rx TC0-TC3 are offset by 32 queues each
- */
- adapter->tx_ring[0]->reg_idx = 0;
- adapter->tx_ring[1]->reg_idx = 64;
- adapter->tx_ring[2]->reg_idx = 96;
- adapter->tx_ring[3]->reg_idx = 112;
- for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i]->reg_idx = i << 5;
-
- ret = true;
- } else {
- ret = false;
+ /* the number of queues is assumed to be symmetric */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ for (i = 0; i < dcb_i; i++) {
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
+ }
+ ret = true;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (dcb_i == 8) {
+ /*
+ * Tx TC0 starts at: descriptor queue 0
+ * Tx TC1 starts at: descriptor queue 32
+ * Tx TC2 starts at: descriptor queue 64
+ * Tx TC3 starts at: descriptor queue 80
+ * Tx TC4 starts at: descriptor queue 96
+ * Tx TC5 starts at: descriptor queue 104
+ * Tx TC6 starts at: descriptor queue 112
+ * Tx TC7 starts at: descriptor queue 120
+ *
+ * Rx TC0-TC7 are offset by 16 queues each
+ */
+ for (i = 0; i < 3; i++) {
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
- } else {
- ret = false;
+ for ( ; i < 5; i++) {
+ adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+ adapter->rx_ring[i]->reg_idx = i << 4;
+ }
+ for ( ; i < dcb_i; i++) {
+ adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+ adapter->rx_ring[i]->reg_idx = i << 4;
+ }
+ ret = true;
+ } else if (dcb_i == 4) {
+ /*
+ * Tx TC0 starts at: descriptor queue 0
+ * Tx TC1 starts at: descriptor queue 64
+ * Tx TC2 starts at: descriptor queue 96
+ * Tx TC3 starts at: descriptor queue 112
+ *
+ * Rx TC0-TC3 are offset by 32 queues each
+ */
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
+ for (i = 0 ; i < dcb_i; i++)
+ adapter->rx_ring[i]->reg_idx = i << 5;
+ ret = true;
}
- } else {
- ret = false;
+ break;
+ default:
+ break;
}
-
return ret;
}
#endif
@@ -4354,55 +4559,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
- int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
- bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ int i;
+ u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;

- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;

- ixgbe_cache_ring_dcb(adapter);
- /* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
- /*
- * In 82599, the number of Tx queues for each traffic
- * class for both 8-TC and 4-TC modes are:
- * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
- * 8 TCs: 32 32 16 16 8 8 8 8
- * 4 TCs: 64 64 32 32
- * We have max 8 queues for FCoE, where 8 the is
- * FCoE redirection table size. If TC for FCoE is
- * less than or equal to TC3, we have enough queues
- * to add max of 8 queues for FCoE, so we start FCoE
- * tx descriptor from the next one, i.e., reg_idx + 1.
- * If TC for FCoE is above TC3, implying 8 TC mode,
- * and we need 8 for FCoE, we have to take all queues
- * in that traffic class for FCoE.
- */
- if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
- fcoe_tx_i--;
- }
+ ixgbe_cache_ring_dcb(adapter);
+ /* find out queues in TC for FCoE */
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
+ /*
+ * In 82599, the number of Tx queues for each traffic
+ * class for both 8-TC and 4-TC modes are:
+ * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+ * 8 TCs: 32 32 16 16 8 8 8 8
+ * 4 TCs: 64 64 32 32
+ * We have max 8 queues for FCoE, where 8 the is
+ * FCoE redirection table size. If TC for FCoE is
+ * less than or equal to TC3, we have enough queues
+ * to add max of 8 queues for FCoE, so we start FCoE
+ * Tx queue from the next one, i.e., reg_idx + 1.
+ * If TC for FCoE is above TC3, implying 8 TC mode,
+ * and we need 8 for FCoE, we have to take all queues
+ * in that traffic class for FCoE.
+ */
+ if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+ fcoe_tx_i--;
+ }
#endif /* CONFIG_IXGBE_DCB */
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);

- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
- ret = true;
+ fcoe_rx_i = f->mask;
+ fcoe_tx_i = f->mask;
}
- return ret;
+ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ }
+ return true;
}

#endif /* IXGBE_FCOE */
@@ -4471,65 +4676,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
**/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
- int i;
- int orig_node = adapter->node;
+ int rx = 0, tx = 0, nid = adapter->node;

- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = adapter->tx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
+
+ for (; tx < adapter->num_tx_queues; tx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_tx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = tx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;

- adapter->tx_ring[i] = ring;
+ adapter->tx_ring[tx] = ring;
}

- /* Restore the adapter's original node */
- adapter->node = orig_node;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ struct ixgbe_ring *ring;

- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_rx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = rx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;

- adapter->rx_ring[i] = ring;
+ adapter->rx_ring[rx] = ring;
}

- /* Restore the adapter's original node */
- adapter->node = orig_node;
-
ixgbe_cache_ring_register(adapter);

return 0;

-err_rx_ring_allocation:
- for (i = 0; i < adapter->num_tx_queues; i++)
- kfree(adapter->tx_ring[i]);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx)
+ kfree(adapter->tx_ring[--tx]);
+
+ while (rx)
+ kfree(adapter->rx_ring[--rx]);
return -ENOMEM;
}

@@ -4751,6 +4946,11 @@ err_set_interrupt:
return err;
}

+static void ring_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
/**
* ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
@@ -4767,7 +4967,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
adapter->tx_ring[i] = NULL;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ /* ixgbe_get_stats64() might access this ring, we must wait
+ * a grace period before freeing it.
+ */
+ call_rcu(&ring->rcu, ring_free_rcu);
adapter->rx_ring[i] = NULL;
}

@@ -4844,6 +5049,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
int j;
struct tc_configuration *tc;
#endif
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

/* PCI config space info */

@@ -4858,11 +5064,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
- if (hw->mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4891,6 +5100,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
}

#ifdef CONFIG_IXGBE_DCB
@@ -4920,8 +5132,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_DCB
adapter->last_lfc_mode = hw->fc.current_mode;
#endif
- hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
- hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
@@ -4959,15 +5171,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)

/**
* ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
int size;

size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -4982,7 +5192,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);

- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4995,7 +5205,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}

@@ -5014,7 +5224,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;

for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5026,48 +5236,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)

/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
int size;

size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vmalloc(size);
- if (!rx_ring->rx_buffer_info) {
- e_err(probe, "vmalloc allocation failed for the Rx "
- "descriptor ring\n");
- goto alloc_failed;
- }
+ if (!rx_ring->rx_buffer_info)
+ goto err;
memset(rx_ring->rx_buffer_info, 0, size);

/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);

- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);

- if (!rx_ring->desc) {
- e_err(probe, "Memory allocation failed for the Rx "
- "descriptor ring\n");
- vfree(rx_ring->rx_buffer_info);
- goto alloc_failed;
- }
+ if (!rx_ring->desc)
+ goto err;

rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;

return 0;
-
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}

@@ -5081,13 +5284,12 @@ alloc_failed:
*
* Return 0 on success, negative on failure
**/
-
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;

for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5099,23 +5301,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)

/**
* ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_tx_ring(adapter, tx_ring);
+ ixgbe_clean_tx_ring(tx_ring);

vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;

- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);

tx_ring->desc = NULL;
}
@@ -5132,28 +5334,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)

for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
- ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
* ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_rx_ring(adapter, rx_ring);
+ ixgbe_clean_rx_ring(rx_ring);

vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;

- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);

rx_ring->desc = NULL;
}
@@ -5170,7 +5372,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)

for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
- ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
@@ -5183,6 +5385,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

/* MTU < 68 is an error and causes problems on some kernels */
@@ -5193,6 +5396,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;

+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
+
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);

@@ -5288,8 +5494,8 @@ static int ixgbe_close(struct net_device *netdev)
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
u32 err;

pci_set_power_state(pdev, PCI_D0);
@@ -5320,7 +5526,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

if (netif_running(netdev)) {
- err = ixgbe_open(adapter->netdev);
+ err = ixgbe_open(netdev);
if (err)
return err;
}
@@ -5333,8 +5539,8 @@ static int ixgbe_resume(struct pci_dev *pdev)

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl, fctrl;
u32 wufc = adapter->wol;
@@ -5351,6 +5557,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_free_all_rx_resources(adapter);
}

+ ixgbe_clear_interrupt_scheme(adapter);
+
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
if (retval)
@@ -5377,15 +5585,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
}

- if (wufc && hw->mac.type == ixgbe_mac_82599EB)
- pci_wake_from_d3(pdev, true);
- else
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
pci_wake_from_d3(pdev, false);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pci_wake_from_d3(pdev, !!wufc);
+ break;
+ default:
+ break;
+ }

*enable_wake = !!wufc;

- ixgbe_clear_interrupt_scheme(adapter);
-
ixgbe_release_hw_control(adapter);

pci_disable_device(pdev);
@@ -5434,10 +5647,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
|
|
|
{
|
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
|
|
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
|
|
|
u64 total_mpc = 0;
|
|
|
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
|
|
|
- u64 non_eop_descs = 0, restart_queue = 0;
|
|
|
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
|
|
|
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
|
|
|
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
|
|
|
+ u64 bytes = 0, packets = 0;
|
|
|
|
|
|
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
|
|
|
test_bit(__IXGBE_RESETTING, &adapter->state))
|
|
@@ -5450,21 +5665,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
|
|
|
adapter->hw_rx_no_dma_resources +=
|
|
|
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
|
|
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
- rsc_count += adapter->rx_ring[i]->rsc_count;
|
|
|
- rsc_flush += adapter->rx_ring[i]->rsc_flush;
|
|
|
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
|
|
|
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
|
|
|
}
|
|
|
adapter->rsc_total_count = rsc_count;
|
|
|
adapter->rsc_total_flush = rsc_flush;
|
|
|
}
|
|
|
|
|
|
+ for (i = 0; i < adapter->num_rx_queues; i++) {
|
|
|
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
|
|
|
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
|
|
|
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
|
|
|
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
|
|
|
+ bytes += rx_ring->stats.bytes;
|
|
|
+ packets += rx_ring->stats.packets;
|
|
|
+ }
|
|
|
+ adapter->non_eop_descs = non_eop_descs;
|
|
|
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
|
|
|
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
|
|
|
+ netdev->stats.rx_bytes = bytes;
|
|
|
+ netdev->stats.rx_packets = packets;
|
|
|
+
|
|
|
+ bytes = 0;
|
|
|
+ packets = 0;
|
|
|
/* gather some stats to the adapter struct that are per queue */
|
|
|
- for (i = 0; i < adapter->num_tx_queues; i++)
|
|
|
- restart_queue += adapter->tx_ring[i]->restart_queue;
|
|
|
+ for (i = 0; i < adapter->num_tx_queues; i++) {
|
|
|
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
|
|
|
+ restart_queue += tx_ring->tx_stats.restart_queue;
|
|
|
+ tx_busy += tx_ring->tx_stats.tx_busy;
|
|
|
+ bytes += tx_ring->stats.bytes;
|
|
|
+ packets += tx_ring->stats.packets;
|
|
|
+ }
|
|
|
adapter->restart_queue = restart_queue;
|
|
|
-
|
|
|
- for (i = 0; i < adapter->num_rx_queues; i++)
|
|
|
- non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
|
|
|
- adapter->non_eop_descs = non_eop_descs;
|
|
|
+ adapter->tx_busy = tx_busy;
|
|
|
+ netdev->stats.tx_bytes = bytes;
|
|
|
+ netdev->stats.tx_packets = packets;
|
|
|
|
|
|
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
|
|
|
for (i = 0; i < 8; i++) {
|
|
@@ -5479,17 +5714,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- if (hw->mac.type == ixgbe_mac_82599EB) {
- hwstats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
- hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ break;
+ default:
+ break;
}
hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5498,21 +5734,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* work around hardware counting issue */
hwstats->gprc -= missed_rx;

+ ixgbe_update_xoff_received(adapter);
+
/* 82598 hardware only has a 32 bit counter in the high register */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u64 tmp;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
- /* 4 high bits of GORC */
- hwstats->gorc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
- /* 4 high bits of GOTC */
- hwstats->gotc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
@@ -5523,12 +5763,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
- } else {
- hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ default:
+ break;
}
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
hwstats->bprc += bprc;
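On the 82599/X540 path the octet counters are now taken from the low 32-bit register, and the high register is read only to clear it, whereas the removed code also folded the 4 high bits of GORCH/GOTCH into the 64-bit total. A self-contained sketch of that split-register accumulation, with made-up register values, to illustrate what the dropped high bits represented:

#include <stdio.h>
#include <stdint.h>

/* hypothetical registers modeling a 36-bit octet counter split across a
 * 32-bit low register and a 4-bit high register, both clear-on-read */
static uint32_t reg_gorcl = 0xFFFFF000u;	/* low 32 bits */
static uint32_t reg_gorch = 0x3u;		/* top 4 bits */

static uint32_t read_clear(uint32_t *reg)
{
	uint32_t val = *reg;
	*reg = 0;	/* clear-on-read behaviour */
	return val;
}

int main(void)
{
	uint64_t gorc = 0;

	/* combine the low register with the 4 high bits, as the
	 * removed 82599 code did before this patch */
	gorc += read_clear(&reg_gorcl);
	gorc += (uint64_t)(read_clear(&reg_gorch) & 0xF) << 32;
	printf("good octets received: %llu\n", (unsigned long long)gorc);
	return 0;
}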
@@ -5701,8 +5938,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)

if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
} else {
e_err(probe, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters\n");
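The FDIR "init done" marker moves from a dedicated reinit_state word into a generic per-ring state bitmap manipulated with set_bit()/test_bit(). A rough userspace analogue of that idea using C11 atomics; the names and bit index below are invented for the sketch, the kernel's set_bit/test_bit are the real interface:

#include <stdatomic.h>
#include <stdio.h>

/* hypothetical per-ring state word and flag bit: "FDIR init done"
 * tracked per ring rather than per adapter */
#define RING_FDIR_INIT_DONE	0

struct ring {
	atomic_ulong state;
};

static void ring_set_bit(struct ring *r, int bit)
{
	atomic_fetch_or(&r->state, 1UL << bit);
}

static int ring_test_bit(struct ring *r, int bit)
{
	return (atomic_load(&r->state) >> bit) & 1UL;
}

int main(void)
{
	struct ring rings[2] = { { 0 }, { 0 } };

	/* after a successful table re-init, mark every Tx ring ready */
	for (int i = 0; i < 2; i++)
		ring_set_bit(&rings[i], RING_FDIR_INIT_DONE);

	printf("ring0 ready=%d ring1 ready=%d\n",
	       ring_test_bit(&rings[0], RING_FDIR_INIT_DONE),
	       ring_test_bit(&rings[1], RING_FDIR_INIT_DONE));
	return 0;
}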
@@ -5764,17 +6001,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
bool flow_rx, flow_tx;

- if (hw->mac.type == ixgbe_mac_82599EB) {
- u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
- flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
- flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB: {
u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
}
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540: {
+ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+ }
+ break;
+ default:
+ flow_tx = false;
+ flow_rx = false;
+ break;
+ }

e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5788,7 +6035,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
netif_carrier_on(netdev);
} else {
/* Force detection of hung controller */
- adapter->detect_tx_hung = true;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = adapter->tx_ring[i];
+ set_check_for_tx_hang(tx_ring);
+ }
}
} else {
adapter->link_up = false;
@@ -6000,15 +6250,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ unsigned int first, const u8 hdr_len)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
+ unsigned int bytecount = skb->len;
+ u16 gso_segs = 1;

i = tx_ring->next_to_use;

@@ -6023,10 +6275,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,

tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&pdev->dev,
+ tx_buffer_info->dma = dma_map_single(dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6059,12 +6311,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_page(dev,
frag->page,
offset, size,
DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6078,6 +6330,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
break;
}

+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ bytecount += (gso_segs - 1) * hdr_len;
+
+ /* multiply data chunks by size of headers */
+ tx_ring->tx_buffer_info[i].bytecount = bytecount;
+ tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;

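The added block records, on the last buffer of the frame, how many segments the hardware will cut the skb into and how many bytes will actually go on the wire, so Tx cleanup can report accurate per-ring packet and byte counts. A standalone arithmetic sketch with made-up sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical numbers: a 9000-byte offloaded skb, 1448-byte
	 * segments, 66 bytes of headers replicated on every segment */
	unsigned int skb_len = 9000, gso_size = 1448, hdr_len = 66;

	unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

	/* bytecount is the wire total: the skb plus one extra copy of the
	 * headers for every additional segment the hardware emits */
	printf("segments=%u wire_bytes=%u\n", gso_segs, bytecount);
	return 0;
}

For a TSO skb the stack already supplies gso_segs; the DIV_ROUND_UP fallback in the hunk is only needed for FCoE sequence offload, where the segment count has to be derived from gso_size.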
@@ -6099,14 +6364,13 @@ dma_error:
i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}

return 0;
}

-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
int tx_flags, int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6171,60 +6435,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
wmb();

tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ writel(i, tx_ring->tail);
}

static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags, __be16 protocol)
+ u8 queue, u32 tx_flags, __be16 protocol)
{
struct ixgbe_atr_input atr_input;
- struct tcphdr *th;
struct iphdr *iph = ip_hdr(skb);
struct ethhdr *eth = (struct ethhdr *)skb->data;
- u16 vlan_id, src_port, dst_port, flex_bytes;
- u32 src_ipv4_addr, dst_ipv4_addr;
- u8 l4type = 0;
+ struct tcphdr *th;
+ u16 vlan_id;

- /* Right now, we support IPv4 only */
- if (protocol != htons(ETH_P_IP))
- return;
- /* check if we're UDP or TCP */
- if (iph->protocol == IPPROTO_TCP) {
- th = tcp_hdr(skb);
- src_port = th->source;
- dst_port = th->dest;
- l4type |= IXGBE_ATR_L4TYPE_TCP;
- /* l4type IPv4 type is 0, no need to assign */
- } else {
- /* Unsupported L4 header, just bail here */
+ /* Right now, we support IPv4 w/ TCP only */
+ if (protocol != htons(ETH_P_IP) ||
+ iph->protocol != IPPROTO_TCP)
return;
- }

memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
IXGBE_TX_FLAGS_VLAN_SHIFT;
- src_ipv4_addr = iph->saddr;
- dst_ipv4_addr = iph->daddr;
- flex_bytes = eth->h_proto;
+
+ th = tcp_hdr(skb);

ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
- ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
- ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
- ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
- ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+ ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+ ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+ ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+ ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
/* src and dst are inverted, think how the receiver sees them */
- ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
- ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+ ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+ ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);

/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}

-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
 * smp_mb__after_netif_stop_queue();
 * but since that doesn't exist yet, just open code it. */
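ixgbe_atr() now bails out early unless the frame is IPv4 TCP and pulls the ports straight from the TCP header when building the signature filter. The tuple is written with source and destination swapped because the filter has to match the return traffic as the NIC will see it on receive. A small sketch of that inversion; the flow_key struct and helper below are invented for illustration, the driver packs the same fields through the ixgbe_atr_set_*_82599() helpers:

#include <stdio.h>
#include <stdint.h>

/* hypothetical flow key standing in for ixgbe_atr_input */
struct flow_key {
	uint32_t src_ip, dst_ip;
	uint16_t src_port, dst_port;
};

/* program the receive filter from a transmitted packet's headers:
 * swap src/dst so the key matches the reply traffic on receive */
static struct flow_key key_for_return_traffic(uint32_t tx_src_ip,
					      uint32_t tx_dst_ip,
					      uint16_t tx_src_port,
					      uint16_t tx_dst_port)
{
	struct flow_key key = {
		.src_ip   = tx_dst_ip,
		.dst_ip   = tx_src_ip,
		.src_port = tx_dst_port,
		.dst_port = tx_src_port,
	};
	return key;
}

int main(void)
{
	struct flow_key k = key_for_return_traffic(0x0a000001, 0x0a000002,
						   49152, 80);
	printf("filter: %08x:%u -> %08x:%u\n",
	       (unsigned)k.src_ip, (unsigned)k.src_port,
	       (unsigned)k.dst_ip, (unsigned)k.dst_port);
	return 0;
}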
@@ -6236,17 +6486,16 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;

/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
return 0;
}

-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbe_maybe_stop_tx(tx_ring, size);
}

static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -6291,10 +6540,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return skb_tx_hash(dev, skb);
}

-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
+ struct net_device *netdev = tx_ring->netdev;
struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
@@ -6352,8 +6602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

- if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
+ if (ixgbe_maybe_stop_tx(tx_ring, count)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}

@@ -6387,14 +6637,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}

- count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+ count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) {
/* add the ATR filter if ATR is on */
if (tx_ring->atr_sample_rate) {
++tx_ring->atr_count;
if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state)) {
+ test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
tx_flags, protocol);
tx_ring->atr_count = 0;
@@ -6403,9 +6653,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
txq->tx_bytes += skb->len;
txq->tx_packets++;
- ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
- hdr_len);
- ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

} else {
dev_kfree_skb_any(skb);
@@ -6422,7 +6671,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
struct ixgbe_ring *tx_ring;

tx_ring = adapter->tx_ring[skb->queue_mapping];
- return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+ return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

/**
@@ -6563,20 +6812,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,

/* accurate rx/tx bytes/packets stats */
dev_txq_stats_fold(netdev, stats);
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;

- do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
}
-
+ rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
stats->rx_errors = netdev->stats.rx_errors;
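ixgbe_get_stats64() now walks the Rx rings under rcu_read_lock() and tolerates a NULL ring while the queue layout is being reconfigured; the inner loop is the usual u64_stats fetch/retry dance so a 32-bit reader never observes a torn 64-bit counter. A single-threaded userspace sketch of just the retry structure; a real concurrent version additionally needs the write-side sequence updates and memory barriers that u64_stats_sync provides:

#include <stdio.h>
#include <stdint.h>

/* userspace analogue of u64_stats_fetch_begin/retry: the writer bumps a
 * sequence counter around its update, the reader retries until it sees
 * an even, unchanged sequence on both sides of the copy */
struct ring_stats {
	unsigned int seq;
	uint64_t packets;
	uint64_t bytes;
};

static unsigned int fetch_begin(const struct ring_stats *s)
{
	return s->seq;
}

static int fetch_retry(const struct ring_stats *s, unsigned int start)
{
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct ring_stats ring = { .seq = 2, .packets = 1234, .bytes = 987654 };
	uint64_t packets, bytes;
	unsigned int start;

	do {
		start = fetch_begin(&ring);
		packets = ring.packets;
		bytes = ring.bytes;
	} while (fetch_retry(&ring, start));

	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)packets, (unsigned long long)bytes);
	return 0;
}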
@@ -6758,8 +7010,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,

SET_NETDEV_DEV(netdev, &pdev->dev);

- pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);

adapter->netdev = netdev;
adapter->pdev = pdev;
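From this hunk on, the PCI driver-data pointer holds the ixgbe_adapter itself rather than the net_device, and the remove/error-handling hunks further down recover the netdev through adapter->netdev. A toy model of that indirection with stub types and stand-in pci_set_drvdata/pci_get_drvdata helpers; these are userspace stubs written for the sketch, not the kernel API:

#include <stdio.h>

struct net_device { const char *name; };
struct adapter { struct net_device *netdev; int msix_enabled; };
struct pci_dev { void *drvdata; };

static void pci_set_drvdata(struct pci_dev *pdev, void *data) { pdev->drvdata = data; }
static void *pci_get_drvdata(struct pci_dev *pdev) { return pdev->drvdata; }

int main(void)
{
	struct net_device nd = { "eth0" };
	struct adapter ad = { &nd, 1 };
	struct pci_dev pdev = { 0 };

	/* probe(): stash the private struct, not the netdev */
	pci_set_drvdata(&pdev, &ad);

	/* remove()/suspend()/error handlers now start from the adapter */
	struct adapter *a = pci_get_drvdata(&pdev);
	struct net_device *netdev = a->netdev;
	printf("removing %s (msix=%d)\n", netdev->name, a->msix_enabled);
	return 0;
}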
@@ -6832,8 +7084,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;

/* Make it possible the adapter to be woken up via WOL */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ break;
+ default:
+ break;
+ }

/*
 * If there is a fan on this device and it has failed log the
@@ -6942,7 +7200,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}

/* power down the optics */
- if (hw->phy.multispeed_fiber)
+ if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);

init_timer(&adapter->watchdog_timer);
@@ -6957,6 +7215,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;

switch (pdev->device) {
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (pdev->subsystem_device ==
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+ adapter->wol = 0;
+ break;
+ }
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -7082,8 +7347,8 @@ err_dma:
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;

set_bit(__IXGBE_DOWN, &adapter->state);
/* clear the module not found bit to make sure the worker won't
@@ -7153,8 +7418,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;

netif_device_detach(netdev);

@@ -7177,8 +7442,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
int err;

@@ -7216,8 +7480,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;

if (netif_running(netdev)) {
if (ixgbe_up(adapter)) {
@@ -7282,6 +7546,7 @@ static void __exit ixgbe_exit_module(void)
dca_unregister_notify(&dca_notifier);
#endif
pci_unregister_driver(&ixgbe_driver);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA