@@ -876,7 +876,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 static void xgbe_stop(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	struct xgbe_channel *channel;
 	struct net_device *netdev = pdata->netdev;
+	struct netdev_queue *txq;
+	unsigned int i;
 
 	DBGPR("-->xgbe_stop\n");
 
@@ -890,6 +893,15 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
 
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->tx_ring)
+			continue;
+
+		txq = netdev_get_tx_queue(netdev, channel->queue_index);
+		netdev_tx_reset_queue(txq);
+	}
+
 	DBGPR("<--xgbe_stop\n");
 }
 
@@ -1156,6 +1168,12 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
 	      packet->tcp_header_len, packet->tcp_payload_len);
 	DBGPR("  packet->mss=%u\n", packet->mss);
 
+	/* Update the number of packets that will ultimately be transmitted
+	 * along with the extra bytes for each extra packet
+	 */
+	packet->tx_packets = skb_shinfo(skb)->gso_segs;
+	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
 	return 0;
 }
 
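A worked example of the accounting in the hunk above, using illustrative numbers not taken from the patch: for a TSO skb with header_len = 66, gso_size = 1448 and skb->len = 14546 (66 bytes of headers plus 14480 bytes of payload), skb_shinfo(skb)->gso_segs is 10, so tx_packets becomes 10 and tx_bytes grows from 14546 to 14546 + 9 * 66 = 15140, i.e. exactly ten 1514-byte frames on the wire. That wire-level figure, rather than the bare skb->len, is what the queue accounting below needs to see.
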
@@ -1184,6 +1202,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 	context_desc = 0;
 	packet->rdesc_count = 0;
 
+	packet->tx_packets = 1;
+	packet->tx_bytes = skb->len;
+
 	if (xgbe_is_tso(skb)) {
 		/* TSO requires an extra descriptor if mss is different */
 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
@@ -1400,12 +1421,14 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct xgbe_channel *channel;
 	struct xgbe_ring *ring;
 	struct xgbe_packet_data *packet;
+	struct netdev_queue *txq;
 	unsigned long flags;
 	int ret;
 
 	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
 
 	channel = pdata->channel + skb->queue_mapping;
+	txq = netdev_get_tx_queue(netdev, channel->queue_index);
 	ring = channel->tx_ring;
 	packet = &ring->packet_data;
 
@@ -1447,6 +1470,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	xgbe_prep_tx_tstamp(pdata, skb, packet);
 
+	/* Report on the actual number of bytes (to be) sent */
+	netdev_tx_sent_queue(txq, packet->tx_bytes);
+
 	/* Configure required descriptor fields for transmission */
 	hw_if->dev_xmit(channel);
 
@@ -1772,8 +1798,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
 	struct net_device *netdev = pdata->netdev;
+	struct netdev_queue *txq;
 	unsigned long flags;
 	int processed = 0;
+	unsigned int tx_packets = 0, tx_bytes = 0;
 
 	DBGPR("-->xgbe_tx_poll\n");
 
@@ -1781,6 +1809,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 	if (!ring)
 		return 0;
 
+	txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
 	spin_lock_irqsave(&ring->lock, flags);
 
 	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
@@ -1799,6 +1829,11 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
 #endif
 
+		if (hw_if->is_last_desc(rdesc)) {
+			tx_packets += rdata->tx.packets;
+			tx_bytes += rdata->tx.bytes;
+		}
+
 		/* Free the SKB and reset the descriptor for re-use */
 		desc_if->unmap_rdata(pdata, rdata);
 		hw_if->tx_desc_reset(rdata);
@@ -1807,14 +1842,20 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 		ring->dirty++;
 	}
 
+	if (!processed)
+		goto unlock;
+
+	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
 	if ((ring->tx.queue_stopped == 1) &&
 	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
 		ring->tx.queue_stopped = 0;
-		netif_wake_subqueue(netdev, channel->queue_index);
+		netif_tx_wake_queue(txq);
 	}
 
 	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
 
+unlock:
 	spin_unlock_irqrestore(&ring->lock, flags);
 
 	return processed;
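Taken together, the hunks above hook the driver into the kernel's Byte Queue Limits (BQL) machinery, and the three netdev_tx_*_queue() helpers travel as a set: report bytes when they are queued to hardware, report packets and bytes when the hardware completes them, and reset the accounting whenever the ring is torn down. What follows is a minimal sketch of that pattern, not the xgbe implementation: the my_* names are hypothetical stand-ins for a driver's own structures, and only the netdev_* calls are real API from <linux/netdevice.h>.

#include <linux/netdevice.h>

/* Hypothetical driver state; illustrative only. */
struct my_tx_ring {
	struct net_device *netdev;
	unsigned int queue_index;
};

/* Transmit path: tell BQL how many bytes were handed to hardware. */
static void my_tx_queue_bytes(struct my_tx_ring *ring, unsigned int bytes)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(ring->netdev, ring->queue_index);
	netdev_tx_sent_queue(txq, bytes);
}

/* Completion path: report what finished, once per cleanup pass. */
static void my_tx_complete_bytes(struct my_tx_ring *ring,
				 unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(ring->netdev, ring->queue_index);
	netdev_tx_completed_queue(txq, pkts, bytes);
}

/* Stop/reset path: clear the counters so sent and completed stay balanced. */
static void my_tx_ring_reset(struct my_tx_ring *ring)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(ring->netdev, ring->queue_index);
	netdev_tx_reset_queue(txq);
}

Every byte passed to netdev_tx_sent_queue() must eventually be matched by netdev_tx_completed_queue() or wiped by netdev_tx_reset_queue(), which is why xgbe_stop() resets each Tx queue. Caching the txq pointer also lets the completion path call netif_tx_wake_queue(txq) directly instead of netif_wake_subqueue(), the same wake-up without a repeat queue lookup.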