@@ -32,7 +32,9 @@
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
 #include "macb.h"
 
 #define MACB_RX_BUFFER_SIZE	128
@@ -60,10 +62,13 @@
 					| MACB_BIT(TXERR))
 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
-#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
-#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+/* Max length of transmit frame must be a multiple of 8 bytes */
+#define MACB_TX_LEN_ALIGN	8
+#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
 
 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
+#define MACB_NETIF_LSO		(NETIF_F_TSO | NETIF_F_UFO)
 
 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 #define MACB_WOL_ENABLED		(0x1 << 1)
@@ -1223,7 +1228,8 @@ static void macb_poll_controller(struct net_device *dev)
 
 static unsigned int macb_tx_map(struct macb *bp,
 				struct macb_queue *queue,
-				struct sk_buff *skb)
+				struct sk_buff *skb,
+				unsigned int hdrlen)
 {
 	dma_addr_t mapping;
 	unsigned int len, entry, i, tx_head = queue->tx_head;
@@ -1231,14 +1237,27 @@ static unsigned int macb_tx_map(struct macb *bp,
 	struct macb_dma_desc *desc;
 	unsigned int offset, size, count = 0;
 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int eof = 1;
-	u32 ctrl;
+	unsigned int eof = 1, mss_mfs = 0;
+	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+
+	/* LSO */
+	if (skb_shinfo(skb)->gso_size != 0) {
+		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+			/* UDP - UFO */
+			lso_ctrl = MACB_LSO_UFO_ENABLE;
+		else
+			/* TCP - TSO */
+			lso_ctrl = MACB_LSO_TSO_ENABLE;
+	}
 
 	/* First, map non-paged data */
 	len = skb_headlen(skb);
+
+	/* first buffer length */
+	size = hdrlen;
+
 	offset = 0;
 	while (len) {
-		size = min(len, bp->max_tx_length);
 		entry = macb_tx_ring_wrap(bp, tx_head);
 		tx_skb = &queue->tx_skb[entry];
 
@@ -1258,6 +1277,8 @@ static unsigned int macb_tx_map(struct macb *bp,
 		offset += size;
 		count++;
 		tx_head++;
+
+		size = min(len, bp->max_tx_length);
 	}
 
 	/* Then, map paged data from fragments */
@@ -1311,6 +1332,21 @@ static unsigned int macb_tx_map(struct macb *bp,
 	desc = &queue->tx_ring[entry];
 	desc->ctrl = ctrl;
 
+	if (lso_ctrl) {
+		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
+			/* include header and FCS in value given to h/w */
+			mss_mfs = skb_shinfo(skb)->gso_size +
+					skb_transport_offset(skb) +
+					ETH_FCS_LEN;
+		else /* TSO */ {
+			mss_mfs = skb_shinfo(skb)->gso_size;
+			/* TCP Sequence Number Source Select
+			 * can be set only for TSO
+			 */
+			seq_ctrl = 0;
+		}
+	}
+
 	do {
 		i--;
 		entry = macb_tx_ring_wrap(bp, i);
@@ -1325,6 +1361,16 @@ static unsigned int macb_tx_map(struct macb *bp,
 		if (unlikely(entry == (bp->tx_ring_size - 1)))
 			ctrl |= MACB_BIT(TX_WRAP);
 
+		/* First descriptor is header descriptor */
+		if (i == queue->tx_head) {
+			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
+			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
+		} else
+			/* Only set MSS/MFS on payload descriptors
+			 * (second or later descriptor)
+			 */
+			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
+
 		/* Set TX buffer descriptor */
 		macb_set_addr(desc, tx_skb->mapping);
 		/* desc->addr must be visible to hardware before clearing
@@ -1350,6 +1396,43 @@ dma_error:
 	return 0;
 }
 
+static netdev_features_t macb_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	unsigned int nr_frags, f;
+	unsigned int hdrlen;
+
+	/* Validate LSO compatibility */
+
+	/* there is only one buffer */
+	if (!skb_is_nonlinear(skb))
+		return features;
+
+	/* length of header */
+	hdrlen = skb_transport_offset(skb);
+	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+		hdrlen += tcp_hdrlen(skb);
+
+	/* For LSO:
+	 * When software supplies two or more payload buffers all payload buffers
+	 * apart from the last must be a multiple of 8 bytes in size.
+	 */
+	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
+		return features & ~MACB_NETIF_LSO;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	/* No need to check last fragment */
+	nr_frags--;
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
+			return features & ~MACB_NETIF_LSO;
+	}
+	return features;
+}
+
 static inline int macb_clear_csum(struct sk_buff *skb)
 {
 	/* no change for packets without checksum offloading */
@@ -1374,7 +1457,28 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct macb *bp = netdev_priv(dev);
 	struct macb_queue *queue = &bp->queues[queue_index];
 	unsigned long flags;
-	unsigned int count, nr_frags, frag_size, f;
+	unsigned int desc_cnt, nr_frags, frag_size, f;
+	unsigned int hdrlen;
+	bool is_lso, is_udp = 0;
+
+	is_lso = (skb_shinfo(skb)->gso_size != 0);
+
+	if (is_lso) {
+		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
+
+		/* length of headers */
+		if (is_udp)
+			/* only queue eth + ip headers separately for UDP */
+			hdrlen = skb_transport_offset(skb);
+		else
+			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb_headlen(skb) < hdrlen) {
+			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
+			/* if this is required, would need to copy to single buffer */
+			return NETDEV_TX_BUSY;
+		}
+	} else
+		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 	netdev_vdbg(bp->dev,
@@ -1389,18 +1493,22 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * socket buffer: skb fragments of jumbo frames may need to be
 	 * split into many buffer descriptors.
 	 */
-	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
+	if (is_lso && (skb_headlen(skb) > hdrlen))
+		/* extra header descriptor if also payload in first buffer */
+		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
+	else
+		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++) {
 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
-		count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
+		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
 	}
 
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
-		       bp->tx_ring_size) < count) {
+		       bp->tx_ring_size) < desc_cnt) {
 		netif_stop_subqueue(dev, queue_index);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
|
|
}
|
|
}
|
|
|
|
|
|
/* Map socket buffer for DMA transfer */
|
|
/* Map socket buffer for DMA transfer */
|
|
- if (!macb_tx_map(bp, queue, skb)) {
|
|
|
|
|
|
+ if (!macb_tx_map(bp, queue, skb, hdrlen)) {
|
|
dev_kfree_skb_any(skb);
|
|
dev_kfree_skb_any(skb);
|
|
goto unlock;
|
|
goto unlock;
|
|
}
|
|
}
|
|
@@ -2354,6 +2462,7 @@ static const struct net_device_ops macb_netdev_ops = {
 	.ndo_poll_controller	= macb_poll_controller,
 #endif
 	.ndo_set_features	= macb_set_features,
+	.ndo_features_check	= macb_features_check,
 };
 
 /* Configure peripheral capabilities according to device tree
@@ -2560,6 +2669,11 @@ static int macb_init(struct platform_device *pdev)
 
 	/* Set features */
 	dev->hw_features = NETIF_F_SG;
+
+	/* Check LSO capability */
+	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
+		dev->hw_features |= MACB_NETIF_LSO;
+
 	/* Checksum offload is only available on gem with packet buffer */
 	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;