@@ -52,6 +52,9 @@
 		      | MACB_BIT(TXERR))
 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
+#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
+#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+
 /*
  * Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -264,7 +267,8 @@ static void macb_handle_link_change(struct net_device *dev)
 				reg |= MACB_BIT(FD);
 			if (phydev->speed == SPEED_100)
 				reg |= MACB_BIT(SPD);
-			if (phydev->speed == SPEED_1000)
+			if (phydev->speed == SPEED_1000 &&
+			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
 				reg |= GEM_BIT(GBE);
 
 			macb_or_gem_writel(bp, NCFGR, reg);
@@ -337,7 +341,7 @@ static int macb_mii_probe(struct net_device *dev)
 	}
 
 	/* mask with MAC supported features */
-	if (macb_is_gem(bp))
+	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
 		phydev->supported &= PHY_GBIT_FEATURES;
 	else
 		phydev->supported &= PHY_BASIC_FEATURES;
@@ -467,6 +471,24 @@ static int macb_halt_tx(struct macb *bp)
 	return -ETIMEDOUT;
 }
 
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+{
+	if (tx_skb->mapping) {
+		if (tx_skb->mapped_as_page)
+			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
+				       tx_skb->size, DMA_TO_DEVICE);
+		else
+			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
+					 tx_skb->size, DMA_TO_DEVICE);
+		tx_skb->mapping = 0;
+	}
+
+	if (tx_skb->skb) {
+		dev_kfree_skb_any(tx_skb->skb);
+		tx_skb->skb = NULL;
+	}
+}
+
 static void macb_tx_error_task(struct work_struct *work)
 {
 	struct macb *bp = container_of(work, struct macb, tx_error_task);
@@ -504,10 +526,23 @@ static void macb_tx_error_task(struct work_struct *work)
 		skb = tx_skb->skb;
 
 		if (ctrl & MACB_BIT(TX_USED)) {
-			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
-				    macb_tx_ring_wrap(tail), skb->data);
-			bp->stats.tx_packets++;
-			bp->stats.tx_bytes += skb->len;
+			/* skb is set for the last buffer of the frame */
+			while (!skb) {
+				macb_tx_unmap(bp, tx_skb);
+				tail++;
+				tx_skb = macb_tx_skb(bp, tail);
+				skb = tx_skb->skb;
+			}
+
+			/* ctrl still refers to the first buffer descriptor
+			 * since it's the only one written back by the hardware
+			 */
+			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
+				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+					    macb_tx_ring_wrap(tail), skb->data);
+				bp->stats.tx_packets++;
+				bp->stats.tx_bytes += skb->len;
+			}
 		} else {
 			/*
 			 * "Buffers exhausted mid-frame" errors may only happen
@@ -521,10 +556,7 @@ static void macb_tx_error_task(struct work_struct *work)
 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
 		}
 
-		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-				 DMA_TO_DEVICE);
-		tx_skb->skb = NULL;
-		dev_kfree_skb(skb);
+		macb_tx_unmap(bp, tx_skb);
 	}
 
 	/* Make descriptor updates visible to hardware */
@@ -572,20 +604,35 @@ static void macb_tx_interrupt(struct macb *bp)
 
 		ctrl = desc->ctrl;
 
+		/* TX_USED bit is only set by hardware on the very first buffer
+		 * descriptor of the transmitted frame.
+		 */
 		if (!(ctrl & MACB_BIT(TX_USED)))
 			break;
 
-		tx_skb = macb_tx_skb(bp, tail);
-		skb = tx_skb->skb;
+		/* Process all buffers of the current transmitted frame */
+		for (;; tail++) {
+			tx_skb = macb_tx_skb(bp, tail);
+			skb = tx_skb->skb;
+
+			/* First, update TX stats if needed */
+			if (skb) {
+				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+					    macb_tx_ring_wrap(tail), skb->data);
+				bp->stats.tx_packets++;
+				bp->stats.tx_bytes += skb->len;
+			}
 
-		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-			    macb_tx_ring_wrap(tail), skb->data);
-		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-			     DMA_TO_DEVICE);
-		bp->stats.tx_packets++;
-		bp->stats.tx_bytes += skb->len;
-		tx_skb->skb = NULL;
-		dev_kfree_skb_irq(skb);
+			/* Now we can safely release resources */
+			macb_tx_unmap(bp, tx_skb);
+
+			/* skb is set only for the last buffer of the frame.
+			 * WARNING: at this point skb has been freed by
+			 * macb_tx_unmap().
+			 */
+			if (skb)
+				break;
+		}
 	}
 
 	bp->tx_tail = tail;
@@ -718,6 +765,10 @@ static int gem_rx(struct macb *bp, int budget)
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb_checksum_none_assert(skb);
+		if (bp->dev->features & NETIF_F_RXCSUM &&
+		    !(bp->dev->flags & IFF_PROMISC) &&
+		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		bp->stats.rx_packets++;
 		bp->stats.rx_bytes += skb->len;
@@ -1001,15 +1052,145 @@ static void macb_poll_controller(struct net_device *dev)
 }
 #endif
 
-static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
+						     unsigned int len)
+{
+	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
+}
+
+static unsigned int macb_tx_map(struct macb *bp,
+				struct sk_buff *skb)
 {
-	struct macb *bp = netdev_priv(dev);
 	dma_addr_t mapping;
-	unsigned int len, entry;
+	unsigned int len, entry, i, tx_head = bp->tx_head;
+	struct macb_tx_skb *tx_skb = NULL;
 	struct macb_dma_desc *desc;
-	struct macb_tx_skb *tx_skb;
+	unsigned int offset, size, count = 0;
+	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int eof = 1;
 	u32 ctrl;
+
+	/* First, map non-paged data */
+	len = skb_headlen(skb);
+	offset = 0;
+	while (len) {
+		size = min(len, bp->max_tx_length);
+		entry = macb_tx_ring_wrap(tx_head);
+		tx_skb = &bp->tx_skb[entry];
+
+		mapping = dma_map_single(&bp->pdev->dev,
+					 skb->data + offset,
+					 size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&bp->pdev->dev, mapping))
+			goto dma_error;
+
+		/* Save info to properly release resources */
+		tx_skb->skb = NULL;
+		tx_skb->mapping = mapping;
+		tx_skb->size = size;
+		tx_skb->mapped_as_page = false;
+
+		len -= size;
+		offset += size;
+		count++;
+		tx_head++;
+	}
+
+	/* Then, map paged data from fragments */
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		len = skb_frag_size(frag);
+		offset = 0;
+		while (len) {
+			size = min(len, bp->max_tx_length);
+			entry = macb_tx_ring_wrap(tx_head);
+			tx_skb = &bp->tx_skb[entry];
+
+			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
+						   offset, size, DMA_TO_DEVICE);
+			if (dma_mapping_error(&bp->pdev->dev, mapping))
+				goto dma_error;
+
+			/* Save info to properly release resources */
+			tx_skb->skb = NULL;
+			tx_skb->mapping = mapping;
+			tx_skb->size = size;
+			tx_skb->mapped_as_page = true;
+
+			len -= size;
+			offset += size;
+			count++;
+			tx_head++;
+		}
+	}
+
+	/* Should never happen */
+	if (unlikely(tx_skb == NULL)) {
+		netdev_err(bp->dev, "BUG! empty skb!\n");
+		return 0;
+	}
+
+	/* This is the last buffer of the frame: save socket buffer */
+	tx_skb->skb = skb;
+
+	/* Update TX ring: update buffer descriptors in reverse order
+	 * to avoid race condition
+	 */
+
+	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
+	 * to set the end of TX queue
+	 */
+	i = tx_head;
+	entry = macb_tx_ring_wrap(i);
+	ctrl = MACB_BIT(TX_USED);
+	desc = &bp->tx_ring[entry];
+	desc->ctrl = ctrl;
+
+	do {
+		i--;
+		entry = macb_tx_ring_wrap(i);
+		tx_skb = &bp->tx_skb[entry];
+		desc = &bp->tx_ring[entry];
+
+		ctrl = (u32)tx_skb->size;
+		if (eof) {
+			ctrl |= MACB_BIT(TX_LAST);
+			eof = 0;
+		}
+		if (unlikely(entry == (TX_RING_SIZE - 1)))
+			ctrl |= MACB_BIT(TX_WRAP);
+
+		/* Set TX buffer descriptor */
+		desc->addr = tx_skb->mapping;
+		/* desc->addr must be visible to hardware before clearing
+		 * 'TX_USED' bit in desc->ctrl.
+		 */
+		wmb();
+		desc->ctrl = ctrl;
+	} while (i != bp->tx_head);
+
+	bp->tx_head = tx_head;
+
+	return count;
+
+dma_error:
+	netdev_err(bp->dev, "TX DMA map failed\n");
+
+	for (i = bp->tx_head; i != tx_head; i++) {
+		tx_skb = macb_tx_skb(bp, i);
+
+		macb_tx_unmap(bp, tx_skb);
+	}
+
+	return 0;
+}
+
+static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
 	unsigned long flags;
+	unsigned int count, nr_frags, frag_size, f;
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 	netdev_vdbg(bp->dev,
@@ -1020,44 +1201,34 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		       skb->data, 16, true);
 #endif
 
-	len = skb->len;
+	/* Count how many TX buffer descriptors are needed to send this
+	 * socket buffer: skb fragments of jumbo frames may need to be
+	 * split into many buffer descriptors.
+	 */
+	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (f = 0; f < nr_frags; f++) {
+		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
+		count += macb_count_tx_descriptors(bp, frag_size);
+	}
+
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&bp->lock, flags);
-		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
 			   bp->tx_head, bp->tx_tail);
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = macb_tx_ring_wrap(bp->tx_head);
-	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+	/* Map socket buffer for DMA transfer */
+	if (!macb_tx_map(bp, skb)) {
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
 
-	bp->tx_head++;
-	tx_skb = &bp->tx_skb[entry];
-	tx_skb->skb = skb;
-	tx_skb->mapping = mapping;
-	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
-		    skb->data, (unsigned long)mapping);
-
-	ctrl = MACB_BF(TX_FRMLEN, len);
-	ctrl |= MACB_BIT(TX_LAST);
-	if (entry == (TX_RING_SIZE - 1))
-		ctrl |= MACB_BIT(TX_WRAP);
-
-	desc = &bp->tx_ring[entry];
-	desc->addr = mapping;
-	desc->ctrl = ctrl;
-
 	/* Make newly initialized descriptor visible to hardware */
 	wmb();
 
@@ -1342,7 +1513,7 @@ static u32 macb_dbw(struct macb *bp)
 /*
  * Configure the receive DMA engine
  * - use the correct receive buffer size
- * - set the possibility to use INCR16 bursts
+ * - set best burst length for DMA operations
  *   (if not supported by FIFO, it will fallback to default)
  * - set both rx/tx packet buffers to full memory size
  * These are configurable parameters for GEM.
@@ -1354,24 +1525,20 @@ static void macb_configure_dma(struct macb *bp)
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
 		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
-		dmacfg |= GEM_BF(FBLDO, 16);
+		if (bp->dma_burst_length)
+			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 		dmacfg &= ~GEM_BIT(ENDIA);
+		if (bp->dev->features & NETIF_F_HW_CSUM)
+			dmacfg |= GEM_BIT(TXCOEN);
+		else
+			dmacfg &= ~GEM_BIT(TXCOEN);
+		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
+			   dmacfg);
 		gem_writel(bp, DMACFG, dmacfg);
 	}
 }
 
-/*
- * Configure peripheral capacities according to integration options used
- */
-static void macb_configure_caps(struct macb *bp)
-{
-	if (macb_is_gem(bp)) {
-		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
-			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
-	}
-}
-
 static void macb_init_hw(struct macb *bp)
 {
 	u32 config;
@@ -1386,6 +1553,8 @@ static void macb_init_hw(struct macb *bp)
 		config |= MACB_BIT(BIG);	/* Receive oversized frames */
 	if (bp->dev->flags & IFF_PROMISC)
 		config |= MACB_BIT(CAF);	/* Copy All Frames */
+	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
+		config |= GEM_BIT(RXCOEN);
 	if (!(bp->dev->flags & IFF_BROADCAST))
 		config |= MACB_BIT(NBC);	/* No BroadCast */
 	config |= macb_dbw(bp);
@@ -1394,7 +1563,6 @@ static void macb_init_hw(struct macb *bp)
 	bp->duplex = DUPLEX_HALF;
 
 	macb_configure_dma(bp);
-	macb_configure_caps(bp);
 
 	/* Initialize TX and RX buffers */
 	macb_writel(bp, RBQP, bp->rx_ring_dma);
@@ -1500,13 +1668,22 @@ void macb_set_rx_mode(struct net_device *dev)
 
 	cfg = macb_readl(bp, NCFGR);
 
-	if (dev->flags & IFF_PROMISC)
+	if (dev->flags & IFF_PROMISC) {
 		/* Enable promiscuous mode */
 		cfg |= MACB_BIT(CAF);
-	else if (dev->flags & (~IFF_PROMISC))
-		/* Disable promiscuous mode */
+
+		/* Disable RX checksum offload */
+		if (macb_is_gem(bp))
+			cfg &= ~GEM_BIT(RXCOEN);
+	} else {
+		/* Disable promiscuous mode */
 		cfg &= ~MACB_BIT(CAF);
 
+		/* Enable RX checksum offload only if requested */
+		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
+			cfg |= GEM_BIT(RXCOEN);
+	}
+
 	if (dev->flags & IFF_ALLMULTI) {
 		/* Enable all multicast mode */
 		macb_or_gem_writel(bp, HRB, -1);
@@ -1767,6 +1944,40 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 EXPORT_SYMBOL_GPL(macb_ioctl);
 
+static int macb_set_features(struct net_device *netdev,
+			     netdev_features_t features)
+{
+	struct macb *bp = netdev_priv(netdev);
+	netdev_features_t changed = features ^ netdev->features;
+
+	/* TX checksum offload */
+	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
+		u32 dmacfg;
+
+		dmacfg = gem_readl(bp, DMACFG);
+		if (features & NETIF_F_HW_CSUM)
+			dmacfg |= GEM_BIT(TXCOEN);
+		else
+			dmacfg &= ~GEM_BIT(TXCOEN);
+		gem_writel(bp, DMACFG, dmacfg);
+	}
+
+	/* RX checksum offload */
+	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
+		u32 netcfg;
+
+		netcfg = gem_readl(bp, NCFGR);
+		if (features & NETIF_F_RXCSUM &&
+		    !(netdev->flags & IFF_PROMISC))
+			netcfg |= GEM_BIT(RXCOEN);
+		else
+			netcfg &= ~GEM_BIT(RXCOEN);
+		gem_writel(bp, NCFGR, netcfg);
+	}
+
+	return 0;
+}
+
 static const struct net_device_ops macb_netdev_ops = {
 	.ndo_open		= macb_open,
 	.ndo_stop		= macb_close,
@@ -1780,20 +1991,77 @@ static const struct net_device_ops macb_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= macb_poll_controller,
 #endif
+	.ndo_set_features	= macb_set_features,
 };
 
 #if defined(CONFIG_OF)
+static struct macb_config pc302gem_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+	.dma_burst_length = 16,
+};
+
+static struct macb_config sama5d3_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+	.dma_burst_length = 16,
+};
+
+static struct macb_config sama5d4_config = {
+	.caps = 0,
+	.dma_burst_length = 4,
+};
+
 static const struct of_device_id macb_dt_ids[] = {
 	{ .compatible = "cdns,at32ap7000-macb" },
 	{ .compatible = "cdns,at91sam9260-macb" },
 	{ .compatible = "cdns,macb" },
-	{ .compatible = "cdns,pc302-gem" },
-	{ .compatible = "cdns,gem" },
+	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
+	{ .compatible = "cdns,gem", .data = &pc302gem_config },
+	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
 #endif
 
+/*
+ * Configure peripheral capacities according to device tree
+ * and integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+	u32 dcfg;
+	const struct of_device_id *match;
+	const struct macb_config *config;
+
+	if (bp->pdev->dev.of_node) {
+		match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
+		if (match && match->data) {
+			config = (const struct macb_config *)match->data;
+
+			bp->caps = config->caps;
+			/*
+			 * As we have access to the matching node, configure
+			 * DMA burst length as well
+			 */
+			bp->dma_burst_length = config->dma_burst_length;
+		}
+	}
+
+	if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
+		bp->caps |= MACB_CAPS_MACB_IS_GEM;
+
+	if (macb_is_gem(bp)) {
+		dcfg = gem_readl(bp, DCFG1);
+		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
+			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+		dcfg = gem_readl(bp, DCFG2);
+		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
+			bp->caps |= MACB_CAPS_FIFO_MODE;
+	}
+
+	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+}
+
 static int __init macb_probe(struct platform_device *pdev)
 {
 	struct macb_platform_data *pdata;
@@ -1828,9 +2096,6 @@ static int __init macb_probe(struct platform_device *pdev)
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	/* TODO: Actually, we have some interesting features... */
-	dev->features |= 0;
-
 	bp = netdev_priv(dev);
 	bp->pdev = pdev;
 	bp->dev = dev;
@@ -1897,19 +2162,33 @@ static int __init macb_probe(struct platform_device *pdev)
 
 	dev->base_addr = regs->start;
 
+	/* setup capacities */
+	macb_configure_caps(bp);
+
 	/* setup appropriated routines according to adapter type */
 	if (macb_is_gem(bp)) {
+		bp->max_tx_length = GEM_MAX_TX_LEN;
 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
 		bp->macbgem_ops.mog_rx = gem_rx;
 	} else {
+		bp->max_tx_length = MACB_MAX_TX_LEN;
 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
 		bp->macbgem_ops.mog_rx = macb_rx;
 	}
 
+	/* Set features */
+	dev->hw_features = NETIF_F_SG;
+	/* Checksum offload is only available on gem with packet buffer */
+	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
+		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	if (bp->caps & MACB_CAPS_SG_DISABLED)
+		dev->hw_features &= ~NETIF_F_SG;
+	dev->features = dev->hw_features;
+
 	/* Set MII management clock divider */
 	config = macb_mdc_clk_div(bp);
 	config |= macb_dbw(bp);