@@ -116,9 +116,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-			   struct sk_buff *skb);
+struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -180,6 +178,7 @@ static int gfar_init_bds(struct net_device *ndev)
 	struct rxbd8 *rxbdp;
 	u32 *rfbptr;
 	int i, j;
+	dma_addr_t bufaddr;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
@@ -214,19 +213,17 @@ static int gfar_init_bds(struct net_device *ndev)
 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
 
 			if (skb) {
-				gfar_init_rxbdp(rx_queue, rxbdp,
-						rxbdp->bufPtr);
+				bufaddr = rxbdp->bufPtr;
 			} else {
-				skb = gfar_new_skb(ndev);
+				skb = gfar_new_skb(ndev, &bufaddr);
 				if (!skb) {
 					netdev_err(ndev, "Can't allocate RX buffers\n");
 					return -ENOMEM;
 				}
 				rx_queue->rx_skbuff[j] = skb;
-
-				gfar_new_rxbdp(rx_queue, rxbdp, skb);
 			}
 
+			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
 			rxbdp++;
 		}
 
@@ -2319,6 +2316,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					   0,
 					   frag_len,
 					   DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+			goto dma_map_err;
 
 		/* set the TxBD length and buffer pointer */
 		txbdp->bufPtr = bufaddr;
@@ -2368,8 +2367,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			fcb->ptp = 1;
 	}
 
-	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
-					     skb_headlen(skb), DMA_TO_DEVICE);
+	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+				 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+		goto dma_map_err;
+
+	txbdp_start->bufPtr = bufaddr;
 
 	/* If time stamping is requested one additional TxBD must be set up. The
 	 * first TxBD points to the FCB and must have a data length of
@@ -2435,6 +2438,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
+
+dma_map_err:
+	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+	if (do_tstamp)
+		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+	for (i = 0; i < nr_frags; i++) {
+		lstatus = txbdp->lstatus;
+		if (!(lstatus & BD_LFLAG(TXBD_READY)))
+			break;
+
+		txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
+		bufaddr = txbdp->bufPtr;
+		dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+			       DMA_TO_DEVICE);
+		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+	}
+	gfar_wmb();
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 /* Stops the kernel queue, and halts the controller */
@@ -2635,18 +2657,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-			   struct sk_buff *skb)
-{
-	struct net_device *dev = rx_queue->dev;
-	struct gfar_private *priv = netdev_priv(dev);
-	dma_addr_t buf;
-
-	buf = dma_map_single(priv->dev, skb->data,
-			     priv->rx_buffer_size, DMA_FROM_DEVICE);
-	gfar_init_rxbdp(rx_queue, bdp, buf);
-}
-
 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
@@ -2661,9 +2671,25 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 	return skb;
 }
 
-struct sk_buff *gfar_new_skb(struct net_device *dev)
+struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
 {
-	return gfar_alloc_skb(dev);
+	struct gfar_private *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	dma_addr_t addr;
+
+	skb = gfar_alloc_skb(dev);
+	if (!skb)
+		return NULL;
+
+	addr = dma_map_single(priv->dev, skb->data,
+			      priv->rx_buffer_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, addr))) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	*bufaddr = addr;
+	return skb;
 }
 
 static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2834,11 +2860,12 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
 		struct sk_buff *newskb;
+		dma_addr_t bufaddr;
 
 		rmb();
 
 		/* Add another skb for the future */
-		newskb = gfar_new_skb(dev);
+		newskb = gfar_new_skb(dev, &bufaddr);
 
 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
@@ -2854,9 +2881,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			 bdp->status & RXBD_ERR)) {
 			count_errors(bdp->status, dev);
 
-			if (unlikely(!newskb))
+			if (unlikely(!newskb)) {
 				newskb = skb;
-			else if (skb)
+				bufaddr = bdp->bufPtr;
+			} else if (skb)
 				dev_kfree_skb(skb);
 		} else {
 			/* Increment the number of packets */
@@ -2883,7 +2911,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(rx_queue, bdp, newskb);
+		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
 
 		/* Update Last Free RxBD pointer for LFC */
 		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
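
Note: every hunk above enforces the same kernel DMA API contract: a streaming mapping
returned by dma_map_single()/skb_frag_dma_map() must be checked with dma_mapping_error()
before the address is written into a descriptor the hardware can see. The sketch below
illustrates that contract in isolation; sketch_bd and sketch_xmit are hypothetical
stand-ins (loosely mirroring struct txbd8 and gfar_start_xmit), not code from the driver.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for a hardware Tx descriptor (cf. struct txbd8). */
struct sketch_bd {
	u32		lstatus;
	dma_addr_t	bufPtr;
};

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct device *dev,
			       struct sketch_bd *bd)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr))) {
		/* Mapping failed: never arm a descriptor with an invalid
		 * address.  Drop the packet and return NETDEV_TX_OK so the
		 * stack does not retry a buffer we cannot map.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Only now is it safe to publish the address to hardware. */
	bd->bufPtr = addr;
	return NETDEV_TX_OK;
}

The Rx refill path follows the same idea, which is why gfar_new_skb() now returns the
mapped address through *bufaddr, and why the error branch in gfar_clean_rx_ring() falls
back to the old buffer (newskb = skb; bufaddr = bdp->bufPtr) when allocation or mapping
fails, rather than leaving the ring slot with a stale or invalid address.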
|