|
@@ -28,6 +28,16 @@
|
|
|
|
|
|
#include "moxart_ether.h"
|
|
|
|
|
|
+static inline void moxart_desc_write(u32 data, u32 *desc)
+{
+	*desc = cpu_to_le32(data);
+}
+
+static inline u32 moxart_desc_read(u32 *desc)
+{
+	return le32_to_cpu(*desc);
+}
+
|
|
|
static inline void moxart_emac_write(struct net_device *ndev,
|
|
|
unsigned int reg, unsigned long value)
|
|
|
{
|
|
@@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev)
|
|
|
static void moxart_mac_setup_desc_ring(struct net_device *ndev)
|
|
|
{
|
|
|
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
|
|
|
-	void __iomem *desc;
+	void *desc;
|
|
|
int i;
|
|
|
|
|
|
for (i = 0; i < TX_DESC_NUM; i++) {
|
|
@@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
|
|
|
|
|
|
priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
|
|
|
}
|
|
|
-	writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+	moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
|
|
|
|
|
|
priv->tx_head = 0;
|
|
|
priv->tx_tail = 0;
|
|
@@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
|
|
|
for (i = 0; i < RX_DESC_NUM; i++) {
|
|
|
desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
|
|
|
memset(desc, 0, RX_REG_DESC_SIZE);
|
|
|
-		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
-		writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+		moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+		moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
|
|
|
desc + RX_REG_OFFSET_DESC1);
|
|
|
|
|
|
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
|
|
@@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
|
|
|
if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
|
|
|
netdev_err(ndev, "DMA mapping error\n");
|
|
|
|
|
|
-		writel(priv->rx_mapping[i],
+		moxart_desc_write(priv->rx_mapping[i],
|
|
|
desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
|
|
|
-		writel(priv->rx_buf[i],
+		moxart_desc_write((uintptr_t)priv->rx_buf[i],
|
|
|
desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
|
|
|
}
|
|
|
-	writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+	moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
|
|
|
|
|
|
priv->rx_head = 0;
|
|
|
|
|
@@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
|
|
|
napi);
|
|
|
struct net_device *ndev = priv->ndev;
|
|
|
struct sk_buff *skb;
|
|
|
-	void __iomem *desc;
+	void *desc;
|
|
|
unsigned int desc0, len;
|
|
|
int rx_head = priv->rx_head;
|
|
|
int rx = 0;
|
|
|
|
|
|
while (rx < budget) {
|
|
|
desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
|
|
|
-		desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+		desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
+		rmb(); /* ensure desc0 is up to date */
|
|
|
|
|
|
if (desc0 & RX_DESC0_DMA_OWN)
|
|
|
break;
|
|
@@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
|
|
|
priv->stats.multicast++;
|
|
|
|
|
|
rx_next:
|
|
|
-		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+		wmb(); /* prevent setting ownership back too early */
+		moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
|
|
|
|
|
|
rx_head = RX_NEXT(rx_head);
|
|
|
priv->rx_head = rx_head;
|
|
@@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
|
|
|
static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|
|
{
|
|
|
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
|
|
|
-	void __iomem *desc;
+	void *desc;
|
|
|
unsigned int len;
|
|
|
unsigned int tx_head = priv->tx_head;
|
|
|
u32 txdes1;
|
|
@@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|
|
desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
|
|
|
|
|
|
spin_lock_irq(&priv->txlock);
|
|
|
-	if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
|
|
|
net_dbg_ratelimited("no TX space for packet\n");
|
|
|
priv->stats.tx_dropped++;
|
|
|
goto out_unlock;
|
|
|
}
|
|
|
+ rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
|
|
|
|
|
|
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
|
|
|
|
|
@@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|
|
priv->tx_len[tx_head] = len;
|
|
|
priv->tx_skb[tx_head] = skb;
|
|
|
|
|
|
-	writel(priv->tx_mapping[tx_head],
+	moxart_desc_write(priv->tx_mapping[tx_head],
|
|
|
desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
|
|
|
-	writel(skb->data,
+	moxart_desc_write((uintptr_t)skb->data,
|
|
|
desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
|
|
|
|
|
|
if (skb->len < ETH_ZLEN) {
|
|
@@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|
|
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
|
|
|
if (tx_head == TX_DESC_NUM_MASK)
|
|
|
txdes1 |= TX_DESC1_END;
|
|
|
-	writel(txdes1, desc + TX_REG_OFFSET_DESC1);
-	writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+	moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
+	wmb(); /* flush descriptor before transferring ownership */
+	moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
|
|
|
|
|
|
/* start to send packet */
|
|
|
writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
|