@@ -101,16 +101,56 @@
 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+
 #define MVNETA_INTR_NEW_CAUSE 0x25a0
-#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
 #define MVNETA_INTR_NEW_MASK 0x25a4
+
+/* bits 0..7 = TXQ SENT, one bit per queue.
+ * bits 8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit 29 = OLD_REG_SUM, see old reg ?
+ * bit 30 = TX_ERR_SUM, one bit for 4 ports
+ * bit 31 = MISC_SUM, one bit for 4 ports
+ */
+#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
+#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
+
 #define MVNETA_INTR_OLD_CAUSE 0x25a8
 #define MVNETA_INTR_OLD_MASK 0x25ac
+
+/* Data Path Port/Queue Cause Register */
 #define MVNETA_INTR_MISC_CAUSE 0x25b0
 #define MVNETA_INTR_MISC_MASK 0x25b4
+
+#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
+#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
+#define MVNETA_CAUSE_PTP BIT(4)
+
+#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
+#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
+#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
+#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
+#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
+#define MVNETA_CAUSE_PRBS_ERR BIT(12)
+#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
+#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
+
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
+#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
 #define MVNETA_INTR_ENABLE 0x25b8
 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
-#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF
+
 #define MVNETA_RXQ_CMD 0x2680
 #define MVNETA_RXQ_DISABLE_SHIFT 8
 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
@@ -176,9 +216,6 @@
 #define MVNETA_RX_COAL_PKTS 32
 #define MVNETA_RX_COAL_USEC 100

-/* Timer */
-#define MVNETA_TX_DONE_TIMER_PERIOD 10
-
 /* Napi polling weight */
 #define MVNETA_RX_POLL_WEIGHT 64

@@ -221,27 +258,25 @@

 #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)

-struct mvneta_stats {
+struct mvneta_pcpu_stats {
         struct u64_stats_sync syncp;
-        u64 packets;
-        u64 bytes;
+        u64 rx_packets;
+        u64 rx_bytes;
+        u64 tx_packets;
+        u64 tx_bytes;
 };

 struct mvneta_port {
         int pkt_size;
+        unsigned int frag_size;
         void __iomem *base;
         struct mvneta_rx_queue *rxqs;
         struct mvneta_tx_queue *txqs;
-        struct timer_list tx_done_timer;
         struct net_device *dev;

         u32 cause_rx_tx;
         struct napi_struct napi;

-        /* Flags */
-        unsigned long flags;
-#define MVNETA_F_TX_DONE_TIMER_BIT 0
-
         /* Napi weight */
         int weight;

@@ -250,8 +285,7 @@ struct mvneta_port {
         u8 mcast_count[256];
         u16 tx_ring_size;
         u16 rx_ring_size;
-        struct mvneta_stats tx_stats;
-        struct mvneta_stats rx_stats;
+        struct mvneta_pcpu_stats *stats;

         struct mii_bus *mii_bus;
         struct phy_device *phy_dev;
@@ -410,6 +444,8 @@ static int txq_number = 8;

 static int rxq_def;

+static int rx_copybreak __read_mostly = 256;
+
 #define MVNETA_DRIVER_NAME "mvneta"
 #define MVNETA_DRIVER_VERSION "1.0"

@@ -461,21 +497,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
 {
         struct mvneta_port *pp = netdev_priv(dev);
         unsigned int start;
+        int cpu;

-        memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
-        do {
-                start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
-                stats->rx_packets = pp->rx_stats.packets;
-                stats->rx_bytes = pp->rx_stats.bytes;
-        } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+        for_each_possible_cpu(cpu) {
+                struct mvneta_pcpu_stats *cpu_stats;
+                u64 rx_packets;
+                u64 rx_bytes;
+                u64 tx_packets;
+                u64 tx_bytes;

+                cpu_stats = per_cpu_ptr(pp->stats, cpu);
+                do {
+                        start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+                        rx_packets = cpu_stats->rx_packets;
+                        rx_bytes = cpu_stats->rx_bytes;
+                        tx_packets = cpu_stats->tx_packets;
+                        tx_bytes = cpu_stats->tx_bytes;
+                } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));

-        do {
-                start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
-                stats->tx_packets = pp->tx_stats.packets;
-                stats->tx_bytes = pp->tx_stats.bytes;
-        } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+                stats->rx_packets += rx_packets;
+                stats->rx_bytes += rx_bytes;
+                stats->tx_packets += tx_packets;
+                stats->tx_bytes += tx_bytes;
+        }

         stats->rx_errors = dev->stats.rx_errors;
         stats->rx_dropped = dev->stats.rx_dropped;
@@ -487,14 +531,14 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,

 /* Rx descriptors helper methods */

-/* Checks whether the given RX descriptor is both the first and the
- * last descriptor for the RX packet. Each RX packet is currently
+/* Checks whether the RX descriptor having this status is both the first
+ * and the last descriptor for the RX packet. Each RX packet is currently
  * received through a single RX descriptor, so not having each RX
  * descriptor with its first and last bits set is an error
  */
-static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+static int mvneta_rxq_desc_is_first_last(u32 status)
 {
-        return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
                 MVNETA_RXD_FIRST_LAST_DESC;
 }

@@ -570,6 +614,7 @@ mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
         int rx_desc = rxq->next_desc_to_proc;

         rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+        prefetch(rxq->descs + rxq->next_desc_to_proc);
         return rxq->descs + rx_desc;
 }

@@ -1100,17 +1145,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
         txq->done_pkts_coal = value;
 }

-/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
-static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
-{
-        if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
-                pp->tx_done_timer.expires = jiffies +
-                        msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
-                add_timer(&pp->tx_done_timer);
-        }
-}
-
-
 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
                                 u32 phys_addr, u32 cookie)
@@ -1204,10 +1238,10 @@ static void mvneta_rx_error(struct mvneta_port *pp,
 {
         u32 status = rx_desc->status;

-        if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+        if (!mvneta_rxq_desc_is_first_last(status)) {
                 netdev_err(pp->dev,
                            "bad rx status %08x (buffer oversize), size=%d\n",
-                           rx_desc->status, rx_desc->data_size);
+                           status, rx_desc->data_size);
                 return;
         }

@@ -1231,13 +1265,12 @@ static void mvneta_rx_error(struct mvneta_port *pp,
         }
 }

-/* Handle RX checksum offload */
-static void mvneta_rx_csum(struct mvneta_port *pp,
-                           struct mvneta_rx_desc *rx_desc,
+/* Handle RX checksum offload based on the descriptor's status */
+static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
                            struct sk_buff *skb)
 {
-        if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
-            (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+        if ((status & MVNETA_RXD_L3_IP4) &&
+            (status & MVNETA_RXD_L4_CSUM_OK)) {
                 skb->csum = 0;
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
                 return;
@@ -1246,13 +1279,16 @@ static void mvneta_rx_csum(struct mvneta_port *pp,
         skb->ip_summed = CHECKSUM_NONE;
 }

-/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+/* Return tx queue pointer (find last set bit) according to <cause> returned
+ * from tx_done reg. <cause> must not be null. The return value is always a
+ * valid queue for matching the first one found in <cause>.
+ */
 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
                                                      u32 cause)
 {
         int queue = fls(cause) - 1;

-        return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+        return &pp->txqs[queue];
 }

 /* Free tx queue skbuffs */
|
@@ -1278,15 +1314,16 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
|
|
|
}
|
|
|
|
|
|
/* Handle end of transmission */
|
|
|
-static int mvneta_txq_done(struct mvneta_port *pp,
|
|
|
+static void mvneta_txq_done(struct mvneta_port *pp,
|
|
|
struct mvneta_tx_queue *txq)
|
|
|
{
|
|
|
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
|
|
|
int tx_done;
|
|
|
|
|
|
tx_done = mvneta_txq_sent_desc_proc(pp, txq);
|
|
|
- if (tx_done == 0)
|
|
|
- return tx_done;
|
|
|
+ if (!tx_done)
|
|
|
+ return;
|
|
|
+
|
|
|
mvneta_txq_bufs_free(pp, txq, tx_done);
|
|
|
|
|
|
txq->count -= tx_done;
|
|
@@ -1295,8 +1332,22 @@ static int mvneta_txq_done(struct mvneta_port *pp,
|
|
|
if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
|
|
|
netif_tx_wake_queue(nq);
|
|
|
}
|
|
|
+}
|
|
|
|
|
|
- return tx_done;
|
|
|
+static void *mvneta_frag_alloc(const struct mvneta_port *pp)
|
|
|
+{
|
|
|
+ if (likely(pp->frag_size <= PAGE_SIZE))
|
|
|
+ return netdev_alloc_frag(pp->frag_size);
|
|
|
+ else
|
|
|
+ return kmalloc(pp->frag_size, GFP_ATOMIC);
|
|
|
+}
|
|
|
+
|
|
|
+static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
|
|
|
+{
|
|
|
+ if (likely(pp->frag_size <= PAGE_SIZE))
|
|
|
+ put_page(virt_to_head_page(data));
|
|
|
+ else
|
|
|
+ kfree(data);
|
|
|
}
|
|
|
|
|
|
/* Refill processing */
|
|
@@ -1305,22 +1356,21 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
|
|
|
|
|
|
{
|
|
|
dma_addr_t phys_addr;
|
|
|
- struct sk_buff *skb;
|
|
|
+ void *data;
|
|
|
|
|
|
- skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
|
|
|
- if (!skb)
|
|
|
+ data = mvneta_frag_alloc(pp);
|
|
|
+ if (!data)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
- phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
|
|
|
+ phys_addr = dma_map_single(pp->dev->dev.parent, data,
|
|
|
MVNETA_RX_BUF_SIZE(pp->pkt_size),
|
|
|
DMA_FROM_DEVICE);
|
|
|
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
|
|
|
- dev_kfree_skb(skb);
|
|
|
+ mvneta_frag_free(pp, data);
|
|
|
return -ENOMEM;
|
|
|
}
|
|
|
|
|
|
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
|
|
|
-
|
|
|
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -1374,9 +1424,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
         rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
         for (i = 0; i < rxq->size; i++) {
                 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
-                struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+                void *data = (void *)rx_desc->buf_cookie;

-                dev_kfree_skb_any(skb);
+                mvneta_frag_free(pp, data);
                 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
                                  MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
         }
@@ -1391,6 +1441,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 {
         struct net_device *dev = pp->dev;
         int rx_done, rx_filled;
+        u32 rcvd_pkts = 0;
+        u32 rcvd_bytes = 0;

         /* Get number of received packets */
         rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -1405,53 +1457,89 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
         while (rx_done < rx_todo) {
                 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
                 struct sk_buff *skb;
+                unsigned char *data;
                 u32 rx_status;
                 int rx_bytes, err;

-                prefetch(rx_desc);
                 rx_done++;
                 rx_filled++;
                 rx_status = rx_desc->status;
-                skb = (struct sk_buff *)rx_desc->buf_cookie;
+                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+                data = (unsigned char *)rx_desc->buf_cookie;

-                if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                     (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+                err_drop_frame:
                         dev->stats.rx_errors++;
                         mvneta_rx_error(pp, rx_desc);
-                        mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
-                                            (u32)skb);
+                        /* leave the descriptor untouched */
                         continue;
                 }

-                dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+                if (rx_bytes <= rx_copybreak) {
+                        /* better copy a small frame and not unmap the DMA region */
+                        skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+                        if (unlikely(!skb))
+                                goto err_drop_frame;
+
+                        dma_sync_single_range_for_cpu(dev->dev.parent,
+                                                      rx_desc->buf_phys_addr,
+                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
+                                                      rx_bytes,
+                                                      DMA_FROM_DEVICE);
+                        memcpy(skb_put(skb, rx_bytes),
+                               data + MVNETA_MH_SIZE + NET_SKB_PAD,
+                               rx_bytes);
+
+                        skb->protocol = eth_type_trans(skb, dev);
+                        mvneta_rx_csum(pp, rx_status, skb);
+                        napi_gro_receive(&pp->napi, skb);
+
+                        rcvd_pkts++;
+                        rcvd_bytes += rx_bytes;
+
+                        /* leave the descriptor and buffer untouched */
+                        continue;
+                }
+
+                skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+                if (!skb)
+                        goto err_drop_frame;
+
+                dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
                                  MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

-                rx_bytes = rx_desc->data_size -
-                           (ETH_FCS_LEN + MVNETA_MH_SIZE);
-                u64_stats_update_begin(&pp->rx_stats.syncp);
-                pp->rx_stats.packets++;
-                pp->rx_stats.bytes += rx_bytes;
-                u64_stats_update_end(&pp->rx_stats.syncp);
+                rcvd_pkts++;
+                rcvd_bytes += rx_bytes;

                 /* Linux processing */
-                skb_reserve(skb, MVNETA_MH_SIZE);
+                skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
                 skb_put(skb, rx_bytes);

                 skb->protocol = eth_type_trans(skb, dev);

-                mvneta_rx_csum(pp, rx_desc, skb);
+                mvneta_rx_csum(pp, rx_status, skb);

                 napi_gro_receive(&pp->napi, skb);

                 /* Refill processing */
                 err = mvneta_rx_refill(pp, rx_desc);
                 if (err) {
-                        netdev_err(pp->dev, "Linux processing - Can't refill\n");
+                        netdev_err(dev, "Linux processing - Can't refill\n");
                         rxq->missed++;
                         rx_filled--;
                 }
         }

+        if (rcvd_pkts) {
+                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+                u64_stats_update_begin(&stats->syncp);
+                stats->rx_packets += rcvd_pkts;
+                stats->rx_bytes += rcvd_bytes;
+                u64_stats_update_end(&stats->syncp);
+        }
+
         /* Update rxq management counters */
         mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

@@ -1582,25 +1670,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)

 out:
         if (frags > 0) {
-                u64_stats_update_begin(&pp->tx_stats.syncp);
-                pp->tx_stats.packets++;
-                pp->tx_stats.bytes += skb->len;
-                u64_stats_update_end(&pp->tx_stats.syncp);
+                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

+                u64_stats_update_begin(&stats->syncp);
+                stats->tx_packets++;
+                stats->tx_bytes += skb->len;
+                u64_stats_update_end(&stats->syncp);
         } else {
                 dev->stats.tx_dropped++;
                 dev_kfree_skb_any(skb);
         }

-        if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
-                mvneta_txq_done(pp, txq);
-
-        /* If after calling mvneta_txq_done, count equals
-         * frags, we need to set the timer
-         */
-        if (txq->count == frags && frags > 0)
-                mvneta_add_tx_done_timer(pp);
-
         return NETDEV_TX_OK;
 }

@@ -1620,33 +1700,26 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
         txq->txq_get_index = 0;
 }

-/* handle tx done - called from tx done timer callback */
-static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
-                              int *tx_todo)
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
+ */
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
 {
         struct mvneta_tx_queue *txq;
-        u32 tx_done = 0;
         struct netdev_queue *nq;

-        *tx_todo = 0;
-        while (cause_tx_done != 0) {
+        while (cause_tx_done) {
                 txq = mvneta_tx_done_policy(pp, cause_tx_done);
-                if (!txq)
-                        break;

                 nq = netdev_get_tx_queue(pp->dev, txq->id);
                 __netif_tx_lock(nq, smp_processor_id());

-                if (txq->count) {
-                        tx_done += mvneta_txq_done(pp, txq);
-                        *tx_todo += txq->count;
-                }
+                if (txq->count)
+                        mvneta_txq_done(pp, txq);

                 __netif_tx_unlock(nq);
                 cause_tx_done &= ~((1 << txq->id));
         }
-
-        return tx_done;
 }

 /* Compute crc8 of the specified address, using a unique algorithm ,
@@ -1876,14 +1949,20 @@ static int mvneta_poll(struct napi_struct *napi, int budget)

         /* Read cause register */
         cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
-                MVNETA_RX_INTR_MASK(rxq_number);
+                (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+
+        /* Release Tx descriptors */
+        if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+                mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
+                cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+        }

         /* For the case where the last mvneta_poll did not process all
          * RX packets
          */
         cause_rx_tx |= pp->cause_rx_tx;
         if (rxq_number > 1) {
-                while ((cause_rx_tx != 0) && (budget > 0)) {
+                while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
                         int count;
                         struct mvneta_rx_queue *rxq;
                         /* get rx queue number from cause_rx_tx */
@@ -1915,7 +1994,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
                 napi_complete(napi);
                 local_irq_save(flags);
                 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                            MVNETA_RX_INTR_MASK(rxq_number));
+                            MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
                 local_irq_restore(flags);
         }

@@ -1923,56 +2002,19 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
         return rx_done;
 }

-/* tx done timer callback */
-static void mvneta_tx_done_timer_callback(unsigned long data)
-{
-        struct net_device *dev = (struct net_device *)data;
-        struct mvneta_port *pp = netdev_priv(dev);
-        int tx_done = 0, tx_todo = 0;
-
-        if (!netif_running(dev))
-                return ;
-
-        clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
-        tx_done = mvneta_tx_done_gbe(pp,
-                                     (((1 << txq_number) - 1) &
-                                      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
-                                     &tx_todo);
-        if (tx_todo > 0)
-                mvneta_add_tx_done_timer(pp);
-}
-
 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                            int num)
 {
-        struct net_device *dev = pp->dev;
         int i;

         for (i = 0; i < num; i++) {
-                struct sk_buff *skb;
-                struct mvneta_rx_desc *rx_desc;
-                unsigned long phys_addr;
-
-                skb = dev_alloc_skb(pp->pkt_size);
-                if (!skb) {
-                        netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
+                memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
+                if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+                        netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
                                    __func__, rxq->id, i, num);
                         break;
                 }
-
-                rx_desc = rxq->descs + i;
-                memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
-                phys_addr = dma_map_single(dev->dev.parent, skb->head,
-                                           MVNETA_RX_BUF_SIZE(pp->pkt_size),
-                                           DMA_FROM_DEVICE);
-                if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
-                        dev_kfree_skb(skb);
-                        break;
-                }
-
-                mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
         }

         /* Add this number of RX descriptors as non occupied (ready to
@@ -2192,7 +2234,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)

         /* Unmask interrupts */
         mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                    MVNETA_RX_INTR_MASK(rxq_number));
+                    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));

         phy_start(pp->phy_dev);
         netif_tx_start_all_queues(pp->dev);
@@ -2225,16 +2267,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
         mvneta_rx_reset(pp);
 }

-/* tx timeout callback - display a message and stop/start the network device */
-static void mvneta_tx_timeout(struct net_device *dev)
-{
-        struct mvneta_port *pp = netdev_priv(dev);
-
-        netdev_info(dev, "tx timeout\n");
-        mvneta_stop_dev(pp);
-        mvneta_start_dev(pp);
-}
-
 /* Return positive if MTU is valid */
 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
 {
@@ -2282,6 +2314,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
         mvneta_cleanup_rxqs(pp);

         pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

         ret = mvneta_setup_rxqs(pp);
         if (ret) {
@@ -2429,6 +2463,8 @@ static int mvneta_open(struct net_device *dev)
         mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

         pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

         ret = mvneta_setup_rxqs(pp);
         if (ret)
@@ -2478,8 +2514,6 @@ static int mvneta_stop(struct net_device *dev)
         free_irq(dev->irq, pp);
         mvneta_cleanup_rxqs(pp);
         mvneta_cleanup_txqs(pp);
-        del_timer(&pp->tx_done_timer);
-        clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

         return 0;
 }
@@ -2615,7 +2649,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
         .ndo_set_rx_mode = mvneta_set_rx_mode,
         .ndo_set_mac_address = mvneta_set_mac_addr,
         .ndo_change_mtu = mvneta_change_mtu,
-        .ndo_tx_timeout = mvneta_tx_timeout,
         .ndo_get_stats64 = mvneta_get_stats64,
         .ndo_do_ioctl = mvneta_ioctl,
 };
@@ -2751,6 +2784,7 @@ static int mvneta_probe(struct platform_device *pdev)
         const char *mac_from;
         int phy_mode;
         int err;
+        int cpu;

         /* Our multiqueue support is not complete, so for now, only
          * allow the usage of the first RX queue
@@ -2792,9 +2826,6 @@ static int mvneta_probe(struct platform_device *pdev)

         pp = netdev_priv(dev);

-        u64_stats_init(&pp->tx_stats.syncp);
-        u64_stats_init(&pp->rx_stats.syncp);
-
         pp->weight = MVNETA_RX_POLL_WEIGHT;
         pp->phy_node = phy_node;
         pp->phy_interface = phy_mode;
@@ -2813,6 +2844,19 @@ static int mvneta_probe(struct platform_device *pdev)
                 goto err_clk;
         }

+        /* Alloc per-cpu stats */
+        pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+        if (!pp->stats) {
+                err = -ENOMEM;
+                goto err_unmap;
+        }
+
+        for_each_possible_cpu(cpu) {
+                struct mvneta_pcpu_stats *stats;
+                stats = per_cpu_ptr(pp->stats, cpu);
+                u64_stats_init(&stats->syncp);
+        }
+
         dt_mac_addr = of_get_mac_address(dn);
         if (dt_mac_addr) {
                 mac_from = "device tree";
@@ -2828,11 +2872,6 @@ static int mvneta_probe(struct platform_device *pdev)
                 }
         }

-        pp->tx_done_timer.data = (unsigned long)dev;
-        pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
-        init_timer(&pp->tx_done_timer);
-        clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
         pp->tx_ring_size = MVNETA_MAX_TXD;
         pp->rx_ring_size = MVNETA_MAX_RXD;

@@ -2842,7 +2881,7 @@ static int mvneta_probe(struct platform_device *pdev)
         err = mvneta_init(pp, phy_addr);
         if (err < 0) {
                 dev_err(&pdev->dev, "can't init eth hal\n");
-                goto err_unmap;
+                goto err_free_stats;
         }
         mvneta_port_power_up(pp, phy_mode);

@@ -2872,6 +2911,8 @@ static int mvneta_probe(struct platform_device *pdev)

 err_deinit:
         mvneta_deinit(pp);
+err_free_stats:
+        free_percpu(pp->stats);
 err_unmap:
         iounmap(pp->base);
 err_clk:
@@ -2892,6 +2933,7 @@ static int mvneta_remove(struct platform_device *pdev)
         unregister_netdev(dev);
         mvneta_deinit(pp);
         clk_disable_unprepare(pp->clk);
+        free_percpu(pp->stats);
         iounmap(pp->base);
         irq_dispose_mapping(dev->irq);
         free_netdev(dev);
@@ -2924,3 +2966,4 @@ module_param(rxq_number, int, S_IRUGO);
 module_param(txq_number, int, S_IRUGO);

 module_param(rxq_def, int, S_IRUGO);
+module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);