@@ -2797,10 +2797,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp)
 
 /* Rx/Tx queue initialization/cleanup methods */
 
-/* Create a specified RX queue */
-static int mvneta_rxq_init(struct mvneta_port *pp,
-			   struct mvneta_rx_queue *rxq)
-
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+			      struct mvneta_rx_queue *rxq)
 {
 	rxq->size = pp->rx_ring_size;
 
@@ -2813,6 +2811,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 
 	rxq->last_desc = rxq->size - 1;
 
+	return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+			       struct mvneta_rx_queue *rxq)
+{
 	/* Set Rx descriptors queue starting address */
 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
@@ -2836,6 +2840,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 		mvneta_rxq_short_pool_set(pp, rxq);
 		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
 	}
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+			   struct mvneta_rx_queue *rxq)
+
+{
+	int ret;
+
+	ret = mvneta_rxq_sw_init(pp, rxq);
+	if (ret < 0)
+		return ret;
+
+	mvneta_rxq_hw_init(pp, rxq);
 
 	return 0;
 }
@@ -2858,9 +2876,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 	rxq->descs_phys = 0;
 }
 
-/* Create and initialize a tx queue */
-static int mvneta_txq_init(struct mvneta_port *pp,
-			   struct mvneta_tx_queue *txq)
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+			      struct mvneta_tx_queue *txq)
 {
 	int cpu;
 
@@ -2873,7 +2890,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
 	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
 
-
 	/* Allocate memory for TX descriptors */
 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2883,14 +2899,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 
 	txq->last_desc = txq->size - 1;
 
-	/* Set maximum bandwidth for enabled TXQs */
-	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
-	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
-
-	/* Set Tx descriptors queue starting address */
-	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
-	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
-
 	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
 				    GFP_KERNEL);
 	if (!txq->tx_skb) {
@@ -2911,7 +2919,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 				  txq->descs, txq->descs_phys);
 		return -ENOMEM;
 	}
-	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
 	/* Setup XPS mapping */
 	if (txq_number > 1)
@@ -2924,9 +2931,38 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	return 0;
 }
 
+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+			       struct mvneta_tx_queue *txq)
+{
+	/* Set maximum bandwidth for enabled TXQs */
+	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+	/* Set Tx descriptors queue starting address */
+	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+			   struct mvneta_tx_queue *txq)
+{
+	int ret;
+
+	ret = mvneta_txq_sw_init(pp, txq);
+	if (ret < 0)
+		return ret;
+
+	mvneta_txq_hw_init(pp, txq);
+
+	return 0;
+}
+
 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
-static void mvneta_txq_deinit(struct mvneta_port *pp,
-			      struct mvneta_tx_queue *txq)
+static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq)
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
@@ -2947,7 +2983,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 	txq->last_desc = 0;
 	txq->next_desc_to_proc = 0;
 	txq->descs_phys = 0;
+}
 
+static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq)
+{
 	/* Set minimum bandwidth for disabled TXQs */
 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
@@ -2957,6 +2997,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
 }
 
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+			      struct mvneta_tx_queue *txq)
+{
+	mvneta_txq_sw_deinit(pp, txq);
+	mvneta_txq_hw_deinit(pp, txq);
+}
+
 /* Cleanup all Tx queues */
 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 {
@@ -4524,16 +4571,45 @@ static int mvneta_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int mvneta_suspend(struct device *device)
 {
+	int queue;
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
 
+	if (!netif_running(dev))
+		goto clean_exit;
+
+	if (!pp->neta_armada3700) {
+		spin_lock(&pp->lock);
+		pp->is_stopped = true;
+		spin_unlock(&pp->lock);
+
+		cpuhp_state_remove_instance_nocalls(online_hpstate,
+						    &pp->node_online);
+		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						    &pp->node_dead);
+	}
+
 	rtnl_lock();
-	if (netif_running(dev))
-		mvneta_stop(dev);
+	mvneta_stop_dev(pp);
 	rtnl_unlock();
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+		mvneta_rxq_drop_pkts(pp, rxq);
+	}
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+		mvneta_txq_hw_deinit(pp, txq);
+	}
+
+clean_exit:
 	netif_device_detach(dev);
 	clk_disable_unprepare(pp->clk_bus);
 	clk_disable_unprepare(pp->clk);
+
 	return 0;
 }
 
@@ -4542,7 +4618,7 @@ static int mvneta_resume(struct device *device)
 	struct platform_device *pdev = to_platform_device(device);
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
-	int err;
+	int err, queue;
 
 	clk_prepare_enable(pp->clk);
 	if (!IS_ERR(pp->clk_bus))
@@ -4564,12 +4640,38 @@ static int mvneta_resume(struct device *device)
 	}
 
 	netif_device_attach(dev);
-	rtnl_lock();
-	if (netif_running(dev)) {
-		mvneta_open(dev);
-		mvneta_set_rx_mode(dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+		rxq->next_desc_to_proc = 0;
+		mvneta_rxq_hw_init(pp, rxq);
+	}
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+		txq->next_desc_to_proc = 0;
+		mvneta_txq_hw_init(pp, txq);
 	}
+
+	if (!pp->neta_armada3700) {
+		spin_lock(&pp->lock);
+		pp->is_stopped = false;
+		spin_unlock(&pp->lock);
+		cpuhp_state_add_instance_nocalls(online_hpstate,
+						 &pp->node_online);
+		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						 &pp->node_dead);
+	}
+
+	rtnl_lock();
+	mvneta_start_dev(pp);
 	rtnl_unlock();
+	mvneta_set_rx_mode(dev);
 
 	return 0;
 }