@@ -1038,6 +1038,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
 	}
 }
 
+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queues are unmasked, but actually only the ones
+	 * mapped to this CPU will be unmasked
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+		    MVNETA_RX_INTR_MASK_ALL |
+		    MVNETA_TX_INTR_MASK_ALL |
+		    MVNETA_MISCINTR_INTR_MASK);
+}
+
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queues are masked, but actually only the ones
+	 * mapped to this CPU will be masked
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
+
+static void mvneta_percpu_clear_intr_cause(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queues are cleared, but actually only the ones
+	 * mapped to this CPU will be cleared
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+}
+
 /* This method sets defaults to the NETA port:
  *	Clears interrupt Cause and Mask registers.
  *	Clears all MAC tables.
@@ -1055,14 +1092,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	int max_cpu = num_present_cpus();
 
 	/* Clear all Cause registers */
-	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
 
 	/* Mask all interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
 
 	/* Enable MBUS Retry bit16 */
@@ -2528,31 +2561,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 	return 0;
 }
 
-static void mvneta_percpu_unmask_interrupt(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	/* All the queue are unmasked, but actually only the ones
-	 * maped to this CPU will be unmasked
-	 */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-		    MVNETA_RX_INTR_MASK_ALL |
-		    MVNETA_TX_INTR_MASK_ALL |
-		    MVNETA_MISCINTR_INTR_MASK);
-}
-
-static void mvneta_percpu_mask_interrupt(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	/* All the queue are masked, but actually only the ones
-	 * maped to this CPU will be masked
-	 */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
-}
-
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
 	int cpu;
@@ -2603,13 +2611,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
 	mvneta_port_disable(pp);
 
 	/* Clear all ethernet port interrupts */
-	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
 
 	/* Mask all ethernet port interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 
 	mvneta_tx_reset(pp);
 	mvneta_rx_reset(pp);
@@ -2921,9 +2926,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 		}
 
 		/* Mask all ethernet port interrupts */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 		napi_enable(&port->napi);
 
@@ -2938,14 +2941,8 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 		 */
 		mvneta_percpu_elect(pp);
 
-		/* Unmask all ethernet port interrupts, as this
-		 * notifier is called for each CPU then the CPU to
-		 * Queue mapping is applied
-		 */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			    MVNETA_RX_INTR_MASK(rxq_number) |
-			    MVNETA_TX_INTR_MASK(txq_number) |
-			    MVNETA_MISCINTR_INTR_MASK);
+		/* Unmask all ethernet port interrupts */
+		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
 			    MVNETA_CAUSE_LINK_CHANGE |
@@ -2956,9 +2953,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE_FROZEN:
 		netif_tx_stop_all_queues(pp->dev);
 		/* Mask all ethernet port interrupts */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 
 		napi_synchronize(&port->napi);
 		napi_disable(&port->napi);
@@ -2974,10 +2969,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 		/* Check if a new CPU must be elected now this on is down */
 		mvneta_percpu_elect(pp);
 		/* Unmask all ethernet port interrupts */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			    MVNETA_RX_INTR_MASK(rxq_number) |
-			    MVNETA_TX_INTR_MASK(txq_number) |
-			    MVNETA_MISCINTR_INTR_MASK);
+		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
 			    MVNETA_CAUSE_LINK_CHANGE |