@@ -197,51 +197,65 @@ int __init setup_profiling_timer(unsigned int multiplier)
 /*              Inter Processor Interrupt Handling                           */
 /*****************************************************************************/
 
-/*
- * structures for inter-processor calls
- * A Collection of single bit ipi messages
- *
- */
-
-/*
- * TODO_rajesh investigate tlb message types.
- * IPI Timer not needed because each ARC has an individual Interrupting Timer
- */
 enum ipi_msg_type {
-        IPI_NOP = 0,
+        IPI_EMPTY = 0,
         IPI_RESCHEDULE = 1,
         IPI_CALL_FUNC,
-        IPI_CPU_STOP
+        IPI_CPU_STOP,
 };
 
-struct ipi_data {
-        unsigned long bits;
-};
+/*
+ * In arches with IRQ for each msg type (above), receiver can use IRQ-id to
+ * figure out what msg was sent. For those which don't (ARC has dedicated IPI
+ * IRQ), the msg-type needs to be conveyed via per-cpu data
+ */
 
-static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+static DEFINE_PER_CPU(unsigned long, ipi_data);
 
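To make the per-cpu encoding concrete: each pending msg type occupies one bit of the ipi_data word, and since IPI_EMPTY = 0 is never sent, a zero word always means "nothing pending". A standalone sketch (an editor's illustration, not part of the patch):

#include <stdio.h>

enum ipi_msg_type {
        IPI_EMPTY = 0,
        IPI_RESCHEDULE = 1,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};

int main(void)
{
        unsigned long pending = 0;

        /* Two msgs pending at once, each in its own bit */
        pending |= 1UL << IPI_RESCHEDULE;
        pending |= 1UL << IPI_CALL_FUNC;

        printf("pending = 0x%lx\n", pending);   /* prints 0x6 */
        return 0;
}
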
-static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
 {
+        unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
+        unsigned long old, new;
         unsigned long flags;
-        unsigned int cpu;
+
+        pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
 
         local_irq_save(flags);
 
-        for_each_cpu(cpu, callmap) {
-                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-                set_bit(msg, &ipi->bits);
-        }
+        /*
+         * Atomically write new msg bit (in case others are writing too),
+         * and read back old value
+         */
+        do {
+                new = old = *ipi_data_ptr;
+                new |= 1U << msg;
+        } while (cmpxchg(ipi_data_ptr, old, new) != old);
 
-        /* Call the platform specific cross-CPU call function */
-        if (plat_smp_ops.ipi_send)
-                plat_smp_ops.ipi_send((void *)callmap);
+        /*
+         * Call the platform specific IPI kick function, but avoid it if
+         * possible: only do so if there's no msg pending from other
+         * concurrent sender(s). Otherwise, the receiver will see this msg
+         * as well when it takes the IPI corresponding to that msg. This
+         * holds even if it is already in the IPI handler, because !@old
+         * means it has not yet dequeued the msg(s), so the @new msg can be
+         * a free-loader.
+         */
+        if (plat_smp_ops.ipi_send && !old)
+                plat_smp_ops.ipi_send(cpu);
 
         local_irq_restore(flags);
 }
 
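The sender-side protocol above is the core of the patch: atomically OR the msg bit into the target CPU's word, and skip the hardware kick whenever the word was already non-empty. Below is a minimal userspace sketch of the same protocol using C11 atomics; kick_receiver() and pending_msgs are hypothetical stand-ins for plat_smp_ops.ipi_send() and the per-cpu ipi_data (an illustration, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Models the per-cpu ipi_data word of a single receiving CPU. */
static _Atomic unsigned long pending_msgs;

/* Hypothetical stand-in for plat_smp_ops.ipi_send(cpu). */
static void kick_receiver(void)
{
        puts("kick: raising the IPI line");
}

static void send_msg(unsigned int msg)
{
        /* fetch-or returns the *old* value: exactly what the cmpxchg
         * loop in ipi_send_msg_one() computes by hand */
        unsigned long old = atomic_fetch_or(&pending_msgs, 1UL << msg);

        /* Kick only if the word was empty; a non-zero old value means
         * an IPI is already in flight and the receiver will dequeue
         * this msg along with the earlier one(s). */
        if (!old)
                kick_receiver();
}

int main(void)
{
        send_msg(1);    /* word was 0: kicks */
        send_msg(2);    /* word was non-zero: free-loads, no kick */
        return 0;
}

The kernel version runs under local_irq_save() and uses a cmpxchg() retry loop rather than a fetch-or primitive, but the observable protocol is identical: the old value decides whether a kick is needed.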
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+        unsigned int cpu;
+
+        for_each_cpu(cpu, callmap)
+                ipi_send_msg_one(cpu, msg);
+}
+
 void smp_send_reschedule(int cpu)
 {
-        ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
+        ipi_send_msg_one(cpu, IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -254,7 +268,7 @@ void smp_send_stop(void)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-        ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC);
+        ipi_send_msg_one(cpu, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -265,33 +279,29 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 /*
  * ipi_cpu_stop - handle IPI from smp_send_stop()
  */
-static void ipi_cpu_stop(unsigned int cpu)
+static void ipi_cpu_stop(void)
 {
         machine_halt();
 }
 
-static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
+static inline void __do_IPI(unsigned long msg)
 {
-        unsigned long msg = 0;
+        switch (msg) {
+        case IPI_RESCHEDULE:
+                scheduler_ipi();
+                break;
 
-        do {
-                msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+        case IPI_CALL_FUNC:
+                generic_smp_call_function_interrupt();
+                break;
 
-                switch (msg) {
-                case IPI_RESCHEDULE:
-                        scheduler_ipi();
-                        break;
-
-                case IPI_CALL_FUNC:
-                        generic_smp_call_function_interrupt();
-                        break;
-
-                case IPI_CPU_STOP:
-                        ipi_cpu_stop(cpu);
-                        break;
-                }
-        } while (msg < BITS_PER_LONG);
+        case IPI_CPU_STOP:
+                ipi_cpu_stop();
+                break;
+
+        default:
+                pr_warn("IPI with unexpected msg %ld\n", msg);
+        }
 }
 
 /*
@@ -300,19 +310,25 @@ static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
  */
 irqreturn_t do_IPI(int irq, void *dev_id)
 {
-        int cpu = smp_processor_id();
-        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-        unsigned long ops;
+        unsigned long pending;
+
+        pr_debug("IPI [%ld] received on cpu %d\n",
+                 *this_cpu_ptr(&ipi_data), smp_processor_id());
 
         if (plat_smp_ops.ipi_clear)
-                plat_smp_ops.ipi_clear(cpu, irq);
+                plat_smp_ops.ipi_clear(irq);
 
         /*
-         * XXX: is this loop really needed
-         * And do we need to move ipi_clean inside
+         * "dequeue" the msg corresponding to this IPI (and possibly other
+         * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
          */
-        while ((ops = xchg(&ipi->bits, 0)) != 0)
-                __do_IPI(&ops, ipi, cpu);
+        pending = xchg(this_cpu_ptr(&ipi_data), 0);
+
+        do {
+                unsigned long msg = __ffs(pending);
+                __do_IPI(msg);
+                pending &= ~(1U << msg);
+        } while (pending);
 
         return IRQ_HANDLED;
 }
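On the receive side, the patch drains the whole per-cpu word with a single xchg() and then decodes one msg at a time via __ffs(). A matching userspace sketch, with the GCC builtin __builtin_ctzl() standing in for __ffs() and handle_one() for __do_IPI() (hypothetical names; an illustration under the same assumptions as the sender sketch above):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long pending_msgs;

/* Stand-in for __do_IPI(): dispatch one decoded msg number. */
static void handle_one(unsigned long msg)
{
        printf("handling msg %lu\n", msg);
}

/* Models do_IPI(): claim every queued msg, then peel off set bits. */
static void receive_ipi(void)
{
        /* Atomically take the whole word; a sender that ORs in a bit
         * after this point sees 0 and will kick again. */
        unsigned long pending = atomic_exchange(&pending_msgs, 0);

        while (pending) {
                /* __builtin_ctzl(x) = index of lowest set bit (__ffs) */
                unsigned long msg = __builtin_ctzl(pending);

                handle_one(msg);
                pending &= ~(1UL << msg);
        }
}

int main(void)
{
        atomic_fetch_or(&pending_msgs, (1UL << 1) | (1UL << 3));
        receive_ipi();  /* handles msg 1, then msg 3 */
        return 0;
}

Unlike the patch's do/while, the sketch tolerates an empty word; the kernel variant can assume at least one msg bit is set when the IPI fires, since __ffs() is undefined for 0.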