@@ -22,23 +22,44 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 
 #include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-/* A collection of single bit ipi messages. */
-static struct {
-	unsigned long bits ____cacheline_aligned;
-} ipi_data[NR_CPUS] __cacheline_aligned;
-
 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_MAX
 };
 
+/* A collection of single bit ipi messages. */
+static struct {
+	unsigned long stats[IPI_MAX] ____cacheline_aligned;
+	unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+int riscv_hartid_to_cpuid(int hartid)
+{
+	int i = -1;
+
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpuid_to_hartid_map(i) == hartid)
+			return i;
+
+	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+	BUG();
+	return i;
+}
+
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
+{
+	int cpu;
+
+	for_each_cpu(cpu, in)
+		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
+}
 /* Unsupported */
 int setup_profiling_timer(unsigned int multiplier)
 {
@@ -48,6 +69,7 @@ int setup_profiling_timer(unsigned int multiplier)
 void riscv_software_interrupt(void)
 {
 	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+	unsigned long *stats = ipi_data[smp_processor_id()].stats;
 
 	/* Clear pending IPI */
 	csr_clear(sip, SIE_SSIE);
@@ -62,11 +84,15 @@ void riscv_software_interrupt(void)
 		if (ops == 0)
 			return;
 
-		if (ops & (1 << IPI_RESCHEDULE))
+		if (ops & (1 << IPI_RESCHEDULE)) {
+			stats[IPI_RESCHEDULE]++;
 			scheduler_ipi();
+		}
 
-		if (ops & (1 << IPI_CALL_FUNC))
+		if (ops & (1 << IPI_CALL_FUNC)) {
+			stats[IPI_CALL_FUNC]++;
 			generic_smp_call_function_interrupt();
+		}
 
 		BUG_ON((ops >> IPI_MAX) != 0);
 
@@ -78,14 +104,36 @@ void riscv_software_interrupt(void)
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
-	int i;
+	int cpuid, hartid;
+	struct cpumask hartid_mask;
 
+	cpumask_clear(&hartid_mask);
 	mb();
-	for_each_cpu(i, to_whom)
-		set_bit(operation, &ipi_data[i].bits);
-
+	for_each_cpu(cpuid, to_whom) {
+		set_bit(operation, &ipi_data[cpuid].bits);
+		hartid = cpuid_to_hartid_map(cpuid);
+		cpumask_set_cpu(hartid, &hartid_mask);
+	}
 	mb();
-	sbi_send_ipi(cpumask_bits(to_whom));
+	sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static const char * const ipi_names[] = {
+	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
+	[IPI_CALL_FUNC]		= "Function call interrupts",
+};
+
+void show_ipi_stats(struct seq_file *p, int prec)
+{
+	unsigned int cpu, i;
+
+	for (i = 0; i < IPI_MAX; i++) {
+		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+			   prec >= 4 ? " " : "");
+		for_each_online_cpu(cpu)
+			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+		seq_printf(p, " %s\n", ipi_names[i]);
+	}
 }
 
 void arch_send_call_function_ipi_mask(struct cpumask *mask)
@@ -127,7 +175,7 @@ void smp_send_reschedule(int cpu)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, *mask;
+	cpumask_t others, hmask, *mask;
 
 	preempt_disable();
 
@@ -145,9 +193,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local)
-		sbi_remote_fence_i(others.bits);
-	else {
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit