@@ -97,14 +97,18 @@ void riscv_software_interrupt(void)
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
-        int i;
+        int cpuid, hartid;
+        struct cpumask hartid_mask;
 
+        cpumask_clear(&hartid_mask);
         mb();
-        for_each_cpu(i, to_whom)
-                set_bit(operation, &ipi_data[i].bits);
-
+        for_each_cpu(cpuid, to_whom) {
+                set_bit(operation, &ipi_data[cpuid].bits);
+                hartid = cpuid_to_hartid_map(cpuid);
+                cpumask_set_cpu(hartid, &hartid_mask);
+        }
         mb();
-        sbi_send_ipi(cpumask_bits(to_whom));
+        sbi_send_ipi(cpumask_bits(&hartid_mask));
 }
 
 void arch_send_call_function_ipi_mask(struct cpumask *mask)
@@ -146,7 +150,7 @@ void smp_send_reschedule(int cpu)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
         unsigned int cpu;
-        cpumask_t others, *mask;
+        cpumask_t others, hmask, *mask;
 
         preempt_disable();
 
@@ -164,9 +168,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
          */
         cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
         local |= cpumask_empty(&others);
-        if (mm != current->active_mm || !local)
-                sbi_remote_fence_i(others.bits);
-        else {
+        if (mm != current->active_mm || !local) {
+                cpumask_clear(&hmask);
+                riscv_cpuid_to_hartid_mask(&others, &hmask);
+                sbi_remote_fence_i(hmask.bits);
+        } else {
                 /*
                  * It's assumed that at least one strongly ordered operation is
                  * performed on this hart between setting a hart's cpumask bit
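Both hunks hinge on the distinction between Linux's logical CPU numbers and the hardware thread IDs (hartids) that the SBI firmware interface works with. As a rough sketch of the translation layer the patch relies on — the helper names cpuid_to_hartid_map() and riscv_cpuid_to_hartid_mask() appear in the patch itself, but the bodies below are illustrative, assuming a boot-time __cpuid_to_hartid_map[] table rather than quoting the kernel's exact definitions:

/*
 * Illustrative sketch only; assumes __cpuid_to_hartid_map[] is filled
 * in during early SMP bring-up from each hart's hardware ID. The real
 * definitions live in the RISC-V arch code.
 */
extern unsigned long __cpuid_to_hartid_map[NR_CPUS];

/* Logical CPU number -> hardware thread ID (hartid). */
#define cpuid_to_hartid_map(cpu)        __cpuid_to_hartid_map[(cpu)]

/*
 * Translate a cpumask indexed by logical CPU number into one indexed
 * by hartid, the numbering the SBI expects. Matching the usage in the
 * patch above, the caller is responsible for clearing *out first.
 */
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
        int cpu;

        for_each_cpu(cpu, in)
                cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

With a mapping along these lines in place, send_ipi_message() and flush_icache_mm() can keep using logical CPU numbers internally (for ipi_data[] and mm_cpumask()) and convert to hartids only at the SBI boundary.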