@@ -17,16 +17,19 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bottom_half.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
 #include <linux/sched/signal.h>
 #include <linux/signal.h>
-#include <linux/hardirq.h>
 
 #include <asm/fpsimd.h>
 #include <asm/cputype.h>
+#include <asm/simd.h>
 
 #define FPEXC_IOF	(1 << 0)
 #define FPEXC_DZF	(1 << 1)
@@ -62,6 +65,13 @@
  * CPU currently contain the most recent userland FPSIMD state of the current
  * task.
  *
+ * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
+ * save the task's FPSIMD context back to task_struct from softirq context.
+ * To prevent this from racing with the manipulation of the task's FPSIMD state
+ * from task context and thereby corrupting the state, it is necessary to
+ * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
+ * flag with local_bh_disable() unless softirqs are already masked.
+ *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
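To make the rule in the comment added above concrete: any new task-context code in this file that touches current->thread.fpsimd_state or TIF_FOREIGN_FPSTATE is expected to follow the bracketing shown below. This is a minimal, hypothetical sketch for illustration only (the helper is invented and is not part of this patch); the hunks that follow apply exactly this pattern to the existing fpsimd functions.

/* Hypothetical helper, for illustration only -- not part of this patch. */
static void example_taint_fpsimd_state(void)
{
	/*
	 * Mask softirqs so that a softirq handler calling
	 * kernel_neon_begin() cannot save or invalidate the task's
	 * FPSIMD state while we are manipulating it.
	 */
	local_bh_disable();

	/* ... read or modify current->thread.fpsimd_state here ... */
	set_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}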
@@ -161,9 +171,14 @@ void fpsimd_flush_thread(void)
 {
 	if (!system_supports_fpsimd())
 		return;
+
+	local_bh_disable();
+
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
 	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+
+	local_bh_enable();
 }
 
 /*
@@ -174,10 +189,13 @@ void fpsimd_preserve_current_state(void)
 {
 	if (!system_supports_fpsimd())
 		return;
-	preempt_disable();
+
+	local_bh_disable();
+
 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
 		fpsimd_save_state(&current->thread.fpsimd_state);
-	preempt_enable();
+
+	local_bh_enable();
 }
 
 /*
@@ -189,15 +207,18 @@ void fpsimd_restore_current_state(void)
 {
 	if (!system_supports_fpsimd())
 		return;
-	preempt_disable();
+
+	local_bh_disable();
+
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		struct fpsimd_state *st = &current->thread.fpsimd_state;
 
 		fpsimd_load_state(st);
-		this_cpu_write(fpsimd_last_state, st);
+		__this_cpu_write(fpsimd_last_state, st);
 		st->cpu = smp_processor_id();
 	}
-	preempt_enable();
+
+	local_bh_enable();
 }
 
 /*
@@ -209,15 +230,18 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 {
 	if (!system_supports_fpsimd())
 		return;
-	preempt_disable();
+
+	local_bh_disable();
+
 	fpsimd_load_state(state);
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		struct fpsimd_state *st = &current->thread.fpsimd_state;
 
-		this_cpu_write(fpsimd_last_state, st);
+		__this_cpu_write(fpsimd_last_state, st);
 		st->cpu = smp_processor_id();
 	}
-	preempt_enable();
+
+	local_bh_enable();
 }
 
 /*
@@ -230,52 +254,122 @@ void fpsimd_flush_task_state(struct task_struct *t)
 
 #ifdef CONFIG_KERNEL_MODE_NEON
 
-static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
-static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
+DEFINE_PER_CPU(bool, kernel_neon_busy);
+EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);
 
 /*
  * Kernel-side NEON support functions
  */
-void kernel_neon_begin_partial(u32 num_regs)
+
+/*
+ * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
+ * context
+ *
+ * Must not be called unless may_use_simd() returns true.
+ * Task context in the FPSIMD registers is saved back to memory as necessary.
+ *
+ * A matching call to kernel_neon_end() must be made before returning from the
+ * calling context.
+ *
+ * The caller may freely use the FPSIMD registers until kernel_neon_end() is
+ * called.
+ */
+void kernel_neon_begin(void)
 {
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;
-	if (in_interrupt()) {
-		struct fpsimd_partial_state *s = this_cpu_ptr(
-			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
 
-		BUG_ON(num_regs > 32);
-		fpsimd_save_partial_state(s, roundup(num_regs, 2));
-	} else {
-		/*
-		 * Save the userland FPSIMD state if we have one and if we
-		 * haven't done so already. Clear fpsimd_last_state to indicate
-		 * that there is no longer userland FPSIMD state in the
-		 * registers.
-		 */
-		preempt_disable();
-		if (current->mm &&
-		    !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
-			fpsimd_save_state(&current->thread.fpsimd_state);
-		this_cpu_write(fpsimd_last_state, NULL);
-	}
+	BUG_ON(!may_use_simd());
+
+	local_bh_disable();
+
+	__this_cpu_write(kernel_neon_busy, true);
+
+	/* Save unsaved task fpsimd state, if any: */
+	if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+		fpsimd_save_state(&current->thread.fpsimd_state);
+
+	/* Invalidate any task state remaining in the fpsimd regs: */
+	__this_cpu_write(fpsimd_last_state, NULL);
+
+	preempt_disable();
+
+	local_bh_enable();
 }
-EXPORT_SYMBOL(kernel_neon_begin_partial);
+EXPORT_SYMBOL(kernel_neon_begin);
 
+/*
+ * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
+ *
+ * Must be called from a context in which kernel_neon_begin() was previously
+ * called, with no call to kernel_neon_end() in the meantime.
+ *
+ * The caller must not use the FPSIMD registers after this function is called,
+ * unless kernel_neon_begin() is called again in the meantime.
+ */
 void kernel_neon_end(void)
 {
+	bool busy;
+
 	if (!system_supports_fpsimd())
 		return;
-	if (in_interrupt()) {
-		struct fpsimd_partial_state *s = this_cpu_ptr(
-			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
-		fpsimd_load_partial_state(s);
-	} else {
-		preempt_enable();
-	}
+
+	busy = __this_cpu_xchg(kernel_neon_busy, false);
+	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */
+
+	preempt_enable();
 }
 EXPORT_SYMBOL(kernel_neon_end);
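The contract documented in the comments above amounts to the usual pattern for kernel-mode NEON users once this patch is applied: test may_use_simd(), bracket the NEON code with kernel_neon_begin()/kernel_neon_end(), and provide a non-NEON fallback. A hedged sketch of a hypothetical caller follows; the example_* functions are invented for illustration and are not part of this patch.

#include <linux/types.h>

#include <asm/neon.h>
#include <asm/simd.h>

/* Hypothetical helpers, assumed to exist for this sketch: */
void example_do_work_neon(u8 *dst, const u8 *src, int len);
void example_do_work_scalar(u8 *dst, const u8 *src, int len);

/* Use NEON when this context may claim the FPSIMD regs, else fall back. */
static void example_do_work(u8 *dst, const u8 *src, int len)
{
	if (may_use_simd()) {
		kernel_neon_begin();	/* claims the FPSIMD regs; preemption is disabled until _end() */
		example_do_work_neon(dst, src, len);
		kernel_neon_end();	/* the FPSIMD regs must not be used after this */
	} else {
		example_do_work_scalar(dst, src, len);
	}
}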
 
+static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
+static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
+
+/*
+ * EFI runtime services support functions
+ *
+ * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
+ * This means that for EFI (and only for EFI), we have to assume that FPSIMD
+ * is always used rather than being an optional accelerator.
+ *
+ * These functions provide the necessary support for ensuring FPSIMD
+ * save/restore in the contexts from which EFI is used.
+ *
+ * Do not use them for any other purpose -- if tempted to do so, you are
+ * either doing something wrong or you need to propose some refactoring.
+ */
+
+/*
+ * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
+ */
+void __efi_fpsimd_begin(void)
+{
+	if (!system_supports_fpsimd())
+		return;
+
+	WARN_ON(preemptible());
+
+	if (may_use_simd())
+		kernel_neon_begin();
+	else {
+		fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+		__this_cpu_write(efi_fpsimd_state_used, true);
+	}
+}
+
+/*
+ * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
+ */
+void __efi_fpsimd_end(void)
+{
+	if (!system_supports_fpsimd())
+		return;
+
+	if (__this_cpu_xchg(efi_fpsimd_state_used, false))
+		fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+	else
+		kernel_neon_end();
+}
+
 #endif /* CONFIG_KERNEL_MODE_NEON */
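For context only: the intended caller of the two EFI helpers above is the arm64 EFI runtime-services glue, which brackets each runtime service call while preemption is disabled. The sketch below is hypothetical (the wrapper function and the elided service call are invented); the real call sites live in the arm64 EFI support code, not in this file, and the helper declarations are assumed to be visible there.

/* Hypothetical EFI call wrapper, for illustration only. */
static void example_efi_runtime_call(void)
{
	preempt_disable();		/* __efi_fpsimd_begin() warns if preemptible */

	__efi_fpsimd_begin();		/* EFI may clobber the FPSIMD registers */
	/* ... make the EFI runtime service call here ... */
	__efi_fpsimd_end();		/* restores or hands back the FPSIMD regs */

	preempt_enable();
}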
 
 #ifdef CONFIG_CPU_PM