@@ -342,6 +342,60 @@
.Lend_\@:
.endm

+/*
+ * Switch back from the kernel stack to the entry stack.
+ *
+ * The %esp register must point to pt_regs on the task stack. It will
+ * first calculate the size of the stack-frame to copy, depending on
+ * whether we return to VM86 mode or not. It then uses 'rep movsl'
+ * to copy the contents of the stack over to the entry stack.
+ *
+ * We must be very careful here, as we can't trust the contents of the
+ * task-stack once we have switched to the entry-stack. When an NMI
+ * happens while on the entry-stack, the NMI handler will switch back
+ * to the top of the task stack, overwriting the stack-frame we are
+ * about to copy. Therefore we switch the stack only after everything
+ * is copied over.
+ */
+.macro SWITCH_TO_ENTRY_STACK
+
+ ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+ /* Bytes to copy */
+ movl $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+ testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
+ jz .Lcopy_pt_regs_\@
+
+ /* Additional 4 registers to copy when returning to VM86 mode */
+ addl $(4 * 4), %ecx
+
+.Lcopy_pt_regs_\@:
+#endif
+
+ /* Initialize source and destination for movsl */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+ subl %ecx, %edi
+ movl %esp, %esi
+
+ /* Save future stack pointer in %ebx */
+ movl %edi, %ebx
+
+ /* Copy over the stack-frame */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /*
+ * Switch to entry-stack - needs to happen after everything is
+ * copied because the NMI handler will overwrite the task-stack
+ * when on entry-stack
+ */
+ movl %ebx, %esp
+
+.Lend_\@:
+.endm
+
/*
* %eax: prev task
* %edx: next task
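
For readers following along in C rather than asm: the macro above boils down to "compute the frame size, copy the frame to just below the per-CPU entry-stack top published in TSS.sp0, and only then move %esp". A minimal sketch of that logic, with made-up names (switch_to_entry_stack, tss_sp0, task_sp, ptregs_size) standing in for the real per-CPU accessors and asm-offsets constants:

#include <string.h>

/*
 * Rough C model of SWITCH_TO_ENTRY_STACK.  'tss_sp0' stands in for
 * PER_CPU_VAR(cpu_tss_rw + TSS_sp0), 'task_sp' for %esp pointing at
 * pt_regs, 'ptregs_size' for the asm-offsets constant PTREGS_SIZE.
 * Returns the value the caller would load into %esp.
 */
unsigned long switch_to_entry_stack(unsigned long tss_sp0,
				    unsigned long task_sp,
				    unsigned long ptregs_size,
				    int vm86_return)
{
	/* Bytes to copy: pt_regs, plus four extra slots for a VM86 return */
	unsigned long bytes = ptregs_size + (vm86_return ? 4 * 4 : 0);

	/* Destination: carve the frame out just below the entry-stack top */
	unsigned long new_sp = tss_sp0 - bytes;

	/* 'rep movsl' equivalent: copy the whole frame first ... */
	memcpy((void *)new_sp, (void *)task_sp, bytes);

	/*
	 * ... and only then hand back the new stack pointer.  If %esp were
	 * switched before the copy, an NMI arriving while on the entry
	 * stack could reuse (and clobber) the task-stack frame we still
	 * need to copy.
	 */
	return new_sp;
}

The real macro parks the future stack pointer in %ebx before the copy because %edi is consumed (advanced) by 'rep movsl'.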
@@ -581,25 +635,45 @@ ENTRY(entry_SYSENTER_32)

/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
+
+ /*
+ * Set up the entry stack - we keep the pointer in %eax and do the
+ * switch after almost all user-state is restored.
+ */
+
+ /* Load entry stack pointer and allocate frame for eflags/eax */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
+ subl $(2*4), %eax
+
+ /* Copy eflags and eax to entry stack */
+ movl PT_EFLAGS(%esp), %edi
+ movl PT_EAX(%esp), %esi
+ movl %edi, (%eax)
+ movl %esi, 4(%eax)
+
+ /* Restore user registers and segments */
movl PT_EIP(%esp), %edx /* pt_regs->ip */
movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1: mov PT_FS(%esp), %fs
PTGS_TO_GS
+
popl %ebx /* pt_regs->bx */
addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
popl %esi /* pt_regs->si */
popl %edi /* pt_regs->di */
popl %ebp /* pt_regs->bp */
- popl %eax /* pt_regs->ax */
+
+ /* Switch to entry stack */
+ movl %eax, %esp

/*
* Restore all flags except IF. (We restore IF separately because
* STI gives a one-instruction window in which we won't be interrupted,
* whereas POPF does not.)
*/
- addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */
btrl $X86_EFLAGS_IF_BIT, (%esp)
popfl
+ popl %eax

/*
* Return back to the vDSO, which will pop ecx and edx.
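
In the SYSEXIT path above, the frame that moves to the entry stack is only two words: the saved EFLAGS and EAX are stashed just below TSS.sp0 while %esp still points at pt_regs, every other user register is restored from the task stack, and only then does %esp switch so that the final btrl/popfl/popl %eax operate on the entry-stack copy. A loose C picture of that two-slot frame (struct and function names are illustrative, not kernel API):

/* Illustrative layout of the two-word frame built below TSS.sp0. */
struct sysexit_frame {
	unsigned long flags;	/* written via movl %edi, (%eax); IF is cleared
				 * in this copy by btrl before popfl runs */
	unsigned long ax;	/* written via movl %esi, 4(%eax); restored by
				 * the final popl %eax */
};

/* Model of 'subl $(2*4), %eax' plus the two stores; returns the future %esp. */
unsigned long build_sysexit_frame(unsigned long tss_sp0,
				  unsigned long pt_flags,
				  unsigned long pt_ax)
{
	struct sysexit_frame *f = (struct sysexit_frame *)(tss_sp0 - sizeof(*f));

	f->flags = pt_flags;
	f->ax    = pt_ax;

	return (unsigned long)f;
}

Only EFLAGS and EAX need this treatment: everything else is already restored before the switch, while EFLAGS must be popped after the switch and %eax is the register left holding the entry-stack pointer until the very end.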
@@ -668,6 +742,7 @@ ENTRY(entry_INT80_32)

restore_all:
TRACE_IRQS_IRET
+ SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
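
With this hunk, returns to user space that go through restore_all (interrupt, exception and int $0x80 exits) now perform the copy-and-switch right before the espfix check and the final iret. As a quick standalone check of the copy-size arithmetic used by the macro (the PTREGS_SIZE value of 17 dwords is purely illustrative; the real constant comes from asm-offsets):

#include <stdio.h>

int main(void)
{
	/* Illustrative pt_regs size on 32-bit: 17 slots of 4 bytes. */
	unsigned int ptregs_size = 17 * 4;

	for (int vm86 = 0; vm86 <= 1; vm86++) {
		/* addl $(4 * 4), %ecx when returning to VM86 mode */
		unsigned int bytes = ptregs_size + (vm86 ? 4 * 4 : 0);

		/* shrl $2, %ecx: rep movsl copies dwords, not bytes */
		printf("vm86=%d: copy %u bytes = %u movsl iterations\n",
		       vm86, bytes, bytes >> 2);
	}
	return 0;
}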