x86/vdso/32: Save extra registers in the INT80 vsyscall path

The goal is to integrate the SYSENTER and SYSCALL32 entry paths
with the INT80 path.  SYSENTER clobbers ESP and EIP.  SYSCALL32
clobbers ECX (and, invisibly, R11).  SYSRETL (long mode to
compat mode) clobbers ECX and, invisibly, R11.  SYSEXIT (which
we only need for native 32-bit) clobbers ECX and EDX.

This means that we'll need to provide ESP to the kernel in a
register (I chose ECX, since it's only needed for SYSENTER) and
we need to provide the args that normally live in ECX and EDX in
memory.
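
For reference, the convention being preserved is the 32-bit INT80
syscall ABI: syscall number in EAX, args in EBX, ECX, EDX, ESI, EDI,
EBP.  A minimal userspace sketch of that ABI (illustrative only, not
part of this patch; assumes a 32-bit x86 Linux build, e.g. gcc -m32):

	/* Invoke write(2) via int $0x80.  The ECX and EDX argument
	 * slots here are exactly the ones the new vDSO prologue has
	 * to spill to the stack. */
	static long int80_write(int fd, const void *buf, unsigned long len)
	{
		long ret;
		asm volatile ("int $0x80"
			      : "=a" (ret)
			      : "a" (4),	/* __NR_write on 32-bit */
				"b" (fd), "c" (buf), "d" (len)
			      : "memory");
		return ret;
	}

Calling int80_write(1, "hi\n", 3), for example, must see its buffer
pointer (ECX) and length (EDX) arrive in the kernel intact.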

The epilogue needs to restore ECX and EDX, since user code relies on
those registers being preserved across the call.

We don't need to do anything special about EIP, since the kernel
already knows where we are.  The kernel will eventually need to
know where int $0x80 lands, so add a vdso_image entry for it.
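
As a hedged sketch of the eventual consumer (vdso_image_32 and
mm->context.vdso are names assumed from the surrounding x86 vDSO code,
not something this patch adds):

	/* Compute the user-mode address of the instruction after
	 * 'int $0x80', i.e. where a fast-path syscall should resume. */
	static unsigned long int80_landing_pad(struct mm_struct *mm)
	{
		return (unsigned long)mm->context.vdso +
		       vdso_image_32.sym_int80_landing_pad;
	}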

The only user-visible effect of this code is that ptrace-induced
changes to ECX and EDX during fast syscalls will be lost.  This
is already the case for the SYSENTER path.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/b860925adbee2d2627a0671fbfe23a7fd04127f8.1444091584.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Andy Lutomirski, 9 years ago
commit 8242c6c84a

 arch/x86/entry/vdso/vdso2c.c             |  1 +
 arch/x86/entry/vdso/vdso32/system_call.S | 25 ++++++++++++++++++++++++-
 arch/x86/include/asm/vdso.h              |  1 +
 3 files changed, 26 insertions(+), 1 deletion(-)

--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -101,6 +101,7 @@ struct vdso_sym required_syms[] = {
 	{"__kernel_vsyscall", true},
 	{"__kernel_sigreturn", true},
 	{"__kernel_rt_sigreturn", true},
+	{"int80_landing_pad", true},
 };
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
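
Listing a symbol in required_syms makes vdso2c emit its offset into the
generated image structure.  Roughly, the generated initializer gains one
field along these lines (illustrative; the structure name may differ per
image and the 0x425 offset is made up, the real value comes from the
linked vDSO):

	const struct vdso_image vdso_image_32 = {
		/* ... existing fields emitted by vdso2c ... */
		.sym_int80_landing_pad = 0x425,
	};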

--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -16,7 +16,30 @@
 	ALIGN
 __kernel_vsyscall:
 	CFI_STARTPROC
-	int $0x80
+	/*
+	 * Reshuffle regs so that any of the entry instructions
+	 * will preserve enough state.
+	 */
+	pushl	%edx
+	CFI_ADJUST_CFA_OFFSET	4
+	CFI_REL_OFFSET		edx, 0
+	pushl	%ecx
+	CFI_ADJUST_CFA_OFFSET	4
+	CFI_REL_OFFSET		ecx, 0
+	movl	%esp, %ecx
+
+	/* Enter using int $0x80 */
+	movl	(%esp), %ecx
+	int	$0x80
+GLOBAL(int80_landing_pad)
+
+	/* Restore ECX and EDX in case they were clobbered. */
+	popl	%ecx
+	CFI_RESTORE		ecx
+	CFI_ADJUST_CFA_OFFSET	-4
+	popl	%edx
+	CFI_RESTORE		edx
+	CFI_ADJUST_CFA_OFFSET	-4
 	ret
 	CFI_ENDPROC
 
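
For clarity, the stack layout this prologue establishes at the moment of
the int $0x80 (descriptive comment, not part of the patch):

	/*
	 *	(%esp)  = saved ECX (the real arg2)
	 *	4(%esp) = saved EDX (the real arg3)
	 *
	 * ECX is reloaded from (%esp) because INT80 itself preserves
	 * all registers; the earlier movl %esp, %ecx is dead on this
	 * path and exists so that the future SYSENTER/SYSCALL32 entries
	 * can hand the kernel a pointer to the saved args in ECX.
	 */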

--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -26,6 +26,7 @@ struct vdso_image {
 	long sym___kernel_sigreturn;
 	long sym___kernel_rt_sigreturn;
 	long sym___kernel_vsyscall;
+	long sym_int80_landing_pad;
 };
 
 #ifdef CONFIG_X86_64