@@ -182,7 +182,15 @@ entry_SYSCALL_64_fastpath:
 #endif
 	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10, %rcx
+
+	/*
+	 * This call instruction is handled specially in stub_ptregs_64.
+	 * It might end up jumping to the slow path. If it jumps, RAX is
+	 * clobbered.
+	 */
 	call	*sys_call_table(, %rax, 8)
+.Lentry_SYSCALL_64_after_fastpath_call:
+
 	movq	%rax, RAX(%rsp)
 1:
 	/*
@@ -235,25 +243,13 @@ GLOBAL(int_ret_from_sys_call_irqs_off)
 
 /* Do syscall entry tracing */
 tracesys:
-	movq	%rsp, %rdi
-	movl	$AUDIT_ARCH_X86_64, %esi
-	call	syscall_trace_enter_phase1
-	test	%rax, %rax
-	jnz	tracesys_phase2			/* if needed, run the slow path */
-	RESTORE_C_REGS_EXCEPT_RAX		/* else restore clobbered regs */
-	movq	ORIG_RAX(%rsp), %rax
-	jmp	entry_SYSCALL_64_fastpath	/* and return to the fast path */
-
-tracesys_phase2:
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
-	movl	$AUDIT_ARCH_X86_64, %esi
-	movq	%rax, %rdx
-	call	syscall_trace_enter_phase2
+	call	syscall_trace_enter
 
 	/*
 	 * Reload registers from stack in case ptrace changed them.
-	 * We don't reload %rax because syscall_trace_entry_phase2() returned
+	 * We don't reload %rax because syscall_trace_enter() returned
 	 * the value it wants us to use in the table lookup.
 	 */
 	RESTORE_C_REGS_EXCEPT_RAX
@@ -355,6 +351,38 @@ opportunistic_sysret_failed:
 	jmp	restore_c_regs_and_iret
 END(entry_SYSCALL_64)
 
+ENTRY(stub_ptregs_64)
+	/*
+	 * Syscalls marked as needing ptregs land here.
+	 * If we are on the fast path, we need to save the extra regs.
+	 * If we are on the slow path, the extra regs are already saved.
+	 *
+	 * RAX stores a pointer to the C function implementing the syscall.
+	 */
+	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+	jne	1f
+
+	/* Called from fast path -- pop return address and jump to slow path */
+	popq	%rax
+	jmp	tracesys			/* called from fast path */
+
+1:
+	/* Called from C */
+	jmp	*%rax				/* called from C */
+END(stub_ptregs_64)
+
+.macro ptregs_stub func
+ENTRY(ptregs_\func)
+	leaq	\func(%rip), %rax
+	jmp	stub_ptregs_64
+END(ptregs_\func)
+.endm
+
+/* Instantiate ptregs_stub for each ptregs-using syscall */
+#define __SYSCALL_64_QUAL_(sym)
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
+#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
+#include <asm/syscalls_64.h>
 	.macro FORK_LIKE func
 ENTRY(stub_\func)