@@ -266,14 +266,13 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_X86_64
-__visible void do_syscall_64(struct pt_regs *regs)
+__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
-	struct thread_info *ti = current_thread_info();
-	unsigned long nr = regs->orig_ax;
+	struct thread_info *ti;
 
 	enter_from_user_mode();
 	local_irq_enable();
-
+	ti = current_thread_info();
 	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
 		nr = syscall_trace_enter(regs);
 
@@ -282,8 +281,9 @@ __visible void do_syscall_64(struct pt_regs *regs)
 	 * table. The only functional difference is the x32 bit in
 	 * regs->orig_ax, which changes the behavior of some syscalls.
 	 */
-	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
-		nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls);
+	nr &= __SYSCALL_MASK;
+	if (likely(nr < NR_syscalls)) {
+		nr = array_index_nospec(nr, NR_syscalls);
 		regs->ax = sys_call_table[nr](
 			regs->di, regs->si, regs->dx,
 			regs->r10, regs->r8, regs->r9);
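For reference, below is a minimal user-space sketch of the bounds-clamping pattern the second hunk applies to the syscall number. It mirrors the kernel's generic C fallback for array_index_nospec() (not the x86 cmp/sbb assembly version), and like that fallback it relies on arithmetic right shift of a negative long. The table, TABLE_SIZE, index_mask_nospec() and lookup() are made-up stand-ins for sys_call_table, NR_syscalls and the real kernel helpers, used purely for illustration.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/*
 * Branchless mask: ~0UL when index < size, 0 otherwise, so a
 * misspeculated out-of-bounds index is clamped to slot 0 instead of
 * steering a speculative load at attacker-chosen memory.
 */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

#define TABLE_SIZE 4UL				/* stand-in for NR_syscalls */

static const char *table[TABLE_SIZE] = {	/* stand-in for sys_call_table */
	"sys_read", "sys_write", "sys_open", "sys_close",
};

static const char *lookup(unsigned long nr)
{
	if (nr < TABLE_SIZE) {
		/*
		 * Architecturally a no-op here; it only matters if the
		 * CPU speculates past the bounds check above.
		 */
		nr &= index_mask_nospec(nr, TABLE_SIZE);
		return table[nr];
	}
	return "ENOSYS";
}

int main(void)
{
	printf("%s\n", lookup(2));	/* in bounds     -> sys_open */
	printf("%s\n", lookup(99));	/* out of bounds -> ENOSYS   */
	return 0;
}

Note also why the patch hoists nr &= __SYSCALL_MASK out of the condition: the old code masked nr twice (once in the comparison, once in the array_index_nospec() call), while the new code masks once, so the value compared against NR_syscalls is textually the same value that indexes the table.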