/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 * ptrace needs to have all registers on the stack.
 * If the order here is changed, it needs to be
 * updated in fork.c:copy_process(), signal.c:do_signal(),
 * ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */
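
/*
 * The PT_* offsets used throughout this file (PT_EAX, PT_EIP, PT_EFLAGS,
 * PT_CS, PT_GS, PT_ORIG_EAX, PT_OLDESP, PT_OLDSS, ...) name these same
 * slots; they are generated at build time from struct pt_regs (see the
 * asm-offsets machinery), so this layout and struct pt_regs must stay in
 * sync.
 */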

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
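
/*
 * For illustration: with CONFIG_CC_STACKPROTECTOR, gcc emits prologue and
 * epilogue code roughly along the lines of "movl %gs:20, %reg" to load the
 * canary and a compare/xor against %gs:20 to check it, which is why a valid
 * canary value must sit at %gs:20 whenever C code can run.
 */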

#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl %gs
.endm

.macro POP_GS pop=0
98:	popl %gs
  .if \pop <> 0
	add $\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl %fs
	pushl %es
	pushl %ds
	pushl \pt_regs_ax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm
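
/*
 * Note that the push order above is the mirror image of the pt_regs layout
 * documented at the top of this file: the hardware frame (ss/sp/flags/cs/ip)
 * and orig_eax are already on the stack when SAVE_ALL runs, so once it
 * completes, %esp points at a complete struct pt_regs.
 */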

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original ebp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov %esp, %ebp
	orl $0x1, %ebp
#endif
.endm
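
/*
 * For example (illustrative addresses only): if pt_regs starts at
 * 0xf50e1fb4, %ebp becomes 0xf50e1fb5. Stack slots are 4-byte aligned, so a
 * set LSB can never be a real saved frame pointer; an unwinder that sees it
 * can clear bit 0 and treat the result as a pointer to pt_regs.
 */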

.macro RESTORE_INT_REGS
	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl %ds
2:	popl %es
3:	popl %fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

/*
 * %eax: prev task
 * %edx: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl %ebp
	pushl %ebx
	pushl %edi
	pushl %esi

	/* switch stack */
	movl %esp, TASK_threadsp(%eax)
	movl TASK_threadsp(%edx), %esp

#ifdef CONFIG_CC_STACKPROTECTOR
	movl TASK_stack_canary(%edx), %ebx
	movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popl %esi
	popl %edi
	popl %ebx
	popl %ebp

	jmp __switch_to
END(__switch_to_asm)
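
/*
 * Note the tail jump (rather than a call) into __switch_to: by that point
 * %esp is already the new task's stack, so when __switch_to returns it uses
 * the return address saved on *that* stack -- either the point where the new
 * task last called __switch_to_asm, or ret_from_fork below for a freshly
 * created task.
 */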

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack. This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl %eax
	call schedule_tail
	popl %eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)

/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call schedule_tail_wrapper

	testl %ebx, %ebx
	jnz 1f			/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl %esp, %eax
	call syscall_return_slowpath
	jmp restore_all

	/* kernel thread */
1:	movl %edi, %eax
	call *%ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl $0, PT_EAX(%esp)
	jmp 2b
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
#endif
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl %esp, %eax
	call prepare_exit_to_usermode
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl $0, PER_CPU_VAR(__preempt_count)
	jnz restore_all
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp .Lneed_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!). To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl $5*4, %esp			/* remove xen-provided frame */
	jmp .Lsysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available. This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO. In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction. This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	user stack
 * 0(%ebp) arg6
 */
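
/*
 * For context, a rough sketch of the vDSO caller side (not part of this
 * file; see the vDSO's system_call.S for the real thing). The vDSO saves
 * the registers that SYSENTER/SYSEXIT clobber and stashes the user stack
 * pointer in %ebp, which is why %ebp above is "user stack" and arg6 lives
 * at 0(%ebp):
 *
 *	__kernel_vsyscall:
 *		pushl %ecx		# SYSEXIT will clobber ecx/edx
 *		pushl %edx
 *		pushl %ebp		# free up ebp ...
 *		movl  %esp, %ebp	# ... to carry the user stack pointer
 *		sysenter		# (falls back to int $0x80 without SEP)
 *		# the kernel's SYSEXIT path returns near here
 *		popl  %ebp
 *		popl  %edx
 *		popl  %ecx
 *		ret
 */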

ENTRY(entry_SYSENTER_32)
	movl TSS_sysenter_sp0(%esp), %esp
.Lsysenter_past_esp:
	pushl $__USER_DS		/* pt_regs->ss */
	pushl %ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl $X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl $__USER_CS		/* pt_regs->cs */
	pushl $0			/* pt_regs->ip = 0 (placeholder) */
	pushl %eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves. To save a few cycles, we can check whether
	 * any of them were set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps. (Yes, this is slow, but so is
	 * single-stepping in general. This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz .Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl %esp, %eax
	call do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl PT_EIP(%esp), %edx		/* pt_regs->ip */
	movl PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	popl %ebx			/* pt_regs->bx */
	addl $2*4, %esp			/* skip pt_regs->cx and pt_regs->dx */
	popl %esi			/* pt_regs->si */
	popl %edi			/* pt_regs->di */
	popl %ebp			/* pt_regs->bp */
	popl %eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl $PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr $X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl $0, PT_FS(%esp)
	jmp 1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl $X86_EFLAGS_FIXED
	popfl
	jmp .Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction. INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform system
 * calls. Instances of INT $0x80 can be found inline in various programs
 * and libraries. It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call. (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path. It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	arg6
 */

ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl %eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl %esp, %eax
	call do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	je .Lldt_ss			# returning to user-space with LDT SS
#endif
.Lrestore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
.Lirq_return:
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
.Lldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16-bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that accounts for the difference.
 */
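/*
 * Worked example (made-up addresses): suppose the kernel stack pointer here
 * is 0xf6007f80 and the saved userspace ESP is 0x0804ff20. The code below
 * builds %eax = 0x08047f80 (user high word, kernel low word) and computes
 * %edx = 0xf6007f80 - 0x08047f80 = 0xedfc0000, so 0xedfc is written into
 * bits 16..31 of the ESPFIX segment base. After "lss (%esp), %esp" the CPU
 * sees SS.base + ESP = 0xedfc0000 + 0x08047f80 = 0xf6007f80, the same
 * linear address as before, while the value held in ESP (and hence the high
 * word a 16-bit IRET leaves behind) now belongs to userspace rather than
 * the kernel.
 */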
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)

	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl $__ESPFIX_SS
	pushl %eax			/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss (%esp), %esp		/* switch to espfix segment */
	jmp .Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT, switches to the
 * normal stack, and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl $__KERNEL_DS
	pushl %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
#endif
.endm

.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl $(~vector+0x80)		/* Note: always in signed byte range */
    vector=vector+1
	jmp common_interrupt
	.align 8
    .endr
END(irq_entries_start)
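
/*
 * About the $(~vector+0x80) encoding above: pushing a value in the signed
 * byte range lets each stub use the two-byte "pushl imm8" form, which is
 * what allows a stub to fit in its 8-byte block. common_interrupt below
 * then adds -0x80, leaving ~vector (i.e. -(vector + 1)) in orig_eax, and
 * do_IRQ recovers the vector number with a bitwise NOT. For example, vector
 * 0x20 (FIRST_EXTERNAL_VECTOR) is pushed as 0x5f, becomes -0x21 after the
 * adjustment, and NOTs back to 0x20.
 */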

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl $-0x80, (%esp)		/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl %esp, %eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl $~(nr);			\
	SAVE_ALL;			\
	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	movl %esp, %eax;		\
	call fn;			\
	jmp ret_from_intr;		\
ENDPROC(name)

#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)
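
/*
 * For reference, BUILD_INTERRUPT3 just stamps out a small wrapper. For
 * instance the Hyper-V use near the end of this file,
 * BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 * hyperv_vector_handler), expands to an ENTRY(hyperv_callback_vector) stub
 * that pushes ~HYPERVISOR_CALLBACK_VECTOR as orig_eax, saves registers,
 * and calls hyperv_vector_handler with the pt_regs pointer in %eax before
 * taking the common ret_from_intr exit path.
 */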

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl $0
	pushl $do_coprocessor_error
	jmp common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl $do_general_protection",	\
		    "pushl $do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl $do_simd_coprocessor_error
#endif
	jmp common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl $-1			# mark this as an int
	pushl $do_device_not_available
	jmp common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl $0
	pushl $do_overflow
	jmp common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl $0
	pushl $do_bounds
	jmp common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl $0
	pushl $do_invalid_op
	jmp common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl $0
	pushl $do_coprocessor_segment_overrun
	jmp common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl $do_invalid_TSS
	jmp common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl $do_segment_not_present
	jmp common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl $do_stack_segment
	jmp common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl $do_alignment_check
	jmp common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl $0			# no error code
	pushl $do_divide_error
	jmp common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl $0
	pushl machine_check_vector
	jmp common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl $0
	pushl $do_spurious_interrupt_bug
	jmp common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl $-1			/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * the iret instruction's behaviour, which delivers a
	 * pending interrupt when interrupts are enabled:
	 */
	movl PT_EIP(%esp), %eax
	cmpl $xen_iret_start_crit, %eax
	jb 1f
	cmpl $xen_iret_end_crit, %eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call xen_maybe_preempt_hcall
#endif
	jmp ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl %eax
	movl $1, %eax
1:	mov 4(%esp), %ds
2:	mov 8(%esp), %es
3:	mov 12(%esp), %fs
4:	mov 16(%esp), %gs
	/*
	 * EAX == 0 => Category 1 (Bad segment)
	 * EAX != 0 => Category 2 (Bad IRET)
	 */
	testl %eax, %eax
	popl %eax
	lea 16(%esp), %esp
	jz 5f
	jmp iret_exc
5:	pushl $-1			/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp ret_from_exception

.section .fixup, "ax"
6:	xorl %eax, %eax
	movl %eax, 4(%esp)
	jmp 1b
7:	xorl %eax, %eax
	movl %eax, 8(%esp)
	jmp 2b
8:	xorl %eax, %eax
	movl %eax, 12(%esp)
	jmp 3b
9:	xorl %eax, %eax
	movl %eax, 16(%esp)
	jmp 4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl $trace_do_page_fault
	jmp common_exception
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl $do_page_fault
	ALIGN
	jmp common_exception
END(page_fault)
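
/*
 * Stack layout on entry to common_exception (built by the stubs above plus
 * the hardware exception frame): the handler's C function address sits in
 * the slot that will become pt_regs->gs, and the hardware error code (or
 * the 0 / -1 the stub pushed) sits in the slot that will become orig_eax.
 * The code below saves the remaining registers around those two slots, then
 * pulls the function pointer and error code back out of PT_GS and
 * PT_ORIG_EAX before dispatching.
 */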
common_exception:
	/* the function address is in %gs's slot on the stack */
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx
	ENCODE_FRAME_POINTER
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp, %eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
END(common_exception)

ENTRY(debug)
	/*
	 * #DB can happen at the first instruction of
	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
	 * happens, then we will be running on a very small stack. We
	 * need to detect this condition and switch to the thread
	 * stack before calling any C code at all.
	 *
	 * If you edit this code, keep in mind that NMIs can happen in here.
	 */
	ASM_CLAC
	pushl $-1			# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl %edx, %edx			# error code 0
	movl %esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl %eax, %ecx			/* ecx = (end of SYSENTER_stack) - esp */
	cmpl $SIZEOF_SYSENTER_stack, %ecx
	jb .Ldebug_from_sysenter_stack

	TRACE_IRQS_OFF
	call do_debug
	jmp ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack. Switch off. */
	movl %esp, %ebx
	movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	call do_debug
	movl %ebx, %esp
	jmp ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	je .Lnmi_espfix_stack
#endif

	pushl %eax			# pt_regs->orig_ax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl %edx, %edx			# zero error code
	movl %esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl %eax, %ecx			/* ecx = (end of SYSENTER_stack) - esp */
	cmpl $SIZEOF_SYSENTER_stack, %ecx
	jb .Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call do_nmi
	jmp .Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack. Switch off. No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl %esp, %ebx
	movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call do_nmi
	movl %ebx, %esp
	jmp .Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Create the far pointer used to LSS back to the espfix stack later.
	 */
	pushl %ss
	pushl %esp
	addl $4, (%esp)

	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	.endr

	pushl %eax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx, %edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	jmp .Lirq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl $-1			# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl %edx, %edx			# zero error code
	movl %esp, %eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl $do_general_protection
	jmp common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl $do_async_page_fault
	jmp common_exception
END(async_page_fault)
#endif

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl %ebp, %ebp

	movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call do_exit
1:	jmp 1b
END(rewind_stack_do_exit)