/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel only uses it for the
 * stack canary, which is required to be at %gs:20 by gcc. Read the
 * comment at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl %gs
.endm

.macro POP_GS pop=0
98:	popl %gs
  .if \pop <> 0
	add $\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */
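
/*
 * Background note (not from the original file, roughly speaking): when the
 * stack protector is enabled (!CONFIG_X86_32_LAZY_GS), SET_KERNEL_GS loads
 * the __KERNEL_STACK_CANARY selector so that gcc's %gs:20 canary reads hit
 * the per-cpu stack canary. With lazy GS, the entry code does not touch %gs
 * at all; the pt_regs slot is merely filled with 0 by PUSH_GS above, and
 * the user's %gs is handled lazily at context-switch time instead.
 */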

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl %fs
	pushl %es
	pushl %ds
	pushl \pt_regs_ax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm
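
/*
 * Note on SAVE_ALL: the pushes above fill in a struct pt_regs bottom-up.
 * gs/fs/es/ds go in first and %ebx last, so after the macro runs %esp
 * points at pt_regs->bx and the frame matches the 0(%esp)..40(%esp)
 * layout documented at the top of this file.
 */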

.macro RESTORE_INT_REGS
	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl %ds
2:	popl %es
3:	popl %fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm
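
/*
 * Why the .fixup/_ASM_EXTABLE dance in RESTORE_REGS: popping a segment
 * register faults if the saved selector is no longer valid (e.g. ptrace or
 * a signal frame stored a bogus %ds/%es/%fs). The exception table entries
 * route such a fault to the fixup code, which overwrites the saved value on
 * the stack with 0 (the null selector) and retries the pop, so a bad user
 * selector cannot take down the kernel return path.
 */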

ENTRY(ret_from_fork)
	pushl %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	pushl $0x0202 # Reset kernel eflags
	popfl

	/* When we fork, we trace the syscall return in the child, too. */
	movl %esp, %eax
	call syscall_return_slowpath
	jmp restore_all
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	pushl %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	pushl $0x0202 # Reset kernel eflags
	popfl
	movl PT_EBP(%esp), %eax
	call *PT_EBX(%esp)
	movl $0, PT_EAX(%esp)

	/*
	 * Kernel threads return to userspace as if returning from a syscall.
	 * We should check whether anything actually uses this path and, if so,
	 * consider switching it over to ret_from_fork.
	 */
	movl %esp, %eax
	call syscall_return_slowpath
	jmp restore_all
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
#endif
	cmpl $USER_RPL, %eax
	jb resume_kernel # not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl %esp, %eax
	call prepare_exit_to_usermode
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl $0, PER_CPU_VAR(__preempt_count)
	jnz restore_all
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	pushl $__USER_DS /* pt_regs->ss */
	pushl %ebp /* pt_regs->sp (stashed in bp) */
	pushfl /* pt_regs->flags (except IF = 0) */
	orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
	pushl $__USER_CS /* pt_regs->cs */
	pushl $0 /* pt_regs->ip = 0 (placeholder) */
	pushl %eax /* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl %esp, %eax
	call do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON /* User mode traces as IRQs on. */
	movl PT_EIP(%esp), %edx /* pt_regs->ip */
	movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	popl %ebx /* pt_regs->bx */
	addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
	popl %esi /* pt_regs->si */
	popl %edi /* pt_regs->di */
	popl %ebp /* pt_regs->bp */
	popl %eax /* pt_regs->ax */

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl $0, PT_FS(%esp)
	jmp 1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)
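
/*
 * For reference, a sketch of the user side (not part of this file): the
 * 32-bit vDSO's __kernel_vsyscall does roughly
 *
 *	push %ecx
 *	push %edx
 *	push %ebp
 *	movl %esp, %ebp
 *	sysenter
 *	(sysexit lands here; the vDSO then pops %ebp, %edx and %ecx)
 *
 * which is why entry_SYSENTER_32 takes pt_regs->sp from %ebp on entry and
 * why the exit comment above says the vDSO will pop %ecx and %edx.
 */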

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl %eax /* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */

	/*
	 * User mode is traced as though IRQs are on. Unlike the 64-bit
	 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
	 * are already on (unless user code is messing around with iopl).
	 */

	movl %esp, %eax
	call do_syscall_32_irqs_on
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss # returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4 # skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc )
	pushl $0 # no error code
	pushl $do_iret_error
	jmp error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that matches for the difference.
	 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx /* load kernel esp */
	mov PT_OLDESP(%esp), %eax /* load userspace esp */
	mov %dx, %ax /* eax: new kernel esp */
	sub %eax, %edx /* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
	pushl $__ESPFIX_SS
	pushl %eax /* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp /* switch to espfix segment */
	jmp restore_nocheck
#endif
ENDPROC(entry_INT80_32)
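
/*
 * For reference: the legacy int $0x80 ABI that lands in entry_INT80_32 is
 * the usual one: syscall number in %eax, up to six arguments in %ebx, %ecx,
 * %edx, %esi, %edi, %ebp, and the return value (or -errno) back in %eax.
 * SAVE_ALL pt_regs_ax=$-ENOSYS preloads the saved-%eax slot with -ENOSYS,
 * so an invalid syscall number simply falls through to that error return.
 */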

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP by the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax /* the adjusted stack pointer */
	pushl $__KERNEL_DS
	pushl %eax
	lss (%esp), %esp /* switch to the normal stack segment */
#endif
.endm
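
/*
 * Worked example of the ESPFIX arithmetic (made-up numbers, for illustration
 * only): suppose the kernel stack pointer is 0xc0123fa8 and the user's
 * 16-bit stack has ESP 0x0000ff40. ldt_ss builds a new ESP of 0x00003fa8
 * (user high word, kernel low word) and programs the __ESPFIX_SS base to
 * 0xc0120000, so base + new ESP still addresses the real kernel stack while
 * the high word of ESP already matches userspace. FIXUP_ESPFIX_STACK above
 * reverses this: it reads the base back out of the GDT entry and adds it to
 * ESP to recover the flat __KERNEL_DS stack pointer.
 */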

.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
	vector=FIRST_EXTERNAL_VECTOR
	.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl $(~vector+0x80) /* Note: always in signed byte range */
	vector=vector+1
	jmp common_interrupt
	.align 8
	.endr
END(irq_entries_start)
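
/*
 * About the $(~vector+0x80) encoding: pushing ~vector+0x80 instead of the
 * raw vector keeps the immediate in signed-byte range, so each stub is a
 * 2-byte push plus a jump and fits in its 8-byte slot. For example, vector
 * 0x20 pushes 0x5f; common_interrupt's addl $-0x80 below turns that back
 * into ~0x20 = -0x21, and the IRQ code inverts it again to recover the
 * vector number from orig_ax.
 */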

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp, %eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
	ASM_CLAC; \
	pushl $~(nr); \
	SAVE_ALL; \
	TRACE_IRQS_OFF \
	movl %esp, %eax; \
	call fn; \
	jmp ret_from_intr; \
ENDPROC(name)

#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr) \
	BUILD_INTERRUPT3(name, nr, smp_##name); \
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl $0
	pushl $do_coprocessor_error
	jmp error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl $do_general_protection", \
		    "pushl $do_simd_coprocessor_error", \
		    X86_FEATURE_XMM
#else
	pushl $do_simd_coprocessor_error
#endif
	jmp error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl $-1 # mark this as an int
	pushl $do_device_not_available
	jmp error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl $0
	pushl $do_overflow
	jmp error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl $0
	pushl $do_bounds
	jmp error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl $0
	pushl $do_invalid_op
	jmp error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl $0
	pushl $do_coprocessor_segment_overrun
	jmp error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl $do_invalid_TSS
	jmp error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl $do_segment_not_present
	jmp error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl $do_stack_segment
	jmp error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl $do_alignment_check
	jmp error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl $0 # no error code
	pushl $do_divide_error
	jmp error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl $0
	pushl machine_check_vector
	jmp error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl $0
	pushl $do_spurious_interrupt_bug
	jmp error_code
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl $5*4, %esp /* remove xen-provided frame */
	jmp sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	pushl $-1 /* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl PT_EIP(%esp), %eax
	cmpl $xen_iret_start_crit, %eax
	jb 1f
	cmpl $xen_iret_end_crit, %eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call xen_maybe_preempt_hcall
#endif
	jmp ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl %eax
	movl $1, %eax
1:	mov 4(%esp), %ds
2:	mov 8(%esp), %es
3:	mov 12(%esp), %fs
4:	mov 16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl %eax, %eax
	popl %eax
	lea 16(%esp), %esp
	jz 5f
	jmp iret_exc
5:	pushl $-1 /* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp ret_from_exception

.section .fixup, "ax"
6:	xorl %eax, %eax
	movl %eax, 4(%esp)
	jmp 1b
7:	xorl %eax, %eax
	movl %eax, 8(%esp)
	jmp 2b
8:	xorl %eax, %eax
	movl %eax, 12(%esp)
	jmp 3b
9:	xorl %eax, %eax
	movl %eax, 16(%esp)
	jmp 4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl $0 /* Pass NULL as regs pointer */
	movl 4*4(%esp), %eax
	movl 0x4(%ebp), %edx
	movl function_trace_op, %ecx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	addl $4, %esp /* skip NULL pointer */
	popl %edx
	popl %ecx
	popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)
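
/*
 * Background note on dynamic ftrace (not from this file): with
 * CONFIG_DYNAMIC_FTRACE the compiler-generated mcount call sites are
 * rewritten to NOPs at boot and patched into calls to ftrace_caller (or
 * ftrace_regs_caller below) only while tracing is active. The global
 * ftrace_call / ftrace_graph_call labels above mark the call instructions
 * that the patching code redirects at the current tracer instead of
 * ftrace_stub.
 */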

ENTRY(ftrace_regs_caller)
	pushf /* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl 4(%esp) /* save return ip into ip slot */

	pushl $0 /* Load 0 into orig_ax */
	pushl %gs
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx

	movl 13*4(%esp), %eax /* Get the saved flags */
	movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
			      /* clobbering return ip */
	movl $__KERNEL_CS, 13*4(%esp)

	movl 12*4(%esp), %eax /* Load ip (1st parameter) */
	subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
	movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
	movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
	pushl %esp /* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call ftrace_stub

	addl $4, %esp /* Skip pt_regs */
	movl 14*4(%esp), %eax /* Move flags back into cs */
	movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
	movl 12*4(%esp), %eax /* Get return ip from regs->ip */
	movl %eax, 14*4(%esp) /* Put return ip back for ret */

	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
	popl %ds
	popl %es
	popl %fs
	popl %gs
	addl $8, %esp /* Skip orig_ax and ip */
	popf /* Pop flags at end (no addl to corrupt flags) */
	jmp ftrace_ret

	popf
	jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $__PAGE_OFFSET, %esp
	jb ftrace_stub /* Paging not enabled yet? */

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	lea 0x4(%ebp), %edx
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %eax
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif
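
/*
 * Background note on return_to_handler (not from this file):
 * ftrace_graph_caller hands prepare_ftrace_return the location of the traced
 * function's return address, and prepare_ftrace_return replaces that return
 * address with return_to_handler. When the traced function returns it
 * therefore lands above; ftrace_return_to_handler records the exit and hands
 * back the original return address, and the jmp *%ecx resumes the real
 * caller.
 */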

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl $trace_do_page_fault
	jmp error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi # get the function address
	movl PT_ORIG_EAX(%esp), %edx # get the error code
	movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp, %eax # pt_regs pointer
	call *%edi
	jmp ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl $__KERNEL_CS
	pushl $sysenter_past_esp
.endm

ENTRY(debug)
	ASM_CLAC
	cmpl $entry_SYSENTER_32, (%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl $-1 # mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx, %edx # error code 0
	movl %esp, %eax # pt_regs pointer
	call do_debug
	jmp ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	je nmi_espfix_stack
#endif
	cmpl $entry_SYSENTER_32, (%esp)
	je nmi_stack_fixup
	pushl %eax
	movl %esp, %eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1), %eax
	cmpl $(THREAD_SIZE-20), %eax
	popl %eax
	jae nmi_stack_correct
	cmpl $entry_SYSENTER_32, 12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	pushl %eax
	SAVE_ALL
	xorl %edx, %edx # zero error code
	movl %esp, %eax # pt_regs pointer
	call do_nmi
	jmp restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	cmpw $__KERNEL_CS, 16(%esp)
	jne nmi_stack_correct
	cmpl $debug, (%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn, (%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
	pushl %ss
	pushl %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	.endr
	pushl %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK # %eax == %esp
	xorl %edx, %edx # zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp # back to espfix stack
	jmp irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl $-1 # mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx, %edx # zero error code
	movl %esp, %eax # pt_regs pointer
	call do_int3
	jmp ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl $do_general_protection
	jmp error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl $do_async_page_fault
	jmp error_code
END(async_page_fault)
#endif