entry_32.S

/*
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
* I changed all the .align's to 4 (16 byte alignment), as that's faster
* on a 486.
*
* Stack layout in 'syscall_exit':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in fork.c:copy_process, signal.c:do_signal,
* ptrace.c and ptrace.h
*
* 0(%esp) - %ebx
* 4(%esp) - %ecx
* 8(%esp) - %edx
* C(%esp) - %esi
* 10(%esp) - %edi
* 14(%esp) - %ebp
* 18(%esp) - %eax
* 1C(%esp) - %ds
* 20(%esp) - %es
* 24(%esp) - %fs
* 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
* 2C(%esp) - orig_eax
* 30(%esp) - %eip
* 34(%esp) - %cs
* 38(%esp) - %eflags
* 3C(%esp) - %oldesp
* 40(%esp) - %oldss
*
* "current" is in register %ebx during any slow entries.
*/
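/*
* The offsets above are the PT_EBX..PT_OLDSS constants generated from
* struct pt_regs by asm-offsets.c; the PT_*(%esp) operands used
* throughout this file index the same frame.
*/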
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE 0x40000000
#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit syscall_trace_entry
#define sysexit_audit syscall_exit_work
#endif
.section .entry.text, "ax"
/*
* We use macros for low-level operations which need to be overridden
* for paravirtualization. The following will never clobber any registers:
* INTERRUPT_RETURN (aka. "iret")
* GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
* ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
*
* For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
* specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
* Allowing a register to be clobbered can shrink the paravirt replacement
* enough to patch inline, increasing performance.
*/
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel restore_all
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
jz 1f
TRACE_IRQS_ON
1:
#endif
.endm
/*
* User gs save/restore
*
* %gs is used for userland TLS, and the kernel only uses it for the
* stack canary, which gcc requires to be at %gs:20. Read the comment
* at the top of stackprotector.h for more info.
*
* Local labels 98 and 99 are used.
*/
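/*
* In the lazy mode below, %gs itself is left untouched on kernel entry
* and exit; PUSH_GS only reserves the pt_regs slot (pushl $0) so the
* frame layout stays the same in both configurations.
*/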
#ifdef CONFIG_X86_32_LAZY_GS
/* unfortunately push/pop can't be no-op */
.macro PUSH_GS
pushl_cfi $0
.endm
.macro POP_GS pop=0
addl $(4 + \pop), %esp
CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm
/* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm
#else /* CONFIG_X86_32_LAZY_GS */
.macro PUSH_GS
pushl_cfi %gs
/*CFI_REL_OFFSET gs, 0*/
.endm
.macro POP_GS pop=0
98: popl_cfi %gs
/*CFI_RESTORE gs*/
.if \pop <> 0
add $\pop, %esp
CFI_ADJUST_CFA_OFFSET -\pop
.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99: movl $0, (%esp)
jmp 98b
.popsection
_ASM_EXTABLE(98b,99b)
.endm
.macro PTGS_TO_GS
98: mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99: movl $0, PT_GS(%esp)
jmp 98b
.popsection
_ASM_EXTABLE(98b,99b)
.endm
.macro GS_TO_REG reg
movl %gs, \reg
/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
movl \reg, PT_GS(%esp)
/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
movl $(__KERNEL_STACK_CANARY), \reg
movl \reg, %gs
.endm
#endif /* CONFIG_X86_32_LAZY_GS */
.macro SAVE_ALL
cld
PUSH_GS
pushl_cfi %fs
/*CFI_REL_OFFSET fs, 0;*/
pushl_cfi %es
/*CFI_REL_OFFSET es, 0;*/
pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0;*/
pushl_cfi %eax
CFI_REL_OFFSET eax, 0
pushl_cfi %ebp
CFI_REL_OFFSET ebp, 0
pushl_cfi %edi
CFI_REL_OFFSET edi, 0
pushl_cfi %esi
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
movl %edx, %fs
SET_KERNEL_GS %edx
.endm
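/*
* Note that SAVE_ALL pushes the registers in the reverse of the order
* listed in the header comment, so %ebx lands at 0(%esp) and the frame
* matches struct pt_regs exactly. RESTORE_INT_REGS below pops them
* back in the opposite order.
*/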
.macro RESTORE_INT_REGS
popl_cfi %ebx
CFI_RESTORE ebx
popl_cfi %ecx
CFI_RESTORE ecx
popl_cfi %edx
CFI_RESTORE edx
popl_cfi %esi
CFI_RESTORE esi
popl_cfi %edi
CFI_RESTORE edi
popl_cfi %ebp
CFI_RESTORE ebp
popl_cfi %eax
CFI_RESTORE eax
.endm
.macro RESTORE_REGS pop=0
RESTORE_INT_REGS
1: popl_cfi %ds
/*CFI_RESTORE ds;*/
2: popl_cfi %es
/*CFI_RESTORE es;*/
3: popl_cfi %fs
/*CFI_RESTORE fs;*/
POP_GS \pop
.pushsection .fixup, "ax"
4: movl $0, (%esp)
jmp 1b
5: movl $0, (%esp)
jmp 2b
6: movl $0, (%esp)
jmp 3b
.popsection
_ASM_EXTABLE(1b,4b)
_ASM_EXTABLE(2b,5b)
_ASM_EXTABLE(3b,6b)
POP_GS_EX
.endm
.macro RING0_INT_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 3*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_EC_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 4*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_PTREGS_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
CFI_OFFSET eip, PT_EIP-PT_OLDESP
/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
CFI_OFFSET eax, PT_EAX-PT_OLDESP
CFI_OFFSET ebp, PT_EBP-PT_OLDESP
CFI_OFFSET edi, PT_EDI-PT_OLDESP
CFI_OFFSET esi, PT_ESI-PT_OLDESP
CFI_OFFSET edx, PT_EDX-PT_OLDESP
CFI_OFFSET ecx, PT_ECX-PT_OLDESP
CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
ENTRY(ret_from_fork)
CFI_STARTPROC
pushl_cfi %eax
call schedule_tail
GET_THREAD_INFO(%ebp)
popl_cfi %eax
pushl_cfi $0x0202 # Reset kernel eflags
popfl_cfi
jmp syscall_exit
CFI_ENDPROC
END(ret_from_fork)
ENTRY(ret_from_kernel_thread)
CFI_STARTPROC
pushl_cfi %eax
call schedule_tail
GET_THREAD_INFO(%ebp)
popl_cfi %eax
pushl_cfi $0x0202 # Reset kernel eflags
popfl_cfi
movl PT_EBP(%esp),%eax
call *PT_EBX(%esp)
movl $0,PT_EAX(%esp)
jmp syscall_exit
CFI_ENDPROC
ENDPROC(ret_from_kernel_thread)
/*
* Return to user mode is not as complex as all this looks,
* but we want the default path for a system call return to
* go as quickly as possible, which is why some of this is
* less clear than it otherwise should be.
*/
# userspace resumption stub bypassing syscall exit tracing
ALIGN
RING0_PTREGS_FRAME
ret_from_exception:
preempt_stop(CLBR_ANY)
ret_from_intr:
GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
/*
* We can be coming here from a child spawned by kernel_thread().
*/
movl PT_CS(%esp), %eax
andl $SEGMENT_RPL_MASK, %eax
#endif
cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
jmp restore_all
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
cmpl $0,PER_CPU_VAR(__preempt_count)
jnz restore_all
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
jmp need_resched
END(resume_kernel)
#endif
CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in
the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
# sysenter call handler stub
ENTRY(ia32_sysenter_target)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 0
CFI_REGISTER esp, ebp
movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
/*
* Interrupts are disabled here, but we can't trace that until
* enough kernel state has been set up for TRACE_IRQS_OFF to be
* callable - and we immediately enable interrupts at that point
* anyway.
*/
pushl_cfi $__USER_DS
/*CFI_REL_OFFSET ss, 0*/
pushl_cfi %ebp
CFI_REL_OFFSET esp, 0
pushfl_cfi
orl $X86_EFLAGS_IF, (%esp)
pushl_cfi $__USER_CS
/*CFI_REL_OFFSET cs, 0*/
/*
* Push current_thread_info()->sysenter_return to the stack.
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words
* pushed above; +8 corresponds to copy_thread's esp0 setting.
*/
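/*
* Spelled out: at this point %esp == sp0 - 4*4, and thread_info
* lives at the bottom of the stack, sp0 + 8 - THREAD_SIZE, so the
* operand below evaluates to &current_thread_info()->sysenter_return.
*/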
pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
CFI_REL_OFFSET eip, 0
pushl_cfi %eax
SAVE_ALL
ENABLE_INTERRUPTS(CLBR_NONE)
/*
* Load the potential sixth argument from user stack.
* Careful about security.
*/
cmpl $__PAGE_OFFSET-3,%ebp
jae syscall_fault
ASM_STAC
1: movl (%ebp),%ebp
ASM_CLAC
movl %ebp,PT_EBP(%esp)
_ASM_EXTABLE(1b,syscall_fault)
GET_THREAD_INFO(%ebp)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz sysenter_audit
sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae sysenter_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
sysenter_after_call:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
movl PT_EIP(%esp), %edx
movl PT_OLDESP(%esp), %ecx
xorl %ebp,%ebp
TRACE_IRQS_ON
1: mov PT_FS(%esp), %fs
PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT
#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
jnz syscall_trace_entry
addl $4,%esp
CFI_ADJUST_CFA_OFFSET -4
/* %esi already in 8(%esp) 6th arg: 4th syscall arg */
/* %edx already in 4(%esp) 5th arg: 3rd syscall arg */
/* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */
movl %ebx,%ecx /* 3rd arg: 1st syscall arg */
movl %eax,%edx /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
call __audit_syscall_entry
pushl_cfi %ebx
movl PT_EAX(%esp),%eax /* reload syscall number */
jmp sysenter_do_call
sysexit_audit:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
movl %eax,%edx /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */
setbe %al /* 1 if so, 0 if not */
movzbl %al,%eax /* zero-extend that */
call __audit_syscall_exit
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work
movl PT_EAX(%esp),%eax /* reload syscall return value */
jmp sysenter_exit
#endif
CFI_ENDPROC
.pushsection .fixup,"ax"
2: movl $0,PT_FS(%esp)
jmp 1b
.popsection
_ASM_EXTABLE(1b,2b)
PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
# system call handler stub
ENTRY(system_call)
RING0_INT_FRAME # can't unwind into user space anyway
ASM_CLAC
pushl_cfi %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
# system call tracing in operation / emulation
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz syscall_trace_entry
cmpl $(NR_syscalls), %eax
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
# are returning to the kernel.
# See comments in process.c:copy_thread() for details.
movb PT_OLDSS(%esp), %ah
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
#endif
restore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code
irq_return:
INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
.previous
_ASM_EXTABLE(irq_return,iret_exc)
#ifdef CONFIG_X86_ESPFIX32
CFI_RESTORE_STATE
ldt_ss:
#ifdef CONFIG_PARAVIRT
/*
* The kernel can't run on a non-flat stack if paravirt mode
* is active. Rather than try to fix up the high bits of
* ESP, bypass this code entirely. This may break DOSemu
* and/or Wine support in a paravirt VM, although the option
* is still available to implement the setting of the high
* 16 bits in the INTERRUPT_RETURN paravirt-op.
*/
cmpl $0, pv_info+PARAVIRT_enabled
jne restore_nocheck
#endif
/*
* Setup and switch to ESPFIX stack
*
* We're returning to userspace with a 16-bit stack. The CPU will not
* restore the high word of ESP for us on executing iret... This is an
* "official" bug of all the x86-compatible CPUs, which we can work
* around to make dosemu and wine happy. We do this by preloading the
* high word of ESP with the high word of the userspace ESP while
* compensating for the offset by changing to the ESPFIX segment with
* a base address that makes up for the difference.
*/
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
mov %esp, %edx /* load kernel esp */
mov PT_OLDESP(%esp), %eax /* load userspace esp */
mov %dx, %ax /* eax: new kernel esp */
sub %eax, %edx /* offset (low word is 0) */
shr $16, %edx
mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
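/*
* Worked view of the math above: with kernel ESP K in %edx and user
* ESP U in %eax, "mov %dx, %ax" leaves %eax holding U's high word
* over K's low word. The subtraction then gives
* %edx = (high16(K) - high16(U)) << 16, whose top half becomes the
* ESPFIX segment base, so base + new ESP == K while userspace only
* ever sees U's high word.
*/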
pushl_cfi $__ESPFIX_SS
pushl_cfi %eax /* new kernel esp */
/* Disable interrupts, but do not irqtrace this section: we
* will soon execute iret and the tracer was already set to
* the irqstate after the iret */
DISABLE_INTERRUPTS(CLBR_EAX)
lss (%esp), %esp /* switch to espfix segment */
CFI_ADJUST_CFA_OFFSET -8
jmp restore_nocheck
#endif
CFI_ENDPROC
ENDPROC(system_call)
# perform work that needs to be done immediately before resumption
ALIGN
RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending:
testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
work_resched:
call schedule
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
testb $_TIF_NEED_RESCHED, %cl
jnz work_resched
work_notifysig: # deal with pending signals and
# notify-resume requests
#ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
1:
#else
movl %esp, %eax
#endif
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
movb PT_CS(%esp), %bl
andb $SEGMENT_RPL_MASK, %bl
cmpb $USER_RPL, %bl
jb resume_kernel
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace
#ifdef CONFIG_VM86
ALIGN
work_notifysig_v86:
pushl_cfi %ecx # save ti_flags for do_notify_resume
call save_v86_state # %eax contains pt_regs pointer
popl_cfi %ecx
movl %eax, %esp
jmp 1b
#endif
END(work_pending)
# perform syscall entry tracing
ALIGN
syscall_trace_entry:
movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
call syscall_trace_enter
/* What it returned is what we'll actually use. */
cmpl $(NR_syscalls), %eax
jnae syscall_call
jmp syscall_exit
END(syscall_trace_entry)
# perform syscall exit tracing
ALIGN
syscall_exit_work:
testl $_TIF_WORK_SYSCALL_EXIT, %ecx
jz work_pending
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
# schedule() instead
movl %esp, %eax
call syscall_trace_leave
jmp resume_userspace
END(syscall_exit_work)
CFI_ENDPROC
RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault:
ASM_CLAC
GET_THREAD_INFO(%ebp)
movl $-EFAULT,PT_EAX(%esp)
jmp resume_userspace
END(syscall_fault)
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp syscall_exit
END(syscall_badsys)
sysenter_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp sysenter_after_call
END(sysenter_badsys)
CFI_ENDPROC
.macro FIXUP_ESPFIX_STACK
/*
* Switch back from the ESPFIX stack to the normal zero-based stack
*
* We can't call C functions using the ESPFIX stack. This code reads
* the high word of the segment base from the GDT and switches to the
* normal stack, adjusting ESP with the matching offset.
*/
#ifdef CONFIG_X86_ESPFIX32
/* fixup the stack */
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl_cfi $__KERNEL_DS
pushl_cfi %eax
lss (%esp), %esp /* switch to the normal stack segment */
CFI_ADJUST_CFA_OFFSET -8
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
movl %ss, %eax
/* see if on espfix stack */
cmpw $__ESPFIX_SS, %ax
jne 27f
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
/* switch to normal stack */
FIXUP_ESPFIX_STACK
27:
#endif
.endm
/*
* Build the entry stubs and pointer table with some assembler magic.
* We pack 7 stubs into a single 32-byte chunk, which will fit in a
* single cache line on all modern x86 implementations.
*/
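/*
* The budget works out because each stub is a 2-byte push of a
* signed-byte immediate plus a 2-byte short jmp, and the seventh stub
* in a chunk omits the jmp, falling through to the shared 5-byte
* "jmp common_interrupt": 31 bytes per .balign 32 chunk.
*/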
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
.p2align 5
.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
.balign 32
.rept 7
.if vector < NR_VECTORS
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -4
.endif
1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
jmp 2f
.endif
.previous
.long 1b
.section .entry.text, "ax"
vector=vector+1
.endif
.endr
2: jmp common_interrupt
.endr
END(irq_entries_start)
.previous
END(interrupt)
.previous
/*
* the CPU automatically disables interrupts when executing an IRQ vector,
* so IRQ-flags tracing has to follow that:
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
ASM_CLAC
addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
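/*
* The stubs pushed ~vector + 0x80 (so the immediate fits in a signed
* byte); subtracting 0x80 here leaves ~vector, i.e. -(vector + 1),
* which do_IRQ inverts to recover the vector number.
*/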
SAVE_ALL
TRACE_IRQS_OFF
movl %esp,%eax
call do_IRQ
jmp ret_from_intr
ENDPROC(common_interrupt)
CFI_ENDPROC
#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
RING0_INT_FRAME; \
ASM_CLAC; \
pushl_cfi $~(nr); \
SAVE_ALL; \
TRACE_IRQS_OFF \
movl %esp,%eax; \
call fn; \
jmp ret_from_intr; \
CFI_ENDPROC; \
ENDPROC(name)
#ifdef CONFIG_TRACING
#define TRACE_BUILD_INTERRUPT(name, nr) \
BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
#define TRACE_BUILD_INTERRUPT(name, nr)
#endif
#define BUILD_INTERRUPT(name, nr) \
BUILD_INTERRUPT3(name, nr, smp_##name); \
TRACE_BUILD_INTERRUPT(name, nr)
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_coprocessor_error
jmp error_code
CFI_ENDPROC
END(coprocessor_error)
ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661: pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663: pushl $do_simd_coprocessor_error
664:
.previous
#else
pushl_cfi $do_simd_coprocessor_error
#endif
jmp error_code
CFI_ENDPROC
END(simd_coprocessor_error)
ENTRY(device_not_available)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $-1 # mark this as an int
pushl_cfi $do_device_not_available
jmp error_code
CFI_ENDPROC
END(device_not_available)
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
iret
_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
ENTRY(native_irq_enable_sysexit)
sti
sysexit
END(native_irq_enable_sysexit)
#endif
ENTRY(overflow)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_overflow
jmp error_code
CFI_ENDPROC
END(overflow)
ENTRY(bounds)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_bounds
jmp error_code
CFI_ENDPROC
END(bounds)
ENTRY(invalid_op)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_invalid_op
jmp error_code
CFI_ENDPROC
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_coprocessor_segment_overrun
jmp error_code
CFI_ENDPROC
END(coprocessor_segment_overrun)
ENTRY(invalid_TSS)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_invalid_TSS
jmp error_code
CFI_ENDPROC
END(invalid_TSS)
ENTRY(segment_not_present)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_segment_not_present
jmp error_code
CFI_ENDPROC
END(segment_not_present)
ENTRY(stack_segment)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_stack_segment
jmp error_code
CFI_ENDPROC
END(stack_segment)
ENTRY(alignment_check)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_alignment_check
jmp error_code
CFI_ENDPROC
END(alignment_check)
ENTRY(divide_error)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0 # no error code
pushl_cfi $do_divide_error
jmp error_code
CFI_ENDPROC
END(divide_error)
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi machine_check_vector
jmp error_code
CFI_ENDPROC
END(machine_check)
#endif
ENTRY(spurious_interrupt_bug)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_spurious_interrupt_bug
jmp error_code
CFI_ENDPROC
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
RING0_INT_FRAME
addl $5*4, %esp /* remove xen-provided frame */
CFI_ADJUST_CFA_OFFSET -5*4
jmp sysenter_past_esp
CFI_ENDPROC
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
TRACE_IRQS_OFF
/* Check to see if we got the event in the critical
region in xen_iret_direct, after we've reenabled
events and checked for pending events. This simulates
iret instruction's behaviour where it delivers a
pending interrupt when enabling interrupts. */
movl PT_EIP(%esp),%eax
cmpl $xen_iret_start_crit,%eax
jb 1f
cmpl $xen_iret_end_crit,%eax
jae 1f
jmp xen_iret_crit_fixup
ENTRY(xen_do_upcall)
1: mov %esp, %eax
call xen_evtchn_do_upcall
jmp ret_from_intr
CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
# 1. Fault while reloading DS, ES, FS or GS
# 2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
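# Concretely: the fixups at labels 6-9 below clear %eax (marking
# category 1), zero the saved segment value on the stack, and retry the
# load, which then succeeds with a null selector. If all four loads
# succeed without a fixup firing, %eax is still 1 (category 2).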
ENTRY(xen_failsafe_callback)
CFI_STARTPROC
pushl_cfi %eax
movl $1,%eax
1: mov 4(%esp),%ds
2: mov 8(%esp),%es
3: mov 12(%esp),%fs
4: mov 16(%esp),%gs
/* EAX == 0 => Category 1 (Bad segment)
EAX != 0 => Category 2 (Bad IRET) */
testl %eax,%eax
popl_cfi %eax
lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16
jz 5f
jmp iret_exc
5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp ret_from_exception
CFI_ENDPROC
.section .fixup,"ax"
6: xorl %eax,%eax
movl %eax,4(%esp)
jmp 1b
7: xorl %eax,%eax
movl %eax,8(%esp)
jmp 2b
8: xorl %eax,%eax
movl %eax,12(%esp)
jmp 3b
9: xorl %eax,%eax
movl %eax,16(%esp)
jmp 4b
.previous
_ASM_EXTABLE(1b,6b)
_ASM_EXTABLE(2b,7b)
_ASM_EXTABLE(3b,8b)
_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
xen_evtchn_do_upcall)
#endif /* CONFIG_XEN */
#if IS_ENABLED(CONFIG_HYPERV)
BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
hyperv_vector_handler)
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
ret
END(mcount)
ENTRY(ftrace_caller)
cmpl $0, function_trace_stop
jne ftrace_stub
pushl %eax
pushl %ecx
pushl %edx
pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
ftrace_call:
call ftrace_stub
addl $4,%esp /* skip NULL pointer */
popl %edx
popl %ecx
popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
jmp ftrace_stub
#endif
.globl ftrace_stub
ftrace_stub:
ret
END(ftrace_caller)
ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/*
* i386 does not save SS and ESP when coming from kernel.
* Instead, to get sp, &regs->sp is used (see ptrace.h).
* Unfortunately, that means eflags must sit at the same location
* where the current return ip is. We move the return ip into the
* ip slot, and move flags into the return ip's old location.
*/
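/*
* After the pushes below the frame mirrors struct pt_regs:
* 0..10*4(%esp) hold ebx..gs, 11*4 is orig_ax, 12*4 is the ip slot,
* 13*4 is the cs slot (holding the saved flags at first) and 14*4 is
* the flags slot (holding mcount's return address at first); the
* moves below shuffle the last two into their proper places.
*/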
pushl 4(%esp) /* save return ip into ip slot */
pushl $0 /* Load 0 into orig_ax */
pushl %gs
pushl %fs
pushl %es
pushl %ds
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
movl 13*4(%esp), %eax /* Get the saved flags */
movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
/* clobbering return ip */
movl $__KERNEL_CS,13*4(%esp)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)
call ftrace_stub
addl $4, %esp /* Skip pt_regs */
movl 14*4(%esp), %eax /* Move flags back into cs */
movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
movl 12*4(%esp), %eax /* Get return ip from regs->ip */
movl %eax, 14*4(%esp) /* Put return ip back for ret */
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
popl %ds
popl %es
popl %fs
popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
jmp ftrace_ret
ftrace_restore_flags:
popf
jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
cmpl $__PAGE_OFFSET, %esp
jb ftrace_stub /* Paging not enabled yet? */
cmpl $0, function_trace_stop
jne ftrace_stub
cmpl $ftrace_stub, ftrace_trace_function
jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpl $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
ret
/* taken from glibc */
trace:
pushl %eax
pushl %ecx
pushl %edx
movl 0xc(%esp), %eax
movl 0x4(%ebp), %edx
subl $MCOUNT_INSN_SIZE, %eax
call *ftrace_trace_function
popl %edx
popl %ecx
popl %eax
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
pushl %eax
pushl %ecx
pushl %edx
movl 0xc(%esp), %edx
lea 0x4(%ebp), %eax
movl (%ebp), %ecx
subl $MCOUNT_INSN_SIZE, %edx
call prepare_ftrace_return
popl %edx
popl %ecx
popl %eax
ret
END(ftrace_graph_caller)
.globl return_to_handler
return_to_handler:
pushl %eax
pushl %edx
movl %ebp, %eax
call ftrace_return_to_handler
movl %eax, %ecx
popl %edx
popl %eax
jmp *%ecx
#endif
#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $trace_do_page_fault
jmp error_code
CFI_ENDPROC
END(trace_page_fault)
#endif
ENTRY(page_fault)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_page_fault
ALIGN
error_code:
/* the function address is in %gs's slot on the stack */
pushl_cfi %fs
/*CFI_REL_OFFSET fs, 0*/
pushl_cfi %es
/*CFI_REL_OFFSET es, 0*/
pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0*/
pushl_cfi %eax
CFI_REL_OFFSET eax, 0
pushl_cfi %ebp
CFI_REL_OFFSET ebp, 0
pushl_cfi %edi
CFI_REL_OFFSET edi, 0
pushl_cfi %esi
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
cld
movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs
UNWIND_ESPFIX_STACK
GS_TO_REG %ecx
movl PT_GS(%esp), %edi # get the function address
movl PT_ORIG_EAX(%esp), %edx # get the error code
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
movl $(__USER_DS), %ecx
movl %ecx, %ds
movl %ecx, %es
TRACE_IRQS_OFF
movl %esp,%eax # pt_regs pointer
call *%edi
jmp ret_from_exception
CFI_ENDPROC
END(page_fault)
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
* that sets up the real kernel stack. Check here, since we can't
* allow the wrong stack to be used.
*
* "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
* already pushed 3 words if it hits on the sysenter instruction:
* eflags, cs and eip.
*
* We just load the right stack, and push the three (known) values
* by hand onto the new stack - while updating the return eip past
* the instruction that would have done it for sysenter.
*/
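/*
* In FIX_STACK below, \offset compensates for the words the CPU has
* pushed on top of the MSR-loaded %esp: 12 for the single
* eflags/cs/eip frame described above, 24 when a debug exception's
* frame also sits beneath the NMI's (see nmi_debug_stack_check).
*/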
.macro FIX_STACK offset ok label
cmpw $__KERNEL_CS, 4(%esp)
jne \ok
\label:
movl TSS_sysenter_sp0 + \offset(%esp), %esp
CFI_DEF_CFA esp, 0
CFI_UNDEFINED eip
pushfl_cfi
pushl_cfi $__KERNEL_CS
pushl_cfi $sysenter_past_esp
CFI_REL_OFFSET eip, 0
.endm
ENTRY(debug)
RING0_INT_FRAME
ASM_CLAC
cmpl $ia32_sysenter_target,(%esp)
jne debug_stack_correct
FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
pushl_cfi $-1 # mark this as an int
SAVE_ALL
TRACE_IRQS_OFF
xorl %edx,%edx # error code 0
movl %esp,%eax # pt_regs pointer
call do_debug
jmp ret_from_exception
CFI_ENDPROC
END(debug)
/*
* NMI is doubly nasty. It can happen _while_ we're handling
* a debug fault, and the debug fault hasn't yet been able to
* clear up the stack. So we first check whether we got an
* NMI on the sysenter entry path, but after that we need to
* check whether we got an NMI on the debug path where the debug
* fault happened on the sysenter path.
*/
ENTRY(nmi)
RING0_INT_FRAME
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
pushl_cfi %eax
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl_cfi %eax
je nmi_espfix_stack
#endif
cmpl $ia32_sysenter_target,(%esp)
je nmi_stack_fixup
pushl_cfi %eax
movl %esp,%eax
/* Do not access memory above the end of our stack page,
* it might not exist.
*/
andl $(THREAD_SIZE-1),%eax
cmpl $(THREAD_SIZE-20),%eax
popl_cfi %eax
jae nmi_stack_correct
cmpl $ia32_sysenter_target,12(%esp)
je nmi_debug_stack_check
nmi_stack_correct:
/* We have a RING0_INT_FRAME here */
pushl_cfi %eax
SAVE_ALL
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
jmp restore_all_notrace
CFI_ENDPROC
nmi_stack_fixup:
RING0_INT_FRAME
FIX_STACK 12, nmi_stack_correct, 1
jmp nmi_stack_correct
nmi_debug_stack_check:
/* We have a RING0_INT_FRAME here */
cmpw $__KERNEL_CS,16(%esp)
jne nmi_stack_correct
cmpl $debug,(%esp)
jb nmi_stack_correct
cmpl $debug_esp_fix_insn,(%esp)
ja nmi_stack_correct
FIX_STACK 24, nmi_stack_correct, 1
jmp nmi_stack_correct
#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
/* We have a RING0_INT_FRAME here.
*
* Build the ss:esp pointer used to lss back to this stack.
*/
pushl_cfi %ss
pushl_cfi %esp
addl $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3
pushl_cfi 16(%esp)
.endr
pushl_cfi %eax
SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack
CFI_ADJUST_CFA_OFFSET -24
jmp irq_return
#endif
CFI_ENDPROC
END(nmi)
ENTRY(int3)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $-1 # mark this as an int
SAVE_ALL
TRACE_IRQS_OFF
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_int3
jmp ret_from_exception
CFI_ENDPROC
END(int3)
ENTRY(general_protection)
RING0_EC_FRAME
pushl_cfi $do_general_protection
jmp error_code
CFI_ENDPROC
END(general_protection)
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_async_page_fault
jmp error_code
CFI_ENDPROC
END(async_page_fault)
#endif