/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>

__PT_R0      = __PT_GPRS
__PT_R1      = __PT_GPRS + 8
__PT_R2      = __PT_GPRS + 16
__PT_R3      = __PT_GPRS + 24
__PT_R4      = __PT_GPRS + 32
__PT_R5      = __PT_GPRS + 40
__PT_R6      = __PT_GPRS + 48
__PT_R7      = __PT_GPRS + 56
__PT_R8      = __PT_GPRS + 64
__PT_R9      = __PT_GPRS + 72
__PT_R10     = __PT_GPRS + 80
__PT_R11     = __PT_GPRS + 88
__PT_R12     = __PT_GPRS + 96
__PT_R13     = __PT_GPRS + 104
__PT_R14     = __PT_GPRS + 112
__PT_R15     = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
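/*
 * Worked example (a sketch; both values are configuration dependent):
 * with PAGE_SHIFT = 12 and THREAD_SIZE_ORDER = 2 this yields
 * STACK_SHIFT = 14, i.e. 16 KB kernel stacks, and STACK_INIT then
 * points just below the pt_regs and stack frame overhead reserved at
 * the top of each stack.
 */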
_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

#define BASED(name) name-cleanup_critical(%r13)
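/*
 * Usage sketch: the entry paths that may run cleanup_critical first do
 * "larl %r13,cleanup_critical", so an operand such as
 * BASED(.Lcritical_start) resolves to a %r13-relative address of
 * .Lcritical_start, e.g. "clg %r14,BASED(.Lcritical_start)".
 */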
	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	2f
	CHECK_STACK 1<<STACK_SHIFT,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
1:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

	.macro	UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
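/*
 * A worked reading of UPDATE_VTIME (the CPU timer counts down, so
 * "earlier - later" is elapsed time):
 *	user time   += __LC_EXIT_TIMER - \enter_timer
 *	system time += __LC_LAST_UPDATE_TIMER - __LC_EXIT_TIMER
 * i.e. everything since the last kernel exit is accounted to the user,
 * the preceding kernel span to the system, and __LC_LAST_UPDATE_TIMER
 * is reset to the entry timestamp for the next round.
 */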
	.macro	REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm
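/*
 * Note (my reading, not from the original comments): %r8 holds the
 * first half of the interrupted PSW, so byte 0 of __LC_RETURN_PSW is
 * the old system mask; "ni ...,0xbf" clears PSW bit 1 (the PER mask)
 * and "ssm" then restores the old interrupt state with PER disabled.
 */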
	.macro	STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * Mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
	.macro	TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
		.if (\mask & 0xff)
			.error "Mask exceeds byte boundary"
		.endif
		TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
		.exitm
	.endif
	.ifeq \mask
		.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
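/*
 * Example expansion (a sketch): s390 is big endian, so the low-order
 * byte of an 8-byte field lives at offset 7. Thus
 *	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 * with a single-byte mask becomes "tm 7+__LC_CPU_FLAGS,_CIF_FPU",
 * while a mask like 0x0100 recurses once and tests offset 6 instead.
 */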
	.macro BPOFF
	.pushsection .altinstr_replacement, "ax"
660:	.long	0xb2e8c000
	.popsection
661:	.long	0x47000000
	.pushsection .altinstructions, "a"
	.long 661b - .
	.long 660b - .
	.word 82
	.byte 4
	.byte 4
	.popsection
	.endm

	.macro BPON
	.pushsection .altinstr_replacement, "ax"
662:	.long	0xb2e8d000
	.popsection
663:	.long	0x47000000
	.pushsection .altinstructions, "a"
	.long 663b - .
	.long 662b - .
	.word 82
	.byte 4
	.byte 4
	.popsection
	.endm

	.macro BPENTER tif_ptr,tif_mask
	.pushsection .altinstr_replacement, "ax"
662:	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
	.popsection
664:	TSTMSK	\tif_ptr,\tif_mask
	jz	. + 8
	.long	0xb2e8d000
	.pushsection .altinstructions, "a"
	.long 664b - .
	.long 662b - .
	.word 82
	.byte 12
	.byte 12
	.popsection
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	.pushsection .altinstr_replacement, "ax"
662:	jnz	. + 8
	.long	0xb2e8d000
	.popsection
664:	jz	. + 8
	.long	0xb2e8c000
	.pushsection .altinstructions, "a"
	.long 664b - .
	.long 662b - .
	.word 82
	.byte 8
	.byte 8
	.popsection
	.endm

#ifdef CONFIG_EXPOLINE

	.macro GEN_BR_THUNK name,reg,tmp
	.section .text.\name,"axG",@progbits,\name,comdat
	.globl \name
	.hidden \name
	.type \name,@function
\name:
	CFI_STARTPROC
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
	exrl	0,0f
#else
	larl	\tmp,0f
	ex	0,0(\tmp)
#endif
	j	.
0:	br	\reg
	CFI_ENDPROC
	.endm

	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
	GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
	GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11

	.macro BASR_R14_R9
0:	brasl	%r14,__s390x_indirect_jump_r1use_r9
	.pushsection .s390_indirect_branches,"a",@progbits
	.long	0b-.
	.popsection
	.endm

	.macro BR_R1USE_R14
0:	jg	__s390x_indirect_jump_r1use_r14
	.pushsection .s390_indirect_branches,"a",@progbits
	.long	0b-.
	.popsection
	.endm

	.macro BR_R11USE_R14
0:	jg	__s390x_indirect_jump_r11use_r14
	.pushsection .s390_indirect_branches,"a",@progbits
	.long	0b-.
	.popsection
	.endm

#else	/* CONFIG_EXPOLINE */

	.macro BASR_R14_R9
	basr	%r14,%r9
	.endm

	.macro BR_R1USE_R14
	br	%r14
	.endm

	.macro BR_R11USE_R14
	br	%r14
	.endm

#endif	/* CONFIG_EXPOLINE */
	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to keep __switch_to from starting right at
	 * the beginning of the kprobes text section. Otherwise several
	 * symbols would share the same address, and e.g. objdump would pick
	 * an arbitrary one of them when disassembling this code.
	 * With the nop in between, the __switch_to symbol is unique again.
	 */
	nop	0
ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_R1USE_R14

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	lg	%r5,0(%r4,%r3)			# start of kernel stack of next
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lgr	%r15,%r5
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	jz	0f
	.insn	s,0xb2800000,__LC_LPP		# set program parameter
0:	BR_R1USE_R14

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
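/*
 * Seen from C this corresponds to a prototype along the lines of
 * (a sketch, assuming the declaration used by the s390 KVM code):
 *	int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa);
 * where the returned int is the exit reason code that sie_exit loads
 * from __SF_EMPTY+16(%r15) below.
 */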
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
	mvc	__SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
.Lsie_exit:
	BPOFF
	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
	BR_R1USE_R14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */
ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_R14_R9				# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lsysc_asce
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return
.Lsysc_set_fs_fixup:
#endif
	larl	%r14,.Lsysc_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,2
	lgf	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_R14_R9			# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_R14_R9
	j	.Lsysc_tracenogo
/*
 * Program check handler routine
 */
ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	0f
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	4f
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	stg	%r10,__THREAD_last_break(%r14)
4:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	5f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lpgm_return
	lgf	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_R14_R9			# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:
#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return
.Lio_set_fs_fixup:
#endif
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
#ifdef CONFIG_SMP
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	BR_R1USE_R14
.Lpsw_idle_end:
/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical
 * section cleanup assures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger a
 * lazy restore of the register contents at return from io or a system call.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	# no -> store FP regs
	VSTM	%v0,%v15,0,%r3		# vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	# vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	# -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	BR_R1USE_R14
.Lsave_fpu_regs_end:
EXPORT_SYMBOL(save_fpu_regs)
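/*
 * C-side usage sketch (assuming the usual declaration of
 * save_fpu_regs() in the s390 FPU headers): kernel code that wants to
 * clobber the FPU/vector registers calls save_fpu_regs() first, so the
 * user register contents land in the thread structure and CIF_FPU
 * marks them for a lazy reload on the next return to user space.
 */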
/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There is a special calling convention to fit into sysc and io return work:
 *	%r15: <kernel stack>
 * The function requires:
 *	%r4
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	BR_R1USE_R14
.Lload_fpu_regs_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1)	# validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW
.Lmcck_panic:
	lg	%r15,__LC_PANIC_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	jz	0f
	.insn	s,0xb2800000,__LC_LPP
0:	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif

cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	BR_R11USE_R14

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# is this a normal interrupt?
	je	1f
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	jh	1f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:	BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_R11USE_R14
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved register r12 task struct pointer
	stg	%r12,32(%r11)
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	BR_R11USE_R14
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:
	.quad	__TASK_thread

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	BR_R11USE_R14

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_R11USE_R14
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	BR_R11USE_R14

.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_R11USE_R14
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4

.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
#ifdef CONFIG_SMP
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
#endif
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	BR_R11USE_R14
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	BR_R11USE_R14

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	BR_R11USE_R14

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
	.quad	.Lsie_entry
.Lsie_crit_mcck_length:
	.quad	.Lsie_skip - .Lsie_entry
#endif

	.section .rodata, "a"
#define SYSCALL(esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif