/* NOTE(review): web-scrape artifacts (file-size banner and concatenated line-number run) removed. */
  1. /*
  2. * arch/arm/include/asm/assembler.h
  3. *
  4. * Copyright (C) 1996-2000 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This file contains arm architecture specific defines
  11. * for the different processors.
  12. *
  13. * Do not include any C declarations in this file - it is included by
  14. * assembler source.
  15. */
  16. #ifndef __ASM_ASSEMBLER_H__
  17. #define __ASM_ASSEMBLER_H__
  18. #ifndef __ASSEMBLY__
  19. #error "Only include this from assembly code"
  20. #endif
  21. #include <asm/ptrace.h>
  22. #include <asm/domain.h>
  23. #include <asm/opcodes-virt.h>
  24. #include <asm/asm-offsets.h>
  25. #define IOMEM(x) (x)
/*
 * Endian independent macros for shifting bytes within registers.
 *
 * lspull/lspush name the shift direction used when assembling a word
 * from sub-word pieces, so callers can be endian-agnostic.
 * get_byte_N extracts memory byte N of a word into bits [7:0];
 * put_byte_N positions a byte value into memory byte slot N.
 * Each expands to a bare shifter operand, e.g. "mov r0, r1, get_byte_1".
 */
#ifndef __ARMEB__
/* Little-endian: memory byte 0 is the least significant byte. */
#define lspull lsr
#define lspush lsl
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
#define get_byte_3 lsr #24
#define put_byte_0 lsl #0
#define put_byte_1 lsl #8
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
/* Big-endian: memory byte 0 is the most significant byte. */
#define lspull lsl
#define lspush lsr
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
#define get_byte_3 lsl #0
#define put_byte_0 lsl #24
#define put_byte_1 lsl #16
#define put_byte_2 lsl #8
#define put_byte_3 lsl #0
#endif
/*
 * Select code for any configuration running in BE8 mode:
 * ARM_BE8(code) assembles its argument only when CONFIG_CPU_ENDIAN_BE8
 * is set, and to nothing otherwise.
 */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
/*
 * Data preload for architectures that support it.
 * PLD(code) assembles its argument only on ARMv5 or later (where the
 * pld instruction exists); on older architectures it expands to nothing.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...) code
#else
#define PLD(code...)
#endif
/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 * CALGN(code) assembles its argument only on Feroceon builds.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
/*
 * Enable and disable IRQs, without any irqsoff-tracing hooks.
 * FIQ state is not touched on >= v6; the pre-v6 fallback writes the
 * whole control field and therefore leaves FIQs enabled.
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro disable_irq_notrace
	cpsid i				@ mask IRQs only
	.endm

	.macro enable_irq_notrace
	cpsie i				@ unmask IRQs only
	.endm
#else
	.macro disable_irq_notrace
	msr cpsr_c, #PSR_I_BIT | SVC_MODE	@ assumes SVC mode, FIQs enabled
	.endm

	.macro enable_irq_notrace
	msr cpsr_c, #SVC_MODE			@ assumes SVC mode, FIQs enabled
	.endm
#endif
/*
 * Tell the irqsoff tracer that IRQs have been masked.  Compiles to
 * nothing unless CONFIG_TRACE_IRQFLAGS is enabled.  The AAPCS
 * caller-clobbered registers are preserved around the C call.
 */
	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb sp!, {r0-r3, ip, lr}	@ save caller-clobbered regs
	bl trace_hardirqs_off
	ldmia sp!, {r0-r3, ip, lr}
#endif
	.endm

/*
 * Conditionally tell the tracer that IRQs are being unmasked.
 * \cond is an ARM condition code applied to the bl only.
 */
	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb sp!, {r0-r3, ip, lr}
	bl\cond trace_hardirqs_on
	ldmia sp!, {r0-r3, ip, lr}
#endif
	.endm

	@ Unconditional form: always report hardirqs-on.
	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm
/*
 * Tracing-aware IRQ mask/unmask.  Ordering matters: on disable the
 * hardware is masked before the tracer is told; on enable the tracer
 * is told before the hardware is unmasked, so the tracer never claims
 * IRQs are on while they are still masked.
 */
	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state into \oldcpsr and disable IRQs (with
 * irqsoff tracing).  Note that this macro assumes FIQs are enabled,
 * and that the processor is in SVC mode.
 */
	.macro save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs \oldcpsr, primask		@ v7-M has no CPSR; PRIMASK holds the IRQ mask
#else
	mrs \oldcpsr, cpsr
#endif
	disable_irq
	.endm

	@ As above but without the tracing hook.
	@ NOTE(review): unlike save_and_disable_irqs, this variant reads
	@ cpsr unconditionally — presumably never used on v7-M; confirm.
	.macro save_and_disable_irqs_notrace, oldcpsr
	mrs \oldcpsr, cpsr
	disable_irq_notrace
	.endm
/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr primask, \oldcpsr		@ v7-M: restore PRIMASK
#else
	msr cpsr_c, \oldcpsr		@ restore control field only
#endif
	.endm

/*
 * Tracing-aware restore: report hardirqs-on to the tracer only when
 * the saved state had IRQs unmasked (I bit clear -> eq), then restore.
 */
	.macro restore_irqs, oldcpsr
	tst \oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm
/*
 * Get current thread_info: clear the low 13 bits of sp, i.e. round the
 * stack pointer down to the base of the 8 KiB (2^13 byte) kernel stack,
 * where thread_info lives.
 */
	.macro get_thread_info, rd
 ARM( mov \rd, sp, lsr #13 )		@ \rd = sp >> 13
 THUMB( mov \rd, sp )			@ Thumb-2: no shifted-operand mov, do it in two steps
 THUMB( lsr \rd, \rd, #13 )
	mov \rd, \rd, lsl #13		@ \rd = sp & ~0x1fff
	.endm
/*
 * Increment/decrement the preempt count stored in thread_info.
 * \ti is a register holding the thread_info pointer, \tmp is scratch.
 * All three macros compile to nothing when preempt counting is off.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro inc_preempt_count, ti, tmp
	ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
	add \tmp, \tmp, #1 @ increment it
	str \tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro dec_preempt_count, ti, tmp
	ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
	sub \tmp, \tmp, #1 @ decrement it
	str \tmp, [\ti, #TI_PREEMPT]
	.endm

	@ Convenience form: derive thread_info from sp first (clobbers \ti).
	.macro dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro inc_preempt_count, ti, tmp
	.endm

	.macro dec_preempt_count, ti, tmp
	.endm

	.macro dec_preempt_count_ti, ti, tmp
	.endm
#endif
/*
 * USER(insn): mark a user-space access instruction with an exception
 * table entry.  On a fault at 9999, the fixup jumps to local label
 * 9001, which the *caller* must define as the abort/fixup path.
 */
#define USER(x...) \
9999: x; \
 .pushsection __ex_table,"a"; \
 .align 3; \
 .long 9999b,9001f; \
 .popsection
/*
 * ALT_SMP(insn) / ALT_UP(insn): emit the SMP form of an instruction
 * inline, and record its address plus a 4-byte UP replacement in the
 * ".alt.smp.init" section (presumably consumed by boot-time patching
 * when an SMP kernel runs on a UP system — confirm against the
 * kernel's alternatives machinery).  On !SMP builds the UP form is
 * emitted directly and the SMP form disappears.
 */
#ifdef CONFIG_SMP
#define ALT_SMP(instr...) \
9998: instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...) \
 .pushsection ".alt.smp.init", "a" ;\
 .long 9998b ;\
9997: instr ;\
 .if . - 9997b != 4 ;\
 .error "ALT_UP() content must assemble to exactly 4 bytes";\
 .endif ;\
 .popsection
/* Branch variant: replacement is a wide branch to \label. */
#define ALT_UP_B(label) \
 .equ up_b_offset, label - 9998b ;\
 .pushsection ".alt.smp.init", "a" ;\
 .long 9998b ;\
 W(b) . + up_b_offset ;\
 .popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
  227. /*
  228. * Instruction barrier
  229. */
  230. .macro instr_sync
  231. #if __LINUX_ARM_ARCH__ >= 7
  232. isb
  233. #elif __LINUX_ARM_ARCH__ == 6
  234. mcr p15, 0, r0, c7, c5, 4
  235. #endif
  236. .endm
/*
 * SMP data memory barrier.  Emits nothing on !SMP kernels; on SMP it
 * is an ALT_SMP/ALT_UP pair so the barrier can be replaced by a nop on
 * UP systems.  \mode selects the encoding: "arm" for narrow, anything
 * else uses W() wide encodings (needed so the SMP and UP alternatives
 * stay 4 bytes each under Thumb-2).
 */
	.macro smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb ish)
	.else
	ALT_SMP(W(dmb) ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)			@ UP replacement: barrier not needed
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
#if defined(CONFIG_CPU_V7M)
/*
 * setmode is used to assert to be in svc mode during boot. For v7-M
 * this is done in __v7m_setup, so setmode can be empty here.
 */
	.macro setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	@ Thumb-2 msr has no immediate form, so stage \mode via \reg.
	.macro setmode, mode, reg
	mov \reg, #\mode
	msr cpsr_c, \reg
	.endm
#else
	@ ARM msr takes an immediate directly; \reg is unused here.
	.macro setmode, mode, reg
	msr cpsr_c, #\mode
	.endm
#endif
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 */
	.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs \reg , cpsr
	eor \reg, \reg, #HYP_MODE	@ Z will be set below iff we are in HYP
	tst \reg, #MODE_MASK
	bic \reg , \reg , #MODE_MASK
	orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
 THUMB( orr \reg , \reg , #PSR_T_BIT )
	bne 1f				@ not in HYP: plain cpsr write suffices
	@ In HYP mode: must leave via exception return into SVC.
	orr \reg, \reg, #PSR_A_BIT
	adr lr, BSYM(2f)		@ resume at 2: after the eret
	msr spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
	.endm
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants.
 * \t defaults to TUSER(), which appends the unprivileged ("t") suffix
 * so the access is performed with user-mode permissions.  Each access
 * gets an __ex_table entry pointing at the caller-supplied \abort fixup.
 */
#ifdef CONFIG_THUMB2_KERNEL
	@ Thumb-2: single access at [\ptr, #\off]; \inc selects byte (1)
	@ or word (4) access.  .w forces the 32-bit encoding so the
	@ faulting address in the exception table is unambiguous in size.
	.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if \inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif \inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error "Unsupported inc macro argument"
	.endif
	.pushsection __ex_table,"a"
	.align 3
	.long 9999b, \abort
	.popsection
	.endm

	@ \rept accesses (1 or 2), then advance \ptr past them.
	.macro usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc \cond,al
	.if \rept == 1
	itt \cond
	.elseif \rept == 2
	ittt \cond
	.else
	.error "Unsupported rept macro argument"
	.endif
	.endif
	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if \rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif
	add\cond \ptr, #\rept * \inc
	.endm
#else /* !CONFIG_THUMB2_KERNEL */
	@ ARM: post-indexed addressing increments \ptr as part of each
	@ access, so the loop simply repeats the instruction \rept times.
	.macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept \rept
9999:
	.if \inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif \inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error "Unsupported inc macro argument"
	.endif
	.pushsection __ex_table,"a"
	.align 3
	.long 9999b, \abort
	.popsection
	.endr
	.endm
#endif /* CONFIG_THUMB2_KERNEL */
@ Convenience wrappers: user-space store/load with sane defaults
	@ (unconditional, one access, fixup at the caller's 9001 label).
	.macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
/*
 * Utility macro for declaring string literals: emits a NUL-terminated
 * string at label \name with object type and size set for the symbol
 * table.
 */
	.macro string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
/*
 * Range-check a user access of \size bytes starting at \addr against
 * \limit, branching to \bad if the end of the range wraps or exceeds
 * the limit.  \tmp is scratch.  When CONFIG_CPU_USE_DOMAINS is set the
 * ldrt/strt accesses are checked by the domain hardware instead, so
 * the macro is empty.
 */
	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds \tmp, \addr, #\size - 1	@ \tmp = last byte; C set on wrap
	sbcccs \tmp, \tmp, \limit	@ if no wrap: compare end vs limit
	bcs \bad			@ C set (wrap or end > limit) -> bad
#endif
	.endm
  383. #endif /* __ASM_ASSEMBLER_H__ */