ppc_asm.h

/*
 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
 */
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H

#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>

#ifdef __ASSEMBLY__

#define SZL			(BITS_PER_LONG/8)

/*
 * Stuff for accurate CPU time accounting.
 * These macros handle transitions between user and system state
 * in exception entry and exit and accumulate time to the
 * user_time and system_time fields in the paca.
 */
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)				\
	MFTB(ra);			/* get timebase */		\
	PPC_LL	rb, ACCOUNT_STARTTIME_USER(ptr);			\
	PPC_STL	ra, ACCOUNT_STARTTIME(ptr);				\
	subf	rb,rb,ra;		/* subtract start value */	\
	PPC_LL	ra, ACCOUNT_USER_TIME(ptr);				\
	add	ra,ra,rb;		/* add on to user time */	\
	PPC_STL	ra, ACCOUNT_USER_TIME(ptr)
#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)				\
	MFTB(ra);			/* get timebase */		\
	PPC_LL	rb, ACCOUNT_STARTTIME(ptr);				\
	PPC_STL	ra, ACCOUNT_STARTTIME_USER(ptr);			\
	subf	rb,rb,ra;		/* subtract start value */	\
	PPC_LL	ra, ACCOUNT_SYSTEM_TIME(ptr);				\
	add	ra,ra,rb;		/* add on to system time */	\
	PPC_STL	ra, ACCOUNT_SYSTEM_TIME(ptr)

#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME						\
BEGIN_FW_FTR_SECTION;							\
	beq	33f;							\
	/* from user - see if there are any DTL entries to process */	\
	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
	addi	r10,r10,LPPACA_DTLIDX;					\
	LDX_BE	r10,0,r10;		/* get log write index */	\
	cmpd	cr1,r11,r10;						\
	beq+	cr1,33f;						\
	bl	accumulate_stolen_time;					\
	ld	r12,_MSR(r1);						\
	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \
33:									\
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)

#else  /* CONFIG_PPC_SPLPAR */
#define ACCOUNT_STOLEN_TIME

#endif /* CONFIG_PPC_SPLPAR */

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
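
/*
 * Usage sketch (registers are illustrative, not prescriptive): 64-bit
 * exception entry/exit paths invoke these with the paca pointer and two
 * scratch GPRs, e.g.
 *
 *	ACCOUNT_CPU_USER_ENTRY(r13, r11, r12)
 *
 * The 'ptr' argument is whatever base register addresses the ACCOUNT_*
 * fields; 'ra' and 'rb' are clobbered.
 */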
/*
 * Macros for storing registers into and loading registers from
 * exception frames.
 */
#ifdef __powerpc64__
#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
#else
#define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
#define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
				SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_GPR(13, base); REST_8GPRS(14, base); \
				REST_10GPRS(22, base)
#endif

#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
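
/*
 * For example, SAVE_NVGPRS(r1) stores every non-volatile GPR (r14-r31,
 * plus r13 on 32-bit) into the exception frame addressed by r1 at
 * offsets GPR0 + SZL*n, and REST_NVGPRS(r1) reloads them.
 */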
#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)

#define SAVE_VR(n,b,base)	li b,16*(n); stvx n,base,b
#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)

#ifdef __BIG_ENDIAN__
#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
#else
#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
					STXVD2X(n,b,base);	\
					XXSWAPD(n,n)
#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
					XXSWAPD(n,n)
#endif

/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)

/*
 * b = base register for addressing, o = base offset from register of 1st EVR
 * n = first EVR, s = scratch
 */
#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
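
/*
 * Worked expansion (for reference): REST_EVR(14,s,b,o) becomes
 *	lwz s,o+4*(14)(b); evmergelo 14,s,14
 * i.e. the saved upper 32 bits are loaded into the scratch register and
 * merged back above the (already current) low word of r14.
 */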
/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW	or	31,31,31	# very low priority
#define HMT_LOW		or	1,1,1
#define HMT_MEDIUM_LOW  or	6,6,6		# medium low priority
#define HMT_MEDIUM	or	2,2,2
#define HMT_MEDIUM_HIGH or	5,5,5		# medium high priority
#define HMT_HIGH	or	3,3,3
#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only

#ifdef CONFIG_PPC64
#define ULONG_SIZE 	8
#else
#define ULONG_SIZE	4
#endif
#define __VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)

#ifdef __KERNEL__
#ifdef CONFIG_PPC64

#define STACKFRAMESIZE 256
#define __STK_REG(i)   (112 + ((i)-14)*8)
#define STK_REG(i)     __STK_REG(__REG_##i)

#ifdef PPC64_ELF_ABI_v2
#define STK_GOT		24
#define __STK_PARAM(i)	(32 + ((i)-3)*8)
#else
#define STK_GOT		40
#define __STK_PARAM(i)	(48 + ((i)-3)*8)
#endif
#define STK_PARAM(i)	__STK_PARAM(__REG_##i)
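
/*
 * Example (illustrative): a routine that needs r14 might do
 *	stdu	r1,-STACKFRAMESIZE(r1)
 *	std	r14,STK_REG(R14)(r1)	(slot 112 + (14-14)*8 = 112)
 * and STK_PARAM(R3) names the home slot of the first argument register
 * (offset 32 under ELFv2, 48 under ELFv1).
 */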
#ifdef PPC64_ELF_ABI_v2

#define _GLOBAL(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define _GLOBAL_TOC(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name: \
0:	addis r2,r12,(.TOC.-0b)@ha; \
	addi r2,r2,(.TOC.-0b)@l; \
	.localentry name,.-name

#define DOTSYM(a)	a

#else

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#define _GLOBAL(name) \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.pushsection ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.popsection; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define DOTSYM(a)	GLUE(.,a)

#endif

#else /* 32-bit */

#define _ENTRY(n)	\
	.globl n;	\
n:

#define _GLOBAL(n)	\
	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
	.globl n;	\
n:

#define _GLOBAL_TOC(name) _GLOBAL(name)

#endif
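
/*
 * Usage sketch (the name is hypothetical):
 *
 *	_GLOBAL(my_asm_func)
 *		...
 *		blr
 *
 * Use _GLOBAL_TOC() instead when the routine needs r2 (the TOC pointer)
 * established on entry under ELFv2, and call it via bl DOTSYM(my_asm_func)
 * so the text entry point is used under the ELFv1 descriptor convention.
 */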
/*
 * __kprobes (the C annotation) puts the symbol into the .kprobes.text
 * section, which gets emitted at the end of regular text.
 *
 * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
 * a blacklist. The former is for core kprobe functions/data, the
 * latter is for those that incidentally must be excluded from probing
 * and allows them to be linked at a more optimal location within text.
 */
#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE_SYMBOL(entry)			\
	.pushsection "_kprobe_blacklist","aw";		\
	PPC_LONG (entry) ;				\
	.popsection
#else
#define _ASM_NOKPROBE_SYMBOL(entry)
#endif

#define FUNC_START(name)	_GLOBAL(name)
#define FUNC_END(name)

/*
 * LOAD_REG_IMMEDIATE(rn, expr)
 *   Loads the value of the constant expression 'expr' into register 'rn'
 *   using immediate instructions only.  Use this when it's important not
 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
 *   valid) and when 'expr' is a constant or absolute address.
 *
 * LOAD_REG_ADDR(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   you don't particularly need immediate instructions only, but you need
 *   the whole address in one register (e.g. it's a structure address and
 *   you want to access various offsets within it).  On ppc32 this is
 *   identical to LOAD_REG_IMMEDIATE.
 *
 * LOAD_REG_ADDR_PIC(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   the kernel doesn't run at the linked or relocated address.  Please
 *   note that this macro will clobber the lr register.
 *
 * LOAD_REG_ADDRBASE(rn, name)
 * ADDROFF(name)
 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
 *   in size, so is suitable for use directly as an offset in load and store
 *   instructions.  Use this when loading/storing a single word or less as:
 *      LOAD_REG_ADDRBASE(rX, name)
 *      ld	rY,ADDROFF(name)(rX)
 */
/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name)		\
	bl	0f;				\
0:	mflr	reg;				\
	addis	reg,reg,(name - 0b)@ha;		\
	addi	reg,reg,(name - 0b)@l;

#ifdef __powerpc64__
#ifdef HAVE_AS_ATHIGH
#define __AS_ATHIGH high
#else
#define __AS_ATHIGH h
#endif
#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis     reg,(expr)@highest;		\
	ori     reg,reg,(expr)@higher;		\
	rldicr  reg,reg,32,31;			\
	oris    reg,reg,(expr)@__AS_ATHIGH;	\
	ori     reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)			\
	ld	reg,name@got(r2)

#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
#define ADDROFF(name)			0

/* offsets for stack frame layout */
#define LRSAVE	16

#else /* 32-bit */

#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis	reg,(expr)@ha;			\
	addi	reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE(reg, name)

#define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
#define ADDROFF(name)			name@l

/* offsets for stack frame layout */
#define LRSAVE	4

#endif
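
/*
 * Worked example (for reference): on 64-bit,
 * LOAD_REG_IMMEDIATE(r5, 0x1122334455667788) expands to
 *	lis	r5,0x1122	(@highest, value bits 48-63)
 *	ori	r5,r5,0x3344	(@higher,  value bits 32-47)
 *	rldicr	r5,r5,32,31	(shift those into the top half)
 *	oris	r5,r5,0x5566	(@__AS_ATHIGH, bits 16-31)
 *	ori	r5,r5,0x7788	(@l, bits 0-15)
 */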
/* various errata or part fixups */
#ifdef CONFIG_PPC601_SYNC_FIX
#define SYNC				\
BEGIN_FTR_SECTION			\
	sync;				\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define SYNC_601			\
BEGIN_FTR_SECTION			\
	sync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601			\
BEGIN_FTR_SECTION			\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#else
#define	SYNC
#define SYNC_601
#define ISYNC_601
#endif

#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest)			\
90:	mfspr dest, SPRN_TBRL;		\
BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#elif defined(CONFIG_8xx)
#define MFTB(dest)			mftb dest
#else
#define MFTB(dest)			mfspr dest, SPRN_TBRL
#endif

#ifndef CONFIG_SMP
#define TLBSYNC
#else /* CONFIG_SMP */
/* tlbsync is not implemented on 601 */
#define TLBSYNC				\
BEGIN_FTR_SECTION			\
	tlbsync;			\
	sync;				\
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif

#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS)			\
	BEGIN_FTR_SECTION_NESTED(848);	\
	mtcrf	(FXM), RS;		\
	FTR_SECTION_ELSE_NESTED(848);	\
	mtocrf (FXM), RS;		\
	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
#endif
/*
 * This instruction is not implemented on the PPC 603 or 601; however, on
 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
 * All of these instructions exist on the 8xx: they have magical powers,
 * and they must be used.
 */
#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
#define tlbia					\
	li	r4,1024;			\
	mtctr	r4;				\
	lis	r4,KERNELBASE@h;		\
	.machine push;				\
	.machine "power4";			\
0:	tlbie	r4;				\
	.machine pop;				\
	addi	r4,r4,0x1000;			\
	bdnz	0b
#endif

#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
#define PPC440EP_ERR42
#endif
/* The following stops all load and store data streams associated with stream
 * ID (i.e. streams created explicitly).  The embedded and server mnemonics
 * for dcbt are different, so we use machine "power4" here explicitly.
 */
#define DCBT_STOP_ALL_STREAM_IDS(scratch)	\
.machine push ;					\
.machine "power4" ;				\
	lis	scratch,0x60000000@h;		\
	dcbt	r0,scratch,0b01010;		\
.machine pop
/*
 * toreal/fromreal/tophys/tovirt macros.  32-bit BookE makes them
 * keep the address intact to be compatible with code shared with
 * 32-bit classic.
 *
 * On the other hand, I find it useful to have them behave as expected
 * by their name (i.e. always do the addition) on 64-bit BookE.
 */
#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)

/*
 * We use addis to ensure compatibility with the "classic" ppc versions of
 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
 * converting the address in r0, and so this version has to do that too
 * (i.e. set register rd to 0 when rs == 0).
 */
#define tophys(rd,rs)				\
	addis	rd,rs,0

#define tovirt(rd,rs)				\
	addis	rd,rs,0

#elif defined(CONFIG_PPC64)
#define toreal(rd)		/* we can access c000... in real mode */
#define fromreal(rd)

#define tophys(rd,rs)				\
	clrldi	rd,rs,2

#define tovirt(rd,rs)				\
	rotldi	rd,rs,16;			\
	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
	rotldi	rd,rd,48

#else
/*
 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
 * physical base address of RAM at compile time.
 */
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)

#define tophys(rd,rs)				\
0:	addis	rd,rs,-PAGE_OFFSET@h;		\
	.section ".vtop_fixup","aw";		\
	.align  1;				\
	.long   0b;				\
	.previous

#define tovirt(rd,rs)				\
0:	addis	rd,rs,PAGE_OFFSET@h;		\
	.section ".ptov_fixup","aw";		\
	.align  1;				\
	.long   0b;				\
	.previous
#endif
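
/*
 * Usage sketch (label and registers are illustrative): early 32-bit boot
 * code running with the MMU off converts a virtual pointer in place
 * before dereferencing it:
 *
 *	lis	r8,some_kernel_var@ha
 *	addi	r8,r8,some_kernel_var@l
 *	tophys(r8,r8)
 *	lwz	r9,0(r8)
 */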
#ifdef CONFIG_PPC_BOOK3S_64
#define RFI		rfid
#define MTMSRD(r)	mtmsrd	r
#define MTMSR_EERI(reg)	mtmsrd	reg,1
#else
#ifndef CONFIG_40x
#define	RFI		rfi
#else
#define RFI		rfi; b .	/* Prevent prefetch past rfi */
#endif
#define MTMSRD(r)	mtmsr	r
#define MTMSR_EERI(reg)	mtmsr	reg
#endif

#endif /* __KERNEL__ */
/* The boring bits... */

/* Condition Register Bit Fields */

#define	cr0	0
#define	cr1	1
#define	cr2	2
#define	cr3	3
#define	cr4	4
#define	cr5	5
#define	cr6	6
#define	cr7	7
/*
 * General Purpose Registers (GPRs)
 *
 * The lower case r0-r31 should be used in preference to the upper
 * case R0-R31 as they provide more error checking in the assembler.
 * Use R0-R31 only when really necessary.
 */
#define	r0	%r0
#define	r1	%r1
#define	r2	%r2
#define	r3	%r3
#define	r4	%r4
#define	r5	%r5
#define	r6	%r6
#define	r7	%r7
#define	r8	%r8
#define	r9	%r9
#define	r10	%r10
#define	r11	%r11
#define	r12	%r12
#define	r13	%r13
#define	r14	%r14
#define	r15	%r15
#define	r16	%r16
#define	r17	%r17
#define	r18	%r18
#define	r19	%r19
#define	r20	%r20
#define	r21	%r21
#define	r22	%r22
#define	r23	%r23
#define	r24	%r24
#define	r25	%r25
#define	r26	%r26
#define	r27	%r27
#define	r28	%r28
#define	r29	%r29
#define	r30	%r30
#define	r31	%r31

/* Floating Point Registers (FPRs) */

#define	fr0	0
#define	fr1	1
#define	fr2	2
#define	fr3	3
#define	fr4	4
#define	fr5	5
#define	fr6	6
#define	fr7	7
#define	fr8	8
#define	fr9	9
#define	fr10	10
#define	fr11	11
#define	fr12	12
#define	fr13	13
#define	fr14	14
#define	fr15	15
#define	fr16	16
#define	fr17	17
#define	fr18	18
#define	fr19	19
#define	fr20	20
#define	fr21	21
#define	fr22	22
#define	fr23	23
#define	fr24	24
#define	fr25	25
#define	fr26	26
#define	fr27	27
#define	fr28	28
#define	fr29	29
#define	fr30	30
#define	fr31	31

/* AltiVec Registers (VPRs) */

#define	v0	0
#define	v1	1
#define	v2	2
#define	v3	3
#define	v4	4
#define	v5	5
#define	v6	6
#define	v7	7
#define	v8	8
#define	v9	9
#define	v10	10
#define	v11	11
#define	v12	12
#define	v13	13
#define	v14	14
#define	v15	15
#define	v16	16
#define	v17	17
#define	v18	18
#define	v19	19
#define	v20	20
#define	v21	21
#define	v22	22
#define	v23	23
#define	v24	24
#define	v25	25
#define	v26	26
#define	v27	27
#define	v28	28
#define	v29	29
#define	v30	30
#define	v31	31

/* VSX Registers (VSRs) */

#define	vs0	0
#define	vs1	1
#define	vs2	2
#define	vs3	3
#define	vs4	4
#define	vs5	5
#define	vs6	6
#define	vs7	7
#define	vs8	8
#define	vs9	9
#define	vs10	10
#define	vs11	11
#define	vs12	12
#define	vs13	13
#define	vs14	14
#define	vs15	15
#define	vs16	16
#define	vs17	17
#define	vs18	18
#define	vs19	19
#define	vs20	20
#define	vs21	21
#define	vs22	22
#define	vs23	23
#define	vs24	24
#define	vs25	25
#define	vs26	26
#define	vs27	27
#define	vs28	28
#define	vs29	29
#define	vs30	30
#define	vs31	31
#define	vs32	32
#define	vs33	33
#define	vs34	34
#define	vs35	35
#define	vs36	36
#define	vs37	37
#define	vs38	38
#define	vs39	39
#define	vs40	40
#define	vs41	41
#define	vs42	42
#define	vs43	43
#define	vs44	44
#define	vs45	45
#define	vs46	46
#define	vs47	47
#define	vs48	48
#define	vs49	49
#define	vs50	50
#define	vs51	51
#define	vs52	52
#define	vs53	53
#define	vs54	54
#define	vs55	55
#define	vs56	56
#define	vs57	57
#define	vs58	58
#define	vs59	59
#define	vs60	60
#define	vs61	61
#define	vs62	62
#define	vs63	63

/* SPE Registers (EVPRs) */

#define	evr0	0
#define	evr1	1
#define	evr2	2
#define	evr3	3
#define	evr4	4
#define	evr5	5
#define	evr6	6
#define	evr7	7
#define	evr8	8
#define	evr9	9
#define	evr10	10
#define	evr11	11
#define	evr12	12
#define	evr13	13
#define	evr14	14
#define	evr15	15
#define	evr16	16
#define	evr17	17
#define	evr18	18
#define	evr19	19
#define	evr20	20
#define	evr21	21
#define	evr22	22
#define	evr23	23
#define	evr24	24
#define	evr25	25
#define	evr26	26
#define	evr27	27
#define	evr28	28
#define	evr29	29
#define	evr30	30
#define	evr31	31
/* some stab codes */
#define N_FUN	36
#define N_RSYM	64
#define N_SLINE	68
#define N_SO	100
/*
 * Create an endian fixup trampoline
 *
 * This starts with a "tdi 0,0,0x48" instruction which is
 * essentially a "trap never", and thus akin to a nop.
 *
 * The opcode for this instruction, read with the wrong endianness,
 * however, results in a "b . + 8".
 *
 * So essentially we use that trick to execute the following
 * trampoline in "reverse endian" if we are running with the
 * MSR_LE bit set the "wrong" way for whatever endianness the
 * kernel is built for.
 */
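
/*
 * For reference: "tdi 0,0,0x48" assembles to 0x08000048; the same word
 * with its bytes reversed is 0x48000008, i.e. "b . + 8".  A CPU fetching
 * in the wrong endianness therefore branches over the following
 * "b $+36" and straight into the byte-reversed .long trampoline.
 */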
#ifdef CONFIG_PPC_BOOK3E
#define FIXUP_ENDIAN
#else
#define FIXUP_ENDIAN						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     $+36;	  /* Skip trampoline if endian is good	*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x1c004a39; /* addi r10,r10,28			*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0xa6035a7d; /* mtsrr0 r10				*/ \
	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
	.long 0x2400004c  /* rfid				*/
#endif /* !CONFIG_PPC_BOOK3E */

#endif /*  __ASSEMBLY__ */
/*
 * Helper macro for exception table entries
 */
#define EX_TABLE(_fault, _target)		\
	stringify_in_c(.section __ex_table,"a";)\
	stringify_in_c(.balign 4;)		\
	stringify_in_c(.long (_fault) - . ;)	\
	stringify_in_c(.long (_target) - . ;)	\
	stringify_in_c(.previous)
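
/*
 * Usage sketch (labels are hypothetical): pair a faulting access with
 * its fixup target, from C inline asm or a .S file:
 *
 * 1:	lwz	r4,0(r3)
 *	...
 *	EX_TABLE(1b, .Lfixup)
 */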
#endif /* _ASM_POWERPC_PPC_ASM_H */