unaligned.c

/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that handles unaligned exceptions.
 */

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/extable.h>
#include <linux/compat.h>
#include <linux/prctl.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/spr_def.h>
#include <arch/opcode.h>

/*
 * This file handles unaligned exceptions for tile-Gx. The tilepro's
 * unaligned exceptions are handled in single_step.c.
 */

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;
	if (kstrtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);

unsigned int unaligned_fixup_count;

#ifdef __tilegx__

/*
 * Unaligned data JIT fixup code fragment. The reserved space is 128 bytes.
 * The first 64-bit word saves the fault PC address, the second word is the
 * faulting instruction bundle, followed by 14 JIT bundles.
 */
struct unaligned_jit_fragment {
	unsigned long pc;
	tilegx_bundle_bits bundle;
	tilegx_bundle_bits insn[14];
};
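
/*
 * Layout of one JIT slot, as implied by the sizes above: 8 bytes for the
 * fault pc, 8 bytes for the saved fault bundle, then 14 * 8 = 112 bytes of
 * rewritten bundles, 128 bytes in total.  The generated sequences below
 * always end with an "iret" bundle that returns to the bundle following
 * the faulting one (SPR_EX_CONTEXT_0_0 is set to pc + 8 before entry).
 */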

/*
 * Check whether a nop or fnop is in the bundle's X0 pipeline.
 */
static bool is_bundle_x0_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_X0(bundle) ==
		  NOP_UNARY_OPCODE_X0) &&
		 (get_RRROpcodeExtension_X0(bundle) ==
		  UNARY_RRR_0_OPCODE_X0) &&
		 (get_Opcode_X0(bundle) ==
		  RRR_0_OPCODE_X0)) ||
		((get_UnaryOpcodeExtension_X0(bundle) ==
		  FNOP_UNARY_OPCODE_X0) &&
		 (get_RRROpcodeExtension_X0(bundle) ==
		  UNARY_RRR_0_OPCODE_X0) &&
		 (get_Opcode_X0(bundle) ==
		  RRR_0_OPCODE_X0)));
}

/*
 * Check whether a nop or fnop is in the bundle's X1 pipeline.
 */
static bool is_bundle_x1_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_X1(bundle) ==
		  NOP_UNARY_OPCODE_X1) &&
		 (get_RRROpcodeExtension_X1(bundle) ==
		  UNARY_RRR_0_OPCODE_X1) &&
		 (get_Opcode_X1(bundle) ==
		  RRR_0_OPCODE_X1)) ||
		((get_UnaryOpcodeExtension_X1(bundle) ==
		  FNOP_UNARY_OPCODE_X1) &&
		 (get_RRROpcodeExtension_X1(bundle) ==
		  UNARY_RRR_0_OPCODE_X1) &&
		 (get_Opcode_X1(bundle) ==
		  RRR_0_OPCODE_X1)));
}

/*
 * Check whether a nop or fnop is in the bundle's Y0 pipeline.
 */
static bool is_bundle_y0_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_Y0(bundle) ==
		  NOP_UNARY_OPCODE_Y0) &&
		 (get_RRROpcodeExtension_Y0(bundle) ==
		  UNARY_RRR_1_OPCODE_Y0) &&
		 (get_Opcode_Y0(bundle) ==
		  RRR_1_OPCODE_Y0)) ||
		((get_UnaryOpcodeExtension_Y0(bundle) ==
		  FNOP_UNARY_OPCODE_Y0) &&
		 (get_RRROpcodeExtension_Y0(bundle) ==
		  UNARY_RRR_1_OPCODE_Y0) &&
		 (get_Opcode_Y0(bundle) ==
		  RRR_1_OPCODE_Y0)));
}

/*
 * Check whether a nop or fnop is in the bundle's Y1 pipeline.
 */
static bool is_bundle_y1_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_Y1(bundle) ==
		  NOP_UNARY_OPCODE_Y1) &&
		 (get_RRROpcodeExtension_Y1(bundle) ==
		  UNARY_RRR_1_OPCODE_Y1) &&
		 (get_Opcode_Y1(bundle) ==
		  RRR_1_OPCODE_Y1)) ||
		((get_UnaryOpcodeExtension_Y1(bundle) ==
		  FNOP_UNARY_OPCODE_Y1) &&
		 (get_RRROpcodeExtension_Y1(bundle) ==
		  UNARY_RRR_1_OPCODE_Y1) &&
		 (get_Opcode_Y1(bundle) ==
		  RRR_1_OPCODE_Y1)));
}

/*
 * Test whether a bundle's Y0 and Y1 pipelines are both nop or fnop.
 */
static bool is_y0_y1_nop(tilegx_bundle_bits bundle)
{
	return is_bundle_y0_nop(bundle) && is_bundle_y1_nop(bundle);
}

/*
 * Test whether a bundle's X0 and X1 pipelines are both nop or fnop.
 */
static bool is_x0_x1_nop(tilegx_bundle_bits bundle)
{
	return is_bundle_x0_nop(bundle) && is_bundle_x1_nop(bundle);
}

/*
 * Find the destination and source registers of the faulting unaligned access
 * instruction at X1 or Y2.  Also allocate up to 3 scratch registers clob1,
 * clob2 and clob3, which are guaranteed to be different from any register
 * used in the fault bundle.  r_alias is used to report whether instructions
 * other than the unaligned load/store share a register with ra, rb or rd.
 */
static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,
		      uint64_t *rb, uint64_t *clob1, uint64_t *clob2,
		      uint64_t *clob3, bool *r_alias)
{
	int i;
	uint64_t reg;
	uint64_t reg_map = 0, alias_reg_map = 0, map;
	bool alias = false;

	/*
	 * Parse the fault bundle, find the potentially used registers and
	 * mark the corresponding bits in reg_map and alias_reg_map.  These
	 * two bitmaps are used to find the scratch registers and to
	 * determine whether there is a register alias.
	 */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {  /* Y Mode Bundle. */

		reg = get_SrcA_Y2(bundle);
		reg_map |= 1ULL << reg;
		*ra = reg;
		reg = get_SrcBDest_Y2(bundle);
		reg_map |= 1ULL << reg;

		if (rd) {
			/* Load. */
			*rd = reg;
			alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
		} else {
			/* Store. */
			*rb = reg;
			alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
		}

		if (!is_bundle_y1_nop(bundle)) {
			reg = get_SrcA_Y1(bundle);
			reg_map |= (1ULL << reg);
			map = (1ULL << reg);

			reg = get_SrcB_Y1(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			reg = get_Dest_Y1(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			if (map & alias_reg_map)
				alias = true;
		}

		if (!is_bundle_y0_nop(bundle)) {
			reg = get_SrcA_Y0(bundle);
			reg_map |= (1ULL << reg);
			map = (1ULL << reg);

			reg = get_SrcB_Y0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			reg = get_Dest_Y0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			if (map & alias_reg_map)
				alias = true;
		}
	} else	{ /* X Mode Bundle. */

		reg = get_SrcA_X1(bundle);
		reg_map |= (1ULL << reg);
		*ra = reg;
		if (rd)	{
			/* Load. */
			reg = get_Dest_X1(bundle);
			reg_map |= (1ULL << reg);
			*rd = reg;
			alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
		} else {
			/* Store. */
			reg = get_SrcB_X1(bundle);
			reg_map |= (1ULL << reg);
			*rb = reg;
			alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
		}

		if (!is_bundle_x0_nop(bundle)) {
			reg = get_SrcA_X0(bundle);
			reg_map |= (1ULL << reg);
			map = (1ULL << reg);

			reg = get_SrcB_X0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			reg = get_Dest_X0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			if (map & alias_reg_map)
				alias = true;
		}
	}

	/*
	 * "alias" indicates whether the unaligned access registers collide
	 * with other registers in the same bundle.  We only test the
	 * all-register-operand case (RRR) and ignore the immediate case.
	 * If a bundle has no register alias, we may do the fixup in a
	 * simpler and faster manner; so if an immediate field happens to
	 * match a register number, we may end up falling back to the
	 * generic handling.
	 */
	*r_alias = alias;

	/* Flip bits on reg_map. */
	reg_map ^= -1ULL;

	/* Scan the lower 54 (TREG_SP) bits of reg_map to find 3 set bits. */
	for (i = 0; i < TREG_SP; i++) {
		if (reg_map & (0x1ULL << i)) {
			if (*clob1 == -1) {
				*clob1 = i;
			} else if (*clob2 == -1) {
				*clob2 = i;
			} else if (*clob3 == -1) {
				*clob3 = i;
				return;
			}
		}
	}
}
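
/*
 * Usage note, inferred from the callers in jit_bundle_gen() below: for a
 * store the rd pointer is passed as NULL and the Y2/X1 data operand comes
 * back in rb instead; clob1/2/3 are expected to be initialized to -1 by
 * the caller so that the scan above can fill them in order.
 */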

/*
 * Sanity check for registers ra, rb, rd and clob1/2/3.  Return true if any
 * of them is unexpected.
 */
static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb,
		       uint64_t clob1, uint64_t clob2, uint64_t clob3)
{
	bool unexpected = false;
	if ((ra >= 56) && (ra != TREG_ZERO))
		unexpected = true;

	if ((clob1 >= 56) || (clob2 >= 56) || (clob3 >= 56))
		unexpected = true;

	if (rd != -1) {
		if ((rd >= 56) && (rd != TREG_ZERO))
			unexpected = true;
	} else {
		if ((rb >= 56) && (rb != TREG_ZERO))
			unexpected = true;
	}
	return unexpected;
}
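
/*
 * Note on the "56" bound above: r54 is TREG_SP and r55 is the link register
 * (see the uses elsewhere in this file); registers 56 and up are
 * special-purpose on tilegx, so only TREG_ZERO is accepted from that range.
 */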

#define GX_INSN_X0_MASK ((1ULL << 31) - 1)
#define GX_INSN_X1_MASK (((1ULL << 31) - 1) << 31)
#define GX_INSN_Y0_MASK ((0xFULL << 27) | (0xFFFFFULL))
#define GX_INSN_Y1_MASK (GX_INSN_Y0_MASK << 31)
#define GX_INSN_Y2_MASK ((0x7FULL << 51) | (0x7FULL << 20))
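
/*
 * The masks above select the bits of a 64-bit bundle that belong to each
 * pipeline slot, as implied by the shift amounts: X0 is bits 0..30 and X1
 * is bits 31..61, while the Y-mode slots are split fields (Y2, for example,
 * occupies bits 20..26 and 51..57).  They are used below to keep one slot
 * of a template bundle and splice in new operand fields.
 */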

#ifdef __LITTLE_ENDIAN
#define GX_INSN_BSWAP(_bundle_) (_bundle_)
#else
#define GX_INSN_BSWAP(_bundle_) swab64(_bundle_)
#endif /* __LITTLE_ENDIAN */

/*
 * __JIT_CODE(.) creates template bundles in the .rodata.unalign_data
 * section.  The corresponding static function jit_x#_###(.) generates a
 * partial or whole bundle based on the template and the given arguments.
 */
#define __JIT_CODE(_X_) \
	asm (".pushsection .rodata.unalign_data, \"a\"\n" \
	     _X_"\n" \
	     ".popsection\n")
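
/*
 * For example (illustrative, mirroring the helpers below): the assembler
 * emits a template such as "{ldna r0, r0}" into .rodata.unalign_data, and
 * the matching helper masks the template down to its X1 slot and ORs in the
 * real operands, e.g.
 *
 *	jit_x1_ldna(rd, ra) ==
 *		(template & GX_INSN_X1_MASK) |
 *		create_Dest_X1(rd) | create_SrcA_X1(ra);
 *
 * so no instruction encodings need to be spelled out by hand here.
 */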

__JIT_CODE("__unalign_jit_x1_mtspr: {mtspr 0, r0}");
static tilegx_bundle_bits jit_x1_mtspr(int spr, int reg)
{
	extern tilegx_bundle_bits __unalign_jit_x1_mtspr;
	return (GX_INSN_BSWAP(__unalign_jit_x1_mtspr) & GX_INSN_X1_MASK) |
		create_MT_Imm14_X1(spr) | create_SrcA_X1(reg);
}

__JIT_CODE("__unalign_jit_x1_mfspr: {mfspr r0, 0}");
static tilegx_bundle_bits jit_x1_mfspr(int reg, int spr)
{
	extern tilegx_bundle_bits __unalign_jit_x1_mfspr;
	return (GX_INSN_BSWAP(__unalign_jit_x1_mfspr) & GX_INSN_X1_MASK) |
		create_MF_Imm14_X1(spr) | create_Dest_X1(reg);
}

__JIT_CODE("__unalign_jit_x0_addi: {addi r0, r0, 0; iret}");
static tilegx_bundle_bits jit_x0_addi(int rd, int ra, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x0_addi;
	return (GX_INSN_BSWAP(__unalign_jit_x0_addi) & GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_Imm8_X0(imm8);
}

__JIT_CODE("__unalign_jit_x1_ldna: {ldna r0, r0}");
static tilegx_bundle_bits jit_x1_ldna(int rd, int ra)
{
	extern tilegx_bundle_bits __unalign_jit_x1_ldna;
	return (GX_INSN_BSWAP(__unalign_jit_x1_ldna) & GX_INSN_X1_MASK) |
		create_Dest_X1(rd) | create_SrcA_X1(ra);
}

__JIT_CODE("__unalign_jit_x0_dblalign: {dblalign r0, r0 ,r0}");
static tilegx_bundle_bits jit_x0_dblalign(int rd, int ra, int rb)
{
	extern tilegx_bundle_bits __unalign_jit_x0_dblalign;
	return (GX_INSN_BSWAP(__unalign_jit_x0_dblalign) & GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_SrcB_X0(rb);
}

__JIT_CODE("__unalign_jit_x1_iret: {iret}");
static tilegx_bundle_bits jit_x1_iret(void)
{
	extern tilegx_bundle_bits __unalign_jit_x1_iret;
	return GX_INSN_BSWAP(__unalign_jit_x1_iret) & GX_INSN_X1_MASK;
}

__JIT_CODE("__unalign_jit_x01_fnop: {fnop;fnop}");
static tilegx_bundle_bits jit_x0_fnop(void)
{
	extern tilegx_bundle_bits __unalign_jit_x01_fnop;
	return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X0_MASK;
}

static tilegx_bundle_bits jit_x1_fnop(void)
{
	extern tilegx_bundle_bits __unalign_jit_x01_fnop;
	return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X1_MASK;
}

__JIT_CODE("__unalign_jit_y2_dummy: {fnop; fnop; ld zero, sp}");
static tilegx_bundle_bits jit_y2_dummy(void)
{
	extern tilegx_bundle_bits __unalign_jit_y2_dummy;
	return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y2_MASK;
}

static tilegx_bundle_bits jit_y1_fnop(void)
{
	extern tilegx_bundle_bits __unalign_jit_y2_dummy;
	return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y1_MASK;
}

__JIT_CODE("__unalign_jit_x1_st1_add: {st1_add r1, r0, 0}");
static tilegx_bundle_bits jit_x1_st1_add(int ra, int rb, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_st1_add;
	return (GX_INSN_BSWAP(__unalign_jit_x1_st1_add) &
		(~create_SrcA_X1(-1)) &
		GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
		create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x1_st: {crc32_8 r1, r0, r0; st r0, r0}");
static tilegx_bundle_bits jit_x1_st(int ra, int rb)
{
	extern tilegx_bundle_bits __unalign_jit_x1_st;
	return (GX_INSN_BSWAP(__unalign_jit_x1_st) & GX_INSN_X1_MASK) |
		create_SrcA_X1(ra) | create_SrcB_X1(rb);
}

__JIT_CODE("__unalign_jit_x1_st_add: {st_add r1, r0, 0}");
static tilegx_bundle_bits jit_x1_st_add(int ra, int rb, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_st_add;
	return (GX_INSN_BSWAP(__unalign_jit_x1_st_add) &
		(~create_SrcA_X1(-1)) &
		GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
		create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x1_ld: {crc32_8 r1, r0, r0; ld r0, r0}");
static tilegx_bundle_bits jit_x1_ld(int rd, int ra)
{
	extern tilegx_bundle_bits __unalign_jit_x1_ld;
	return (GX_INSN_BSWAP(__unalign_jit_x1_ld) & GX_INSN_X1_MASK) |
		create_Dest_X1(rd) | create_SrcA_X1(ra);
}

__JIT_CODE("__unalign_jit_x1_ld_add: {ld_add r1, r0, 0}");
static tilegx_bundle_bits jit_x1_ld_add(int rd, int ra, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_ld_add;
	return (GX_INSN_BSWAP(__unalign_jit_x1_ld_add) &
		(~create_Dest_X1(-1)) &
		GX_INSN_X1_MASK) | create_Dest_X1(rd) |
		create_SrcA_X1(ra) | create_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x0_bfexts: {bfexts r0, r0, 0, 0}");
static tilegx_bundle_bits jit_x0_bfexts(int rd, int ra, int bfs, int bfe)
{
	extern tilegx_bundle_bits __unalign_jit_x0_bfexts;
	return (GX_INSN_BSWAP(__unalign_jit_x0_bfexts) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
}

__JIT_CODE("__unalign_jit_x0_bfextu: {bfextu r0, r0, 0, 0}");
static tilegx_bundle_bits jit_x0_bfextu(int rd, int ra, int bfs, int bfe)
{
	extern tilegx_bundle_bits __unalign_jit_x0_bfextu;
	return (GX_INSN_BSWAP(__unalign_jit_x0_bfextu) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
}

__JIT_CODE("__unalign_jit_x1_addi: {bfextu r1, r1, 0, 0; addi r0, r0, 0}");
static tilegx_bundle_bits jit_x1_addi(int rd, int ra, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_addi;
	return (GX_INSN_BSWAP(__unalign_jit_x1_addi) & GX_INSN_X1_MASK) |
		create_Dest_X1(rd) | create_SrcA_X1(ra) |
		create_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x0_shrui: {shrui r0, r0, 0; iret}");
static tilegx_bundle_bits jit_x0_shrui(int rd, int ra, int imm6)
{
	extern tilegx_bundle_bits __unalign_jit_x0_shrui;
	return (GX_INSN_BSWAP(__unalign_jit_x0_shrui) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_ShAmt_X0(imm6);
}

__JIT_CODE("__unalign_jit_x0_rotli: {rotli r0, r0, 0; iret}");
static tilegx_bundle_bits jit_x0_rotli(int rd, int ra, int imm6)
{
	extern tilegx_bundle_bits __unalign_jit_x0_rotli;
	return (GX_INSN_BSWAP(__unalign_jit_x0_rotli) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_ShAmt_X0(imm6);
}

__JIT_CODE("__unalign_jit_x1_bnezt: {bnezt r0, __unalign_jit_x1_bnezt}");
static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff)
{
	extern tilegx_bundle_bits __unalign_jit_x1_bnezt;
	return (GX_INSN_BSWAP(__unalign_jit_x1_bnezt) &
		GX_INSN_X1_MASK) |
		create_SrcA_X1(ra) | create_BrOff_X1(broff);
}

#undef __JIT_CODE
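
/*
 * Each jit_x0_*()/jit_x1_*() helper above returns only its own slot of a
 * bundle, so the generator below builds full bundles by ORing an X0 half
 * with an X1 half (e.g. jit_x0_fnop() | jit_x1_iret()).
 */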

/*
 * This function generates the unaligned fixup JIT.
 *
 * We first find the unaligned load/store instruction's destination and
 * source registers, ra, rb and rd, plus 3 scratch registers, by calling
 * find_regs().  The 3 scratch clobbers must not alias any register used in
 * the fault bundle.  Then we analyze the fault bundle to determine whether
 * it is a load or store, the operand width, and whether there is a branch
 * or address increment, etc.  Finally the generated JIT is copied into the
 * JIT code area in user space.
 */
static
void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
		    int align_ctl)
{
	struct thread_info *info = current_thread_info();
	struct unaligned_jit_fragment frag;
	struct unaligned_jit_fragment *jit_code_area;
	tilegx_bundle_bits bundle_2 = 0;
	/* If bundle_2_enable = false, bundle_2 is a fnop/nop operation. */
	bool bundle_2_enable = true;
	uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;
	/*
	 * Indicates whether the unaligned access instruction's registers
	 * collide with others in the same bundle.
	 */
	bool alias = false;
	bool load_n_store = true;
	bool load_store_signed = false;
	unsigned int load_store_size = 8;
	bool y1_br = false;  /* True for a branch in the same bundle at Y1. */
	int y1_br_reg = 0;
	/* True for a link operation, i.e. jalr or lnk at Y1. */
	bool y1_lr = false;
	int y1_lr_reg = 0;
	bool x1_add = false; /* True for a load/store ADD instruction at X1. */
	int x1_add_imm8 = 0;
	bool unexpected = false;
	int n = 0, k;

	jit_code_area =
		(struct unaligned_jit_fragment *)(info->unalign_jit_base);

	memset((void *)&frag, 0, sizeof(frag));

	/* 0: X mode, otherwise: Y mode. */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
		unsigned int mod, opcode;

		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
		    get_RRROpcodeExtension_Y1(bundle) ==
		    UNARY_RRR_1_OPCODE_Y1) {

			opcode = get_UnaryOpcodeExtension_Y1(bundle);

			/*
			 * Test for a "jalr", "jalrp", "jr" or "jrp"
			 * instruction in the Y1 pipeline.
			 */
			switch (opcode) {
			case JALR_UNARY_OPCODE_Y1:
			case JALRP_UNARY_OPCODE_Y1:
				y1_lr = true;
				y1_lr_reg = 55; /* Link register. */
				/* FALLTHROUGH */
			case JR_UNARY_OPCODE_Y1:
			case JRP_UNARY_OPCODE_Y1:
				y1_br = true;
				y1_br_reg = get_SrcA_Y1(bundle);
				break;
			case LNK_UNARY_OPCODE_Y1:
				/* "lnk" at Y1 pipeline. */
				y1_lr = true;
				y1_lr_reg = get_Dest_Y1(bundle);
				break;
			}
		}

		opcode = get_Opcode_Y2(bundle);
		mod = get_Mode(bundle);

		/*
		 * bundle_2 is the bundle after making Y2 a dummy operation:
		 * "ld zero, sp".
		 */
		bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();

		/* Make Y1 a fnop if Y1 is a branch or lnk operation. */
		if (y1_br || y1_lr) {
			bundle_2 &= ~(GX_INSN_Y1_MASK);
			bundle_2 |= jit_y1_fnop();
		}

		if (is_y0_y1_nop(bundle_2))
			bundle_2_enable = false;

		if (mod == MODE_OPCODE_YC2) {
			/* Store. */
			load_n_store = false;
			load_store_size = 1 << opcode;
			load_store_signed = false;
			find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
			if (load_store_size > 8)
				unexpected = true;
		} else {
			/* Load. */
			load_n_store = true;
			if (mod == MODE_OPCODE_YB2) {
				switch (opcode) {
				case LD_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_OPCODE_Y2:
					load_store_signed = true;
					load_store_size = 4;
					break;
				case LD4U_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 4;
					break;
				default:
					unexpected = true;
				}
			} else if (mod == MODE_OPCODE_YA2) {
				if (opcode == LD2S_OPCODE_Y2) {
					load_store_signed = true;
					load_store_size = 2;
				} else if (opcode == LD2U_OPCODE_Y2) {
					load_store_signed = false;
					load_store_size = 2;
				} else
					unexpected = true;
			} else
				unexpected = true;
			find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
		}
	} else {
		unsigned int opcode;

		/* bundle_2 is the bundle after making X1 a "fnop". */
		bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();

		if (is_x0_x1_nop(bundle_2))
			bundle_2_enable = false;

		if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
			opcode = get_UnaryOpcodeExtension_X1(bundle);

			if (get_RRROpcodeExtension_X1(bundle) ==
			    UNARY_RRR_0_OPCODE_X1) {
				load_n_store = true;
				find_regs(bundle, &rd, &ra, &rb, &clob1,
					  &clob2, &clob3, &alias);

				switch (opcode) {
				case LD_UNARY_OPCODE_X1:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD4U_UNARY_OPCODE_X1:
					load_store_size = 4;
					break;
				case LD2S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD2U_UNARY_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			} else {
				load_n_store = false;
				load_store_signed = false;
				find_regs(bundle, 0, &ra, &rb,
					  &clob1, &clob2, &clob3,
					  &alias);

				opcode = get_RRROpcodeExtension_X1(bundle);
				switch (opcode) {
				case ST_RRR_0_OPCODE_X1:
					load_store_size = 8;
					break;
				case ST4_RRR_0_OPCODE_X1:
					load_store_size = 4;
					break;
				case ST2_RRR_0_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			}
		} else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
			load_n_store = true;
			opcode = get_Imm8OpcodeExtension_X1(bundle);
			switch (opcode) {
			case LD_ADD_IMM8_OPCODE_X1:
				load_store_size = 8;
				break;
			case LD4S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD4U_ADD_IMM8_OPCODE_X1:
				load_store_size = 4;
				break;
			case LD2S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD2U_ADD_IMM8_OPCODE_X1:
				load_store_size = 2;
				break;
			case ST_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 8;
				break;
			case ST4_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 4;
				break;
			case ST2_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 2;
				break;
			default:
				unexpected = true;
			}

			if (!unexpected) {
				x1_add = true;
				if (load_n_store)
					x1_add_imm8 = get_Imm8_X1(bundle);
				else
					x1_add_imm8 = get_Dest_Imm8_X1(bundle);
			}

			find_regs(bundle, load_n_store ? (&rd) : NULL,
				  &ra, &rb, &clob1, &clob2, &clob3, &alias);
		} else
			unexpected = true;
	}

	/*
	 * Sanity checks for the register numbers extracted from the fault
	 * bundle.
	 */
	if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true)
		unexpected = true;

	/* Give a warning if register ra holds an aligned address. */
	if (!unexpected)
		WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));

	/*
	 * If the fault came from kernel space, we only need to take care of
	 * the unaligned "get_user/put_user" macros defined in "uaccess.h".
	 * Basically, we will handle bundles like this:
	 * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
	 * (Refer to "arch/tile/include/asm/uaccess.h" for details.)
	 * For either load or store, a byte-wise operation is performed by
	 * calling get_user() or put_user().  If the macro returns a non-zero
	 * value, that value is stored in rx, otherwise rx is set to zero.
	 * Finally make pc point to the next bundle and return.
	 */
	if (EX1_PL(regs->ex1) != USER_PL) {

		unsigned long rx = 0;
		unsigned long x = 0, ret = 0;

		if (y1_br || y1_lr || x1_add ||
		    (load_store_signed !=
		     (load_n_store && load_store_size == 4))) {
			/* No branch, link, wrong sign-extension or load/store add. */
			unexpected = true;
		} else if (!unexpected) {
			if (bundle & TILEGX_BUNDLE_MODE_MASK) {
				/*
				 * The fault bundle is Y mode.
				 * Check whether Y1 and Y0 have the form
				 * { movei rx, 0; nop/fnop }; if so, find rx.
				 */
				if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
				    && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
				    (get_Imm8_Y1(bundle) == 0) &&
				    is_bundle_y0_nop(bundle)) {
					rx = get_Dest_Y1(bundle);
				} else if ((get_Opcode_Y0(bundle) ==
					    ADDI_OPCODE_Y0) &&
					   (get_SrcA_Y0(bundle) == TREG_ZERO) &&
					   (get_Imm8_Y0(bundle) == 0) &&
					   is_bundle_y1_nop(bundle)) {
					rx = get_Dest_Y0(bundle);
				} else {
					unexpected = true;
				}
			} else {
				/*
				 * The fault bundle is X mode.
				 * Check whether X0 is 'movei rx, 0';
				 * if so, find rx.
				 */
				if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
				    && (get_Imm8OpcodeExtension_X0(bundle) ==
					ADDI_IMM8_OPCODE_X0) &&
				    (get_SrcA_X0(bundle) == TREG_ZERO) &&
				    (get_Imm8_X0(bundle) == 0)) {
					rx = get_Dest_X0(bundle);
				} else {
					unexpected = true;
				}
			}

			/* rx should be less than 56. */
			if (!unexpected && (rx >= 56))
				unexpected = true;
		}

		if (!search_exception_tables(regs->pc)) {
			/* No fixup in the exception tables for the pc. */
			unexpected = true;
		}

		if (unexpected) {
			/* Unexpected unaligned kernel fault. */
			struct task_struct *tsk = validate_current();

			bust_spinlocks(1);

			show_regs(regs);

			if (unlikely(tsk->pid < 2)) {
				panic("Kernel unalign fault running %s!",
				      tsk->pid ? "init" : "the idle task");
			}
#ifdef SUPPORT_DIE
			die("Oops", regs);
#endif
			bust_spinlocks(1);

			do_group_exit(SIGKILL);

		} else {
			unsigned long i, b = 0;
			unsigned char *ptr =
				(unsigned char *)regs->regs[ra];
			if (load_n_store) {
				/* Handle get_user(x, ptr). */
				for (i = 0; i < load_store_size; i++) {
					ret = get_user(b, ptr++);
					if (!ret) {
						/* Success! Update x. */
#ifdef __LITTLE_ENDIAN
						x |= (b << (8 * i));
#else
						x <<= 8;
						x |= b;
#endif /* __LITTLE_ENDIAN */
					} else {
						x = 0;
						break;
					}
				}

				/* Sign-extend 4-byte loads. */
				if (load_store_size == 4)
					x = (long)(int)x;

				/* Set register rd. */
				regs->regs[rd] = x;

				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;
			} else {
				/* Handle put_user(x, ptr). */
				x = regs->regs[rb];
#ifdef __LITTLE_ENDIAN
				b = x;
#else
				/*
				 * Swap x in order to store x from low
				 * to high memory, the same as in the
				 * little-endian case.
				 */
				switch (load_store_size) {
				case 8:
					b = swab64(x);
					break;
				case 4:
					b = swab32(x);
					break;
				case 2:
					b = swab16(x);
					break;
				}
#endif /* __LITTLE_ENDIAN */
				for (i = 0; i < load_store_size; i++) {
					ret = put_user(b, ptr++);
					if (ret)
						break;
					/* Success! Shift 1 byte. */
					b >>= 8;
				}

				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;
			}
		}

		unaligned_fixup_count++;

		if (unaligned_printk) {
			pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
				current->comm, current->pid, regs->regs[ra]);
		}

		/* Done! Return to the exception handler. */
		return;
	}

	if ((align_ctl == 0) || unexpected) {
		siginfo_t info;

		clear_siginfo(&info);
		info.si_signo = SIGBUS;
		info.si_code = BUS_ADRALN;
		info.si_addr = (unsigned char __user *)0;

		if (unaligned_printk)
			pr_info("Unalign bundle: unexp @%llx, %llx\n",
				(unsigned long long)regs->pc,
				(unsigned long long)bundle);

		if (ra < 56) {
			unsigned long uaa = (unsigned long)regs->regs[ra];
			/* Set bus address. */
			info.si_addr = (unsigned char __user *)uaa;
		}

		unaligned_fixup_count++;

		trace_unhandled_signal("unaligned fixup trap", regs,
				       (unsigned long)info.si_addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return;
	}

#ifdef __LITTLE_ENDIAN
#define UA_FIXUP_ADDR_DELTA 1
#define UA_FIXUP_BFEXT_START(_B_) 0
#define UA_FIXUP_BFEXT_END(_B_) (8 * (_B_) - 1)
#else /* __BIG_ENDIAN */
#define UA_FIXUP_ADDR_DELTA -1
#define UA_FIXUP_BFEXT_START(_B_) (64 - 8 * (_B_))
#define UA_FIXUP_BFEXT_END(_B_) 63
#endif /* __LITTLE_ENDIAN */
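
/*
 * How these constants are used below: the generated store loops walk the
 * destination one byte at a time, so UA_FIXUP_ADDR_DELTA is +1 on
 * little-endian (lowest byte first, ascending addresses) and -1 on
 * big-endian (the start address is first advanced to the last byte).  The
 * BFEXT start/end pairs select the low 8*size bits of the loaded
 * doubleword on little-endian and the high 8*size bits on big-endian,
 * e.g. bits 0..31 versus 32..63 for a 4-byte access.
 */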

	if ((ra != rb) && (rd != TREG_SP) && !alias &&
	    !y1_br && !y1_lr && !x1_add) {
		/*
		 * Simple case: ra != rb, no register alias found, and no
		 * branch or link.  This will be the majority.  We can do a
		 * little better for this simple case than the generic
		 * scheme below.
		 */
		if (!load_n_store) {
			/*
			 * Simple store: ra != rb, no need for a scratch
			 * register.  Just store and rotate to the right
			 * bytewise.
			 */
#ifdef __BIG_ENDIAN
			frag.insn[n++] =
				jit_x0_addi(ra, ra, load_store_size - 1) |
				jit_x1_fnop();
#endif /* __BIG_ENDIAN */
			for (k = 0; k < load_store_size; k++) {
				/* Store a byte. */
				frag.insn[n++] =
					jit_x0_rotli(rb, rb, 56) |
					jit_x1_st1_add(ra, rb,
						       UA_FIXUP_ADDR_DELTA);
			}
#ifdef __BIG_ENDIAN
			frag.insn[n] = jit_x1_addi(ra, ra, 1);
#else
			frag.insn[n] = jit_x1_addi(ra, ra,
						   -1 * load_store_size);
#endif /* __LITTLE_ENDIAN */
			if (load_store_size == 8) {
				frag.insn[n] |= jit_x0_fnop();
			} else if (load_store_size == 4) {
				frag.insn[n] |= jit_x0_rotli(rb, rb, 32);
			} else { /* = 2 */
				frag.insn[n] |= jit_x0_rotli(rb, rb, 16);
			}
			n++;
			if (bundle_2_enable)
				frag.insn[n++] = bundle_2;
			frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
		} else {
			if (rd == ra) {
				/* Use two clobber registers: clob1/2. */
				frag.insn[n++] =
					jit_x0_addi(TREG_SP, TREG_SP, -16) |
					jit_x1_fnop();
				frag.insn[n++] =
					jit_x0_addi(clob1, ra, 7) |
					jit_x1_st_add(TREG_SP, clob1, -8);
				frag.insn[n++] =
					jit_x0_addi(clob2, ra, 0) |
					jit_x1_st(TREG_SP, clob2);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(rd, ra);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(clob1, clob1);
				/*
				 * Note: we must make sure that rd is not sp.
				 * Recover clob1/2 from the stack.
				 */
				frag.insn[n++] =
					jit_x0_dblalign(rd, clob1, clob2) |
					jit_x1_ld_add(clob2, TREG_SP, 8);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ld_add(clob1, TREG_SP, 16);
			} else {
				/* Use one clobber register: clob1 only. */
				frag.insn[n++] =
					jit_x0_addi(TREG_SP, TREG_SP, -16) |
					jit_x1_fnop();
				frag.insn[n++] =
					jit_x0_addi(clob1, ra, 7) |
					jit_x1_st(TREG_SP, clob1);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(rd, ra);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(clob1, clob1);
				/*
				 * Note: we must make sure that rd is not sp.
				 * Recover clob1 from the stack.
				 */
				frag.insn[n++] =
					jit_x0_dblalign(rd, clob1, ra) |
					jit_x1_ld_add(clob1, TREG_SP, 16);
			}

			if (bundle_2_enable)
				frag.insn[n++] = bundle_2;
			/*
			 * For a non-8-byte load, extract the corresponding
			 * bytes and sign- or zero-extend.
			 */
			if (load_store_size == 4) {
				if (load_store_signed)
					frag.insn[n++] =
						jit_x0_bfexts(
							rd, rd,
							UA_FIXUP_BFEXT_START(4),
							UA_FIXUP_BFEXT_END(4)) |
						jit_x1_fnop();
				else
					frag.insn[n++] =
						jit_x0_bfextu(
							rd, rd,
							UA_FIXUP_BFEXT_START(4),
							UA_FIXUP_BFEXT_END(4)) |
						jit_x1_fnop();
			} else if (load_store_size == 2) {
				if (load_store_signed)
					frag.insn[n++] =
						jit_x0_bfexts(
							rd, rd,
							UA_FIXUP_BFEXT_START(2),
							UA_FIXUP_BFEXT_END(2)) |
						jit_x1_fnop();
				else
					frag.insn[n++] =
						jit_x0_bfextu(
							rd, rd,
							UA_FIXUP_BFEXT_START(2),
							UA_FIXUP_BFEXT_END(2)) |
						jit_x1_fnop();
			}

			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_iret();
		}
	} else if (!load_n_store) {

		/*
		 * Generic memory store cases: use 3 clobber registers.
		 *
		 * Allocate space for saving clob2, clob1 and clob3 on the
		 * user's stack.  Register clob3 points to where clob2 is
		 * saved, followed by clob1 and clob3 from high to low
		 * memory.
		 */
		frag.insn[n++] =
			jit_x0_addi(TREG_SP, TREG_SP, -32) |
			jit_x1_fnop();
		frag.insn[n++] =
			jit_x0_addi(clob3, TREG_SP, 16) |
			jit_x1_st_add(TREG_SP, clob3, 8);
#ifdef __LITTLE_ENDIAN
		frag.insn[n++] =
			jit_x0_addi(clob1, ra, 0) |
			jit_x1_st_add(TREG_SP, clob1, 8);
#else
		frag.insn[n++] =
			jit_x0_addi(clob1, ra, load_store_size - 1) |
			jit_x1_st_add(TREG_SP, clob1, 8);
#endif
		if (load_store_size == 8) {
			/*
			 * We save one byte at a time, not for speed but for
			 * compact code.  After each store the data source
			 * register is rotated by one byte, so it is
			 * unchanged after 8 stores.
			 */
			frag.insn[n++] =
				jit_x0_addi(clob2, TREG_ZERO, 7) |
				jit_x1_st_add(TREG_SP, clob2, 16);
			frag.insn[n++] =
				jit_x0_rotli(rb, rb, 56) |
				jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
			frag.insn[n++] =
				jit_x0_addi(clob2, clob2, -1) |
				jit_x1_bnezt(clob2, -1);
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_addi(clob2, y1_br_reg, 0);
		} else if (load_store_size == 4) {
			frag.insn[n++] =
				jit_x0_addi(clob2, TREG_ZERO, 3) |
				jit_x1_st_add(TREG_SP, clob2, 16);
			frag.insn[n++] =
				jit_x0_rotli(rb, rb, 56) |
				jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
			frag.insn[n++] =
				jit_x0_addi(clob2, clob2, -1) |
				jit_x1_bnezt(clob2, -1);
			/*
			 * Same as the 8-byte case, but we need to rotate by
			 * another 4 bytes to recover rb for a 4-byte store.
			 */
			frag.insn[n++] = jit_x0_rotli(rb, rb, 32) |
				jit_x1_addi(clob2, y1_br_reg, 0);
		} else { /* = 2 */
			frag.insn[n++] =
				jit_x0_addi(clob2, rb, 0) |
				jit_x1_st_add(TREG_SP, clob2, 16);
			for (k = 0; k < 2; k++) {
				frag.insn[n++] =
					jit_x0_shrui(rb, rb, 8) |
					jit_x1_st1_add(clob1, rb,
						       UA_FIXUP_ADDR_DELTA);
			}
			frag.insn[n++] =
				jit_x0_addi(rb, clob2, 0) |
				jit_x1_addi(clob2, y1_br_reg, 0);
		}

		if (bundle_2_enable)
			frag.insn[n++] = bundle_2;

		if (y1_lr) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mfspr(y1_lr_reg,
					     SPR_EX_CONTEXT_0_0);
		}
		if (y1_br) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
					     clob2);
		}
		if (x1_add) {
			frag.insn[n++] =
				jit_x0_addi(ra, ra, x1_add_imm8) |
				jit_x1_ld_add(clob2, clob3, -8);
		} else {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_ld_add(clob2, clob3, -8);
		}
		frag.insn[n++] =
			jit_x0_fnop() |
			jit_x1_ld_add(clob1, clob3, -8);
		frag.insn[n++] = jit_x0_fnop() | jit_x1_ld(clob3, clob3);
		frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
	} else {
		/*
		 * Generic memory load cases.
		 *
		 * Allocate space for saving clob1, clob2 and clob3 on the
		 * user's stack.  Register clob3 points to where clob1 is
		 * saved, followed by clob2 and clob3 from high to low
		 * memory.
		 */
		frag.insn[n++] =
			jit_x0_addi(TREG_SP, TREG_SP, -32) |
			jit_x1_fnop();
		frag.insn[n++] =
			jit_x0_addi(clob3, TREG_SP, 16) |
			jit_x1_st_add(TREG_SP, clob3, 8);
		frag.insn[n++] =
			jit_x0_addi(clob2, ra, 0) |
			jit_x1_st_add(TREG_SP, clob2, 8);

		if (y1_br) {
			frag.insn[n++] =
				jit_x0_addi(clob1, y1_br_reg, 0) |
				jit_x1_st_add(TREG_SP, clob1, 16);
		} else {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_st_add(TREG_SP, clob1, 16);
		}

		if (bundle_2_enable)
			frag.insn[n++] = bundle_2;

		if (y1_lr) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mfspr(y1_lr_reg,
					     SPR_EX_CONTEXT_0_0);
		}
		if (y1_br) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
					     clob1);
		}

		frag.insn[n++] =
			jit_x0_addi(clob1, clob2, 7) |
			jit_x1_ldna(rd, clob2);
		frag.insn[n++] =
			jit_x0_fnop() |
			jit_x1_ldna(clob1, clob1);
		frag.insn[n++] =
			jit_x0_dblalign(rd, clob1, clob2) |
			jit_x1_ld_add(clob1, clob3, -8);
		if (x1_add) {
			frag.insn[n++] =
				jit_x0_addi(ra, ra, x1_add_imm8) |
				jit_x1_ld_add(clob2, clob3, -8);
		} else {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_ld_add(clob2, clob3, -8);
		}

		frag.insn[n++] =
			jit_x0_fnop() |
			jit_x1_ld(clob3, clob3);

		if (load_store_size == 4) {
			if (load_store_signed)
				frag.insn[n++] =
					jit_x0_bfexts(
						rd, rd,
						UA_FIXUP_BFEXT_START(4),
						UA_FIXUP_BFEXT_END(4)) |
					jit_x1_fnop();
			else
				frag.insn[n++] =
					jit_x0_bfextu(
						rd, rd,
						UA_FIXUP_BFEXT_START(4),
						UA_FIXUP_BFEXT_END(4)) |
					jit_x1_fnop();
		} else if (load_store_size == 2) {
			if (load_store_signed)
				frag.insn[n++] =
					jit_x0_bfexts(
						rd, rd,
						UA_FIXUP_BFEXT_START(2),
						UA_FIXUP_BFEXT_END(2)) |
					jit_x1_fnop();
			else
				frag.insn[n++] =
					jit_x0_bfextu(
						rd, rd,
						UA_FIXUP_BFEXT_START(2),
						UA_FIXUP_BFEXT_END(2)) |
					jit_x1_fnop();
		}

		frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
	}

	/* The max JIT bundle count is 14. */
	WARN_ON(n > 14);

	if (!unexpected) {
		int status = 0;
		int idx = (regs->pc >> 3) &
			((1ULL << (PAGE_SHIFT - UNALIGN_JIT_SHIFT)) - 1);

		frag.pc = regs->pc;
		frag.bundle = bundle;

		if (unaligned_printk) {
			pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
				current->comm, current->pid,
				(unsigned long)frag.pc,
				(unsigned long)frag.bundle,
				(int)alias, (int)rd, (int)ra,
				(int)rb, (int)bundle_2_enable,
				(int)y1_lr, (int)y1_br, (int)x1_add);

			for (k = 0; k < n; k += 2)
				pr_info("[%d] %016llx %016llx\n",
					k, (unsigned long long)frag.insn[k],
					(unsigned long long)frag.insn[k+1]);
		}

		/* Swap bundle byte order for big-endian systems. */
#ifdef __BIG_ENDIAN
		frag.bundle = GX_INSN_BSWAP(frag.bundle);
		for (k = 0; k < n; k++)
			frag.insn[k] = GX_INSN_BSWAP(frag.insn[k]);
#endif /* __BIG_ENDIAN */

		status = copy_to_user((void __user *)&jit_code_area[idx],
				      &frag, sizeof(frag));
		if (status) {
			/* Failed to copy the JIT into user land; send SIGSEGV. */
			siginfo_t info;

			clear_siginfo(&info);
			info.si_signo = SIGSEGV;
			info.si_code = SEGV_MAPERR;
			info.si_addr = (void __user *)&jit_code_area[idx];

			pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
				current->pid, current->comm,
				(unsigned long long)&jit_code_area[idx]);

			trace_unhandled_signal("segfault in unalign fixup",
					       regs,
					       (unsigned long)info.si_addr,
					       SIGSEGV);
			force_sig_info(info.si_signo, &info, current);
			return;
		}

		/* Do a cheaper increment, not accurate. */
		unaligned_fixup_count++;
		__flush_icache_range((unsigned long)&jit_code_area[idx],
				     (unsigned long)&jit_code_area[idx] +
				     sizeof(frag));

		/* Set up SPR_EX_CONTEXT_0_0/1 for returning to the user program. */
		__insn_mtspr(SPR_EX_CONTEXT_0_0, regs->pc + 8);
		__insn_mtspr(SPR_EX_CONTEXT_0_1, PL_ICS_EX1(USER_PL, 0));

		/* Modify pc to point at the start of the new JIT. */
		regs->pc = (unsigned long)&jit_code_area[idx].insn[0];

		/* Set ICS in SPR_EX_CONTEXT_K_1. */
		regs->ex1 = PL_ICS_EX1(USER_PL, 1);
	}
}
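
/*
 * Indexing note for the JIT area above: each faulting bundle gets its own
 * 128-byte slot, selected by idx = (pc >> 3) masked to the number of slots
 * in the page (PAGE_SHIFT - UNALIGN_JIT_SHIFT bits, so UNALIGN_JIT_SHIFT is
 * presumably log2(sizeof(struct unaligned_jit_fragment)) = 7).  Distinct
 * faulting pcs can map to the same slot; since the handler regenerates the
 * slot on every fault, a collision only costs regeneration time.
 */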

/*
 * C function to generate the unaligned data JIT.  Called from the unaligned
 * data interrupt handler.
 *
 * First check whether the unaligned fixup is disabled, whether the exception
 * did not come from user space, or whether the sp register points to an
 * unaligned address; if so, generate a SIGBUS.  Then map a page into user
 * space as the JIT area if it is not mapped yet.  Generate the JIT code by
 * calling jit_bundle_gen().  After that, return back to the exception
 * handler.
 *
 * The exception handler will "iret" to the newly generated JIT code after
 * restoring caller-saved registers.  In theory, the JIT code will perform
 * another "iret" to resume the user's program.
 */
void do_unaligned(struct pt_regs *regs, int vecnum)
{
	tilegx_bundle_bits __user *pc;
	tilegx_bundle_bits bundle;
	struct thread_info *info = current_thread_info();
	int align_ctl;

	/* Check the per-process unaligned JIT flags. */
	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}
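
	/*
	 * Resulting align_ctl semantics for user-space faults, as used below
	 * and in jit_bundle_gen(): < 0 always raises SIGBUS, 0 raises SIGBUS
	 * from jit_bundle_gen() (PR_UNALIGN_SIGBUS), and >= 1 performs the
	 * JIT fixup (PR_UNALIGN_NOPRINT or the unaligned_fixup default).
	 */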

	/* Enable interrupts in order to access user land. */
	local_irq_enable();

	/*
	 * If the fault came from kernel space, there are two choices:
	 * (a) unaligned_fixup < 1: we will first call the get/put_user fixup
	 *     to return -EFAULT. If there is no fixup, simply panic the
	 *     kernel.
	 * (b) unaligned_fixup >= 1: we will try to fix the unaligned access
	 *     if it was triggered by the get_user/put_user() macros. Panic
	 *     the kernel if it is not fixable.
	 */
	if (EX1_PL(regs->ex1) != USER_PL) {

		if (align_ctl < 1) {
			unaligned_fixup_count++;
			/* The exception came from the kernel; try to fix it up. */
			if (fixup_exception(regs)) {
				if (unaligned_printk)
					pr_info("Unalign fixup: %d %llx @%llx\n",
						(int)unaligned_fixup,
						(unsigned long long)regs->ex1,
						(unsigned long long)regs->pc);
			} else {
				/* Not fixable. Go panic. */
				panic("Unalign exception in Kernel. pc=%lx",
				      regs->pc);
			}
		} else {
			/*
			 * Try to fix the exception. If we can't, panic the
			 * kernel.
			 */
			bundle = GX_INSN_BSWAP(
				*((tilegx_bundle_bits *)(regs->pc)));
			jit_bundle_gen(regs, bundle, align_ctl);
		}
		return;
	}

	/*
	 * The fault came from user space with ICS set, or the stack is not
	 * aligned.  If so, trigger a SIGBUS.
	 */
	if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
		siginfo_t info;

		clear_siginfo(&info);
		info.si_signo = SIGBUS;
		info.si_code = BUS_ADRALN;
		info.si_addr = (unsigned char __user *)0;

		if (unaligned_printk)
			pr_info("Unalign fixup: %d %llx @%llx\n",
				(int)unaligned_fixup,
				(unsigned long long)regs->ex1,
				(unsigned long long)regs->pc);

		unaligned_fixup_count++;
		trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return;
	}

	/* Read the bundle that caused the exception. */
	pc = (tilegx_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		/* We should probably never get here, since pc is a valid user address. */
		siginfo_t info;

		clear_siginfo(&info);
		info.si_signo = SIGSEGV;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *)pc;

		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		trace_unhandled_signal("segfault in unalign fixup", regs,
				       (unsigned long)info.si_addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		return;
	}

	if (!info->unalign_jit_base) {
		void __user *user_page;

		/*
		 * Allocate a page in userland.
		 * For 64-bit processes we try to place the mapping far
		 * from anything else that might be going on (specifically
		 * 64 GB below the top of the user address space). If it
		 * happens not to be possible to put it there, it's OK;
		 * the kernel will choose another location and we'll
		 * remember it for later.
		 */
		if (is_compat_task())
			user_page = NULL;
		else
			user_page = (void __user *)(TASK_SIZE - (1UL << 36)) +
				(current->pid << PAGE_SHIFT);

		user_page = (void __user *) vm_mmap(NULL,
						    (unsigned long)user_page,
						    PAGE_SIZE,
						    PROT_EXEC | PROT_READ |
						    PROT_WRITE,
#ifdef CONFIG_HOMECACHE
						    MAP_CACHE_HOME_TASK |
#endif
						    MAP_PRIVATE |
						    MAP_ANONYMOUS,
						    0);

		if (IS_ERR((void __force *)user_page)) {
			pr_err("Out of kernel pages trying do_mmap\n");
			return;
		}

		/* Save the address in the thread_info struct. */
		info->unalign_jit_base = user_page;
		if (unaligned_printk)
			pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
				raw_smp_processor_id(), current->pid,
				(unsigned long long)user_page);
	}

	/* Generate the unaligned JIT. */
	jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
}

#endif /* __tilegx__ */