unaligned.c

/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that handles unaligned exceptions.
 */

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/prctl.h>
#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/spr_def.h>
#include <arch/opcode.h>

/*
 * This file handles the unaligned exception for tile-Gx. The tilepro's
 * unaligned exception is handled in single_step.c.
 */

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;

	if (kstrtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);
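
/*
 * Usage note (illustrative, not part of the original source): the per-access
 * logging in this file is controlled from the kernel command line, e.g.
 * booting with
 *
 *	unaligned_printk=1
 *
 * makes the fixup paths below pr_info() every unaligned access they handle.
 */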

unsigned int unaligned_fixup_count;

#ifdef __tilegx__

/*
 * Unaligned data JIT fixup code fragment. Reserved space is 128 bytes.
 * The first 64-bit word saves the fault PC address, the second word is the
 * fault instruction bundle, followed by 14 JIT bundles.
 */
struct unaligned_jit_fragment {
	unsigned long pc;
	tilegx_bundle_bits bundle;
	tilegx_bundle_bits insn[14];
};
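
/*
 * Layout note (illustrative, not in the original source): the fields above
 * occupy 8 + 8 + 14 * 8 = 128 bytes, i.e. exactly the reserved fragment
 * size. A build-time check could be sketched as:
 *
 *	static inline void unaligned_jit_fragment_size_check(void)
 *	{
 *		BUILD_BUG_ON(sizeof(struct unaligned_jit_fragment) != 128);
 *	}
 */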

/*
 * Check if there is a nop or fnop in the bundle's X0 pipeline.
 */
static bool is_bundle_x0_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_X0(bundle) ==
		  NOP_UNARY_OPCODE_X0) &&
		 (get_RRROpcodeExtension_X0(bundle) ==
		  UNARY_RRR_0_OPCODE_X0) &&
		 (get_Opcode_X0(bundle) ==
		  RRR_0_OPCODE_X0)) ||
		((get_UnaryOpcodeExtension_X0(bundle) ==
		  FNOP_UNARY_OPCODE_X0) &&
		 (get_RRROpcodeExtension_X0(bundle) ==
		  UNARY_RRR_0_OPCODE_X0) &&
		 (get_Opcode_X0(bundle) ==
		  RRR_0_OPCODE_X0)));
}

/*
 * Check if there is a nop or fnop in the bundle's X1 pipeline.
 */
static bool is_bundle_x1_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_X1(bundle) ==
		  NOP_UNARY_OPCODE_X1) &&
		 (get_RRROpcodeExtension_X1(bundle) ==
		  UNARY_RRR_0_OPCODE_X1) &&
		 (get_Opcode_X1(bundle) ==
		  RRR_0_OPCODE_X1)) ||
		((get_UnaryOpcodeExtension_X1(bundle) ==
		  FNOP_UNARY_OPCODE_X1) &&
		 (get_RRROpcodeExtension_X1(bundle) ==
		  UNARY_RRR_0_OPCODE_X1) &&
		 (get_Opcode_X1(bundle) ==
		  RRR_0_OPCODE_X1)));
}

/*
 * Check if there is a nop or fnop in the bundle's Y0 pipeline.
 */
static bool is_bundle_y0_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_Y0(bundle) ==
		  NOP_UNARY_OPCODE_Y0) &&
		 (get_RRROpcodeExtension_Y0(bundle) ==
		  UNARY_RRR_1_OPCODE_Y0) &&
		 (get_Opcode_Y0(bundle) ==
		  RRR_1_OPCODE_Y0)) ||
		((get_UnaryOpcodeExtension_Y0(bundle) ==
		  FNOP_UNARY_OPCODE_Y0) &&
		 (get_RRROpcodeExtension_Y0(bundle) ==
		  UNARY_RRR_1_OPCODE_Y0) &&
		 (get_Opcode_Y0(bundle) ==
		  RRR_1_OPCODE_Y0)));
}

/*
 * Check if there is a nop or fnop in the bundle's Y1 pipeline.
 */
static bool is_bundle_y1_nop(tilegx_bundle_bits bundle)
{
	return (((get_UnaryOpcodeExtension_Y1(bundle) ==
		  NOP_UNARY_OPCODE_Y1) &&
		 (get_RRROpcodeExtension_Y1(bundle) ==
		  UNARY_RRR_1_OPCODE_Y1) &&
		 (get_Opcode_Y1(bundle) ==
		  RRR_1_OPCODE_Y1)) ||
		((get_UnaryOpcodeExtension_Y1(bundle) ==
		  FNOP_UNARY_OPCODE_Y1) &&
		 (get_RRROpcodeExtension_Y1(bundle) ==
		  UNARY_RRR_1_OPCODE_Y1) &&
		 (get_Opcode_Y1(bundle) ==
		  RRR_1_OPCODE_Y1)));
}

/*
 * Test if a bundle's Y0 and Y1 pipelines are both nop or fnop.
 */
static bool is_y0_y1_nop(tilegx_bundle_bits bundle)
{
	return is_bundle_y0_nop(bundle) && is_bundle_y1_nop(bundle);
}

/*
 * Test if a bundle's X0 and X1 pipelines are both nop or fnop.
 */
static bool is_x0_x1_nop(tilegx_bundle_bits bundle)
{
	return is_bundle_x0_nop(bundle) && is_bundle_x1_nop(bundle);
}

/*
 * Find the destination and source registers of the faulting unaligned access
 * instruction at X1 or Y2. Also allocate up to 3 scratch registers (clob1,
 * clob2 and clob3), which are guaranteed to be different from any register
 * used in the fault bundle. r_alias returns whether the instructions other
 * than the unaligned load/store share a register with ra, rb or rd.
 */
static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,
		      uint64_t *rb, uint64_t *clob1, uint64_t *clob2,
		      uint64_t *clob3, bool *r_alias)
{
	int i;
	uint64_t reg;
	uint64_t reg_map = 0, alias_reg_map = 0, map;
	bool alias = false;

	/*
	 * Parse the fault bundle, find the potentially used registers and
	 * mark the corresponding bits in reg_map and alias_reg_map. These
	 * two bit maps are used to find the scratch registers and to
	 * determine if there is a register alias.
	 */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {  /* Y Mode Bundle. */
		reg = get_SrcA_Y2(bundle);
		reg_map |= 1ULL << reg;
		*ra = reg;
		reg = get_SrcBDest_Y2(bundle);
		reg_map |= 1ULL << reg;

		if (rd) {
			/* Load. */
			*rd = reg;
			alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
		} else {
			/* Store. */
			*rb = reg;
			alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
		}

		if (!is_bundle_y1_nop(bundle)) {
			reg = get_SrcA_Y1(bundle);
			reg_map |= (1ULL << reg);
			map = (1ULL << reg);
			reg = get_SrcB_Y1(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);
			reg = get_Dest_Y1(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			if (map & alias_reg_map)
				alias = true;
		}

		if (!is_bundle_y0_nop(bundle)) {
			reg = get_SrcA_Y0(bundle);
			reg_map |= (1ULL << reg);
			map = (1ULL << reg);
			reg = get_SrcB_Y0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);
			reg = get_Dest_Y0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			if (map & alias_reg_map)
				alias = true;
		}
	} else { /* X Mode Bundle. */
		reg = get_SrcA_X1(bundle);
		reg_map |= (1ULL << reg);
		*ra = reg;
		if (rd) {
			/* Load. */
			reg = get_Dest_X1(bundle);
			reg_map |= (1ULL << reg);
			*rd = reg;
			alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
		} else {
			/* Store. */
			reg = get_SrcB_X1(bundle);
			reg_map |= (1ULL << reg);
			*rb = reg;
			alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
		}

		if (!is_bundle_x0_nop(bundle)) {
			reg = get_SrcA_X0(bundle);
			reg_map |= (1ULL << reg);
			map = (1ULL << reg);
			reg = get_SrcB_X0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);
			reg = get_Dest_X0(bundle);
			reg_map |= (1ULL << reg);
			map |= (1ULL << reg);

			if (map & alias_reg_map)
				alias = true;
		}
	}

	/*
	 * "alias" indicates whether the unaligned-access registers collide
	 * with other registers in the same bundle. We simply test the
	 * all-register-operand (RRR) case and ignore immediate operands, so
	 * if an immediate field happens to match a register number we may
	 * end up falling back to the generic handling. If a bundle has no
	 * register alias, the fixup can be done in a simpler, faster way.
	 */
	*r_alias = alias;

	/* Flip bits on reg_map. */
	reg_map ^= -1ULL;

	/* Scan the low TREG_SP (54) bits of reg_map to find 3 set bits. */
	for (i = 0; i < TREG_SP; i++) {
		if (reg_map & (0x1ULL << i)) {
			if (*clob1 == -1) {
				*clob1 = i;
			} else if (*clob2 == -1) {
				*clob2 = i;
			} else if (*clob3 == -1) {
				*clob3 = i;
				return;
			}
		}
	}
}
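
/*
 * Typical call pattern (sketch only, mirroring the callers in
 * jit_bundle_gen() below; "is_load" stands for the caller's own
 * load-vs-store test): the clobber slots are pre-set to -1 so find_regs()
 * can tell which ones it has already filled, and rd is passed as NULL for
 * stores:
 *
 *	uint64_t rd = -1, ra = -1, rb = -1;
 *	uint64_t clob1 = -1, clob2 = -1, clob3 = -1;
 *	bool alias = false;
 *
 *	find_regs(bundle, is_load ? &rd : NULL, &ra, &rb,
 *		  &clob1, &clob2, &clob3, &alias);
 */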

/*
 * Sanity check the registers ra, rb, rd and clob1/2/3. Return true if any
 * of them is unexpected.
 */
static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb,
		       uint64_t clob1, uint64_t clob2, uint64_t clob3)
{
	bool unexpected = false;

	if ((ra >= 56) && (ra != TREG_ZERO))
		unexpected = true;

	if ((clob1 >= 56) || (clob2 >= 56) || (clob3 >= 56))
		unexpected = true;

	if (rd != -1) {
		if ((rd >= 56) && (rd != TREG_ZERO))
			unexpected = true;
	} else {
		if ((rb >= 56) && (rb != TREG_ZERO))
			unexpected = true;
	}
	return unexpected;
}

#define GX_INSN_X0_MASK ((1ULL << 31) - 1)
#define GX_INSN_X1_MASK (((1ULL << 31) - 1) << 31)
#define GX_INSN_Y0_MASK ((0xFULL << 27) | (0xFFFFFULL))
#define GX_INSN_Y1_MASK (GX_INSN_Y0_MASK << 31)
#define GX_INSN_Y2_MASK ((0x7FULL << 51) | (0x7FULL << 20))

#ifdef __LITTLE_ENDIAN
#define GX_INSN_BSWAP(_bundle_) (_bundle_)
#else
#define GX_INSN_BSWAP(_bundle_) swab64(_bundle_)
#endif /* __LITTLE_ENDIAN */

/*
 * __JIT_CODE(.) creates template bundles in the .rodata.unalign_data
 * section. The corresponding static functions jit_x#_###(.) generate
 * partial or whole bundles based on the template and the given arguments.
 */
#define __JIT_CODE(_X_)						\
	asm (".pushsection .rodata.unalign_data, \"a\"\n"	\
	     _X_"\n"						\
	     ".popsection\n")

__JIT_CODE("__unalign_jit_x1_mtspr: {mtspr 0, r0}");
static tilegx_bundle_bits jit_x1_mtspr(int spr, int reg)
{
	extern tilegx_bundle_bits __unalign_jit_x1_mtspr;
	return (GX_INSN_BSWAP(__unalign_jit_x1_mtspr) & GX_INSN_X1_MASK) |
		create_MT_Imm14_X1(spr) | create_SrcA_X1(reg);
}

__JIT_CODE("__unalign_jit_x1_mfspr: {mfspr r0, 0}");
static tilegx_bundle_bits jit_x1_mfspr(int reg, int spr)
{
	extern tilegx_bundle_bits __unalign_jit_x1_mfspr;
	return (GX_INSN_BSWAP(__unalign_jit_x1_mfspr) & GX_INSN_X1_MASK) |
		create_MF_Imm14_X1(spr) | create_Dest_X1(reg);
}

__JIT_CODE("__unalign_jit_x0_addi: {addi r0, r0, 0; iret}");
static tilegx_bundle_bits jit_x0_addi(int rd, int ra, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x0_addi;
	return (GX_INSN_BSWAP(__unalign_jit_x0_addi) & GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_Imm8_X0(imm8);
}

__JIT_CODE("__unalign_jit_x1_ldna: {ldna r0, r0}");
static tilegx_bundle_bits jit_x1_ldna(int rd, int ra)
{
	extern tilegx_bundle_bits __unalign_jit_x1_ldna;
	return (GX_INSN_BSWAP(__unalign_jit_x1_ldna) & GX_INSN_X1_MASK) |
		create_Dest_X1(rd) | create_SrcA_X1(ra);
}

__JIT_CODE("__unalign_jit_x0_dblalign: {dblalign r0, r0, r0}");
static tilegx_bundle_bits jit_x0_dblalign(int rd, int ra, int rb)
{
	extern tilegx_bundle_bits __unalign_jit_x0_dblalign;
	return (GX_INSN_BSWAP(__unalign_jit_x0_dblalign) & GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_SrcB_X0(rb);
}

__JIT_CODE("__unalign_jit_x1_iret: {iret}");
static tilegx_bundle_bits jit_x1_iret(void)
{
	extern tilegx_bundle_bits __unalign_jit_x1_iret;
	return GX_INSN_BSWAP(__unalign_jit_x1_iret) & GX_INSN_X1_MASK;
}

__JIT_CODE("__unalign_jit_x01_fnop: {fnop;fnop}");
static tilegx_bundle_bits jit_x0_fnop(void)
{
	extern tilegx_bundle_bits __unalign_jit_x01_fnop;
	return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X0_MASK;
}

static tilegx_bundle_bits jit_x1_fnop(void)
{
	extern tilegx_bundle_bits __unalign_jit_x01_fnop;
	return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X1_MASK;
}

__JIT_CODE("__unalign_jit_y2_dummy: {fnop; fnop; ld zero, sp}");
static tilegx_bundle_bits jit_y2_dummy(void)
{
	extern tilegx_bundle_bits __unalign_jit_y2_dummy;
	return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y2_MASK;
}

static tilegx_bundle_bits jit_y1_fnop(void)
{
	extern tilegx_bundle_bits __unalign_jit_y2_dummy;
	return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y1_MASK;
}

__JIT_CODE("__unalign_jit_x1_st1_add: {st1_add r1, r0, 0}");
static tilegx_bundle_bits jit_x1_st1_add(int ra, int rb, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_st1_add;
	return (GX_INSN_BSWAP(__unalign_jit_x1_st1_add) &
		(~create_SrcA_X1(-1)) &
		GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
		create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x1_st: {crc32_8 r1, r0, r0; st r0, r0}");
static tilegx_bundle_bits jit_x1_st(int ra, int rb)
{
	extern tilegx_bundle_bits __unalign_jit_x1_st;
	return (GX_INSN_BSWAP(__unalign_jit_x1_st) & GX_INSN_X1_MASK) |
		create_SrcA_X1(ra) | create_SrcB_X1(rb);
}

__JIT_CODE("__unalign_jit_x1_st_add: {st_add r1, r0, 0}");
static tilegx_bundle_bits jit_x1_st_add(int ra, int rb, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_st_add;
	return (GX_INSN_BSWAP(__unalign_jit_x1_st_add) &
		(~create_SrcA_X1(-1)) &
		GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
		create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x1_ld: {crc32_8 r1, r0, r0; ld r0, r0}");
static tilegx_bundle_bits jit_x1_ld(int rd, int ra)
{
	extern tilegx_bundle_bits __unalign_jit_x1_ld;
	return (GX_INSN_BSWAP(__unalign_jit_x1_ld) & GX_INSN_X1_MASK) |
		create_Dest_X1(rd) | create_SrcA_X1(ra);
}

__JIT_CODE("__unalign_jit_x1_ld_add: {ld_add r1, r0, 0}");
static tilegx_bundle_bits jit_x1_ld_add(int rd, int ra, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_ld_add;
	return (GX_INSN_BSWAP(__unalign_jit_x1_ld_add) &
		(~create_Dest_X1(-1)) &
		GX_INSN_X1_MASK) | create_Dest_X1(rd) |
		create_SrcA_X1(ra) | create_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x0_bfexts: {bfexts r0, r0, 0, 0}");
static tilegx_bundle_bits jit_x0_bfexts(int rd, int ra, int bfs, int bfe)
{
	extern tilegx_bundle_bits __unalign_jit_x0_bfexts;
	return (GX_INSN_BSWAP(__unalign_jit_x0_bfexts) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
}

__JIT_CODE("__unalign_jit_x0_bfextu: {bfextu r0, r0, 0, 0}");
static tilegx_bundle_bits jit_x0_bfextu(int rd, int ra, int bfs, int bfe)
{
	extern tilegx_bundle_bits __unalign_jit_x0_bfextu;
	return (GX_INSN_BSWAP(__unalign_jit_x0_bfextu) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
}

__JIT_CODE("__unalign_jit_x1_addi: {bfextu r1, r1, 0, 0; addi r0, r0, 0}");
static tilegx_bundle_bits jit_x1_addi(int rd, int ra, int imm8)
{
	extern tilegx_bundle_bits __unalign_jit_x1_addi;
	return (GX_INSN_BSWAP(__unalign_jit_x1_addi) & GX_INSN_X1_MASK) |
		create_Dest_X1(rd) | create_SrcA_X1(ra) |
		create_Imm8_X1(imm8);
}

__JIT_CODE("__unalign_jit_x0_shrui: {shrui r0, r0, 0; iret}");
static tilegx_bundle_bits jit_x0_shrui(int rd, int ra, int imm6)
{
	extern tilegx_bundle_bits __unalign_jit_x0_shrui;
	return (GX_INSN_BSWAP(__unalign_jit_x0_shrui) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_ShAmt_X0(imm6);
}

__JIT_CODE("__unalign_jit_x0_rotli: {rotli r0, r0, 0; iret}");
static tilegx_bundle_bits jit_x0_rotli(int rd, int ra, int imm6)
{
	extern tilegx_bundle_bits __unalign_jit_x0_rotli;
	return (GX_INSN_BSWAP(__unalign_jit_x0_rotli) &
		GX_INSN_X0_MASK) |
		create_Dest_X0(rd) | create_SrcA_X0(ra) |
		create_ShAmt_X0(imm6);
}

__JIT_CODE("__unalign_jit_x1_bnezt: {bnezt r0, __unalign_jit_x1_bnezt}");
static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff)
{
	extern tilegx_bundle_bits __unalign_jit_x1_bnezt;
	return (GX_INSN_BSWAP(__unalign_jit_x1_bnezt) &
		GX_INSN_X1_MASK) |
		create_SrcA_X1(ra) | create_BrOff_X1(broff);
}

#undef __JIT_CODE

/*
 * This function generates the unaligned fixup JIT.
 *
 * We first find the unaligned load/store instruction's destination and
 * source registers (ra, rb and rd) and 3 scratch registers by calling
 * find_regs(). The 3 scratch clobbers must not alias any register used in
 * the fault bundle. Then the fault bundle is analyzed to determine whether
 * it is a load or a store, the operand width, and whether there is a branch
 * or an address increment. Finally the generated JIT is copied into the JIT
 * code area in user space.
 */
static
void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
		    int align_ctl)
{
	struct thread_info *info = current_thread_info();
	struct unaligned_jit_fragment frag;
	struct unaligned_jit_fragment *jit_code_area;
	tilegx_bundle_bits bundle_2 = 0;
	/* If bundle_2_enable = false, bundle_2 is a fnop/nop operation. */
	bool bundle_2_enable = true;
	uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;
	/*
	 * Indicate whether the unaligned-access instruction's registers
	 * collide with others in the same bundle.
	 */
	bool alias = false;
	bool load_n_store = true;
	bool load_store_signed = false;
	unsigned int load_store_size = 8;
	bool y1_br = false;	/* True for a branch in the same bundle at Y1. */
	int y1_br_reg = 0;
	/* True for a link operation, i.e. jalr or lnk at Y1. */
	bool y1_lr = false;
	int y1_lr_reg = 0;
	bool x1_add = false;	/* True for a load/store ADD instruction at X1. */
	int x1_add_imm8 = 0;
	bool unexpected = false;
	int n = 0, k;

	jit_code_area =
		(struct unaligned_jit_fragment *)(info->unalign_jit_base);

	memset((void *)&frag, 0, sizeof(frag));

	/* 0: X mode, otherwise: Y mode. */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
		unsigned int mod, opcode;

		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
		    get_RRROpcodeExtension_Y1(bundle) ==
		    UNARY_RRR_1_OPCODE_Y1) {

			opcode = get_UnaryOpcodeExtension_Y1(bundle);

			/*
			 * Test for "jalr", "jalrp", "jr", "jrp" instructions
			 * in the Y1 pipeline.
			 */
			switch (opcode) {
			case JALR_UNARY_OPCODE_Y1:
			case JALRP_UNARY_OPCODE_Y1:
				y1_lr = true;
				y1_lr_reg = 55; /* Link register. */
				/* FALLTHROUGH */
			case JR_UNARY_OPCODE_Y1:
			case JRP_UNARY_OPCODE_Y1:
				y1_br = true;
				y1_br_reg = get_SrcA_Y1(bundle);
				break;
			case LNK_UNARY_OPCODE_Y1:
				/* "lnk" in the Y1 pipeline. */
				y1_lr = true;
				y1_lr_reg = get_Dest_Y1(bundle);
				break;
			}
		}

		opcode = get_Opcode_Y2(bundle);
		mod = get_Mode(bundle);

		/*
		 * bundle_2 is the bundle after making Y2 a dummy operation:
		 * "ld zero, sp".
		 */
		bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();

		/* Make Y1 a fnop if Y1 is a branch or lnk operation. */
		if (y1_br || y1_lr) {
			bundle_2 &= ~(GX_INSN_Y1_MASK);
			bundle_2 |= jit_y1_fnop();
		}

		if (is_y0_y1_nop(bundle_2))
			bundle_2_enable = false;

		if (mod == MODE_OPCODE_YC2) {
			/* Store. */
			load_n_store = false;
			load_store_size = 1 << opcode;
			load_store_signed = false;
			find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
			if (load_store_size > 8)
				unexpected = true;
		} else {
			/* Load. */
			load_n_store = true;
			if (mod == MODE_OPCODE_YB2) {
				switch (opcode) {
				case LD_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_OPCODE_Y2:
					load_store_signed = true;
					load_store_size = 4;
					break;
				case LD4U_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 4;
					break;
				default:
					unexpected = true;
				}
			} else if (mod == MODE_OPCODE_YA2) {
				if (opcode == LD2S_OPCODE_Y2) {
					load_store_signed = true;
					load_store_size = 2;
				} else if (opcode == LD2U_OPCODE_Y2) {
					load_store_signed = false;
					load_store_size = 2;
				} else
					unexpected = true;
			} else
				unexpected = true;
			find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
		}
	} else {
		unsigned int opcode;

		/* bundle_2 is the bundle after making X1 a "fnop". */
		bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();

		if (is_x0_x1_nop(bundle_2))
			bundle_2_enable = false;

		if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
			opcode = get_UnaryOpcodeExtension_X1(bundle);

			if (get_RRROpcodeExtension_X1(bundle) ==
			    UNARY_RRR_0_OPCODE_X1) {
				load_n_store = true;
				find_regs(bundle, &rd, &ra, &rb, &clob1,
					  &clob2, &clob3, &alias);

				switch (opcode) {
				case LD_UNARY_OPCODE_X1:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD4U_UNARY_OPCODE_X1:
					load_store_size = 4;
					break;
				case LD2S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD2U_UNARY_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			} else {
				load_n_store = false;
				load_store_signed = false;
				find_regs(bundle, 0, &ra, &rb,
					  &clob1, &clob2, &clob3,
					  &alias);

				opcode = get_RRROpcodeExtension_X1(bundle);
				switch (opcode) {
				case ST_RRR_0_OPCODE_X1:
					load_store_size = 8;
					break;
				case ST4_RRR_0_OPCODE_X1:
					load_store_size = 4;
					break;
				case ST2_RRR_0_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			}
		} else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
			load_n_store = true;
			opcode = get_Imm8OpcodeExtension_X1(bundle);
			switch (opcode) {
			case LD_ADD_IMM8_OPCODE_X1:
				load_store_size = 8;
				break;
			case LD4S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD4U_ADD_IMM8_OPCODE_X1:
				load_store_size = 4;
				break;
			case LD2S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD2U_ADD_IMM8_OPCODE_X1:
				load_store_size = 2;
				break;
			case ST_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 8;
				break;
			case ST4_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 4;
				break;
			case ST2_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 2;
				break;
			default:
				unexpected = true;
			}

			if (!unexpected) {
				x1_add = true;
				if (load_n_store)
					x1_add_imm8 = get_Imm8_X1(bundle);
				else
					x1_add_imm8 = get_Dest_Imm8_X1(bundle);
			}

			find_regs(bundle, load_n_store ? (&rd) : NULL,
				  &ra, &rb, &clob1, &clob2, &clob3, &alias);
		} else
			unexpected = true;
	}

	/*
	 * Sanity check the register numbers extracted from the fault bundle.
	 */
	if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true)
		unexpected = true;

	/* Give a warning if register ra holds an aligned address. */
	if (!unexpected)
		WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));

	/*
	 * If the fault came from kernel space, we only need to take care of
	 * the unaligned "get_user/put_user" macros defined in "uaccess.h".
	 * Basically, we will handle bundles like these:
	 * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
	 * (refer to "arch/tile/include/asm/uaccess.h" for details).
	 * For either a load or a store, a byte-wise operation is performed by
	 * calling get_user() or put_user(). If the macro returns a non-zero
	 * value, that value is stored in rx, otherwise rx is set to zero.
	 * Finally pc is made to point to the next bundle and we return.
	 */
	if (EX1_PL(regs->ex1) != USER_PL) {

		unsigned long rx = 0;
		unsigned long x = 0, ret = 0;

		if (y1_br || y1_lr || x1_add ||
		    (load_store_signed !=
		     (load_n_store && load_store_size == 4))) {
			/*
			 * Branch, link, wrong sign-extend or load/store add
			 * are not supported here.
			 */
			unexpected = true;
		} else if (!unexpected) {
			if (bundle & TILEGX_BUNDLE_MODE_MASK) {
				/*
				 * The fault bundle is Y mode.
				 * Check if Y1 and Y0 are of the form
				 * { movei rx, 0; nop/fnop }; if so, find rx.
				 */
				if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
				    && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
				    (get_Imm8_Y1(bundle) == 0) &&
				    is_bundle_y0_nop(bundle)) {
					rx = get_Dest_Y1(bundle);
				} else if ((get_Opcode_Y0(bundle) ==
					    ADDI_OPCODE_Y0) &&
					   (get_SrcA_Y0(bundle) == TREG_ZERO) &&
					   (get_Imm8_Y0(bundle) == 0) &&
					   is_bundle_y1_nop(bundle)) {
					rx = get_Dest_Y0(bundle);
				} else {
					unexpected = true;
				}
			} else {
				/*
				 * The fault bundle is X mode.
				 * Check if X0 is 'movei rx, 0';
				 * if so, find rx.
				 */
				if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
				    && (get_Imm8OpcodeExtension_X0(bundle) ==
					ADDI_IMM8_OPCODE_X0) &&
				    (get_SrcA_X0(bundle) == TREG_ZERO) &&
				    (get_Imm8_X0(bundle) == 0)) {
					rx = get_Dest_X0(bundle);
				} else {
					unexpected = true;
				}
			}

			/* rx should be less than 56. */
			if (!unexpected && (rx >= 56))
				unexpected = true;
		}

		if (!search_exception_tables(regs->pc)) {
			/* No fixup in the exception tables for the pc. */
			unexpected = true;
		}

		if (unexpected) {
			/* Unexpected unaligned kernel fault. */
			struct task_struct *tsk = validate_current();

			bust_spinlocks(1);

			show_regs(regs);

			if (unlikely(tsk->pid < 2)) {
				panic("Kernel unalign fault running %s!",
				      tsk->pid ? "init" : "the idle task");
			}
#ifdef SUPPORT_DIE
			die("Oops", regs);
#endif
			bust_spinlocks(1);

			do_group_exit(SIGKILL);

		} else {
			unsigned long i, b = 0;
			unsigned char *ptr =
				(unsigned char *)regs->regs[ra];
			if (load_n_store) {
				/* Handle get_user(x, ptr). */
				for (i = 0; i < load_store_size; i++) {
					ret = get_user(b, ptr++);
					if (!ret) {
						/* Success! Update x. */
#ifdef __LITTLE_ENDIAN
						x |= (b << (8 * i));
#else
						x <<= 8;
						x |= b;
#endif /* __LITTLE_ENDIAN */
					} else {
						x = 0;
						break;
					}
				}

				/* Sign-extend 4-byte loads. */
				if (load_store_size == 4)
					x = (long)(int)x;

				/* Set register rd. */
				regs->regs[rd] = x;

				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;

			} else {
				/* Handle put_user(x, ptr). */
				x = regs->regs[rb];
#ifdef __LITTLE_ENDIAN
				b = x;
#else
				/*
				 * Swap x so that it is stored from low to
				 * high memory, the same as in the
				 * little-endian case.
				 */
				switch (load_store_size) {
				case 8:
					b = swab64(x);
					break;
				case 4:
					b = swab32(x);
					break;
				case 2:
					b = swab16(x);
					break;
				}
#endif /* __LITTLE_ENDIAN */
				for (i = 0; i < load_store_size; i++) {
					ret = put_user(b, ptr++);
					if (ret)
						break;
					/* Success! Shift 1 byte. */
					b >>= 8;
				}

				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;
			}
		}

		unaligned_fixup_count++;

		if (unaligned_printk) {
			pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
				current->comm, current->pid, regs->regs[ra]);
		}

		/* Done! Return to the exception handler. */
		return;
	}

	if ((align_ctl == 0) || unexpected) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = (unsigned char __user *)0
		};
		if (unaligned_printk)
			pr_info("Unalign bundle: unexp @%llx, %llx\n",
				(unsigned long long)regs->pc,
				(unsigned long long)bundle);

		if (ra < 56) {
			unsigned long uaa = (unsigned long)regs->regs[ra];
			/* Set bus address. */
			info.si_addr = (unsigned char __user *)uaa;
		}

		unaligned_fixup_count++;

		trace_unhandled_signal("unaligned fixup trap", regs,
				       (unsigned long)info.si_addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return;
	}

#ifdef __LITTLE_ENDIAN
#define UA_FIXUP_ADDR_DELTA          1
#define UA_FIXUP_BFEXT_START(_B_)    0
#define UA_FIXUP_BFEXT_END(_B_)      (8 * (_B_) - 1)
#else /* __BIG_ENDIAN */
#define UA_FIXUP_ADDR_DELTA          -1
#define UA_FIXUP_BFEXT_START(_B_)    (64 - 8 * (_B_))
#define UA_FIXUP_BFEXT_END(_B_)      63
#endif /* __LITTLE_ENDIAN */
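
	/*
	 * Worked example (illustrative): for a 4-byte access the macros above
	 * expand to a bit field of bits [0, 31] on little-endian and
	 * bits [32, 63] on big-endian, i.e. the generated extract is
	 *
	 *	little-endian: bfexts/bfextu rd, rd, 0, 31
	 *	big-endian:    bfexts/bfextu rd, rd, 32, 63
	 *
	 * while UA_FIXUP_ADDR_DELTA makes the byte-wise store loops walk up
	 * (+1) or down (-1) through memory accordingly.
	 */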

	if ((ra != rb) && (rd != TREG_SP) && !alias &&
	    !y1_br && !y1_lr && !x1_add) {
		/*
		 * Simple case: ra != rb, no register alias found, and no
		 * branch or link. This will be the majority. We can do a
		 * little better for this simple case than with the generic
		 * scheme below.
		 */
		if (!load_n_store) {
			/*
			 * Simple store: ra != rb, no need for a scratch
			 * register. Just store and rotate right byte-wise.
			 */
#ifdef __BIG_ENDIAN
			frag.insn[n++] =
				jit_x0_addi(ra, ra, load_store_size - 1) |
				jit_x1_fnop();
#endif /* __BIG_ENDIAN */
			for (k = 0; k < load_store_size; k++) {
				/* Store a byte. */
				frag.insn[n++] =
					jit_x0_rotli(rb, rb, 56) |
					jit_x1_st1_add(ra, rb,
						       UA_FIXUP_ADDR_DELTA);
			}
#ifdef __BIG_ENDIAN
			frag.insn[n] = jit_x1_addi(ra, ra, 1);
#else
			frag.insn[n] = jit_x1_addi(ra, ra,
						   -1 * load_store_size);
#endif /* __LITTLE_ENDIAN */
			if (load_store_size == 8) {
				frag.insn[n] |= jit_x0_fnop();
			} else if (load_store_size == 4) {
				frag.insn[n] |= jit_x0_rotli(rb, rb, 32);
			} else { /* = 2 */
				frag.insn[n] |= jit_x0_rotli(rb, rb, 16);
			}
			n++;
			if (bundle_2_enable)
				frag.insn[n++] = bundle_2;
			frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
		} else {
			if (rd == ra) {
				/* Use two clobber registers: clob1/2. */
				frag.insn[n++] =
					jit_x0_addi(TREG_SP, TREG_SP, -16) |
					jit_x1_fnop();
				frag.insn[n++] =
					jit_x0_addi(clob1, ra, 7) |
					jit_x1_st_add(TREG_SP, clob1, -8);
				frag.insn[n++] =
					jit_x0_addi(clob2, ra, 0) |
					jit_x1_st(TREG_SP, clob2);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(rd, ra);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(clob1, clob1);
				/*
				 * Note: we must make sure that rd is not sp.
				 * Recover clob1/2 from the stack.
				 */
				frag.insn[n++] =
					jit_x0_dblalign(rd, clob1, clob2) |
					jit_x1_ld_add(clob2, TREG_SP, 8);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ld_add(clob1, TREG_SP, 16);
			} else {
				/* Use one clobber register: clob1 only. */
				frag.insn[n++] =
					jit_x0_addi(TREG_SP, TREG_SP, -16) |
					jit_x1_fnop();
				frag.insn[n++] =
					jit_x0_addi(clob1, ra, 7) |
					jit_x1_st(TREG_SP, clob1);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(rd, ra);
				frag.insn[n++] =
					jit_x0_fnop() |
					jit_x1_ldna(clob1, clob1);
				/*
				 * Note: we must make sure that rd is not sp.
				 * Recover clob1 from the stack.
				 */
				frag.insn[n++] =
					jit_x0_dblalign(rd, clob1, ra) |
					jit_x1_ld_add(clob1, TREG_SP, 16);
			}

			if (bundle_2_enable)
				frag.insn[n++] = bundle_2;
			/*
			 * For a non-8-byte load, extract the corresponding
			 * bytes and sign- or zero-extend.
			 */
			if (load_store_size == 4) {
				if (load_store_signed)
					frag.insn[n++] =
						jit_x0_bfexts(
							rd, rd,
							UA_FIXUP_BFEXT_START(4),
							UA_FIXUP_BFEXT_END(4)) |
						jit_x1_fnop();
				else
					frag.insn[n++] =
						jit_x0_bfextu(
							rd, rd,
							UA_FIXUP_BFEXT_START(4),
							UA_FIXUP_BFEXT_END(4)) |
						jit_x1_fnop();
			} else if (load_store_size == 2) {
				if (load_store_signed)
					frag.insn[n++] =
						jit_x0_bfexts(
							rd, rd,
							UA_FIXUP_BFEXT_START(2),
							UA_FIXUP_BFEXT_END(2)) |
						jit_x1_fnop();
				else
					frag.insn[n++] =
						jit_x0_bfextu(
							rd, rd,
							UA_FIXUP_BFEXT_START(2),
							UA_FIXUP_BFEXT_END(2)) |
						jit_x1_fnop();
			}

			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_iret();
		}
	} else if (!load_n_store) {

		/*
		 * Generic memory store case: use 3 clobber registers.
		 *
		 * Allocate space for saving clob2, clob1 and clob3 on the
		 * user's stack. Register clob3 points to where clob2 is
		 * saved, followed by clob1 and clob3 from high to low memory.
		 */
		frag.insn[n++] =
			jit_x0_addi(TREG_SP, TREG_SP, -32) |
			jit_x1_fnop();
		frag.insn[n++] =
			jit_x0_addi(clob3, TREG_SP, 16) |
			jit_x1_st_add(TREG_SP, clob3, 8);
#ifdef __LITTLE_ENDIAN
		frag.insn[n++] =
			jit_x0_addi(clob1, ra, 0) |
			jit_x1_st_add(TREG_SP, clob1, 8);
#else
		frag.insn[n++] =
			jit_x0_addi(clob1, ra, load_store_size - 1) |
			jit_x1_st_add(TREG_SP, clob1, 8);
#endif
		if (load_store_size == 8) {
			/*
			 * We store one byte at a time, not for speed but for
			 * compact code. After each store the data source
			 * register is rotated by one byte, so it is unchanged
			 * after 8 stores.
			 */
			frag.insn[n++] =
				jit_x0_addi(clob2, TREG_ZERO, 7) |
				jit_x1_st_add(TREG_SP, clob2, 16);
			frag.insn[n++] =
				jit_x0_rotli(rb, rb, 56) |
				jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
			frag.insn[n++] =
				jit_x0_addi(clob2, clob2, -1) |
				jit_x1_bnezt(clob2, -1);
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_addi(clob2, y1_br_reg, 0);
		} else if (load_store_size == 4) {
			frag.insn[n++] =
				jit_x0_addi(clob2, TREG_ZERO, 3) |
				jit_x1_st_add(TREG_SP, clob2, 16);
			frag.insn[n++] =
				jit_x0_rotli(rb, rb, 56) |
				jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
			frag.insn[n++] =
				jit_x0_addi(clob2, clob2, -1) |
				jit_x1_bnezt(clob2, -1);
			/*
			 * Same as the 8-byte case, but rb needs to be rotated
			 * another 4 bytes to recover it after the 4-byte store.
			 */
			frag.insn[n++] = jit_x0_rotli(rb, rb, 32) |
				jit_x1_addi(clob2, y1_br_reg, 0);
		} else { /* = 2 */
			frag.insn[n++] =
				jit_x0_addi(clob2, rb, 0) |
				jit_x1_st_add(TREG_SP, clob2, 16);
			for (k = 0; k < 2; k++) {
				frag.insn[n++] =
					jit_x0_shrui(rb, rb, 8) |
					jit_x1_st1_add(clob1, rb,
						       UA_FIXUP_ADDR_DELTA);
			}
			frag.insn[n++] =
				jit_x0_addi(rb, clob2, 0) |
				jit_x1_addi(clob2, y1_br_reg, 0);
		}

		if (bundle_2_enable)
			frag.insn[n++] = bundle_2;

		if (y1_lr) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mfspr(y1_lr_reg,
					     SPR_EX_CONTEXT_0_0);
		}
		if (y1_br) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
					     clob2);
		}
		if (x1_add) {
			frag.insn[n++] =
				jit_x0_addi(ra, ra, x1_add_imm8) |
				jit_x1_ld_add(clob2, clob3, -8);
		} else {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_ld_add(clob2, clob3, -8);
		}
		frag.insn[n++] =
			jit_x0_fnop() |
			jit_x1_ld_add(clob1, clob3, -8);
		frag.insn[n++] = jit_x0_fnop() | jit_x1_ld(clob3, clob3);
		frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();

	} else {
		/*
		 * Generic memory load case.
		 *
		 * Allocate space for saving clob1, clob2 and clob3 on the
		 * user's stack. Register clob3 points to where clob1 is
		 * saved, followed by clob2 and clob3 from high to low memory.
		 */
		frag.insn[n++] =
			jit_x0_addi(TREG_SP, TREG_SP, -32) |
			jit_x1_fnop();
		frag.insn[n++] =
			jit_x0_addi(clob3, TREG_SP, 16) |
			jit_x1_st_add(TREG_SP, clob3, 8);
		frag.insn[n++] =
			jit_x0_addi(clob2, ra, 0) |
			jit_x1_st_add(TREG_SP, clob2, 8);

		if (y1_br) {
			frag.insn[n++] =
				jit_x0_addi(clob1, y1_br_reg, 0) |
				jit_x1_st_add(TREG_SP, clob1, 16);
		} else {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_st_add(TREG_SP, clob1, 16);
		}

		if (bundle_2_enable)
			frag.insn[n++] = bundle_2;

		if (y1_lr) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mfspr(y1_lr_reg,
					     SPR_EX_CONTEXT_0_0);
		}

		if (y1_br) {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
					     clob1);
		}

		frag.insn[n++] =
			jit_x0_addi(clob1, clob2, 7) |
			jit_x1_ldna(rd, clob2);
		frag.insn[n++] =
			jit_x0_fnop() |
			jit_x1_ldna(clob1, clob1);
		frag.insn[n++] =
			jit_x0_dblalign(rd, clob1, clob2) |
			jit_x1_ld_add(clob1, clob3, -8);
		if (x1_add) {
			frag.insn[n++] =
				jit_x0_addi(ra, ra, x1_add_imm8) |
				jit_x1_ld_add(clob2, clob3, -8);
		} else {
			frag.insn[n++] =
				jit_x0_fnop() |
				jit_x1_ld_add(clob2, clob3, -8);
		}

		frag.insn[n++] =
			jit_x0_fnop() |
			jit_x1_ld(clob3, clob3);

		if (load_store_size == 4) {
			if (load_store_signed)
				frag.insn[n++] =
					jit_x0_bfexts(
						rd, rd,
						UA_FIXUP_BFEXT_START(4),
						UA_FIXUP_BFEXT_END(4)) |
					jit_x1_fnop();
			else
				frag.insn[n++] =
					jit_x0_bfextu(
						rd, rd,
						UA_FIXUP_BFEXT_START(4),
						UA_FIXUP_BFEXT_END(4)) |
					jit_x1_fnop();
		} else if (load_store_size == 2) {
			if (load_store_signed)
				frag.insn[n++] =
					jit_x0_bfexts(
						rd, rd,
						UA_FIXUP_BFEXT_START(2),
						UA_FIXUP_BFEXT_END(2)) |
					jit_x1_fnop();
			else
				frag.insn[n++] =
					jit_x0_bfextu(
						rd, rd,
						UA_FIXUP_BFEXT_START(2),
						UA_FIXUP_BFEXT_END(2)) |
					jit_x1_fnop();
		}

		frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
	}

	/* Max JIT bundle count is 14. */
	WARN_ON(n > 14);

	if (!unexpected) {
		int status = 0;
		int idx = (regs->pc >> 3) &
			((1ULL << (PAGE_SHIFT - UNALIGN_JIT_SHIFT)) - 1);

		frag.pc = regs->pc;
		frag.bundle = bundle;

		if (unaligned_printk) {
			pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
				current->comm, current->pid,
				(unsigned long)frag.pc,
				(unsigned long)frag.bundle,
				(int)alias, (int)rd, (int)ra,
				(int)rb, (int)bundle_2_enable,
				(int)y1_lr, (int)y1_br, (int)x1_add);

			for (k = 0; k < n; k += 2)
				pr_info("[%d] %016llx %016llx\n",
					k, (unsigned long long)frag.insn[k],
					(unsigned long long)frag.insn[k+1]);
		}

		/* Swap bundle byte order for big-endian systems. */
#ifdef __BIG_ENDIAN
		frag.bundle = GX_INSN_BSWAP(frag.bundle);
		for (k = 0; k < n; k++)
			frag.insn[k] = GX_INSN_BSWAP(frag.insn[k]);
#endif /* __BIG_ENDIAN */

		status = copy_to_user((void __user *)&jit_code_area[idx],
				      &frag, sizeof(frag));
		if (status) {
			/* Failed to copy the JIT into userspace: send SIGSEGV. */
			siginfo_t info = {
				.si_signo = SIGSEGV,
				.si_code = SEGV_MAPERR,
				.si_addr = (void __user *)&jit_code_area[idx]
			};
			pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
				current->pid, current->comm,
				(unsigned long long)&jit_code_area[idx]);
			trace_unhandled_signal("segfault in unalign fixup",
					       regs,
					       (unsigned long)info.si_addr,
					       SIGSEGV);
			force_sig_info(info.si_signo, &info, current);
			return;
		}

		/* Do a cheap (not perfectly accurate) increment. */
		unaligned_fixup_count++;

		__flush_icache_range((unsigned long)&jit_code_area[idx],
				     (unsigned long)&jit_code_area[idx] +
				     sizeof(frag));

		/* Set up SPR_EX_CONTEXT_0_0/1 for returning to the user program. */
		__insn_mtspr(SPR_EX_CONTEXT_0_0, regs->pc + 8);
		__insn_mtspr(SPR_EX_CONTEXT_0_1, PL_ICS_EX1(USER_PL, 0));

		/* Modify pc to point to the start of the new JIT. */
		regs->pc = (unsigned long)&jit_code_area[idx].insn[0];

		/* Set ICS in SPR_EX_CONTEXT_K_1. */
		regs->ex1 = PL_ICS_EX1(USER_PL, 1);
	}
}
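
/*
 * For reference (a sketch paraphrasing the code above, not a verbatim dump):
 * for the common case of an unaligned "ld rd, ra" with rd != ra, no register
 * alias, no Y1 branch/link and no post-increment, the emitted fragment is
 * essentially
 *
 *	{ addi sp, sp, -16 ; fnop }
 *	{ addi clob1, ra, 7 ; st sp, clob1 }
 *	{ fnop ; ldna rd, ra }
 *	{ fnop ; ldna clob1, clob1 }
 *	{ dblalign rd, clob1, ra ; ld_add clob1, sp, 16 }
 *	{ fnop ; iret }
 *
 * plus bundle_2 (the rest of the original bundle) re-issued just before the
 * "iret" when it is not all nops. Two aligned "ldna" loads straddling the
 * target address are merged with "dblalign", and the closing "iret" resumes
 * at the bundle after the fault, since SPR_EX_CONTEXT_0_0 was set to
 * regs->pc + 8 before entering the JIT.
 */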

/*
 * C function to generate the unaligned-data JIT. Called from the unaligned
 * data interrupt handler.
 *
 * First check whether unaligned fixup is disabled, the exception did not
 * come from user space, or the sp register points to an unaligned address;
 * if so, generate a SIGBUS. Then map a page into user space as the JIT area
 * if it is not mapped yet. Generate the JIT code by calling jit_bundle_gen()
 * and return to the exception handler.
 *
 * The exception handler will "iret" to the newly generated JIT code after
 * restoring caller-saved registers. The JIT code will then perform another
 * "iret" to resume the user's program.
 */
void do_unaligned(struct pt_regs *regs, int vecnum)
{
	enum ctx_state prev_state = exception_enter();
	tilegx_bundle_bits __user *pc;
	tilegx_bundle_bits bundle;
	struct thread_info *info = current_thread_info();
	int align_ctl;

	/* Check the per-process unaligned JIT flags. */
	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

	/* Enable interrupts in order to access user land. */
	local_irq_enable();

	/*
	 * If the fault came from kernel space, there are two choices:
	 * (a) unaligned_fixup < 1: first look for a get/put_user exception
	 *     fixup that returns -EFAULT. If there is no fixup, simply panic
	 *     the kernel.
	 * (b) unaligned_fixup >= 1: try to fix the unaligned access if it
	 *     was triggered by the get_user/put_user() macros. Panic the
	 *     kernel if it is not fixable.
	 */
	if (EX1_PL(regs->ex1) != USER_PL) {

		if (align_ctl < 1) {
			unaligned_fixup_count++;
			/* If the exception came from the kernel, try to fix it up. */
			if (fixup_exception(regs)) {
				if (unaligned_printk)
					pr_info("Unalign fixup: %d %llx @%llx\n",
						(int)unaligned_fixup,
						(unsigned long long)regs->ex1,
						(unsigned long long)regs->pc);
			} else {
				/* Not fixable. Go panic. */
				panic("Unalign exception in Kernel. pc=%lx",
				      regs->pc);
			}
		} else {
			/*
			 * Try to fix the exception. If we can't, panic the
			 * kernel.
			 */
			bundle = GX_INSN_BSWAP(
				*((tilegx_bundle_bits *)(regs->pc)));
			jit_bundle_gen(regs, bundle, align_ctl);
		}
		goto done;
	}

	/*
	 * The fault came from user space with ICS set, or the stack pointer
	 * is not aligned, or fixups are disabled: trigger SIGBUS.
	 */
	if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = (unsigned char __user *)0
		};

		if (unaligned_printk)
			pr_info("Unalign fixup: %d %llx @%llx\n",
				(int)unaligned_fixup,
				(unsigned long long)regs->ex1,
				(unsigned long long)regs->pc);

		unaligned_fixup_count++;

		trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		goto done;
	}

	/* Read the bundle that caused the exception! */
	pc = (tilegx_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		/* We should never get here, since pc is a valid user address. */
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = (void __user *)pc
		};
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		trace_unhandled_signal("segfault in unalign fixup", regs,
				       (unsigned long)info.si_addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		goto done;
	}

	if (!info->unalign_jit_base) {
		void __user *user_page;

		/*
		 * Allocate a page in userland.
		 * For 64-bit processes we try to place the mapping far
		 * from anything else that might be going on (specifically
		 * 64 GB below the top of the user address space). If it
		 * happens not to be possible to put it there, it's OK;
		 * the kernel will choose another location and we'll
		 * remember it for later.
		 */
		if (is_compat_task())
			user_page = NULL;
		else
			user_page = (void __user *)(TASK_SIZE - (1UL << 36)) +
				(current->pid << PAGE_SHIFT);

		user_page = (void __user *) vm_mmap(NULL,
						    (unsigned long)user_page,
						    PAGE_SIZE,
						    PROT_EXEC | PROT_READ |
						    PROT_WRITE,
#ifdef CONFIG_HOMECACHE
						    MAP_CACHE_HOME_TASK |
#endif
						    MAP_PRIVATE |
						    MAP_ANONYMOUS,
						    0);

		if (IS_ERR((void __force *)user_page)) {
			pr_err("Out of kernel pages trying do_mmap\n");
			goto done;
		}

		/* Save the address in the thread_info struct. */
		info->unalign_jit_base = user_page;
		if (unaligned_printk)
			pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
				raw_smp_processor_id(), current->pid,
				(unsigned long long)user_page);
	}

	/* Generate the unaligned-access JIT. */
	jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);

done:
	exception_exit(prev_state);
}
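
/*
 * Usage note (illustrative, not from the original source): the per-process
 * policy checked at the top of do_unaligned() is the standard PR_SET_UNALIGN
 * control, so a user program can opt out of the JIT fixup and take SIGBUS
 * instead, e.g.
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0);
 *
 * while PR_UNALIGN_NOPRINT requests that unaligned accesses simply be
 * fixed up.
 */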

#endif /* __tilegx__ */