insn.c

/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)

static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
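
/*
 * Worked example: a plain branch B (0x14000000) has bits [28:25] = 0b1010,
 * so the table above classifies it as AARCH64_INSN_CLS_BR_SYS.
 */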

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */
	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */
	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with a "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

static
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
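
/*
 * Illustrative sketch (hypothetical caller; "patch_site" is a made-up,
 * word-aligned text address): replacing one instruction with a NOP takes
 * the hotpatch-safe fast path above, i.e. a single write followed by
 * kick_all_cpus_sync() rather than stop_machine():
 *
 *	u32 nop = aarch64_insn_gen_nop();
 *	int err = aarch64_insn_patch_text(&patch_site, &nop, 1);
 */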

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
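
/*
 * For example, ADR splits its 21-bit immediate as immlo = insn[30:29] and
 * immhi = insn[23:5]; the ADR case above reassembles it as
 * (immhi << 2) | immlo and masks with ADR_IMM_SIZE - 1, giving the
 * +/-1M byte range of the instruction.
 */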

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
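
/*
 * Worked example: for addr == pc + 8, offset >> 2 == 2, so a plain branch
 * encodes as 0x14000000 | 2 == 0x14000002, i.e. "b #8".
 */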

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	/* branch_imm_common() returns the full range on error */
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
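
/* aarch64_insn_gen_nop() thus yields 0xd503201f, the canonical A64 NOP. */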

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be a multiple of 4 in the range [-256, 252]: %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be a multiple of 8 in the range [-512, 504]: %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
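
/*
 * Worked example: "stp x29, x30, [sp, #-16]!" corresponds to
 * AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX with the 64-bit variant;
 * offset -16 passes the alignment/range check and is encoded as
 * imm7 = -16 >> 3 = -2.
 */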

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into the Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
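
/*
 * Worked example: imm = 0x3000 has nothing in its low 12 bits, so it is
 * encoded as 3 with AARCH64_INSN_LSL_12 set ("add xd, xn, #3, lsl #12"),
 * whereas imm = 0x3001 populates both halves and is rejected.
 */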

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
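
/*
 * Worked example: MOVZ with imm = 0x1234 and shift = 16 stores
 * hw = shift >> 4 = 1 in bits [22:21], i.e. "movz xd, #0x1234, lsl #16".
 */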

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
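
/*
 * The paired shifts above sign-extend and scale in one step: imm26 sits in
 * bits [25:0] of a B/BL, so "imm << 6" moves its sign bit to bit 31 and the
 * arithmetic ">> 4" leaves a net "<< 2", converting the word offset to a
 * signed byte offset. IMM_19 (<< 13, >> 11) and IMM_14 (<< 18, >> 16) apply
 * the same trick.
 */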

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}
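
/*
 * Worked example: val = 0b0111000 gives sval = 0b111, and
 * (0b111 + 1) & 0b111 == 0, so the ones are contiguous; val = 0b0101000
 * gives sval = 0b101 and fails the test.
 */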

static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or a value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous range of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
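
/*
 * Worked example: imm = 0x00ff00ff00ff00ff repeats with a 16-bit stride, so
 * esz collapses to 16 and imm is trimmed to 0x00ff. With ones = 8,
 * imms = (7 | (0xf << ffs(16))) & 0x3f = 0b100111; the ones are already
 * right-aligned, so ror = 0 and immr = 0, and N stays 0 because esz != 64.
 */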

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
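
/*
 * Worked example: a rotate right by constant is EXTR with both sources
 * equal, so "ror x0, x1, #8" can be generated as
 * aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT, AARCH64_INSN_REG_1,
 * AARCH64_INSN_REG_1, AARCH64_INSN_REG_0, 8).
 */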