insn.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084
  1. /*
  2. * Copyright (C) 2013 Huawei Ltd.
  3. * Author: Jiang Liu <liuj97@gmail.com>
  4. *
  5. * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/bitops.h>
  20. #include <linux/bug.h>
  21. #include <linux/compiler.h>
  22. #include <linux/kernel.h>
  23. #include <linux/mm.h>
  24. #include <linux/smp.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/stop_machine.h>
  27. #include <linux/types.h>
  28. #include <linux/uaccess.h>
  29. #include <asm/cacheflush.h>
  30. #include <asm/debug-monitors.h>
  31. #include <asm/fixmap.h>
  32. #include <asm/insn.h>
  33. #define AARCH64_INSN_SF_BIT BIT(31)
  34. #define AARCH64_INSN_N_BIT BIT(22)
  35. static int aarch64_insn_encoding_class[] = {
  36. AARCH64_INSN_CLS_UNKNOWN,
  37. AARCH64_INSN_CLS_UNKNOWN,
  38. AARCH64_INSN_CLS_UNKNOWN,
  39. AARCH64_INSN_CLS_UNKNOWN,
  40. AARCH64_INSN_CLS_LDST,
  41. AARCH64_INSN_CLS_DP_REG,
  42. AARCH64_INSN_CLS_LDST,
  43. AARCH64_INSN_CLS_DP_FPSIMD,
  44. AARCH64_INSN_CLS_DP_IMM,
  45. AARCH64_INSN_CLS_DP_IMM,
  46. AARCH64_INSN_CLS_BR_SYS,
  47. AARCH64_INSN_CLS_BR_SYS,
  48. AARCH64_INSN_CLS_LDST,
  49. AARCH64_INSN_CLS_DP_REG,
  50. AARCH64_INSN_CLS_LDST,
  51. AARCH64_INSN_CLS_DP_FPSIMD,
  52. };
  53. enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
  54. {
  55. return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
  56. }
  57. /* NOP is an alias of HINT */
  58. bool __kprobes aarch64_insn_is_nop(u32 insn)
  59. {
  60. if (!aarch64_insn_is_hint(insn))
  61. return false;
  62. switch (insn & 0xFE0) {
  63. case AARCH64_INSN_HINT_YIELD:
  64. case AARCH64_INSN_HINT_WFE:
  65. case AARCH64_INSN_HINT_WFI:
  66. case AARCH64_INSN_HINT_SEV:
  67. case AARCH64_INSN_HINT_SEVL:
  68. return false;
  69. default:
  70. return true;
  71. }
  72. }
  73. static DEFINE_SPINLOCK(patch_lock);
  74. static void __kprobes *patch_map(void *addr, int fixmap)
  75. {
  76. unsigned long uintaddr = (uintptr_t) addr;
  77. bool module = !core_kernel_text(uintaddr);
  78. struct page *page;
  79. if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
  80. page = vmalloc_to_page(addr);
  81. else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
  82. page = virt_to_page(addr);
  83. else
  84. return addr;
  85. BUG_ON(!page);
  86. set_fixmap(fixmap, page_to_phys(page));
  87. return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
  88. }
  89. static void __kprobes patch_unmap(int fixmap)
  90. {
  91. clear_fixmap(fixmap);
  92. }
  93. /*
  94. * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
  95. * little-endian.
  96. */
  97. int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
  98. {
  99. int ret;
  100. u32 val;
  101. ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
  102. if (!ret)
  103. *insnp = le32_to_cpu(val);
  104. return ret;
  105. }
  106. static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
  107. {
  108. void *waddr = addr;
  109. unsigned long flags = 0;
  110. int ret;
  111. spin_lock_irqsave(&patch_lock, flags);
  112. waddr = patch_map(addr, FIX_TEXT_POKE0);
  113. ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
  114. patch_unmap(FIX_TEXT_POKE0);
  115. spin_unlock_irqrestore(&patch_lock, flags);
  116. return ret;
  117. }
  118. int __kprobes aarch64_insn_write(void *addr, u32 insn)
  119. {
  120. insn = cpu_to_le32(insn);
  121. return __aarch64_insn_write(addr, insn);
  122. }
  123. static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
  124. {
  125. if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
  126. return false;
  127. return aarch64_insn_is_b(insn) ||
  128. aarch64_insn_is_bl(insn) ||
  129. aarch64_insn_is_svc(insn) ||
  130. aarch64_insn_is_hvc(insn) ||
  131. aarch64_insn_is_smc(insn) ||
  132. aarch64_insn_is_brk(insn) ||
  133. aarch64_insn_is_nop(insn);
  134. }
  135. /*
  136. * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
  137. * Section B2.6.5 "Concurrent modification and execution of instructions":
  138. * Concurrent modification and execution of instructions can lead to the
  139. * resulting instruction performing any behavior that can be achieved by
  140. * executing any sequence of instructions that can be executed from the
  141. * same Exception level, except where the instruction before modification
  142. * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
  143. * or SMC instruction.
  144. */
  145. bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
  146. {
  147. return __aarch64_insn_hotpatch_safe(old_insn) &&
  148. __aarch64_insn_hotpatch_safe(new_insn);
  149. }
  150. int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
  151. {
  152. u32 *tp = addr;
  153. int ret;
  154. /* A64 instructions must be word aligned */
  155. if ((uintptr_t)tp & 0x3)
  156. return -EINVAL;
  157. ret = aarch64_insn_write(tp, insn);
  158. if (ret == 0)
  159. flush_icache_range((uintptr_t)tp,
  160. (uintptr_t)tp + AARCH64_INSN_SIZE);
  161. return ret;
  162. }
  163. struct aarch64_insn_patch {
  164. void **text_addrs;
  165. u32 *new_insns;
  166. int insn_cnt;
  167. atomic_t cpu_count;
  168. };
/*
 * stop_machine() callback: one CPU applies all patches while the others
 * spin until it signals completion, then resynchronize their pipelines.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Stop on the first failing address. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * Secondary CPUs: wait until the master has bumped the
		 * counter past the number of online CPUs.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		/* Discard any instructions prefetched before the patch. */
		isb();
	}

	return ret;
}
  192. int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
  193. {
  194. struct aarch64_insn_patch patch = {
  195. .text_addrs = addrs,
  196. .new_insns = insns,
  197. .insn_cnt = cnt,
  198. .cpu_count = ATOMIC_INIT(0),
  199. };
  200. if (cnt <= 0)
  201. return -EINVAL;
  202. return stop_machine(aarch64_insn_patch_text_cb, &patch,
  203. cpu_online_mask);
  204. }
  205. int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
  206. {
  207. int ret;
  208. u32 insn;
  209. /* Unsafe to patch multiple instructions without synchronizaiton */
  210. if (cnt == 1) {
  211. ret = aarch64_insn_read(addrs[0], &insn);
  212. if (ret)
  213. return ret;
  214. if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
  215. /*
  216. * ARMv8 architecture doesn't guarantee all CPUs see
  217. * the new instruction after returning from function
  218. * aarch64_insn_patch_text_nosync(). So send IPIs to
  219. * all other CPUs to achieve instruction
  220. * synchronization.
  221. */
  222. ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
  223. kick_all_cpus_sync();
  224. return ret;
  225. }
  226. }
  227. return aarch64_insn_patch_text_sync(addrs, insns, cnt);
  228. }
/*
 * Look up the unshifted bit mask (*maskp) and bit position (*shiftp) of
 * the immediate field for immediate encoding @type.
 *
 * Returns 0 on success, -EINVAL for an unhandled immediate type.
 */
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
/*
 * ADR/ADRP split their 21-bit immediate in two: the low
 * ADR_IMM_HILOSPLIT bits live in [30:29] (immlo), the rest in [23:5]
 * (immhi).
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

/*
 * Extract the immediate encoded in @insn for immediate type @type.
 * Returns 0 (and logs an error) for an unknown immediate encoding.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Reassemble the split immlo/immhi fields. */
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
/*
 * Return @insn with its @type immediate field replaced by @imm.
 * Returns 0 (and logs an error) for an unknown immediate encoding.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Split @imm into the ADR immlo [30:29] / immhi [23:5] fields. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
  333. static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
  334. u32 insn,
  335. enum aarch64_insn_register reg)
  336. {
  337. int shift;
  338. if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
  339. pr_err("%s: unknown register encoding %d\n", __func__, reg);
  340. return 0;
  341. }
  342. switch (type) {
  343. case AARCH64_INSN_REGTYPE_RT:
  344. case AARCH64_INSN_REGTYPE_RD:
  345. shift = 0;
  346. break;
  347. case AARCH64_INSN_REGTYPE_RN:
  348. shift = 5;
  349. break;
  350. case AARCH64_INSN_REGTYPE_RT2:
  351. case AARCH64_INSN_REGTYPE_RA:
  352. shift = 10;
  353. break;
  354. case AARCH64_INSN_REGTYPE_RM:
  355. shift = 16;
  356. break;
  357. default:
  358. pr_err("%s: unknown register type encoding %d\n", __func__,
  359. type);
  360. return 0;
  361. }
  362. insn &= ~(GENMASK(4, 0) << shift);
  363. insn |= reg << shift;
  364. return insn;
  365. }
  366. static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
  367. u32 insn)
  368. {
  369. u32 size;
  370. switch (type) {
  371. case AARCH64_INSN_SIZE_8:
  372. size = 0;
  373. break;
  374. case AARCH64_INSN_SIZE_16:
  375. size = 1;
  376. break;
  377. case AARCH64_INSN_SIZE_32:
  378. size = 2;
  379. break;
  380. case AARCH64_INSN_SIZE_64:
  381. size = 3;
  382. break;
  383. default:
  384. pr_err("%s: unknown size encoding %d\n", __func__, type);
  385. return 0;
  386. }
  387. insn &= ~GENMASK(31, 30);
  388. insn |= size << 30;
  389. return insn;
  390. }
  391. static inline long branch_imm_common(unsigned long pc, unsigned long addr,
  392. long range)
  393. {
  394. long offset;
  395. /*
  396. * PC: A 64-bit Program Counter holding the address of the current
  397. * instruction. A64 instructions must be word-aligned.
  398. */
  399. BUG_ON((pc & 0x3) || (addr & 0x3));
  400. offset = ((long)addr - (long)pc);
  401. BUG_ON(offset < -range || offset >= range);
  402. return offset;
  403. }
  404. u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
  405. enum aarch64_insn_branch_type type)
  406. {
  407. u32 insn;
  408. long offset;
  409. /*
  410. * B/BL support [-128M, 128M) offset
  411. * ARM64 virtual address arrangement guarantees all kernel and module
  412. * texts are within +/-128M.
  413. */
  414. offset = branch_imm_common(pc, addr, SZ_128M);
  415. switch (type) {
  416. case AARCH64_INSN_BRANCH_LINK:
  417. insn = aarch64_insn_get_bl_value();
  418. break;
  419. case AARCH64_INSN_BRANCH_NOLINK:
  420. insn = aarch64_insn_get_b_value();
  421. break;
  422. default:
  423. BUG_ON(1);
  424. return AARCH64_BREAK_FAULT;
  425. }
  426. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
  427. offset >> 2);
  428. }
  429. u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
  430. enum aarch64_insn_register reg,
  431. enum aarch64_insn_variant variant,
  432. enum aarch64_insn_branch_type type)
  433. {
  434. u32 insn;
  435. long offset;
  436. offset = branch_imm_common(pc, addr, SZ_1M);
  437. switch (type) {
  438. case AARCH64_INSN_BRANCH_COMP_ZERO:
  439. insn = aarch64_insn_get_cbz_value();
  440. break;
  441. case AARCH64_INSN_BRANCH_COMP_NONZERO:
  442. insn = aarch64_insn_get_cbnz_value();
  443. break;
  444. default:
  445. BUG_ON(1);
  446. return AARCH64_BREAK_FAULT;
  447. }
  448. switch (variant) {
  449. case AARCH64_INSN_VARIANT_32BIT:
  450. break;
  451. case AARCH64_INSN_VARIANT_64BIT:
  452. insn |= AARCH64_INSN_SF_BIT;
  453. break;
  454. default:
  455. BUG_ON(1);
  456. return AARCH64_BREAK_FAULT;
  457. }
  458. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
  459. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
  460. offset >> 2);
  461. }
  462. u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
  463. enum aarch64_insn_condition cond)
  464. {
  465. u32 insn;
  466. long offset;
  467. offset = branch_imm_common(pc, addr, SZ_1M);
  468. insn = aarch64_insn_get_bcond_value();
  469. BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
  470. insn |= cond;
  471. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
  472. offset >> 2);
  473. }
  474. u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
  475. {
  476. return aarch64_insn_get_hint_value() | op;
  477. }
  478. u32 __kprobes aarch64_insn_gen_nop(void)
  479. {
  480. return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
  481. }
  482. u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
  483. enum aarch64_insn_branch_type type)
  484. {
  485. u32 insn;
  486. switch (type) {
  487. case AARCH64_INSN_BRANCH_NOLINK:
  488. insn = aarch64_insn_get_br_value();
  489. break;
  490. case AARCH64_INSN_BRANCH_LINK:
  491. insn = aarch64_insn_get_blr_value();
  492. break;
  493. case AARCH64_INSN_BRANCH_RETURN:
  494. insn = aarch64_insn_get_ret_value();
  495. break;
  496. default:
  497. BUG_ON(1);
  498. return AARCH64_BREAK_FAULT;
  499. }
  500. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
  501. }
  502. u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
  503. enum aarch64_insn_register base,
  504. enum aarch64_insn_register offset,
  505. enum aarch64_insn_size_type size,
  506. enum aarch64_insn_ldst_type type)
  507. {
  508. u32 insn;
  509. switch (type) {
  510. case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
  511. insn = aarch64_insn_get_ldr_reg_value();
  512. break;
  513. case AARCH64_INSN_LDST_STORE_REG_OFFSET:
  514. insn = aarch64_insn_get_str_reg_value();
  515. break;
  516. default:
  517. BUG_ON(1);
  518. return AARCH64_BREAK_FAULT;
  519. }
  520. insn = aarch64_insn_encode_ldst_size(size, insn);
  521. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
  522. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  523. base);
  524. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
  525. offset);
  526. }
  527. u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
  528. enum aarch64_insn_register reg2,
  529. enum aarch64_insn_register base,
  530. int offset,
  531. enum aarch64_insn_variant variant,
  532. enum aarch64_insn_ldst_type type)
  533. {
  534. u32 insn;
  535. int shift;
  536. switch (type) {
  537. case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
  538. insn = aarch64_insn_get_ldp_pre_value();
  539. break;
  540. case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
  541. insn = aarch64_insn_get_stp_pre_value();
  542. break;
  543. case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
  544. insn = aarch64_insn_get_ldp_post_value();
  545. break;
  546. case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
  547. insn = aarch64_insn_get_stp_post_value();
  548. break;
  549. default:
  550. BUG_ON(1);
  551. return AARCH64_BREAK_FAULT;
  552. }
  553. switch (variant) {
  554. case AARCH64_INSN_VARIANT_32BIT:
  555. /* offset must be multiples of 4 in the range [-256, 252] */
  556. BUG_ON(offset & 0x3);
  557. BUG_ON(offset < -256 || offset > 252);
  558. shift = 2;
  559. break;
  560. case AARCH64_INSN_VARIANT_64BIT:
  561. /* offset must be multiples of 8 in the range [-512, 504] */
  562. BUG_ON(offset & 0x7);
  563. BUG_ON(offset < -512 || offset > 504);
  564. shift = 3;
  565. insn |= AARCH64_INSN_SF_BIT;
  566. break;
  567. default:
  568. BUG_ON(1);
  569. return AARCH64_BREAK_FAULT;
  570. }
  571. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
  572. reg1);
  573. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
  574. reg2);
  575. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  576. base);
  577. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
  578. offset >> shift);
  579. }
  580. u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
  581. enum aarch64_insn_register src,
  582. int imm, enum aarch64_insn_variant variant,
  583. enum aarch64_insn_adsb_type type)
  584. {
  585. u32 insn;
  586. switch (type) {
  587. case AARCH64_INSN_ADSB_ADD:
  588. insn = aarch64_insn_get_add_imm_value();
  589. break;
  590. case AARCH64_INSN_ADSB_SUB:
  591. insn = aarch64_insn_get_sub_imm_value();
  592. break;
  593. case AARCH64_INSN_ADSB_ADD_SETFLAGS:
  594. insn = aarch64_insn_get_adds_imm_value();
  595. break;
  596. case AARCH64_INSN_ADSB_SUB_SETFLAGS:
  597. insn = aarch64_insn_get_subs_imm_value();
  598. break;
  599. default:
  600. BUG_ON(1);
  601. return AARCH64_BREAK_FAULT;
  602. }
  603. switch (variant) {
  604. case AARCH64_INSN_VARIANT_32BIT:
  605. break;
  606. case AARCH64_INSN_VARIANT_64BIT:
  607. insn |= AARCH64_INSN_SF_BIT;
  608. break;
  609. default:
  610. BUG_ON(1);
  611. return AARCH64_BREAK_FAULT;
  612. }
  613. BUG_ON(imm & ~(SZ_4K - 1));
  614. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  615. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  616. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
  617. }
  618. u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
  619. enum aarch64_insn_register src,
  620. int immr, int imms,
  621. enum aarch64_insn_variant variant,
  622. enum aarch64_insn_bitfield_type type)
  623. {
  624. u32 insn;
  625. u32 mask;
  626. switch (type) {
  627. case AARCH64_INSN_BITFIELD_MOVE:
  628. insn = aarch64_insn_get_bfm_value();
  629. break;
  630. case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
  631. insn = aarch64_insn_get_ubfm_value();
  632. break;
  633. case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
  634. insn = aarch64_insn_get_sbfm_value();
  635. break;
  636. default:
  637. BUG_ON(1);
  638. return AARCH64_BREAK_FAULT;
  639. }
  640. switch (variant) {
  641. case AARCH64_INSN_VARIANT_32BIT:
  642. mask = GENMASK(4, 0);
  643. break;
  644. case AARCH64_INSN_VARIANT_64BIT:
  645. insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
  646. mask = GENMASK(5, 0);
  647. break;
  648. default:
  649. BUG_ON(1);
  650. return AARCH64_BREAK_FAULT;
  651. }
  652. BUG_ON(immr & ~mask);
  653. BUG_ON(imms & ~mask);
  654. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  655. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  656. insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
  657. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
  658. }
  659. u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
  660. int imm, int shift,
  661. enum aarch64_insn_variant variant,
  662. enum aarch64_insn_movewide_type type)
  663. {
  664. u32 insn;
  665. switch (type) {
  666. case AARCH64_INSN_MOVEWIDE_ZERO:
  667. insn = aarch64_insn_get_movz_value();
  668. break;
  669. case AARCH64_INSN_MOVEWIDE_KEEP:
  670. insn = aarch64_insn_get_movk_value();
  671. break;
  672. case AARCH64_INSN_MOVEWIDE_INVERSE:
  673. insn = aarch64_insn_get_movn_value();
  674. break;
  675. default:
  676. BUG_ON(1);
  677. return AARCH64_BREAK_FAULT;
  678. }
  679. BUG_ON(imm & ~(SZ_64K - 1));
  680. switch (variant) {
  681. case AARCH64_INSN_VARIANT_32BIT:
  682. BUG_ON(shift != 0 && shift != 16);
  683. break;
  684. case AARCH64_INSN_VARIANT_64BIT:
  685. insn |= AARCH64_INSN_SF_BIT;
  686. BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
  687. shift != 48);
  688. break;
  689. default:
  690. BUG_ON(1);
  691. return AARCH64_BREAK_FAULT;
  692. }
  693. insn |= (shift >> 4) << 21;
  694. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  695. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
  696. }
  697. u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
  698. enum aarch64_insn_register src,
  699. enum aarch64_insn_register reg,
  700. int shift,
  701. enum aarch64_insn_variant variant,
  702. enum aarch64_insn_adsb_type type)
  703. {
  704. u32 insn;
  705. switch (type) {
  706. case AARCH64_INSN_ADSB_ADD:
  707. insn = aarch64_insn_get_add_value();
  708. break;
  709. case AARCH64_INSN_ADSB_SUB:
  710. insn = aarch64_insn_get_sub_value();
  711. break;
  712. case AARCH64_INSN_ADSB_ADD_SETFLAGS:
  713. insn = aarch64_insn_get_adds_value();
  714. break;
  715. case AARCH64_INSN_ADSB_SUB_SETFLAGS:
  716. insn = aarch64_insn_get_subs_value();
  717. break;
  718. default:
  719. BUG_ON(1);
  720. return AARCH64_BREAK_FAULT;
  721. }
  722. switch (variant) {
  723. case AARCH64_INSN_VARIANT_32BIT:
  724. BUG_ON(shift & ~(SZ_32 - 1));
  725. break;
  726. case AARCH64_INSN_VARIANT_64BIT:
  727. insn |= AARCH64_INSN_SF_BIT;
  728. BUG_ON(shift & ~(SZ_64 - 1));
  729. break;
  730. default:
  731. BUG_ON(1);
  732. return AARCH64_BREAK_FAULT;
  733. }
  734. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  735. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  736. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  737. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
  738. }
  739. u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
  740. enum aarch64_insn_register src,
  741. enum aarch64_insn_variant variant,
  742. enum aarch64_insn_data1_type type)
  743. {
  744. u32 insn;
  745. switch (type) {
  746. case AARCH64_INSN_DATA1_REVERSE_16:
  747. insn = aarch64_insn_get_rev16_value();
  748. break;
  749. case AARCH64_INSN_DATA1_REVERSE_32:
  750. insn = aarch64_insn_get_rev32_value();
  751. break;
  752. case AARCH64_INSN_DATA1_REVERSE_64:
  753. BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
  754. insn = aarch64_insn_get_rev64_value();
  755. break;
  756. default:
  757. BUG_ON(1);
  758. return AARCH64_BREAK_FAULT;
  759. }
  760. switch (variant) {
  761. case AARCH64_INSN_VARIANT_32BIT:
  762. break;
  763. case AARCH64_INSN_VARIANT_64BIT:
  764. insn |= AARCH64_INSN_SF_BIT;
  765. break;
  766. default:
  767. BUG_ON(1);
  768. return AARCH64_BREAK_FAULT;
  769. }
  770. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  771. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  772. }
  773. u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
  774. enum aarch64_insn_register src,
  775. enum aarch64_insn_register reg,
  776. enum aarch64_insn_variant variant,
  777. enum aarch64_insn_data2_type type)
  778. {
  779. u32 insn;
  780. switch (type) {
  781. case AARCH64_INSN_DATA2_UDIV:
  782. insn = aarch64_insn_get_udiv_value();
  783. break;
  784. case AARCH64_INSN_DATA2_SDIV:
  785. insn = aarch64_insn_get_sdiv_value();
  786. break;
  787. case AARCH64_INSN_DATA2_LSLV:
  788. insn = aarch64_insn_get_lslv_value();
  789. break;
  790. case AARCH64_INSN_DATA2_LSRV:
  791. insn = aarch64_insn_get_lsrv_value();
  792. break;
  793. case AARCH64_INSN_DATA2_ASRV:
  794. insn = aarch64_insn_get_asrv_value();
  795. break;
  796. case AARCH64_INSN_DATA2_RORV:
  797. insn = aarch64_insn_get_rorv_value();
  798. break;
  799. default:
  800. BUG_ON(1);
  801. return AARCH64_BREAK_FAULT;
  802. }
  803. switch (variant) {
  804. case AARCH64_INSN_VARIANT_32BIT:
  805. break;
  806. case AARCH64_INSN_VARIANT_64BIT:
  807. insn |= AARCH64_INSN_SF_BIT;
  808. break;
  809. default:
  810. BUG_ON(1);
  811. return AARCH64_BREAK_FAULT;
  812. }
  813. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  814. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  815. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  816. }
  817. u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
  818. enum aarch64_insn_register src,
  819. enum aarch64_insn_register reg1,
  820. enum aarch64_insn_register reg2,
  821. enum aarch64_insn_variant variant,
  822. enum aarch64_insn_data3_type type)
  823. {
  824. u32 insn;
  825. switch (type) {
  826. case AARCH64_INSN_DATA3_MADD:
  827. insn = aarch64_insn_get_madd_value();
  828. break;
  829. case AARCH64_INSN_DATA3_MSUB:
  830. insn = aarch64_insn_get_msub_value();
  831. break;
  832. default:
  833. BUG_ON(1);
  834. return AARCH64_BREAK_FAULT;
  835. }
  836. switch (variant) {
  837. case AARCH64_INSN_VARIANT_32BIT:
  838. break;
  839. case AARCH64_INSN_VARIANT_64BIT:
  840. insn |= AARCH64_INSN_SF_BIT;
  841. break;
  842. default:
  843. BUG_ON(1);
  844. return AARCH64_BREAK_FAULT;
  845. }
  846. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  847. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
  848. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  849. reg1);
  850. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
  851. reg2);
  852. }
  853. u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
  854. enum aarch64_insn_register src,
  855. enum aarch64_insn_register reg,
  856. int shift,
  857. enum aarch64_insn_variant variant,
  858. enum aarch64_insn_logic_type type)
  859. {
  860. u32 insn;
  861. switch (type) {
  862. case AARCH64_INSN_LOGIC_AND:
  863. insn = aarch64_insn_get_and_value();
  864. break;
  865. case AARCH64_INSN_LOGIC_BIC:
  866. insn = aarch64_insn_get_bic_value();
  867. break;
  868. case AARCH64_INSN_LOGIC_ORR:
  869. insn = aarch64_insn_get_orr_value();
  870. break;
  871. case AARCH64_INSN_LOGIC_ORN:
  872. insn = aarch64_insn_get_orn_value();
  873. break;
  874. case AARCH64_INSN_LOGIC_EOR:
  875. insn = aarch64_insn_get_eor_value();
  876. break;
  877. case AARCH64_INSN_LOGIC_EON:
  878. insn = aarch64_insn_get_eon_value();
  879. break;
  880. case AARCH64_INSN_LOGIC_AND_SETFLAGS:
  881. insn = aarch64_insn_get_ands_value();
  882. break;
  883. case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
  884. insn = aarch64_insn_get_bics_value();
  885. break;
  886. default:
  887. BUG_ON(1);
  888. return AARCH64_BREAK_FAULT;
  889. }
  890. switch (variant) {
  891. case AARCH64_INSN_VARIANT_32BIT:
  892. BUG_ON(shift & ~(SZ_32 - 1));
  893. break;
  894. case AARCH64_INSN_VARIANT_64BIT:
  895. insn |= AARCH64_INSN_SF_BIT;
  896. BUG_ON(shift & ~(SZ_64 - 1));
  897. break;
  898. default:
  899. BUG_ON(1);
  900. return AARCH64_BREAK_FAULT;
  901. }
  902. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  903. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  904. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  905. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
  906. }
  907. bool aarch32_insn_is_wide(u32 insn)
  908. {
  909. return insn >= 0xe800;
  910. }
  911. /*
  912. * Macros/defines for extracting register numbers from instruction.
  913. */
  914. u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
  915. {
  916. return (insn & (0xf << offset)) >> offset;
  917. }
  918. #define OPC2_MASK 0x7
  919. #define OPC2_OFFSET 5
  920. u32 aarch32_insn_mcr_extract_opc2(u32 insn)
  921. {
  922. return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
  923. }
  924. #define CRM_MASK 0xf
  925. u32 aarch32_insn_mcr_extract_crm(u32 insn)
  926. {
  927. return insn & CRM_MASK;
  928. }