/* insn.c */
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  19. #include <linux/bitops.h>
  20. #include <linux/compiler.h>
  21. #include <linux/kernel.h>
  22. #include <linux/smp.h>
  23. #include <linux/stop_machine.h>
  24. #include <linux/uaccess.h>
  25. #include <asm/cacheflush.h>
  26. #include <asm/insn.h>
  27. #define AARCH64_INSN_SF_BIT BIT(31)
  28. #define AARCH64_INSN_N_BIT BIT(22)
  29. static int aarch64_insn_encoding_class[] = {
  30. AARCH64_INSN_CLS_UNKNOWN,
  31. AARCH64_INSN_CLS_UNKNOWN,
  32. AARCH64_INSN_CLS_UNKNOWN,
  33. AARCH64_INSN_CLS_UNKNOWN,
  34. AARCH64_INSN_CLS_LDST,
  35. AARCH64_INSN_CLS_DP_REG,
  36. AARCH64_INSN_CLS_LDST,
  37. AARCH64_INSN_CLS_DP_FPSIMD,
  38. AARCH64_INSN_CLS_DP_IMM,
  39. AARCH64_INSN_CLS_DP_IMM,
  40. AARCH64_INSN_CLS_BR_SYS,
  41. AARCH64_INSN_CLS_BR_SYS,
  42. AARCH64_INSN_CLS_LDST,
  43. AARCH64_INSN_CLS_DP_REG,
  44. AARCH64_INSN_CLS_LDST,
  45. AARCH64_INSN_CLS_DP_FPSIMD,
  46. };
  47. enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
  48. {
  49. return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
  50. }
  51. /* NOP is an alias of HINT */
  52. bool __kprobes aarch64_insn_is_nop(u32 insn)
  53. {
  54. if (!aarch64_insn_is_hint(insn))
  55. return false;
  56. switch (insn & 0xFE0) {
  57. case AARCH64_INSN_HINT_YIELD:
  58. case AARCH64_INSN_HINT_WFE:
  59. case AARCH64_INSN_HINT_WFI:
  60. case AARCH64_INSN_HINT_SEV:
  61. case AARCH64_INSN_HINT_SEVL:
  62. return false;
  63. default:
  64. return true;
  65. }
  66. }
  67. /*
  68. * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
  69. * little-endian.
  70. */
  71. int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
  72. {
  73. int ret;
  74. u32 val;
  75. ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
  76. if (!ret)
  77. *insnp = le32_to_cpu(val);
  78. return ret;
  79. }
  80. int __kprobes aarch64_insn_write(void *addr, u32 insn)
  81. {
  82. insn = cpu_to_le32(insn);
  83. return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
  84. }
  85. static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
  86. {
  87. if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
  88. return false;
  89. return aarch64_insn_is_b(insn) ||
  90. aarch64_insn_is_bl(insn) ||
  91. aarch64_insn_is_svc(insn) ||
  92. aarch64_insn_is_hvc(insn) ||
  93. aarch64_insn_is_smc(insn) ||
  94. aarch64_insn_is_brk(insn) ||
  95. aarch64_insn_is_nop(insn);
  96. }
  97. /*
  98. * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
  99. * Section B2.6.5 "Concurrent modification and execution of instructions":
  100. * Concurrent modification and execution of instructions can lead to the
  101. * resulting instruction performing any behavior that can be achieved by
  102. * executing any sequence of instructions that can be executed from the
  103. * same Exception level, except where the instruction before modification
  104. * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
  105. * or SMC instruction.
  106. */
  107. bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
  108. {
  109. return __aarch64_insn_hotpatch_safe(old_insn) &&
  110. __aarch64_insn_hotpatch_safe(new_insn);
  111. }
  112. int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
  113. {
  114. u32 *tp = addr;
  115. int ret;
  116. /* A64 instructions must be word aligned */
  117. if ((uintptr_t)tp & 0x3)
  118. return -EINVAL;
  119. ret = aarch64_insn_write(tp, insn);
  120. if (ret == 0)
  121. flush_icache_range((uintptr_t)tp,
  122. (uintptr_t)tp + AARCH64_INSN_SIZE);
  123. return ret;
  124. }
/* Work description handed to aarch64_insn_patch_text_cb() via stop_machine(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions, one per address */
	int		insn_cnt;	/* number of entries in the two arrays above */
	atomic_t	cpu_count;	/* rendezvous counter; set to -1 when patching is done */
};
/*
 * stop_machine() callback: the first CPU to arrive applies all patches
 * while every other CPU spins until completion is signalled.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Apply patches in order, stopping at the first failure. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Setting -1 releases the secondaries spinning below. */
		atomic_set(&pp->cpu_count, -1);
	} else {
		while (atomic_read(&pp->cpu_count) != -1)
			cpu_relax();
		/* Discard any instructions prefetched before the patch. */
		isb();
	}

	return ret;
}
  153. int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
  154. {
  155. struct aarch64_insn_patch patch = {
  156. .text_addrs = addrs,
  157. .new_insns = insns,
  158. .insn_cnt = cnt,
  159. .cpu_count = ATOMIC_INIT(0),
  160. };
  161. if (cnt <= 0)
  162. return -EINVAL;
  163. return stop_machine(aarch64_insn_patch_text_cb, &patch,
  164. cpu_online_mask);
  165. }
  166. int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
  167. {
  168. int ret;
  169. u32 insn;
  170. /* Unsafe to patch multiple instructions without synchronizaiton */
  171. if (cnt == 1) {
  172. ret = aarch64_insn_read(addrs[0], &insn);
  173. if (ret)
  174. return ret;
  175. if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
  176. /*
  177. * ARMv8 architecture doesn't guarantee all CPUs see
  178. * the new instruction after returning from function
  179. * aarch64_insn_patch_text_nosync(). So send IPIs to
  180. * all other CPUs to achieve instruction
  181. * synchronization.
  182. */
  183. ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
  184. kick_all_cpus_sync();
  185. return ret;
  186. }
  187. }
  188. return aarch64_insn_patch_text_sync(addrs, insns, cnt);
  189. }
/*
 * Encode @imm into the immediate field selected by @type of instruction
 * word @insn and return the updated word.
 *
 * Returns 0 and logs an error for an unknown @type.  NOTE(review): 0
 * carries no error indication of its own, so callers cannot distinguish
 * failure from a legitimately all-zero result — confirm callers only ever
 * pass valid types.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, lomask, himask, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		/*
		 * ADR/ADRP split the immediate: the low two bits (immlo)
		 * sit 24 bits above the 19-bit high part (immhi), so build
		 * a combined value/mask pair for the common insertion
		 * below.
		 */
		lomask = 0x3;
		himask = 0x7ffff;
		immlo = imm & lomask;
		imm >>= 2;
		immhi = imm & himask;
		imm = (immlo << 24) | (immhi);
		mask = (lomask << 24) | (himask);
		shift = 5;
		break;
	case AARCH64_INSN_IMM_26:	/* word offset of B/BL (see aarch64_insn_gen_branch_imm()) */
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:	/* CBZ/CBNZ/B.cond word offset */
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:	/* MOVZ/MOVK/MOVN payload */
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:	/* ADD/SUB (immediate) */
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:	/* LDP/STP scaled offset */
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
		       type);
		return 0;
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
  253. static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
  254. u32 insn,
  255. enum aarch64_insn_register reg)
  256. {
  257. int shift;
  258. if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
  259. pr_err("%s: unknown register encoding %d\n", __func__, reg);
  260. return 0;
  261. }
  262. switch (type) {
  263. case AARCH64_INSN_REGTYPE_RT:
  264. case AARCH64_INSN_REGTYPE_RD:
  265. shift = 0;
  266. break;
  267. case AARCH64_INSN_REGTYPE_RN:
  268. shift = 5;
  269. break;
  270. case AARCH64_INSN_REGTYPE_RT2:
  271. case AARCH64_INSN_REGTYPE_RA:
  272. shift = 10;
  273. break;
  274. case AARCH64_INSN_REGTYPE_RM:
  275. shift = 16;
  276. break;
  277. default:
  278. pr_err("%s: unknown register type encoding %d\n", __func__,
  279. type);
  280. return 0;
  281. }
  282. insn &= ~(GENMASK(4, 0) << shift);
  283. insn |= reg << shift;
  284. return insn;
  285. }
  286. static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
  287. u32 insn)
  288. {
  289. u32 size;
  290. switch (type) {
  291. case AARCH64_INSN_SIZE_8:
  292. size = 0;
  293. break;
  294. case AARCH64_INSN_SIZE_16:
  295. size = 1;
  296. break;
  297. case AARCH64_INSN_SIZE_32:
  298. size = 2;
  299. break;
  300. case AARCH64_INSN_SIZE_64:
  301. size = 3;
  302. break;
  303. default:
  304. pr_err("%s: unknown size encoding %d\n", __func__, type);
  305. return 0;
  306. }
  307. insn &= ~GENMASK(31, 30);
  308. insn |= size << 30;
  309. return insn;
  310. }
  311. static inline long branch_imm_common(unsigned long pc, unsigned long addr,
  312. long range)
  313. {
  314. long offset;
  315. /*
  316. * PC: A 64-bit Program Counter holding the address of the current
  317. * instruction. A64 instructions must be word-aligned.
  318. */
  319. BUG_ON((pc & 0x3) || (addr & 0x3));
  320. offset = ((long)addr - (long)pc);
  321. BUG_ON(offset < -range || offset >= range);
  322. return offset;
  323. }
  324. u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
  325. enum aarch64_insn_branch_type type)
  326. {
  327. u32 insn;
  328. long offset;
  329. /*
  330. * B/BL support [-128M, 128M) offset
  331. * ARM64 virtual address arrangement guarantees all kernel and module
  332. * texts are within +/-128M.
  333. */
  334. offset = branch_imm_common(pc, addr, SZ_128M);
  335. switch (type) {
  336. case AARCH64_INSN_BRANCH_LINK:
  337. insn = aarch64_insn_get_bl_value();
  338. break;
  339. case AARCH64_INSN_BRANCH_NOLINK:
  340. insn = aarch64_insn_get_b_value();
  341. break;
  342. default:
  343. BUG_ON(1);
  344. }
  345. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
  346. offset >> 2);
  347. }
  348. u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
  349. enum aarch64_insn_register reg,
  350. enum aarch64_insn_variant variant,
  351. enum aarch64_insn_branch_type type)
  352. {
  353. u32 insn;
  354. long offset;
  355. offset = branch_imm_common(pc, addr, SZ_1M);
  356. switch (type) {
  357. case AARCH64_INSN_BRANCH_COMP_ZERO:
  358. insn = aarch64_insn_get_cbz_value();
  359. break;
  360. case AARCH64_INSN_BRANCH_COMP_NONZERO:
  361. insn = aarch64_insn_get_cbnz_value();
  362. break;
  363. default:
  364. BUG_ON(1);
  365. }
  366. switch (variant) {
  367. case AARCH64_INSN_VARIANT_32BIT:
  368. break;
  369. case AARCH64_INSN_VARIANT_64BIT:
  370. insn |= AARCH64_INSN_SF_BIT;
  371. break;
  372. default:
  373. BUG_ON(1);
  374. }
  375. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
  376. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
  377. offset >> 2);
  378. }
  379. u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
  380. enum aarch64_insn_condition cond)
  381. {
  382. u32 insn;
  383. long offset;
  384. offset = branch_imm_common(pc, addr, SZ_1M);
  385. insn = aarch64_insn_get_bcond_value();
  386. BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
  387. insn |= cond;
  388. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
  389. offset >> 2);
  390. }
  391. u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
  392. {
  393. return aarch64_insn_get_hint_value() | op;
  394. }
/* Generate a NOP, encoded as the HINT instruction with the NOP operand. */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
  399. u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
  400. enum aarch64_insn_branch_type type)
  401. {
  402. u32 insn;
  403. switch (type) {
  404. case AARCH64_INSN_BRANCH_NOLINK:
  405. insn = aarch64_insn_get_br_value();
  406. break;
  407. case AARCH64_INSN_BRANCH_LINK:
  408. insn = aarch64_insn_get_blr_value();
  409. break;
  410. case AARCH64_INSN_BRANCH_RETURN:
  411. insn = aarch64_insn_get_ret_value();
  412. break;
  413. default:
  414. BUG_ON(1);
  415. }
  416. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
  417. }
  418. u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
  419. enum aarch64_insn_register base,
  420. enum aarch64_insn_register offset,
  421. enum aarch64_insn_size_type size,
  422. enum aarch64_insn_ldst_type type)
  423. {
  424. u32 insn;
  425. switch (type) {
  426. case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
  427. insn = aarch64_insn_get_ldr_reg_value();
  428. break;
  429. case AARCH64_INSN_LDST_STORE_REG_OFFSET:
  430. insn = aarch64_insn_get_str_reg_value();
  431. break;
  432. default:
  433. BUG_ON(1);
  434. }
  435. insn = aarch64_insn_encode_ldst_size(size, insn);
  436. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
  437. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  438. base);
  439. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
  440. offset);
  441. }
  442. u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
  443. enum aarch64_insn_register reg2,
  444. enum aarch64_insn_register base,
  445. int offset,
  446. enum aarch64_insn_variant variant,
  447. enum aarch64_insn_ldst_type type)
  448. {
  449. u32 insn;
  450. int shift;
  451. switch (type) {
  452. case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
  453. insn = aarch64_insn_get_ldp_pre_value();
  454. break;
  455. case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
  456. insn = aarch64_insn_get_stp_pre_value();
  457. break;
  458. case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
  459. insn = aarch64_insn_get_ldp_post_value();
  460. break;
  461. case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
  462. insn = aarch64_insn_get_stp_post_value();
  463. break;
  464. default:
  465. BUG_ON(1);
  466. }
  467. switch (variant) {
  468. case AARCH64_INSN_VARIANT_32BIT:
  469. /* offset must be multiples of 4 in the range [-256, 252] */
  470. BUG_ON(offset & 0x3);
  471. BUG_ON(offset < -256 || offset > 252);
  472. shift = 2;
  473. break;
  474. case AARCH64_INSN_VARIANT_64BIT:
  475. /* offset must be multiples of 8 in the range [-512, 504] */
  476. BUG_ON(offset & 0x7);
  477. BUG_ON(offset < -512 || offset > 504);
  478. shift = 3;
  479. insn |= AARCH64_INSN_SF_BIT;
  480. break;
  481. default:
  482. BUG_ON(1);
  483. }
  484. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
  485. reg1);
  486. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
  487. reg2);
  488. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  489. base);
  490. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
  491. offset >> shift);
  492. }
  493. u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
  494. enum aarch64_insn_register src,
  495. int imm, enum aarch64_insn_variant variant,
  496. enum aarch64_insn_adsb_type type)
  497. {
  498. u32 insn;
  499. switch (type) {
  500. case AARCH64_INSN_ADSB_ADD:
  501. insn = aarch64_insn_get_add_imm_value();
  502. break;
  503. case AARCH64_INSN_ADSB_SUB:
  504. insn = aarch64_insn_get_sub_imm_value();
  505. break;
  506. case AARCH64_INSN_ADSB_ADD_SETFLAGS:
  507. insn = aarch64_insn_get_adds_imm_value();
  508. break;
  509. case AARCH64_INSN_ADSB_SUB_SETFLAGS:
  510. insn = aarch64_insn_get_subs_imm_value();
  511. break;
  512. default:
  513. BUG_ON(1);
  514. }
  515. switch (variant) {
  516. case AARCH64_INSN_VARIANT_32BIT:
  517. break;
  518. case AARCH64_INSN_VARIANT_64BIT:
  519. insn |= AARCH64_INSN_SF_BIT;
  520. break;
  521. default:
  522. BUG_ON(1);
  523. }
  524. BUG_ON(imm & ~(SZ_4K - 1));
  525. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  526. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  527. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
  528. }
  529. u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
  530. enum aarch64_insn_register src,
  531. int immr, int imms,
  532. enum aarch64_insn_variant variant,
  533. enum aarch64_insn_bitfield_type type)
  534. {
  535. u32 insn;
  536. u32 mask;
  537. switch (type) {
  538. case AARCH64_INSN_BITFIELD_MOVE:
  539. insn = aarch64_insn_get_bfm_value();
  540. break;
  541. case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
  542. insn = aarch64_insn_get_ubfm_value();
  543. break;
  544. case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
  545. insn = aarch64_insn_get_sbfm_value();
  546. break;
  547. default:
  548. BUG_ON(1);
  549. }
  550. switch (variant) {
  551. case AARCH64_INSN_VARIANT_32BIT:
  552. mask = GENMASK(4, 0);
  553. break;
  554. case AARCH64_INSN_VARIANT_64BIT:
  555. insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
  556. mask = GENMASK(5, 0);
  557. break;
  558. default:
  559. BUG_ON(1);
  560. }
  561. BUG_ON(immr & ~mask);
  562. BUG_ON(imms & ~mask);
  563. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  564. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  565. insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
  566. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
  567. }
  568. u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
  569. int imm, int shift,
  570. enum aarch64_insn_variant variant,
  571. enum aarch64_insn_movewide_type type)
  572. {
  573. u32 insn;
  574. switch (type) {
  575. case AARCH64_INSN_MOVEWIDE_ZERO:
  576. insn = aarch64_insn_get_movz_value();
  577. break;
  578. case AARCH64_INSN_MOVEWIDE_KEEP:
  579. insn = aarch64_insn_get_movk_value();
  580. break;
  581. case AARCH64_INSN_MOVEWIDE_INVERSE:
  582. insn = aarch64_insn_get_movn_value();
  583. break;
  584. default:
  585. BUG_ON(1);
  586. }
  587. BUG_ON(imm & ~(SZ_64K - 1));
  588. switch (variant) {
  589. case AARCH64_INSN_VARIANT_32BIT:
  590. BUG_ON(shift != 0 && shift != 16);
  591. break;
  592. case AARCH64_INSN_VARIANT_64BIT:
  593. insn |= AARCH64_INSN_SF_BIT;
  594. BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
  595. shift != 48);
  596. break;
  597. default:
  598. BUG_ON(1);
  599. }
  600. insn |= (shift >> 4) << 21;
  601. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  602. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
  603. }
  604. u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
  605. enum aarch64_insn_register src,
  606. enum aarch64_insn_register reg,
  607. int shift,
  608. enum aarch64_insn_variant variant,
  609. enum aarch64_insn_adsb_type type)
  610. {
  611. u32 insn;
  612. switch (type) {
  613. case AARCH64_INSN_ADSB_ADD:
  614. insn = aarch64_insn_get_add_value();
  615. break;
  616. case AARCH64_INSN_ADSB_SUB:
  617. insn = aarch64_insn_get_sub_value();
  618. break;
  619. case AARCH64_INSN_ADSB_ADD_SETFLAGS:
  620. insn = aarch64_insn_get_adds_value();
  621. break;
  622. case AARCH64_INSN_ADSB_SUB_SETFLAGS:
  623. insn = aarch64_insn_get_subs_value();
  624. break;
  625. default:
  626. BUG_ON(1);
  627. }
  628. switch (variant) {
  629. case AARCH64_INSN_VARIANT_32BIT:
  630. BUG_ON(shift & ~(SZ_32 - 1));
  631. break;
  632. case AARCH64_INSN_VARIANT_64BIT:
  633. insn |= AARCH64_INSN_SF_BIT;
  634. BUG_ON(shift & ~(SZ_64 - 1));
  635. break;
  636. default:
  637. BUG_ON(1);
  638. }
  639. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  640. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  641. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  642. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
  643. }
  644. u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
  645. enum aarch64_insn_register src,
  646. enum aarch64_insn_variant variant,
  647. enum aarch64_insn_data1_type type)
  648. {
  649. u32 insn;
  650. switch (type) {
  651. case AARCH64_INSN_DATA1_REVERSE_16:
  652. insn = aarch64_insn_get_rev16_value();
  653. break;
  654. case AARCH64_INSN_DATA1_REVERSE_32:
  655. insn = aarch64_insn_get_rev32_value();
  656. break;
  657. case AARCH64_INSN_DATA1_REVERSE_64:
  658. BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
  659. insn = aarch64_insn_get_rev64_value();
  660. break;
  661. default:
  662. BUG_ON(1);
  663. }
  664. switch (variant) {
  665. case AARCH64_INSN_VARIANT_32BIT:
  666. break;
  667. case AARCH64_INSN_VARIANT_64BIT:
  668. insn |= AARCH64_INSN_SF_BIT;
  669. break;
  670. default:
  671. BUG_ON(1);
  672. }
  673. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  674. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  675. }
  676. u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
  677. enum aarch64_insn_register src,
  678. enum aarch64_insn_register reg,
  679. enum aarch64_insn_variant variant,
  680. enum aarch64_insn_data2_type type)
  681. {
  682. u32 insn;
  683. switch (type) {
  684. case AARCH64_INSN_DATA2_UDIV:
  685. insn = aarch64_insn_get_udiv_value();
  686. break;
  687. case AARCH64_INSN_DATA2_SDIV:
  688. insn = aarch64_insn_get_sdiv_value();
  689. break;
  690. case AARCH64_INSN_DATA2_LSLV:
  691. insn = aarch64_insn_get_lslv_value();
  692. break;
  693. case AARCH64_INSN_DATA2_LSRV:
  694. insn = aarch64_insn_get_lsrv_value();
  695. break;
  696. case AARCH64_INSN_DATA2_ASRV:
  697. insn = aarch64_insn_get_asrv_value();
  698. break;
  699. case AARCH64_INSN_DATA2_RORV:
  700. insn = aarch64_insn_get_rorv_value();
  701. break;
  702. default:
  703. BUG_ON(1);
  704. }
  705. switch (variant) {
  706. case AARCH64_INSN_VARIANT_32BIT:
  707. break;
  708. case AARCH64_INSN_VARIANT_64BIT:
  709. insn |= AARCH64_INSN_SF_BIT;
  710. break;
  711. default:
  712. BUG_ON(1);
  713. }
  714. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  715. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  716. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  717. }
  718. u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
  719. enum aarch64_insn_register src,
  720. enum aarch64_insn_register reg1,
  721. enum aarch64_insn_register reg2,
  722. enum aarch64_insn_variant variant,
  723. enum aarch64_insn_data3_type type)
  724. {
  725. u32 insn;
  726. switch (type) {
  727. case AARCH64_INSN_DATA3_MADD:
  728. insn = aarch64_insn_get_madd_value();
  729. break;
  730. case AARCH64_INSN_DATA3_MSUB:
  731. insn = aarch64_insn_get_msub_value();
  732. break;
  733. default:
  734. BUG_ON(1);
  735. }
  736. switch (variant) {
  737. case AARCH64_INSN_VARIANT_32BIT:
  738. break;
  739. case AARCH64_INSN_VARIANT_64BIT:
  740. insn |= AARCH64_INSN_SF_BIT;
  741. break;
  742. default:
  743. BUG_ON(1);
  744. }
  745. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  746. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
  747. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  748. reg1);
  749. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
  750. reg2);
  751. }