/* insn.c */
  1. /*
  2. * Copyright (C) 2013 Huawei Ltd.
  3. * Author: Jiang Liu <liuj97@gmail.com>
  4. *
  5. * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/bitops.h>
  20. #include <linux/compiler.h>
  21. #include <linux/kernel.h>
  22. #include <linux/smp.h>
  23. #include <linux/stop_machine.h>
  24. #include <linux/uaccess.h>
  25. #include <asm/cacheflush.h>
  26. #include <asm/debug-monitors.h>
  27. #include <asm/insn.h>
  28. #define AARCH64_INSN_SF_BIT BIT(31)
  29. #define AARCH64_INSN_N_BIT BIT(22)
  30. static int aarch64_insn_encoding_class[] = {
  31. AARCH64_INSN_CLS_UNKNOWN,
  32. AARCH64_INSN_CLS_UNKNOWN,
  33. AARCH64_INSN_CLS_UNKNOWN,
  34. AARCH64_INSN_CLS_UNKNOWN,
  35. AARCH64_INSN_CLS_LDST,
  36. AARCH64_INSN_CLS_DP_REG,
  37. AARCH64_INSN_CLS_LDST,
  38. AARCH64_INSN_CLS_DP_FPSIMD,
  39. AARCH64_INSN_CLS_DP_IMM,
  40. AARCH64_INSN_CLS_DP_IMM,
  41. AARCH64_INSN_CLS_BR_SYS,
  42. AARCH64_INSN_CLS_BR_SYS,
  43. AARCH64_INSN_CLS_LDST,
  44. AARCH64_INSN_CLS_DP_REG,
  45. AARCH64_INSN_CLS_LDST,
  46. AARCH64_INSN_CLS_DP_FPSIMD,
  47. };
  48. enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
  49. {
  50. return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
  51. }
  52. /* NOP is an alias of HINT */
  53. bool __kprobes aarch64_insn_is_nop(u32 insn)
  54. {
  55. if (!aarch64_insn_is_hint(insn))
  56. return false;
  57. switch (insn & 0xFE0) {
  58. case AARCH64_INSN_HINT_YIELD:
  59. case AARCH64_INSN_HINT_WFE:
  60. case AARCH64_INSN_HINT_WFI:
  61. case AARCH64_INSN_HINT_SEV:
  62. case AARCH64_INSN_HINT_SEVL:
  63. return false;
  64. default:
  65. return true;
  66. }
  67. }
  68. /*
  69. * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
  70. * little-endian.
  71. */
  72. int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
  73. {
  74. int ret;
  75. u32 val;
  76. ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
  77. if (!ret)
  78. *insnp = le32_to_cpu(val);
  79. return ret;
  80. }
  81. int __kprobes aarch64_insn_write(void *addr, u32 insn)
  82. {
  83. insn = cpu_to_le32(insn);
  84. return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
  85. }
  86. static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
  87. {
  88. if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
  89. return false;
  90. return aarch64_insn_is_b(insn) ||
  91. aarch64_insn_is_bl(insn) ||
  92. aarch64_insn_is_svc(insn) ||
  93. aarch64_insn_is_hvc(insn) ||
  94. aarch64_insn_is_smc(insn) ||
  95. aarch64_insn_is_brk(insn) ||
  96. aarch64_insn_is_nop(insn);
  97. }
  98. /*
  99. * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
  100. * Section B2.6.5 "Concurrent modification and execution of instructions":
  101. * Concurrent modification and execution of instructions can lead to the
  102. * resulting instruction performing any behavior that can be achieved by
  103. * executing any sequence of instructions that can be executed from the
  104. * same Exception level, except where the instruction before modification
  105. * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
  106. * or SMC instruction.
  107. */
  108. bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
  109. {
  110. return __aarch64_insn_hotpatch_safe(old_insn) &&
  111. __aarch64_insn_hotpatch_safe(new_insn);
  112. }
/*
 * Patch a single instruction at @addr without synchronizing other CPUs.
 * Writes the instruction and then flushes the range from the I-cache so
 * this CPU will fetch the new encoding.  Returns 0 on success, -EINVAL
 * for a misaligned address, or the error from aarch64_insn_write().
 */
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		/* Make the new encoding visible to instruction fetch. */
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
/* Work item handed to aarch64_insn_patch_text_cb() via stop_machine(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* number of entries in the arrays above */
	atomic_t	cpu_count;	/* rendezvous counter; see the callback */
};
/*
 * stop_machine() callback, executed on every online CPU.  The first CPU
 * to arrive becomes the master and performs all the patching; the other
 * CPUs spin until the master signals completion via an extra counter
 * increment, then resynchronize their instruction stream with an ISB.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * The count only exceeds the number of online CPUs after
		 * the master's extra increment, i.e. once patching is done.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
  155. int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
  156. {
  157. struct aarch64_insn_patch patch = {
  158. .text_addrs = addrs,
  159. .new_insns = insns,
  160. .insn_cnt = cnt,
  161. .cpu_count = ATOMIC_INIT(0),
  162. };
  163. if (cnt <= 0)
  164. return -EINVAL;
  165. return stop_machine(aarch64_insn_patch_text_cb, &patch,
  166. cpu_online_mask);
  167. }
/*
 * Patch @cnt instructions.  A single hotpatch-safe replacement (see
 * aarch64_insn_hotpatch_safe()) is done in place with only an IPI for
 * resynchronization; everything else falls back to the heavyweight
 * stop_machine() path.
 */
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
  192. u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
  193. u32 insn, u64 imm)
  194. {
  195. u32 immlo, immhi, lomask, himask, mask;
  196. int shift;
  197. switch (type) {
  198. case AARCH64_INSN_IMM_ADR:
  199. lomask = 0x3;
  200. himask = 0x7ffff;
  201. immlo = imm & lomask;
  202. imm >>= 2;
  203. immhi = imm & himask;
  204. imm = (immlo << 24) | (immhi);
  205. mask = (lomask << 24) | (himask);
  206. shift = 5;
  207. break;
  208. case AARCH64_INSN_IMM_26:
  209. mask = BIT(26) - 1;
  210. shift = 0;
  211. break;
  212. case AARCH64_INSN_IMM_19:
  213. mask = BIT(19) - 1;
  214. shift = 5;
  215. break;
  216. case AARCH64_INSN_IMM_16:
  217. mask = BIT(16) - 1;
  218. shift = 5;
  219. break;
  220. case AARCH64_INSN_IMM_14:
  221. mask = BIT(14) - 1;
  222. shift = 5;
  223. break;
  224. case AARCH64_INSN_IMM_12:
  225. mask = BIT(12) - 1;
  226. shift = 10;
  227. break;
  228. case AARCH64_INSN_IMM_9:
  229. mask = BIT(9) - 1;
  230. shift = 12;
  231. break;
  232. case AARCH64_INSN_IMM_7:
  233. mask = BIT(7) - 1;
  234. shift = 15;
  235. break;
  236. case AARCH64_INSN_IMM_6:
  237. case AARCH64_INSN_IMM_S:
  238. mask = BIT(6) - 1;
  239. shift = 10;
  240. break;
  241. case AARCH64_INSN_IMM_R:
  242. mask = BIT(6) - 1;
  243. shift = 16;
  244. break;
  245. default:
  246. pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
  247. type);
  248. return 0;
  249. }
  250. /* Update the immediate field. */
  251. insn &= ~(mask << shift);
  252. insn |= (imm & mask) << shift;
  253. return insn;
  254. }
  255. static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
  256. u32 insn,
  257. enum aarch64_insn_register reg)
  258. {
  259. int shift;
  260. if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
  261. pr_err("%s: unknown register encoding %d\n", __func__, reg);
  262. return 0;
  263. }
  264. switch (type) {
  265. case AARCH64_INSN_REGTYPE_RT:
  266. case AARCH64_INSN_REGTYPE_RD:
  267. shift = 0;
  268. break;
  269. case AARCH64_INSN_REGTYPE_RN:
  270. shift = 5;
  271. break;
  272. case AARCH64_INSN_REGTYPE_RT2:
  273. case AARCH64_INSN_REGTYPE_RA:
  274. shift = 10;
  275. break;
  276. case AARCH64_INSN_REGTYPE_RM:
  277. shift = 16;
  278. break;
  279. default:
  280. pr_err("%s: unknown register type encoding %d\n", __func__,
  281. type);
  282. return 0;
  283. }
  284. insn &= ~(GENMASK(4, 0) << shift);
  285. insn |= reg << shift;
  286. return insn;
  287. }
  288. static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
  289. u32 insn)
  290. {
  291. u32 size;
  292. switch (type) {
  293. case AARCH64_INSN_SIZE_8:
  294. size = 0;
  295. break;
  296. case AARCH64_INSN_SIZE_16:
  297. size = 1;
  298. break;
  299. case AARCH64_INSN_SIZE_32:
  300. size = 2;
  301. break;
  302. case AARCH64_INSN_SIZE_64:
  303. size = 3;
  304. break;
  305. default:
  306. pr_err("%s: unknown size encoding %d\n", __func__, type);
  307. return 0;
  308. }
  309. insn &= ~GENMASK(31, 30);
  310. insn |= size << 30;
  311. return insn;
  312. }
/*
 * Compute the byte offset from @pc to @addr for a branch immediate.
 * BUG()s if either address is not word aligned or the offset falls
 * outside [-range, range).
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
  326. u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
  327. enum aarch64_insn_branch_type type)
  328. {
  329. u32 insn;
  330. long offset;
  331. /*
  332. * B/BL support [-128M, 128M) offset
  333. * ARM64 virtual address arrangement guarantees all kernel and module
  334. * texts are within +/-128M.
  335. */
  336. offset = branch_imm_common(pc, addr, SZ_128M);
  337. switch (type) {
  338. case AARCH64_INSN_BRANCH_LINK:
  339. insn = aarch64_insn_get_bl_value();
  340. break;
  341. case AARCH64_INSN_BRANCH_NOLINK:
  342. insn = aarch64_insn_get_b_value();
  343. break;
  344. default:
  345. BUG_ON(1);
  346. return AARCH64_BREAK_FAULT;
  347. }
  348. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
  349. offset >> 2);
  350. }
/*
 * Generate a CBZ/CBNZ comparing @reg against zero and branching from
 * @pc to @addr.  The offset must fit the +/-1M imm19 field.
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* sf bit selects the 64-bit form of the comparison. */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	/* imm19 holds the offset in words. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
  384. u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
  385. enum aarch64_insn_condition cond)
  386. {
  387. u32 insn;
  388. long offset;
  389. offset = branch_imm_common(pc, addr, SZ_1M);
  390. insn = aarch64_insn_get_bcond_value();
  391. BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
  392. insn |= cond;
  393. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
  394. offset >> 2);
  395. }
  396. u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
  397. {
  398. return aarch64_insn_get_hint_value() | op;
  399. }
  400. u32 __kprobes aarch64_insn_gen_nop(void)
  401. {
  402. return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
  403. }
  404. u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
  405. enum aarch64_insn_branch_type type)
  406. {
  407. u32 insn;
  408. switch (type) {
  409. case AARCH64_INSN_BRANCH_NOLINK:
  410. insn = aarch64_insn_get_br_value();
  411. break;
  412. case AARCH64_INSN_BRANCH_LINK:
  413. insn = aarch64_insn_get_blr_value();
  414. break;
  415. case AARCH64_INSN_BRANCH_RETURN:
  416. insn = aarch64_insn_get_ret_value();
  417. break;
  418. default:
  419. BUG_ON(1);
  420. return AARCH64_BREAK_FAULT;
  421. }
  422. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
  423. }
/*
 * Generate a register-offset LDR/STR: transfer register @reg, base
 * register @base, offset register @offset, access size @size.
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
/*
 * Generate an LDP/STP with pre- or post-index addressing: registers
 * @reg1/@reg2, base register @base, signed byte offset @offset.  The
 * offset is encoded scaled (imm7) and must be a multiple of the access
 * size within the ranges checked below.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* imm7 carries the offset scaled by the access size. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
/*
 * Generate an ADD/SUB (immediate), optionally flag-setting:
 * dst = src +/- imm.  @imm must be an unsigned 12-bit value.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* The imm12 field only holds values in [0, 4095]. */
	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
/*
 * Generate a BFM/UBFM/SBFM bitfield-move with rotate @immr and field
 * width selector @imms.  Both immediates must fit the variant's field
 * width (5 bits for 32-bit, 6 bits for 64-bit).
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* The 64-bit form requires both sf and N set. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
/*
 * Generate a MOVZ/MOVK/MOVN moving the 16-bit immediate @imm into @dst
 * at bit position @shift (which must be a multiple of 16 valid for the
 * variant).
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* The immediate must fit in 16 bits. */
	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* Encode shift/16 into the hw field at bits [22:21]. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
/*
 * Generate an ADD/SUB (shifted register), optionally flag-setting:
 * dst = src +/- (reg << shift).  The shift amount must be smaller than
 * the variant's register width.
 */
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Generate a one-source data-processing instruction (REV16/REV32/REV64):
 * dst = op(src).  REV64 is only valid for the 64-bit variant.
 */
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		/* REV64 has no 32-bit form. */
		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
/*
 * Generate a two-source data-processing instruction
 * (UDIV/SDIV/LSLV/LSRV/ASRV/RORV): dst = src op reg.
 */
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
/*
 * Generate a three-source data-processing instruction (MADD/MSUB):
 * dst = src +/- reg1 * reg2, with @src going in the Ra (addend) field.
 */
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
/*
 * Generate a logical (shifted register) instruction
 * (AND/BIC/ORR/ORN/EOR/EON/ANDS/BICS): dst = src op (reg << shift).
 * The shift amount must be smaller than the variant's register width.
 */
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
  829. bool aarch32_insn_is_wide(u32 insn)
  830. {
  831. return insn >= 0xe800;
  832. }
  833. /*
  834. * Macros/defines for extracting register numbers from instruction.
  835. */
  836. u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
  837. {
  838. return (insn & (0xf << offset)) >> offset;
  839. }
  840. #define OPC2_MASK 0x7
  841. #define OPC2_OFFSET 5
  842. u32 aarch32_insn_mcr_extract_opc2(u32 insn)
  843. {
  844. return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
  845. }
  846. #define CRM_MASK 0xf
  847. u32 aarch32_insn_mcr_extract_crm(u32 insn)
  848. {
  849. return insn & CRM_MASK;
  850. }