/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)

static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = virt_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
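
/*
 * Illustrative sketch, not part of the original file: a typical single-
 * instruction patch, e.g. turning a known NOP site into an unconditional
 * branch. The function name is hypothetical and error handling is minimal;
 * it only shows how the helpers above are meant to be combined.
 */
static int __maybe_unused example_patch_branch(void *addr, void *target)
{
	/* Encode "b target" relative to the instruction at addr. */
	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)addr,
					       (unsigned long)target,
					       AARCH64_INSN_BRANCH_NOLINK);

	if (insn == AARCH64_BREAK_FAULT)
		return -EINVAL;

	/* Single-instruction case: may take the hotpatch-safe fast path. */
	return aarch64_insn_patch_text(&addr, &insn, 1);
}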

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
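
/*
 * Illustrative note, not part of the original file: for ADR the 21-bit
 * immediate is split in the encoding, with immlo in bits [30:29] and immhi
 * in bits [23:5]. The code above reassembles it as (immhi << 2) | immlo, so
 * the returned value is the full 21-bit field (ADR_IMM_SIZE == SZ_2M == 2^21).
 */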

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
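
/*
 * Illustrative note, not part of the original file: the branch offset is a
 * byte displacement that must be a multiple of 4, so only offset >> 2 is
 * stored in the 26-bit field. A signed 26-bit value scaled by 4 gives the
 * [-128M, 128M) reach checked above: -2^25 * 4 = -128M, (2^25 - 1) * 4 =
 * 128M - 4.
 */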

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
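
/*
 * Illustrative sketch, not part of the original file: emitting the common
 * prologue pair "stp reg1, reg2, [sp, #-16]!" for two 64-bit registers.
 * The function name is hypothetical; it only demonstrates how the helper
 * above is parameterised.
 */
static u32 __maybe_unused example_gen_stp_push(enum aarch64_insn_register reg1,
					       enum aarch64_insn_register reg2)
{
	return aarch64_insn_gen_load_store_pair(reg1, reg2,
						AARCH64_INSN_REG_SP, -16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
}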

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}

	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
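
/*
 * Illustrative sketch, not part of the original file: materialising a 64-bit
 * constant as a MOVZ/MOVK sequence, one 16-bit chunk per instruction. The
 * function name and the fixed four-instruction form are only for
 * illustration; the caller is assumed to emit insns[0..3] in order.
 */
static void __maybe_unused example_gen_mov_imm64(enum aarch64_insn_register dst,
						 u64 val, u32 insns[4])
{
	int i;

	for (i = 0; i < 4; i++) {
		int shift = i * 16;
		int imm = (val >> shift) & 0xffff;
		/* First chunk zeroes the register (MOVZ), the rest keep it (MOVK). */
		enum aarch64_insn_movewide_type type = i ?
				AARCH64_INSN_MOVEWIDE_KEEP :
				AARCH64_INSN_MOVEWIDE_ZERO;

		insns[i] = aarch64_insn_gen_movewide(dst, imm, shift,
						     AARCH64_INSN_VARIANT_64BIT,
						     type);
	}
}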

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
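
/*
 * Illustrative note, not part of the original file: the shift pairs above
 * sign-extend the raw field and convert it to a byte offset in one step.
 * For the 26-bit case, "imm << 6" moves bit 25 (the field's sign bit) into
 * bit 31 of the s32; the arithmetic ">> 4" then sign-extends while leaving a
 * net left shift of 2, i.e. a multiply by 4 (one A64 instruction is 4 bytes).
 * The 19-bit (<< 13, >> 11) and 14-bit (<< 18, >> 16) cases follow the same
 * pattern.
 */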

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
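
/*
 * Illustrative sketch, not part of the original file: the two helpers above
 * are intended to be used together, e.g. adjusting the displacement of an
 * existing branch instruction by "delta" bytes while keeping its opcode:
 *
 *	new_insn = aarch64_set_branch_offset(insn,
 *			aarch64_get_branch_offset(insn) + delta);
 */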

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}