/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/insn.h>
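
/*
 * Top-level A64 decode table, indexed by insn bits [28:25] (the op0
 * field of the top-level encoding table in the ARMv8 ARM): each of the
 * sixteen values selects one of the major instruction classes.
 */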
static int aarch64_insn_encoding_class[] = {
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
        return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
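
/*
 * Example (illustrative, not part of the original file): an
 * unconditional branch such as 0x14000001 ("b .+4") has bits
 * [28:25] = 0b1010, so aarch64_get_insn_class() returns
 * AARCH64_INSN_CLS_BR_SYS.
 */
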
/*
 * NOP is an alias of HINT: a HINT whose CRm:op2 field (insn bits
 * [11:5], mask 0xfe0) selects no architectural side effect.
 */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
        if (!aarch64_insn_is_hint(insn))
                return false;

        switch (insn & 0xFE0) {
        case AARCH64_INSN_HINT_YIELD:
        case AARCH64_INSN_HINT_WFE:
        case AARCH64_INSN_HINT_WFI:
        case AARCH64_INSN_HINT_SEV:
        case AARCH64_INSN_HINT_SEVL:
                return false;
        default:
                return true;
        }
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are
 * always little-endian, hence the single-word accesses and byte-order
 * conversions below.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
        int ret;
        u32 val;

        ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
        if (!ret)
                *insnp = le32_to_cpu(val);

        return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
        insn = cpu_to_le32(insn);
        return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
}
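
/*
 * Only the instruction types the architecture explicitly permits (see
 * the B2.6.5 excerpt quoted below) may be modified while another CPU
 * could be executing them.
 */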
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
        if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
                return false;

        return  aarch64_insn_is_b(insn) ||
                aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_svc(insn) ||
                aarch64_insn_is_hvc(insn) ||
                aarch64_insn_is_smc(insn) ||
                aarch64_insn_is_brk(insn) ||
                aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a,
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
        return __aarch64_insn_hotpatch_safe(old_insn) &&
               __aarch64_insn_hotpatch_safe(new_insn);
}
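
/*
 * "nosync": write the new instruction and perform the cache
 * maintenance, but leave it to the caller to make other CPUs
 * resynchronize their pipelines (e.g. via IPIs or stop_machine()).
 */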
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
        u32 *tp = addr;
        int ret;

        /* A64 instructions must be word aligned */
        if ((uintptr_t)tp & 0x3)
                return -EINVAL;

        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
                flush_icache_range((uintptr_t)tp,
                                   (uintptr_t)tp + AARCH64_INSN_SIZE);

        return ret;
}
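
/*
 * Arguments handed to the stop_machine() callback below: the patch
 * sites, the replacement instructions, and a counter used to elect one
 * CPU to do the writes while the other CPUs spin.
 */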
struct aarch64_insn_patch {
        void            **text_addrs;
        u32             *new_insns;
        int             insn_cnt;
        atomic_t        cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;

        /* The first CPU becomes master */
        if (atomic_inc_return(&pp->cpu_count) == 1) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
                /*
                 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
                 * which ends with a "dsb; isb" pair guaranteeing global
                 * visibility.
                 */
                atomic_set(&pp->cpu_count, -1);
        } else {
                while (atomic_read(&pp->cpu_count) != -1)
                        cpu_relax();
                isb();
        }

        return ret;
}
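
/*
 * Patch arbitrary instructions by quiescing all CPUs with
 * stop_machine(): the rendezvous guarantees that no CPU is executing
 * the code being rewritten while the writes and cache maintenance take
 * place.
 */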
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
        struct aarch64_insn_patch patch = {
                .text_addrs = addrs,
                .new_insns = insns,
                .insn_cnt = cnt,
                .cpu_count = ATOMIC_INIT(0),
        };

        if (cnt <= 0)
                return -EINVAL;

        return stop_machine(aarch64_insn_patch_text_cb, &patch,
                            cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
        int ret;
        u32 insn;

        /* Unsafe to patch multiple instructions without synchronization */
        if (cnt == 1) {
                ret = aarch64_insn_read(addrs[0], &insn);
                if (ret)
                        return ret;

                if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
                        /*
                         * The ARMv8 architecture doesn't guarantee that all
                         * CPUs see the new instruction after
                         * aarch64_insn_patch_text_nosync() returns, so send
                         * IPIs to all other CPUs to achieve instruction
                         * synchronization.
                         */
                        ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
                        kick_all_cpus_sync();
                        return ret;
                }
        }

        return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
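
/*
 * Example (illustrative sketch, not part of the original file):
 * replacing one instruction with a NOP through the public interface,
 * where 'addr' is a hypothetical patch site:
 *
 *      void *addr = ...;
 *      u32 nop = aarch64_insn_gen_nop();
 *      int err = aarch64_insn_patch_text(&addr, &nop, 1);
 */
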
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
                                            u32 insn, u64 imm)
{
        u32 immlo, immhi, lomask, himask, mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                lomask = 0x3;
                himask = 0x7ffff;
                immlo = imm & lomask;
                imm >>= 2;
                immhi = imm & himask;
                imm = (immlo << 24) | (immhi);
                mask = (lomask << 24) | (himask);
                shift = 5;
                break;
        case AARCH64_INSN_IMM_26:
                mask = BIT(26) - 1;
                shift = 0;
                break;
        case AARCH64_INSN_IMM_19:
                mask = BIT(19) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_16:
                mask = BIT(16) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_14:
                mask = BIT(14) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_12:
                mask = BIT(12) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_9:
                mask = BIT(9) - 1;
                shift = 12;
                break;
        default:
                pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
                       type);
                return 0;
        }

        /* Update the immediate field. */
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        return insn;
}
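
/*
 * Worked example (illustrative): for an unconditional branch (opcode
 * 0x14000000) going forward 8 bytes, imm = 8 >> 2 = 2, so
 * aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, 0x14000000, 2)
 * yields 0x14000002.
 */
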
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
                                          enum aarch64_insn_branch_type type)
{
        u32 insn;
        long offset;

        /*
         * PC: A 64-bit Program Counter holding the address of the current
         * instruction. A64 instructions must be word-aligned.
         */
        BUG_ON((pc & 0x3) || (addr & 0x3));

        /*
         * B/BL support a [-128M, 128M) offset; the arm64 virtual address
         * arrangement guarantees that all kernel and module text is within
         * +/-128M.
         */
        offset = ((long)addr - (long)pc);
        BUG_ON(offset < -SZ_128M || offset >= SZ_128M);

        if (type == AARCH64_INSN_BRANCH_LINK)
                insn = aarch64_insn_get_bl_value();
        else
                insn = aarch64_insn_get_b_value();

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
                                             offset >> 2);
}
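
/*
 * Example (illustrative): with addr = pc + 0x1000 and
 * AARCH64_INSN_BRANCH_LINK, this produces BL #0x1000, i.e.
 * 0x94000000 | (0x1000 >> 2) == 0x94000400.
 */
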
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
        return aarch64_insn_get_hint_value() | op;
}
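
/* The canonical A64 NOP is HINT #0, which encodes as 0xd503201f. */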
u32 __kprobes aarch64_insn_gen_nop(void)
{
        return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}