module.c

/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

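/*
 * Allocate memory for a module image. The reserved module region is
 * tried first; if that fails and module PLTs are available (and KASAN
 * is not enabled), fall back to a wider 4 GB window, since PLT veneers
 * can reach targets that are out of direct-branch range.
 */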
void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_base + MODULES_VSIZE,
				 gfp_mask, PAGE_KERNEL_EXEC, 0,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_4G, GFP_KERNEL,
					 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

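/*
 * The relocation operations below follow the "ELF for the Arm 64-bit
 * Architecture" document: ABS computes S + A, PREL computes S + A - P,
 * and PAGE computes Page(S + A) - Page(P), where Page(x) clears the low
 * 12 bits. As an illustrative (made-up) example, an ADRP at
 * P = 0xffff000010001abc referring to S + A = 0xffff000010123456 gets a
 * PAGE result of 0x122000, i.e. a 21-bit page offset of 0x122.
 */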
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

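/*
 * Resolve a data relocation: write the computed value into 'place' as a
 * 16-, 32- or 64-bit quantity, returning -ERANGE if it does not fit in
 * the narrower widths.
 */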
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

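/*
 * Patch the 16-bit immediate of a MOVZ/MOVK/MOVN instruction. For the
 * signed (MOVNZ) variants the opcode is rewritten to MOVZ or MOVN
 * depending on the sign of the value; for MOVKZ the immediate is
 * inserted as-is. Returns -ERANGE if the immediate does not fit in
 * 16 bits.
 */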
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

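/*
 * Patch a 'len'-bit immediate field, taking the value bits from bit
 * 'lsb' upwards. Overflow is detected by arithmetic-shifting the bits
 * above the field (including its sign bit) down to bit 0: the result
 * must be all zeroes or all ones, i.e. 0 or -1, which is what the
 * (u64)(sval + 1) >= 2 test below checks.
 */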
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

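/*
 * Handle R_AARCH64_ADR_PREL_PG_HI21 (ADRP). When the Cortex-A53 843419
 * erratum workaround is enabled and the ADRP sits in one of the last
 * two instruction slots of a 4 KB page, the instruction is rewritten as
 * an ADR if the target is in range, or redirected through a veneer
 * otherwise.
 */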
static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
{
	u32 insn;

	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
	    !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
	    ((u64)place & 0xfff) < 0xff8)
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;

		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

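/*
 * Apply a RELA relocation section to a module being loaded. Each entry
 * is resolved against the symbol table and patched in place; -ENOEXEC
 * is returned for unsupported relocation types or unrecoverable
 * overflows.
 */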
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

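/*
 * Called once all sections have been loaded and relocated: apply any
 * alternative instruction patches carried in the module's
 * .altinstructions section, and record the ftrace trampoline section
 * when module PLTs and dynamic ftrace are in use.
 */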
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}