/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

#define MODULE_ARCH_VERMAGIC	"aarch64"
#ifdef CONFIG_ARM64_MODULE_PLTS
/*
 * Bookkeeping for one PLT section of a module: the ELF section header of
 * the section that holds the veneers, the number of entries emitted so
 * far, and the maximum number the section can hold.
 */
struct mod_plt_sec {
	struct elf64_shdr	*plt;
	int			plt_num_entries;
	int			plt_max_entries;
};

/*
 * Per-module arch-specific state: one PLT section for the module's core
 * text and one for its init text.
 */
struct mod_arch_specific {
	struct mod_plt_sec	core;
	struct mod_plt_sec	init;

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampoline;
};
#endif
/*
 * Emit a PLT veneer for the relocation @rela against symbol @sym, applied at
 * location @loc inside module @mod.
 * NOTE(review): presumably returns the address of the (possibly reused)
 * veneer for the relocation to branch to — confirm against the definition
 * in the arch module-PLT code.
 */
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);

/*
 * Emit a veneer working around an erratum-affected ADRP at @loc targeting
 * @val. NOTE(review): return-value semantics assumed to mirror
 * module_emit_plt_entry() — confirm in the defining .c file.
 */
u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val);
#ifdef CONFIG_RANDOMIZE_BASE
/* Base of the module allocation region; randomized elsewhere when KASLR
 * (CONFIG_RANDOMIZE_BASE) is enabled. */
extern u64 module_alloc_base;
#else
/* Fixed module region: MODULES_VSIZE bytes ending at the end of kernel
 * text (_etext). */
#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
#endif
/*
 * One PLT veneer: three mov-immediate instructions building the 48-bit
 * target address in x16, followed by an indirect branch through x16.
 */
struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	mov0;	/* movn	x16, #0x....			*/
	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
	__le32	br;	/* br	x16				*/
};
  55. static inline struct plt_entry get_plt_entry(u64 val)
  56. {
  57. /*
  58. * MOVK/MOVN/MOVZ opcode:
  59. * +--------+------------+--------+-----------+-------------+---------+
  60. * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
  61. * +--------+------------+--------+-----------+-------------+---------+
  62. *
  63. * Rd := 0x10 (x16)
  64. * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
  65. * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
  66. * sf := 1 (64-bit variant)
  67. */
  68. return (struct plt_entry){
  69. cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
  70. cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
  71. cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
  72. cpu_to_le32(0xd61f0200)
  73. };
  74. }
  75. static inline bool plt_entries_equal(const struct plt_entry *a,
  76. const struct plt_entry *b)
  77. {
  78. return a->mov0 == b->mov0 &&
  79. a->mov1 == b->mov1 &&
  80. a->mov2 == b->mov2;
  81. }
#endif	/* __ASM_MODULE_H */