/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  16. #ifndef __ASM_MODULE_H
  17. #define __ASM_MODULE_H
  18. #include <asm-generic/module.h>
  19. #define MODULE_ARCH_VERMAGIC "aarch64"
  20. #ifdef CONFIG_ARM64_MODULE_PLTS
  21. struct mod_plt_sec {
  22. struct elf64_shdr *plt;
  23. int plt_num_entries;
  24. int plt_max_entries;
  25. };
  26. struct mod_arch_specific {
  27. struct mod_plt_sec core;
  28. struct mod_plt_sec init;
  29. /* for CONFIG_DYNAMIC_FTRACE */
  30. void *ftrace_trampoline;
  31. };
  32. #endif
  33. u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
  34. Elf64_Sym *sym);
  35. #ifdef CONFIG_RANDOMIZE_BASE
  36. extern u64 module_alloc_base;
  37. #else
  38. #define module_alloc_base ((u64)_etext - MODULES_VSIZE)
  39. #endif
  40. struct plt_entry {
  41. /*
  42. * A program that conforms to the AArch64 Procedure Call Standard
  43. * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
  44. * IP1 (x17) may be inserted at any branch instruction that is
  45. * exposed to a relocation that supports long branches. Since that
  46. * is exactly what we are dealing with here, we are free to use x16
  47. * as a scratch register in the PLT veneers.
  48. */
  49. __le32 mov0; /* movn x16, #0x.... */
  50. __le32 mov1; /* movk x16, #0x...., lsl #16 */
  51. __le32 mov2; /* movk x16, #0x...., lsl #32 */
  52. __le32 br; /* br x16 */
  53. };
  54. static inline struct plt_entry get_plt_entry(u64 val)
  55. {
  56. /*
  57. * MOVK/MOVN/MOVZ opcode:
  58. * +--------+------------+--------+-----------+-------------+---------+
  59. * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
  60. * +--------+------------+--------+-----------+-------------+---------+
  61. *
  62. * Rd := 0x10 (x16)
  63. * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
  64. * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
  65. * sf := 1 (64-bit variant)
  66. */
  67. return (struct plt_entry){
  68. cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
  69. cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
  70. cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
  71. cpu_to_le32(0xd61f0200)
  72. };
  73. }
  74. static inline bool plt_entries_equal(const struct plt_entry *a,
  75. const struct plt_entry *b)
  76. {
  77. return a->mov0 == b->mov0 &&
  78. a->mov1 == b->mov1 &&
  79. a->mov2 == b->mov2;
  80. }
  81. #endif /* __ASM_MODULE_H */