bpf_jit.h

/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x)	AARCH64_INSN_REG_##x
#define A64_FP		AARCH64_INSN_REG_FP
#define A64_LR		AARCH64_INSN_REG_LR
#define A64_ZR		AARCH64_INSN_REG_ZR
#define A64_SP		AARCH64_INSN_REG_SP

#define A64_VARIANT(sf) \
	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)
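
/*
 * Usage sketch (illustrative, not part of the original header): every
 * macro below expands to a call into the arm64 instruction generators
 * from <asm/insn.h> and evaluates to one 32-bit opcode (u32). The JIT
 * core is assumed to write opcodes out through a helper such as
 * emit(insn, ctx), as the accompanying bpf_jit_comp.c does:
 *
 *	emit(A64_MOV(1, A64_R(9), A64_R(0)), ctx);	// x9 = x0
 */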

/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
		AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
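
/*
 * Branch immediates (imm19/imm26 here and below) are counted in
 * instructions, so the macros scale them by 4 (<< 2) into the byte
 * offsets the generators expect. An illustrative sketch, skipping the
 * instruction after the branch when x0 is zero:
 *
 *	emit(A64_CBZ(1, A64_R(0), 2), ctx);	// cbz x0, . + 8
 */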

/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
	aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ	AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE	AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS	AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI	AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_GE	AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT	AARCH64_INSN_COND_GT /* signed > */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
  44. /* Unconditional branch (immediate) */
  45. #define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
  46. AARCH64_INSN_BRANCH_##type)
  47. #define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK)
  48. #define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
  49. /* Unconditional branch (register) */
  50. #define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
  51. #define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
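
/*
 * Illustrative call/return pattern (tmp is a hypothetical scratch
 * register holding a helper's address, loaded e.g. with the move-wide
 * macros further down):
 *
 *	emit(A64_BLR(tmp), ctx);	// blr tmp (indirect call)
 *	emit(A64_RET(A64_LR), ctx);	// ret (return via x30)
 */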

/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_STRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
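
/*
 * Example (illustrative): a BPF load such as BPF_LDX can materialize
 * its offset in a scratch register and then use the register-offset
 * form (dst, src and tmp are hypothetical register names):
 *
 *	emit(A64_LDR32(dst, src, tmp), ctx);	// ldr Wdst, [Xsrc, Xtmp]
 */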

/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
		AARCH64_INSN_VARIANT_64BIT, \
		AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn)  A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
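
/*
 * Note: this MOV is ADD #0, which (unlike the ORR-based mov alias)
 * treats register 31 as SP, so it also works for frame-pointer setup.
 * An illustrative prologue/epilogue along the lines of bpf_jit_comp.c:
 *
 *	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);	// stp x29, x30, [sp, #-16]!
 *	emit(A64_MOV(1, A64_FP, A64_SP), ctx);		// mov x29, sp
 *	...
 *	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);	// ldp x29, x30, [sp], #16
 *	emit(A64_RET(A64_LR), ctx);			// ret
 */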

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({	\
	int sz = (sf) ? 64 : 32;	\
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
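
/*
 * Worked example: LSL #shift is the UBFM alias with immr = -shift mod sz
 * and imms = sz - 1 - shift. So A64_LSL(1, Rd, Rn, 4) becomes
 * UBFM Rd, Rn, #60, #59, i.e. "lsl Rd, Rn, #4" in the 64-bit variant.
 */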

/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)

/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
	aarch64_insn_gen_movewide(Rd, imm16, shift, \
		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
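
/*
 * Example (illustrative): a full 64-bit immediate can be built 16 bits
 * at a time, roughly what the JIT's 64-bit immediate helper does (reg
 * and imm are hypothetical names):
 *
 *	emit(A64_MOVZ(1, reg, imm & 0xffff, 0), ctx);
 *	emit(A64_MOVK(1, reg, (imm >> 16) & 0xffff, 16), ctx);
 *	emit(A64_MOVK(1, reg, (imm >> 32) & 0xffff, 32), ctx);
 *	emit(A64_MOVK(1, reg, (imm >> 48) & 0xffff, 48), ctx);
 */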

/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
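
/*
 * Example (illustrative): a conditional BPF jump such as BPF_JEQ lowers
 * to a compare plus a conditional branch (dst, src and jmp_offset are
 * hypothetical names):
 *
 *	emit(A64_CMP(1, dst, src), ctx);		// cmp Xdst, Xsrc
 *	emit(A64_B_(A64_COND_EQ, jmp_offset), ctx);	// b.eq <target>
 */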

/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)
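
/*
 * Example (illustrative): a 16-bit byte swap pairs REV16 with the
 * zero-extend above so the upper bits come out cleared (is64 and dst
 * are hypothetical names):
 *
 *	emit(A64_REV16(is64, dst, dst), ctx);	// rev16 dst, dst
 *	emit(A64_UXTH(is64, dst, dst), ctx);	// uxth dst, dst
 */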

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
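
/*
 * MUL is MADD with the zero register as the addend. A sketch of one way
 * to lower BPF_MOD from these pieces (is64, tmp, dst and src are
 * hypothetical names; tmp is a scratch register):
 *
 *	emit(A64_UDIV(is64, tmp, dst, src), ctx);	// tmp = dst / src
 *	emit(A64_MUL(is64, tmp, tmp, src), ctx);	// tmp = tmp * src
 *	emit(A64_SUB(is64, dst, dst, tmp), ctx);	// dst = dst % src
 */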

/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
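
/*
 * Example (illustrative): TST feeds a conditional branch, e.g. a
 * BPF_JSET-style "jump if (dst & src) != 0" (dst, src and jmp_offset
 * are hypothetical names):
 *
 *	emit(A64_TST(1, dst, src), ctx);		// tst Xdst, Xsrc
 *	emit(A64_B_(A64_COND_NE, jmp_offset), ctx);	// b.ne <target>
 */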

#endif /* _BPF_JIT_H */