/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <asm/asm.h>
#include <asm/regdef.h>
#include "bpf_jit.h"

/* ABI
 *
 * r_skb_hl	skb header length
 * r_skb_data	skb data
 * r_off(a1)	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb(a0)	*skb
 * r_M		*scratch memory
 * r_skb_len	skb length
 * r_s0		Scratch register 0
 * r_s1		Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */

#define skb	a0
#define offset	a1
#define SKF_LL_OFF  (-0x200000) /* Can't include linux/filter.h in assembly */

	/* We know better :) so prevent assembler reordering etc */
	.set	noreorder

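	/*
	 * With reordering disabled, the instruction written after every
	 * branch or jump below executes in the branch delay slot, i.e.
	 * it runs before the branch takes effect. The "DS" warnings in
	 * the macros refer to this.
	 */
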
#define is_offset_negative(TYPE)				\
	/* If offset is negative we have more work to do */	\
	slti	t0, offset, 0;					\
	bgtz	t0, bpf_slow_path_##TYPE##_neg;			\
	/* Be careful what follows in DS. */

#define is_offset_in_header(SIZE, TYPE)				\
	/* Reading from header? */				\
	addiu	$r_s0, $r_skb_hl, -SIZE;			\
	slt	t0, $r_s0, offset;				\
	bgtz	t0, bpf_slow_path_##TYPE;			\

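/*
 * Fast paths. is_offset_in_header() branches to the slow path
 * whenever offset > skb_hl - SIZE, i.e. whenever the access would
 * run past the linear header. On success the loaded value ends up
 * in r_A and r_ret is cleared.
 */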
LEAF(sk_load_word)
	is_offset_negative(word)
	.globl sk_load_word_positive
sk_load_word_positive:
	is_offset_in_header(4, word)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lw	$r_A, 0(t1)
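	/*
	 * Packet data is big-endian (network order); on little-endian
	 * cores wsbh+rotr byte-reverses the 32-bit word into host order.
	 */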
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	wsbh	t0, $r_A
	rotr	$r_A, t0, 16
#endif
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_word)

LEAF(sk_load_half)
	is_offset_negative(half)
	.globl sk_load_half_positive
sk_load_half_positive:
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lh	$r_A, 0(t1)
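	/*
	 * As above: on little-endian cores wsbh swaps the two bytes of
	 * the loaded halfword and seh sign-extends the result into r_A.
	 */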
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	wsbh	t0, $r_A
	seh	$r_A, t0
#endif
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_half)

LEAF(sk_load_byte)
	is_offset_negative(byte)
	.globl sk_load_byte_positive
sk_load_byte_positive:
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lb	$r_A, 0(t1)
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_byte)

/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates we leave 4 stack slots for the argument registers in
 * case the callee needs to spill them. Even though we don't care about
 * the argument registers ourselves, we need to allocate that space to
 * remain ABI compliant since the callee may want to use it. We also
 * allocate 2 more slots for $r_ra and our return register (*to).
 *
 * n64 is a bit different: no argument save area is required from the
 * caller, so in 64-bit kernels we allocate the 4-slot area for no good
 * reason, but it does not matter much really.
 *
 * (void *to) is returned in r_s0
 */
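/*
 * Stack frame used below (6 * SZREG bytes):
 *   slots 0-3: o32 argument save area mandated by the ABI
 *   slot 4:    destination slot (zeroed before the call, reloaded
 *              into $r_s0 afterwards)
 *   slot 5:    saved $r_ra
 */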
#define bpf_slow_path_common(SIZE)				\
	/* Quick check. Are we within reasonable boundaries? */ \
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE;		\
	sltu		$r_s0, offset, $r_s1;			\
	beqz		$r_s0, fault;				\
	/* Load 4th argument in DS */				\
	LONG_ADDIU	a3, zero, SIZE;				\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);		\
	PTR_LA		t0, skb_copy_bits;			\
	PTR_S		$r_ra, (5 * SZREG)($r_sp);		\
	/* Assign low slot to a2 */				\
	move		a2, $r_sp;				\
	jalr		t0;					\
	/* Reset our destination slot (DS but it's ok) */	\
	INT_S		zero, (4 * SZREG)($r_sp);		\
	/*							\
	 * skb_copy_bits returns 0 on success and -EFAULT	\
	 * on error. Our data live in a2. Do not bother with	\
	 * our data if an error has been returned.		\
	 */							\
	/* Restore our frame */					\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);		\
	INT_L		$r_s0, (4 * SZREG)($r_sp);		\
	bltz		v0, fault;				\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;		\
	move		$r_ret, zero;				\

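/*
 * Slow-path tails: spill the bytes to the stack via skb_copy_bits()
 * and then byte-swap on little-endian cores, as in the fast paths,
 * before placing the result in r_A.
 */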
NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
	bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	wsbh	t0, $r_s0
	jr	$r_ra
	rotr	$r_A, t0, 16
#endif
	jr	$r_ra
	move	$r_A, $r_s0
	END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
	bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	jr	$r_ra
	wsbh	$r_A, $r_s0
#endif
	jr	$r_ra
	move	$r_A, $r_s0
	END(bpf_slow_path_half)

NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
	bpf_slow_path_common(1)
	jr	$r_ra
	move	$r_A, $r_s0
	END(bpf_slow_path_byte)

/*
 * Negative entry points
 */
	.macro bpf_is_end_of_data
	li	t0, SKF_LL_OFF
	/* Reading link layer data? */
	slt	t1, offset, t0
	bgtz	t1, fault
	/* Be careful what follows in DS. */
	.endm

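/*
 * bpf_is_end_of_data faults when offset < SKF_LL_OFF, i.e. when the
 * negative offset lies below the lowest special range the kernel
 * understands; anything else falls through to the common helper.
 */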
/*
 * call bpf_internal_load_pointer_neg_helper:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *					      int k, unsigned int size)
 *
 * see above (bpf_slow_path_common) for ABI restrictions
 */
#define bpf_negative_common(SIZE)					\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);			\
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper;	\
	PTR_S		$r_ra, (5 * SZREG)($r_sp);			\
	jalr		t0;						\
	li		a2, SIZE;					\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);			\
	/* Check return pointer */					\
	beqz		v0, fault;					\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;			\
	/* Preserve our pointer */					\
	move		$r_s0, v0;					\
	/* Set return value */						\
	move		$r_ret, zero;					\

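/*
 * Negative-offset paths: validate the offset, ask the kernel helper
 * for a pointer into the appropriate area, and fault if it returns
 * NULL; otherwise load from the returned pointer into r_A.
 */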
bpf_slow_path_word_neg:
	bpf_is_end_of_data
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(4)
	jr	$r_ra
	lw	$r_A, 0($r_s0)
	END(sk_load_word_negative)

bpf_slow_path_half_neg:
	bpf_is_end_of_data
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(2)
	jr	$r_ra
	lhu	$r_A, 0($r_s0)
	END(sk_load_half_negative)

bpf_slow_path_byte_neg:
	bpf_is_end_of_data
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(1)
	jr	$r_ra
	lbu	$r_A, 0($r_s0)
	END(sk_load_byte_negative)

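/*
 * Common failure exit: r_ret is set to 1 (in the delay slot) so the
 * JIT-generated code knows the load failed; the success paths above
 * all return with r_ret == 0.
 */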
fault:
	jr	$r_ra
	addiu	$r_ret, zero, 1