/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _BPF_JIT64_H
#define _BPF_JIT64_H

#include "bpf_jit.h"
  15. /*
  16. * Stack layout:
  17. *
  18. * [ prev sp ] <-------------
  19. * [ nv gpr save area ] 8*8 |
  20. * fp (r31) --> [ ebpf stack space ] 512 |
  21. * [ local/tmp var space ] 16 |
  22. * [ frame header ] 32/112 |
  23. * sp (r1) ---> [ stack pointer ] --------------
  24. */
  25. /* for bpf JIT code internal usage */
  26. #define BPF_PPC_STACK_LOCALS 16
  27. /* for gpr non volatile registers BPG_REG_6 to 10, plus skb cache registers */
  28. #define BPF_PPC_STACK_SAVE (8*8)
  29. /* Ensure this is quadword aligned */
  30. #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS + \
  31. MAX_BPF_STACK + BPF_PPC_STACK_SAVE)
#ifndef __ASSEMBLY__
  33. /* BPF register usage */
  34. #define SKB_HLEN_REG (MAX_BPF_REG + 0)
  35. #define SKB_DATA_REG (MAX_BPF_REG + 1)
  36. #define TMP_REG_1 (MAX_BPF_REG + 2)
  37. #define TMP_REG_2 (MAX_BPF_REG + 3)
  38. /* BPF to ppc register mappings */
  39. static const int b2p[] = {
  40. /* function return value */
  41. [BPF_REG_0] = 8,
  42. /* function arguments */
  43. [BPF_REG_1] = 3,
  44. [BPF_REG_2] = 4,
  45. [BPF_REG_3] = 5,
  46. [BPF_REG_4] = 6,
  47. [BPF_REG_5] = 7,
  48. /* non volatile registers */
  49. [BPF_REG_6] = 27,
  50. [BPF_REG_7] = 28,
  51. [BPF_REG_8] = 29,
  52. [BPF_REG_9] = 30,
  53. /* frame pointer aka BPF_REG_10 */
  54. [BPF_REG_FP] = 31,
  55. /* eBPF jit internal registers */
  56. [SKB_HLEN_REG] = 25,
  57. [SKB_DATA_REG] = 26,
  58. [TMP_REG_1] = 9,
  59. [TMP_REG_2] = 10
  60. };
  61. /* Assembly helpers */
  62. #define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
  63. u64 func##_negative_offset(u64 r3, u64 r4); \
  64. u64 func##_positive_offset(u64 r3, u64 r4);
  65. DECLARE_LOAD_FUNC(sk_load_word);
  66. DECLARE_LOAD_FUNC(sk_load_half);
  67. DECLARE_LOAD_FUNC(sk_load_byte);
  68. #define CHOOSE_LOAD_FUNC(imm, func) \
  69. (imm < 0 ? \
  70. (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
  71. func##_positive_offset)
  72. #define SEEN_FUNC 0x1000 /* might call external helpers */
  73. #define SEEN_STACK 0x2000 /* uses BPF stack */
  74. #define SEEN_SKB 0x4000 /* uses sk_buff */
  75. struct codegen_context {
  76. /*
  77. * This is used to track register usage as well
  78. * as calls to external helpers.
  79. * - register usage is tracked with corresponding
  80. * bits (r3-r10 and r25-r31)
  81. * - rest of the bits can be used to track other
  82. * things -- for now, we use bits 16 to 23
  83. * encoded in SEEN_* macros above
  84. */
  85. unsigned int seen;
  86. unsigned int idx;
  87. };
#endif /* !__ASSEMBLY__ */

#endif /* _BPF_JIT64_H */