libbpf.h

/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

struct bpf_insn;

int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
                   int max_entries);
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);
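
/*
 * Usage sketch (illustrative, not part of the original header): create a
 * hash map with 4-byte keys and values and run a round trip through the
 * wrappers above.  BPF_MAP_TYPE_HASH and BPF_ANY come from <linux/bpf.h>;
 * error handling is omitted for brevity.
 *
 *    int key = 1, value = 42, out = 0;
 *    int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key),
 *                                sizeof(value), 256);
 *
 *    bpf_update_elem(map_fd, &key, &value, BPF_ANY);
 *    bpf_lookup_elem(map_fd, &key, &out);    // out == 42 on success
 *    bpf_delete_elem(map_fd, &key);
 */
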
int bpf_prog_load(enum bpf_prog_type prog_type,
                  const struct bpf_insn *insns, int insn_len,
                  const char *license, int kern_version);

#define LOG_BUF_SIZE 65536
extern char bpf_log_buf[LOG_BUF_SIZE];
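
/*
 * Usage sketch (illustrative, not part of the original header): load a
 * trivial "return 0" socket filter built from the instruction macros
 * defined below.  insn_len is passed here as the byte size of the array,
 * which is an assumption about how the matching libbpf.c interprets it;
 * on failure the verifier log can be read from bpf_log_buf.
 *
 *    struct bpf_insn prog[] = {
 *        BPF_MOV64_IMM(BPF_REG_0, 0),
 *        BPF_EXIT_INSN(),
 *    };
 *    int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
 *                                sizeof(prog), "GPL", 0);
 *
 *    if (prog_fd < 0)
 *        printf("verifier log:\n%s\n", bpf_log_buf);
 */
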
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
#define BPF_ALU64_REG(OP, DST, SRC) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
        ((struct bpf_insn) { \
                .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
#define BPF_ALU64_IMM(OP, DST, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = 0, \
                .imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = 0, \
                .imm = IMM })
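
/*
 * Example (illustrative): BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2)
 * emits "r1 += r2", and BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff) emits
 * "r1 &= 0xff".  The BPF_ADD/BPF_AND opcode names and the BPF_REG_*
 * numbers come from the kernel BPF UAPI headers.
 */
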
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_MOV | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
        ((struct bpf_insn) { \
                .code = BPF_ALU | BPF_MOV | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = 0 })

/* Short form of mov, dst_reg = imm32 */
#define BPF_MOV64_IMM(DST, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_MOV | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = 0, \
                .imm = IMM })
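
/*
 * Example (illustrative): a common prologue is
 * BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), which copies the context pointer
 * into a callee-saved register ("r6 = r1") so that later BPF_LD_ABS
 * instructions can find the skb there.
 */
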
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
        BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_LD | BPF_DW | BPF_IMM, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = (__u32) (IMM) }), \
        ((struct bpf_insn) { \
                .code = 0, /* zero is reserved opcode */ \
                .dst_reg = 0, \
                .src_reg = 0, \
                .off = 0, \
                .imm = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD 1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
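
/*
 * Usage sketch (illustrative, not part of the original header): the
 * typical map-lookup sequence built from the macros in this file (some
 * are defined further below).  The verifier rewrites the
 * BPF_PSEUDO_MAP_FD load into a real map pointer; map_fd is a descriptor
 * returned by bpf_create_map(), and N is however many instructions the
 * NULL case needs to skip.
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),     // key = 0 on the stack
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),    // r2 = &key
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),         // r1 = map
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, N),    // r0 == NULL: skip N insns
 */
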
/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
#define BPF_LD_ABS(SIZE, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
                .dst_reg = 0, \
                .src_reg = 0, \
                .off = 0, \
                .imm = IMM })
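
/*
 * Example (illustrative): BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct
 * iphdr, protocol)) loads the IP protocol byte of the current packet
 * into R0.  Note that eBPF LD_ABS implicitly reads the skb pointer from
 * R6, so the program must set up R6 first (see the program sketch near
 * the end of this file).
 */
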
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = OFF, \
                .imm = IMM })
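
/*
 * Example (illustrative): BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 4)
 * emits "r0 = *(u32 *)(r6 + 4)", and
 * BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8) spills r0 to the stack:
 * "*(u64 *)(fp - 8) = r0".
 */
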
/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
#define BPF_JMP_REG(OP, DST, SRC, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = OFF, \
                .imm = IMM })
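
/*
 * Example (illustrative): BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 8, 3) emits
 * "if r2 != 8 goto pc+3".  The offset is counted in instructions and is
 * relative to the instruction that follows the jump.
 */
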
/* Raw code statement block */
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
        ((struct bpf_insn) { \
                .code = CODE, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = IMM })

/* Program exit */
#define BPF_EXIT_INSN() \
        ((struct bpf_insn) { \
                .code = BPF_JMP | BPF_EXIT, \
                .dst_reg = 0, \
                .src_reg = 0, \
                .off = 0, \
                .imm = 0 })
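
/*
 * Putting it together (illustrative, not part of the original header): a
 * socket filter that keeps TCP packets and drops everything else.  For a
 * socket filter the return value in R0 is the number of bytes of the
 * packet to keep (0 drops it).  ETH_HLEN comes from <linux/if_ether.h>,
 * struct iphdr from <linux/ip.h> and IPPROTO_TCP from <netinet/in.h>.
 *
 *    struct bpf_insn prog[] = {
 *        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),    // LD_ABS needs skb in r6
 *        BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol)),
 *        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, IPPROTO_TCP, 2),
 *        BPF_MOV64_IMM(BPF_REG_0, -1),           // TCP: keep the packet
 *        BPF_EXIT_INSN(),
 *        BPF_MOV64_IMM(BPF_REG_0, 0),            // anything else: drop
 *        BPF_EXIT_INSN(),
 *    };
 */
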
/* create RAW socket and bind to interface 'name' */
int open_raw_sock(const char *name);

struct perf_event_attr;
int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
                    int group_fd, unsigned long flags);
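
/*
 * Usage sketch (illustrative, not part of the original header): attach a
 * loaded socket-filter program to a raw socket bound to "lo" via the
 * SO_ATTACH_BPF socket option (value 50 in the Linux UAPI if your libc
 * headers lack it).  kprobe-type programs are instead attached to a perf
 * event fd with ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd).
 *
 *    int sock = open_raw_sock("lo");
 *
 *    if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
 *                   &prog_fd, sizeof(prog_fd)) < 0)
 *        perror("setsockopt(SO_ATTACH_BPF)");
 */
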
#endif