libbpf.h

/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

struct bpf_insn;

/* Map helpers: thin wrappers around the bpf() syscall map commands */
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
                   int max_entries);
int bpf_update_elem(int fd, void *key, void *value);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);

/* Program load helper; the kernel verifier's log is written to bpf_log_buf */
int bpf_prog_load(enum bpf_prog_type prog_type,
                  const struct bpf_insn *insns, int insn_len,
                  const char *license);

#define LOG_BUF_SIZE 8192
extern char bpf_log_buf[LOG_BUF_SIZE];
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
        ((struct bpf_insn) { \
                .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = 0, \
                .imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = 0, \
                .imm = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_MOV | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ALU64 | BPF_MOV | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = 0, \
                .imm = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
        BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_LD | BPF_DW | BPF_IMM, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = 0, \
                .imm = (__u32) (IMM) }), \
        ((struct bpf_insn) { \
                .code = 0, /* zero is reserved opcode */ \
                .dst_reg = 0, \
                .src_reg = 0, \
                .off = 0, \
                .imm = ((__u64) (IMM)) >> 32 })

#define BPF_PSEUDO_MAP_FD 1

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
        ((struct bpf_insn) { \
                .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = OFF, \
                .imm = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
        ((struct bpf_insn) { \
                .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
                .dst_reg = DST, \
                .src_reg = 0, \
                .off = OFF, \
                .imm = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
        ((struct bpf_insn) { \
                .code = CODE, \
                .dst_reg = DST, \
                .src_reg = SRC, \
                .off = OFF, \
                .imm = IMM })

/* Program exit */

#define BPF_EXIT_INSN() \
        ((struct bpf_insn) { \
                .code = BPF_JMP | BPF_EXIT, \
                .dst_reg = 0, \
                .src_reg = 0, \
                .off = 0, \
                .imm = 0 })

#endif
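
Usage sketch (not part of the header): the declarations above only describe the interface, so a short example may help. The snippet below assumes the libbpf.c that accompanies this header is compiled and linked in and that <linux/bpf.h> supplies BPF_MAP_TYPE_HASH; it creates a small hash map and exercises the element helpers.

#include <stdio.h>
#include <linux/bpf.h>
#include "libbpf.h"

int main(void)
{
        int key, next_key, value, map_fd;

        /* hash map with 4-byte keys and 4-byte values, up to 256 entries */
        map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 256);
        if (map_fd < 0) {
                perror("bpf_create_map");
                return 1;
        }

        key = 1;
        value = 42;
        if (bpf_update_elem(map_fd, &key, &value) < 0)  /* insert or overwrite */
                perror("bpf_update_elem");

        value = 0;
        if (bpf_lookup_elem(map_fd, &key, &value) == 0)
                printf("key %d -> value %d\n", key, value);

        /* walk the remaining keys, starting from the one we know about;
         * bpf_get_next_key() returns non-zero once no further key exists
         */
        while (bpf_get_next_key(map_fd, &key, &next_key) == 0) {
                if (bpf_lookup_elem(map_fd, &next_key, &value) == 0)
                        printf("key %d -> value %d\n", next_key, value);
                key = next_key;
        }

        bpf_delete_elem(map_fd, &key);
        return 0;
}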
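
A second sketch shows the instruction macros feeding bpf_prog_load(). BPF_REG_0 and BPF_PROG_TYPE_SOCKET_FILTER come from <linux/bpf.h>. The insn_len argument is passed as the byte size of the instruction array here, which is how the sample programs that accompany this mini library call it; if your libbpf.c expects an instruction count instead, pass the array length. On a failed load the verifier's messages can be read from bpf_log_buf.

#include <stdio.h>
#include <linux/bpf.h>
#include "libbpf.h"

int main(void)
{
        /* trivial socket filter: r0 = 0 (keep zero bytes, i.e. drop), then exit */
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_EXIT_INSN(),
        };
        int prog_fd;

        prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
                                "GPL");
        if (prog_fd < 0) {
                fprintf(stderr, "program load failed:\n%s\n", bpf_log_buf);
                return 1;
        }
        printf("program loaded, fd %d\n", prog_fd);
        return 0;
}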