/* test_align.c - BPF verifier alignment-tracking selftest. */
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

/* Upper bounds on one test's instruction count and expected-match count. */
#define MAX_INSNS	512
#define MAX_MATCHES	16
/* One expected substring of the verifier log, keyed by instruction line. */
struct bpf_reg_match {
	unsigned int line;	/* instruction index the log line refers to */
	const char *match;	/* substring that must appear on that line */
};
/* One alignment test: a BPF program plus the expected verifier output. */
struct bpf_align_test {
	const char *descr;		/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];
	enum {
		UNDEF,			/* default (0): expect successful load */
		ACCEPT,
		REJECT			/* verifier must reject the program */
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};
/*
 * Table of alignment-tracking tests.  Each entry holds a small BPF
 * program and the verifier-log substrings (keyed by instruction line)
 * that do_test_single() checks after loading it.
 */
static struct bpf_align_test tests[] = {
	/* Four tests of known constants. These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv2"},
			{2, "R3_w=inv4"},
			{3, "R3_w=inv8"},
			{4, "R3_w=inv16"},
			{5, "R3_w=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv1"},
			{2, "R3_w=inv2"},
			{3, "R3_w=inv4"},
			{4, "R3_w=inv8"},
			{5, "R3_w=inv16"},
			{6, "R3_w=inv1"},
			{7, "R4_w=inv32"},
			{8, "R4_w=inv16"},
			{9, "R4_w=inv8"},
			{10, "R4_w=inv4"},
			{11, "R4_w=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv4"},
			{2, "R3_w=inv8"},
			{3, "R3_w=inv10"},
			{4, "R4_w=inv8"},
			{5, "R4_w=inv12"},
			{6, "R4_w=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv7"},
			{2, "R3_w=inv7"},
			{3, "R3_w=inv14"},
			{4, "R3_w=inv56"},
		},
	},

	/* Tests using unknown values */

	/* Load the skb data/data_end pointers into R2/R3. */
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

	/* Bounds-check the packet, then read one unknown byte into DST_REG. */
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header. */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction. Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * it's total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4.
			 */
			{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5_w=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
			/* (4n) + 14 == (4n+2).  We blow our bounds, because
			 * the add could overflow.
			 */
			{7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>=0 */
			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>= 0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
			/* New unknown value in R7 is (4n), >= 76 */
			{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
		},
	},
};
  581. static int probe_filter_length(const struct bpf_insn *fp)
  582. {
  583. int len;
  584. for (len = MAX_INSNS - 1; len > 0; --len)
  585. if (fp[len].code != 0 || fp[len].imm != 0)
  586. break;
  587. return len + 1;
  588. }
/* Verifier log buffer, reused for every test. */
static char bpf_vlog[32768];
  590. static int do_test_single(struct bpf_align_test *test)
  591. {
  592. struct bpf_insn *prog = test->insns;
  593. int prog_type = test->prog_type;
  594. char bpf_vlog_copy[32768];
  595. const char *line_ptr;
  596. int cur_line = -1;
  597. int prog_len, i;
  598. int fd_prog;
  599. int ret;
  600. prog_len = probe_filter_length(prog);
  601. fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
  602. prog, prog_len, 1, "GPL", 0,
  603. bpf_vlog, sizeof(bpf_vlog), 2);
  604. if (fd_prog < 0 && test->result != REJECT) {
  605. printf("Failed to load program.\n");
  606. printf("%s", bpf_vlog);
  607. ret = 1;
  608. } else if (fd_prog >= 0 && test->result == REJECT) {
  609. printf("Unexpected success to load!\n");
  610. printf("%s", bpf_vlog);
  611. ret = 1;
  612. close(fd_prog);
  613. } else {
  614. ret = 0;
  615. /* We make a local copy so that we can strtok() it */
  616. strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
  617. line_ptr = strtok(bpf_vlog_copy, "\n");
  618. for (i = 0; i < MAX_MATCHES; i++) {
  619. struct bpf_reg_match m = test->matches[i];
  620. if (!m.match)
  621. break;
  622. while (line_ptr) {
  623. cur_line = -1;
  624. sscanf(line_ptr, "%u: ", &cur_line);
  625. if (cur_line == m.line)
  626. break;
  627. line_ptr = strtok(NULL, "\n");
  628. }
  629. if (!line_ptr) {
  630. printf("Failed to find line %u for match: %s\n",
  631. m.line, m.match);
  632. ret = 1;
  633. printf("%s", bpf_vlog);
  634. break;
  635. }
  636. if (!strstr(line_ptr, m.match)) {
  637. printf("Failed to find match %u: %s\n",
  638. m.line, m.match);
  639. ret = 1;
  640. printf("%s", bpf_vlog);
  641. break;
  642. }
  643. }
  644. if (fd_prog >= 0)
  645. close(fd_prog);
  646. }
  647. return ret;
  648. }
  649. static int do_test(unsigned int from, unsigned int to)
  650. {
  651. int all_pass = 0;
  652. int all_fail = 0;
  653. unsigned int i;
  654. for (i = from; i < to; i++) {
  655. struct bpf_align_test *test = &tests[i];
  656. int fail;
  657. printf("Test %3d: %s ... ",
  658. i, test->descr);
  659. fail = do_test_single(test);
  660. if (fail) {
  661. all_fail++;
  662. printf("FAIL\n");
  663. } else {
  664. all_pass++;
  665. printf("PASS\n");
  666. }
  667. }
  668. printf("Results: %d pass %d fail\n",
  669. all_pass, all_fail);
  670. return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
  671. }
  672. int main(int argc, char **argv)
  673. {
  674. unsigned int from = 0, to = ARRAY_SIZE(tests);
  675. if (argc == 3) {
  676. unsigned int l = atoi(argv[argc - 2]);
  677. unsigned int u = atoi(argv[argc - 1]);
  678. if (l < to && u < to) {
  679. from = l;
  680. to = u + 1;
  681. }
  682. } else if (argc == 2) {
  683. unsigned int t = atoi(argv[argc - 1]);
  684. if (t < to) {
  685. from = t;
  686. to = t + 1;
  687. }
  688. }
  689. return do_test(from, to);
  690. }