/*
 * test_sk_lookup_kern.c - BPF programs exercising bpf_sk_lookup_tcp()
 * reference tracking: positive tests plus deliberate verifier-failure cases.
 */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. // Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
  3. #include <stddef.h>
  4. #include <stdbool.h>
  5. #include <string.h>
  6. #include <linux/bpf.h>
  7. #include <linux/if_ether.h>
  8. #include <linux/in.h>
  9. #include <linux/ip.h>
  10. #include <linux/ipv6.h>
  11. #include <linux/pkt_cls.h>
  12. #include <linux/tcp.h>
  13. #include <sys/socket.h>
  14. #include "bpf_helpers.h"
  15. #include "bpf_endian.h"
/* Program version and license, placed in dedicated ELF sections read by the
 * loader; a GPL-compatible license is required to use GPL-only BPF helpers. */
int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
  18. /* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
  19. static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
  20. void *data_end, __u16 eth_proto,
  21. bool *ipv4)
  22. {
  23. struct bpf_sock_tuple *result;
  24. __u8 proto = 0;
  25. __u64 ihl_len;
  26. if (eth_proto == bpf_htons(ETH_P_IP)) {
  27. struct iphdr *iph = (struct iphdr *)(data + nh_off);
  28. if (iph + 1 > data_end)
  29. return NULL;
  30. ihl_len = iph->ihl * 4;
  31. proto = iph->protocol;
  32. *ipv4 = true;
  33. result = (struct bpf_sock_tuple *)&iph->saddr;
  34. } else if (eth_proto == bpf_htons(ETH_P_IPV6)) {
  35. struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + nh_off);
  36. if (ip6h + 1 > data_end)
  37. return NULL;
  38. ihl_len = sizeof(*ip6h);
  39. proto = ip6h->nexthdr;
  40. *ipv4 = true;
  41. result = (struct bpf_sock_tuple *)&ip6h->saddr;
  42. }
  43. if (data + nh_off + ihl_len > data_end || proto != IPPROTO_TCP)
  44. return NULL;
  45. return result;
  46. }
  47. SEC("sk_lookup_success")
  48. int bpf_sk_lookup_test0(struct __sk_buff *skb)
  49. {
  50. void *data_end = (void *)(long)skb->data_end;
  51. void *data = (void *)(long)skb->data;
  52. struct ethhdr *eth = (struct ethhdr *)(data);
  53. struct bpf_sock_tuple *tuple;
  54. struct bpf_sock *sk;
  55. size_t tuple_len;
  56. bool ipv4;
  57. if (eth + 1 > data_end)
  58. return TC_ACT_SHOT;
  59. tuple = get_tuple(data, sizeof(*eth), data_end, eth->h_proto, &ipv4);
  60. if (!tuple || tuple + sizeof *tuple > data_end)
  61. return TC_ACT_SHOT;
  62. tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
  63. sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
  64. if (sk)
  65. bpf_sk_release(sk);
  66. return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
  67. }
  68. SEC("sk_lookup_success_simple")
  69. int bpf_sk_lookup_test1(struct __sk_buff *skb)
  70. {
  71. struct bpf_sock_tuple tuple = {};
  72. struct bpf_sock *sk;
  73. sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
  74. if (sk)
  75. bpf_sk_release(sk);
  76. return 0;
  77. }
SEC("fail_use_after_free")
/* NEGATIVE test: intentionally reads sk->family AFTER releasing the socket
 * reference (use-after-free). The BPF verifier must reject this program;
 * do NOT "fix" the ordering. */
int bpf_sk_lookup_uaf(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	__u32 family = 0;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
	if (sk) {
		bpf_sk_release(sk);
		/* Deliberate violation: sk is no longer valid here. */
		family = sk->family;
	}
	return family;
}
SEC("fail_modify_sk_pointer")
/* NEGATIVE test: intentionally modifies a checked socket pointer before
 * releasing it. The verifier must reject releasing a pointer that no longer
 * points at the acquired object; do NOT "fix" this. */
int bpf_sk_lookup_modptr(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	__u32 family;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
	if (sk) {
		/* Deliberate violation: offset the reference before release. */
		sk += 1;
		bpf_sk_release(sk);
	}
	return 0;
}
SEC("fail_modify_sk_or_null_pointer")
/* NEGATIVE test: intentionally modifies the PTR_TO_SOCKET_OR_NULL value
 * before the NULL check. The verifier must reject arithmetic on a maybe-NULL
 * socket pointer; do NOT "fix" this. */
int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	__u32 family;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
	/* Deliberate violation: arithmetic before the NULL check. */
	sk += 1;
	if (sk)
		bpf_sk_release(sk);
	return 0;
}
SEC("fail_no_release")
/* NEGATIVE test: acquires a socket reference and never releases it. The
 * verifier must reject programs that leak references; do NOT add a release. */
int bpf_sk_lookup_test2(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};

	bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
	return 0;
}
SEC("fail_release_twice")
/* NEGATIVE test: releases the same socket reference twice (double free).
 * The verifier must reject the second release; do NOT "fix" this. */
int bpf_sk_lookup_test3(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
	bpf_sk_release(sk);
	/* Deliberate violation: reference already released above. */
	bpf_sk_release(sk);
	return 0;
}
SEC("fail_release_unchecked")
/* NEGATIVE test: passes the possibly-NULL lookup result straight to
 * bpf_sk_release() without a NULL check. The verifier must reject releasing
 * a PTR_TO_SOCKET_OR_NULL; do NOT add the check. */
int bpf_sk_lookup_test4(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
	/* Deliberate violation: sk may be NULL here. */
	bpf_sk_release(sk);
	return 0;
}
/* Helper for the subcall negative test below: acquires a socket reference
 * and deliberately leaks it. Kept non-static/unannotated on purpose. */
void lookup_no_release(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};

	bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
SEC("fail_no_release_subcall")
/* NEGATIVE test: the reference leak happens inside a BPF-to-BPF subcall.
 * Verifies that reference tracking crosses function-call boundaries; the
 * verifier must still reject the leak. */
int bpf_sk_lookup_test5(struct __sk_buff *skb)
{
	lookup_no_release(skb);
	return 0;
}