test_run.c

/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>
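
/* Execute @prog once against @ctx with preemption disabled and an RCU
 * read-side lock held for the duration of the run, mirroring the
 * context a real program runs in. The pre-allocated cgroup storage is
 * installed first so that storage map helpers can find it.
 */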
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}
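
/* Run @prog @repeat times (at least once), reporting the last run's
 * return value in @ret and the average per-run duration in nanoseconds
 * (capped at U32_MAX) in @time. The timing clock is paused around
 * cond_resched() so that yielding the CPU does not inflate the
 * measurement, and the loop bails out early on a pending signal.
 */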
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
			u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		*ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return 0;
}
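
/* Copy the (possibly modified) packet data and the run statistics back
 * into the user-supplied bpf_attr. Any failed copy aborts with -EFAULT.
 */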
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}
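
/* Allocate a zeroed kernel buffer with @headroom before and @tailroom
 * after the packet area, then fill the packet area from the user's
 * test input. The input must hold at least an Ethernet header and,
 * together with the head/tail room, fit within a single page.
 */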
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}
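
/* BPF_PROG_TEST_RUN handler for skb-based program types: wrap the user
 * data in a freshly built sk_buff backed by a dummy socket in the
 * caller's network namespace, run the program, and copy the resulting
 * packet and statistics back to userspace. L2 programs (SCHED_CLS and
 * SCHED_ACT) run with the MAC header in the linear data; for all other
 * types a zeroed MAC header is pushed back on before the data is
 * returned, expanding the headroom first if the program shrank it.
 */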
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);

	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret) {
		kfree_skb(skb);
		kfree(sk);
		return ret;
	}

	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}
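
/* BPF_PROG_TEST_RUN handler for XDP programs: lay the user data out in
 * a bare xdp_buff with XDP_PACKET_HEADROOM in front, attach it to rx
 * queue 0 of the loopback device, and run the program. If the program
 * moved data or data_end (e.g. via bpf_xdp_adjust_head()), the size
 * copied back to userspace is recomputed from the adjusted pointers.
 */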
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}