test_run.c

/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>
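
/*
 * Run @prog once against @ctx with the per-cpu cgroup storage pointers
 * installed, under RCU and with preemption disabled so that BPF_PROG_RUN
 * executes in a stable environment.
 */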
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}
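
/*
 * Allocate cgroup storage, run the program @repeat times (at least once),
 * and report the mean runtime in nanoseconds through @time, clamped to
 * U32_MAX.  The loop yields the CPU when rescheduling is needed, excludes
 * that time from the measurement, and exits early on a pending signal.
 */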
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
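
/*
 * Copy the (possibly modified) packet data, its size, the program's
 * return value and the measured duration back into the user-supplied
 * bpf_attr.  Returns 0 on success or -EFAULT if any copy fails.
 */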
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}
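
/*
 * Allocate a zeroed kernel buffer with the requested headroom and
 * tailroom and copy the user-supplied test packet into it.  The input
 * must carry at least an Ethernet header and, including the reserved
 * room, fit into a single page.
 */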
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}
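
/*
 * BPF_PROG_TEST_RUN handler for skb-based program types.  Builds a fake
 * sk_buff (with a zeroed struct sock) around the user data, marks it as
 * received on the loopback device of the caller's network namespace,
 * runs the program and copies the resulting packet back to userspace.
 */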
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}
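
/*
 * BPF_PROG_TEST_RUN handler for XDP programs.  Wraps the user data in an
 * xdp_buff bound to RX queue 0 of the loopback device in the caller's
 * network namespace, runs the program and copies back whatever data
 * window the program left behind.
 */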
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}
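
/*
 * Illustrative userspace sketch (not part of this file): one way to reach
 * the handlers above through the bpf(2) syscall's BPF_PROG_TEST_RUN
 * command.  bpf_prog_test_run_user() and its parameters are hypothetical
 * names; prog_fd is assumed to refer to an already-loaded program,
 * data_out must be large enough for the returned packet, and repeat = 100
 * asks bpf_test_run() to average the duration over 100 runs.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int bpf_prog_test_run_user(int prog_fd, void *data_in,
 *					  __u32 size_in, void *data_out,
 *					  __u32 *size_out, __u32 *retval,
 *					  __u32 *duration)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.test.prog_fd = prog_fd;
 *		attr.test.data_in = (__u64)(unsigned long)data_in;
 *		attr.test.data_size_in = size_in;
 *		attr.test.data_out = (__u64)(unsigned long)data_out;
 *		attr.test.repeat = 100;
 *
 *		err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *		if (!err) {
 *			*size_out = attr.test.data_size_out;
 *			*retval = attr.test.retval;
 *			*duration = attr.test.duration;
 *		}
 *		return err;
 *	}
 */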