/* net/bpf/test_run.c */
  1. /* Copyright (c) 2017 Facebook
  2. *
  3. * This program is free software; you can redistribute it and/or
  4. * modify it under the terms of version 2 of the GNU General Public
  5. * License as published by the Free Software Foundation.
  6. */
  7. #include <linux/bpf.h>
  8. #include <linux/slab.h>
  9. #include <linux/vmalloc.h>
  10. #include <linux/etherdevice.h>
  11. #include <linux/filter.h>
  12. #include <linux/sched/signal.h>
  13. static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
  14. {
  15. u32 ret;
  16. preempt_disable();
  17. rcu_read_lock();
  18. ret = BPF_PROG_RUN(prog, ctx);
  19. rcu_read_unlock();
  20. preempt_enable();
  21. return ret;
  22. }
/*
 * Run @prog against @ctx @repeat times (0 is treated as 1) and report the
 * average per-run wall-clock duration in nanoseconds through @time.
 *
 * Returns the BPF program's return value from the last iteration executed.
 * Time spent rescheduled away in cond_resched() is excluded from the
 * measurement: the elapsed time is banked before yielding and the clock is
 * restarted afterwards.
 */
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx);
		if (need_resched()) {
			/* Abort the loop on a pending signal; note the
			 * average below still divides by the full @repeat,
			 * so an interrupted run under-reports duration. */
			if (signal_pending(current))
				break;
			/* Bank elapsed time so the yield is not measured. */
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	/* Clamp to U32_MAX since the uapi duration field is 32-bit. */
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	return ret;
}
  45. static int bpf_test_finish(union bpf_attr __user *uattr, const void *data,
  46. u32 size, u32 retval, u32 duration)
  47. {
  48. void __user *data_out = u64_to_user_ptr(uattr->test.data_out);
  49. int err = -EFAULT;
  50. if (data_out && copy_to_user(data_out, data, size))
  51. goto out;
  52. if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
  53. goto out;
  54. if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
  55. goto out;
  56. if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
  57. goto out;
  58. err = 0;
  59. out:
  60. return err;
  61. }
  62. static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
  63. u32 headroom, u32 tailroom)
  64. {
  65. void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
  66. void *data;
  67. if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
  68. return ERR_PTR(-EINVAL);
  69. data = kzalloc(size + headroom + tailroom, GFP_USER);
  70. if (!data)
  71. return ERR_PTR(-ENOMEM);
  72. if (copy_from_user(data + headroom, data_in, size)) {
  73. kfree(data);
  74. return ERR_PTR(-EFAULT);
  75. }
  76. return data;
  77. }
/*
 * BPF_PROG_TEST_RUN handler for skb-based program types: builds an skb
 * around user-supplied packet data, runs the program over it (possibly
 * repeatedly), and copies the resulting packet and stats back to userspace.
 *
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	struct sk_buff *skb;
	void *data;
	int ret;

	/* Reserve standard skb headroom plus room for the shared info
	 * struct that build_skb() places at the end of the buffer. */
	data = bpf_test_init(kattr, size, NET_SKB_PAD,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Classify the program type: L2 (tc) programs see the Ethernet
	 * header; LWT and tc types get direct packet access. */
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	/* build_skb() takes ownership of @data on success; on failure we
	 * must free it ourselves. */
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	__skb_put(skb, size);
	/* eth_type_trans() sets skb->protocol and pulls ETH_HLEN, using the
	 * current netns loopback device as the nominal ingress device. */
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	/* L2 programs expect to see the Ethernet header, so push back the
	 * ETH_HLEN that eth_type_trans() pulled. */
	if (is_l2)
		__skb_push(skb, ETH_HLEN);
	if (is_direct_pkt_access)
		bpf_compute_data_end(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	/* Restore the header for non-L2 runs so the full frame (including
	 * the Ethernet header) is copied back to userspace. */
	if (!is_l2)
		__skb_push(skb, ETH_HLEN);
	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}
  129. int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
  130. union bpf_attr __user *uattr)
  131. {
  132. u32 size = kattr->test.data_size_in;
  133. u32 repeat = kattr->test.repeat;
  134. struct xdp_buff xdp = {};
  135. u32 retval, duration;
  136. void *data;
  137. int ret;
  138. data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM, 0);
  139. if (IS_ERR(data))
  140. return PTR_ERR(data);
  141. xdp.data_hard_start = data;
  142. xdp.data = data + XDP_PACKET_HEADROOM;
  143. xdp.data_end = xdp.data + size;
  144. retval = bpf_test_run(prog, &xdp, repeat, &duration);
  145. if (xdp.data != data + XDP_PACKET_HEADROOM)
  146. size = xdp.data_end - xdp.data;
  147. ret = bpf_test_finish(uattr, xdp.data, size, retval, duration);
  148. kfree(data);
  149. return ret;
  150. }