bpf-lirc.c

// SPDX-License-Identifier: GPL-2.0
// bpf-lirc.c - handles bpf programs for lirc devices
//
// Copyright (C) 2018 Sean Young <sean@mess.org>

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/bpf_lirc.h>
#include "rc-core-priv.h"

/*
 * BPF interface for raw IR
 */
const struct bpf_prog_ops lirc_mode2_prog_ops = {
};

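/*
 * The context handed to a lirc_mode2 program is a pointer to a single u32
 * raw IR sample. The helpers below use container_of() to get back from that
 * pointer to the ir_raw_event_ctrl that embeds bpf_sample, and from there
 * to the rc_dev the event belongs to.
 */
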
BPF_CALL_1(bpf_rc_repeat, u32*, sample)
{
        struct ir_raw_event_ctrl *ctrl;

        ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

        rc_repeat(ctrl->dev);

        return 0;
}

static const struct bpf_func_proto rc_repeat_proto = {
        .func      = bpf_rc_repeat,
        .gpl_only  = true, /* rc_repeat is EXPORT_SYMBOL_GPL */
        .ret_type  = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
};

/*
 * Currently rc-core does not support 64-bit scancodes, but there are many
 * known protocols with more than 32 bits. So, define the interface as u64
 * to be future-proof.
 */
BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
           u32, toggle)
{
        struct ir_raw_event_ctrl *ctrl;

        ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

        rc_keydown(ctrl->dev, protocol, scancode, toggle != 0);

        return 0;
}

static const struct bpf_func_proto rc_keydown_proto = {
        .func      = bpf_rc_keydown,
        .gpl_only  = true, /* rc_keydown is EXPORT_SYMBOL_GPL */
        .ret_type  = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
};

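/*
 * Helpers available to lirc_mode2 programs. Any helper not listed here is
 * refused by returning NULL, which makes the verifier reject the program;
 * bpf_trace_printk() is additionally gated on CAP_SYS_ADMIN.
 */
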
static const struct bpf_func_proto *
lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_rc_repeat:
                return &rc_repeat_proto;
        case BPF_FUNC_rc_keydown:
                return &rc_keydown_proto;
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_trace_printk:
                if (capable(CAP_SYS_ADMIN))
                        return bpf_get_trace_printk_proto();
                /* fall through */
        default:
                return NULL;
        }
}

static bool lirc_mode2_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
        /* We have one field of u32 */
        return type == BPF_READ && off == 0 && size == sizeof(u32);
}

const struct bpf_verifier_ops lirc_mode2_verifier_ops = {
        .get_func_proto  = lirc_mode2_func_proto,
        .is_valid_access = lirc_mode2_is_valid_access
};

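/*
 * For illustration only, not part of this file: a minimal lirc_mode2
 * program as it might be written in userspace. It is invoked once per u32
 * sample; LIRC_IS_PULSE() and LIRC_VALUE() come from <linux/lirc.h>, and
 * the scancode 0x42 is a made-up value. libbpf maps the "lirc_mode2"
 * section name to BPF_PROG_TYPE_LIRC_MODE2.
 *
 *      SEC("lirc_mode2")
 *      int decoder(unsigned int *sample)
 *      {
 *              // report a (hypothetical) key for any pulse over 1 ms
 *              if (LIRC_IS_PULSE(*sample) && LIRC_VALUE(*sample) > 1000)
 *                      bpf_rc_keydown(sample, RC_PROTO_UNKNOWN, 0x42, 0);
 *              return 0;
 *      }
 */
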
#define BPF_MAX_PROGS 64

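/*
 * Attach and detach follow the usual copy-and-swap pattern for a
 * bpf_prog_array: build a new array with the program added (or removed),
 * publish it with rcu_assign_pointer(), then free the old array. Readers
 * in lirc_bpf_run() see either the complete old or complete new array.
 */
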
static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        struct ir_raw_event_ctrl *raw;
        int ret;

        if (rcdev->driver_type != RC_DRIVER_IR_RAW)
                return -EINVAL;

        ret = mutex_lock_interruptible(&ir_raw_handler_lock);
        if (ret)
                return ret;

        raw = rcdev->raw;
        if (!raw) {
                ret = -ENODEV;
                goto unlock;
        }

        if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) {
                ret = -E2BIG;
                goto unlock;
        }

        old_array = raw->progs;
        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;

        rcu_assign_pointer(raw->progs, new_array);
        bpf_prog_array_free(old_array);

unlock:
        mutex_unlock(&ir_raw_handler_lock);
        return ret;
}

static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        struct ir_raw_event_ctrl *raw;
        int ret;

        if (rcdev->driver_type != RC_DRIVER_IR_RAW)
                return -EINVAL;

        ret = mutex_lock_interruptible(&ir_raw_handler_lock);
        if (ret)
                return ret;

        raw = rcdev->raw;
        if (!raw) {
                ret = -ENODEV;
                goto unlock;
        }

        old_array = raw->progs;
        ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array);
        /*
         * Do not use bpf_prog_array_delete_safe() as we would end up
         * with a dummy entry in the array, and then we would free the
         * dummy in lirc_bpf_free()
         */
        if (ret)
                goto unlock;

        rcu_assign_pointer(raw->progs, new_array);
        bpf_prog_array_free(old_array);

unlock:
        mutex_unlock(&ir_raw_handler_lock);
        return ret;
}

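/*
 * Called from the raw IR event path for every sample received; runs each
 * attached program with a pointer to the sample as its context. A program
 * that decodes a full scancode reports it via bpf_rc_keydown() itself, so
 * the programs' return values are ignored here.
 */
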
void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
{
        struct ir_raw_event_ctrl *raw = rcdev->raw;

        raw->bpf_sample = sample;

        if (raw->progs)
                BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, BPF_PROG_RUN);
}

/*
 * This should be called once the rc thread has been stopped, so there can be
 * no concurrent bpf execution.
 */
void lirc_bpf_free(struct rc_dev *rcdev)
{
        struct bpf_prog_array_item *item;

        if (!rcdev->raw->progs)
                return;

        item = rcu_dereference(rcdev->raw->progs)->items;
        while (item->prog) {
                bpf_prog_put(item->prog);
                item++;
        }

        bpf_prog_array_free(rcdev->raw->progs);
}

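/*
 * Entry points called from the bpf(2) syscall for the BPF_LIRC_MODE2
 * attach type. target_fd must be a file descriptor for an open lirc
 * chardev, and no attach flags are accepted.
 */
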
int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct rc_dev *rcdev;
        int ret;

        if (attr->attach_flags)
                return -EINVAL;

        rcdev = rc_dev_get_from_fd(attr->target_fd);
        if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);

        ret = lirc_bpf_attach(rcdev, prog);

        put_device(&rcdev->dev);

        return ret;
}

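/*
 * For illustration only: attaching a loaded program from userspace with
 * libbpf (prog_fd is assumed to already refer to a loaded
 * BPF_PROG_TYPE_LIRC_MODE2 program, and /dev/lirc0 is an example device
 * node):
 *
 *      int lircfd = open("/dev/lirc0", O_RDONLY);
 *      int err = bpf_prog_attach(prog_fd, lircfd, BPF_LIRC_MODE2, 0);
 */
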
int lirc_prog_detach(const union bpf_attr *attr)
{
        struct bpf_prog *prog;
        struct rc_dev *rcdev;
        int ret;

        if (attr->attach_flags)
                return -EINVAL;

        prog = bpf_prog_get_type(attr->attach_bpf_fd,
                                 BPF_PROG_TYPE_LIRC_MODE2);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        rcdev = rc_dev_get_from_fd(attr->target_fd);
        if (IS_ERR(rcdev)) {
                bpf_prog_put(prog);
                return PTR_ERR(rcdev);
        }

        ret = lirc_bpf_detach(rcdev, prog);

        bpf_prog_put(prog);
        put_device(&rcdev->dev);

        return ret;
}

int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
        __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
        struct bpf_prog_array __rcu *progs;
        struct rc_dev *rcdev;
        u32 cnt, flags = 0;
        int ret;

        if (attr->query.query_flags)
                return -EINVAL;

        rcdev = rc_dev_get_from_fd(attr->query.target_fd);
        if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);

        if (rcdev->driver_type != RC_DRIVER_IR_RAW) {
                ret = -EINVAL;
                goto put;
        }

        ret = mutex_lock_interruptible(&ir_raw_handler_lock);
        if (ret)
                goto put;

        progs = rcdev->raw->progs;
        cnt = progs ? bpf_prog_array_length(progs) : 0;

        if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
                ret = -EFAULT;
                goto unlock;
        }

        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) {
                ret = -EFAULT;
                goto unlock;
        }

        if (attr->query.prog_cnt != 0 && prog_ids && cnt)
                ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);

unlock:
        mutex_unlock(&ir_raw_handler_lock);
put:
        put_device(&rcdev->dev);
        return ret;
}