  1. // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2. /*
  3. * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
  4. */
  5. #include <rdma/ib_user_verbs.h>
  6. #include <rdma/ib_verbs.h>
  7. #include <rdma/uverbs_types.h>
  8. #include <rdma/uverbs_ioctl.h>
  9. #include <rdma/mlx5_user_ioctl_cmds.h>
  10. #include <rdma/ib_umem.h>
  11. #include <linux/mlx5/driver.h>
  12. #include <linux/mlx5/fs.h>
  13. #include "mlx5_ib.h"
  14. #define UVERBS_MODULE_NAME mlx5_ib
  15. #include <rdma/uverbs_named_ioctl.h>
/*
 * Per-enum attribute specs for MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE.
 *
 * Only MLX5_IB_FLOW_TYPE_NORMAL carries a payload: a u16 flow priority
 * (copied into matcher->priority by the matcher-create handler below).
 * The sniffer/all-default/mc-default types take no data.
 */
static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
	[MLX5_IB_FLOW_TYPE_NORMAL] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		.u.ptr = {
			.len = sizeof(u16), /* data is priority */
			.min_len = sizeof(u16),
		}
	},
	[MLX5_IB_FLOW_TYPE_SNIFFER] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
	[MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
	[MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
};
  37. static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
  38. struct ib_device *ib_dev, struct ib_uverbs_file *file,
  39. struct uverbs_attr_bundle *attrs)
  40. {
  41. struct mlx5_ib_flow_handler *flow_handler;
  42. struct mlx5_ib_flow_matcher *fs_matcher;
  43. void *devx_obj;
  44. int dest_id, dest_type;
  45. void *cmd_in;
  46. int inlen;
  47. bool dest_devx, dest_qp;
  48. struct ib_qp *qp = NULL;
  49. struct ib_uobject *uobj =
  50. uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
  51. struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
  52. if (!capable(CAP_NET_RAW))
  53. return -EPERM;
  54. dest_devx =
  55. uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
  56. dest_qp = uverbs_attr_is_valid(attrs,
  57. MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
  58. if ((dest_devx && dest_qp) || (!dest_devx && !dest_qp))
  59. return -EINVAL;
  60. if (dest_devx) {
  61. devx_obj = uverbs_attr_get_obj(
  62. attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
  63. if (IS_ERR(devx_obj))
  64. return PTR_ERR(devx_obj);
  65. /* Verify that the given DEVX object is a flow
  66. * steering destination.
  67. */
  68. if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
  69. return -EINVAL;
  70. } else {
  71. struct mlx5_ib_qp *mqp;
  72. qp = uverbs_attr_get_obj(attrs,
  73. MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
  74. if (IS_ERR(qp))
  75. return PTR_ERR(qp);
  76. if (qp->qp_type != IB_QPT_RAW_PACKET)
  77. return -EINVAL;
  78. mqp = to_mqp(qp);
  79. if (mqp->flags & MLX5_IB_QP_RSS)
  80. dest_id = mqp->rss_qp.tirn;
  81. else
  82. dest_id = mqp->raw_packet_qp.rq.tirn;
  83. dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
  84. }
  85. if (dev->rep)
  86. return -ENOTSUPP;
  87. cmd_in = uverbs_attr_get_alloced_ptr(
  88. attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
  89. inlen = uverbs_attr_get_len(attrs,
  90. MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
  91. fs_matcher = uverbs_attr_get_obj(attrs,
  92. MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
  93. flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
  94. dest_id, dest_type);
  95. if (IS_ERR(flow_handler))
  96. return PTR_ERR(flow_handler);
  97. ib_set_flow(uobj, &flow_handler->ibflow, qp, ib_dev);
  98. return 0;
  99. }
  100. static int flow_matcher_cleanup(struct ib_uobject *uobject,
  101. enum rdma_remove_reason why)
  102. {
  103. struct mlx5_ib_flow_matcher *obj = uobject->object;
  104. int ret;
  105. ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
  106. if (ret)
  107. return ret;
  108. kfree(obj);
  109. return 0;
  110. }
  111. static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
  112. struct ib_device *ib_dev, struct ib_uverbs_file *file,
  113. struct uverbs_attr_bundle *attrs)
  114. {
  115. struct ib_uobject *uobj = uverbs_attr_get_uobject(
  116. attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
  117. struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
  118. struct mlx5_ib_flow_matcher *obj;
  119. int err;
  120. obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
  121. if (!obj)
  122. return -ENOMEM;
  123. obj->mask_len = uverbs_attr_get_len(
  124. attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
  125. err = uverbs_copy_from(&obj->matcher_mask,
  126. attrs,
  127. MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
  128. if (err)
  129. goto end;
  130. obj->flow_type = uverbs_attr_get_enum_id(
  131. attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
  132. if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
  133. err = uverbs_copy_from(&obj->priority,
  134. attrs,
  135. MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
  136. if (err)
  137. goto end;
  138. }
  139. err = uverbs_copy_from(&obj->match_criteria_enable,
  140. attrs,
  141. MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
  142. if (err)
  143. goto end;
  144. uobj->object = obj;
  145. obj->mdev = dev->mdev;
  146. atomic_set(&obj->usecnt, 0);
  147. return 0;
  148. end:
  149. kfree(obj);
  150. return err;
  151. }
/*
 * uverbs ioctl metadata: methods and objects exported by this file.
 * These macros build the attribute/method/object description tables
 * consumed by the rdma uverbs ioctl dispatcher.
 */

/* CREATE_FLOW: new FLOW handle + match value + matcher, and exactly one
 * of DEST_QP / DEST_DEVX (enforced in the handler, so neither attr is
 * UA_MANDATORY here).
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_CREATE_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
			UVERBS_OBJECT_QP,
			UVERBS_ACCESS_READ),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ));

/* DESTROY_FLOW reuses the CREATE handle attr id (same attr slot). */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DESTROY_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* Attach the mlx5-specific flow methods to the generic FLOW object. */
ADD_UVERBS_METHODS(mlx5_ib_fs,
		   UVERBS_OBJECT_FLOW,
		   &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
		   &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));

/* FLOW_MATCHER_CREATE: mask, flow type (enum-typed attr, see
 * mlx5_ib_flow_type[] above) and match-criteria-enable byte.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY),
	UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
			    mlx5_ib_flow_type,
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
			   UVERBS_ATTR_TYPE(u8),
			   UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* The matcher is a driver-private IDR object with its own cleanup. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
			    UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));

/* Tree root exported to the mlx5_ib specs registration code. */
DECLARE_UVERBS_OBJECT_TREE(flow_objects,
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER));