devx.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129
  1. // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2. /*
  3. * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
  4. */
  5. #include <rdma/ib_user_verbs.h>
  6. #include <rdma/ib_verbs.h>
  7. #include <rdma/uverbs_types.h>
  8. #include <rdma/uverbs_ioctl.h>
  9. #include <rdma/mlx5_user_ioctl_cmds.h>
  10. #include <rdma/ib_umem.h>
  11. #include <linux/mlx5/driver.h>
  12. #include <linux/mlx5/fs.h>
  13. #include "mlx5_ib.h"
  14. #define UVERBS_MODULE_NAME mlx5_ib
  15. #include <rdma/uverbs_named_ioctl.h>
  16. #define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
/*
 * A firmware object created on behalf of userspace through the DEVX
 * interface.  The matching destroy command mailbox (dinbox/dinlen) is
 * pre-built by devx_obj_build_destroy_cmd() at creation time, so cleanup
 * can tear the object down without re-parsing the original create command.
 */
struct devx_obj {
	struct mlx5_core_dev *mdev;	/* device the object was created on */
	u32 obj_id;			/* firmware-assigned object id */
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; /* prepared destroy cmd */
};
/*
 * A user memory region registered through DEVX, wrapping an ib_umem plus
 * the page layout computed for it (see mlx5_ib_cont_pages() in
 * devx_umem_get()) and a prepared destroy command header.
 */
struct devx_umem {
	struct mlx5_core_dev *mdev;	/* owning device */
	struct ib_umem *umem;		/* pinned user memory */
	u32 page_offset;		/* offset of start within first page */
	int page_shift;			/* log2 of the selected page size */
	int ncont;			/* number of contiguous pages/blocks */
	u32 dinlen;			/* destroy inbox length */
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)]; /* destroy cmd */
};
/*
 * Scratch state for building and executing a UMEM registration command:
 * a dynamically sized input mailbox and a fixed-size output header.
 */
struct devx_umem_reg_cmd {
	void *in;	/* allocated input mailbox */
	u32 inlen;	/* length of @in in bytes */
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; /* cmd output hdr */
};
  37. static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
  38. {
  39. return to_mucontext(ib_uverbs_get_ucontext(file));
  40. }
  41. int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
  42. {
  43. u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
  44. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
  45. u64 general_obj_types;
  46. void *hdr;
  47. int err;
  48. hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
  49. general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
  50. if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
  51. !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
  52. return -EINVAL;
  53. if (!capable(CAP_NET_RAW))
  54. return -EPERM;
  55. MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
  56. MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
  57. err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  58. if (err)
  59. return err;
  60. context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
  61. return 0;
  62. }
  63. void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
  64. struct mlx5_ib_ucontext *context)
  65. {
  66. u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
  67. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
  68. MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
  69. MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
  70. MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
  71. mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  72. }
  73. bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
  74. {
  75. struct devx_obj *devx_obj = obj;
  76. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
  77. switch (opcode) {
  78. case MLX5_CMD_OP_DESTROY_TIR:
  79. *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
  80. *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
  81. obj_id);
  82. return true;
  83. case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
  84. *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
  85. *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
  86. table_id);
  87. return true;
  88. default:
  89. return false;
  90. }
  91. }
/*
 * Check that the object id embedded in a user-supplied modify/query
 * command mailbox matches the id of the devx object bound to the handle.
 * This prevents a user from addressing someone else's firmware object
 * through an unrelated uobject.  Returns true on a match, false on a
 * mismatch or an unrecognized opcode.
 *
 * NOTE(review): declared int but only ever returns true/false — could be
 * declared bool like the other devx_is_* predicates in this file.
 */
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u32 obj_id;

	/* Each opcode keeps its object id in an opcode-specific field. */
	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = MLX5_GET(query_cq_in, in, cqn);
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = MLX5_GET(modify_cq_in, in, cqn);
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = MLX5_GET(query_sq_in, in, sqn);
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = MLX5_GET(modify_sq_in, in, sqn);
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = MLX5_GET(query_rq_in, in, rqn);
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = MLX5_GET(modify_rq_in, in, rqn);
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = MLX5_GET(query_rmp_in, in, rmpn);
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = MLX5_GET(query_rqt_in, in, rqtn);
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = MLX5_GET(query_tir_in, in, tirn);
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = MLX5_GET(modify_tir_in, in, tirn);
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = MLX5_GET(query_tis_in, in, tisn);
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = MLX5_GET(modify_tis_in, in, tisn);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = MLX5_GET(query_flow_table_in, in, table_id);
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = MLX5_GET(query_flow_group_in, in, group_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = MLX5_GET(query_fte_in, in, flow_index);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = MLX5_GET(set_fte_in, in, flow_index);
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = MLX5_GET(query_scheduling_element_in, in,
				  scheduling_element_id);
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = MLX5_GET(modify_scheduling_element_in, in,
				  scheduling_element_id);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = MLX5_GET(query_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = MLX5_GET(qp_2err_in, in, qpn);
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = MLX5_GET(qp_2rst_in, in, qpn);
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = MLX5_GET(query_dct_in, in, dctn);
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
		obj_id = MLX5_GET(query_xrq_in, in, xrqn);
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = MLX5_GET(query_srq_in, in, srqn);
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = MLX5_GET(arm_rq_in, in, srq_number);
		break;
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = MLX5_GET(drain_dct_in, in, dctn);
		break;
	case MLX5_CMD_OP_ARM_XRQ:
		obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
		break;
	default:
		/* Unknown opcode: refuse rather than guess the id field. */
		return false;
	}

	if (obj_id == obj->obj_id)
		return true;

	return false;
}
  243. static bool devx_is_obj_create_cmd(const void *in)
  244. {
  245. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  246. switch (opcode) {
  247. case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
  248. case MLX5_CMD_OP_CREATE_MKEY:
  249. case MLX5_CMD_OP_CREATE_CQ:
  250. case MLX5_CMD_OP_ALLOC_PD:
  251. case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
  252. case MLX5_CMD_OP_CREATE_RMP:
  253. case MLX5_CMD_OP_CREATE_SQ:
  254. case MLX5_CMD_OP_CREATE_RQ:
  255. case MLX5_CMD_OP_CREATE_RQT:
  256. case MLX5_CMD_OP_CREATE_TIR:
  257. case MLX5_CMD_OP_CREATE_TIS:
  258. case MLX5_CMD_OP_ALLOC_Q_COUNTER:
  259. case MLX5_CMD_OP_CREATE_FLOW_TABLE:
  260. case MLX5_CMD_OP_CREATE_FLOW_GROUP:
  261. case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
  262. case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
  263. case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
  264. case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
  265. case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
  266. case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
  267. case MLX5_CMD_OP_CREATE_QP:
  268. case MLX5_CMD_OP_CREATE_SRQ:
  269. case MLX5_CMD_OP_CREATE_XRC_SRQ:
  270. case MLX5_CMD_OP_CREATE_DCT:
  271. case MLX5_CMD_OP_CREATE_XRQ:
  272. case MLX5_CMD_OP_ATTACH_TO_MCG:
  273. case MLX5_CMD_OP_ALLOC_XRCD:
  274. return true;
  275. case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
  276. {
  277. u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
  278. if (op_mod == 0)
  279. return true;
  280. return false;
  281. }
  282. default:
  283. return false;
  284. }
  285. }
  286. static bool devx_is_obj_modify_cmd(const void *in)
  287. {
  288. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  289. switch (opcode) {
  290. case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
  291. case MLX5_CMD_OP_MODIFY_CQ:
  292. case MLX5_CMD_OP_MODIFY_RMP:
  293. case MLX5_CMD_OP_MODIFY_SQ:
  294. case MLX5_CMD_OP_MODIFY_RQ:
  295. case MLX5_CMD_OP_MODIFY_RQT:
  296. case MLX5_CMD_OP_MODIFY_TIR:
  297. case MLX5_CMD_OP_MODIFY_TIS:
  298. case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
  299. case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
  300. case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
  301. case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
  302. case MLX5_CMD_OP_RST2INIT_QP:
  303. case MLX5_CMD_OP_INIT2RTR_QP:
  304. case MLX5_CMD_OP_RTR2RTS_QP:
  305. case MLX5_CMD_OP_RTS2RTS_QP:
  306. case MLX5_CMD_OP_SQERR2RTS_QP:
  307. case MLX5_CMD_OP_2ERR_QP:
  308. case MLX5_CMD_OP_2RST_QP:
  309. case MLX5_CMD_OP_ARM_XRC_SRQ:
  310. case MLX5_CMD_OP_ARM_RQ:
  311. case MLX5_CMD_OP_DRAIN_DCT:
  312. case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
  313. case MLX5_CMD_OP_ARM_XRQ:
  314. return true;
  315. case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
  316. {
  317. u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
  318. if (op_mod == 1)
  319. return true;
  320. return false;
  321. }
  322. default:
  323. return false;
  324. }
  325. }
/*
 * Allow-list predicate: does the user mailbox carry an object-query
 * opcode?  Only read-only query commands pass.
 */
static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
		return true;
	default:
		return false;
	}
}
  357. static bool devx_is_general_cmd(void *in)
  358. {
  359. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  360. switch (opcode) {
  361. case MLX5_CMD_OP_QUERY_HCA_CAP:
  362. case MLX5_CMD_OP_QUERY_VPORT_STATE:
  363. case MLX5_CMD_OP_QUERY_ADAPTER:
  364. case MLX5_CMD_OP_QUERY_ISSI:
  365. case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
  366. case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
  367. case MLX5_CMD_OP_QUERY_VNIC_ENV:
  368. case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
  369. case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
  370. case MLX5_CMD_OP_NOP:
  371. case MLX5_CMD_OP_QUERY_CONG_STATUS:
  372. case MLX5_CMD_OP_QUERY_CONG_PARAMS:
  373. case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
  374. return true;
  375. default:
  376. return false;
  377. }
  378. }
  379. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(struct ib_device *ib_dev,
  380. struct ib_uverbs_file *file,
  381. struct uverbs_attr_bundle *attrs)
  382. {
  383. struct mlx5_ib_dev *dev = to_mdev(ib_dev);
  384. int user_vector;
  385. int dev_eqn;
  386. unsigned int irqn;
  387. int err;
  388. if (uverbs_copy_from(&user_vector, attrs,
  389. MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
  390. return -EFAULT;
  391. err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
  392. if (err < 0)
  393. return err;
  394. if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
  395. &dev_eqn, sizeof(dev_eqn)))
  396. return -EFAULT;
  397. return 0;
  398. }
  399. /*
  400. *Security note:
  401. * The hardware protection mechanism works like this: Each device object that
  402. * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
  403. * the device specification manual) upon its creation. Then upon doorbell,
  404. * hardware fetches the object context for which the doorbell was rang, and
  405. * validates that the UAR through which the DB was rang matches the UAR ID
  406. * of the object.
  407. * If no match the doorbell is silently ignored by the hardware. Of course,
  408. * the user cannot ring a doorbell on a UAR that was not mapped to it.
  409. * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
  410. * mailboxes (except tagging them with UID), we expose to the user its UAR
  411. * ID, so it can embed it in these objects in the expected specification
  412. * format. So the only thing the user can do is hurt itself by creating a
  413. * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
  414. * may ring a doorbell on its objects.
  415. * The consequence of that will be that another user can schedule a QP/SQ
  416. * of the buggy user for execution (just insert it to the hardware schedule
  417. * queue or arm its CQ for event generation), no further harm is expected.
  418. */
  419. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(struct ib_device *ib_dev,
  420. struct ib_uverbs_file *file,
  421. struct uverbs_attr_bundle *attrs)
  422. {
  423. struct mlx5_ib_ucontext *c = devx_ufile2uctx(file);
  424. u32 user_idx;
  425. s32 dev_idx;
  426. if (uverbs_copy_from(&user_idx, attrs,
  427. MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
  428. return -EFAULT;
  429. dev_idx = bfregn_to_uar_index(to_mdev(ib_dev),
  430. &c->bfregi, user_idx, true);
  431. if (dev_idx < 0)
  432. return dev_idx;
  433. if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
  434. &dev_idx, sizeof(dev_idx)))
  435. return -EFAULT;
  436. return 0;
  437. }
  438. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(struct ib_device *ib_dev,
  439. struct ib_uverbs_file *file,
  440. struct uverbs_attr_bundle *attrs)
  441. {
  442. struct mlx5_ib_ucontext *c = devx_ufile2uctx(file);
  443. struct mlx5_ib_dev *dev = to_mdev(ib_dev);
  444. void *cmd_in = uverbs_attr_get_alloced_ptr(
  445. attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
  446. int cmd_out_len = uverbs_attr_get_len(attrs,
  447. MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
  448. void *cmd_out;
  449. int err;
  450. if (!c->devx_uid)
  451. return -EPERM;
  452. /* Only white list of some general HCA commands are allowed for this method. */
  453. if (!devx_is_general_cmd(cmd_in))
  454. return -EINVAL;
  455. cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
  456. if (!cmd_out)
  457. return -ENOMEM;
  458. MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
  459. err = mlx5_cmd_exec(dev->mdev, cmd_in,
  460. uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
  461. cmd_out, cmd_out_len);
  462. if (err)
  463. goto other_cmd_free;
  464. err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len);
  465. other_cmd_free:
  466. kvfree(cmd_out);
  467. return err;
  468. }
/*
 * From a successful create command (@in mailbox, @out firmware reply),
 * pre-build the matching destroy command into @din, reporting its length
 * via @dinlen and the created object's id via @obj_id.  The result is
 * cached in devx_obj so devx_obj_cleanup() can destroy the object later
 * without the original create mailbox.
 *
 * Most objects need only the generic header (opcode + obj_id + uid);
 * flow tables/groups/entries, scheduling elements, VXLAN ports, L2 table
 * entries and MCG attachments need extra addressing fields copied from
 * the create command.
 */
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	/* Default: a bare general-object header; cases below may override. */
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		/* Destroy needs vport/table_type addressing from the create. */
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		/* The FTE's "id" is its flow_index within the table. */
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		/* The port number itself acts as the object id. */
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		/* Detach is keyed by qpn + multicast gid, not an object id. */
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		break;
	}
}
  635. static int devx_obj_cleanup(struct ib_uobject *uobject,
  636. enum rdma_remove_reason why)
  637. {
  638. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
  639. struct devx_obj *obj = uobject->object;
  640. int ret;
  641. ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
  642. if (ib_is_destroy_retryable(ret, why, uobject))
  643. return ret;
  644. kfree(obj);
  645. return ret;
  646. }
/*
 * Create a firmware object from a user-supplied command mailbox, bind it
 * to a new uobject handle, cache its destroy command, and return the
 * firmware output to userspace.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(struct ib_device *ib_dev,
			  struct ib_uverbs_file *file,
			  struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c = devx_ufile2uctx(file);
	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	void *cmd_out;
	struct ib_uobject *uobj;
	struct devx_obj *obj;
	int err;

	/* Creating firmware objects requires a devx uid. */
	if (!c->devx_uid)
		return -EPERM;

	/* Only allow-listed create opcodes may pass. */
	if (!devx_is_obj_create_cmd(cmd_in))
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
	if (!cmd_out) {
		err = -ENOMEM;
		goto obj_free;
	}

	/* Tag the mailbox with the caller's uid for firmware isolation. */
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto cmd_free;

	uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	uobj->object = obj;
	obj->mdev = dev->mdev;
	/* Pre-build the destroy command so cleanup needs no create mailbox. */
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
	/* dinlen is in bytes; the dinbox array holds that many DWs worth. */
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	/* NOTE(review): if this copy fails, the firmware object was already
	 * created but obj is freed below while uobj->object still points at
	 * it — confirm the uverbs abort path does not call devx_obj_cleanup()
	 * on this handle, otherwise this is a use-after-free / FW leak.
	 */
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto cmd_free;

	kvfree(cmd_out);
	return 0;

cmd_free:
	kvfree(cmd_out);
obj_free:
	kfree(obj);
	return err;
}
  694. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(struct ib_device *ib_dev,
  695. struct ib_uverbs_file *file,
  696. struct uverbs_attr_bundle *attrs)
  697. {
  698. struct mlx5_ib_ucontext *c = devx_ufile2uctx(file);
  699. struct mlx5_ib_dev *dev = to_mdev(ib_dev);
  700. void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
  701. int cmd_out_len = uverbs_attr_get_len(attrs,
  702. MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
  703. struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
  704. MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
  705. void *cmd_out;
  706. int err;
  707. if (!c->devx_uid)
  708. return -EPERM;
  709. if (!devx_is_obj_modify_cmd(cmd_in))
  710. return -EINVAL;
  711. if (!devx_is_valid_obj_id(uobj->object, cmd_in))
  712. return -EINVAL;
  713. cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
  714. if (!cmd_out)
  715. return -ENOMEM;
  716. MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
  717. err = mlx5_cmd_exec(dev->mdev, cmd_in,
  718. uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
  719. cmd_out, cmd_out_len);
  720. if (err)
  721. goto other_cmd_free;
  722. err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
  723. cmd_out, cmd_out_len);
  724. other_cmd_free:
  725. kvfree(cmd_out);
  726. return err;
  727. }
  728. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(struct ib_device *ib_dev,
  729. struct ib_uverbs_file *file,
  730. struct uverbs_attr_bundle *attrs)
  731. {
  732. struct mlx5_ib_ucontext *c = devx_ufile2uctx(file);
  733. struct mlx5_ib_dev *dev = to_mdev(ib_dev);
  734. void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
  735. int cmd_out_len = uverbs_attr_get_len(attrs,
  736. MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
  737. struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
  738. MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
  739. void *cmd_out;
  740. int err;
  741. if (!c->devx_uid)
  742. return -EPERM;
  743. if (!devx_is_obj_query_cmd(cmd_in))
  744. return -EINVAL;
  745. if (!devx_is_valid_obj_id(uobj->object, cmd_in))
  746. return -EINVAL;
  747. cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
  748. if (!cmd_out)
  749. return -ENOMEM;
  750. MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
  751. err = mlx5_cmd_exec(dev->mdev, cmd_in,
  752. uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
  753. cmd_out, cmd_out_len);
  754. if (err)
  755. goto other_cmd_free;
  756. err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, cmd_out, cmd_out_len);
  757. other_cmd_free:
  758. kvfree(cmd_out);
  759. return err;
  760. }
/*
 * Pin the user memory described by the REG_ADDR/REG_LEN/REG_ACCESS
 * attributes and record its page layout in @obj.
 *
 * On success, @obj->umem holds a pinned umem that the caller owns and
 * must release with ib_umem_release(); page_shift/ncont/page_offset
 * describe the contiguous page layout computed by mlx5_ib_cont_pages().
 *
 * Returns 0 on success or a negative errno (-EFAULT on attribute copy
 * failure, -EINVAL for bad access flags or an unmappable region).
 */
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	int access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN) ||
	    uverbs_copy_from(&access, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS))
		return -EFAULT;

	/* Reject unknown or contradictory IB access flags up front. */
	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);
	/* npages == 0 means no usable pages were found; undo the pin. */
	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	/* Offset of the region start within its (possibly large) page. */
	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}
  792. static int devx_umem_reg_cmd_alloc(struct devx_umem *obj,
  793. struct devx_umem_reg_cmd *cmd)
  794. {
  795. cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
  796. (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
  797. cmd->in = kvzalloc(cmd->inlen, GFP_KERNEL);
  798. return cmd->in ? 0 : -ENOMEM;
  799. }
/* Release the command buffer allocated by devx_umem_reg_cmd_alloc(). */
static void devx_umem_reg_cmd_free(struct devx_umem_reg_cmd *cmd)
{
	kvfree(cmd->in);
}
/*
 * Fill the CREATE_UMEM command: general-object header, umem geometry
 * (MTT count, page size, start offset) and the MTT array itself,
 * populated from the pinned pages of @obj->umem.
 */
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	/* MTT entries live directly after the umem context in the buffer. */
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	/* Firmware expresses page size relative to the adapter page shift. */
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	/* Write access is granted only if the umem itself is writable. */
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}
/*
 * Register a user memory region as a firmware UMEM object:
 * pin the pages, build and execute CREATE_UMEM, then report the
 * firmware-assigned object id back to userspace.
 *
 * Cleanup is strictly layered: a failure after the firmware object
 * exists (the copy_to below) must destroy it via the pre-built
 * destroy command before unwinding the pin and the allocations.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(struct ib_device *ib_dev,
			struct ib_uverbs_file *file,
			struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c = devx_ufile2uctx(file);
	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj;
	u32 obj_id;
	int err;

	/* Only contexts opened with DEVX privileges may register umems. */
	if (!c->devx_uid)
		return -EPERM;

	uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Pin the user pages and record their layout in obj. */
	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_reg_cmd_free;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	/* Pre-build the destroy command so cleanup paths can tear down. */
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	devx_umem_reg_cmd_free(&cmd);

	return 0;

err_umem_destroy:
	/* The firmware object exists by now — destroy it before unwinding. */
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_reg_cmd_free:
	devx_umem_reg_cmd_free(&cmd);
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}
/*
 * uobject cleanup callback for DEVX umems: destroy the firmware object,
 * then unpin the memory and free the tracking structure.
 *
 * If the destroy fails and the removal reason allows a retry, the error
 * is propagated and the umem/obj are kept so cleanup can run again.
 */
static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}
/*
 * ioctl() attribute schemas for the DEVX umem methods and the
 * context-global DEVX methods (QUERY_EQN, QUERY_UAR, OTHER).
 */

/* UMEM_REG: address/length/access in, firmware umem object id out. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* UMEM_DEREG: destroy-only method; cleanup runs devx_umem_cleanup(). */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* QUERY_EQN: map a user completion vector to a device EQ number. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* QUERY_UAR: map a user UAR index to a device UAR index. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* OTHER: pass-through firmware command (general object header framing). */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
/*
 * ioctl() attribute schemas for the DEVX object lifecycle methods.
 * All command buffers are framed by the firmware general-object header;
 * inputs use UA_ALLOC_AND_COPY so handlers get a kernel copy they may
 * stamp (e.g. the devx uid) before execution.
 */

/* OBJ_CREATE: new handle plus raw create command in/out. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* OBJ_DESTROY: destroy-only method; cleanup runs devx_obj_cleanup(). */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* OBJ_MODIFY: write access to the handle plus raw modify command. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* OBJ_QUERY: read access to the handle plus raw query command. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
/* Context-global DEVX methods (no uobject behind them). */
DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

/* IDR-backed DEVX object; devx_obj_cleanup() tears it down. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));

/* IDR-backed DEVX umem; devx_umem_cleanup() tears it down. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

/* The complete DEVX object tree exported to the uverbs core. */
DECLARE_UVERBS_OBJECT_TREE(devx_objects,
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));
/*
 * Expose the DEVX uverbs object tree so the core mlx5_ib code can merge
 * it into the device's ioctl() method specification.
 */
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
{
	return &devx_objects;
}