devx.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)

struct devx_obj {
	struct mlx5_core_dev	*mdev;
	u32			obj_id;
	u32			dinlen; /* destroy inbox length */
	u32			dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
};

struct devx_umem {
	struct mlx5_core_dev	*mdev;
	struct ib_umem		*umem;
	u32			page_offset;
	int			page_shift;
	int			ncont;
	u32			dinlen;
	u32			dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void	*in;
	u32	inlen;
	u32	out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
{
	return to_mucontext(ib_uverbs_get_ucontext(file));
}
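
/*
 * Allocate a firmware UCTX object for this user context; the returned
 * object id becomes the devx uid that stamps every DEVX command issued
 * through this context.
 */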
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	u64 general_obj_types;
	void *hdr;
	int err;

	hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);

	general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
	if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
	    !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return 0;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
			  struct mlx5_ib_ucontext *context)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;

	default:
		return false;
	}
}
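
/*
 * Extract the object id carried in a modify/query command and check that it
 * refers to the devx object the command is being issued against.
 */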
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u32 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = MLX5_GET(query_cq_in, in, cqn);
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = MLX5_GET(modify_cq_in, in, cqn);
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = MLX5_GET(query_sq_in, in, sqn);
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = MLX5_GET(modify_sq_in, in, sqn);
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = MLX5_GET(query_rq_in, in, rqn);
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = MLX5_GET(modify_rq_in, in, rqn);
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = MLX5_GET(query_rmp_in, in, rmpn);
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = MLX5_GET(query_rqt_in, in, rqtn);
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = MLX5_GET(query_tir_in, in, tirn);
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = MLX5_GET(modify_tir_in, in, tirn);
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = MLX5_GET(query_tis_in, in, tisn);
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = MLX5_GET(modify_tis_in, in, tisn);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = MLX5_GET(query_flow_table_in, in, table_id);
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = MLX5_GET(query_flow_group_in, in, group_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = MLX5_GET(query_fte_in, in, flow_index);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = MLX5_GET(set_fte_in, in, flow_index);
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = MLX5_GET(query_scheduling_element_in, in,
				  scheduling_element_id);
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = MLX5_GET(modify_scheduling_element_in, in,
				  scheduling_element_id);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = MLX5_GET(query_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = MLX5_GET(qp_2err_in, in, qpn);
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = MLX5_GET(qp_2rst_in, in, qpn);
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = MLX5_GET(query_dct_in, in, dctn);
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
		obj_id = MLX5_GET(query_xrq_in, in, xrqn);
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = MLX5_GET(query_srq_in, in, srqn);
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = MLX5_GET(arm_rq_in, in, srq_number);
		break;
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = MLX5_GET(drain_dct_in, in, dctn);
		break;
	case MLX5_CMD_OP_ARM_XRQ:
		obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
		break;
	default:
		return false;
	}

	if (obj_id == obj->obj_id)
		return true;

	return false;
}
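
/*
 * Commands that create a firmware object (or table entry) and therefore may
 * be issued through MLX5_IB_METHOD_DEVX_OBJ_CREATE; anything else is rejected.
 */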
static bool devx_is_obj_create_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
		return true;
	default:
		return false;
	}
}
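
/*
 * Whitelist of general (non-object) HCA commands that may be issued through
 * the MLX5_IB_METHOD_DEVX_OTHER method; all are query/no-op commands.
 */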
static bool devx_is_general_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(struct ib_device *ib_dev,
							 struct ib_uverbs_file *file,
							 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}
/*
 * Security note:
 * The hardware protection mechanism works like this: each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then, upon a doorbell,
 * hardware fetches the object context for which the doorbell was rung and
 * validates that the UAR through which the doorbell was rung matches the UAR
 * ID of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except for tagging them with a UID), we expose the UAR ID to the
 * user so it can embed it in these objects in the format expected by the
 * specification. So the only thing the user can do is hurt itself by creating
 * a QP/SQ/CQ with a UAR ID other than its own, in which case other users may
 * ring a doorbell on its objects.
 * The consequence is that another user can schedule a QP/SQ of the buggy user
 * for execution (just insert it into the hardware schedule queue or arm its
 * CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(struct ib_device *ib_dev,
							 struct ib_uverbs_file *file,
							 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}
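
/*
 * Pass a whitelisted general HCA command through to the firmware, stamped
 * with the caller's devx uid, and copy the firmware output back to the user.
 */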
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(struct ib_device *ib_dev,
						     struct ib_uverbs_file *file,
						     struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (!c->devx_uid)
		return -EPERM;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in))
		return -EINVAL;

	cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
	if (!cmd_out)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto other_cmd_free;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len);

other_cmd_free:
	kvfree(cmd_out);
	return err;
}
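
/*
 * Derive the matching destroy/dealloc command from the create command and its
 * firmware output, and cache it in the object's destroy inbox (dinbox) so the
 * object can later be torn down without any help from userspace.
 */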
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match one of the opcodes accepted by devx_is_obj_create_cmd() */
		WARN_ON(true);
		break;
	}
}
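
/*
 * Destroy the firmware object by replaying the destroy command that was
 * cached in dinbox when the object was created.
 */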
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj = uobject->object;
	int ret;

	ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	kfree(obj);
	return ret;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(struct ib_device *ib_dev,
							   struct ib_uverbs_file *file,
							   struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct devx_obj *obj;
	int err;

	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_create_cmd(cmd_in))
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
	if (!cmd_out) {
		err = -ENOMEM;
		goto obj_free;
	}

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto cmd_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto cmd_free;

	kvfree(cmd_out);
	return 0;

cmd_free:
	kvfree(cmd_out);
obj_free:
	kfree(obj);
	return err;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(struct ib_device *ib_dev,
							   struct ib_uverbs_file *file,
							   struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct devx_obj *obj = uobj->object;
	void *cmd_out;
	int err;

	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(obj, cmd_in))
		return -EINVAL;

	cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
	if (!cmd_out)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(obj->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto other_cmd_free;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			     cmd_out, cmd_out_len);

other_cmd_free:
	kvfree(cmd_out);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(struct ib_device *ib_dev,
							  struct ib_uverbs_file *file,
							  struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct devx_obj *obj = uobj->object;
	void *cmd_out;
	int err;

	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(obj, cmd_in))
		return -EINVAL;

	cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
	if (!cmd_out)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(obj->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto other_cmd_free;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, cmd_out, cmd_out_len);

other_cmd_free:
	kvfree(cmd_out);
	return err;
}
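
/*
 * Pin the user buffer described by the UMEM_REG attributes and compute the
 * page shift, page count and page offset needed to build the UMEM mailbox.
 */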
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	int access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN) ||
	    uverbs_copy_from(&access, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS))
		return -EFAULT;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		     (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = kvzalloc(cmd->inlen, GFP_KERNEL);
	return cmd->in ? 0 : -ENOMEM;
}

static void devx_umem_reg_cmd_free(struct devx_umem_reg_cmd *cmd)
{
	kvfree(cmd->in);
}
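
/*
 * Fill the CREATE_GENERAL_OBJECT(UMEM) mailbox, including the MTT entries
 * that map the pinned pages.
 */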
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(struct ib_device *ib_dev,
							struct ib_uverbs_file *file,
							struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EPERM;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_reg_cmd_free;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	devx_umem_reg_cmd_free(&cmd);

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_reg_cmd_free:
	devx_umem_reg_cmd_free(&cmd);
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}
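
/*
 * Declarations of the DEVX ioctl() methods, their attributes, and the object
 * tree that exposes them through the uverbs ioctl interface.
 */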
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_OBJECT_TREE(devx_objects,
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));

const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
{
	return &devx_objects;
}