mlx5_ib.h
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define mlx5_ib_dbg(_dev, format, arg...)				\
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)				\
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)				\
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		 __LINE__, current->pid, ##arg)
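
/*
 * Usage sketch (illustrative): these wrap dev_dbg()/dev_err()/dev_warn()
 * and prefix each message with the function, line and current pid, e.g.:
 *
 *	mlx5_ib_warn(dev, "failed to create mkey, err %d\n", err);
 */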

#define field_avail(type, fld, sz) (offsetof(type, fld) +	\
				    sizeof(((type *)0)->fld) <= (sz))
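
/*
 * field_avail() tests whether a user command buffer of 'sz' bytes is long
 * enough to contain field 'fld' (the uverbs ABI grows by appending fields).
 * For example, field_avail(struct mlx5_ib_create_qp, uidx, inlen) is true
 * only when userspace passed a command long enough to include 'uidx'.
 */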

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES		= 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK	= 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};
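
/*
 * MEMIC (device memory): allocations are sized in MLX5_MEMIC_BASE_SIZE
 * (64 byte) granules and tracked in the dm_pages bitmap (per ucontext,
 * below) and the memic_alloc_pages bitmap (per device, in struct
 * mlx5_memic), each MLX5_MAX_MEMIC_PAGES bits long.
 */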

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	u64			lib_caps;
	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u16			uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table	*flow_table;
	unsigned int		refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params	matcher_mask;
	int				mask_len;
	enum mlx5_ib_flow_type		flow_type;
	enum mlx5_flow_namespace_type	ns_type;
	u16				priority;
	struct mlx5_core_dev		*mdev;
	atomic_t			usecnt;
	u8				match_criteria_enable;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when flow rules are added or deleted.
	 * Only a single add/removal of a flow steering rule can be
	 * done at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */
#define MLX5_IB_SEND_UMR_ENABLE_MR		(IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR		(IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE		(IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT		(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD		16
#define MLX5_IB_UMR_XLT_ALIGNMENT	64

#define MLX5_IB_UPD_XLT_ZAP		BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE		BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC		BIT(2)
#define MLX5_IB_UPD_XLT_ADDR		BIT(3)
#define MLX5_IB_UPD_XLT_PD		BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS		BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT	BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
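
/*
 * Usage sketch (illustrative): the GSI layer requests such a QP by
 * setting this flag in the init attributes before creating the
 * hardware QP:
 *
 *	init_attr.create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 */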

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	u64			*wrid;
	u32			*wr_data;
	struct wr_list		*w_list;
	unsigned		*wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void			*qend;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP	= 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ	= 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	int			create_type;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table	ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem		*umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg	*bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
	u32			*in;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf		bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32			underlay_qpn;
	u32			flags_en;
	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type		qp_sub_type;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf	frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
};

struct mlx5_umr_wr {
	struct ib_send_wr	wr;
	u64			virt_addr;
	u64			offset;
	struct ib_pd		*pd;
	unsigned int		page_shift;
	unsigned int		xlt_size;
	u64			length;
	int			access_flags;
	u32			mkey;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
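
/*
 * Posting sketch (illustrative, assumes a filled-in struct mlx5_umr_wr
 * umrwr): UMR work requests go to the driver's internal
 * MLX5_IB_QPT_REG_UMR QP with the reserved MLX5_IB_WR_UMR opcode, and
 * the WQE builder recovers the wrapper via umr_wr(wr):
 *
 *	umrwr.wr.opcode = MLX5_IB_WR_UMR;
 *	err = mlx5_ib_post_send(dev->umrc.qp, &umrwr.wr, &bad_wr);
 */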

struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem		*umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf	*resize_buf;
	struct ib_umem		*resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags	notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc	wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	u64			*wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem		*umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
	u16			uid;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
				   IB_ACCESS_REMOTE_WRITE  |\
				   IB_ACCESS_REMOTE_READ   |\
				   IB_ACCESS_REMOTE_ATOMIC |\
				   IB_ZERO_BASED)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem		*umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head	list;
	int			order;
	bool			allocated_from_cache;
	int			npages;
	struct mlx5_ib_dev	*dev;
	u32			out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
	int			live;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr	*parent;
	atomic_t		num_leaf_free;
	wait_queue_head_t	q_leaf_free;
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
	int			ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
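
/*
 * A sketch of the intent: umr_common.sem bounds how many UMR work
 * requests may be in flight on the shared UMR QP at once. A typical
 * caller does down(&umrc->sem), posts the WR, waits on the
 * mlx5_ib_umr_context completion signalled from the CQE handler, then
 * releases with up(&umrc->sem).
 */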

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry		*dir;
	char			name[4];
	u32			order;
	u32			xlt;
	u32			access_mode;
	u32			page;

	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry		*fsize;
	struct dentry		*fcur;
	struct dentry		*fmiss;
	struct dentry		*flimit;

	struct mlx5_ib_dev	*dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
	struct completion	compl;
};

struct mlx5_mr_cache {
	struct workqueue_struct	*wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp	*gsi;
	struct work_struct	pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
	bool set_id_valid;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state	last_port_state;
	struct mlx5_ib_dev	*dev;
	u8			native_port_num;
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u8			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_dbg_delay_drop {
	struct dentry		*dir_debugfs;
	struct dentry		*rqs_cnt_debugfs;
	struct dentry		*events_cnt_debugfs;
	struct dentry		*timeout_debugfs;
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev	*dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct mlx5_ib_dbg_delay_drop *dbg;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_SPECS,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_REP_REG,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}
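
/*
 * Illustrative profile definition (a sketch using only names declared
 * in this header; the actual tables live in the driver's main file):
 *
 *	static const struct mlx5_ib_profile pf_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     mlx5_ib_stage_init_init,
 *			     mlx5_ib_stage_init_cleanup),
 *		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 *			     mlx5_ib_stage_caps_init,
 *			     NULL),
 *		...
 *	};
 *
 * __mlx5_ib_add() runs the init callbacks in stage order, and
 * __mlx5_ib_remove() runs the cleanup callbacks in reverse.
 */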

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			u32 action_id;
		} flow_action_raw;
	};
};

struct mlx5_memic {
	struct mlx5_core_dev *dev;
	spinlock_t		memic_lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);

struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	const struct uverbs_object_tree_def *driver_trees[7];
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce[MLX5_MAX_PORTS];
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
	u32			null_mkey;
#endif
	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;
	struct mlx5_eswitch_rep		*rep;

	struct mlx5_ib_lb_state	lb;
	u8			umr_fence;
	struct list_head	ib_dev_list;
	u64			sys_image_guid;
	struct mlx5_memic	memic;
	u16			devx_whitelist_uid;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

/* Needed for rep profile */
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_act *flow_act, void *cmd_in, int inlen,
	int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}
static inline int
mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
{
	return 0;
}
static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
}
#endif

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
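
/*
 * Typical use (a sketch based on the MAD query helpers above): build a
 * SubnGet(PortInfo) request and hand it to mlx5_MAD_IFC():
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 *	in_mad->attr_mod = cpu_to_be32(port);
 *	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
 */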

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
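
/*
 * Worked example: convert_access(IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ) yields MLX5_PERM_LOCAL_WRITE |
 * MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ; local read permission
 * is always granted.
 */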

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
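
/*
 * In short: with CQE version 1 userspace must supply a valid uidx (not
 * the reserved MLX5_IB_DEFAULT_UIDX, and within
 * MLX5_USER_ASSIGNED_UIDX_MASK); with version 0 the uidx is ignored and
 * the default is used. get_qp_user_index() and get_srq_user_index()
 * below wrap this check for the QP and SRQ create commands.
 */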

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

#endif /* MLX5_IB_H */