qed_rdma_if.h

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QED_RDMA_IF_H
#define _QED_RDMA_IF_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>

enum qed_roce_ll2_tx_dest {
	/* Light L2 TX Destination to the Network */
	QED_ROCE_LL2_TX_DEST_NW,
	/* Light L2 TX Destination to the Loopback */
	QED_ROCE_LL2_TX_DEST_LB,
	QED_ROCE_LL2_TX_DEST_MAX
};

#define QED_RDMA_MAX_CNQ_SIZE	(0xFFFF)

/* rdma interface */
enum qed_roce_qp_state {
	QED_ROCE_QP_STATE_RESET,
	QED_ROCE_QP_STATE_INIT,
	QED_ROCE_QP_STATE_RTR,
	QED_ROCE_QP_STATE_RTS,
	QED_ROCE_QP_STATE_SQD,
	QED_ROCE_QP_STATE_ERR,
	QED_ROCE_QP_STATE_SQE
};

enum qed_rdma_tid_type {
	QED_RDMA_TID_REGISTERED_MR,
	QED_RDMA_TID_FMR,
	QED_RDMA_TID_MW
};

struct qed_rdma_events {
	void *context;
	void (*affiliated_event)(void *context, u8 fw_event_code,
				 void *fw_handle);
	void (*unaffiliated_event)(void *context, u8 event_code);
};
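
/* Illustrative sketch only (not part of this interface): a consumer fills
 * qed_rdma_events with its two callbacks and an opaque context; the callback
 * and variable names used here are hypothetical:
 *
 *	struct qed_rdma_events events = {
 *		.context		= my_dev,
 *		.affiliated_event	= my_affiliated_event,
 *		.unaffiliated_event	= my_unaffiliated_event,
 *	};
 */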
struct qed_rdma_device {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;
	u64 node_guid;
	u64 sys_image_guid;
	u8 max_cnq;
	u8 max_sge;
	u8 max_srq_sge;
	u16 max_inline;
	u32 max_wqe;
	u32 max_srq_wqe;
	u8 max_qp_resp_rd_atomic_resc;
	u8 max_qp_req_rd_atomic_resc;
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_srq;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_fmr;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u16 max_srq_wr;
	u8 max_stats_queues;
	u32 dev_caps;

	/* Ability to support RNR-NAK generation */
#define QED_RDMA_DEV_CAP_RNR_NAK_MASK			0x1
#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK		0x1
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT		1
	/* Ability to support port active event */
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT	2
	/* Ability to support port change event */
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT	3
	/* Ability to support system image GUID */
#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT		4
	/* Ability to support bad P_Key counter */
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK		0x1
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT		5
	/* Ability to support atomic operations */
#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT		6
#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT		7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK		0x1
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT		8
	/* Ability to support automatic path migration */
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK		0x1
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT		9
	/* Ability to support the base memory management extensions */
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK		0x1
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK		0x1
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT		11
	/* Ability to support multiple page sizes per memory region */
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK	0x1
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT	12
	/* Ability to support block list physical buffer list */
#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK		0x1
#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT		13
	/* Ability to support zero based virtual addresses */
#define QED_RDMA_DEV_CAP_ZBVA_MASK			0x1
#define QED_RDMA_DEV_CAP_ZBVA_SHIFT			14
	/* Ability to support local invalidate fencing */
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK		0x1
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support Loopback on QP */
#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK		0x1
#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT		16
	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;
};
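
/* Usage note (illustrative sketch, not part of this interface): each
 * capability in dev_caps is a single bit described by the MASK/SHIFT pairs
 * above, so a consumer would typically test it like this (the "dev" pointer
 * is hypothetical):
 *
 *	bool atomics_supported =
 *		(dev->dev_caps >> QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *		QED_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 */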
enum qed_port_state {
	QED_RDMA_PORT_UP,
	QED_RDMA_PORT_DOWN,
};

enum qed_roce_capability {
	QED_ROCE_V1 = 1 << 0,
	QED_ROCE_V2 = 1 << 1,
};

struct qed_rdma_port {
	enum qed_port_state port_state;
	int link_speed;
	u64 max_msg_size;
	u8 source_gid_table_len;
	void *source_gid_table_ptr;
	u8 pkey_table_len;
	void *pkey_table_ptr;
	u32 pkey_bad_counter;
	enum qed_roce_capability capability;
};

struct qed_rdma_cnq_params {
	u8 num_pbl_pages;
	u64 pbl_ptr;
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum qed_rdma_cq_mode {
	QED_RDMA_CQ_MODE_16_BITS,
	QED_RDMA_CQ_MODE_32_BITS,
};
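
/* Illustrative sketch only: following the comment above, a caller could pick
 * the CQ mode from the machine word size, e.g.:
 *
 *	enum qed_rdma_cq_mode cq_mode = IS_ENABLED(CONFIG_64BIT) ?
 *		QED_RDMA_CQ_MODE_32_BITS : QED_RDMA_CQ_MODE_16_BITS;
 */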
struct qed_roce_dcqcn_params {
	u8 notification_point;
	u8 reaction_point;
	/* fields for notification point */
	u32 cnp_send_timeout;
	/* fields for reaction point */
	u32 rl_bc_rate;
	u16 rl_max_rate;
	u16 rl_r_ai;
	u16 rl_r_hai;
	u16 dcqcn_g;
	u32 dcqcn_k_us;
	u32 dcqcn_timeout_us;
};

struct qed_rdma_start_in_params {
	struct qed_rdma_events *events;
	struct qed_rdma_cnq_params cnq_pbl_list[128];
	u8 desired_cnq;
	enum qed_rdma_cq_mode cq_mode;
	struct qed_roce_dcqcn_params dcqcn_params;
	u16 max_mtu;
	u8 mac_addr[ETH_ALEN];
	u8 iwarp_flags;
};

struct qed_rdma_add_user_out_params {
	u16 dpi;
	u64 dpi_addr;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 wid_count;
};

enum roce_mode {
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

union qed_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};
struct qed_rdma_register_tid_in_params {
	u32 itid;
	enum qed_rdma_tid_type tid_type;
	u8 key;
	u16 pd;
	bool local_read;
	bool local_write;
	bool remote_read;
	bool remote_write;
	bool remote_atomic;
	bool mw_bind;
	u64 pbl_ptr;
	bool pbl_two_level;
	u8 pbl_page_size_log;
	u8 page_size_log;
	u32 fbo;
	u64 length;
	u64 vaddr;
	bool zbva;
	bool phy_mr;
	bool dma_mr;
	bool dif_enabled;
	u64 dif_error_addr;
};

struct qed_rdma_create_cq_in_params {
	u32 cq_handle_lo;
	u32 cq_handle_hi;
	u32 cq_size;
	u16 dpi;
	bool pbl_two_level;
	u64 pbl_ptr;
	u16 pbl_num_pages;
	u8 pbl_page_size_log;
	u8 cnq_id;
	u16 int_timeout;
};

struct qed_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;
};

struct qed_rdma_destroy_cq_in_params {
	u16 icid;
};

struct qed_rdma_destroy_cq_out_params {
	u16 num_cq_notif;
};

struct qed_rdma_create_qp_in_params {
	u32 qp_handle_lo;
	u32 qp_handle_hi;
	u32 qp_handle_async_lo;
	u32 qp_handle_async_hi;
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;
	u16 pd;
	u16 dpi;
	u16 sq_cq_id;
	u16 sq_num_pages;
	u64 sq_pbl_ptr;
	u8 max_sq_sges;
	u16 rq_cq_id;
	u16 rq_num_pages;
	u64 rq_pbl_ptr;
	u16 srq_id;
	u8 stats_queue;
};

struct qed_rdma_create_qp_out_params {
	u32 qp_id;
	u16 icid;
	void *rq_pbl_virt;
	dma_addr_t rq_pbl_phys;
	void *sq_pbl_virt;
	dma_addr_t sq_pbl_phys;
};

struct qed_rdma_modify_qp_in_params {
	u32 modify_flags;
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK			0x1
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT		0
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT			1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK		0x1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT		2
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT			3
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT		4
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT			5
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT			6
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK		0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT	7
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK	0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT	8
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT		9
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT		10
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT		11
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT	12
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK	0x1
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT	13
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT		14
	enum qed_roce_qp_state new_state;
	u16 pkey;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	u32 dest_qp;
	bool lb_indication;
	u16 mtu;
	u8 traffic_class_tos;
	u8 hop_limit_ttl;
	u32 flow_label;
	union qed_gid sgid;
	union qed_gid dgid;
	u16 udp_src_port;
	u16 vlan_id;
	u32 rq_psn;
	u32 sq_psn;
	u8 max_rd_atomic_resp;
	u8 max_rd_atomic_req;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
	bool use_local_mac;
	enum roce_mode roce_mode;
};
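
/* Illustrative sketch only: modify_flags is a bitmap in which each VALID bit,
 * described by the MASK/SHIFT pairs above, tells the lower layer which of the
 * fields below carries a meaningful value. Marking the new state and RQ PSN
 * as valid, for example, could look like this (the "params" variable is
 * hypothetical):
 *
 *	struct qed_rdma_modify_qp_in_params params = {};
 *
 *	params.new_state = QED_ROCE_QP_STATE_RTR;
 *	params.rq_psn = 0x1000;
 *	params.modify_flags =
 *		(QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *		 QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
 *		(QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK <<
 *		 QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT);
 */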
struct qed_rdma_query_qp_out_params {
	enum qed_roce_qp_state state;
	u32 rq_psn;
	u32 sq_psn;
	bool draining;
	u16 mtu;
	u32 dest_qp;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	union qed_gid sgid;
	union qed_gid dgid;
	u32 flow_label;
	u8 hop_limit_ttl;
	u8 traffic_class_tos;
	u32 timeout;
	u8 rnr_retry;
	u8 retry_cnt;
	u8 min_rnr_nak_timer;
	u16 pkey_index;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	bool sqd_async;
};

struct qed_rdma_create_srq_out_params {
	u16 srq_id;
};

struct qed_rdma_destroy_srq_in_params {
	u16 srq_id;
};

struct qed_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
};

struct qed_rdma_stats_out_params {
	u64 sent_bytes;
	u64 sent_pkts;
	u64 rcv_bytes;
	u64 rcv_pkts;
};

struct qed_rdma_counters_out_params {
	u64 pd_count;
	u64 max_pd;
	u64 dpi_count;
	u64 max_dpi;
	u64 cq_count;
	u64 max_cq;
	u64 qp_count;
	u64 max_qp;
	u64 tid_count;
	u64 max_tid;
};

#define QED_ROCE_TX_HEAD_FAILURE	(1)
#define QED_ROCE_TX_FRAG_FAILURE	(2)

enum qed_iwarp_event_type {
	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
	QED_IWARP_EVENT_ACTIVE_COMPLETE,  /* Active side reply received */
	QED_IWARP_EVENT_DISCONNECT,
	QED_IWARP_EVENT_CLOSE,
	QED_IWARP_EVENT_IRQ_FULL,
	QED_IWARP_EVENT_RQ_EMPTY,
	QED_IWARP_EVENT_LLP_TIMEOUT,
	QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	QED_IWARP_EVENT_CQ_OVERFLOW,
	QED_IWARP_EVENT_QP_CATASTROPHIC,
	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
	QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	QED_IWARP_EVENT_TERMINATE_RECEIVED,
	QED_IWARP_EVENT_SRQ_LIMIT,
	QED_IWARP_EVENT_SRQ_EMPTY,
};

enum qed_tcp_ip_version {
	QED_TCP_IPV4,
	QED_TCP_IPV6,
};

struct qed_iwarp_cm_info {
	enum qed_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	u8 ord;
	u8 ird;
	u16 private_data_len;
	const void *private_data;
};

struct qed_iwarp_cm_event_params {
	enum qed_iwarp_event_type event;
	const struct qed_iwarp_cm_info *cm_info;
	void *ep_context;	/* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler) (void *context,
				    struct qed_iwarp_cm_event_params *event);
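
/* Illustrative sketch only: an upper-layer driver supplies an
 * iwarp_event_handler when connecting or listening, and the callback is then
 * invoked with the opaque cb_context it registered. On
 * QED_IWARP_EVENT_MPA_REQUEST the passive side would keep event->ep_context
 * for a later accept or reject call. A minimal handler (the function name is
 * hypothetical) might look like:
 *
 *	static int my_iwarp_event(void *context,
 *				  struct qed_iwarp_cm_event_params *event)
 *	{
 *		switch (event->event) {
 *		case QED_IWARP_EVENT_MPA_REQUEST:
 *		case QED_IWARP_EVENT_DISCONNECT:
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */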
struct qed_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct qed_rdma_qp *qp;
	struct qed_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
};

struct qed_iwarp_connect_out {
	void *ep_context;
};

struct qed_iwarp_listen_in {
	iwarp_event_handler event_cb;
	void *cb_context;	/* passed to event_cb */
	u32 max_backlog;
	enum qed_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct qed_iwarp_listen_out {
	void *handle;
};

struct qed_iwarp_accept_in {
	void *ep_context;
	void *cb_context;
	struct qed_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct qed_iwarp_reject_in {
	void *ep_context;
	void *cb_context;
	const void *private_data;
	u16 private_data_len;
};

struct qed_iwarp_send_rtr_in {
	void *ep_context;
};

struct qed_roce_ll2_header {
	void *vaddr;
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_buffer {
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_packet {
	struct qed_roce_ll2_header header;
	int n_seg;
	struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
	int roce_mode;
	enum qed_roce_ll2_tx_dest tx_dest;
};

enum qed_rdma_type {
	QED_RDMA_TYPE_ROCE,
	QED_RDMA_TYPE_IWARP
};

struct qed_dev_rdma_info {
	struct qed_dev_info common;
	enum qed_rdma_type rdma_type;
	u8 user_dpm_enabled;
};

struct qed_rdma_ops {
	const struct qed_common_ops *common;

	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_rdma_info *info);
	void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);

	int (*rdma_init)(struct qed_dev *dev,
			 struct qed_rdma_start_in_params *iparams);

	int (*rdma_add_user)(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *oparams);

	void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
	int (*rdma_stop)(void *rdma_cxt);
	struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
	struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);

	int (*rdma_get_start_sb)(struct qed_dev *cdev);
	int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
	void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
	int (*rdma_get_rdma_int)(struct qed_dev *cdev,
				 struct qed_int_info *info);
	int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
	int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
	void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);

	int (*rdma_create_cq)(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid);
	int (*rdma_destroy_cq)(void *rdma_cxt,
			       struct qed_rdma_destroy_cq_in_params *iparams,
			       struct qed_rdma_destroy_cq_out_params *oparams);

	struct qed_rdma_qp *
	(*rdma_create_qp)(void *rdma_cxt,
			  struct qed_rdma_create_qp_in_params *iparams,
			  struct qed_rdma_create_qp_out_params *oparams);

	int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *iparams);

	int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *oparams);
	int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);

	int
	(*rdma_register_tid)(void *rdma_cxt,
			     struct qed_rdma_register_tid_in_params *iparams);

	int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
	int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
	void (*rdma_free_tid)(void *rdma_cxt, u32 itid);

	int (*rdma_create_srq)(void *rdma_cxt,
			       struct qed_rdma_create_srq_in_params *iparams,
			       struct qed_rdma_create_srq_out_params *oparams);
	int (*rdma_destroy_srq)(void *rdma_cxt,
				struct qed_rdma_destroy_srq_in_params *iparams);
	int (*rdma_modify_srq)(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *iparams);

	int (*ll2_acquire_connection)(void *rdma_cxt,
				      struct qed_ll2_acquire_data *data);
	int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
	int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
	void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);

	int (*ll2_prepare_tx_packet)(void *rdma_cxt,
				     u8 connection_handle,
				     struct qed_ll2_tx_pkt_info *pkt,
				     bool notify_fw);
	int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
					     u8 connection_handle,
					     dma_addr_t addr,
					     u16 nbytes);
	int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
				  dma_addr_t addr, u16 buf_len, void *cookie,
				  u8 notify_fw);
	int (*ll2_get_stats)(void *rdma_cxt,
			     u8 connection_handle,
			     struct qed_ll2_stats *p_stats);
	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
				  u8 *old_mac_address, u8 *new_mac_address);

	int (*iwarp_connect)(void *rdma_cxt,
			     struct qed_iwarp_connect_in *iparams,
			     struct qed_iwarp_connect_out *oparams);
	int (*iwarp_create_listen)(void *rdma_cxt,
				   struct qed_iwarp_listen_in *iparams,
				   struct qed_iwarp_listen_out *oparams);
	int (*iwarp_accept)(void *rdma_cxt,
			    struct qed_iwarp_accept_in *iparams);
	int (*iwarp_reject)(void *rdma_cxt,
			    struct qed_iwarp_reject_in *iparams);
	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
	int (*iwarp_send_rtr)(void *rdma_cxt,
			      struct qed_iwarp_send_rtr_in *iparams);
};

const struct qed_rdma_ops *qed_get_rdma_ops(void);
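
/* Illustrative sketch only: an RDMA upper-layer driver obtains this ops table
 * once via qed_get_rdma_ops() and then drives the qed core through it;
 * dev_info.rdma_type reports whether the device runs RoCE or iWARP. The error
 * handling and the "cdev" variable here are hypothetical:
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	struct qed_dev_rdma_info dev_info;
 *
 *	if (!ops)
 *		return -EINVAL;
 *	if (ops->fill_dev_info(cdev, &dev_info))
 *		return -EINVAL;
 */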
#endif