/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

/*
 * Shared Receive Queue context tracked by the qplib layer.
 * Pairs the HW ring (hwq) with per-WQE software state (swq).
 */
struct bnxt_qplib_srq {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;	/* doorbell page info */
	void __iomem			*dbr_base;	/* doorbell register base */
	u64				srq_handle;	/* opaque handle reported back in NQ events */
	u32				id;	/* HW SRQ id */
	u32				max_wqe;
	u32				max_sge;
	u32				threshold;	/* SRQ limit; NOTE(review): presumably the arm watermark — confirm in qplib_fp.c */
	bool				arm_req;	/* arm request pending */
	struct bnxt_qplib_cq		*cq;
	struct bnxt_qplib_hwq		hwq;	/* HW ring backing the SRQ */
	struct bnxt_qplib_swq		*swq;	/* per-entry software context (wr_id etc.) */
	struct scatterlist		*sglist;
	int				start_idx;	/* head of the free SRQE link list */
	int				last_idx;	/* tail of the free SRQE link list */
	u32				nmap;
	u16				eventq_hw_ring_id;
	spinlock_t			lock;	/* protect SRQE link list */
};
/* Scatter/gather element: DMA address, local key and byte length. */
struct bnxt_qplib_sge {
	u64				addr;
	u32				lkey;
	u32				size;
};
  65. #define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE sizeof(struct sq_send)
  66. #define SQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
  67. #define SQE_MAX_IDX_PER_PG (SQE_CNT_PER_PG - 1)
  68. static inline u32 get_sqe_pg(u32 val)
  69. {
  70. return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
  71. }
  72. static inline u32 get_sqe_idx(u32 val)
  73. {
  74. return (val & SQE_MAX_IDX_PER_PG);
  75. }
  76. #define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE sizeof(struct sq_psn_search)
  77. #define PSNE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
  78. #define PSNE_MAX_IDX_PER_PG (PSNE_CNT_PER_PG - 1)
  79. static inline u32 get_psne_pg(u32 val)
  80. {
  81. return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
  82. }
  83. static inline u32 get_psne_idx(u32 val)
  84. {
  85. return (val & PSNE_MAX_IDX_PER_PG);
  86. }
#define BNXT_QPLIB_QP_MAX_SGL	6

/*
 * Per-WQE software context: everything the driver must remember about
 * a posted WQE that the HW ring itself does not carry.
 */
struct bnxt_qplib_swq {
	u64				wr_id;	/* caller's work-request id, returned in the CQE */
	int				next_idx;
	u8				type;	/* BNXT_QPLIB_SWQE_TYPE_* of the posted WQE */
	u8				flags;
	u32				start_psn;	/* first PSN consumed by this WQE */
	u32				next_psn;	/* PSN following this WQE */
	struct sq_psn_search		*psn_search;	/* HW PSN-search entry for retransmit lookup */
};
/*
 * Software work-queue element: the driver-internal, HW-independent
 * description of one work request. The per-opcode payload lives in the
 * anonymous union; 'type' selects which member is valid.
 */
struct bnxt_qplib_swqe {
	/* General */
#define BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
	u64				wr_id;
	u8				reqs_type;
	u8				type;	/* opcode, one of the values below */
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13	/* alias: same HW opcode as FAST_REG_MR */
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8				flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge		sg_list[BNXT_QPLIB_QP_MAX_SGL];
	int				num_sge;	/* valid entries in sg_list */
	/* Max inline data is 96 bytes */
	u32				inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH		96
	u8				inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

	union {
		/* Send, with imm, inval key */
		struct {
			union {
				__be32	imm_data;
				u32	inv_key;
			};
			u32		q_key;
			u32		dst_qp;
			u16		avid;	/* address-handle id for UD sends */
		} send;
		/* Send Raw Ethernet and QP1 */
		struct {
			u16		lflags;
			u16		cfa_action;
			u32		cfa_meta;
		} rawqp1;
		/* RDMA write, with imm, read */
		struct {
			union {
				__be32	imm_data;
				u32	inv_key;
			};
			u64		remote_va;
			u32		r_key;
		} rdma;
		/* Atomic cmp/swap, fetch/add */
		struct {
			u64		remote_va;
			u32		r_key;
			u64		swap_data;
			u64		cmp_data;
		} atomic;
		/* Local Invalidate */
		struct {
			u32		inv_l_key;
		} local_inv;
		/* FR-PMR */
		struct {
			u8		access_cntl;
			u8		pg_sz_log;	/* log2-coded page size, see values below */
			bool		zero_based;
			u32		l_key;
			u32		length;
			u8		pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K			0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K			1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K			4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K			6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M			8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M			9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M			10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G			18
			u8		levels;	/* PBL indirection depth */
#define PAGE_SHIFT_4K	12
			__le64		*pbl_ptr;	/* CPU pointer to the page-buffer list */
			dma_addr_t	pbl_dma_ptr;	/* DMA address of the same list */
			u64		*page_list;
			u16		page_list_len;
			u64		va;
		} frmr;
		/* Bind */
		struct {
			u8		access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
			bool		zero_based;
			u8		mw_type;
			u32		parent_l_key;
			u32		r_key;
			u64		va;
			u32		length;
		} bind;
	};
};
#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE	sizeof(struct rq_wqe)
#define RQE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
#define RQE_MAX_IDX_PER_PG	(RQE_CNT_PER_PG - 1)
#define RQE_PG(x)	(((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
#define RQE_IDX(x)	((x) & RQE_MAX_IDX_PER_PG)

/*
 * One side of a QP (SQ or RQ): the HW ring plus the parallel software
 * context array and flow-control/flush bookkeeping.
 */
struct bnxt_qplib_q {
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;	/* parallel array, indexed like hwq entries */
	struct scatterlist		*sglist;
	u32				nmap;
	u32				max_wqe;
	u16				q_full_delta;	/* slack kept between prod and cons, see bnxt_qplib_queue_full() */
	u16				max_sge;
	u32				psn;	/* next PSN to assign (SQ side) */
	bool				condition;
	bool				single;
	bool				send_phantom;	/* phantom-WQE workaround state; semantics in qplib_fp.c */
	u32				phantom_wqe_cnt;
	u32				phantom_cqe_cnt;
	u32				next_cq_cons;
	bool				flushed;	/* queue already placed on a flush list */
};
/*
 * Queue Pair context: caches the modify_qp attributes, owns the SQ/RQ
 * rings, and holds the QP1 header buffers and flush-list linkage.
 */
struct bnxt_qplib_qp {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;	/* doorbell page info */
	u64				qp_handle;	/* opaque handle reported back in CQEs/events */
#define BNXT_QPLIB_QP_ID_INVALID	0xFFFFFFFF
	u32				id;	/* HW QP id */
	u8				type;
	u8				sig_type;	/* all-signaled vs. selectively-signaled completions */
	u32				modify_flags;	/* CMDQ_MODIFY_QP_MODIFY_MASK_* bits to apply */
	u8				state;
	u8				cur_qp_state;
	u32				max_inline_data;
	u32				mtu;	/* MTU in bytes */
	u8				path_mtu;	/* enum-coded path MTU */
	bool				en_sqd_async_notify;
	u16				pkey_index;
	u32				qkey;
	u32				dest_qp_id;
	u8				access;	/* remote access flags */
	u8				timeout;
	u8				retry_cnt;
	u8				rnr_retry;
	u64				wqe_cnt;
	u32				min_rnr_timer;
	u32				max_rd_atomic;	/* outstanding RDMA reads/atomics as initiator */
	u32				max_dest_rd_atomic;	/* ... as responder */
	u32				dest_qpn;
	u8				smac[6];	/* source MAC */
	u16				vlan_id;
	u8				nw_type;	/* RoCE v1 / v2 IPv4 / v2 IPv6 */
	struct bnxt_qplib_ah		ah;
#define BTH_PSN_MASK			((1 << 24) - 1)	/* PSNs are 24 bits wide */
	/* SQ */
	struct bnxt_qplib_q		sq;
	/* RQ */
	struct bnxt_qplib_q		rq;
	/* SRQ */
	struct bnxt_qplib_srq		*srq;
	/* CQ */
	struct bnxt_qplib_cq		*scq;
	struct bnxt_qplib_cq		*rcq;
	/* IRRQ and ORRQ */
	struct bnxt_qplib_hwq		irrq;
	struct bnxt_qplib_hwq		orrq;
	/* Header buffer for QP1 */
	int				sq_hdr_buf_size;
	int				rq_hdr_buf_size;
/*
 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
 * and ib_bth + ib_deth (20).
 * Max required is 82 when RoCE V2 is enabled
 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2	86
	/* Ethernet header	=  14 */
	/* ib_grh		=  40 (provided by MAD) */
	/* ib_bth + ib_deth	=  20 */
	/* MAD			= 256 (provided by MAD) */
	/* iCRC			=   4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE	14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2	512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4	20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6	40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE	20
	void				*sq_hdr_buf;
	dma_addr_t			sq_hdr_buf_map;
	void				*rq_hdr_buf;
	dma_addr_t			rq_hdr_buf_map;
	struct list_head		sq_flush;	/* link on scq->sqf_head flush list */
	struct list_head		rq_flush;	/* link on rcq->rqf_head flush list */
};
  299. #define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
  300. #define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
  301. #define CQE_MAX_IDX_PER_PG (CQE_CNT_PER_PG - 1)
  302. #define CQE_PG(x) (((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
  303. #define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)
  304. #define ROCE_CQE_CMP_V 0
  305. #define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \
  306. (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
  307. !((raw_cons) & (cp_bit)))
  308. static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
  309. {
  310. return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
  311. &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
  312. &qplib_q->hwq);
  313. }
/*
 * Decoded completion entry handed up to the verbs layer by
 * bnxt_qplib_poll_cq(); the raweth_qp1_* fields are only meaningful
 * for raw-Ethernet/QP1 completions.
 */
struct bnxt_qplib_cqe {
	u8				status;
	u8				type;
	u8				opcode;
	u32				length;
	u64				wr_id;	/* wr_id of the completed WQE */
	union {
		__be32			immdata;
		u32			invrkey;
	};
	u64				qp_handle;	/* bnxt_qplib_qp.qp_handle of the owner QP */
	u64				mr_handle;
	u16				flags;
	u8				smac[6];
	u32				src_qp;
	u16				raweth_qp1_flags;
	u16				raweth_qp1_errors;
	u16				raweth_qp1_cfa_code;
	u32				raweth_qp1_flags2;
	u32				raweth_qp1_metadata;
	u8				raweth_qp1_payload_offset;
	u16				pkey_index;
};
#define BNXT_QPLIB_QUEUE_START_PERIOD		0x01

/* Completion Queue context: HW ring, NQ association and flush lists. */
struct bnxt_qplib_cq {
	struct bnxt_qplib_dpi		*dpi;	/* doorbell page info */
	void __iomem			*dbr_base;	/* doorbell register base */
	u32				max_wqe;
	u32				id;	/* HW CQ id */
	u16				count;	/* coalescing: completions per notification */
	u16				period;	/* coalescing: notification period */
	struct bnxt_qplib_hwq		hwq;
	u32				cnq_hw_ring_id;
	struct bnxt_qplib_nq		*nq;	/* NQ that delivers this CQ's notifications */
	bool				resize_in_progress;
	struct scatterlist		*sghead;
	u32				nmap;
	u64				cq_handle;	/* opaque handle reported back in NQ events */

#define CQ_RESIZE_WAIT_TIME_MS		500
	unsigned long			flags;
#define CQ_FLAGS_RESIZE_IN_PROG		1
	wait_queue_head_t		waitq;	/* waiters for resize completion */
	struct list_head		sqf_head, rqf_head;	/* SQ/RQ flush lists of errored QPs */
	atomic_t			arm_state;
	spinlock_t			compl_lock; /* synch CQ handlers */
/* Locking Notes:
 * QP can move to error state from modify_qp, async error event or error
 * CQE as part of poll_cq. When QP is moved to error state, it gets added
 * to two flush lists, one each for SQ and RQ.
 * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
 * flush_locks should be acquired when QP is moved to error. The control path
 * operations(modify_qp and async error events) are synchronized with poll_cq
 * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
 * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
 * of the same QP while manipulating the flush list.
 */
	spinlock_t			flush_lock; /* QP flush management */
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE	sizeof(struct xrrq_orrq)
/* Convert between verbs rd_atomic limits and IRRQ/ORRQ ring slot counts. */
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)

#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE	sizeof(struct nq_base)

#define NQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG	(NQE_CNT_PER_PG - 1)
#define NQE_PG(x)		(((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)		((x) & NQE_MAX_IDX_PER_PG)

/* NQE is valid when its V bit matches the (inverted) ring-wrap parity. */
#define NQE_CMP_VALID(hdr, raw_cons, cp_bit)			\
	(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==	\
	   !((raw_cons) & (cp_bit)))

#define BNXT_QPLIB_NQE_MAX_CNT		(128 * 1024)

#define NQ_CONS_PCI_BAR_REGION	2
#define NQ_DB_KEY_CP		(0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID		CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS		CMPL_DOORBELL_MASK
/* REARM leaves interrupts enabled; NQ_DB_CP_FLAGS also masks the IRQ. */
#define NQ_DB_CP_FLAGS_REARM	(NQ_DB_KEY_CP |		\
				 NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS		(NQ_DB_KEY_CP    |	\
				 NQ_DB_IDX_VALID |	\
				 NQ_DB_IRQ_DIS)
#define NQ_DB_REARM(db, raw_cons, cp_bit)			\
	writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
#define NQ_DB(db, raw_cons, cp_bit)				\
	writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
/*
 * Notification Queue context: one MSI-X vector's event ring plus the
 * callbacks that dispatch CQ and SRQ notifications upward.
 */
struct bnxt_qplib_nq {
	struct pci_dev			*pdev;
	int				vector;	/* MSI-X vector servicing this NQ */
	cpumask_t			mask;
	int				budget;	/* max NQEs processed per tasklet run */
	bool				requested;	/* IRQ successfully requested */
	struct tasklet_struct		worker;
	struct bnxt_qplib_hwq		hwq;
	u16				bar_reg;
	u16				bar_reg_off;
	u16				ring_id;
	void __iomem			*bar_reg_iomem;	/* mapped doorbell for NQ_DB()/NQ_DB_REARM() */
	/* called when a CQ notification arrives on this NQ */
	int				(*cqn_handler)(struct bnxt_qplib_nq *nq,
						       struct bnxt_qplib_cq *cq);
	/* called when an SRQ event ('event' carries the NQ event type) arrives */
	int				(*srqn_handler)(struct bnxt_qplib_nq *nq,
							struct bnxt_qplib_srq *srq,
							u8 event);
	struct workqueue_struct		*cqn_wq;
	char				name[32];	/* IRQ name */
};
/* Deferred CQ-notification work item queued on nq->cqn_wq. */
struct bnxt_qplib_nq_work {
	struct work_struct	work;
	struct bnxt_qplib_nq	*nq;
	struct bnxt_qplib_cq	*cq;
};
/* NQ setup/teardown */
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
					    struct bnxt_qplib_cq *cq),
			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
					     struct bnxt_qplib_srq *srq,
					     u8 event));
/* SRQ verbs */
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq);
int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe);
/* QP lifecycle */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp);
/* QP1 header-buffer helpers */
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
					    u32 index);
/* Posting and doorbells */
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
/* CQ verbs and polling */
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
/* Error-QP flush management (see locking notes on struct bnxt_qplib_cq) */
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
#endif /* __BNXT_QPLIB_FP_H__ */