iw_cxgb4.h
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
        if (c4iw_debug) \
                printk(MOD fmt, ## args); \
} while (0)
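
/*
 * PDBG is compiled in unconditionally but gated at runtime by the
 * c4iw_debug module parameter, so a disabled trace costs one branch.
 * Illustrative use at a hypothetical call site:
 *
 *      PDBG("%s qpid 0x%x\n", __func__, qhp->wq.sq.qid);
 */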
#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
        return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1 /* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2 /* Table is initially empty */

struct c4iw_id_table {
        u32 flags;
        u32 start;              /* logical minimal id */
        u32 last;               /* hint for find */
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};
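
/*
 * The id table is a spinlock-protected bitmap allocator; see
 * c4iw_id_table_alloc()/c4iw_id_alloc() declared near the end of this
 * header.  A minimal sketch of sizing one, patterned on (but not
 * quoted from) the driver's resource setup:
 *
 *      c4iw_id_table_alloc(&rdev->resource.tpt_table,
 *                          rdev->lldi.vr->stag.start >> 5,
 *                          c4iw_num_stags(rdev), 1,
 *                          C4IW_ID_TABLE_F_RANDOM);
 */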
struct c4iw_resource {
        struct c4iw_id_table tpt_table;
        struct c4iw_id_table qid_table;
        struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1 << 0),
        T4_STATUS_PAGE_DISABLED = (1 << 1),
};

struct c4iw_stat {
        u64 total;
        u64 cur;
        u64 max;
        u64 fail;
};

struct c4iw_stats {
        struct mutex lock;
        struct c4iw_stat qid;
        struct c4iw_stat pd;
        struct c4iw_stat stag;
        struct c4iw_stat pbl;
        struct c4iw_stat rqt;
        struct c4iw_stat ocqp;
        u64 db_full;
        u64 db_empty;
        u64 db_drop;
        u64 db_state_transitions;
        u64 db_fc_interruptions;
        u64 tcam_full;
        u64 act_ofld_conn_fails;
        u64 pas_ofld_conn_fails;
};

struct c4iw_hw_queue {
        int t4_eq_status_entries;
        int t4_max_eq_size;
        int t4_max_iq_size;
        int t4_max_rq_size;
        int t4_max_sq_size;
        int t4_max_qp_depth;
        int t4_max_cq_depth;
        int t4_stat_len;
};

struct wr_log_entry {
        struct timespec post_host_ts;
        struct timespec poll_host_ts;
        u64 post_sge_ts;
        u64 cqe_sge_ts;
        u64 poll_sge_ts;
        u16 qid;
        u16 wr_id;
        u8 opcode;
        u8 valid;
};

struct c4iw_rdev {
        struct c4iw_resource resource;
        unsigned long qpshift;
        u32 qpmask;
        unsigned long cqshift;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
        unsigned long bar2_pa;
        void __iomem *bar2_kva;
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
        struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return (int)(rdev->lldi.vr->stag.size >> 5);
}

#define C4IW_WR_TO (30 * HZ)

struct c4iw_wr_wait {
        struct completion completion;
        int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                      struct c4iw_wr_wait *wr_waitp,
                                      u32 hwtid, u32 qpid,
                                      const char *func)
{
        unsigned to = C4IW_WR_TO;
        int ret;

        do {
                ret = wait_for_completion_timeout(&wr_waitp->completion, to);
                if (!ret) {
                        printk(KERN_ERR MOD "%s - Device %s not responding - "
                               "tid %u qpid %u\n", func,
                               pci_name(rdev->lldi.pdev), hwtid, qpid);
                        if (c4iw_fatal_error(rdev)) {
                                wr_waitp->ret = -EIO;
                                break;
                        }
                        to = to << 2;
                }
        } while (!ret);
        if (wr_waitp->ret)
                PDBG("%s: FW reply %d tid %u qpid %u\n",
                     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
        return wr_waitp->ret;
}
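
/*
 * Firmware work requests complete asynchronously via CPL messages.
 * The usual pattern (a sketch, not a verbatim call site) is:
 *
 *      struct c4iw_wr_wait wr_wait;
 *
 *      c4iw_init_wr_wait(&wr_wait);
 *      ... build a FW WR carrying &wr_wait as its cookie and hand
 *          it to c4iw_ofld_send() ...
 *      ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, qid, __func__);
 *
 * The CPL handler calls c4iw_wake_up() with the FW status.  On each
 * timeout the wait is quadrupled (to <<= 2) and retried, unless the
 * device has hit a fatal error, in which case the wait fails -EIO.
 */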
enum db_state {
        NORMAL = 0,
        FLOW_CONTROL = 1,
        RECOVERY = 2,
        STOPPED = 3
};

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct mutex db_mutex;
        struct dentry *debugfs_root;
        enum db_state db_state;
        struct idr hwtid_idr;
        struct idr atid_idr;
        struct idr stid_idr;
        struct list_head db_fc_list;
        u32 avail_ird;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                 void *handle, u32 id, int lock)
{
        int ret;

        if (lock) {
                idr_preload(GFP_KERNEL);
                spin_lock_irq(&rhp->lock);
        }

        ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);

        if (lock) {
                spin_unlock_irq(&rhp->lock);
                idr_preload_end();
        }

        BUG_ON(ret == -ENOSPC);
        return ret < 0 ? ret : 0;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
                                       void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
                                  u32 id, int lock)
{
        if (lock)
                spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        if (lock)
                spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
                                        struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 0);
}
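
/*
 * These wrappers pin an object at exactly its hardware id - the
 * idr_alloc() range [id, id + 1) admits only that value - so an id
 * arriving in a CPL message maps straight back to its driver object.
 * Sketch of the lifecycle for a QP (illustrative):
 *
 *      insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *      ...
 *      qhp = get_qhp(rhp, qid);        - e.g. from an async event
 *      ...
 *      remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 */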
extern uint c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
        return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invaliate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
        struct ib_fast_reg_page_list ibpl;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        dma_addr_t dma_addr;
        struct c4iw_dev *dev;
        int pll_len;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
                                        struct ib_fast_reg_page_list *ibpl)
{
        return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 enhanced_rdma_conn;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
        u8 layer_etype;
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
        u8 send_term;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        atomic_t refcnt;
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
                             key, (unsigned long long)mm->addr, mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
             mm->key, (unsigned long long)mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}
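
/*
 * User queue memory is exported by handing userspace an opaque
 * (key, len) cookie at create time; the user library then mmap()s the
 * device file at offset "key", and the driver's mmap handler calls
 * remove_mmap() to translate the cookie back into a physical address.
 * A sketch of the producer side (illustrative, not a verbatim call
 * site; field names are assumptions):
 *
 *      mm->key = uresp.sq_key = ucontext->key;
 *      ucontext->key += PAGE_SIZE;
 *      mm->addr = virt_to_phys(qhp->wq.sq.queue);
 *      mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
 *      insert_mmap(ucontext, mm);
 */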
enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_SQ_DB = 1 << 1,
        C4IW_QP_ATTR_RQ_DB = 1 << 2,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
                   struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
                   int internal);

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
        switch (c4iw_qp_state) {
        case C4IW_QP_STATE_IDLE:
                return IB_QPS_INIT;
        case C4IW_QP_STATE_RTS:
                return IB_QPS_RTS;
        case C4IW_QP_STATE_CLOSING:
                return IB_QPS_SQD;
        case C4IW_QP_STATE_TERMINATE:
                return IB_QPS_SQE;
        case C4IW_QP_STATE_ERROR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}

static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
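
/*
 * Local read permission is always granted in the TPT entry, so e.g.
 * c4iw_ib_to_tpt_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE)
 * yields FW_RI_MEM_ACCESS_LOCAL_READ | FW_RI_MEM_ACCESS_LOCAL_WRITE |
 * FW_RI_MEM_ACCESS_REM_WRITE, while the bind variant deliberately
 * propagates only the remote-access bits.
 */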
enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA 256
#define MPA_ENHANCED_RDMA_CONN 0x10
#define MPA_REJECT 0x20
#define MPA_CRC 0x40
#define MPA_MARKERS 0x80
#define MPA_FLAGS_MASK 0xE0

#define MPA_V2_PEER2PEER_MODEL 0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000
#define MPA_V2_RDMA_WRITE_RTR 0x8000
#define MPA_V2_RDMA_READ_RTR 0x4000
#define MPA_V2_IRD_ORD_MASK 0x3FFF

#define c4iw_put_ep(ep) { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
             ep, atomic_read(&((ep)->kref.refcount))); \
        WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
        PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
             ep, atomic_read(&((ep)->kref.refcount))); \
        kref_get(&((ep)->kref)); \
}
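
/*
 * Endpoints are kref-counted because they are touched concurrently by
 * timers, CPL handlers and the iw_cm core.  Typical pattern (sketch):
 * c4iw_get_ep() before arming ep->timer or stashing the ep in deferred
 * work, c4iw_put_ep() when that reference is dropped; the final put
 * runs _c4iw_free_ep().  The macros log the caller via __func__ and
 * __LINE__ so refcount leaks can be traced with c4iw_debug enabled.
 */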
void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct mpa_v2_conn_params {
        __be16 ird;
        __be16 ord;
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP = 0x00,
        LAYER_DDP = 0x10,
        LAYER_MPA = 0x20,
        RDMAP_LOCAL_CATA = 0x00,
        RDMAP_REMOTE_PROT = 0x01,
        RDMAP_REMOTE_OP = 0x02,
        DDP_LOCAL_CATA = 0x00,
        DDP_TAGGED_ERR = 0x01,
        DDP_UNTAGGED_ERR = 0x02,
        DDP_LLP = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG = 0x00,
        RDMAP_BASE_BOUNDS = 0x01,
        RDMAP_ACC_VIOL = 0x02,
        RDMAP_STAG_NOT_ASSOC = 0x03,
        RDMAP_TO_WRAP = 0x04,
        RDMAP_INV_VERS = 0x05,
        RDMAP_INV_OPCODE = 0x06,
        RDMAP_STREAM_CATA = 0x07,
        RDMAP_GLOBAL_CATA = 0x08,
        RDMAP_CANT_INV_STAG = 0x09,
        RDMAP_UNSPECIFIED = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG = 0x00,
        DDPT_BASE_BOUNDS = 0x01,
        DDPT_STAG_NOT_ASSOC = 0x02,
        DDPT_TO_WRAP = 0x03,
        DDPT_INV_VERS = 0x04,
        DDPU_INV_QN = 0x01,
        DDPU_INV_MSN_NOBUF = 0x02,
        DDPU_INV_MSN_RANGE = 0x03,
        DDPU_INV_MO = 0x04,
        DDPU_MSG_TOOBIG = 0x05,
        DDPU_INV_VERS = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR = 0x02,
        MPA_MARKER_ERR = 0x03,
        MPA_LOCAL_CATA = 0x05,
        MPA_INSUFF_IRD = 0x06,
        MPA_NOMATCH_RTR = 0x07,
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS = 0,
        ABORT_REQ_IN_PROGRESS = 1,
        RELEASE_RESOURCES = 2,
        CLOSE_SENT = 3,
        TIMEOUT = 4,
        QP_REFERENCED = 5,
        RELEASE_MAPINFO = 6,
};

enum c4iw_ep_history {
        ACT_OPEN_REQ = 0,
        ACT_OFLD_CONN = 1,
        ACT_OPEN_RPL = 2,
        ACT_ESTAB = 3,
        PASS_ACCEPT_REQ = 4,
        PASS_ESTAB = 5,
        ABORT_UPCALL = 6,
        ESTAB_UPCALL = 7,
        CLOSE_UPCALL = 8,
        ULP_ACCEPT = 9,
        ULP_REJECT = 10,
        TIMEDOUT = 11,
        PEER_ABORT = 12,
        PEER_CLOSE = 13,
        CONNREQ_UPCALL = 14,
        ABORT_CONN = 15,
        DISCONN_UPCALL = 16,
        EP_DISC_CLOSE = 17,
        EP_DISC_ABORT = 18,
        CONN_RPL_UPCALL = 19,
        ACT_RETRY_NOMEM = 20,
        ACT_RETRY_INUSE = 21
};

struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
        struct sockaddr_storage mapped_local_addr;
        struct sockaddr_storage mapped_remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
        struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct sk_buff *mpa_skb;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 smac_idx;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 emss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
        u8 retry_with_mpa_v1;
        u8 tried_with_mpa_v1;
        unsigned int retry_count;
        int snd_win;
        int rcv_win;
};

static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
                              const char *msg)
{
#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr))
#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port)
#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr))
#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port)

        if (c4iw_debug) {
                switch (epc->local_addr.ss_family) {
                case AF_INET:
                        PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n",
                             func, msg, SINA(&epc->local_addr),
                             SINP(&epc->local_addr),
                             SINP(&epc->mapped_local_addr),
                             SINA(&epc->remote_addr),
                             SINP(&epc->remote_addr),
                             SINP(&epc->mapped_remote_addr));
                        break;
                case AF_INET6:
                        PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n",
                             func, msg, SIN6A(&epc->local_addr),
                             SIN6P(&epc->local_addr),
                             SIN6P(&epc->mapped_local_addr),
                             SIN6A(&epc->remote_addr),
                             SIN6P(&epc->remote_addr),
                             SIN6P(&epc->mapped_remote_addr));
                        break;
                default:
                        break;
                }
        }
#undef SINA
#undef SINP
#undef SIN6A
#undef SIN6P
}

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}
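
/*
 * compute_wscale() picks the smallest TCP window-scale shift that lets
 * the 16-bit window field cover "win", capped at 14 per RFC 1323.
 * Worked example: for a 1 MiB receive window (win = 1048576),
 * 65535 << 4 = 1048560 is still too small, so the loop stops at
 * wscale = 5 (65535 << 5 = 2097120).
 */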
static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
        return infop->vr->ocq.size > 0;
#else
        return 0;
#endif
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
                        u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
                                        struct ib_device *device,
                                        int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                               u64 length, u64 virt, int acc,
                               struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
                                     struct ib_phys_buf *buffer_list,
                                     int num_phys_buf,
                                     int acc,
                                     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
                             int mr_rereg_mask,
                             struct ib_pd *pd,
                             struct ib_phys_buf *buffer_list,
                             int num_phys_buf,
                             int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;

#endif