/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
#include <linux/idr.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");
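
/* Default work-queue size multiplier. A single verbs work request may
 * expand into more than one firmware WQE, so the queues are over-provisioned
 * by this factor when sized (stored in dev->wq_multiplier and consumed by
 * the queue-sizing code in the verbs layer; the exact expansion is a
 * firmware detail).
 */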
#define QEDR_WQ_MULTIPLIER_DFT	(3)

static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
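
/* attr.fw_ver packs the firmware version one byte per component into a
 * 32-bit word; the helper below simply unpacks the four bytes from most
 * to least significant when building the version string.
 */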
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
{
	struct qedr_dev *qdev;

	qdev = get_qedr_dev(dev);
	dev_hold(qdev->ndev);

	/* The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	return qdev->ndev;
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
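
/* The iWARP flavour below is deliberately minimal compared with the RoCE
 * variant above, which advertises both RoCE v1 and RoCE v2 (UDP
 * encapsulation) plus MAD support: iWARP exposes a single pkey/gid table
 * entry and carries no MADs at all.
 */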
static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = 1;
	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	dev->ibdev.query_gid = qedr_iw_query_gid;

	dev->ibdev.get_port_immutable = qedr_iw_port_immutable;

	dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = qedr_iw_connect;
	dev->ibdev.iwcm->accept = qedr_iw_accept;
	dev->ibdev.iwcm->reject = qedr_iw_reject;
	dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;

	memcpy(dev->ibdev.iwcm->ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));

	return 0;
}

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	dev->ibdev.get_port_immutable = qedr_roce_port_immutable;
}

static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;

	dev->ibdev.query_device = qedr_query_device;
	dev->ibdev.query_port = qedr_query_port;
	dev->ibdev.modify_port = qedr_modify_port;

	dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
	dev->ibdev.mmap = qedr_mmap;

	dev->ibdev.alloc_pd = qedr_alloc_pd;
	dev->ibdev.dealloc_pd = qedr_dealloc_pd;

	dev->ibdev.create_cq = qedr_create_cq;
	dev->ibdev.destroy_cq = qedr_destroy_cq;
	dev->ibdev.resize_cq = qedr_resize_cq;
	dev->ibdev.req_notify_cq = qedr_arm_cq;

	dev->ibdev.create_qp = qedr_create_qp;
	dev->ibdev.modify_qp = qedr_modify_qp;
	dev->ibdev.query_qp = qedr_query_qp;
	dev->ibdev.destroy_qp = qedr_destroy_qp;

	dev->ibdev.query_pkey = qedr_query_pkey;

	dev->ibdev.create_ah = qedr_create_ah;
	dev->ibdev.destroy_ah = qedr_destroy_ah;

	dev->ibdev.get_dma_mr = qedr_get_dma_mr;
	dev->ibdev.dereg_mr = qedr_dereg_mr;
	dev->ibdev.reg_user_mr = qedr_reg_user_mr;
	dev->ibdev.alloc_mr = qedr_alloc_mr;
	dev->ibdev.map_mr_sg = qedr_map_mr_sg;

	dev->ibdev.poll_cq = qedr_poll_cq;
	dev->ibdev.post_send = qedr_post_send;
	dev->ibdev.post_recv = qedr_post_recv;

	dev->ibdev.process_mad = qedr_process_mad;

	dev->ibdev.get_netdev = qedr_get_netdev;

	dev->ibdev.dev.parent = &dev->pdev->dev;

	dev->ibdev.get_link_layer = qedr_link_layer;
	dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;

	dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
	return ib_register_device(&dev->ibdev, NULL);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
				QEDR_MAX_SGID, GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	if (IS_IWARP(dev)) {
		spin_lock_init(&dev->idr_lock);
		idr_init(&dev->qpidr);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl, NULL);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

/* QEDR sysfs interface */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qedr_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}

static ssize_t show_hca_type(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *qedr_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type
};

static void qedr_remove_sysfiles(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
}
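
/* RDMA atomic verbs can only be advertised globally (IB_ATOMIC_GLOB) if
 * the PCI core confirms that the path to the root port is capable of
 * completing 64-bit PCIe AtomicOps; otherwise the device falls back to
 * IB_ATOMIC_NONE below.
 */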
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
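
/* CNQ interrupt handler. The firmware posts one regpair per completion
 * event into the CNQ chain; each entry carries the 64-bit driver handle
 * of the CQ concerned, split into hi/lo halves and recombined with
 * HILO_U64 above. Entries are consumed until the software consumer index
 * catches up with the hardware one read from the status block, and each
 * CQ's completion handler is invoked along the way.
 */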
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
  389. "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);

			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}
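
/* The MSI-X table is indexed with a stride of dev->num_hwfns below: on
 * multi-function devices the qed core interleaves the vectors of the
 * individual hardware functions, so consecutive CNQs must step over the
 * other functions' entries. (The exact interleaving is an implementation
 * detail of the qed layer; this comment describes the assumption the
 * indexing here relies on.)
 */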
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
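	/* In two's complement ~caps + 1 == -caps, which, assuming the
	 * device reports a contiguous mask of supported page sizes, equals
	 * the least significant set bit of the mask, i.e. the smallest
	 * page size the device supports.
	 */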
	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;

	switch (e_code) {
	case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
		event.event = IB_EVENT_CQ_ERR;
		event_type = EVENT_TYPE_CQ;
		break;
	case ROCE_ASYNC_EVENT_SQ_DRAINED:
		event.event = IB_EVENT_SQ_DRAINED;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
		event.event = IB_EVENT_QP_FATAL;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
		event.event = IB_EVENT_QP_REQ_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	default:
		DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
		       roce_handle64);
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	default:
		break;
	}
}

static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0, i;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
			goto sysfs_err;

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

sysfs_err:
	ib_unregister_device(&dev->ibdev);
reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
	ib_dealloc_device(&dev->ibdev);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	qedr_remove_sysfiles(dev);
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);
	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
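	/* Derive a modified EUI-64 interface ID from the MAC address, as
	 * for an IPv6 link-local address (RFC 4291): flip the
	 * universal/local bit in the first octet and splice 0xff, 0xfe
	 * into the middle, then pair it with the fe80::/64 prefix below.
	 */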
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the event
 * to the stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);