/* drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
 * (web-viewer scrape artifacts — filename/size banner and line-number runs —
 * removed from the top of this capture)
 */
  1. /*
  2. * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. */
  33. #include <linux/bug.h>
  34. #include <linux/errno.h>
  35. #include <linux/module.h>
  36. #include <linux/spinlock.h>
  37. #include "usnic_log.h"
  38. #include "usnic_vnic.h"
  39. #include "usnic_fwd.h"
  40. #include "usnic_uiom.h"
  41. #include "usnic_debugfs.h"
  42. #include "usnic_ib_qp_grp.h"
  43. #include "usnic_ib_sysfs.h"
  44. #include "usnic_transport.h"
  45. #define DFLT_RQ_IDX 0
  46. const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
  47. {
  48. switch (state) {
  49. case IB_QPS_RESET:
  50. return "Rst";
  51. case IB_QPS_INIT:
  52. return "Init";
  53. case IB_QPS_RTR:
  54. return "RTR";
  55. case IB_QPS_RTS:
  56. return "RTS";
  57. case IB_QPS_SQD:
  58. return "SQD";
  59. case IB_QPS_SQE:
  60. return "SQE";
  61. case IB_QPS_ERR:
  62. return "ERR";
  63. default:
  64. return "UNKNOWN STATE";
  65. }
  66. }
  67. int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
  68. {
  69. return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
  70. }
  71. int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
  72. {
  73. struct usnic_ib_qp_grp *qp_grp = obj;
  74. struct usnic_ib_qp_grp_flow *default_flow;
  75. if (obj) {
  76. default_flow = list_first_entry(&qp_grp->flows_lst,
  77. struct usnic_ib_qp_grp_flow, link);
  78. return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
  79. qp_grp->ibqp.qp_num,
  80. usnic_ib_qp_grp_state_to_string(
  81. qp_grp->state),
  82. qp_grp->owner_pid,
  83. usnic_vnic_get_index(qp_grp->vf->vnic),
  84. default_flow->flow->flow_id);
  85. } else {
  86. return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
  87. }
  88. }
/*
 * Return the RQ resource chunk for @qp_grp.  Caller must hold
 * qp_grp->lock.
 */
static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * are just indices of the RQs
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}
  99. static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
  100. {
  101. int status;
  102. int i, vnic_idx;
  103. struct usnic_vnic_res_chunk *res_chunk;
  104. struct usnic_vnic_res *res;
  105. lockdep_assert_held(&qp_grp->lock);
  106. vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
  107. res_chunk = get_qp_res_chunk(qp_grp);
  108. if (IS_ERR(res_chunk)) {
  109. usnic_err("Unable to get qp res with err %ld\n",
  110. PTR_ERR(res_chunk));
  111. return PTR_ERR(res_chunk);
  112. }
  113. for (i = 0; i < res_chunk->cnt; i++) {
  114. res = res_chunk->res[i];
  115. status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
  116. res->vnic_idx);
  117. if (status) {
  118. usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
  119. res->vnic_idx, qp_grp->ufdev->name,
  120. vnic_idx, status);
  121. goto out_err;
  122. }
  123. }
  124. return 0;
  125. out_err:
  126. for (i--; i >= 0; i--) {
  127. res = res_chunk->res[i];
  128. usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
  129. res->vnic_idx);
  130. }
  131. return status;
  132. }
  133. static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
  134. {
  135. int i, vnic_idx;
  136. struct usnic_vnic_res_chunk *res_chunk;
  137. struct usnic_vnic_res *res;
  138. int status = 0;
  139. lockdep_assert_held(&qp_grp->lock);
  140. vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
  141. res_chunk = get_qp_res_chunk(qp_grp);
  142. if (IS_ERR(res_chunk)) {
  143. usnic_err("Unable to get qp res with err %ld\n",
  144. PTR_ERR(res_chunk));
  145. return PTR_ERR(res_chunk);
  146. }
  147. for (i = 0; i < res_chunk->cnt; i++) {
  148. res = res_chunk->res[i];
  149. status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
  150. res->vnic_idx);
  151. if (status) {
  152. usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
  153. res->vnic_idx,
  154. qp_grp->ufdev->name,
  155. vnic_idx, status);
  156. }
  157. }
  158. return status;
  159. }
  160. static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
  161. struct usnic_filter_action *uaction)
  162. {
  163. struct usnic_vnic_res_chunk *res_chunk;
  164. res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
  165. if (IS_ERR(res_chunk)) {
  166. usnic_err("Unable to get %s with err %ld\n",
  167. usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
  168. PTR_ERR(res_chunk));
  169. return PTR_ERR(res_chunk);
  170. }
  171. uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
  172. uaction->action.type = FILTER_ACTION_RQ_STEERING;
  173. uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
  174. return 0;
  175. }
/*
 * Build a usNIC-custom (RoCE) flow for @qp_grp: reserve the transport
 * port, install a forwarding filter steering it to the default RQ, and
 * wrap the result in a usnic_ib_qp_grp_flow handle.
 *
 * Returns the handle or an ERR_PTR.  The goto chain releases every
 * resource acquired so far, in reverse order of acquisition.
 */
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port (0 back from the transport layer means failure) */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		/* a NULL return carries no errno, map it to -EFAULT */
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}
/*
 * Release a RoCE-custom flow: free the forwarding flow, return the
 * reserved transport port, then free the handle itself.
 */
static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}
/*
 * Build a UDP flow for @qp_grp from a caller-supplied socket fd:
 * resolve the socket's bound address/port, verify it really is UDP,
 * install a UDP forwarding filter steering it to the default RQ, and
 * wrap everything in a usnic_ib_qp_grp_flow handle that holds a
 * reference on the socket.
 *
 * Returns the handle or an ERR_PTR.  The goto chain releases the flow
 * and the socket reference on failure.
 */
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		/* a NULL return carries no errno, map it to -EFAULT */
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}
/*
 * Release a UDP flow: free the forwarding flow, drop the socket
 * reference taken in create_udp_flow(), then free the handle.
 */
static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}
  289. static struct usnic_ib_qp_grp_flow*
  290. create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
  291. struct usnic_transport_spec *trans_spec)
  292. {
  293. struct usnic_ib_qp_grp_flow *qp_flow;
  294. enum usnic_transport_type trans_type;
  295. trans_type = trans_spec->trans_type;
  296. switch (trans_type) {
  297. case USNIC_TRANSPORT_ROCE_CUSTOM:
  298. qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
  299. break;
  300. case USNIC_TRANSPORT_IPV4_UDP:
  301. qp_flow = create_udp_flow(qp_grp, trans_spec);
  302. break;
  303. default:
  304. usnic_err("Unsupported transport %u\n",
  305. trans_spec->trans_type);
  306. return ERR_PTR(-EINVAL);
  307. }
  308. if (!IS_ERR_OR_NULL(qp_flow)) {
  309. list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
  310. usnic_debugfs_flow_add(qp_flow);
  311. }
  312. return qp_flow;
  313. }
  314. static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
  315. {
  316. usnic_debugfs_flow_remove(qp_flow);
  317. list_del(&qp_flow->link);
  318. switch (qp_flow->trans_type) {
  319. case USNIC_TRANSPORT_ROCE_CUSTOM:
  320. release_roce_custom_flow(qp_flow);
  321. break;
  322. case USNIC_TRANSPORT_IPV4_UDP:
  323. release_udp_flow(qp_flow);
  324. break;
  325. default:
  326. WARN(1, "Unsupported transport %u\n",
  327. qp_flow->trans_type);
  328. break;
  329. }
  330. }
/*
 * Tear down every flow still attached to @qp_grp.  Uses the _safe
 * iterator because release_and_remove_flow() unlinks each entry.
 */
static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}
/*
 * Drive the QP group's state machine toward @new_state.
 *
 * @data optionally carries a usnic_transport_spec when transitioning
 * into INIT, in which case an additional flow/filter is added.
 * Returns 0 on a legal transition, -EINVAL for an unsupported one, or
 * the error from the underlying enable/disable/flow operation.
 *
 * NOTE(review): old_state is sampled and qp_grp->state is written
 * outside qp_grp->lock — presumably callers serialize modify calls;
 * confirm against the ib_modify_qp path.
 */
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	/* NOTE(review): vnic_idx is computed but unused in this function */
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			/* Filters may have been added in INIT; drop them */
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			/* Quiesce the hardware first, then drop filters */
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
						trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
						trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			/* RTR is where the hardware qps get enabled */
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		/* Prepare the fatal-QP event delivered to the consumer */
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		/* Commit the new state only after a successful transition */
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}
  475. static struct usnic_vnic_res_chunk**
  476. alloc_res_chunk_list(struct usnic_vnic *vnic,
  477. struct usnic_vnic_res_spec *res_spec, void *owner_obj)
  478. {
  479. enum usnic_vnic_res_type res_type;
  480. struct usnic_vnic_res_chunk **res_chunk_list;
  481. int err, i, res_cnt, res_lst_sz;
  482. for (res_lst_sz = 0;
  483. res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
  484. res_lst_sz++) {
  485. /* Do Nothing */
  486. }
  487. res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
  488. GFP_ATOMIC);
  489. if (!res_chunk_list)
  490. return ERR_PTR(-ENOMEM);
  491. for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
  492. i++) {
  493. res_type = res_spec->resources[i].type;
  494. res_cnt = res_spec->resources[i].cnt;
  495. res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
  496. res_cnt, owner_obj);
  497. if (IS_ERR_OR_NULL(res_chunk_list[i])) {
  498. err = res_chunk_list[i] ?
  499. PTR_ERR(res_chunk_list[i]) : -ENOMEM;
  500. usnic_err("Failed to get %s from %s with err %d\n",
  501. usnic_vnic_res_type_to_str(res_type),
  502. usnic_vnic_pci_name(vnic),
  503. err);
  504. goto out_free_res;
  505. }
  506. }
  507. return res_chunk_list;
  508. out_free_res:
  509. for (i--; i >= 0; i--)
  510. usnic_vnic_put_resources(res_chunk_list[i]);
  511. kfree(res_chunk_list);
  512. return ERR_PTR(err);
  513. }
  514. static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
  515. {
  516. int i;
  517. for (i = 0; res_chunk_list[i]; i++)
  518. usnic_vnic_put_resources(res_chunk_list[i]);
  519. kfree(res_chunk_list);
  520. }
/*
 * Bind @qp_grp to @vf, attaching the VF's PCI device to @pd's uiom
 * domain when this is the first QP group on the VF.  All subsequent
 * groups on the same VF must use the same PD (WARN_ON otherwise).
 * Caller must hold vf->lock.  Returns 0 or a negative errno.
 */
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		/* First group on this VF: attach the device to the PD */
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}
/*
 * Undo qp_grp_and_vf_bind(): drop the VF's QP-group reference and,
 * when the last group goes away, detach the VF's PCI device from the
 * PD's uiom domain.  Caller must hold qp_grp->vf->lock.
 */
static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}
  556. static void log_spec(struct usnic_vnic_res_spec *res_spec)
  557. {
  558. char buf[512];
  559. usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
  560. usnic_dbg("%s\n", buf);
  561. }
  562. static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
  563. uint32_t *id)
  564. {
  565. enum usnic_transport_type trans_type = qp_flow->trans_type;
  566. int err;
  567. uint16_t port_num = 0;
  568. switch (trans_type) {
  569. case USNIC_TRANSPORT_ROCE_CUSTOM:
  570. *id = qp_flow->usnic_roce.port_num;
  571. break;
  572. case USNIC_TRANSPORT_IPV4_UDP:
  573. err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
  574. NULL, NULL,
  575. &port_num);
  576. if (err)
  577. return err;
  578. /*
  579. * Copy port_num to stack first and then to *id,
  580. * so that the short to int cast works for little
  581. * and big endian systems.
  582. */
  583. *id = port_num;
  584. break;
  585. default:
  586. usnic_err("Unsupported transport %u\n", trans_type);
  587. return -EINVAL;
  588. }
  589. return 0;
  590. }
  591. struct usnic_ib_qp_grp *
  592. usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
  593. struct usnic_ib_pd *pd,
  594. struct usnic_vnic_res_spec *res_spec,
  595. struct usnic_transport_spec *transport_spec)
  596. {
  597. struct usnic_ib_qp_grp *qp_grp;
  598. int err;
  599. enum usnic_transport_type transport = transport_spec->trans_type;
  600. struct usnic_ib_qp_grp_flow *qp_flow;
  601. lockdep_assert_held(&vf->lock);
  602. err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
  603. res_spec);
  604. if (err) {
  605. usnic_err("Spec does not meet miniumum req for transport %d\n",
  606. transport);
  607. log_spec(res_spec);
  608. return ERR_PTR(err);
  609. }
  610. qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
  611. if (!qp_grp)
  612. return NULL;
  613. qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
  614. qp_grp);
  615. if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
  616. err = qp_grp->res_chunk_list ?
  617. PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
  618. goto out_free_qp_grp;
  619. }
  620. err = qp_grp_and_vf_bind(vf, pd, qp_grp);
  621. if (err)
  622. goto out_free_res;
  623. INIT_LIST_HEAD(&qp_grp->flows_lst);
  624. spin_lock_init(&qp_grp->lock);
  625. qp_grp->ufdev = ufdev;
  626. qp_grp->state = IB_QPS_RESET;
  627. qp_grp->owner_pid = current->pid;
  628. qp_flow = create_and_add_flow(qp_grp, transport_spec);
  629. if (IS_ERR_OR_NULL(qp_flow)) {
  630. usnic_err("Unable to create and add flow with err %ld\n",
  631. PTR_ERR(qp_flow));
  632. err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
  633. goto out_qp_grp_vf_unbind;
  634. }
  635. err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
  636. if (err)
  637. goto out_release_flow;
  638. qp_grp->ibqp.qp_num = qp_grp->grp_id;
  639. usnic_ib_sysfs_qpn_add(qp_grp);
  640. return qp_grp;
  641. out_release_flow:
  642. release_and_remove_flow(qp_flow);
  643. out_qp_grp_vf_unbind:
  644. qp_grp_and_vf_unbind(qp_grp);
  645. out_free_res:
  646. free_qp_grp_res(qp_grp->res_chunk_list);
  647. out_free_qp_grp:
  648. kfree(qp_grp);
  649. return ERR_PTR(err);
  650. }
/*
 * Tear down a QP group created by usnic_ib_qp_grp_create().  The group
 * must already be back in RESET (WARN_ON otherwise) and the caller
 * must hold qp_grp->vf->lock.  Releases flows, sysfs entry, the VF
 * binding and the vnic resources, then frees the group.
 */
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}
  661. struct usnic_vnic_res_chunk*
  662. usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
  663. enum usnic_vnic_res_type res_type)
  664. {
  665. int i;
  666. for (i = 0; qp_grp->res_chunk_list[i]; i++) {
  667. if (qp_grp->res_chunk_list[i]->type == res_type)
  668. return qp_grp->res_chunk_list[i];
  669. }
  670. return ERR_PTR(-EINVAL);
  671. }