ib_cm.c

/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "ib.h"

static char *rds_ib_event_type_strings[] = {
#define RDS_IB_EVENT_STRING(foo) \
                [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
        RDS_IB_EVENT_STRING(CQ_ERR),
        RDS_IB_EVENT_STRING(QP_FATAL),
        RDS_IB_EVENT_STRING(QP_REQ_ERR),
        RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
        RDS_IB_EVENT_STRING(COMM_EST),
        RDS_IB_EVENT_STRING(SQ_DRAINED),
        RDS_IB_EVENT_STRING(PATH_MIG),
        RDS_IB_EVENT_STRING(PATH_MIG_ERR),
        RDS_IB_EVENT_STRING(DEVICE_FATAL),
        RDS_IB_EVENT_STRING(PORT_ACTIVE),
        RDS_IB_EVENT_STRING(PORT_ERR),
        RDS_IB_EVENT_STRING(LID_CHANGE),
        RDS_IB_EVENT_STRING(PKEY_CHANGE),
        RDS_IB_EVENT_STRING(SM_CHANGE),
        RDS_IB_EVENT_STRING(SRQ_ERR),
        RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
        RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
        RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
#undef RDS_IB_EVENT_STRING
};

static char *rds_ib_event_str(enum ib_event_type type)
{
        return rds_str_array(rds_ib_event_type_strings,
                             ARRAY_SIZE(rds_ib_event_type_strings), type);
}

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
        conn->c_version = version;
}
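
/*
 * For illustration only - a minimal sketch of the version encoding the
 * calls in this file rely on. Assuming the usual definitions in rds.h,
 * a protocol version packs the major number into the high byte and the
 * minor into the low byte:
 *
 *      #define RDS_PROTOCOL(major, minor)   (((major) << 8) | (minor))
 *      #define RDS_PROTOCOL_MAJOR(v)        ((v) >> 8)
 *      #define RDS_PROTOCOL_MINOR(v)        ((v) & 255)
 *
 * so RDS_PROTOCOL(3, 1) == 0x0301, RDS_PROTOCOL_MAJOR(0x0301) == 3 and
 * RDS_PROTOCOL_MINOR(0x0301) == 1.
 */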

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (rds_ib_sysctl_flow_control && credits != 0) {
                /* We're doing flow control */
                ic->i_flowctl = 1;
                rds_ib_send_add_credits(conn, credits);
        } else {
                ic->i_flowctl = 0;
        }
}
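
/*
 * A sketch of the credit word, assuming the IB_GET_POST_CREDITS and
 * IB_SET_POST_CREDITS helpers in ib.h use the conventional split: the
 * connection keeps both counters in one atomic, ic->i_credits, with
 * send credits in the low 16 bits and newly posted receive-buffer
 * credits in the high 16 bits. Advertising 16 posted buffers to the
 * peer then subtracts IB_SET_POST_CREDITS(16) == 16 << 16 from
 * i_credits, which is what rds_ib_cm_fill_conn_param() does below.
 */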

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
        int ret;

        attr->min_rnr_timer = IB_RNR_TIMER_000_32;
        ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
        if (ret)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}
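
/*
 * For reference (values from the IB verbs enum and spec, not from this
 * file): IB_RNR_TIMER_000_32 requests an RNR NAK timer of roughly
 * 0.32 ms, and an rnr_retry_count of 7 is defined to mean "retry
 * indefinitely" - hence the "smallest infinite number" remark above.
 */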

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = NULL;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_qp_attr qp_attr;
        int err;

        if (event->param.conn.private_data_len >= sizeof(*dp)) {
                dp = event->param.conn.private_data;

                /* make sure it isn't empty data */
                if (dp->dp_protocol_major) {
                        rds_ib_set_protocol(conn,
                                RDS_PROTOCOL(dp->dp_protocol_major,
                                             dp->dp_protocol_minor));
                        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
                }
        }

        if (conn->c_version < RDS_PROTOCOL(3, 1)) {
                printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
                       " no longer supported\n",
                       &conn->c_faddr,
                       RDS_PROTOCOL_MAJOR(conn->c_version),
                       RDS_PROTOCOL_MINOR(conn->c_version));
                rds_conn_destroy(conn);
                return;
        } else {
                printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
                       &conn->c_faddr,
                       RDS_PROTOCOL_MAJOR(conn->c_version),
                       RDS_PROTOCOL_MINOR(conn->c_version),
                       ic->i_flowctl ? ", flow control" : "");
        }

        /*
         * Init rings and fill recv. This needs to wait until protocol
         * negotiation is complete, since the ring layout differs between
         * 3.0 and 3.1.
         */
        rds_ib_send_init_ring(ic);
        rds_ib_recv_init_ring(ic);
        /* Post receive buffers - as a side effect, this will update
         * the posted credit count. */
        rds_ib_recv_refill(conn, 1);

        /* Tune RNR behavior */
        rds_ib_tune_rnr(ic, &qp_attr);

        qp_attr.qp_state = IB_QPS_RTS;
        err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
        if (err)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

        /* update ib_device with this local ipaddr */
        err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
        if (err)
                printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp) {
                /* The dp structure is not guaranteed to start on an 8-byte
                 * boundary. dp_ack_seq is 64 bits wide, and a plain extended
                 * load could fault on strict-alignment architectures, so
                 * read it through get_unaligned().
                 */
                __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

                if (dp_ack_seq)
                        rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
                                            NULL);
        }

        rds_connect_complete(conn);
}

static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                                      struct rdma_conn_param *conn_param,
                                      struct rds_ib_connect_private *dp,
                                      u32 protocol_version,
                                      u32 max_responder_resources,
                                      u32 max_initiator_depth)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

        memset(conn_param, 0, sizeof(struct rdma_conn_param));

        conn_param->responder_resources =
                min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
        conn_param->initiator_depth =
                min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
        conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
        conn_param->rnr_retry_count = 7;

        if (dp) {
                memset(dp, 0, sizeof(*dp));
                dp->dp_saddr = conn->c_laddr;
                dp->dp_daddr = conn->c_faddr;
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
                dp->dp_ack_seq = rds_ib_piggyb_ack(ic);

                /* Advertise flow control */
                if (ic->i_flowctl) {
                        unsigned int credits;

                        credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
                        dp->dp_credit = cpu_to_be32(credits);
                        atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
                }

                conn_param->private_data = dp;
                conn_param->private_data_len = sizeof(*dp);
        }
}
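
/*
 * A worked example of the clamping above, with made-up numbers: if our
 * device reports max_responder_resources = 16 and the peer's request
 * carried 4, the passive side calls this with max_responder_resources
 * = 4 and we answer with min(16, 4) = 4. The active side passes
 * UINT_MAX for both limits (see rds_ib_cm_initiate_connect() below),
 * so there the device maxima win.
 */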

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
        rdsdebug("event %u (%s) data %p\n",
                 event->event, rds_ib_event_str(event->event), data);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
        struct rds_connection *conn = data;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
                 rds_ib_event_str(event->event));

        switch (event->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
                break;
        default:
                rdsdebug("Fatal QP Event %u (%s) "
                         "- connection %pI4->%pI4, reconnecting\n",
                         event->event, rds_ib_event_str(event->event),
                         &conn->c_laddr, &conn->c_faddr);
                rds_conn_drop(conn);
                break;
        }
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_device *dev = ic->i_cm_id->device;
        struct ib_qp_init_attr attr;
        struct rds_ib_device *rds_ibdev;
        int ret;

        /*
         * It's normal to see a null device if an incoming connection races
         * with device removal, so we don't print a warning.
         */
        rds_ibdev = rds_ib_get_client_data(dev);
        if (!rds_ibdev)
                return -EOPNOTSUPP;

        /* add the conn now so that connection establishment has the dev */
        rds_ib_add_conn(rds_ibdev, conn);

        if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
        if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

        /* Protection domain and memory range */
        ic->i_pd = rds_ibdev->pd;
        ic->i_mr = rds_ibdev->mr;

        ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_send_ring.w_nr + 1, 0);
        if (IS_ERR(ic->i_send_cq)) {
                ret = PTR_ERR(ic->i_send_cq);
                ic->i_send_cq = NULL;
                rdsdebug("ib_create_cq send failed: %d\n", ret);
                goto out;
        }

        ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_recv_ring.w_nr, 0);
        if (IS_ERR(ic->i_recv_cq)) {
                ret = PTR_ERR(ic->i_recv_cq);
                ic->i_recv_cq = NULL;
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        if (ret) {
                rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        if (ret) {
                rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
                goto out;
        }

        /* XXX negotiate max send/recv with remote? */
        memset(&attr, 0, sizeof(attr));
        attr.event_handler = rds_ib_qp_event_handler;
        attr.qp_context = conn;
        /* + 1 to allow for the single ack message */
        attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
        attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
        attr.cap.max_send_sge = rds_ibdev->max_sge;
        attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
        attr.send_cq = ic->i_send_cq;
        attr.recv_cq = ic->i_recv_cq;

        /*
         * XXX this can fail if max_*_wr is too large? Are we supposed
         * to back off until we get a value that the hardware can support?
         */
        ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
        if (ret) {
                rdsdebug("rdma_create_qp failed: %d\n", ret);
                goto out;
        }

        ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_send_ring.w_nr *
                                                        sizeof(struct rds_header),
                                                &ic->i_send_hdrs_dma, GFP_KERNEL);
        if (!ic->i_send_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent send failed\n");
                goto out;
        }

        ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_recv_ring.w_nr *
                                                        sizeof(struct rds_header),
                                                &ic->i_recv_hdrs_dma, GFP_KERNEL);
        if (!ic->i_recv_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent recv failed\n");
                goto out;
        }

        ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
                                          &ic->i_ack_dma, GFP_KERNEL);
        if (!ic->i_ack) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent ack failed\n");
                goto out;
        }

        ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
                                   ibdev_to_node(dev));
        if (!ic->i_sends) {
                ret = -ENOMEM;
                rdsdebug("send allocation failed\n");
                goto out;
        }

        ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
                                   ibdev_to_node(dev));
        if (!ic->i_recvs) {
                ret = -ENOMEM;
                rdsdebug("recv allocation failed\n");
                goto out;
        }

        rds_ib_recv_init_ack(ic);

        rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
                 ic->i_send_cq, ic->i_recv_cq);

out:
        rds_ib_dev_put(rds_ibdev);
        return ret;
}

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        u16 common;
        u32 version = 0;

        /*
         * rdma_cm private data is odd - when there is any private data in the
         * request, we will be given a pretty large buffer without telling us the
         * original size. The only way to tell the difference is by looking at
         * the contents, which are initialized to zero.
         * If the protocol version fields aren't set, this is a connection attempt
         * from an older version. This could be 3.0 or 2.0 - we can't tell.
         * We really should have changed this for OFED 1.3 :-(
         */

        /* Be paranoid. RDS always has privdata */
        if (!event->param.conn.private_data_len) {
                printk(KERN_NOTICE "RDS incoming connection has no private data, "
                       "rejecting\n");
                return 0;
        }

        /* Even if len is crap *now* I still want to check it. -ASG */
        if (event->param.conn.private_data_len < sizeof(*dp) ||
            dp->dp_protocol_major == 0)
                return RDS_PROTOCOL_3_0;

        common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
        if (dp->dp_protocol_major == 3 && common) {
                version = RDS_PROTOCOL_3_0;
                while ((common >>= 1) != 0)
                        version++;
        } else
                printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
                                   &dp->dp_saddr,
                                   dp->dp_protocol_major,
                                   dp->dp_protocol_minor);

        return version;
}
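
/*
 * A worked example of the negotiation above (mask value assumed for
 * illustration): with RDS_IB_SUPPORTED_PROTOCOLS == 0x0003 (minors 0
 * and 1 supported) and a peer advertising dp_protocol_minor_mask ==
 * 0x0007 (minors 0-2), common == 0x0003. The loop then finds the
 * highest common set bit: the first shift leaves 0x0001 and bumps
 * version to RDS_PROTOCOL(3, 1); the second shift clears common and
 * ends the loop, so we settle on 3.1, the newest minor both sides
 * support.
 */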

int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_cm_event *event)
{
        __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
        __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        struct rds_ib_connect_private dp_rep;
        struct rds_connection *conn = NULL;
        struct rds_ib_connection *ic = NULL;
        struct rdma_conn_param conn_param;
        u32 version;
        int err = 1, destroy = 1;

        /* Check whether the remote protocol version matches ours. */
        version = rds_ib_protocol_compatible(event);
        if (!version)
                goto out;

        rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
                 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
                 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
                 (unsigned long long)be64_to_cpu(lguid),
                 (unsigned long long)be64_to_cpu(fguid));

        conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
                               GFP_KERNEL);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
                goto out;
        }

        /*
         * The connection request may occur while a previous connection
         * still exists, e.g. in case of failover. But as connections may
         * be initiated simultaneously by both hosts, we have a random
         * backoff mechanism - see the comment above rds_queue_reconnect().
         */
        mutex_lock(&conn->c_cm_lock);
        if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
                if (rds_conn_state(conn) == RDS_CONN_UP) {
                        rdsdebug("incoming connect while connecting\n");
                        rds_conn_drop(conn);
                        rds_ib_stats_inc(s_ib_listen_closed_stale);
                } else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
                        /* Wait and see - our connect may still be succeeding */
                        rds_ib_stats_inc(s_ib_connect_raced);
                }
                goto out;
        }

        ic = conn->c_transport_data;

        rds_ib_set_protocol(conn, version);
        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        BUG_ON(cm_id->context);
        BUG_ON(ic->i_cm_id);

        ic->i_cm_id = cm_id;
        cm_id->context = conn;

        /* We got halfway through setting up the ib_connection, if we
         * fail now, we have to take the long route out of this mess. */
        destroy = 0;

        err = rds_ib_setup_qp(conn);
        if (err) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
                                  event->param.conn.responder_resources,
                                  event->param.conn.initiator_depth);

        /* rdma_accept() calls rdma_reject() internally if it fails */
        err = rdma_accept(cm_id, &conn_param);
        if (err)
                rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);

out:
        if (conn)
                mutex_unlock(&conn->c_cm_lock);
        if (err)
                rdma_reject(cm_id, NULL, 0);
        return destroy;
}

int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
        struct rds_connection *conn = cm_id->context;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rdma_conn_param conn_param;
        struct rds_ib_connect_private dp;
        int ret;

        /* If the peer doesn't do protocol negotiation, we must
         * default to RDSv3.0 */
        rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
        ic->i_flowctl = rds_ib_sysctl_flow_control;     /* advertise flow control */

        ret = rds_ib_setup_qp(conn);
        if (ret) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
                                  UINT_MAX, UINT_MAX);
        ret = rdma_connect(cm_id, &conn_param);
        if (ret)
                rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
        /* Beware - returning non-zero tells the rdma_cm to destroy
         * the cm_id. We should certainly not do it as long as we still
         * "own" the cm_id. */
        if (ret) {
                if (ic->i_cm_id == cm_id)
                        ret = 0;
        }
        return ret;
}

int rds_ib_conn_connect(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct sockaddr_in src, dest;
        int ret;

        /* XXX I wonder what effect the port space has */
        /* delegate cm event handler to rdma_transport */
        ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
                                     RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(ic->i_cm_id)) {
                ret = PTR_ERR(ic->i_cm_id);
                ic->i_cm_id = NULL;
                rdsdebug("rdma_create_id() failed: %d\n", ret);
                goto out;
        }

        rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

        src.sin_family = AF_INET;
        src.sin_addr.s_addr = (__force u32)conn->c_laddr;
        src.sin_port = (__force u16)htons(0);

        dest.sin_family = AF_INET;
        dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
        dest.sin_port = (__force u16)htons(RDS_PORT);

        ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
                                (struct sockaddr *)&dest,
                                RDS_RDMA_RESOLVE_TIMEOUT_MS);
        if (ret) {
                rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
                         ret);
                rdma_destroy_id(ic->i_cm_id);
                ic->i_cm_id = NULL;
        }

out:
        return ret;
}
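
/*
 * For context: once rdma_resolve_addr() succeeds, the rest of the
 * active-side handshake arrives as events on the handler registered
 * above. The usual rdma_cm sequence is roughly ADDR_RESOLVED ->
 * ROUTE_RESOLVED -> rds_ib_cm_initiate_connect() -> ESTABLISHED ->
 * rds_ib_cm_connect_complete(), with the dispatch living in
 * rds_rdma_cm_event_handler() (rdma_transport.c).
 */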

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup. In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_shutdown(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int err = 0;

        rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
                 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
                 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

        if (ic->i_cm_id) {
                struct ib_device *dev = ic->i_cm_id->device;

                rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
                err = rdma_disconnect(ic->i_cm_id);
                if (err) {
                        /* Actually this may happen quite frequently, when
                         * an outgoing connect raced with an incoming connect.
                         */
                        rdsdebug("failed to disconnect, cm: %p err %d\n",
                                 ic->i_cm_id, err);
                }

                /*
                 * We want to wait for tx and rx completion to finish
                 * before we tear down the connection, but we have to be
                 * careful not to get stuck waiting on a send ring that
                 * only has unsignaled sends in it. We've shut down new
                 * sends before getting here, so by waiting for signaled
                 * sends to complete we're ensured that there will be no
                 * more tx processing.
                 */
                wait_event(rds_ib_ring_empty_wait,
                           rds_ib_ring_empty(&ic->i_recv_ring) &&
                           (atomic_read(&ic->i_signaled_sends) == 0));
                tasklet_kill(&ic->i_recv_tasklet);

                if (ic->i_send_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_send_ring.w_nr *
                                                     sizeof(struct rds_header),
                                             ic->i_send_hdrs,
                                             ic->i_send_hdrs_dma);

                if (ic->i_recv_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_recv_ring.w_nr *
                                                     sizeof(struct rds_header),
                                             ic->i_recv_hdrs,
                                             ic->i_recv_hdrs_dma);

                if (ic->i_ack)
                        ib_dma_free_coherent(dev, sizeof(struct rds_header),
                                             ic->i_ack, ic->i_ack_dma);

                if (ic->i_sends)
                        rds_ib_send_clear_ring(ic);
                if (ic->i_recvs)
                        rds_ib_recv_clear_ring(ic);

                if (ic->i_cm_id->qp)
                        rdma_destroy_qp(ic->i_cm_id);
                if (ic->i_send_cq)
                        ib_destroy_cq(ic->i_send_cq);
                if (ic->i_recv_cq)
                        ib_destroy_cq(ic->i_recv_cq);
                rdma_destroy_id(ic->i_cm_id);

                /*
                 * Move connection back to the nodev list.
                 */
                if (ic->rds_ibdev)
                        rds_ib_remove_conn(ic->rds_ibdev, conn);

                ic->i_cm_id = NULL;
                ic->i_pd = NULL;
                ic->i_mr = NULL;
                ic->i_send_cq = NULL;
                ic->i_recv_cq = NULL;
                ic->i_send_hdrs = NULL;
                ic->i_recv_hdrs = NULL;
                ic->i_ack = NULL;
        }
        BUG_ON(ic->rds_ibdev);

        /* Clear pending transmit */
        if (ic->i_data_op) {
                struct rds_message *rm;

                rm = container_of(ic->i_data_op, struct rds_message, data);
                rds_message_put(rm);
                ic->i_data_op = NULL;
        }

        /* Clear the ACK state */
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_set(&ic->i_ack_next, 0);
#else
        ic->i_ack_next = 0;
#endif
        ic->i_ack_recv = 0;

        /* Clear flow control state */
        ic->i_flowctl = 0;
        atomic_set(&ic->i_credits, 0);

        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        if (ic->i_ibinc) {
                rds_inc_put(&ic->i_ibinc->ii_inc);
                ic->i_ibinc = NULL;
        }

        vfree(ic->i_sends);
        ic->i_sends = NULL;
        vfree(ic->i_recvs);
        ic->i_recvs = NULL;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_ib_connection *ic;
        unsigned long flags;
        int ret;

        /* XXX too lazy? */
        ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
        if (!ic)
                return -ENOMEM;

        ret = rds_ib_recv_alloc_caches(ic);
        if (ret) {
                kfree(ic);
                return ret;
        }

        INIT_LIST_HEAD(&ic->ib_node);
        tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
                     (unsigned long)ic);
        mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&ic->i_ack_lock);
#endif
        atomic_set(&ic->i_signaled_sends, 0);

        /*
         * rds_ib_conn_shutdown() waits for these to be emptied so they
         * must be initialized before it can be called.
         */
        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        ic->conn = conn;
        conn->c_transport_data = ic;

        spin_lock_irqsave(&ib_nodev_conns_lock, flags);
        list_add_tail(&ic->ib_node, &ib_nodev_conns);
        spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

        rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
        return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
        struct rds_ib_connection *ic = arg;
        spinlock_t *lock_ptr;

        rdsdebug("ic %p\n", ic);

        /*
         * Conn is either on a dev's list or on the nodev list.
         * A race with shutdown() or connect() would cause problems
         * (since rds_ibdev would change) but that should never happen.
         */
        lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

        spin_lock_irq(lock_ptr);
        list_del(&ic->ib_node);
        spin_unlock_irq(lock_ptr);

        rds_ib_recv_free_caches(ic);

        kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
        va_list ap;

        rds_conn_drop(conn);

        va_start(ap, fmt);
        vprintk(fmt, ap);
        va_end(ap);
}