iwcm.c

/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
        [ECONNRESET]    = "reset by remote host",
        [ECONNREFUSED]  = "refused by remote application",
        [ETIMEDOUT]     = "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
        size_t index;

        /* iWARP uses negative errnos */
        index = -reason;

        if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
            iwcm_rej_reason_strs[index])
                return iwcm_rej_reason_strs[index];
        else
                return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);
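
/*
 * Netlink callback table for iWARP port mapper (iwpm) messages from the
 * user-space port mapper daemon; registered with rdma_nl_register() in
 * iw_cm_init() below.
 */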
static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
        [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
        [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
        [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
        [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
        [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
        [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
        [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

static struct workqueue_struct *iwcm_wq;
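
/*
 * One iwcm_work element is consumed per CM event delivered to a cm_id.
 * Elements are pre-allocated per cm_id (see alloc_work_entries() below)
 * and queued on iwcm_wq for processing by cm_work_handler().
 */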
struct iwcm_work {
        struct work_struct work;
        struct iwcm_id_private *cm_id;
        struct list_head list;
        struct iw_cm_event event;
        struct list_head free_list;
};
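
/*
 * "default_backlog" is exposed as the net.iw_cm.default_backlog sysctl
 * (registered in iw_cm_init()); it is used when a caller passes
 * backlog == 0 to iw_cm_listen().
 */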
static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
        {
                .procname       = "default_backlog",
                .data           = &default_backlog,
                .maxlen         = sizeof(default_backlog),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}

static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, free the cm_id and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
                return 1;
        }
        return 0;
}
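
/*
 * add_ref/rem_ref are installed in the iw_cm_id by iw_create_cm_id() below
 * so that providers can hold a reference on the cm_id across event delivery.
 */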
static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        (void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
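
/* Move the QP to the ERROR state; used for an abrupt close of the connection. */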
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;

                /* QP could be NULL for user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * connect_request event delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);

                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall. A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        /*
         * Since we're deleting the cm_id, drop any events that
         * might arrive before the last dereference.
         */
        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                cm_id->device->iwcm->destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving connection request event notification or
                 * returned non zero from the event callback function.
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_id->device->iwcm->reject(cm_id, NULL, 0);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (cm_id->mapped) {
                iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
                iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
        }

        (void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/**
 * iw_cm_check_wildcard - If IP address is 0 then use original
 * @pm_addr: sockaddr containing the ip to check for wildcard
 * @cm_addr: sockaddr containing the actual IP address
 * @cm_outaddr: sockaddr to set the IP address in, leaving the port unchanged
 *
 * Checks the pm_addr for wildcard and then sets cm_outaddr's
 * IP to the actual (cm_addr).
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
                                 struct sockaddr_storage *cm_addr,
                                 struct sockaddr_storage *cm_outaddr)
{
        if (pm_addr->ss_family == AF_INET) {
                struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;

                if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
                        struct sockaddr_in *cm4_addr =
                                (struct sockaddr_in *)cm_addr;
                        struct sockaddr_in *cm4_outaddr =
                                (struct sockaddr_in *)cm_outaddr;

                        cm4_outaddr->sin_addr = cm4_addr->sin_addr;
                }
        } else {
                struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;

                if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
                        struct sockaddr_in6 *cm6_addr =
                                (struct sockaddr_in6 *)cm_addr;
                        struct sockaddr_in6 *cm6_outaddr =
                                (struct sockaddr_in6 *)cm_outaddr;

                        cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
                }
        }
}

/**
 * iw_cm_map - Use portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: Indicates the active side when true
 * returns nonzero for error only if iwpm_create_mapinfo() fails
 *
 * Tries to add a mapping for a port using the Portmapper. If
 * successful in mapping the IP/Port it will check the remote
 * mapped IP address for a wildcard IP address and replace the
 * zero IP address with the remote_addr.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
        struct iwpm_dev_data pm_reg_msg;
        struct iwpm_sa_data pm_msg;
        int status;

        cm_id->m_local_addr = cm_id->local_addr;
        cm_id->m_remote_addr = cm_id->remote_addr;

        memcpy(pm_reg_msg.dev_name, cm_id->device->name,
               sizeof(pm_reg_msg.dev_name));
        memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
               sizeof(pm_reg_msg.if_name));

        if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
            !iwpm_valid_pid())
                return 0;

        cm_id->mapped = true;
        pm_msg.loc_addr = cm_id->local_addr;
        pm_msg.rem_addr = cm_id->remote_addr;
        if (active)
                status = iwpm_add_and_query_mapping(&pm_msg,
                                                    RDMA_NL_IWCM);
        else
                status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

        if (!status) {
                cm_id->m_local_addr = pm_msg.mapped_loc_addr;
                if (active) {
                        cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
                        iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
                                             &cm_id->remote_addr,
                                             &cm_id->m_remote_addr);
                }
        }

        return iwpm_create_mapinfo(&cm_id->local_addr,
                                   &cm_id->m_local_addr,
                                   RDMA_NL_IWCM);
}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        if (!backlog)
                backlog = default_backlog;

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = iw_cm_map(cm_id, false);
                if (!ret)
                        ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
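
/*
 * Illustrative sketch (not part of the original file): roughly how a kernel
 * ULP might set up a passive endpoint with this API. The handler name,
 * context and listen address are hypothetical and error handling is
 * abbreviated; CONNECT_REQUEST events are then delivered to the handler.
 *
 *      struct iw_cm_id *id;
 *      int ret;
 *
 *      id = iw_create_cm_id(ib_dev, my_cm_handler, my_ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      memcpy(&id->local_addr, &my_listen_addr, sizeof(my_listen_addr));
 *      ret = iw_cm_listen(id, 0);      (backlog 0 selects default_backlog)
 *      if (ret)
 *              iw_destroy_cm_id(id);
 *      return ret;
 */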

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->reject(cm_id, private_data,
                                          private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                ret = -EINVAL;
                goto err;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                ret = -EINVAL;
                goto err;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = iw_cm_map(cm_id, true);
        if (!ret)
                ret = cm_id->device->iwcm->connect(cm_id, iw_param);
        if (!ret)
                return 0;       /* success */

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->qp) {
                cm_id->device->iwcm->rem_ref(qp);
                cm_id_priv->qp = NULL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
err:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);
        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
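
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * active side might be driven once local_addr/remote_addr are filled in.
 * The private data, ORD/IRD values and QP are hypothetical and would come
 * from the ULP's own negotiation and resources; on success the
 * CONNECT_REPLY event is later delivered to the cm_handler.
 *
 *      struct iw_cm_conn_param param = {
 *              .private_data           = pdata,
 *              .private_data_len       = pdata_len,
 *              .ord                    = 16,
 *              .ird                    = 16,
 *              .qpn                    = my_qp->qp_num,
 *      };
 *
 *      ret = iw_cm_connect(id, &param);
 */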

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->m_local_addr = iw_event->local_addr;
        cm_id->m_remote_addr = iw_event->remote_addr;
        cm_id->local_addr = listen_id_priv->id.local_addr;

        ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
                                   &iw_event->remote_addr,
                                   &cm_id->remote_addr,
                                   RDMA_NL_IWCM);
        if (ret) {
                cm_id->remote_addr = iw_event->remote_addr;
        } else {
                iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
                                     &iw_event->local_addr,
                                     &cm_id->local_addr);
                iw_event->local_addr = cm_id->local_addr;
                iw_event->remote_addr = cm_id->remote_addr;
        }

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        /*
         * We could be destroying the listening id. If so, ignore this
         * upcall.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == 0) {
                cm_id_priv->id.m_local_addr = iw_event->local_addr;
                cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
                iw_event->local_addr = cm_id_priv->id.local_addr;
                iw_event->remote_addr = cm_id_priv->id.remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
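
/*
 * Dispatch one dequeued event to the state-specific handler above, based
 * on the event type.
 */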
static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }

        return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
                        ret = process_event(cm_id_priv, &levent);
                        if (ret)
                                destroy_cm_id(&cm_id_priv->id);
                } else
                        pr_debug("dropping event %d\n", levent.event);
                if (iwcm_deref_id(cm_id_priv))
                        return;
                if (empty)
                        return;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *            0 - the event was handled.
 *      -ENOMEM - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
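
/*
 * Attributes returned for the INIT and RTR transitions: QP state plus the
 * remote write/read access flags.
 */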
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
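
/*
 * For the RTS transition the iWARP CM has nothing to add, so an empty
 * attribute mask is returned.
 */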
static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
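
/*
 * Illustrative sketch (not part of the original file): a caller typically
 * sets qp_attr->qp_state to the target state and feeds the result to
 * ib_modify_qp(); the cm_id and qp names below are hypothetical.
 *
 *      struct ib_qp_attr qp_attr;
 *      int qp_attr_mask, ret;
 *
 *      qp_attr.qp_state = IB_QPS_INIT;
 *      ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *      if (!ret)
 *              ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */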
static int __init iw_cm_init(void)
{
        int ret;

        ret = iwpm_init(RDMA_NL_IWCM);
        if (ret)
                pr_err("iw_cm: couldn't init iwpm\n");
        else
                rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
        iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
        if (!iwcm_wq)
                return -ENOMEM;

        iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
                                                 iwcm_ctl_table);
        if (!iwcm_ctl_table_hdr) {
                pr_err("iw_cm: couldn't register sysctl paths\n");
                destroy_workqueue(iwcm_wq);
                return -ENOMEM;
        }

        return 0;
}

static void __exit iw_cm_cleanup(void)
{
        unregister_net_sysctl_table(iwcm_ctl_table_hdr);
        destroy_workqueue(iwcm_wq);
        rdma_nl_unregister(RDMA_NL_IWCM);
        iwpm_exit(RDMA_NL_IWCM);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);