ipoib_ib.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif
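
/*
 * Allocate an ipoib_ah wrapping a newly created address handle.  On
 * failure the ERR_PTR from rdma_create_ah() is returned (cast to
 * struct ipoib_ah *), so callers must check with IS_ERR() rather
 * than for NULL.
 */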
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct rdma_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = rdma_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}
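
/*
 * kref release callback: don't destroy the AH here, just move it to
 * the dead_ahs list, where the reaper task frees it once the last
 * send that used it has completed.
 */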
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}
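
/*
 * (Re)post receive buffer 'id' to the UD QP.  The IPOIB_OP_RECV bit
 * in wr_id lets the completion handler distinguish receives from
 * sends.  If the post fails, the buffer is unmapped and freed so the
 * ring slot doesn't point at a stale skb.
 */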
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, that is
	 * 64 bytes aligned
	 */
	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}
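
/*
 * Handle a single receive completion: validate wr_id, allocate a
 * replacement buffer, classify the packet (host/broadcast/multicast)
 * from the GRH dgid, drop our own multicast echoes, and hand the skb
 * to GRO.  The ring slot is reposted in all cases.
 */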
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv,
				   "failed recv event (status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;
	skb->dev = dev;

	if ((dev->features & NETIF_F_RXCSUM) &&
	    likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n",
			   wr_id);
}
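
/*
 * DMA-map a TX skb: the linear head (if any) with ib_dma_map_single()
 * and each page fragment with ib_dma_map_page().  On partial failure,
 * every mapping made so far is torn down and -EIO is returned.
 */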
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As a result of a completion error the QP can be transitioned to the
 * SQE state.  This function checks whether the (send) QP is in the
 * SQE state and, if so, moves it back to RTS so that it is functional
 * again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* currently we support only the SQE->RTS transition */
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}
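
/*
 * Handle a single send completion: unmap and free the skb, advance
 * tx_tail, wake the netdev queue once the ring drains to half full,
 * and on a real error (anything but a flush) schedule the QP state
 * check above to recover from a possible SQE transition.
 */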
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;

		ipoib_warn(priv,
			   "failed send event (status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work)
			return;

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}
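
/*
 * NAPI poll: drain up to 'budget' receive completions from the recv
 * CQ, dispatching each by the IPOIB_OP_CM / IPOIB_OP_RECV bits in
 * wr_id, then re-arm the CQ.  IB_CQ_REPORT_MISSED_EVENTS catches
 * completions that slipped in between the last poll and the re-arm,
 * in which case polling resumes.
 */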
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv =
		container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}
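
/*
 * Build the gather list for tx_req and post it as a single send WR.
 * When a TSO header is supplied ('head'), post IB_WR_LSO so the HCA
 * segments the payload itself; otherwise use a plain IB_WR_SEND.
 */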
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 dqpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id   = wr_id;
	priv->tx_wr.remote_qpn = dqpn;
	priv->tx_wr.ah         = address;

	if (head) {
		priv->tx_wr.mss       = skb_shinfo(skb)->gso_size;
		priv->tx_wr.header    = head;
		priv->tx_wr.hlen      = hlen;
		priv->tx_wr.wr.opcode = IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}
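
/*
 * Transmit one skb on the UD QP.  GSO skbs have their headers pulled
 * out and passed separately for LSO; non-GSO skbs larger than the
 * multicast MTU are rejected.  The skb is recorded in tx_ring before
 * posting, since the completion may run before post_send() even
 * returns.
 */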
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
	       struct ib_ah *address, u32 dqpn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return -1;
		}
		phead = NULL;
		hlen  = 0;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	}

	ipoib_dbg_data(priv,
		       "sending packet, length=%d address=%p dqpn=0x%06x\n",
		       skb->len, address, dqpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address, dqpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		rc = 0;
	} else {
		netif_trans_update(dev);

		rc = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */

	return rc;
}
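
/*
 * Destroy every dead AH whose last_send has been passed by tx_tail,
 * i.e. whose last send WR has completed, so the hardware can no
 * longer be using it.
 */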
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			rdma_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(priv->wq);
	ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	ipoib_flush_ah(dev);
}
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}
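
/*
 * Quiesce the send/receive rings: move the QP to the error state so
 * outstanding WRs get flushed, wait up to five seconds for them to
 * complete, and if the hardware appears wedged, reclaim the rings by
 * hand.  Finally reset the QP so it can be reused.
 */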
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize when
	 * all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv,
				   "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail,
				   recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}
int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->rn_ops->ndo_stop(dev);

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_flush_ah(dev);

	return 0;
}

void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}
int ipoib_ib_dev_open_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto out;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto out;
	}

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
out:
	return -1;
}
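
/*
 * Bring the IB side of the interface up: check that our P_Key is
 * present in the port's table, start the AH reaper, and call the
 * rdma netdev's ndo_open.  On failure, everything started here is
 * unwound again.
 */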
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (priv->rn_ops->ndo_open(dev)) {
		pr_warn("%s: Failed to open dev\n", dev->name);
		goto dev_stop;
	}

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;

dev_stop:
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	napi_enable(&priv->napi);
	ipoib_ib_dev_stop(dev);
	return -1;
}
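
/*
 * Set or clear IPOIB_PKEY_ASSIGNED according to whether our P_Key is
 * valid (the low 15 bits must be non-zero) and currently present in
 * the local port's P_Key table.
 */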
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
void ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	ipoib_mcast_start_thread(dev);
}

void ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);
}
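
/*
 * Poll the CQs to completion while the device is going down.
 * Successful receive completions are demoted to flush errors so that
 * nothing is passed up the stack past this point.
 */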
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

/*
 * Takes whatever value is in P_Key index 0 and updates priv->pkey.
 * Returns 0 if the P_Key value was changed, 1 otherwise.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making sure to set
		 * the full membership bit, so that we join the right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;

		/*
		 * Update the broadcast address in the priv->broadcast object,
		 * in case it already exists, otherwise no one will do that.
		 */
		if (priv->broadcast) {
			spin_lock_irq(&priv->lock);
			memcpy(priv->broadcast->mcmember.mgid.raw,
			       priv->dev->broadcast + 4,
			       sizeof(union ib_gid));
			spin_unlock_irq(&priv->lock);
		}

		return 0;
	}

	return 1;
}

/*
 * Returns 0 if the P_Key value was found in a different slot (or is
 * no longer present at all), 1 if it is still at the same index.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * Returns true if the device address of the IPoIB interface has changed
 * and the new address is a valid one (i.e. it is in the GID table),
 * false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
	union ib_gid search_gid;
	union ib_gid gid0;
	union ib_gid *netdev_gid;
	int err;
	u16 index;
	u8 port;
	bool ret = false;

	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
	if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
		return false;

	netif_addr_lock_bh(priv->dev);

	/* The subnet prefix may have changed, update it now so we won't have
	 * to do it later
	 */
	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

	search_gid.global.interface_id = priv->local_gid.global.interface_id;

	netif_addr_unlock_bh(priv->dev);

	err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
			  priv->dev, &port, &index);

	netif_addr_lock_bh(priv->dev);

	if (search_gid.global.interface_id !=
	    priv->local_gid.global.interface_id)
		/* There was a change while we were looking up the gid, bail
		 * here and let the next work sort this out
		 */
		goto out;

	/* The next section of code needs some background:
	 * Per IB spec the port GUID can't change if the HCA is powered on.
	 * The port GUID is the basis for GID at index 0, which is the basis
	 * for the default device address of an IPoIB interface.
	 *
	 * So it seems the flow should be:
	 * if user_changed_dev_addr && gid in gid tbl
	 *	set bit dev_addr_set
	 *	return true
	 * else
	 *	return false
	 *
	 * The issue is that there are devices that don't follow the spec,
	 * they change the port GUID when the HCA is powered, so in order
	 * not to break userspace applications, we need to check if the
	 * user wanted to control the device address and we assume that
	 * if he sets the device address back to be based on GID index 0,
	 * he no longer wishes to control it.
	 *
	 * If the user doesn't control the device address,
	 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed, it means
	 * the port GUID has changed and GID at index 0 has changed
	 * so we need to change priv->local_gid and priv->dev->dev_addr
	 * to reflect the new GID.
	 */
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		if (!err && port == priv->port) {
			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
			if (index == 0)
				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
					  &priv->flags);
			else
				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
			ret = true;
		} else {
			ret = false;
		}
	} else {
		if (!err && port == priv->port) {
			ret = true;
		} else {
			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
				memcpy(&priv->local_gid, &gid0,
				       sizeof(priv->local_gid));
				memcpy(priv->dev->dev_addr + 4, &gid0,
				       sizeof(priv->local_gid));
				ret = true;
			}
		}
	}

out:
	netif_addr_unlock_bh(priv->dev);

	return ret;
}
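
/*
 * Core flush routine, run at three levels: LIGHT revalidates the
 * device address, paths and multicast groups; NORMAL additionally
 * takes the IB device down and back up; HEAVY also restarts the QP,
 * which is needed when the P_Key (or P_Key index) has changed.
 * Child interfaces are flushed recursively first.
 */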
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level,
				 int nesting)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	int result;

	down_read_nested(&priv->vlan_rwsem, nesting);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);

	up_read(&priv->vlan_rwsem);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
	    level != IPOIB_FLUSH_HEAVY) {
		/* Make sure the dev_addr is set even if not flushing */
		if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		/* interface is down. update pkey and leave. */
		if (level == IPOIB_FLUSH_HEAVY) {
			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
				update_parent_pkey(priv);
			else
				update_child_pkey(priv);
		} else if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/* child devices chase their origin pkey value, while non-child
		 * (parent) devices always take whatever is present in pkey index 0
		 */
		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
			result = update_child_pkey(priv);
			if (result) {
				/* restart QP only if P_Key index is changed */
				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
			}
		} else {
			result = update_parent_pkey(priv);
			/* restart QP only if P_Key value changed */
			if (result) {
				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
				return;
			}
		}
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		int oper_up;

		ipoib_mark_paths_invalid(dev);
		/* Set IPoIB operation as down to prevent races between:
		 * the flush flow which leaves MCG and on the fly joins
		 * which can happen during that time. mcast restart task
		 * should deal with join requests we missed.
		 */
		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_mcast_dev_flush(dev);
		if (oper_up)
			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_flush_ah(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev);

	if (level == IPOIB_FLUSH_HEAVY) {
		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
			ipoib_ib_dev_stop(dev);

		if (ipoib_ib_dev_open(dev) != 0)
			return;

		if (netif_queue_stopped(dev))
			netif_start_queue(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		if (ipoib_dev_addr_changed_valid(priv))
			ipoib_mcast_restart_task(&priv->restart_task);
	}
}
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");
	/*
	 * We must make sure there are no more (path) completions
	 * that may wish to touch priv fields that are no longer valid
	 */
	ipoib_flush_paths(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	/*
	 * None of our ah references are freed until after
	 * ipoib_mcast_dev_flush(), ipoib_flush_paths, and
	 * the neighbor garbage collection is stopped and reaped.
	 * That should all be done now, so make a final ah flush.
	 */
	ipoib_stop_ah(dev);

	clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	priv->rn_ops->ndo_uninit(dev);

	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}
}