vnic_sdma.c

/*
 * Copyright(c) 2017 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE		BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED	BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN	32
#define HFI1_VNIC_SDMA_DESC_WTRMRK	64
#define HFI1_VNIC_SDMA_RETRY_COUNT	1

/*
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 * @retry_count: tx retry count
 */
struct vnic_txreq {
	struct sdma_txreq txreq;
	struct hfi1_vnic_sdma *sdma;

	struct sk_buff *skb;
	unsigned char pad[HFI1_VNIC_MAX_PAD];
	u16 plen;
	__le64 pbc_val;

	u32 retry_count;
};
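
/*
 * vnic_sdma_complete - sdma transmit completion callback
 *
 * Called by the SDMA layer when the request has finished (or been
 * aborted): cleans up the txreq mappings, frees the skb, and returns
 * the vnic_txreq to the per-device txreq cache.
 */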
static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}
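
/*
 * build_vnic_ulp_payload - add the skb payload to the tx request
 *
 * Adds the skb linear data, each page fragment, and the trailing pad
 * bytes (if any) to the sdma txreq.
 */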
static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];

		/* combine physically continuous fragments later? */
		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      frag->page_offset,
				      skb_frag_size(frag));
		if (unlikely(ret))
			goto bail_txadd;
	}

	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}
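
/*
 * build_vnic_tx_desc - build the complete tx descriptor
 *
 * Initializes the sdma txreq for the total packet length (8-byte PBC +
 * skb data + pad), adds the PBC as the first piece, and then adds the
 * ULP payload.
 */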
static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2; /* PBC */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add pbc */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);

bail_txadd:
	return ret;
}

/* setup the last plen bytes of pad */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}
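
/*
 * hfi1_vnic_send_dma - transmit an skb on a vnic tx queue via SDMA
 *
 * Allocates a vnic_txreq, builds the tx descriptor, and hands it to the
 * queue's sdma engine. On -EBUSY the skb is not freed so the caller can
 * retry after the queue wakes up; on other errors the skb is consumed.
 */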
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;

	tx->retry_count = 0;
	ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
			      &tx->txreq, vnic_sdma->pkts_sent);
	/* When -ECOMM, sdma callback will be called with ABORT status */
	if (unlikely(ret && unlikely(ret != -ECOMM)))
		goto free_desc;

	if (!ret) {
		vnic_sdma->pkts_sent = true;
		iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
	}

	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);
	else
		vnic_sdma->pkts_sent = false;

	return ret;
}

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list so it is woken up when
 * descriptors become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait_work *wait,
				struct sdma_txreq *txreq,
				uint seq,
				bool pkts_sent)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait->iow, struct hfi1_vnic_sdma, wait);
	struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);

	if (sdma_progress(sde, seq, txreq))
		if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT)
			return -EAGAIN;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&vnic_sdma->wait.list))
		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
}

/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the Tx
 * queue's wait structure was previously added to the sdma engine's dmawait
 * list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}
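
/*
 * hfi1_vnic_sdma_write_avail - check whether a tx queue can accept packets
 *
 * Returns true while the queue's sdma state is ACTIVE, i.e. it has not
 * been deferred waiting for descriptors.
 */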
inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}
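
/*
 * hfi1_vnic_sdma_init - initialize per tx queue sdma state
 *
 * Sets up the iowait structure, assigns an sdma engine, and marks each
 * queue ACTIVE. A free descriptor watermark request is queued so that a
 * deferred queue is only woken once enough descriptors are available.
 */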
void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
			    hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL);
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* Add a free descriptor watermark for wakeups */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			struct iowait_work *work;

			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			work = iowait_get_ib_work(&vnic_sdma->wait);
			list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
		}
	}
}
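
/*
 * hfi1_vnic_txreq_init - create the per device vnic_txreq kmem cache
 */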
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 NULL);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;

	return 0;
}
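
/*
 * hfi1_vnic_txreq_deinit - destroy the vnic_txreq kmem cache
 */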
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}