vnic_sdma.c

/*
 * Copyright(c) 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE		BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED	BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN	32
#define HFI1_VNIC_SDMA_DESC_WTRMRK	64
#define HFI1_VNIC_SDMA_RETRY_COUNT	1

/*
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 * @retry_count: tx retry count
 */
struct vnic_txreq {
	struct sdma_txreq txreq;
	struct hfi1_vnic_sdma *sdma;
	struct sk_buff *skb;
	unsigned char pad[HFI1_VNIC_MAX_PAD];
	u16 plen;
	__le64 pbc_val;
	u32 retry_count;
};

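/*
 * vnic_sdma_complete - SDMA transmit completion callback
 *
 * Called by the SDMA layer once the descriptor chain for a VNIC packet has
 * completed (or been aborted). Cleans up the txreq mappings, frees the skb
 * and returns the txreq to the per-device txreq cache.
 */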
static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

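/*
 * build_vnic_ulp_payload - add the skb payload to the SDMA descriptor list
 *
 * Adds the linear skb data, every page fragment and, when plen is non-zero,
 * the trailing pad bytes as descriptors of the txreq.
 */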
static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];

		/* combine physically continuous fragments later? */
		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      frag->page_offset,
				      skb_frag_size(frag));
		if (unlikely(ret))
			goto bail_txadd;
	}

	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}

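/*
 * build_vnic_tx_desc - build the complete SDMA descriptor list for a packet
 *
 * Initializes the txreq with the total length (PBC + skb + pad), adds the
 * 8-byte PBC as the first descriptor and then appends the ULP payload.
 */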
static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2;	/* PBC */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add pbc */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
	return ret;
}

/* setup the last plen bytes of pad */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

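/*
 * hfi1_vnic_send_dma - send a VNIC packet on the Tx queue's SDMA engine
 *
 * Allocates a txreq from the cache, builds the descriptor list and submits
 * it to the SDMA engine. On failure the skb is freed here, except for
 * -EBUSY, where the caller keeps ownership of the skb and may retry once
 * the queue is woken up.
 */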
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;
	tx->retry_count = 0;
	ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq);
	/* When -ECOMM, sdma callback will be called with ABORT status */
	if (unlikely(ret && unlikely(ret != -ECOMM)))
		goto free_desc;
	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);

	return ret;
}

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list to be woken up when descriptors
 * become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait *wait,
				struct sdma_txreq *txreq,
				unsigned int seq)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);

	if (sdma_progress(sde, seq, txreq))
		if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT)
			return -EAGAIN;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&vnic_sdma->wait.list))
		list_add_tail(&vnic_sdma->wait.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
}

/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's dmawait
 * list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

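/*
 * hfi1_vnic_sdma_write_avail - check if a Tx queue can accept more packets
 *
 * Returns true when the queue's SDMA state is active, i.e. it is not
 * deferred waiting for descriptors.
 */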
inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}

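/*
 * hfi1_vnic_sdma_init - initialize SDMA state for each Tx queue of a vport
 *
 * Sets up the iowait structure, SDMA engine assignment and active state for
 * every Tx queue, and adds a free descriptor watermark entry used to wake
 * deferred queues.
 */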
void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL);
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* Add a free descriptor watermark for wakeups */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			list_add_tail(&vnic_sdma->stx.list,
				      &vnic_sdma->wait.tx_head);
		}
	}
}

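/* one-time constructor for objects allocated from the vnic txreq cache */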
static void hfi1_vnic_txreq_kmem_cache_ctor(void *obj)
{
	struct vnic_txreq *tx = (struct vnic_txreq *)obj;

	memset(tx, 0, sizeof(*tx));
}

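/*
 * hfi1_vnic_txreq_init - create the per-device vnic txreq kmem cache
 */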
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 hfi1_vnic_txreq_kmem_cache_ctor);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;

	return 0;
}

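/* hfi1_vnic_txreq_deinit - destroy the per-device vnic txreq kmem cache */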
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}