cxgbit_ddp.c

/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "cxgbit.h"
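
/*
 * Fill one page pod (ppod) with the precomputed ppod header followed
 * by the page addresses of the DMA-mapped scatterlist. The walk state
 * (*sg_pp, *sg_off) is advanced for the caller so that consecutive
 * calls fill consecutive ppods; the final store one past
 * PPOD_PAGES_MAX reflects that each ppod carries PPOD_PAGES_MAX + 1
 * addresses, the last of which is repeated as the first address of
 * the next ppod.
 */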
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
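
/*
 * Allocate an skb and build the header of a ULP_TX memory-write work
 * request that carries @npods page pods as immediate data
 * (ULP_TX_SC_IMM), addressed at slot @idx of the adapter's ppod
 * region. The ppods themselves are filled in by the caller, directly
 * after the ulptx_idata sub-header.
 */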
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = __skb_put(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, tid);

	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(0) |
			 T5_ULP_MEMIO_IMM_V(1));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}
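
/*
 * Build one immediate-data work request covering @npods ppods, fill
 * each ppod from the scatterlist walk, and queue the skb on
 * csk->ppodq. Nothing is transmitted here; the queued WRs are
 * presumably flushed later by the connection's tx path, outside this
 * file.
 */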
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
			struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
			unsigned int npods, struct scatterlist **sg_pp,
			unsigned int *sg_off)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int i;

	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->data;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	__skb_queue_tail(&csk->ppodq, skb);

	return 0;
}
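
/*
 * Program all ppods backing this tag, at most ULPMEM_IDATA_MAX_NPPODS
 * per work request (the number that fits in a single immediate-data
 * WR).
 */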
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
		   struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;
	int ret = 0;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;

		ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					      &sg, &offset);
		if (ret < 0)
			break;
	}

	return ret;
}
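
/*
 * DDP requires a scatterlist that maps cleanly onto whole pages: each
 * entry must be 4-byte aligned, only the first entry may start at a
 * non-zero page offset, and every entry but the last must end exactly
 * on a page boundary (offset + length == PAGE_SIZE).
 */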
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
				unsigned int nents)
{
	unsigned int last_sgidx = nents - 1;
	unsigned int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && (len != PAGE_SIZE))) {
			return -EINVAL;
		}
	}

	return 0;
}
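
/*
 * Set up DDP for one WRITE command: reserve ppods covering the
 * transfer, DMA-map the scatterlist, build the ppod header from the
 * tag and tid, and queue the ppod-write WRs. Transfers shorter than
 * DDP_THRESHOLD or with an unsuitable scatterlist are rejected so the
 * command falls back to the regular, non-DDP receive path.
 * sgl->offset is cleared across dma_map_sg() and restored afterwards,
 * so the addresses programmed into the ppods are page aligned while
 * the original offset is carried in the ppod header instead.
 */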
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
		   unsigned int xferlen)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int ret;

	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the ttt in the outgoing r2t pdu
	 */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	if (ret < 0)
		return ret;
	ttinfo->npods = ret;

	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (!ret) {
		pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			 __func__, 0, xferlen, sgcnt);
		goto rel_ppods;
	}

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
}
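
/*
 * Called while an outgoing R2T PDU is being built: try to reserve DDP
 * resources for the command and advertise the DDP tag as the target
 * transfer tag (TTT), so the Data-Out PDUs it solicits can be placed
 * directly into the command's buffers. On failure the data is simply
 * received through the regular path.
 */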
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		   struct iscsi_r2t *r2t)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
	int ret = -EINVAL;

	if ((!ccmd->setup_ddp) ||
	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
		goto out;

	ccmd->setup_ddp = false;

	ttinfo->sgl = cmd->se_cmd.t_data_sg;
	ttinfo->nents = cmd->se_cmd.t_data_nents;

	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
	if (ret < 0) {
		pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
			 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

		ttinfo->sgl = NULL;
		ttinfo->nents = 0;
	} else {
		ccmd->release = true;
	}
out:
	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
	r2t->targ_xfer_tag = ttinfo->tag;
}
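
/*
 * Undo cxgbit_ddp_reserve() when the command is freed: release the
 * ppods and unmap the scatterlist. If the WRITE did not complete, the
 * connection is aborted first so the adapter cannot place data into
 * pages that are about to be freed.
 */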
void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

	if (ccmd->release) {
		struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

		if (ttinfo->sgl) {
			struct cxgbit_sock *csk = conn->context;
			struct cxgbit_device *cdev = csk->com.cdev;
			struct cxgbi_ppm *ppm = cdev2ppm(cdev);

			/* Abort the TCP conn if DDP is not complete to
			 * avoid any possibility of DDP after freeing
			 * the cmd.
			 */
			if (unlikely(cmd->write_data_done !=
				     cmd->se_cmd.data_length))
				cxgbit_abort_conn(csk);

			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

			dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
				     ttinfo->nents, DMA_FROM_DEVICE);
		} else {
			put_page(sg_page(&ccmd->sg));
		}

		ccmd->release = false;
	}
}
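
/*
 * Per-adapter DDP setup: size the ppod pool from the adapter's iSCSI
 * memory region, decode the supported page-size orders, and
 * initialize the ppod manager. DDP is enabled on the device only if a
 * usable default page-size index was found and the pool holds at
 * least 1024 ppods.
 */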
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct net_device *ndev = cdev->lldi.ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax;
	int ret, i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
			     cdev->lldi.pdev, &cdev->lldi, &tformat,
			     ppmax, lldi->iscsi_llimit,
			     lldi->vr->iscsi.start, 2);
	if (ret >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
		    (ppm->ppmax >= 1024))
			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
		ret = 0;
	}

	return ret;
}