iser_memory.c

/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *mem_reg);

static
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
		     struct iser_data_buf *mem,
		     struct iser_reg_resources *rsc,
		     struct iser_mem_reg *mem_reg);

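/*
 * Per-device registration strategy dispatch tables: fmr_ops drives
 * the FMR pool path when the device exposes the FMR verbs, while
 * fastreg_ops drives fast registration work requests when memory
 * management extensions are supported. iser_assign_reg_ops() picks
 * one of the two at device initialization time.
 */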
static const struct iser_reg_ops fastreg_ops = {
	.alloc_reg_res	= iser_alloc_fastreg_pool,
	.free_reg_res	= iser_free_fastreg_pool,
	.reg_mem	= iser_fast_reg_mr,
	.unreg_mem	= iser_unreg_mem_fastreg,
	.reg_desc_get	= iser_reg_desc_get_fr,
	.reg_desc_put	= iser_reg_desc_put_fr,
};

static const struct iser_reg_ops fmr_ops = {
	.alloc_reg_res	= iser_alloc_fmr_pool,
	.free_reg_res	= iser_free_fmr_pool,
	.reg_mem	= iser_fast_reg_fmr,
	.unreg_mem	= iser_unreg_mem_fmr,
	.reg_desc_get	= iser_reg_desc_get_fmr,
	.reg_desc_put	= iser_reg_desc_put_fmr,
};

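/*
 * Completion handler for registration/invalidation work requests.
 * These are posted unsignaled (send_flags == 0), so a completion
 * arriving here only serves to report an error.
 */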
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	iser_err_comp(wc, "memreg");
}

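/**
 * iser_assign_reg_ops - set the memory registration strategy
 * @device: iser device to probe
 *
 * Prefers the FMR pool when the device implements the FMR verbs,
 * falls back to fast registration when memory management extensions
 * are advertised, and fails when neither is available.
 */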
int iser_assign_reg_ops(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;

	/* Assign function handles - based on FMR support */
	if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
	    ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->reg_ops = &fmr_ops;
	} else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->reg_ops = &fastreg_ops;
		device->remote_inv_sup = iser_always_reg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	return 0;
}

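/*
 * Fastreg descriptors live on a per-connection free list; get unlinks
 * the first descriptor under the pool lock and put returns it.
 */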
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);

	return desc;
}

void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	list_add(&desc->list, &fr_pool->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);
}

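/*
 * The FMR pool holds a single descriptor that is never unlinked from
 * the list, so get just peeks at the first entry and put is a no-op.
 */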
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

	return list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
}

void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc)
{
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec npages %d data length %lld\n",
		 page_vec->npages, page_vec->fake_mr.length);
	for (i = 0; i < page_vec->npages; i++)
		iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
}

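/**
 * iser_dma_map_task_data - DMA-map a task's scatterlist
 * @iser_task: iser task holding the buffer
 * @data:      buffer to map
 * @iser_dir:  iser data direction, recorded on the task
 * @dma_dir:   DMA direction for ib_dma_map_sg()
 */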
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

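/*
 * A scatterlist that maps to a single DMA entry needs no registration
 * work request: the PD's reserved local DMA lkey covers it directly.
 */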
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->pd->local_dma_lkey;
	/*
	 * FIXME: rework the registration code path to differentiate
	 * rkey/lkey use cases
	 */
	reg->rkey = device->mr ? device->mr->rkey : 0;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);
	return 0;
}

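/* ib_sg_to_pages() callback: collect page-aligned addresses into the
 * page vector hiding behind the fake MR. */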
static int iser_set_page(struct ib_mr *mr, u64 addr)
{
	struct iser_page_vec *page_vec =
		container_of(mr, struct iser_page_vec, fake_mr);

	page_vec->pages[page_vec->npages++] = addr;

	return 0;
}

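/*
 * FMR registration path: flatten the scatterlist into a page vector
 * via ib_sg_to_pages() and map it through the FMR pool, yielding an
 * lkey/rkey pair describing one virtually contiguous region.
 */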
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_page_vec *page_vec = rsc->page_vec;
	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	page_vec->npages = 0;
	page_vec->fake_mr.page_size = SIZE_4K;
	plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
			      mem->size, iser_set_page);
	if (unlikely(plen < mem->size)) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
				   page_vec->npages, page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	reg->sge.lkey = fmr->fmr->lkey;
	reg->rkey = fmr->fmr->rkey;
	reg->sge.addr = page_vec->fake_mr.iova;
	reg->sge.length = page_vec->fake_mr.length;
	reg->mem_h = fmr;

	iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
				      reg->mem_h);
	reg->mem_h = NULL;
}

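/*
 * Fill one T10-DIF signature domain (memory or wire side) from the
 * SCSI command's protection settings.
 */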
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}

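/* Build a local-invalidate work request for the MR's current rkey so
 * the MR can be re-registered with a freshly incremented key. */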
static inline void
iser_inv_rkey(struct ib_send_wr *inv_wr,
	      struct ib_mr *mr,
	      struct ib_cqe *cqe)
{
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_cqe = cqe;
	inv_wr->ex.invalidate_rkey = mr->rkey;
	inv_wr->send_flags = 0;
	inv_wr->num_sge = 0;
}

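/*
 * Register a signature MR spanning the data and (optional) protection
 * registrations: invalidate a still-valid signature MR, bump its key,
 * then chain an IB_WR_REG_SIG_MR work request on the task's TX
 * descriptor.
 */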
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_sig_handover_wr *wr;
	struct ib_mr *mr = pi_ctx->sig_mr;
	int ret;

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

	if (pi_ctx->sig_mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);

	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_SIG_MR;
	wr->wr.wr_cqe = cqe;
	wr->wr.sg_list = &data_reg->sge;
	wr->wr.num_sge = 1;
	wr->wr.send_flags = 0;
	wr->sig_attrs = sig_attrs;
	wr->sig_mr = mr;
	if (scsi_prot_sg_count(iser_task->sc))
		wr->prot = &prot_reg->sge;
	else
		wr->prot = NULL;
	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_READ |
			   IB_ACCESS_REMOTE_WRITE;
	pi_ctx->sig_mr_valid = 1;

	sig_reg->sge.lkey = mr->lkey;
	sig_reg->rkey = mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}

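/*
 * Fast registration path: map the scatterlist onto the descriptor's
 * MR page list with ib_map_mr_sg() and chain an IB_WR_REG_MR work
 * request, preceded by a local invalidate when the MR still carries
 * a valid key.
 */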
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_mr *mr = rsc->mr;
	struct ib_reg_wr *wr;
	int n;

	if (rsc->mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);

	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
	if (unlikely(n != mem->size)) {
		iser_err("failed to map sg (%d/%d)\n",
			 n, mem->size);
		return n < 0 ? n : -EINVAL;
	}

	wr = reg_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_MR;
	wr->wr.wr_cqe = cqe;
	wr->wr.send_flags = 0;
	wr->wr.num_sge = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE  |
		     IB_ACCESS_REMOTE_WRITE |
		     IB_ACCESS_REMOTE_READ;

	rsc->mr_valid = 1;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = mr->iova;
	reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);

	return 0;
}

static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}

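/**
 * iser_reg_rdma_mem - register a task's buffers for RDMA
 * @task:    iser task requiring registration
 * @dir:     registration direction
 * @all_imm: the entire payload goes out as immediate data, so remote
 *           access to the buffer is not needed
 *
 * A single-entry scatterlist with no protection information can ride
 * on the local DMA lkey (unless the always_register module parameter
 * forces registration); anything else pulls a descriptor from the
 * pool and registers data, protection and signature as needed.
 */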
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir,
		      bool all_imm)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_data_buf *mem = &task->data[dir];
	struct iser_mem_reg *reg = &task->rdma_reg[dir];
	struct iser_mem_reg *data_reg;
	struct iser_fr_desc *desc = NULL;
	bool use_dma_key;
	int err;

	use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
		      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;

	if (!use_dma_key) {
		desc = device->reg_ops->reg_desc_get(ib_conn);
		reg->mem_h = desc;
	}

	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
		data_reg = reg;
	else
		data_reg = &task->desc.data_reg;

	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
	if (unlikely(err))
		goto err_reg;

	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;

		if (scsi_prot_sg_count(task->sc)) {
			mem = &task->prot[dir];
			err = iser_reg_prot_sg(task, mem, desc,
					       use_dma_key, prot_reg);
			if (unlikely(err))
				goto err_reg;
		}

		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
				      prot_reg, reg);
		if (unlikely(err))
			goto err_reg;

		desc->pi_ctx->sig_protected = 1;
	}

	return 0;

err_reg:
	if (desc)
		device->reg_ops->reg_desc_put(ib_conn, desc);

	return err;
}

void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	device->reg_ops->unreg_mem(task, dir);
}