frwr_ops.c

/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also sometimes referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not yet completed. Work
 * Queue ordering prevents a subsequent FAST_REG WR from executing
 * against that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
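
/* A condensed view of the fr_state transitions described above (the
 * authoritative transitions are the assignments in the functions below):
 *
 *	FRMR_IS_INVALID -- frwr_op_map() posts FAST_REG   --> FRMR_IS_VALID
 *	FRMR_IS_VALID   -- LOCAL_INV prepared (unmap path) --> FRMR_IS_INVALID
 *	any state       -- WR flushed/failed (QP in ERROR) --> FRMR_IS_STALE
 *	STALE or VALID  -- frwr_op_recover_mr() resets MR  --> FRMR_IS_INVALID
 */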

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	unsigned int depth = ia->ri_max_frmr_depth;
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
	kfree(r);
}

static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	frwr_op_release_mr(mw);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      ia->ri_max_frmr_depth));
	return 0;
}
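
/* Worked example of the sizing above, using hypothetical values (the
 * real constants live in xprt_rdma.h and the device attributes): if
 * RPCRDMA_MAX_DATA_SEGS were 256 and max_fast_reg_page_list_len were
 * 128, then ri_max_frmr_depth = 128 and delta = 128, so the loop runs
 * once and depth = 9. If the scaled max_send_wr then exceeded a
 * max_qp_wr of 16384, cdata->max_requests would become 16384 / 9 = 1820
 * and max_send_wr would be capped at 1820 * 9 = 16380.
 */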

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
			    const char *wr)
{
	frmr->fr_state = FRMR_IS_STALE;
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS)
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	complete_all(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOBUFS;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);
	if (i == 0)
		goto out_dmamap_err;

	dma_nents = ib_dma_map_sg(ia->ri_device,
				  mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, mw->mw_nents, mr->length);

	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mw->mw_handle = mr->rkey;
	mw->mw_length = mr->length;
	mw->mw_offset = mr->iova;

	*out = mw;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_senderr:
	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
	rpcrdma_defer_mr_recovery(mw);
	return -ENOTCONN;
}
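
/* Usage note: on success the caller receives the MW via *out. Its
 * mw_handle/mw_length/mw_offset triple is the (rkey, length, offset)
 * shape of an RDMA segment in the RPC-over-RDMA header, and the MW is
 * expected to end up on req->rl_registered so that ->ro_unmap_sync or
 * ->ro_unmap_safe can invalidate it once the RPC reply has arrived.
 */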

static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_frmr *f = &mw->frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_state = FRMR_IS_INVALID;

	invalidate_wr = &f->fr_invwr;
	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	f->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &f->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that req->rl_registered is not empty.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw, *tmp;
	struct rpcrdma_frmr *f;
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	f = NULL;
	invalidate_wrs = pos = prev = NULL;
	list_for_each_entry(mw, &req->rl_registered, mw_list) {
		pos = __frwr_prepare_linv_wr(mw);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;
		f = &mw->frmr;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc)
		goto reset_mrs;

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
unmap:
	list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
		list_del_init(&mw->mw_list);
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
	rdma_disconnect(ia->ri_id);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted. This is synchronous, and slow.
	 */
	list_for_each_entry(mw, &req->rl_registered, mw_list) {
		f = &mw->frmr;
		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
			__frwr_reset_mr(ia, mw);
			bad_wr = bad_wr->next;
		}
	}
	goto unmap;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = list_first_entry(&req->rl_registered,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);

		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map			= frwr_op_map,
	.ro_unmap_sync		= frwr_op_unmap_sync,
	.ro_unmap_safe		= frwr_op_unmap_safe,
	.ro_recover_mr		= frwr_op_recover_mr,
	.ro_open		= frwr_op_open,
	.ro_maxpages		= frwr_op_maxpages,
	.ro_init_mr		= frwr_op_init_mr,
	.ro_release_mr		= frwr_op_release_mr,
	.ro_displayname		= "frwr",
};
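
/* A minimal sketch of how a caller dispatches through this table,
 * assuming the ri_ops pointer carried by struct rpcrdma_ia in this
 * generation of the code (see xprt_rdma.h):
 *
 *	struct rpcrdma_mw *mw;
 *	int n;
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
 *	if (n < 0)
 *		return n;
 *	list_add(&mw->mw_list, &req->rl_registered);
 *	...
 *	r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
 */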