physical_ops.c

/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring-up, but is not considered safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

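/* Each of these entry points is invoked indirectly, through the
 * rpcrdma_memreg_ops table defined at the bottom of this file.
 */
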
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

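/* Set up the single DMA MR whose rkey is conveyed with every chunk
 * segment. Presumably this runs once, while the transport instance
 * is being created.
 */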
static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
		 struct rpcrdma_create_data_internal *cdata)
{
	struct ib_mr *mr;

	/* Obtain an rkey to use for RPC data payloads.
	 */
	mr = ib_get_dma_mr(ia->ri_pd,
			   IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_WRITE |
			   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(mr)) {
		pr_err("%s: ib_get_dma_mr failed with %lX\n",
		       __func__, PTR_ERR(mr));
		return -ENOMEM;
	}

	ia->ri_dma_mr = mr;
	return 0;
}

/* PHYSICAL memory registration conveys one page per chunk segment.
 */
static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt));
}

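/* ALLPHYSICAL keeps no per-transport registration state, so there
 * is nothing to allocate or set up here.
 */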
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}

/* The client's physical memory is already exposed for
 * remote access via RDMA READ or RDMA WRITE.
 */
static int
physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
	seg->mr_rkey = ia->ri_dma_mr->rkey;
	seg->mr_base = seg->mr_dma;
	seg->mr_nsegs = 1;
	return 1;
}

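/* A hedged sketch, not from this file, of how a caller might consume
 * ro_map: walk the segment array, advancing by however many segments
 * each call maps (always one in this mode). The ri_ops field name is
 * an assumption based on the ops table below.
 *
 *	while (nsegs > 0) {
 *		int n = ia->ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *
 *		if (n < 0)
 *			break;	(hypothetical error handling)
 *		seg += n;
 *		nsegs -= n;
 *	}
 */
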
/* Unmap a memory region, but leave it registered.
 */
static int
physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	rpcrdma_unmap_one(ia->ri_device, seg);
	return 1;
}

/* DMA unmap all memory regions that were mapped for "req".
 */
static void
physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int i;

	/* Walk the segment array while counting rl_nchunks down to
	 * zero, leaving the request with no mapped chunks.
	 */
	for (i = 0; req->rl_nchunks; --req->rl_nchunks)
		rpcrdma_unmap_one(device, &req->rl_segments[i++]);
}

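/* Likewise, nothing was allocated by physical_op_init, and the DMA MR
 * set up in physical_op_open is never deregistered, so there is
 * nothing to release here.
 */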
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
	.ro_map		= physical_op_map,
	.ro_unmap_sync	= physical_op_unmap_sync,
	.ro_unmap	= physical_op_unmap,
	.ro_open	= physical_op_open,
	.ro_maxpages	= physical_op_maxpages,
	.ro_init	= physical_op_init,
	.ro_destroy	= physical_op_destroy,
	.ro_displayname	= "physical",
};
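
/* A hedged sketch of how transport setup might select this mode; the
 * RPCRDMA_ALLPHYSICAL constant and the ri_ops field are assumptions
 * drawn from the naming pattern above, not taken from this file:
 *
 *	switch (memreg_strategy) {
 *	case RPCRDMA_ALLPHYSICAL:
 *		ia->ri_ops = &rpcrdma_physical_memreg_ops;
 *		break;
 *	}
 */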