@@ -83,6 +83,18 @@ physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 	return 1;
 }
 
+/* DMA unmap all memory regions that were mapped for "req".
+ */
+static void
+physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+{
+	struct ib_device *device = r_xprt->rx_ia.ri_device;
+	unsigned int i;
+
+	for (i = 0; req->rl_nchunks; --req->rl_nchunks)
+		rpcrdma_unmap_one(device, &req->rl_segments[i++]);
+}
+
 static void
 physical_op_destroy(struct rpcrdma_buffer *buf)
 {
@@ -90,6 +102,7 @@ physical_op_destroy(struct rpcrdma_buffer *buf)
 
 const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
 	.ro_map				= physical_op_map,
+	.ro_unmap_sync			= physical_op_unmap_sync,
 	.ro_unmap			= physical_op_unmap,
 	.ro_open			= physical_op_open,
 	.ro_maxpages			= physical_op_maxpages,
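
For context, here is a minimal sketch of how a caller might dispatch the new synchronous unmap through the memreg ops table. The surrounding function and its name are assumptions for illustration only and are not part of this patch; the ops-table indirection mirrors how the other ro_* methods registered above are invoked.

/* Hypothetical caller, for illustration only; not part of this patch.
 * It assumes the ops table is reachable via r_xprt->rx_ia.ri_ops, as
 * with the other ro_* methods in xprtrdma of this era.
 */
static void
example_put_req(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Block until every segment mapped for "req" has been
	 * DMA-unmapped, so its pages can safely be reused or freed.
	 */
	if (req->rl_nchunks)
		ia->ri_ops->ro_unmap_sync(r_xprt, req);
}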