@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
 	if (unlikely(mapped_segs == mr->mr.max_segs))
 		return -ENOMEM;
 
-	if (mr->mr.length == 0) {
-		mr->mr.user_base = addr;
-		mr->mr.iova = addr;
-	}
-
 	m = mapped_segs / RVT_SEGSZ;
 	n = mapped_segs % RVT_SEGSZ;
 	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
  * @sg_nents: number of entries in sg
  * @sg_offset: offset in bytes into sg
  *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
  * Return: number of sg elements mapped to the memory region
  */
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset)
 {
 	struct rvt_mr *mr = to_imr(ibmr);
+	int ret;
 
 	mr->mr.length = 0;
 	mr->mr.page_shift = PAGE_SHIFT;
-	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
-			      rvt_set_page);
+	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+	mr->mr.user_base = ibmr->iova;
+	mr->mr.iova = ibmr->iova;
+	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+	mr->mr.length = (size_t)ibmr->length;
+	return ret;
 }
 
 /**
@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
 	ibmr->rkey = key;
 	mr->mr.lkey = key;
 	mr->mr.access_flags = access;
+	mr->mr.iova = ibmr->iova;
 	atomic_set(&mr->mr.lkey_invalid, 0);
 
 	return 0;
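
For context, a minimal sketch of the consumer-side flow this patch affects, assuming a ULP that has already allocated mr with ib_alloc_mr() and DMA-mapped its scatterlist (sg, sg_nents are caller-provided here); ib_map_mr_sg() is the core verbs entry point that dispatches to the driver's map_mr_sg hook, i.e. rvt_map_mr_sg() above:

	/* Sketch only: map a DMA-mapped SG list into the MR before fast
	 * registration. ib_map_mr_sg() calls rvt_map_mr_sg(), which uses
	 * ib_sg_to_pages() + rvt_set_page() to fill the page array and to
	 * compute ibmr->iova and ibmr->length.
	 */
	int n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);

	if (n != sg_nents)
		return n < 0 ? n : -EINVAL;

	/* mr->iova and mr->length now describe the mapped range, including
	 * any byte offset into the first page; the patched rvt_map_mr_sg()
	 * copies them into the rvt_mr so that subsequent rkey/lkey checks
	 * validate the same range the ULP posts in its IB_WR_REG_MR work
	 * request.
	 */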