@@ -1065,6 +1065,136 @@ err_free:
 	return ERR_PTR(ret);
 }

+int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
+			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
+			   struct ib_udata *udata)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+	struct hns_roce_mr *mr = to_hr_mr(ibmr);
+	struct hns_roce_cmd_mailbox *mailbox;
+	struct device *dev = hr_dev->dev;
+	unsigned long mtpt_idx;
+	u32 pdn = 0;
+	int npages;
+	int ret;
+
+	if (!mr->enabled)
+		return -EINVAL;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
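+	/* Read the MR's current MPT entry from hardware into the mailbox buffer. */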
+	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
+	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
+				HNS_ROCE_CMD_QUERY_MPT,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
+	if (ret)
+		goto free_cmd_mbox;
+
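+	/* Take the MPT out of hardware ownership before modifying it. */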
+	ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
+	if (ret)
+		dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+
+	mr->enabled = 0;
+
+	if (flags & IB_MR_REREG_PD)
+		pdn = to_hr_pd(pd)->pdn;
+
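+	/* For a translation change, drop the old PBL and umem before pinning the new range. */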
+	if (flags & IB_MR_REREG_TRANS) {
+		if (mr->size != ~0ULL) {
+			npages = ib_umem_page_count(mr->umem);
+
+			if (hr_dev->caps.pbl_hop_num)
+				hns_roce_mhop_free(hr_dev, mr);
+			else
+				dma_free_coherent(dev, npages * 8, mr->pbl_buf,
+						  mr->pbl_dma_addr);
+		}
+		ib_umem_release(mr->umem);
+
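+		/* Pin the new user memory range for the updated translation. */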
+		mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
+				       mr_access_flags, 0);
+		if (IS_ERR(mr->umem)) {
+			ret = PTR_ERR(mr->umem);
+			mr->umem = NULL;
+			goto free_cmd_mbox;
+		}
+		npages = ib_umem_page_count(mr->umem);
+
+		if (hr_dev->caps.pbl_hop_num) {
+			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+			if (ret)
+				goto release_umem;
+		} else {
+			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+							 &(mr->pbl_dma_addr),
+							 GFP_KERNEL);
+			if (!mr->pbl_buf) {
+				ret = -ENOMEM;
+				goto release_umem;
+			}
+		}
+	}
+
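+	/* Fill the new PD, access flags and translation into the MPT copy in the mailbox. */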
+	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+					   mr_access_flags, virt_addr,
+					   length, mailbox->buf);
+	if (ret) {
+		if (flags & IB_MR_REREG_TRANS)
+			goto release_umem;
+		else
+			goto free_cmd_mbox;
+	}
+
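+	/* Write the page addresses of the new umem into the PBL. */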
+	if (flags & IB_MR_REREG_TRANS) {
+		ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+		if (ret) {
+			if (mr->size != ~0ULL) {
+				npages = ib_umem_page_count(mr->umem);
+
+				if (hr_dev->caps.pbl_hop_num)
+					hns_roce_mhop_free(hr_dev, mr);
+				else
+					dma_free_coherent(dev, npages * 8,
+							  mr->pbl_buf,
+							  mr->pbl_dma_addr);
+			}
+
+			goto release_umem;
+		}
+	}
+
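+	/* Write the updated MPT back and return it to hardware ownership. */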
+	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
+	if (ret) {
+		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+		goto release_umem;
+	}
+
+	mr->enabled = 1;
+	if (flags & IB_MR_REREG_ACCESS)
+		mr->access = mr_access_flags;
+
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return 0;
+
+release_umem:
+	ib_umem_release(mr->umem);
+
+free_cmd_mbox:
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return ret;
+}
+
 int hns_roce_dereg_mr(struct ib_mr *ibmr)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);