@@ -191,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_total_mcast_qp_attach = 0;
 	ib_attr->max_ah = dev_attr->max_ah;
 
-	ib_attr->max_fmr = dev_attr->max_fmr;
-	ib_attr->max_map_per_fmr = 1;	/* ? */
+	ib_attr->max_fmr = 0;
+	ib_attr->max_map_per_fmr = 0;
 
 	ib_attr->max_srq = dev_attr->max_srq;
 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@@ -3231,100 +3231,6 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 	return rc;
 }
 
-/* Fast Memory Regions */
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
-				 struct ib_fmr_attr *fmr_attr)
-{
-	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
-	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_re_fmr *fmr;
-	int rc;
-
-	if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
-	    fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
-		dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
-		return ERR_PTR(-ENOMEM);
-	}
-	fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
-	if (!fmr)
-		return ERR_PTR(-ENOMEM);
-
-	fmr->rdev = rdev;
-	fmr->qplib_fmr.pd = &pd->qplib_pd;
-	fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
-
-	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
-	if (rc)
-		goto fail;
-
-	fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
-	fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
-	fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
-
-	atomic_inc(&rdev->mr_count);
-	return &fmr->ib_fmr;
-fail:
-	kfree(fmr);
-	return ERR_PTR(rc);
-}
-
-int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
-			 u64 iova)
-{
-	struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
-					       ib_fmr);
-	struct bnxt_re_dev *rdev = fmr->rdev;
-	int rc;
-
-	fmr->qplib_fmr.va = iova;
-	fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
-
-	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
-			       list_len, true);
-	if (rc)
-		dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
-			fmr->ib_fmr.lkey);
-	return rc;
-}
-
-int bnxt_re_unmap_fmr(struct list_head *fmr_list)
-{
-	struct bnxt_re_dev *rdev;
-	struct bnxt_re_fmr *fmr;
-	struct ib_fmr *ib_fmr;
-	int rc = 0;
-
-	/* Validate each FMRs inside the fmr_list */
-	list_for_each_entry(ib_fmr, fmr_list, list) {
-		fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
-		rdev = fmr->rdev;
-
-		if (rdev) {
-			rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
-						  &fmr->qplib_fmr, true);
-			if (rc)
-				break;
-		}
-	}
-	return rc;
-}
-
-int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
-{
-	struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
-					       ib_fmr);
-	struct bnxt_re_dev *rdev = fmr->rdev;
-	int rc;
-
-	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
-	if (rc)
-		dev_err(rdev_to_dev(rdev), "Failed to free FMR");
-
-	kfree(fmr);
-	atomic_dec(&rdev->mr_count);
-	return rc;
-}
-
 /* uverbs */
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,