@@ -800,7 +800,9 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
 	return 0;
 }
 
-static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
+				   enum ib_mr_type mr_type,
+				   u32 max_num_sg)
 {
 	struct iwch_dev *rhp;
 	struct iwch_pd *php;
@@ -809,6 +811,10 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 	u32 stag = 0;
 	int ret = 0;
 
+	if (mr_type != IB_MR_TYPE_MEM_REG ||
+	    max_num_sg > T3_MAX_FASTREG_DEPTH)
+		return ERR_PTR(-EINVAL);
+
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
@@ -816,10 +822,10 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 		goto err;
 
 	mhp->rhp = rhp;
-	ret = iwch_alloc_pbl(mhp, pbl_depth);
+	ret = iwch_alloc_pbl(mhp, max_num_sg);
 	if (ret)
 		goto err1;
-	mhp->attr.pbl_size = pbl_depth;
+	mhp->attr.pbl_size = max_num_sg;
 	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
 				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
 	if (ret)
@@ -1443,7 +1449,7 @@ int iwch_register_device(struct iwch_dev *dev)
 	dev->ibdev.alloc_mw = iwch_alloc_mw;
 	dev->ibdev.bind_mw = iwch_bind_mw;
 	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
-	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
+	dev->ibdev.alloc_mr = iwch_alloc_mr;
 	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
 	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
 	dev->ibdev.attach_mcast = iwch_multicast_attach;
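
For context, here is a minimal sketch (not part of the patch) of how a kernel ULP
would reach the renamed entry point once this conversion and the matching IB core
change that adds ib_alloc_mr() are in place. The protection domain `pd`, the
function name example_fastreg_mr, and the max_num_sg value of 16 are illustrative
assumptions, not taken from this patch:

/*
 * Illustrative sketch, assuming a valid protection domain `pd`.
 * ib_alloc_mr() in the verbs core dispatches to the driver's
 * ->alloc_mr hook, i.e. iwch_alloc_mr() on T3 hardware.
 */
#include <rdma/ib_verbs.h>

static struct ib_mr *example_fastreg_mr(struct ib_pd *pd)
{
	/*
	 * iwch_alloc_mr() rejects any mr_type other than
	 * IB_MR_TYPE_MEM_REG, and any max_num_sg beyond
	 * T3_MAX_FASTREG_DEPTH, with ERR_PTR(-EINVAL).
	 */
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
}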