@@ -853,7 +853,9 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	return 0;
 }
 
-struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
+			    enum ib_mr_type mr_type,
+			    u32 max_num_sg)
 {
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
@@ -862,6 +864,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 	u32 stag = 0;
 	int ret = 0;
 
+	if (mr_type != IB_MR_TYPE_MEM_REG ||
+	    max_num_sg > t4_max_fr_depth(use_dsgl))
+		return ERR_PTR(-EINVAL);
+
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
@@ -871,10 +877,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 	}
 
 	mhp->rhp = rhp;
-	ret = alloc_pbl(mhp, pbl_depth);
+	ret = alloc_pbl(mhp, max_num_sg);
 	if (ret)
 		goto err1;
-	mhp->attr.pbl_size = pbl_depth;
+	mhp->attr.pbl_size = max_num_sg;
 	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
 			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
 	if (ret)
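
For context, a minimal usage sketch (not part of this patch; the helper name and its parameters are assumptions): with the signature change above, a ULP requests a fast-registration MR through the unified ib_alloc_mr() verb, which dispatches to the provider's alloc_mr hook, i.e. c4iw_alloc_mr() on iw_cxgb4 devices.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper, not from this patch: allocate a fast-registration
 * MR able to map up to max_sge scatter/gather entries.  'pd' is assumed
 * to be a protection domain the caller already owns.
 */
static struct ib_mr *example_alloc_frmr(struct ib_pd *pd, u32 max_sge)
{
	struct ib_mr *mr;

	/* Dispatches to the provider's alloc_mr hook (c4iw_alloc_mr() on cxgb4) */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sge);
	if (IS_ERR(mr))
		pr_err("ib_alloc_mr failed: %ld\n", PTR_ERR(mr));

	return mr;
}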