@@ -48,8 +48,7 @@ enum {
 #define MLX5_UMR_ALIGN 2048
 
 static int clean_mr(struct mlx5_ib_mr *mr);
-static int max_umr_order(struct mlx5_ib_dev *dev);
-static int use_umr(struct mlx5_ib_dev *dev, int order);
+static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -184,7 +183,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 			break;
 		}
 		mr->order = ent->order;
-		mr->umred = 1;
+		mr->allocated_from_cache = 1;
 		mr->dev = dev;
 
 		MLX5_SET(mkc, mkc, free, 1);
@@ -497,7 +496,7 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 	int i;
 
 	c = order2idx(dev, order);
-	last_umr_cache_entry = order2idx(dev, max_umr_order(dev));
+	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
 	if (c < 0 || c > last_umr_cache_entry) {
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
 		return NULL;
@@ -677,12 +676,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 		queue_work(cache->wq, &ent->work);
 
-		if (i > MAX_UMR_CACHE_ENTRY) {
+		if (i > MR_CACHE_LAST_STD_ENTRY) {
 			mlx5_odp_init_mr_cache_entry(ent);
 			continue;
 		}
 
-		if (!use_umr(dev, ent->order))
+		if (ent->order > mr_cache_max_order(dev))
 			continue;
 
 		ent->page = PAGE_SHIFT;
@@ -809,28 +808,24 @@ err_free:
 	return ERR_PTR(err);
 }
 
-static int get_octo_len(u64 addr, u64 len, int page_size)
+static int get_octo_len(u64 addr, u64 len, int page_shift)
 {
+	u64 page_size = 1ULL << page_shift;
 	u64 offset;
 	int npages;
 
 	offset = addr & (page_size - 1);
-	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
+	npages = ALIGN(len + offset, page_size) >> page_shift;
 	return (npages + 1) / 2;
 }
 
-static int max_umr_order(struct mlx5_ib_dev *dev)
+static int mr_cache_max_order(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return MAX_UMR_CACHE_ENTRY + 2;
+		return MR_CACHE_LAST_STD_ENTRY + 2;
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static int use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= max_umr_order(dev);
-}
-
 static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 		       int access_flags, struct ib_umem **umem,
 		       int *npages, int *page_shift, int *ncont,
@@ -904,7 +899,8 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
 	return err;
 }
 
-static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
+static struct mlx5_ib_mr *alloc_mr_from_cache(
+				  struct ib_pd *pd, struct ib_umem *umem,
 				  u64 virt_addr, u64 len, int npages,
 				  int page_shift, int order, int access_flags)
 {
@@ -936,16 +932,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	mr->mmkey.size = len;
 	mr->mmkey.pd = to_mpd(pd)->pdn;
 
-	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
-				 MLX5_IB_UPD_XLT_ENABLE);
-
-	if (err) {
-		mlx5_mr_cache_free(dev, mr);
-		return ERR_PTR(err);
-	}
-
-	mr->live = 1;
-
 	return mr;
 }
 
@@ -1111,7 +1097,8 @@ free_xlt:
 static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 				     u64 virt_addr, u64 length,
 				     struct ib_umem *umem, int npages,
-				     int page_shift, int access_flags)
+				     int page_shift, int access_flags,
+				     bool populate)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr;
@@ -1126,15 +1113,19 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
-		sizeof(*pas) * ((npages + 1) / 2) * 2;
+	mr->ibmr.pd = pd;
+	mr->access_flags = access_flags;
+
+	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	if (populate)
+		inlen += sizeof(*pas) * roundup(npages, 2);
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_1;
 	}
 	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-	if (!(access_flags & IB_ACCESS_ON_DEMAND))
+	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
 		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
 				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
 
@@ -1143,23 +1134,27 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
 
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, free, !populate);
 	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
 	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
 	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
 	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, umr_en, 1);
 
 	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
 	MLX5_SET64(mkc, mkc, len, length);
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
 	MLX5_SET(mkc, mkc, translations_octword_size,
-		 get_octo_len(virt_addr, length, 1 << page_shift));
+		 get_octo_len(virt_addr, length, page_shift));
 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
-		 get_octo_len(virt_addr, length, 1 << page_shift));
+	if (populate) {
+		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+			 get_octo_len(virt_addr, length, page_shift));
+	}
 
 	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err) {
@@ -1168,9 +1163,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
 	mr->desc_size = sizeof(struct mlx5_mtt);
-	mr->umem = umem;
 	mr->dev = dev;
-	mr->live = 1;
 	kvfree(in);
 
 	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1210,6 +1203,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	int ncont;
 	int order;
 	int err;
+	bool use_umr = true;
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
@@ -1228,27 +1222,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
 			  &page_shift, &ncont, &order);
 
-        if (err < 0)
+	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
-		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
-			     order, access_flags);
+	if (order <= mr_cache_max_order(dev)) {
+		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d", order);
 			mr = NULL;
 		}
-	} else if (access_flags & IB_ACCESS_ON_DEMAND &&
-		   !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
-		err = -EINVAL;
-		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
-		goto error;
+	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+		if (access_flags & IB_ACCESS_ON_DEMAND) {
+			err = -EINVAL;
+			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+			goto error;
+		}
+		use_umr = false;
 	}
 
 	if (!mr) {
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
@@ -1266,8 +1262,22 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	return &mr->ibmr;
+	if (use_umr) {
+		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+		if (access_flags & IB_ACCESS_ON_DEMAND)
+			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
+
+		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
+					 update_xlt_flags);
+		if (err) {
+			mlx5_ib_dereg_mr(&mr->ibmr);
+			return ERR_PTR(err);
+		}
+	}
 
+	mr->live = 1;
+	return &mr->ibmr;
 error:
 	ib_umem_release(umem);
 	return ERR_PTR(err);
@@ -1355,7 +1365,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
-		if (mr->umred) {
+		if (mr->allocated_from_cache) {
 			err = unreg_umr(dev, mr);
 			if (err)
 				mlx5_ib_warn(dev, "Failed to unregister MR\n");
@@ -1368,12 +1378,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			return err;
 
 		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
-				page_shift, access_flags);
+				page_shift, access_flags, true);
 
 		if (IS_ERR(mr))
 			return PTR_ERR(mr);
 
-		mr->umred = 0;
+		mr->allocated_from_cache = 0;
+		mr->live = 1;
 	} else {
 		/*
 		 * Send a UMR WQE
@@ -1461,7 +1472,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 static int clean_mr(struct mlx5_ib_mr *mr)
 {
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-	int umred = mr->umred;
+	int allocated_from_cache = mr->allocated_from_cache;
 	int err;
 
 	if (mr->sig) {
@@ -1479,20 +1490,20 @@ static int clean_mr(struct mlx5_ib_mr *mr)
 
 	mlx5_free_priv_descs(mr);
 
-	if (!umred) {
+	if (!allocated_from_cache) {
+		u32 key = mr->mmkey.key;
+
 		err = destroy_mkey(dev, mr);
+		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
-				     mr->mmkey.key, err);
+				     key, err);
 			return err;
 		}
 	} else {
 		mlx5_mr_cache_free(dev, mr);
 	}
 
-	if (!umred)
-		kfree(mr);
-
 	return 0;
 }
 