@@ -57,7 +57,7 @@ static int clean_mr(struct mlx5_ib_mr *mr);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	/* Wait until all page fault handlers using the mr complete. */
@@ -120,7 +120,7 @@ static void reg_mr_callback(int status, void *context)
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u8 key;
 	unsigned long flags;
-	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
+	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
 	int err;
 
 	spin_lock_irqsave(&ent->lock, flags);
@@ -147,7 +147,7 @@ static void reg_mr_callback(int status, void *context)
 	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
 	key = dev->mdev->priv.mkey_key++;
 	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
-	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+	mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
 
 	cache->last_add = jiffies;
 
@@ -158,10 +158,10 @@ static void reg_mr_callback(int status, void *context)
 	spin_unlock_irqrestore(&ent->lock, flags);
 
 	write_lock_irqsave(&table->lock, flags);
-	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
-				&mr->mmr);
+	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
+				&mr->mmkey);
 	if (err)
-		pr_err("Error inserting to mr tree. 0x%x\n", -err);
+		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
 	write_unlock_irqrestore(&table->lock, flags);
 }
 
@@ -202,7 +202,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		spin_lock_irq(&ent->lock);
 		ent->pending++;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
+		err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
 					    sizeof(*in), reg_mr_callback,
 					    mr, &mr->out);
 		if (err) {
@@ -691,14 +691,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	seg->start_addr = 0;
 
-	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
 				    NULL);
 	if (err)
 		goto err_in;
 
 	kfree(in);
-	mr->ibmr.lkey = mr->mmr.key;
-	mr->ibmr.rkey = mr->mmr.key;
+	mr->ibmr.lkey = mr->mmkey.key;
+	mr->ibmr.rkey = mr->mmkey.key;
 	mr->umem = NULL;
 
 	return &mr->ibmr;
@@ -897,7 +897,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 
 	memset(&umrwr, 0, sizeof(umrwr));
 	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
-	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
 			 page_shift, virt_addr, len, access_flags);
 
 	mlx5_ib_init_umr_context(&umr_context);
@@ -914,9 +914,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 		}
 	}
 
-	mr->mmr.iova = virt_addr;
-	mr->mmr.size = len;
-	mr->mmr.pd = to_mpd(pd)->pdn;
+	mr->mmkey.iova = virt_addr;
+	mr->mmkey.size = len;
+	mr->mmkey.pd = to_mpd(pd)->pdn;
 
 	mr->live = 1;
 
@@ -1027,7 +1027,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 		wr.wr.opcode = MLX5_IB_WR_UMR;
 		wr.npages = sg.length / sizeof(u64);
 		wr.page_shift = PAGE_SHIFT;
-		wr.mkey = mr->mmr.key;
+		wr.mkey = mr->mmkey.key;
 		wr.target.offset = start_page_index;
 
 		mlx5_ib_init_umr_context(&umr_context);
@@ -1100,7 +1100,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
 							 1 << page_shift));
-	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
 				    NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "create mkey failed\n");
@@ -1111,7 +1111,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	mr->live = 1;
 	kvfree(in);
 
-	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
+	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
 
 	return mr;
 
@@ -1130,8 +1130,8 @@ static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 {
 	mr->npages = npages;
 	atomic_add(npages, &dev->mdev->priv.reg_pages);
-	mr->ibmr.lkey = mr->mmr.key;
-	mr->ibmr.rkey = mr->mmr.key;
+	mr->ibmr.lkey = mr->mmkey.key;
+	mr->ibmr.rkey = mr->mmkey.key;
 	mr->ibmr.length = length;
 	mr->access_flags = access_flags;
 }
@@ -1179,7 +1179,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto error;
 	}
 
-	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);
+	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
 
 	mr->umem = umem;
 	set_mr_fileds(dev, mr, npages, length, access_flags);
@@ -1205,7 +1205,7 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 	memset(&umrwr.wr, 0, sizeof(umrwr));
 	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
-	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
+	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
 
 	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
@@ -1259,7 +1259,7 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
 	}
 
-	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
 			    page_shift);
 
 	if (flags & IB_MR_REREG_PD) {
@@ -1371,7 +1371,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 
 	if (flags & IB_MR_REREG_PD) {
 		ib_mr->pd = pd;
-		mr->mmr.pd = to_mpd(pd)->pdn;
+		mr->mmkey.pd = to_mpd(pd)->pdn;
 	}
 
 	if (flags & IB_MR_REREG_ACCESS)
@@ -1380,8 +1380,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	if (flags & IB_MR_REREG_TRANS) {
 		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
 		set_mr_fileds(dev, mr, npages, len, access_flags);
-		mr->mmr.iova = addr;
-		mr->mmr.size = len;
+		mr->mmkey.iova = addr;
+		mr->mmkey.size = len;
 	}
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	update_odp_mr(mr);
@@ -1461,7 +1461,7 @@ static int clean_mr(struct mlx5_ib_mr *mr)
 		err = destroy_mkey(dev, mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
-				     mr->mmr.key, err);
+				     mr->mmkey.key, err);
 			return err;
 		}
 	} else {
@@ -1587,13 +1587,13 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 	}
 
 	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
-	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
 				    NULL, NULL, NULL);
 	if (err)
 		goto err_destroy_psv;
 
-	mr->ibmr.lkey = mr->mmr.key;
-	mr->ibmr.rkey = mr->mmr.key;
+	mr->ibmr.lkey = mr->mmkey.key;
+	mr->ibmr.rkey = mr->mmkey.key;
 	mr->umem = NULL;
 	kfree(in);
 
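
Taken together, the hunks are a mechanical rename: every mr->mmr field access becomes
mr->mmkey, and the per-device radix tree moves from priv.mr_table / struct mlx5_mr_table
to priv.mkey_table / struct mlx5_mkey_table, presumably so that mkey-backed objects other
than MRs can share the same table. For illustration only, a reader-side lookup against the
renamed table would mirror the radix_tree_insert() call in reg_mr_callback() above; note
that the struct name mlx5_core_mkey and the helper below are assumptions, since this
excerpt shows only the mmkey field and table renames:

/*
 * Sketch of a lookup against the renamed mkey table. Hypothetical
 * helper; mirrors the insert path in reg_mr_callback(), which keys
 * the radix tree by mlx5_base_mkey(mr->mmkey.key) under table->lock.
 */
static struct mlx5_core_mkey *mkey_table_lookup(struct mlx5_core_dev *mdev,
						u32 key)
{
	struct mlx5_mkey_table *table = &mdev->priv.mkey_table;
	struct mlx5_core_mkey *mkey;
	unsigned long flags;

	read_lock_irqsave(&table->lock, flags);
	/* Index by the base mkey, exactly as the insert path does. */
	mkey = radix_tree_lookup(&table->tree, mlx5_base_mkey(key));
	read_unlock_irqrestore(&table->lock, flags);

	return mkey;
}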