@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	int shrink = 0;
 	int c;
 
+	if (!mr->allocated_from_cache)
+		return;
+
 	c = order2idx(dev, mr->order);
 	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		umem = NULL;
 	}
 #endif
-
 	clean_mr(dev, mr);
 
+	/*
+	 * We should unregister the DMA address from the HCA before
+	 * removing the DMA mapping.
+	 */
+	mlx5_mr_cache_free(dev, mr);
 	if (umem) {
 		ib_umem_release(umem);
 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
 	}
-
 	if (!mr->allocated_from_cache)
 		kfree(mr);
-	else
-		mlx5_mr_cache_free(dev, mr);
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
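
For readability, this is roughly how dereg_mr() reads once the patch is applied: a sketch stitched together from the two hunks above, with the code before clean_mr() elided and the explanatory comments added for this note (they are not part of the patch). The early return added to mlx5_mr_cache_free() in the first hunk is what makes the now-unconditional call from dereg_mr() safe for MRs that were not allocated from the cache.

static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	/* ... setup and the #ifdef'd ODP handling elided, see the hunk above ... */

	clean_mr(dev, mr);

	/*
	 * Unregister the DMA address from the HCA first. For MRs that
	 * did not come from the cache this call is now a no-op, thanks
	 * to the early return added to mlx5_mr_cache_free().
	 */
	mlx5_mr_cache_free(dev, mr);

	/* Only then is it safe to drop the DMA mapping and pinned pages. */
	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	if (!mr->allocated_from_cache)
		kfree(mr);
}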