IB/mlx5: Serialize access to the VMA list

User-space applications can do mmap and munmap directly at
any time.

Since the VMA list is not protected with a mutex, concurrent
accesses to it from the mmap and munmap paths can cause
data corruption. Add a mutex around the list.
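
For reference, the locking pattern this patch introduces can be sketched
on its own. The fragment below is illustrative only, not the driver code:
the struct and function names (uctx, vma_entry, uctx_init, vma_entry_add,
vma_entry_del) are made up for the sketch; only the list and mutex field
names follow the patch.

/* Illustrative sketch of the serialization added by this patch. */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct vma_entry {
	struct list_head list;
	struct mutex *vma_private_list_mutex;	/* points at the per-context mutex */
};

struct uctx {
	struct list_head vma_private_list;
	/* protect vma_private_list add/del */
	struct mutex vma_private_list_mutex;
};

/* context allocation path: initialise the list and its mutex */
static void uctx_init(struct uctx *ctx)
{
	INIT_LIST_HEAD(&ctx->vma_private_list);
	mutex_init(&ctx->vma_private_list_mutex);
}

/* mmap path: link a new entry while holding the mutex */
static int vma_entry_add(struct uctx *ctx)
{
	struct vma_entry *prv = kzalloc(sizeof(*prv), GFP_KERNEL);

	if (!prv)
		return -ENOMEM;

	prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;

	mutex_lock(&ctx->vma_private_list_mutex);
	list_add(&prv->list, &ctx->vma_private_list);
	mutex_unlock(&ctx->vma_private_list_mutex);

	return 0;
}

/* munmap (vm_ops->close) path: unlink and free under the same mutex */
static void vma_entry_del(struct vma_entry *prv)
{
	mutex_lock(prv->vma_private_list_mutex);
	list_del(&prv->list);
	mutex_unlock(prv->vma_private_list_mutex);
	kfree(prv);
}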

Cc: <stable@vger.kernel.org> # v4.7
Fixes: 7c2344c3bbf9 ("IB/mlx5: Implements disassociate_ucontext API")
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Majd Dibbiny, 7 years ago
parent
commit
ad9a3668a4

+ 8 - 0
drivers/infiniband/hw/mlx5/main.c

@@ -1463,6 +1463,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	}
 
 	INIT_LIST_HEAD(&context->vma_private_list);
+	mutex_init(&context->vma_private_list_mutex);
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
 
@@ -1624,7 +1625,9 @@ static void  mlx5_ib_vma_close(struct vm_area_struct *area)
 	 * mlx5_ib_disassociate_ucontext().
 	 */
 	mlx5_ib_vma_priv_data->vma = NULL;
+	mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
 	list_del(&mlx5_ib_vma_priv_data->list);
+	mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
 	kfree(mlx5_ib_vma_priv_data);
 }
 
@@ -1644,10 +1647,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
 		return -ENOMEM;
 
 	vma_prv->vma = vma;
+	vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
 	vma->vm_private_data = vma_prv;
 	vma->vm_ops =  &mlx5_ib_vm_ops;
 
+	mutex_lock(&ctx->vma_private_list_mutex);
 	list_add(&vma_prv->list, vma_head);
+	mutex_unlock(&ctx->vma_private_list_mutex);
 
 	return 0;
 }
@@ -1690,6 +1696,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 	 * mlx5_ib_vma_close.
 	 */
 	down_write(&owning_mm->mmap_sem);
+	mutex_lock(&context->vma_private_list_mutex);
 	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
 				 list) {
 		vma = vma_private->vma;
@@ -1704,6 +1711,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 		list_del(&vma_private->list);
 		kfree(vma_private);
 	}
+	mutex_unlock(&context->vma_private_list_mutex);
 	up_write(&owning_mm->mmap_sem);
 	mmput(owning_mm);
 	put_task_struct(owning_process);

+ 4 - 0
drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -115,6 +115,8 @@ enum {
 struct mlx5_ib_vma_private_data {
 	struct list_head list;
 	struct vm_area_struct *vma;
+	/* protect vma_private_list add/del */
+	struct mutex *vma_private_list_mutex;
 };
 
 struct mlx5_ib_ucontext {
@@ -129,6 +131,8 @@ struct mlx5_ib_ucontext {
 	/* Transport Domain number */
 	u32			tdn;
 	struct list_head	vma_private_list;
+	/* protect vma_private_list add/del */
+	struct mutex		vma_private_list_mutex;
 
 	unsigned long		upd_xlt_page;
 	/* protect ODP/KSM */