
net: vhost: lock the vqs one by one

This patch changes the locking scheme: instead of locking all
vqs at the same time, lock them one by one. This will be used
by the next patch to avoid a deadlock.
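
For reference, the two locking patterns, condensed from the diff
below (the "before" half is the vhost_dev_lock_vqs() helper this
patch removes):

	/* Before: every vq mutex held at once. */
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
	/* ... touch the vqs ... */
	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);

	/* After: each vq mutex held only around the access to that
	 * vq, e.g. in vhost_vq_meta_reset(). */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		__vhost_vq_meta_reset(d->vqs[i]);
		mutex_unlock(&d->vqs[i]->mutex);
	}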

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Tonghao Zhang, 6 years ago
Commit 78139c94dc

1 changed file with 7 additions and 17 deletions

drivers/vhost/vhost.c: +7 -17

@@ -294,8 +294,11 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 {
 	int i;
 
-	for (i = 0; i < d->nvqs; ++i)
+	for (i = 0; i < d->nvqs; ++i) {
+		mutex_lock(&d->vqs[i]->mutex);
 		__vhost_vq_meta_reset(d->vqs[i]);
+		mutex_unlock(&d->vqs[i]->mutex);
+	}
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -891,20 +894,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 #define vhost_get_used(vq, x, ptr) \
 	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 
-static void vhost_dev_lock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_lock_nested(&d->vqs[i]->mutex, i);
-}
-
-static void vhost_dev_unlock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_unlock(&d->vqs[i]->mutex);
-}
-
 static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
@@ -954,7 +943,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 		if (msg->iova <= vq_msg->iova &&
 		    msg->iova + msg->size - 1 >= vq_msg->iova &&
 		    vq_msg->type == VHOST_IOTLB_MISS) {
+			mutex_lock(&node->vq->mutex);
 			vhost_poll_queue(&node->vq->poll);
+			mutex_unlock(&node->vq->mutex);
+
 			list_del(&node->node);
 			kfree(node);
 		}
@@ -986,7 +978,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 	int ret = 0;
 
 	mutex_lock(&dev->mutex);
-	vhost_dev_lock_vqs(dev);
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
 		if (!dev->iotlb) {
@@ -1020,7 +1011,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 		break;
 	}
 
-	vhost_dev_unlock_vqs(dev);
 	mutex_unlock(&dev->mutex);
 
 	return ret;
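
Why one-by-one locking helps: under the old scheme, any other code
path that takes two vq mutexes in a different order can deadlock
against the iotlb path in the classic ABBA pattern (the commit
message says the next patch needs this, presumably because it
introduces such a path). Below is a stand-alone userspace sketch of
that hazard, using pthreads in place of kernel mutexes; the vq0/vq1
names and thread roles are illustrative assumptions, not vhost code.

/* deadlock_demo.c - hypothetical ABBA deadlock demo.
 * Build: cc deadlock_demo.c -pthread
 * Usually hangs by design: each thread ends up holding the lock the
 * other one is waiting for.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t vq0 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vq1 = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the old iotlb path: takes the vq locks in index order. */
static void *lock_all_vqs(void *arg)
{
	pthread_mutex_lock(&vq0);
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&vq1);	/* blocks: other thread holds vq1 */
	puts("iotlb path took both locks");
	pthread_mutex_unlock(&vq1);
	pthread_mutex_unlock(&vq0);
	return NULL;
}

/* Stand-in for a handler that also needs another vq: opposite order. */
static void *handler(void *arg)
{
	pthread_mutex_lock(&vq1);
	sleep(1);
	pthread_mutex_lock(&vq0);	/* blocks: other thread holds vq0 */
	puts("handler took both locks");
	pthread_mutex_unlock(&vq0);
	pthread_mutex_unlock(&vq1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, lock_all_vqs, NULL);
	pthread_create(&b, NULL, handler, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Taking each vq mutex only for the duration of the access, as this
patch does, means the iotlb path never holds one vq mutex while
waiting for another, so the cycle above cannot form.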