@@ -423,6 +423,34 @@ static void vfio_group_put(struct vfio_group *group)
 	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
 }
 
+struct vfio_group_put_work {
+	struct work_struct work;
+	struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = container_of(work, struct vfio_group_put_work, work);
+
+	vfio_group_put(do_work->group);
+	kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+	if (WARN_ON(!do_work))
+		return;
+
+	INIT_WORK(&do_work->work, vfio_group_put_bg);
+	do_work->group = group;
+	schedule_work(&do_work->work);
+}
+
 /* Assume group_lock or group reference is held */
 static void vfio_group_get(struct vfio_group *group)
 {
@@ -762,7 +790,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 		break;
 	}
 
-	vfio_group_put(group);
+	/*
+	 * If we're the last reference to the group, the group will be
+	 * released, which includes unregistering the iommu group notifier.
+	 * We hold a read-lock on that notifier list, unregistering needs
+	 * a write-lock... deadlock.  Release our reference asynchronously
+	 * to avoid that situation.
+	 */
+	vfio_group_schedule_put(group);
 	return NOTIFY_OK;
 }
 
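For context, here is the same defer-the-final-put pattern in isolation. This is a minimal sketch, not part of the patch, and the demo_* names are hypothetical. The idea: a callback that may hold the last reference hands that reference to a work item instead of dropping it inline, so the release path (which needs the notifier chain's write lock) runs later from a workqueue, where no read lock is held.

#include <linux/bug.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_object {
	struct kref kref;
	/* resources whose teardown takes the notifier write lock */
};

static void demo_release(struct kref *kref)
{
	struct demo_object *obj = container_of(kref, struct demo_object, kref);

	/* teardown that must not run under the notifier read lock */
	kfree(obj);
}

struct demo_put_work {
	struct work_struct work;
	struct demo_object *obj;
};

static void demo_put_bg(struct work_struct *work)
{
	struct demo_put_work *w = container_of(work, struct demo_put_work, work);

	/*
	 * Process context, no notifier lock held: if this is the last
	 * reference, demo_release() may safely take the write lock.
	 */
	kref_put(&w->obj->kref, demo_release);
	kfree(w);
}

static void demo_schedule_put(struct demo_object *obj)
{
	struct demo_put_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (WARN_ON(!w))
		return;	/* reference leaks, but we don't deadlock */

	INIT_WORK(&w->work, demo_put_bg);
	w->obj = obj;	/* caller's reference transfers to the work item */
	schedule_work(&w->work);
}

Note the ownership handoff: the caller's reference moves to the work item, so the object cannot be released before demo_put_bg() runs. On allocation failure the patch (and this sketch) returns without dropping the reference inline, presumably because a leaked group reference is less harmful than a deadlocked notifier chain.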