@@ -114,12 +114,15 @@ out:
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
+	kvm_pfn_t pfn;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
+
 	entry = __gvt_cache_find(vgpu, gfn);
-	mutex_unlock(&vgpu->vdev.cache_lock);
+	pfn = (entry == NULL) ? 0 : entry->pfn;
 
-	return entry == NULL ? 0 : entry->pfn;
+	mutex_unlock(&vgpu->vdev.cache_lock);
+	return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -497,7 +500,16 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 		goto undo_iommu;
 	}
 
-	return kvmgt_guest_init(mdev);
+	ret = kvmgt_guest_init(mdev);
+	if (ret)
+		goto undo_group;
+
+	atomic_set(&vgpu->vdev.released, 0);
+	return ret;
+
+undo_group:
+	vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY,
+					&vgpu->vdev.group_notifier);
 
 undo_iommu:
 	vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
@@ -509,17 +521,26 @@ out:
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
 	struct kvmgt_guest_info *info;
+	int ret;
 
 	if (!handle_valid(vgpu->handle))
 		return;
 
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+		return;
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
 					&vgpu->vdev.group_notifier);
+	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
 	info = (struct kvmgt_guest_info *)vgpu->handle;
 	kvmgt_guest_exit(info);
+
+	vgpu->vdev.kvm = NULL;
 	vgpu->handle = 0;
 }
 
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 {
 	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
 					vdev.release_work);
+
 	__intel_vgpu_release(vgpu);
 }
 
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu;
-
 	if (!info) {
 		gvt_err("kvmgt_guest_info invalid\n");
 		return false;
 	}
 
-	vgpu = info->vgpu;
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvmgt_protect_table_destroy(info);
-	gvt_cache_destroy(vgpu);
+	gvt_cache_destroy(info->vgpu);
 	vfree(info);
 
 	return true;