@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
 struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+		unsigned long *iova)
+{
+	struct page *page;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	page = pfn_to_page(pfn);
+	if (is_error_page(page))
+		return -EFAULT;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+			PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr))
+		return -ENOMEM;
+
+	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
+	return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -111,21 +140,22 @@ out:
 	return ret;
 }
 
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 
 	entry = __gvt_cache_find(vgpu, gfn);
-	pfn = (entry == NULL) ? 0 : entry->pfn;
+	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
 
 	mutex_unlock(&vgpu->vdev.cache_lock);
-	return pfn;
+	return iova;
 }
 
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+		unsigned long iova)
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
 		return;
 
 	new->gfn = gfn;
-	new->pfn = pfn;
+	new->iova = iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 	}
 
 	g1 = gfn;
+	gvt_dma_unmap_iova(vgpu, this->iova);
 	rc = vfio_unpin_pages(dev, &g1, 1);
 	WARN_ON(rc != 1);
 	__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while ((node = rb_first(&vgpu->vdev.cache))) {
 		dma = rb_entry(node, struct gvt_dma, node);
+		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;
 
 		vfio_unpin_pages(dev, &gfn, 1);
@@ -1348,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 
 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-	unsigned long pfn;
+	unsigned long iova, pfn;
 	struct kvmgt_guest_info *info;
 	struct device *dev;
 	int rc;
@@ -1357,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		return INTEL_GVT_INVALID_ADDR;
 
 	info = (struct kvmgt_guest_info *)handle;
-	pfn = gvt_cache_find(info->vgpu, gfn);
-	if (pfn != 0)
-		return pfn;
+	iova = gvt_cache_find(info->vgpu, gfn);
+	if (iova != INTEL_GVT_INVALID_ADDR)
+		return iova;
 
 	pfn = INTEL_GVT_INVALID_ADDR;
 	dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1368,9 +1400,13 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
 		return INTEL_GVT_INVALID_ADDR;
 	}
 
-	gvt_cache_add(info->vgpu, gfn, pfn);
-	return pfn;
+	/* transfer to host iova for GFX to use DMA */
+	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+	if (rc)
+		gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+	gvt_cache_add(info->vgpu, gfn, iova);
+	return iova;
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
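
Note on the encoding the new helpers share: gvt_dma_map_iova() stores the bus address returned by dma_map_page() in page-frame form (daddr >> PAGE_SHIFT), so it drops into the same unsigned long slot that previously held the kvm pfn, and gvt_dma_unmap_iova() reverses the shift before calling dma_unmap_page(). Below is a minimal user-space sketch of that round trip, assuming 4 KiB pages; daddr_to_iova()/iova_to_daddr() are illustrative names, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages, as on x86 */

typedef uint64_t dma_addr_t;

/* Mirror of gvt_dma_map_iova()'s encoding step: keep the page-aligned
 * DMA address as a page-frame-number-sized cookie. */
static unsigned long daddr_to_iova(dma_addr_t daddr)
{
	assert((daddr & ((1ULL << PAGE_SHIFT) - 1)) == 0); /* page aligned */
	return (unsigned long)(daddr >> PAGE_SHIFT);
}

/* Mirror of gvt_dma_unmap_iova()'s decoding step: recover the DMA
 * address to pass back to dma_unmap_page(). */
static dma_addr_t iova_to_daddr(unsigned long iova)
{
	return (dma_addr_t)(iova << PAGE_SHIFT);
}

int main(void)
{
	/* hypothetical page-aligned bus address from dma_map_page() */
	dma_addr_t daddr = 0x7ffd4000ULL;
	unsigned long iova = daddr_to_iova(daddr);

	printf("daddr %#jx -> iova %#lx -> daddr %#jx\n",
	       (uintmax_t)daddr, iova, (uintmax_t)iova_to_daddr(iova));
	assert(iova_to_daddr(iova) == daddr); /* lossless round trip */
	return 0;
}

The likely motivation, per the in-patch comment "transfer to host iova for GFX to use DMA": once an IOMMU is active, the host pfn and the address the GPU must emit for DMA are no longer interchangeable, so the per-vGPU cache now hands back an iova instead of the raw pfn.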