@@ -147,6 +147,67 @@ static void tce_iommu_release(void *iommu_data)
 	kfree(container);
 }
 
+static int tce_iommu_clear(struct tce_container *container,
+		struct iommu_table *tbl,
+		unsigned long entry, unsigned long pages)
+{
+	unsigned long oldtce;
+	struct page *page;
+
+	for ( ; pages; --pages, ++entry) {
+		oldtce = iommu_clear_tce(tbl, entry);
+		if (!oldtce)
+			continue;
+
+		page = pfn_to_page(oldtce >> PAGE_SHIFT);
+		WARN_ON(!page);
+		if (page) {
+			if (oldtce & TCE_PCI_WRITE)
+				SetPageDirty(page);
+			put_page(page);
+		}
+	}
+
+	return 0;
+}
+
+static long tce_iommu_build(struct tce_container *container,
+		struct iommu_table *tbl,
+		unsigned long entry, unsigned long tce, unsigned long pages)
+{
+	long i, ret = 0;
+	struct page *page = NULL;
+	unsigned long hva;
+	enum dma_data_direction direction = iommu_tce_direction(tce);
+
+	for (i = 0; i < pages; ++i) {
+		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
+
+		ret = get_user_pages_fast(tce & PAGE_MASK, 1,
+				direction != DMA_TO_DEVICE, &page);
+		if (unlikely(ret != 1)) {
+			ret = -EFAULT;
+			break;
+		}
+		hva = (unsigned long) page_address(page) + offset;
+
+		ret = iommu_tce_build(tbl, entry + i, hva, direction);
+		if (ret) {
+			put_page(page);
+			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
+					__func__, entry << tbl->it_page_shift,
+					tce, ret);
+			break;
+		}
+		tce += IOMMU_PAGE_SIZE_4K;
+	}
+
+	if (ret)
+		tce_iommu_clear(container, tbl, entry, i);
+
+	return ret;
+}
+
 static long tce_iommu_ioctl(void *iommu_data,
 				unsigned int cmd, unsigned long arg)
 {
@@ -195,7 +256,7 @@ static long tce_iommu_ioctl(void *iommu_data,
 	case VFIO_IOMMU_MAP_DMA: {
 		struct vfio_iommu_type1_dma_map param;
 		struct iommu_table *tbl = container->tbl;
-		unsigned long tce, i;
+		unsigned long tce;
 
 		if (!tbl)
 			return -ENXIO;
@@ -229,17 +290,9 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (ret)
 			return ret;
 
-		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
-			ret = iommu_put_tce_user_mode(tbl,
-					(param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
-					tce);
-			if (ret)
-				break;
-			tce += IOMMU_PAGE_SIZE_4K;
-		}
-		if (ret)
-			iommu_clear_tces_and_put_pages(tbl,
-					param.iova >> IOMMU_PAGE_SHIFT_4K, i);
+		ret = tce_iommu_build(container, tbl,
+				param.iova >> IOMMU_PAGE_SHIFT_4K,
+				tce, param.size >> IOMMU_PAGE_SHIFT_4K);
 
 		iommu_flush_tce(tbl);
 
@@ -273,7 +326,7 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (ret)
 			return ret;
 
-		ret = iommu_clear_tces_and_put_pages(tbl,
+		ret = tce_iommu_clear(container, tbl,
 				param.iova >> IOMMU_PAGE_SHIFT_4K,
 				param.size >> IOMMU_PAGE_SHIFT_4K);
 		iommu_flush_tce(tbl);
@@ -357,6 +410,7 @@ static void tce_iommu_detach_group(void *iommu_data,
 		/* pr_debug("tce_vfio: detaching group #%u from iommu %p\n",
 			iommu_group_id(iommu_group), iommu_group); */
 		container->tbl = NULL;
+		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
 		iommu_release_ownership(tbl);
 	}
 	mutex_unlock(&container->lock);
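
For reference, the two new helpers sit behind the existing VFIO map/unmap
ioctls, so their effect is easiest to see from userspace. Below is a minimal
sketch, not part of the patch: the group path "/dev/vfio/26", the IOVA of 0
and the 64K size are placeholders that must match your device and DMA window.
VFIO_IOMMU_MAP_DMA now pins pages via get_user_pages_fast() in
tce_iommu_build(); VFIO_IOMMU_UNMAP_DMA dirties and releases them in
tce_iommu_clear().

/* Sketch only: group/container setup is abbreviated, error checks trimmed. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

int main(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/26", O_RDWR);	/* placeholder group */
	void *mem;
	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
	struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap) };

	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
	ioctl(container, VFIO_IOMMU_ENABLE);

	/* Anonymous memory to back the mapping; the driver pins it one
	 * 4K IOMMU page at a time in tce_iommu_build(). */
	mem = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	map.vaddr = (unsigned long) mem;
	map.iova = 0;			/* must lie inside the DMA window */
	map.size = 0x10000;
	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	if (ioctl(container, VFIO_IOMMU_MAP_DMA, &map))
		perror("VFIO_IOMMU_MAP_DMA");	/* -EFAULT if pinning failed */

	/* Undo the mapping; tce_iommu_clear() marks each previously
	 * pinned page dirty (if DMA-writable) and put_page()s it. */
	unmap.iova = 0;
	unmap.size = 0x10000;
	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap))
		perror("VFIO_IOMMU_UNMAP_DMA");

	return 0;
}

Note the unwind in tce_iommu_build(): on any failure it calls
tce_iommu_clear() on the entries built so far, so userspace observes either a
fully mapped range or an error with nothing left pinned.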