@@ -396,13 +396,13 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 		dma_addr_t iova, size_t size)
 {
 	struct iova_domain *iovad = &cookie->iovad;
-	unsigned long shift = iova_shift(iovad);
 
 	/* The MSI case is only ever cleaning up its most recent allocation */
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
 	else
-		free_iova_fast(iovad, iova >> shift, size >> shift);
+		free_iova_fast(iovad, iova_pfn(iovad, iova),
+			       size >> iova_shift(iovad));
 }
 
 static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
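For reference, here is a minimal userspace sketch of the arithmetic the new free_iova_fast() call relies on. The helper bodies are paraphrased from include/linux/iova.h rather than copied from this tree, and struct iova_domain is stubbed down to its granule, so treat it as an illustration only: iova_pfn(iovad, iova) performs the same page-frame conversion the local 'shift' variable used to open-code, which is why the variable can go away.

/*
 * Standalone sketch of the iova.h helper arithmetic used above.
 * The helpers are paraphrased, not the real kernel definitions;
 * struct iova_domain is stubbed so this compiles in userspace.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long long dma_addr_t;

struct iova_domain {
	unsigned long granule;	/* IOVA page size, e.g. 4 KiB */
};

/* log2 of the granule, i.e. the shift the old code open-coded */
static unsigned long iova_shift(struct iova_domain *iovad)
{
	return __builtin_ctzl(iovad->granule);
}

/* page frame number of an IOVA, i.e. iova >> iova_shift(iovad) */
static unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}

int main(void)
{
	struct iova_domain iovad = { .granule = 4096 };
	dma_addr_t iova = 0xfffff000;
	unsigned long shift = iova_shift(&iovad);

	/* iova_pfn() and the old open-coded shift agree */
	assert(iova_pfn(&iovad, iova) == (iova >> shift));
	printf("pfn = %lu, shift = %lu\n", iova_pfn(&iovad, iova), shift);
	return 0;
}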
@@ -617,11 +617,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iova_domain *iovad = &cookie->iovad;
-	size_t iova_off = iova_offset(iovad, phys);
+	size_t iova_off = 0;
 	dma_addr_t iova;
 
-	size = iova_align(iovad, size + iova_off);
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
+		iova_off = iova_offset(&cookie->iovad, phys);
+		size = iova_align(&cookie->iovad, size + iova_off);
+	}
+
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 	if (!iova)
 		return DMA_ERROR_CODE;
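To make the intent of the guarded prologue easier to see in isolation, here is a compile-and-run sketch with the kernel types stubbed out. The cookie layout, the helper bodies, and the map_size() wrapper are illustrative assumptions, not the real definitions from dma-iommu.c or iova.h; the point is only that the offset/alignment math is applied when the cookie owns a granule-backed iovad (IOMMU_DMA_IOVA_COOKIE) and skipped for the MSI cookie case.

/*
 * Sketch of the guarded alignment logic added above, with the kernel
 * types stubbed out. map_size() is an illustrative name, not a kernel
 * function; it mirrors only the new __iommu_dma_map() prologue.
 */
#include <stdio.h>

typedef unsigned long long dma_addr_t;

enum cookie_type { IOMMU_DMA_IOVA_COOKIE, IOMMU_DMA_MSI_COOKIE };

struct iova_domain { unsigned long granule; };

struct iommu_dma_cookie {
	enum cookie_type type;
	struct iova_domain iovad;	/* only meaningful for IOVA cookies */
};

static unsigned long iova_offset(struct iova_domain *iovad, dma_addr_t addr)
{
	return addr & (iovad->granule - 1);
}

static unsigned long iova_align(struct iova_domain *iovad, unsigned long size)
{
	return (size + iovad->granule - 1) & ~(iovad->granule - 1);
}

/* Only touch the iovad when the cookie actually owns one. */
static unsigned long map_size(struct iommu_dma_cookie *cookie,
			      dma_addr_t phys, unsigned long size,
			      unsigned long *iova_off)
{
	*iova_off = 0;
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		*iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + *iova_off);
	}
	return size;
}

int main(void)
{
	struct iommu_dma_cookie iova_cookie = {
		.type = IOMMU_DMA_IOVA_COOKIE,
		.iovad = { .granule = 4096 },
	};
	struct iommu_dma_cookie msi_cookie = { .type = IOMMU_DMA_MSI_COOKIE };
	unsigned long off;

	/* IOVA cookie: offset computed, size rounded up to the granule */
	printf("iova: size=%lu off=%lu\n",
	       map_size(&iova_cookie, 0x1234, 0x100, &off), off);
	/* MSI cookie: size passes through untouched, offset stays 0 */
	printf("msi:  size=%lu off=%lu\n",
	       map_size(&msi_cookie, 0x1234, 0x100, &off), off);
	return 0;
}

Run as-is, this prints a granule-rounded size for the IOVA-cookie case and the untouched size for the MSI-cookie case, which is the behaviour the new cookie->type check preserves.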