@@ -549,6 +549,14 @@ static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
 				DOMAIN_FLAG_STATIC_IDENTITY);
 }
 
+static inline int domain_pfn_supported(struct dmar_domain *domain,
+					unsigned long pfn)
+{
+	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+
+	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
+}
+
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
 	unsigned long sagaw;
@@ -822,14 +830,13 @@ out:
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 				      unsigned long pfn, int *target_level)
 {
-	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
 	int offset;
 
 	BUG_ON(!domain->pgd);
 
-	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+	if (!domain_pfn_supported(domain, pfn))
 		/* Address beyond IOMMU's addressing capabilities. */
 		return NULL;
 
@@ -912,12 +919,11 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 				unsigned long start_pfn,
 				unsigned long last_pfn)
 {
-	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
 
-	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
-	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(!domain_pfn_supported(domain, start_pfn));
+	BUG_ON(!domain_pfn_supported(domain, last_pfn));
 	BUG_ON(start_pfn > last_pfn);
 
 	/* we don't need lock here; nobody else touches the iova range */
@@ -978,10 +984,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 				   unsigned long start_pfn,
 				   unsigned long last_pfn)
 {
-	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-
-	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
-	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(!domain_pfn_supported(domain, start_pfn));
+	BUG_ON(!domain_pfn_supported(domain, last_pfn));
 	BUG_ON(start_pfn > last_pfn);
 
 	dma_pte_clear_range(domain, start_pfn, last_pfn);
@@ -1083,11 +1087,10 @@ struct page *domain_unmap(struct dmar_domain *domain,
 			  unsigned long start_pfn,
 			  unsigned long last_pfn)
 {
-	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct page *freelist = NULL;
 
-	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
-	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(!domain_pfn_supported(domain, start_pfn));
+	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);
 
 	/* we don't need lock here; nobody else touches the iova range */
@@ -1974,12 +1977,11 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 {
 	struct dma_pte *first_pte = NULL, *pte = NULL;
 	phys_addr_t uninitialized_var(pteval);
-	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned long sg_res;
 	unsigned int largepage_lvl = 0;
 	unsigned long lvl_pages = 0;
 
-	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
+	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
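
For reference, below is a standalone userspace sketch (not part of the patch) of the bounds check that domain_pfn_supported() centralizes: a pfn is in range when it has no bits set above the domain's address width, and the BITS_PER_LONG comparison guards against an undefined full-width shift when the domain covers the whole range. The pfn_supported() wrapper, the main() harness, and the 48-bit sample width are illustrative assumptions only, and the example assumes a 64-bit unsigned long.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	((int)(sizeof(long) * CHAR_BIT))

/* Same predicate as the helper, with addr_width passed in directly. */
static int pfn_supported(int addr_width, unsigned long pfn)
{
	/* Shifting by >= BITS_PER_LONG is undefined, so a domain whose
	 * width covers the whole long range supports every pfn. */
	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

int main(void)
{
	int addr_width = 48 - 12;	/* e.g. a 48-bit AGAW minus a 4KiB page shift */

	printf("%d\n", pfn_supported(addr_width, (1UL << addr_width) - 1));	/* 1: last supported pfn */
	printf("%d\n", pfn_supported(addr_width, 1UL << addr_width));		/* 0: one past the limit */
	return 0;
}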