Browse Source

ia64: replace ZONE_DMA with ZONE_DMA32

ia64 uses ZONE_DMA for allocations below 32 bits.  These days we
name the zone for that ZONE_DMA32, which will allow us to use the
dma-direct and generic swiotlb code as-is, so rename it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian König <christian.koenig@amd.com>
Christoph Hellwig 7 years ago
parent
commit
d5c23ebf1b
4 changed files with 8 additions and 8 deletions
  1. 1 1
      arch/ia64/Kconfig
  2. 1 1
      arch/ia64/kernel/pci-swiotlb.c
  3. 2 2
      arch/ia64/mm/contig.c
  4. 4 4
      arch/ia64/mm/discontig.c

+ 1 - 1
arch/ia64/Kconfig

@@ -66,7 +66,7 @@ config 64BIT
 	select ATA_NONSTANDARD if ATA
 	default y
 
-config ZONE_DMA
+config ZONE_DMA32
 	def_bool y
 	depends on !IA64_SGI_SN2
 

+ 1 - 1
arch/ia64/kernel/pci-swiotlb.c

@@ -20,7 +20,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 					 unsigned long attrs)
 {
 	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
-		gfp |= GFP_DMA;
+		gfp |= GFP_DMA32;
 	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 }
 

+ 2 - 2
arch/ia64/mm/contig.c

@@ -237,9 +237,9 @@ paging_init (void)
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-	max_zone_pfns[ZONE_DMA] = max_dma;
+	max_zone_pfns[ZONE_DMA32] = max_dma;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 

+ 4 - 4
arch/ia64/mm/discontig.c

@@ -38,7 +38,7 @@ struct early_node_data {
 	struct ia64_node_data *node_data;
 	unsigned long pernode_addr;
 	unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	unsigned long num_dma_physpages;
 #endif
 	unsigned long min_pfn;
@@ -669,7 +669,7 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
 			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
@@ -724,8 +724,8 @@ void __init paging_init(void)
 	}
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] = max_dma;
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = max_dma;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 	free_area_init_nodes(max_zone_pfns);