Browse code

atomisp: USE_KMEM_CACHE is always defined so remove the dead code

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Alan Cox 8 years ago
parent
commit
1d3da3fed1

+ 1 - 1
drivers/staging/media/atomisp/pci/atomisp2/Makefile

@@ -371,7 +371,7 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
 #DEFINES += -DUSE_INTERRUPTS
 #DEFINES += -DUSE_SSSE3
 #DEFINES += -DPUNIT_CAMERA_BUSY
-DEFINES += -DUSE_KMEM_CACHE
+#DEFINES += -DUSE_KMEM_CACHE
 
 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400

+ 0 - 16
drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c

@@ -60,11 +60,7 @@ static unsigned int get_pages_from_dynamic_pool(void *pool,
 
 			page_obj[i].page = hmm_page->page;
 			page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
-#ifdef USE_KMEM_CACHE
 			kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
-#else
-			atomisp_kernel_free(hmm_page);
-#endif
 
 			if (i == size)
 				return i;
@@ -117,12 +113,8 @@ static void free_pages_to_dynamic_pool(void *pool,
 		}
 		return;
 	}
-#ifdef USE_KMEM_CACHE
 	hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
 						GFP_KERNEL);
-#else
-	hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
-#endif
 	if (!hmm_page) {
 		dev_err(atomisp_dev, "out of memory for hmm_page.\n");
 
@@ -164,7 +156,6 @@ static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
 		return -ENOMEM;
 	}
 
-#ifdef USE_KMEM_CACHE
 	dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
 						sizeof(struct hmm_page), 0,
 						SLAB_HWCACHE_ALIGN, NULL);
@@ -172,7 +163,6 @@ static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
 		atomisp_kernel_free(dypool_info);
 		return -ENOMEM;
 	}
-#endif
 
 	INIT_LIST_HEAD(&dypool_info->pages_list);
 	spin_lock_init(&dypool_info->list_lock);
@@ -219,19 +209,13 @@ static void hmm_dynamic_pool_exit(void **pool)
 			hmm_mem_stat.dyc_size--;
 			hmm_mem_stat.sys_size--;
 		}
-#ifdef USE_KMEM_CACHE
 		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
-#else
-		atomisp_kernel_free(hmm_page);
-#endif
 		spin_lock_irqsave(&dypool_info->list_lock, flags);
 	}
 
 	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
 
-#ifdef USE_KMEM_CACHE
 	kmem_cache_destroy(dypool_info->pgptr_cache);
-#endif
 
 	atomisp_kernel_free(dypool_info);
 

+ 0 - 2
drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h

@@ -101,9 +101,7 @@ struct hmm_dynamic_pool_info {
 	/* list lock is used to protect the free pages block lists */
 	spinlock_t		list_lock;
 
-#ifdef USE_KMEM_CACHE
 	struct kmem_cache	*pgptr_cache;
-#endif
 	bool			initialized;
 
 	unsigned int		pool_size;

+ 0 - 2
drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h

@@ -119,9 +119,7 @@ struct isp_mmu {
 	phys_addr_t base_address;
 
 	struct mutex pt_mutex;
-#ifdef USE_KMEM_CACHE
 	struct kmem_cache *tbl_cache;
-#endif
 };
 
 /* flags for PDE and PTE */

+ 0 - 12
drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c

@@ -103,14 +103,10 @@ static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
 	 * The slab allocator(kmem_cache and kmalloc family) doesn't handle
 	 * GFP_DMA32 flag, so we have to use buddy allocator.
 	 */
-#ifdef USE_KMEM_CACHE
 	if (totalram_pages > (unsigned long)NR_PAGES_2GB)
 		virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
 	else
 		virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
-#else
-	virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
-#endif
 	if (!virt)
 		return (phys_addr_t)NULL_PAGE;
 
@@ -144,11 +140,7 @@ static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
 	set_memory_wb((unsigned long)virt, 1);
 #endif
 
-#ifdef USE_KMEM_CACHE
 	kmem_cache_free(mmu->tbl_cache, virt);
-#else
-	free_page((unsigned long)virt);
-#endif
 }
 
 static void mmu_remap_error(struct isp_mmu *mmu,
@@ -559,13 +551,11 @@ int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
 
 	mutex_init(&mmu->pt_mutex);
 
-#ifdef USE_KMEM_CACHE
 	mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
 					   ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
 					   NULL);
 	if (!mmu->tbl_cache)
 		return -ENOMEM;
-#endif
 
 	return 0;
 }
@@ -600,7 +590,5 @@ void isp_mmu_exit(struct isp_mmu *mmu)
 
 	free_page_table(mmu, l1_pt);
 
-#ifdef USE_KMEM_CACHE
 	kmem_cache_destroy(mmu->tbl_cache);
-#endif
 }