
nds32: Simplify the implementation of update_mmu_cache()

The highmem check is already done in kmap_atomic(), so there is no
need to check it again in update_mmu_cache(). That also removes the
need for separate cache-aliasing and cache non-aliasing versions of
the function: a single implementation now serves both configurations.

Signed-off-by: Greentime Hu <greentime@andestech.com>
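
For context, the PageHighMem() branch can be dropped because kmap_atomic()
performs the same check internally: a lowmem page is resolved straight
through the linear mapping via page_address(), and only a real highmem page
gets a temporary atomic mapping. A minimal sketch of that shape, not the
exact kernel code (the real helper also disables preemption/page faults
first, and map_highmem_tmp() is a hypothetical stand-in for the arch
fixmap path):

    /*
     * Sketch of the check inside kmap_atomic() (simplified).
     * map_highmem_tmp() is hypothetical, standing in for the
     * arch-specific temporary-mapping path.
     */
    static void *kmap_atomic_shape(struct page *page)
    {
    	if (!PageHighMem(page))
    		return page_address(page);	/* lowmem: already linearly mapped */
    	return map_highmem_tmp(page);		/* highmem: temporary mapping */
    }

Either way the caller gets back a usable kernel virtual address, which is
why the unified update_mmu_cache() can call kmap_atomic()/kunmap_atomic()
unconditionally.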
Greentime Hu, 7 years ago
Commit bc9cb86e65

1 file changed, 8 insertions(+), 39 deletions(-)

arch/nds32/mm/cacheflush.c (+8, -39)

@@ -45,7 +45,7 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 	flush_icache_range(kaddr, kaddr + len);
 	kunmap_atomic((void *)kaddr);
 }
-#ifndef CONFIG_CPU_CACHE_ALIASING
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		      pte_t * pte)
 {
@@ -67,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,

 	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
 	    (vma->vm_flags & VM_EXEC)) {
-
-		if (!PageHighMem(page)) {
-			cpu_cache_wbinval_page((unsigned long)
-					       page_address(page),
-					       vma->vm_flags & VM_EXEC);
-		} else {
-			unsigned long kaddr = (unsigned long)kmap_atomic(page);
-			cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
-			kunmap_atomic((void *)kaddr);
-		}
+		unsigned long kaddr;
+		local_irq_save(flags);
+		kaddr = (unsigned long)kmap_atomic(page);
+		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+		kunmap_atomic((void *)kaddr);
+		local_irq_restore(flags);
 	}
 }
-#else
+#ifdef CONFIG_CPU_CACHE_ALIASING
 extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

 static inline unsigned long aliasing(unsigned long addr, unsigned long page)
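
Net effect of the two preprocessor changes above: update_mmu_cache() is now
built unconditionally, while only the aliasing-specific helpers remain
guarded. Condensed (assumed) layout of the file after the patch:

    void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
    		      pte_t * pte);	/* one version, built for all configs */

    #ifdef CONFIG_CPU_CACHE_ALIASING
    /* va_present(), aliasing(), and the other aliasing-only cache helpers */
    #endif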
@@ -349,31 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size)
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-		      pte_t * pte)
-{
-	struct page *page;
-	unsigned long flags;
-	unsigned long pfn = pte_pfn(*pte);
-
-	if (!pfn_valid(pfn))
-		return;
-
-	if (vma->vm_mm == current->active_mm) {
-		local_irq_save(flags);
-		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
-		__nds32__tlbop_rwr(*pte);
-		__nds32__isb();
-		local_irq_restore(flags);
-	}
-
-	page = pfn_to_page(pfn);
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
-	    (vma->vm_flags & VM_EXEC)) {
-		local_irq_save(flags);
-		cpu_dcache_wbinval_page((unsigned long)page_address(page));
-		local_irq_restore(flags);
-	}
-}
 #endif
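
Putting the hunks together, the single surviving update_mmu_cache() should
read roughly as follows. The declarations at the top are not visible in the
second hunk's context, so they are assumed to match the removed aliasing
variant; note that the patch also wraps the flush in
local_irq_save()/local_irq_restore(), which the old non-aliasing path did
not do:

    void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
    		      pte_t * pte)
    {
    	struct page *page;
    	unsigned long flags;
    	unsigned long pfn = pte_pfn(*pte);

    	if (!pfn_valid(pfn))
    		return;

    	/* Refill the TLB entry for the current address space. */
    	if (vma->vm_mm == current->active_mm) {
    		local_irq_save(flags);
    		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
    		__nds32__tlbop_rwr(*pte);
    		__nds32__isb();
    		local_irq_restore(flags);
    	}

    	page = pfn_to_page(pfn);
    	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
    	    (vma->vm_flags & VM_EXEC)) {
    		unsigned long kaddr;

    		/* kmap_atomic() handles highmem and lowmem pages alike. */
    		local_irq_save(flags);
    		kaddr = (unsigned long)kmap_atomic(page);
    		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
    		kunmap_atomic((void *)kaddr);
    		local_irq_restore(flags);
    	}
    }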