@@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
-		flush_tlb_all();
+		if (mm->context)
+			flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
@@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
 			pfn = pte_pfn(*ptep);
 			if (!pfn_valid(pfn))
 				continue;
+			if (unlikely(mm->context))
+				flush_tlb_page(vma, addr);
 			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 		}
 	}
@@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	pgd_t *pgd;
+	unsigned long addr;
+
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
-		flush_tlb_range(vma, start, end);
+		if (vma->vm_mm->context)
+			flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		return;
 	}
 
-	flush_user_dcache_range_asm(start, end);
-	if (vma->vm_flags & VM_EXEC)
-		flush_user_icache_range_asm(start, end);
-	flush_tlb_range(vma, start, end);
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		flush_tlb_range(vma, start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn)) {
+			if (unlikely(vma->vm_mm->context))
+				flush_tlb_page(vma, addr);
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
-	BUG_ON(!vma->vm_mm->context);
-
 	if (pfn_valid(pfn)) {
-		flush_tlb_page(vma, vmaddr);
+		if (likely(vma->vm_mm->context))
+			flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }