cacheflush.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
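
/*
 * Called by the generic MM code after the kernel writes to a
 * page-cache page that may also be mapped in user space, so that
 * the new data becomes visible through an aliasing data cache.
 */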
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

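	/*
	 * A page-cache page with no user mappings cannot be stale in
	 * user space yet: defer the flush by marking PG_arch_1 and let
	 * update_mmu_cache() handle it when the page is first mapped.
	 */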
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_arch_1, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too, but
	 * that case is for exec env/arg pages, and those are 99% certain
	 * to be faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	dcache_wb_range(addr, addr + PAGE_SIZE);
}

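/*
 * Called by the generic MM code when a PTE is installed or updated:
 * the point at which a deferred flush_dcache_page() flush must be
 * carried out, before user space can access the page.
 */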
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;
	unsigned long pfn;

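	/* Ignore PTEs that do not point at managed RAM (e.g. MMIO). */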
	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	addr = (unsigned long) page_address(page);

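	/*
	 * Flush everything for executable mappings (so the icache sees
	 * fresh instructions) and whenever the kernel and user virtual
	 * addresses of the page may alias in the data cache.
	 */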
	if (vma->vm_flags & VM_EXEC ||
	    pages_do_alias(addr, address & PAGE_MASK))
		cache_wbinv_all();

	clear_bit(PG_arch_1, &(page)->flags);
}