/* cacheflush.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
  3. #include <linux/cache.h>
  4. #include <linux/highmem.h>
  5. #include <linux/mm.h>
  6. #include <asm/cache.h>
  7. void flush_icache_page(struct vm_area_struct *vma, struct page *page)
  8. {
  9. unsigned long start;
  10. start = (unsigned long) kmap_atomic(page);
  11. cache_wbinv_range(start, start + PAGE_SIZE);
  12. kunmap_atomic((void *)start);
  13. }
  14. void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
  15. unsigned long vaddr, int len)
  16. {
  17. unsigned long kaddr;
  18. kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
  19. cache_wbinv_range(kaddr, kaddr + len);
  20. kunmap_atomic((void *)kaddr);
  21. }
  22. void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
  23. pte_t *pte)
  24. {
  25. unsigned long addr, pfn;
  26. struct page *page;
  27. void *va;
  28. if (!(vma->vm_flags & VM_EXEC))
  29. return;
  30. pfn = pte_pfn(*pte);
  31. if (unlikely(!pfn_valid(pfn)))
  32. return;
  33. page = pfn_to_page(pfn);
  34. if (page == ZERO_PAGE(0))
  35. return;
  36. va = page_address(page);
  37. addr = (unsigned long) va;
  38. if (va == NULL && PageHighMem(page))
  39. addr = (unsigned long) kmap_atomic(page);
  40. cache_wbinv_range(addr, addr + PAGE_SIZE);
  41. if (va == NULL && PageHighMem(page))
  42. kunmap_atomic((void *) addr);
  43. }