cacheflush.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

#ifndef CONFIG_CPU_CACHE_ALIASING

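/*
 * Non-aliasing caches: a physical page has a single cache colour, so it
 * is enough to refill the TLB entry for the new translation and to
 * write back/invalidate the kernel mapping of the page when a deferred
 * flush is pending (PG_dcache_dirty) or the mapping is executable.
 */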
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
	    (vma->vm_flags & VM_EXEC)) {
		if (!PageHighMem(page)) {
			cpu_cache_wbinval_page((unsigned long)
					       page_address(page),
					       vma->vm_flags & VM_EXEC);
		} else {
			unsigned long kaddr = (unsigned long)kmap_atomic(page);

			cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
			kunmap_atomic((void *)kaddr);
		}
	}
}

#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

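/*
 * Aliasing VIPT D-cache: the same physical page can occupy several
 * cache "colours", selected by the virtual address bits between
 * PAGE_SHIFT and the SHMLBA boundary.  Two mappings alias when those
 * colour bits differ; e.g. with 4 KiB pages and SHMLBA = 16 KiB,
 * addresses 0x1000 and 0x3000 alias because
 * (0x1000 ^ 0x3000) & (SHMLBA - 1) = 0x2000 != 0.
 */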
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

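/*
 * kremap0()/kremap1() build a temporary kernel mapping of @pa at a
 * fixed virtual base, preserving the colour bits of @uaddr, so that
 * cache maintenance through the returned address hits the same alias
 * as the user mapping.  The entry is written and locked directly into
 * the TLB (tlbop rwlk); no kernel page-table entry is installed for it.
 */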
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

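/* Unlock and invalidate a TLB entry set up by kremap0()/kremap1(). */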
static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

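/*
 * With an aliasing cache there is no cheap way to pick out only one
 * mm's lines, so flush_cache_mm() falls back to a full D-cache
 * write-back/invalidate plus a full I-cache invalidate.
 */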
void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* For large ranges a full flush is cheaper than per-page work. */
	if ((end - start) > 8 * PAGE_SIZE) {
		cpu_dcache_wbinval_all();
		if (vma->vm_flags & VM_EXEC)
			cpu_icache_inval_all();
		return;
	}
	local_irq_save(flags);
	while (start < end) {
		if (va_present(vma->vm_mm, start))
			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
		start += PAGE_SIZE;
	}
	local_irq_restore(flags);
}

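/*
 * Flush one user page: remap its physical frame at a colour-matched
 * kernel address and flush through that alias.
 */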
void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(addr, pfn << PAGE_SHIFT);
	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
	kunmap01(vto);
	local_irq_restore(flags);
}

void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

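/*
 * copy_user_highpage() must respect colours: write back the source's
 * kernel alias and invalidate the destination's kernel alias when they
 * differ from the user colour, then do the copy through two temporary
 * mappings (kremap0/kremap1) that share the user's colour, so the user
 * mapping observes the freshly copied data.
 */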
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	if (aliasing(vaddr, (unsigned long)kto))
		cpu_dcache_inval_page((unsigned long)kto);
	local_irq_save(flags);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long vto, flags, kto;

	kto = ((unsigned long)page_address(page) & PAGE_MASK);

	local_irq_save(flags);
	if (aliasing(kto, vaddr) && kto != 0) {
		cpu_dcache_inval_page(kto);
		cpu_icache_inval_page(kto);
	}
	vto = kremap0(vaddr, page_to_phys(page));
	clear_page((void *)vto);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);

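/*
 * flush_dcache_page() defers the flush while the page is not mapped
 * into any user address space: it only sets PG_dcache_dirty and lets
 * update_mmu_cache() do the work at fault time.  Otherwise every
 * possible colour of the page is flushed through a temporary mapping.
 */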
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		int i, pc;
		unsigned long vto, kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		cpu_dcache_wbinval_page(kaddr);
		/* one loop iteration per possible colour of the page */
		pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
		local_irq_save(flags);
		for (i = 0; i < pc; i++) {
			vto = kremap0(kaddr + i * PAGE_SIZE,
				      page_to_phys(page));
			cpu_dcache_wbinval_page(vto);
			kunmap01(vto);
		}
		local_irq_restore(flags);
	}
}

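/*
 * copy_to_user_page()/copy_from_user_page() are used e.g. by ptrace to
 * access another task's pages: the copy goes through a colour-matched
 * temporary mapping, and for executable VMAs the written range is also
 * synced so stale instructions cannot be fetched afterwards.
 */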
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long line_size, start, end, vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		line_size = L1_cache_info[DCACHE].line_size;
		start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) &
		      ~(line_size - 1);
		cpu_cache_wbinval_range(start, end, 1);
	}
	kunmap01(vto);
	local_irq_restore(flags);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}

void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long flags;

	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}

void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}

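/*
 * The I-cache and D-cache are not coherent: after new code has been
 * written, the D-cache range must be written back and the I-cache
 * invalidated before the code may execute.
 * cpu_cache_wbinval_range(..., 1) does both.
 */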
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long line_size, flags;

	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);
	local_irq_save(flags);
	cpu_cache_wbinval_range(start, end, 1);
	local_irq_restore(flags);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_cache_wbinval_page((unsigned long)page_address(page),
			       vma->vm_flags & VM_EXEC);
	local_irq_restore(flags);
}

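/*
 * Aliasing variant of update_mmu_cache(): besides refilling the TLB
 * entry, write back/invalidate the kernel alias of the page whenever a
 * deferred flush is pending or the mapping is executable, so the user
 * colour sees up-to-date data.
 */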
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *pte)
{
	struct page *page;
	unsigned long flags;
	unsigned long pfn = pte_pfn(*pte);

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
	    (vma->vm_flags & VM_EXEC)) {
		local_irq_save(flags);
		cpu_dcache_wbinval_page((unsigned long)page_address(page));
		local_irq_restore(flags);
	}
}
#endif