// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

#ifndef CONFIG_CPU_CACHE_ALIASING
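
/*
 * Non-aliasing cache: preload the TLB entry for the freshly written PTE,
 * then perform any D-cache/I-cache maintenance that was deferred via
 * PG_dcache_dirty or that an executable mapping requires.
 */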
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(*pte);
        unsigned long flags;

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }

        page = pfn_to_page(pfn);
        if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
            (vma->vm_flags & VM_EXEC)) {
                if (!PageHighMem(page)) {
                        cpu_cache_wbinval_page((unsigned long)page_address(page),
                                               vma->vm_flags & VM_EXEC);
                } else {
                        unsigned long kaddr = (unsigned long)kmap_atomic(page);
                        cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
                        kunmap_atomic((void *)kaddr);
                }
        }
}
#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
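
/*
 * Aliasing (virtually indexed) D-cache: the helpers below create
 * short-lived, locked TLB mappings inside two fixed kernel windows
 * (BASE_ADDR0/BASE_ADDR1), choosing the offset within the window so that
 * the temporary kernel alias shares its cache colour with the user address
 * it shadows.
 */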
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
        return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
        kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
        __nds32__tlbop_unlk(kaddr);
        __nds32__tlbop_inv(kaddr);
        __nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
        kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}
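
/*
 * With an aliasing cache there is no cheap way to flush only the lines
 * belonging to one mm, so write back and invalidate the whole D-cache and
 * invalidate the whole I-cache.
 */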
void flush_cache_mm(struct mm_struct *mm)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
        local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}
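
/*
 * For ranges larger than eight pages a full D-cache flush is cheaper than
 * walking the range; otherwise flush page by page, skipping addresses that
 * are not currently mapped.
 */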
void flush_cache_range(struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
{
        unsigned long flags;

        if ((end - start) > 8 * PAGE_SIZE) {
                cpu_dcache_wbinval_all();
                if (vma->vm_flags & VM_EXEC)
                        cpu_icache_inval_all();
                return;
        }
        local_irq_save(flags);
        while (start < end) {
                if (va_present(vma->vm_mm, start))
                        cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
                start += PAGE_SIZE;
        }
        local_irq_restore(flags);
        return;
}
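
/*
 * flush_cache_page() flushes one user page through a colour-matched
 * temporary mapping; flush_cache_vmap()/flush_cache_vunmap() conservatively
 * flush both caches in their entirety.
 */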
void flush_cache_page(struct vm_area_struct *vma,
                      unsigned long addr, unsigned long pfn)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(addr, pfn << PAGE_SHIFT);
        cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
        kunmap01(vto);
        local_irq_restore(flags);
}

void flush_cache_vmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}
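
/*
 * copy_user_page()/clear_user_page() flush the user alias before touching
 * the data and the kernel alias afterwards, so the two virtual views of the
 * same physical page never hold stale lines.
 */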
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        copy_page(vto, vfrom);
        cpu_dcache_wbinval_page((unsigned long)vto);
        cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        clear_page(addr);
        cpu_dcache_wbinval_page((unsigned long)addr);
        cpu_icache_inval_page((unsigned long)addr);
}
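
/*
 * The highpage variants operate through temporary mappings that share the
 * target's user colour (kremap0/kremap1), first writing back or
 * invalidating the permanent kernel alias when it sits in a different
 * colour.
 */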
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

        kto = ((unsigned long)page_address(to) & PAGE_MASK);
        kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
        pto = page_to_phys(to);
        pfrom = page_to_phys(from);

        local_irq_save(flags);
        if (aliasing(vaddr, (unsigned long)kfrom))
                cpu_dcache_wb_page((unsigned long)kfrom);
        vto = kremap0(vaddr, pto);
        vfrom = kremap1(vaddr, pfrom);
        copy_page((void *)vto, (void *)vfrom);
        kunmap01(vfrom);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long vto, flags, kto;

        kto = ((unsigned long)page_address(page) & PAGE_MASK);

        local_irq_save(flags);
        if (aliasing(kto, vaddr) && kto != 0) {
                cpu_dcache_inval_page(kto);
                cpu_icache_inval_page(kto);
        }
        vto = kremap0(vaddr, page_to_phys(page));
        clear_page((void *)vto);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);
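
/*
 * If the page is not currently mapped into any user address space, only
 * mark it PG_dcache_dirty and let update_mmu_cache() flush it when a user
 * mapping appears; otherwise flush the kernel alias now and, when the user
 * mapping lives in a different colour, flush that alias as well.
 */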
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else {
                unsigned long kaddr, flags;

                kaddr = (unsigned long)page_address(page);
                local_irq_save(flags);
                cpu_dcache_wbinval_page(kaddr);
                if (mapping) {
                        unsigned long vaddr, kto;

                        vaddr = page->index << PAGE_SHIFT;
                        if (aliasing(vaddr, kaddr)) {
                                kto = kremap0(vaddr, page_to_phys(page));
                                cpu_dcache_wbinval_page(kto);
                                kunmap01(kto);
                        }
                }
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);
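
/*
 * copy_to_user_page()/copy_from_user_page() access another process's page
 * (e.g. for ptrace) through a colour-matched temporary mapping; writes into
 * executable VMAs also write back and invalidate the touched cache lines so
 * the I-cache sees the new instructions.
 */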
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long line_size, start, end, vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC) {
                line_size = L1_cache_info[DCACHE].line_size;
                start = (unsigned long)dst & ~(line_size - 1);
                end = ((unsigned long)dst + len + line_size - 1) &
                      ~(line_size - 1);
                cpu_cache_wbinval_range(start, end, 1);
        }
        kunmap01(vto);
        local_irq_restore(flags);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        kunmap01(vto);
        local_irq_restore(flags);
}
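
/*
 * For anonymous pages of the current mm: invalidate the I-cache at the user
 * address for executable mappings, and flush the D-cache through a
 * colour-matched mapping when the user and kernel aliases differ in colour.
 */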
void flush_anon_page(struct vm_area_struct *vma,
                     struct page *page, unsigned long vaddr)
{
        unsigned long kaddr, flags, ktmp;

        if (!PageAnon(page))
                return;

        if (vma->vm_mm != current->active_mm)
                return;

        local_irq_save(flags);
        if (vma->vm_flags & VM_EXEC)
                cpu_icache_inval_page(vaddr & PAGE_MASK);
        kaddr = (unsigned long)page_address(page);
        if (aliasing(vaddr, kaddr)) {
                ktmp = kremap0(vaddr, page_to_phys(page));
                cpu_dcache_wbinval_page(ktmp);
                kunmap01(ktmp);
        }
        local_irq_restore(flags);
}
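
/*
 * Kernel-side maintenance: flush_kernel_dcache_page() writes back and
 * invalidates a page's kernel alias, while the vmap range helpers write
 * back or invalidate an arbitrary kernel virtual range.
 */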
void flush_kernel_dcache_page(struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
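
/*
 * I-cache maintenance: round the range out to D-cache line boundaries and
 * write back/invalidate over it; the trailing '1' passed to
 * cpu_cache_wbinval_range() requests that the I-cache be invalidated as
 * well.
 */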
void flush_icache_range(unsigned long start, unsigned long end)
{
        unsigned long line_size, flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        local_irq_save(flags);
        cpu_cache_wbinval_range(start, end, 1);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_cache_wbinval_page((unsigned long)page_address(page),
                               vma->vm_flags & VM_EXEC);
        local_irq_restore(flags);
}
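
/*
 * Aliasing-cache version of update_mmu_cache(): preload the TLB entry for
 * the new PTE, then perform the D-cache flush that flush_dcache_page()
 * deferred via PG_dcache_dirty (or that an executable mapping requires) on
 * the page's kernel alias.
 */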
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long flags;
        unsigned long pfn = pte_pfn(*pte);

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }

        page = pfn_to_page(pfn);
        if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
            (vma->vm_flags & VM_EXEC)) {
                local_irq_save(flags);
                cpu_dcache_wbinval_page((unsigned long)page_address(page));
                local_irq_restore(flags);
        }
}
#endif