/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

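/*
 * On SMP the whole-cache flushes must run on every CPU, so the local
 * flush routines are broadcast with on_each_cpu() and we wait for all
 * of them to finish.
 */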
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
        on_each_cpu(flush_data_cache_local, NULL, 1);
}

void
flush_instruction_cache(void)
{
        on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)     __va(PFN_PHYS(pfn))

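/*
 * Called by the generic MM code when a PTE is installed.  If the page
 * was written through its kernel mapping earlier (PG_dcache_dirty set
 * by flush_dcache_page()), flush it now so user mappings see the data.
 */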
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        /* We don't have pte special.  As a result, we can be called with
           an invalid pfn and we don't need to flush the kernel dcache page.
           This occurs with FireGL card in C8000. */
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping_file(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page_addr(pfn_va(pfn));
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024 );
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size==0) {
                seq_printf(m, "BTLB\t\t: not supported\n" );
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

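/*
 * Read the cache and TLB geometry from firmware (PDC) at boot, decide
 * whether the I- and D-TLBs must be flushed separately, and compute
 * the strides used by the assembly flush loops.
 */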
void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.it_sp_base,
                cache_info.it_sp_stride,
                cache_info.it_sp_count,
                cache_info.it_loop,
                cache_info.it_off_base,
                cache_info.it_off_stride,
                cache_info.it_off_count);

        printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.dt_sp_base,
                cache_info.dt_sp_stride,
                cache_info.dt_sp_count,
                cache_info.dt_loop,
                cache_info.dt_off_base,
                cache_info.dt_off_stride,
                cache_info.dt_off_count);

        printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_sr);

        printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_sr);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                                "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

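/*
 * Space-register hashing must be turned off; the disable instruction
 * sequence differs per CPU family.  Afterwards PDC is asked how many
 * space-id bits are still being hashed, and we panic if it is not zero.
 */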
void __init disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx:       /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2:     /* pcxl2 doesn't support space register hashing */
                return;

        default:        /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

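/*
 * Flush or purge one user page given its physical address.  The asm
 * routines flush through a kernel alias congruent with the user
 * virtual address; preemption is disabled so the flush cannot migrate
 * to another CPU mid-sequence.
 */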
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
        unsigned long physaddr)
{
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
        unsigned long physaddr)
{
        preempt_disable();
        purge_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

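/*
 * Called when the kernel dirties a page-cache page.  If no user
 * mapping exists yet, only mark the page PG_dcache_dirty and let
 * update_mmu_cache() do the flush later; otherwise flush the kernel
 * mapping plus one user alias per cache colour.
 */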
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping_file(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index;

        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent */

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /* The TLB is the engine of coherence on parisc: The
                 * CPU is entitled to speculate any page with a TLB
                 * mapping, so here we kill the mapping then flush the
                 * page along a special flush only alias mapping.
                 * This guarantees that the page is no longer in the
                 * cache for any process and nor may it be
                 * speculatively read in (until the user or kernel
                 * specifically accesses it, of course) */

                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                      != (addr & (SHM_COLOUR - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;

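/*
 * Boot-time calibration: time a whole data-cache flush against a range
 * flush of the kernel text using the interval timer (mfctl(16)), and
 * set parisc_cache_flush_threshold to the size at which flushing
 * everything becomes cheaper.  The TLB threshold is derived the same
 * way, or estimated from the TLB sizes on SMP.
 */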
void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size, start;
        unsigned long threshold;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
        if (threshold > cache_info.dc_size)
                threshold = cache_info.dc_size;
        if (threshold)
                parisc_cache_flush_threshold = threshold;
        printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
                parisc_cache_flush_threshold/1024);

        /* calculate TLB flush threshold */

        /* On SMP machines, skip the TLB measure of kernel text which
         * has been mapped as huge pages. */
        if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
                threshold = max(cache_info.it_size, cache_info.dt_size);
                threshold *= PAGE_SIZE;
                threshold /= num_online_cpus();
                goto set_tlb_threshold;
        }

        size = 0;
        start = (unsigned long) _text;
        rangetime = mfctl(16);
        while (start < (unsigned long) _end) {
                flush_tlb_kernel_range(start, start + PAGE_SIZE);
                start += PAGE_SIZE;
                size += PAGE_SIZE;
        }
        rangetime = mfctl(16) - rangetime;

        alltime = mfctl(16);
        flush_tlb_all();
        alltime = mfctl(16) - alltime;

        printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
        printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
                threshold/1024);

set_tlb_threshold:
        if (threshold > parisc_tlb_flush_threshold)
                parisc_tlb_flush_threshold = threshold;
        printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
                parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

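/*
 * Flush a page through its kernel mapping, then purge the kernel TLB
 * entry (under pa_tlb_lock, via purge_tlb_start/end) so the line
 * cannot be speculatively reloaded.
 */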
void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Copy using kernel mapping.  No coherency is needed (all in
           kunmap) for the `to' page.  However, the `from' page needs to
           be flushed through a mapping equivalent to the user mapping
           before it can be accessed through the kernel mapping. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        copy_page_asm(vto, vfrom);
        preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
{
        unsigned long flags;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            end - start >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }

        /* Purge TLB entries for small ranges using the pdtlb and
           pitlb instructions.  These instructions execute locally
           but cause a purge request to be broadcast to other TLBs. */
        while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, 1);
                pdtlb(start);
                pitlb(start);
                purge_tlb_end(flags);
                start += PAGE_SIZE;
        }
        return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

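/* Sum of all VMA sizes in an mm; used to pick a flush strategy. */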
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

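/*
 * Hand-rolled page-table walk (pgd -> pud -> pmd -> pte); returns NULL
 * if any intermediate level is missing.
 */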
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}

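/*
 * Three strategies, cheapest first: flush everything when the mm is
 * larger than the measured threshold; use the user-space range flushes
 * when the mm is the one live in space register 3 (mfsp(3)); otherwise
 * walk the page tables and flush each present page individually.
 */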
void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        pgd_t *pgd;

        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big. */
        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            mm_total_size(mm) >= parisc_cache_flush_threshold) {
                if (mm->context)
                        flush_tlb_all();
                flush_cache_all();
                return;
        }

        if (mm->context == mfsp(3)) {
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
                        if (vma->vm_flags & VM_EXEC)
                                flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
                        flush_tlb_range(vma, vma->vm_start, vma->vm_end);
                }
                return;
        }

        pgd = mm->pgd;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long addr;

                for (addr = vma->vm_start; addr < vma->vm_end;
                     addr += PAGE_SIZE) {
                        unsigned long pfn;
                        pte_t *ptep = get_ptep(pgd, addr);
                        if (!ptep)
                                continue;
                        pfn = pte_pfn(*ptep);
                        if (!pfn_valid(pfn))
                                continue;
                        if (unlikely(mm->context)) {
                                flush_tlb_page(vma, addr);
                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                        } else {
                                __purge_cache_page(vma, addr, PFN_PHYS(pfn));
                        }
                }
        }
}

void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        pgd_t *pgd;
        unsigned long addr;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            end - start >= parisc_cache_flush_threshold) {
                if (vma->vm_mm->context)
                        flush_tlb_range(vma, start, end);
                flush_cache_all();
                return;
        }

        if (vma->vm_mm->context == mfsp(3)) {
                flush_user_dcache_range_asm(start, end);
                if (vma->vm_flags & VM_EXEC)
                        flush_user_icache_range_asm(start, end);
                flush_tlb_range(vma, start, end);
                return;
        }

        pgd = vma->vm_mm->pgd;
        for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
                unsigned long pfn;
                pte_t *ptep = get_ptep(pgd, addr);
                if (!ptep)
                        continue;
                pfn = pte_pfn(*ptep);
                if (pfn_valid(pfn)) {
                        if (unlikely(vma->vm_mm->context)) {
                                flush_tlb_page(vma, addr);
                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                        } else {
                                __purge_cache_page(vma, addr, PFN_PHYS(pfn));
                        }
                }
        }
}

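/*
 * Flush a single user page.  With a live context the TLB entry is
 * purged first, then the caches are flushed through the alias; with
 * no context the cache lines are simply purged.
 */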
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        if (pfn_valid(pfn)) {
                if (likely(vma->vm_mm->context)) {
                        flush_tlb_page(vma, vmaddr);
                        __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
                } else {
                        __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
                }
        }
}

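/*
 * Flush/invalidate a kernel vmap range.  Presumably paired around I/O
 * on vmapped buffers: flush before the device reads, invalidate before
 * the CPU reads back.  Large ranges take the whole-cache path, same
 * threshold logic as the user-space flushes above.
 */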
void flush_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        flush_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        purge_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);