
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

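/*
 * Cache geometry discovered from PDC at boot (see parisc_cache_init()).
 * dcache_stride/icache_stride are the byte strides used by the assembly
 * flush loops in pacache.S; split_tlb is set when the I- and D-TLB must
 * be purged separately.
 */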
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

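/*
 * Called when a PTE is installed.  If the kernel wrote to this page while
 * it had no user mapping (flush_dcache_page() deferred the flush by setting
 * PG_dcache_dirty), flush the kernel mapping now so reads through the new
 * user mapping see the data.
 */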
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

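/*
 * Space register hashing mixes space-id bits into the virtual index used
 * by the caches and TLB.  The congruent-aliasing assumptions made elsewhere
 * in this file need it off, so we disable it per CPU type and then verify
 * via PDC that no space-id bits are still in use.
 */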
void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

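/*
 * Flush one page through its physical address, using the special flush-only
 * alias mapping (see the comment in flush_dcache_page() below).  Preemption
 * is disabled so the D-cache flush and, for executable mappings, the
 * I-cache flush both run on the same CPU.
 */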
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */
		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
					old_addr, addr,
					mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;

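/*
 * Calibrate the flush thresholds at boot: time a whole-cache (and whole-TLB)
 * flush against a range flush over the kernel text, using the interval timer
 * (control register %cr16, read with mfctl(16)), and pick the break-even
 * size.  Ranges below the threshold are flushed piecewise, larger ones with
 * a full flush.
 */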
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	size = PAGE_SIZE;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	parisc_tlb_flush_threshold = size * alltime / rangetime;
	parisc_tlb_flush_threshold *= num_online_cpus();
	parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
	if (!parisc_tlb_flush_threshold)
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

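/*
 * Flush a page through its kernel mapping, then purge the kernel TLB entry.
 * Without a TLB entry the CPU cannot speculatively pull the lines back into
 * the cache (see the "TLB is the engine of coherence" comment above).
 */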
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags, size;

	size = (end - start);
	if (size >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	if (likely(!split_tlb)) {
		while (start < end) {
			purge_tlb_start(flags);
			mtsp(sid, 1);
			pdtlb(start);
			purge_tlb_end(flags);
			start += PAGE_SIZE;
		}
		return 0;
	}

	/* split TLB case */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

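/* Total length of all mappings in an mm, used to decide whether a full
   cache flush is cheaper than flushing each VMA by range. */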
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

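/* Walk the page tables for addr and return a pointer to the PTE, or NULL
   if any intermediate level is empty. */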
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big. */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

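	/* If this mm is the current address space (space register 3 holds
	   the current user space id), its user virtual addresses are live
	   and can be flushed directly by range. */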
	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

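/* Flush a user address range, falling back to a full cache flush when the
   range exceeds the calibrated threshold (see parisc_setup_cache_timing()). */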
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

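/* Flush a single user page: purge its TLB entry first so the CPU cannot
   speculatively refill the cache, then flush the cache lines themselves. */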
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}