  1. /*
  2. * This program is free software; you can redistribute it and/or
  3. * modify it under the terms of the GNU General Public License
  4. * as published by the Free Software Foundation; either version
  5. * 2 of the License, or (at your option) any later version.
  6. */
  7. #ifndef _ASM_POWERPC_CACHEFLUSH_H
  8. #define _ASM_POWERPC_CACHEFLUSH_H
  9. #ifdef __KERNEL__
  10. #include <linux/mm.h>
  11. #include <asm/cputable.h>
  12. #include <asm/cpu_has_feature.h>
  13. /*
  14. * No cache flushing is required when address mappings are changed,
  15. * because the caches on PowerPCs are physically addressed.
  16. */
  17. #define flush_cache_all() do { } while (0)
  18. #define flush_cache_mm(mm) do { } while (0)
  19. #define flush_cache_dup_mm(mm) do { } while (0)
  20. #define flush_cache_range(vma, start, end) do { } while (0)
  21. #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
  22. #define flush_icache_page(vma, page) do { } while (0)
  23. #define flush_cache_vmap(start, end) do { } while (0)
  24. #define flush_cache_vunmap(start, end) do { } while (0)
  25. #ifdef CONFIG_BOOK3S_64
  26. /*
  27. * Book3s has no ptesync after setting a pte, so without this ptesync it's
  28. * possible for a kernel virtual mapping access to return a spurious fault
  29. * if it's accessed right after the pte is set. The page fault handler does
  30. * not expect this type of fault. flush_cache_vmap is not exactly the right
  31. * place to put this, but it seems to work well enough.
  32. */
  33. #define flush_cache_vmap(start, end) do { asm volatile("ptesync"); } while (0)
  34. #else
  35. #define flush_cache_vmap(start, end) do { } while (0)
  36. #endif
/* This arch provides its own flush_dcache_page() (see below). */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
/* No maintenance needed around pagecache rmap locking on these CPUs. */
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
/* Make the icache coherent with prior data stores over [start, stop). */
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
int len);
/* Flush dcache and invalidate icache for one page, by kernel VA / by page. */
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
/*
 * Flush-by-physical-address is only implemented on classic 32-bit
 * (non-BookE) parts; reaching this stub elsewhere is a programming error.
 */
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
BUG();
}
#endif
  55. #ifdef CONFIG_PPC32
  56. /*
  57. * Write any modified data cache blocks out to memory and invalidate them.
  58. * Does not invalidate the corresponding instruction cache blocks.
  59. */
  60. static inline void flush_dcache_range(unsigned long start, unsigned long stop)
  61. {
  62. void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
  63. unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
  64. unsigned long i;
  65. for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
  66. dcbf(addr);
  67. mb(); /* sync */
  68. }
  69. /*
  70. * Write any modified data cache blocks out to memory.
  71. * Does not invalidate the corresponding cache lines (especially for
  72. * any corresponding instruction cache).
  73. */
  74. static inline void clean_dcache_range(unsigned long start, unsigned long stop)
  75. {
  76. void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
  77. unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
  78. unsigned long i;
  79. for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
  80. dcbst(addr);
  81. mb(); /* sync */
  82. }
  83. /*
  84. * Like above, but invalidate the D-cache. This is used by the 8xx
  85. * to invalidate the cache so the PPC core doesn't get stale data
  86. * from the CPM (no cache snooping here :-).
  87. */
  88. static inline void invalidate_dcache_range(unsigned long start,
  89. unsigned long stop)
  90. {
  91. void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
  92. unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
  93. unsigned long i;
  94. for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
  95. dcbi(addr);
  96. mb(); /* sync */
  97. }
  98. #endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
/* 64-bit provides these out of line instead of the inline PPC32 versions. */
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif
/*
 * Copy into a user page that may be executed from: after the copy, make
 * the icache coherent for the written range via flush_icache_user_range().
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
/* Plain read: no cache maintenance needed on physically-addressed caches. */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHEFLUSH_H */