/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
#define flush_cache_vmap(start, end)	do { asm volatile("ptesync" ::: "memory"); } while (0)
#else
#define flush_cache_vmap(start, end)	do { } while (0)
#endif
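
/*
 * Illustrative sketch (assumed call path, not defined in this header):
 * vmap() and vmalloc() invoke flush_cache_vmap() after installing the
 * new kernel PTEs, so the ptesync above orders the PTE stores ahead of
 * any access through the fresh mapping:
 *
 *	void *p = vmalloc(PAGE_SIZE);	// flush_cache_vmap() runs inside
 *	((char *)p)[0] = 1;		// no spurious fault on Book3s
 */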

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long start, unsigned long stop);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}
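
/*
 * Worked example of the rounding above (assuming 32-byte cache lines):
 * for start = 0x1005 and stop = 0x1043, addr rounds down to 0x1000 and
 * size = 0x43 + 0x1f = 0x62, so size >> 5 = 3 iterations flush lines
 * 0x1000, 0x1020 and 0x1040, covering the whole [start, stop) range.
 */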

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}
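
/*
 * Usage sketch (illustrative; tx_buf, data and len are assumed
 * locals): cleaning is enough when the CPU produces data for a
 * non-snooping device to read, e.g. before an outbound DMA transfer:
 *
 *	memcpy(tx_buf, data, len);
 *	clean_dcache_range((unsigned long)tx_buf,
 *			   (unsigned long)tx_buf + len);
 *	// the device can now fetch up-to-date bytes from memory
 */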

/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}
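
/*
 * Usage sketch (illustrative; rx_buf and len are assumed locals): the
 * 8xx CPM case above boils down to invalidating before the core reads
 * device-written memory:
 *
 *	invalidate_dcache_range((unsigned long)rx_buf,
 *				(unsigned long)rx_buf + len);
 *	// CPM/DMA fills rx_buf; subsequent CPU reads see fresh data
 *
 * Note that dcbi discards modified lines, so the range must not hold
 * dirty data the CPU still needs.
 */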
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif
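
/*
 * copy_to_user_page() is used when the kernel writes into another
 * task's pages through a kernel mapping (e.g. ptrace planting a
 * breakpoint); the trailing icache flush keeps the instruction cache
 * coherent with the freshly written bytes.
 */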
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
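
/*
 * Illustrative call site (sketch; maddr, offset, buf and len are
 * assumed locals, in the style of access_process_vm()):
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *
 * where maddr is the kernel mapping of the target page.
 */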

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHEFLUSH_H */