/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
  25. /*
  26. * flush_icache_range(start,end)
  27. *
  28. * Ensure that the I and D caches are coherent within specified region.
  29. * This is typically used when code has been written to a memory region,
  30. * and will be executed.
  31. *
  32. * - start - virtual start address of region
  33. * - end - virtual end address of region
  34. */
  35. ENTRY(flush_icache_range)
  36. /* FALLTHROUGH */
  37. /*
  38. * __flush_cache_user_range(start,end)
  39. *
  40. * Ensure that the I and D caches are coherent within specified region.
  41. * This is typically used when code has been written to a memory region,
  42. * and will be executed.
  43. *
  44. * - start - virtual start address of region
  45. * - end - virtual end address of region
  46. */
  47. ENTRY(__flush_cache_user_range)
  48. dcache_line_size x2, x3
  49. sub x3, x2, #1
  50. bic x4, x0, x3
  51. 1:
  52. user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
  53. add x4, x4, x2
  54. cmp x4, x1
  55. b.lo 1b
  56. dsb ish
  57. icache_line_size x2, x3
  58. sub x3, x2, #1
  59. bic x4, x0, x3
  60. 1:
  61. USER(9f, ic ivau, x4 ) // invalidate I line PoU
  62. add x4, x4, x2
  63. cmp x4, x1
  64. b.lo 1b
  65. dsb ish
  66. isb
  67. mov x0, #0
  68. ret
  69. 9:
  70. mov x0, #-EFAULT
  71. ret
  72. ENDPROC(flush_icache_range)
  73. ENDPROC(__flush_cache_user_range)
  74. /*
  75. * __flush_dcache_area(kaddr, size)
  76. *
  77. * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
  78. * are cleaned and invalidated to the PoC.
  79. *
  80. * - kaddr - kernel address
  81. * - size - size in question
  82. */
  83. ENTRY(__flush_dcache_area)
  84. dcache_by_line_op civac, sy, x0, x1, x2, x3
  85. ret
  86. ENDPIPROC(__flush_dcache_area)
  87. /*
  88. * __clean_dcache_area_pou(kaddr, size)
  89. *
  90. * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
  91. * are cleaned to the PoU.
  92. *
  93. * - kaddr - kernel address
  94. * - size - size in question
  95. */
  96. ENTRY(__clean_dcache_area_pou)
  97. dcache_by_line_op cvau, ish, x0, x1, x2, x3
  98. ret
  99. ENDPROC(__clean_dcache_area_pou)
  100. /*
  101. * __inval_cache_range(start, end)
  102. * - start - start address of region
  103. * - end - end address of region
  104. */
  105. ENTRY(__inval_cache_range)
  106. /* FALLTHROUGH */
  107. /*
  108. * __dma_inv_range(start, end)
  109. * - start - virtual start address of region
  110. * - end - virtual end address of region
  111. */
  112. __dma_inv_range:
  113. dcache_line_size x2, x3
  114. sub x3, x2, #1
  115. tst x1, x3 // end cache line aligned?
  116. bic x1, x1, x3
  117. b.eq 1f
  118. dc civac, x1 // clean & invalidate D / U line
  119. 1: tst x0, x3 // start cache line aligned?
  120. bic x0, x0, x3
  121. b.eq 2f
  122. dc civac, x0 // clean & invalidate D / U line
  123. b 3f
  124. 2: dc ivac, x0 // invalidate D / U line
  125. 3: add x0, x0, x2
  126. cmp x0, x1
  127. b.lo 2b
  128. dsb sy
  129. ret
  130. ENDPIPROC(__inval_cache_range)
  131. ENDPROC(__dma_inv_range)
  132. /*
  133. * __dma_clean_range(start, end)
  134. * - start - virtual start address of region
  135. * - end - virtual end address of region
  136. */
  137. __dma_clean_range:
  138. dcache_line_size x2, x3
  139. sub x3, x2, #1
  140. bic x0, x0, x3
  141. 1:
  142. alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
  143. dc cvac, x0
  144. alternative_else
  145. dc civac, x0
  146. alternative_endif
  147. add x0, x0, x2
  148. cmp x0, x1
  149. b.lo 1b
  150. dsb sy
  151. ret
  152. ENDPROC(__dma_clean_range)
  153. /*
  154. * __dma_flush_range(start, end)
  155. * - start - virtual start address of region
  156. * - end - virtual end address of region
  157. */
  158. ENTRY(__dma_flush_range)
  159. dcache_line_size x2, x3
  160. sub x3, x2, #1
  161. bic x0, x0, x3
  162. 1: dc civac, x0 // clean & invalidate D / U line
  163. add x0, x0, x2
  164. cmp x0, x1
  165. b.lo 1b
  166. dsb sy
  167. ret
  168. ENDPIPROC(__dma_flush_range)
  169. /*
  170. * __dma_map_area(start, size, dir)
  171. * - start - kernel virtual start address
  172. * - size - size of region
  173. * - dir - DMA direction
  174. */
  175. ENTRY(__dma_map_area)
  176. add x1, x1, x0
  177. cmp w2, #DMA_FROM_DEVICE
  178. b.eq __dma_inv_range
  179. b __dma_clean_range
  180. ENDPIPROC(__dma_map_area)
  181. /*
  182. * __dma_unmap_area(start, size, dir)
  183. * - start - kernel virtual start address
  184. * - size - size of region
  185. * - dir - DMA direction
  186. */
  187. ENTRY(__dma_unmap_area)
  188. add x1, x1, x0
  189. cmp w2, #DMA_TO_DEVICE
  190. b.ne __dma_inv_range
  191. ret
  192. ENDPIPROC(__dma_unmap_area)